code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
from django.db.backends.sqlite3.base import DatabaseWrapper
from io import BytesIO
import boto3
import botocore
import hashlib
import logging
import os
class DatabaseWrapper(DatabaseWrapper):
    """
    Wraps the normal Django SQLite DB engine in an S3 backer!

    On startup the SQLite file is pulled from S3 into /tmp; on close() it is
    pushed back to S3 if (and only if) its md5 has changed since load time.
    """

    def load_remote_db(self):
        """
        Download the remote S3 DB into /tmp, unless the local copy is already
        current (checked via a conditional GET with the local file's md5 as
        the ETag).
        """
        signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
        s3 = boto3.resource(
            's3',
            config=botocore.client.Config(signature_version=signature_version),
        )
        if '/tmp/' not in self.settings_dict['NAME']:
            try:
                etag = ''
                if os.path.isfile('/tmp/' + self.settings_dict['NAME']):
                    m = hashlib.md5()
                    with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                        m.update(f.read())
                    # In general the ETag is the md5 of the file; in some cases
                    # (e.g. multipart uploads) it's not, and then we will just
                    # re-download the file — no other way to tell.
                    etag = m.hexdigest()
                obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
                obj_bytes = obj.get(IfNoneMatch=etag)["Body"]  # Will throw E on 304 or 404
                with open('/tmp/' + self.settings_dict['NAME'], 'wb') as f:
                    f.write(obj_bytes.read())
                # Re-hash what we actually wrote so close() can detect changes.
                m = hashlib.md5()
                with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                    m.update(f.read())
                self.db_hash = m.hexdigest()
            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == "304":
                    logging.debug("ETag matches md5 of local copy, using local copy of DB!")
                    self.db_hash = etag
                else:
                    logging.debug("Couldn't load remote DB object.")
            except Exception as e:
                # Unexpected failure; fall back to whatever local copy exists.
                logging.debug(e)
        # SQLite DatabaseWrapper will treat our tmp file as the normal DB now.
        # Check because Django likes to call this function a lot more than it should.
        if '/tmp/' not in self.settings_dict['NAME']:
            self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
            self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME']
        # Make sure the local file exists so SQLite can open it.
        if not os.path.isfile(self.settings_dict['NAME']):
            open(self.settings_dict['NAME'], 'a').close()
        logging.debug("Loaded remote DB!")

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # md5 of the local copy at load time; used to skip redundant uploads.
        self.db_hash = None
        self.load_remote_db()

    def close(self, *args, **kwargs):
        """
        Engine closed, copy file to DB if it has changed.
        """
        super(DatabaseWrapper, self).close(*args, **kwargs)
        signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
        s3 = boto3.resource(
            's3',
            config=botocore.client.Config(signature_version=signature_version),
        )
        try:
            with open(self.settings_dict['NAME'], 'rb') as f:
                fb = f.read()
            m = hashlib.md5()
            m.update(fb)
            if self.db_hash == m.hexdigest():
                logging.debug("Database unchanged, not saving to remote DB!")
                return
            bytesIO = BytesIO()
            bytesIO.write(fb)
            bytesIO.seek(0)
            s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
            # FIX: boto3 resource actions accept keyword arguments only; the
            # previous stray 'rb' positional argument was silently ignored and
            # the unused `result =` binding is dropped.
            s3_object.put(Body=bytesIO)
        except Exception as e:
            logging.debug(e)
        logging.debug("Saved to remote DB!")
# Zappa Env
Zappa-env is a simple package that allows you to call Zappa commands from a custom virtualenv for build purposes
## Installing & Usage
Install zappa-env
`pip install zappa-env`
Run your Zappa commands with `zappa-env` followed by your virtualenv and Zappa command
`zappa-env my-virtual-env zappa update prod`
## Requirements
Zappa-env currently assumes that your virtualenvs are installed with the `virtualenvwrapper` package and located in `$HOME/.virtualenvs/`. Currently this is unable to be changed unless you modify the source code.
## Why?
The reason I made this project was because I was getting annoyed activating a special deployment virtualenv just to deploy or package my Zappa repo. This small project combines the `workon virtualenv` and zappa commands so I can run my Zappa commands in just one line.
## Contributions
Submit an issue or pull request and I'll do my best to merge the PR or help with the issue.
## To-do
- [ ] Support running zappa commands with a requirements.txt file (eliminates need for virtualenvs)
- [ ] Support Anaconda
- [ ] Support stock Python virtualenvs
| zappa-env | /zappa-env-0.252.tar.gz/zappa-env-0.252/README.md | README.md |
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/anush0247/zappa-file-widget/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Django Zappa File Widget could always use more documentation, whether as part of the
official Django Zappa File Widget docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/anush0247/zappa-file-widget/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zappa-file-widget` for local development.
1. Fork the `zappa-file-widget` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zappa-file-widget.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zappa-file-widget
$ cd zappa-file-widget/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zappa_file_widget tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, and 3.3, and for PyPy. Check
https://travis-ci.org/anush0247/zappa-file-widget/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_zappa_file_widget
| zappa-file-widget-mediabucket | /zappa-file-widget-mediabucket-0.1.7.tar.gz/zappa-file-widget-mediabucket-0.1.7/CONTRIBUTING.rst | CONTRIBUTING.rst |
from django import forms
from django.conf import settings
from django.template import Template, Context
from django.utils.safestring import mark_safe
class URLWidget(forms.widgets.URLInput):
    """
    URL input rendered as a direct-to-S3 file uploader.

    The browser uploads the chosen file straight to S3 via the AWS JS SDK,
    then writes the resulting public URL into the (hidden) URL input that
    Django actually validates and stores.

    NOTE(security): the rendered HTML embeds the AWS access key *and secret
    key*. Only use credentials scoped to upload-only access.
    """

    # S3 key prefix (relative to MEDIAFILES_LOCATION) for uploaded files.
    upload_to = ''

    def __init__(self, *args, **kwargs):
        # 'upload_to' is our own kwarg; strip it before the base widget
        # sees the remaining arguments.
        self.upload_to = kwargs.pop('upload_to', self.upload_to)
        super(URLWidget, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs=None):
        """Render the uploader markup plus the hidden URL input."""
        html_template = """
        <script src="https://sdk.amazonaws.com/js/aws-sdk-2.1.24.min.js"></script>
        <script type="text/javascript">
            function {{field_name}}_previewFile() {
                AWS.config.update({
                    accessKeyId : '{{AWS_ACCESS_KEY_ID}}',
                    secretAccessKey : '{{AWS_SECRET_ACCESS_KEY}}'
                });
                AWS.config.region = '{{AWS_DEFAULT_REGION}}';
                AWS.config.sslEnabled = true;
                AWS.config.logger = true;

                var bucket = new AWS.S3();
                var {{field_name}}file_input = document.getElementById('id_{{field_name}}');
                var {{field_name}}file_url = document.getElementById('id_{{field_name}}_url');
                var {{field_name}}file_loading = document.getElementById('id_{{field_name}}_loading');
                var file = document.getElementById('id_{{field_name}}_tmp').files[0];

                var reader = new FileReader();
                reader.addEventListener("load", function () {
                    var params = {
                        Bucket: '{{AWS_STORAGE_BUCKET_NAME}}',
                        Key: '{{prefix}}' + file.name,
                        ContentType: file.type,
                        Body: file,
                        Prefix: '{{prefix}}',
                        ACL: 'public-read'
                    };
                    bucket.upload(params, function (err, data) {
                        // results.innerHTML = err ? 'ERROR!' : 'UPLOADED.';
                        if (err) {
                            alert('Failed to Upload file to s3 ' + err);
                        }
                        else{
                            var s3_key_url = "{{ prefix_url }}" + encodeURI(file.name);
                            {{field_name}}file_url.href = s3_key_url;
                            {{field_name}}file_url.innerHTML = s3_key_url;
                            {{field_name}}file_url.style = 'display:block';
                            {{field_name}}file_loading.style = 'display:none';
                            {{field_name}}file_input.value = s3_key_url;
                        }
                    }).on('httpUploadProgress',function(progress) {
                        // Log Progress Information
                        {{field_name}}file_loading.style = 'display:block';
                        var msg = 'Please Wait, Uploading ' + file.name + ' ('
                        {{field_name}}file_loading.innerHTML = msg + Math.round(progress.loaded / progress.total * 100) +'% done)';
                    });
                }, false);

                if (file) {
                    reader.readAsDataURL(file);
                }
            }
        </script>
        <input type="file" id="id_{{field_name}}_tmp"
               onchange="{{field_name}}_previewFile()"/>
        <a href="{{field_value}}" id="id_{{field_name}}_url" target="_blank"
           style="display:{% if field_value %}block{% else %}none{% endif%}" >{{field_value}}</a><br>
        <p id="id_{{field_name}}_loading" style="display:none" ></p>
        {{parent_html}}
        """
        # BUG FIX: render() may legitimately be called with attrs=None; the
        # unconditional item assignment below raised a TypeError.
        if attrs is None:
            attrs = {}
        attrs['type'] = 'hidden'
        parent_html = super(URLWidget, self).render(name, value, attrs=attrs)
        template = Template(html_template)
        prefix = settings.MEDIAFILES_LOCATION + "/" + self.upload_to
        prefix_url = settings.MEDIA_URL + self.upload_to
        # BUG FIX: the fallback for each MEDIA_* setting used to be the
        # *literal string* of the plain setting name (e.g. the access key fell
        # back to the text 'AWS_ACCESS_KEY_ID', and the host fallback then
        # crashed the region parsing). Fall back to the plain settings instead.
        aws_access_key_id = getattr(settings, 'MEDIA_AWS_ACCESS_KEY_ID',
                                    getattr(settings, 'AWS_ACCESS_KEY_ID', ''))
        aws_secret_access_key = getattr(settings, 'MEDIA_AWS_SECRET_ACCESS_KEY',
                                        getattr(settings, 'AWS_SECRET_ACCESS_KEY', ''))
        s3_host = (getattr(settings, 'MEDIA_AWS_S3_HOST',
                           getattr(settings, 'AWS_S3_HOST', None))
                   or 's3-ap-southeast-1.amazonaws.com')
        # Derive the region from a host like 's3-<region>.amazonaws.com'.
        default_region = s3_host.split(".amazonaws.com")[0].split("s3-")[1]
        aws_storage_bucket_name = getattr(settings, 'MEDIA_AWS_STORAGE_BUCKET_NAME',
                                          getattr(settings, 'AWS_STORAGE_BUCKET_NAME', ''))
        context = Context({
            'field_name': name,
            "field_value": value,
            'parent_html': parent_html,
            'prefix': prefix,
            "prefix_url": prefix_url,
            'AWS_ACCESS_KEY_ID': aws_access_key_id,
            'AWS_SECRET_ACCESS_KEY': aws_secret_access_key,
            'AWS_DEFAULT_REGION': default_region,
            'AWS_STORAGE_BUCKET_NAME': aws_storage_bucket_name,
        })
        html = template.render(context)
        return mark_safe(html)
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/anush0247/zappa-file-widget/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Django Zappa File Widget could always use more documentation, whether as part of the
official Django Zappa File Widget docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/anush0247/zappa-file-widget/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zappa-file-widget` for local development.
1. Fork the `zappa-file-widget` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zappa-file-widget.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zappa-file-widget
$ cd zappa-file-widget/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zappa_file_widget tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, and 3.3, and for PyPy. Check
https://travis-ci.org/anush0247/zappa-file-widget/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_zappa_file_widget
| zappa-file-widget | /zappa-file-widget-0.1.5.tar.gz/zappa-file-widget-0.1.5/CONTRIBUTING.rst | CONTRIBUTING.rst |
from django import forms
from django.conf import settings
from django.template import Template, Context
from django.utils.safestring import mark_safe
class URLWidget(forms.widgets.URLInput):
upload_to = ''
def __init__(self, *args, **kwargs):
self.upload_to = kwargs.pop('upload_to', self.upload_to)
super(URLWidget, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None):
html_template = """
<script src="https://sdk.amazonaws.com/js/aws-sdk-2.1.24.min.js"></script>
<script type="text/javascript">
function {{field_name}}_previewFile() {
AWS.config.update({
accessKeyId : '{{AWS_ACCESS_KEY_ID}}',
secretAccessKey : '{{AWS_SECRET_ACCESS_KEY}}'
});
AWS.config.region = '{{AWS_DEFAULT_REGION}}';
AWS.config.sslEnabled = true;
AWS.config.logger = true;
var bucket = new AWS.S3();
var {{field_name}}file_input = document.getElementById('id_{{field_name}}');
var {{field_name}}file_url = document.getElementById('id_{{field_name}}_url');
var {{field_name}}file_loading = document.getElementById('id_{{field_name}}_loading');
var file = document.getElementById('id_{{field_name}}_tmp').files[0];
var reader = new FileReader();
reader.addEventListener("load", function () {
var params = {
Bucket: '{{AWS_STORAGE_BUCKET_NAME}}',
Key: '{{prefix}}' + file.name,
ContentType: file.type,
Body: file,
Prefix: '{{prefix}}',
ACL: 'public-read'
};
bucket.upload(params, function (err, data) {
// results.innerHTML = err ? 'ERROR!' : 'UPLOADED.';
if (err) {
alert('Failed to Upload file to s3 ' + err);
}
else{
var s3_key_url = "{{ prefix_url }}" + encodeURI(file.name);
{{field_name}}file_url.href = s3_key_url;
{{field_name}}file_url.innerHTML = s3_key_url;
{{field_name}}file_url.style = 'display:block';
{{field_name}}file_loading.style = 'display:none';
{{field_name}}file_input.value = s3_key_url;
}
}).on('httpUploadProgress',function(progress) {
// Log Progress Information
{{field_name}}file_loading.style = 'display:block';
var msg = 'Please Wait, Uploading ' + file.name + ' ('
{{field_name}}file_loading.innerHTML = msg + Math.round(progress.loaded / progress.total * 100) +'% done)';
});
}, false);
if (file) {
reader.readAsDataURL(file);
}
}
</script>
<input type="file" id="id_{{field_name}}_tmp"
onchange="{{field_name}}_previewFile()"/>
<a href="{{field_value}}" id="id_{{field_name}}_url" target="_blank"
style="display:{% if field_value %}block{% else %}none{% endif%}" >{{field_value}}</a><br>
<p id="id_{{field_name}}_loading" style="display:none" ></p>
{{parent_html}}
"""
attrs['type'] = 'hidden'
parent_html = super(URLWidget, self).render(name, value, attrs=attrs)
template = Template(html_template)
s3_host = 's3-ap-southeast-1.amazonaws.com'
default_region = getattr(settings, 'AWS_S3_HOST', s3_host).split(".amazonaws.com")[0].split("s3-")[1]
prefix = settings.MEDIAFILES_LOCATION + "/" + self.upload_to
context = Context({
'field_name': name,
"field_value": value,
'parent_html': parent_html,
'prefix': prefix,
"prefix_url": settings.MEDIA_URL + self.upload_to,
'AWS_ACCESS_KEY_ID': settings.AWS_ACCESS_KEY_ID,
'AWS_SECRET_ACCESS_KEY': settings.AWS_SECRET_ACCESS_KEY,
'AWS_DEFAULT_REGION': default_region,
'AWS_STORAGE_BUCKET_NAME': settings.AWS_STORAGE_BUCKET_NAME
})
html = template.render(context)
return mark_safe(html) | zappa-file-widget | /zappa-file-widget-0.1.5.tar.gz/zappa-file-widget-0.1.5/zappa_file_widget/url_widget.py | url_widget.py |
import os
import subprocess
import json
from datetime import datetime
import click
import typing as t
import click
from click.decorators import command
from click.core import (
Command,
Context,
Group,
Option,
Parameter,
ParameterSource,
HelpFormatter,
)
from gettext import gettext as _
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def colored_echo(text, color=None, bold=False):
    """Echo *text* via click; unindented lines default to bold cyan."""
    # NOTE(review): the two startswith() prefixes appear identical in this
    # copy of the source; the original likely checked two indent widths.
    # Either way the second test is implied by the first, so behavior holds.
    is_indented = text.startswith(" ") or text.startswith("  ")
    if color is None and not is_indented:
        color, bold = "cyan", True
    return click.echo(click.style(text, fg=color, bold=bold))
class ColoredGroup(Group):
    """Click group whose --help output is colorized and grouped by topic."""

    # Subcommands are bucketed into these display sections by name suffix.
    subcommand_sections = [
        {"name": "package", "ends_with": "pkgs"},
        {"name": "layer", "ends_with": "layer"},
        {"name": "allinone", "ends_with": "allinone"},
    ]

    def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
        """Write the option table (bold header), then the command sections."""
        option_rows = [
            record
            for record in (p.get_help_record(ctx) for p in self.get_params(ctx))
            if record is not None
        ]
        if option_rows:
            with formatter.section(_(click.style("Options", bold=True))):
                formatter.write_dl(option_rows)
        self.format_commands(ctx, formatter)

    def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
        """Write all visible subcommands, grouped under colored section headers."""
        visible = []
        for sub_name in reversed(self.list_commands(ctx)):
            cmd = self.get_command(ctx, sub_name)
            # The tool may lie about a command, or hide it — skip those.
            if cmd is None or cmd.hidden:
                continue
            visible.append((sub_name, cmd))
        if not visible:
            return
        # Allow for 3x the default spacing when truncating short help.
        limit = formatter.width - 6 - max(len(entry[0]) for entry in visible)
        # Bucket each command under the first section whose suffix matches.
        # Iterating `visible` in reverse restores declaration order, matching
        # the insert-after-header behavior of the straightforward approach.
        buckets = {section["name"]: [] for section in self.subcommand_sections}
        for sub_name, cmd in reversed(visible):
            short_help = cmd.get_short_help_str(limit)
            for section in self.subcommand_sections:
                if sub_name.endswith(section["ends_with"]):
                    buckets[section["name"]].append(
                        (
                            click.style(" " + sub_name, bold=True),
                            click.style(short_help, fg="bright_black"),
                        )
                    )
                    break
        rows = []
        for section in self.subcommand_sections:
            rows.append(("", ""))
            rows.append(
                (click.style(f"[{section['name']}]", bold=True, fg="red"), "")
            )
            rows.extend(buckets[section["name"]])
        with formatter.section(
            _(click.style("Available subcommands", bold=True))
        ):
            formatter.write_dl(rows)
def colored_group(
    name: t.Optional[str] = None, **attrs: t.Any
) -> t.Callable[[F], ColoredGroup]:
    """
    Decorator factory: identical to :func:`click.group`, except the created
    group is a :class:`ColoredGroup` (grouped, colorized --help output).
    """
    attrs.setdefault("cls", ColoredGroup)
    return t.cast(ColoredGroup, command(name, **attrs))
def get_django_project_name():
    """Return the first top-level directory containing a wsgi.py, else None."""
    for entry in os.listdir("."):
        # Only directories can be Django project packages.
        if os.path.isdir(entry) and "wsgi.py" in os.listdir(entry):
            return entry
    return None
@colored_group(help="Manage layers for zappadock")
def cli():
    """Root command group — all the work happens in the subcommands."""
    pass
@cli.command(
    help="Check packages in venv",
)
@click.argument("venv_type", type=click.Choice(["code", "layer"]))
def checkpkgs(venv_type):
    """List what is installed in the given zappa venv's site-packages."""
    venv_name = f"zappa-{venv_type}-venv"
    # The ZappaDock image pins python3.9, so the site-packages path is fixed.
    site_packages_path = os.path.join(
        os.getcwd(), venv_name, "lib", "python3.9", "site-packages"
    )
    colored_echo(f"Checking site packages in {site_packages_path}")
    # `pip freeze --path` inspects that directory without activating the venv.
    subprocess.call(f"pip freeze --path {site_packages_path}", shell=True)
@cli.command(
    help="Install packages in venv (default: in layer)",
)
@click.argument("venv_type", type=click.Choice(["code", "layer"]), default="layer")
def installpkgs(venv_type):
    """Install requirements.txt into the given zappa venv's site-packages."""
    venv_name = f"zappa-{venv_type}-venv"
    # The ZappaDock image pins python3.9, so the site-packages path is fixed.
    site_packages_path = os.path.join(
        os.getcwd(), venv_name, "lib", "python3.9", "site-packages"
    )
    colored_echo(f"Installing packages to {site_packages_path}")
    # `pip install -t` targets that directory without activating the venv.
    subprocess.call(
        f"pip install -r requirements.txt -t {site_packages_path}", shell=True
    )
@cli.command(
    help="Make layer.zip from zappa-layer-venv",
)
@click.argument("mode", type=click.Choice(["new", "update"]), default="update")
def makelayer(mode):
    """
    Zip the layer venv's site-packages into ./layer.zip.

    AWS Lambda layers expect packages under a top-level 'python/' folder, so
    site-packages is temporarily renamed to 'python' while zipping and is
    restored afterwards in the finally block.

    :mode: 'new' rebuilds the archive from scratch; 'update' (default) only
           refreshes changed entries with `zip -u`.
    """
    venv_type = "layer"
    venv_name = f"zappa-{venv_type}-venv"
    site_packages_path = os.path.join(
        os.getcwd(), venv_name, "lib", "python3.9", "site-packages"
    )
    parent_of_site_package_path = os.path.dirname(site_packages_path)
    colored_echo("Site packages path:")
    colored_echo(" " + site_packages_path)
    # change site_packages_path name to python
    colored_echo("Change site_packages_path name as python...")
    try:
        subprocess.call(
            f"cd /{parent_of_site_package_path} && mv site-packages python", shell=True
        )
        colored_echo(" Done.")
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. subprocess.call() does not raise on a failed
        # command, so this only guards against OS-level launch errors.
        pass
    else:
        colored_echo(f"Creating layer zip with {mode} mode...")
        if mode == "new":
            # Start from scratch: drop any stale archive first.
            subprocess.call(
                f"cd /{parent_of_site_package_path} && rm layer.zip", shell=True
            )
            subprocess.call(
                f'cd /{parent_of_site_package_path} && zip --quiet -r -v -X layer.zip python -x "**/__pycache__/*"',
                shell=True,
                stdout=subprocess.DEVNULL,
            )
        elif mode == "update":
            subprocess.call(
                f'cd /{parent_of_site_package_path} && zip -u --quiet -r -v -X layer.zip python -x "**/__pycache__/*"',
                shell=True,
                stdout=subprocess.DEVNULL,
            )
        colored_echo(" Done.")
    finally:
        # Always restore the directory name so the venv keeps working.
        colored_echo("Restore site_packages_path name...")
        subprocess.call(
            f"cd /{parent_of_site_package_path} && mv python site-packages", shell=True
        )
        colored_echo(" Done.")
    # move layer to current directory
    colored_echo("Copy layer to current directory...")
    subprocess.call(
        f"cd /{parent_of_site_package_path} && cp layer.zip /{os.getcwd()}/layer.zip",
        shell=True,
    )
    colored_echo(" Done.")
    colored_echo("Layer zip file created at:")
    colored_echo(f" {os.getcwd()}/layer.zip")
@cli.command(
    help="Publish layer to AWS",
)
@click.option("--profile", "-p", help="AWS profile name, defalult: default")
@click.option("--layer_name", "-l", help="Layer name, default: [project_name]_layer")
@click.option("--region", "-r", default="ap-northeast-2", help="AWS region")
@click.option(
    "--project_name", "-n", help="project name, default: [django project name]"
)
def publishlayer(profile, layer_name, region, project_name):
    """Publish ./layer.zip as a new version of the project's Lambda layer."""
    # Resolve the defaults that click cannot compute statically.
    if not project_name:
        project_name = get_django_project_name()
    if layer_name is None:
        layer_name = f"{project_name}_layer"
    if not profile:
        profile = "default"
    # Warn if the requested profile is missing from ~/.aws/credentials.
    aws_credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials")
    with open(aws_credentials_path, "r") as f:
        if f"[{profile}]" not in f.read():
            colored_echo(
                f"Profile {profile} not found in {aws_credentials_path}, using default profile"
            )
        else:
            colored_echo(f"Using aws profile:")
            colored_echo(f" {profile}")
    colored_echo("PublishLayer command:")
    colored_echo(
        f" aws lambda publish-layer-version\n"
        + f" --zip-file fileb://layer.zip\n"
        + f" --layer-name {layer_name}\n"
        + f" --profile {profile}\n"
        + f" --region {region}\n"
    )
    colored_echo("Publishing layer...")
    publish_command = (
        f"aws lambda publish-layer-version"
        + f" --zip-file fileb://layer.zip"
        + f" --layer-name {layer_name}"
        + f" --profile {profile}"
        + f" --region {region}"
    )
    layer_output = subprocess.check_output([publish_command], shell=True)
    click.echo(layer_output)
    colored_echo(" Done.")
@cli.command(
    help="Update zappa_settings.json with layer ARN",
)
@click.option("--profile", "-p", help="AWS profile name, default: default")
@click.option("--layer_name", "-l", help="Layer name, default: [project_name]_layer")
@click.option("--region", "-r", default="ap-northeast-2", help="AWS region")
# BUG FIX: project_name had no @click.option, so invoking this subcommand
# directly from the CLI raised TypeError (it only worked via ctx.invoke()
# from allinone). The option now mirrors publishlayer's.
@click.option(
    "--project_name", "-n", help="project name, default: [django project name]"
)
def updatezappalayer(profile, layer_name, region, project_name):
    """
    Point the first layer ARN in zappa_settings.json at the latest
    published version of the project's layer.
    """
    if not project_name:
        project_name = get_django_project_name()
    if layer_name is None:
        layer_name = f"{project_name}_layer"
    if not profile:
        profile = "default"
    # Warn if the requested profile is missing from ~/.aws/credentials.
    aws_credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials")
    with open(aws_credentials_path, "r") as f:
        if f"[{profile}]" not in f.read():
            colored_echo(
                f"Profile {profile} not found in {aws_credentials_path}, using default profile"
            )
        else:
            colored_echo(f"Using aws profile:")
            colored_echo(f" {profile}")
    colored_echo("Latest layer version:")
    # Ask AWS for the most recent version number of the layer.
    layer_output = subprocess.check_output(
        [
            f"aws lambda list-layer-versions"
            + f" --query LayerVersions[0].Version "
            + f" --layer-name {layer_name}"
            + f" --profile {profile}"
            + f" --region {region}",
        ],
        shell=True,
    )
    colored_echo(f" {int(layer_output)}")
    latest_layer_version = int(layer_output)
    with open("zappa_settings.json", "r") as f:
        zappa_settings = json.load(f)
    # Replace only the version suffix of the first layer ARN
    # (arn:aws:lambda:<region>:<acct>:layer:<name>:<version>).
    layers = zappa_settings["dev"]["layers"]
    layer = layers[0]
    *layer_info, old_layer_version = layer.split(":")
    new_layer = ":".join(layer_info) + ":" + str(latest_layer_version)
    colored_echo("New layer info:")
    colored_echo(f" {new_layer}")
    layers[0] = new_layer
    zappa_settings["dev"]["layers"] = layers
    # save zappa_settings.json
    with open("zappa_settings.json", "w") as f:
        colored_echo("Save zappa_settings.json")
        json.dump(zappa_settings, f, indent=4)
    # print zappa_settings.json pretty
    click.echo(json.dumps(zappa_settings, indent=4))
    colored_echo(" Done.")
    colored_echo("Ready to deploy with Zappa", color="green", bold=True)
    colored_echo(f' Use {click.style("zappa deploy dev", bold=True)}')
    colored_echo(f' or {click.style("zappa update dev", bold=True)}')
@cli.command(
    help="Execute all commands with one command",
)
@click.option("--profile", "-p", help="AWS profile name, defalult: [project_name]_dev")
@click.option("--layer_name", "-l", help="Layer name, default: [project_name]_layer")
@click.option("--region", "-r", default="ap-northeast-2", help="AWS region")
@click.option(
    "--project_name", "-n", help="project name, default: [django project name]"
)
@click.pass_context
def allinone(context, profile, layer_name, region, project_name):
    """Run installpkgs, makelayer, publishlayer and updatezappalayer in order."""
    # Options shared by the publish/update steps.
    shared_options = dict(
        profile=profile,
        layer_name=layer_name,
        region=region,
        project_name=project_name,
    )
    context.invoke(installpkgs)
    context.invoke(makelayer)
    context.invoke(publishlayer, **shared_options)
    context.invoke(updatezappalayer, **shared_options)
@cli.command(
    help="Execute all commands, except updatelayer",
)
@click.option("--profile", "-p", help="AWS profile name, defalult: default")
@click.option("--layer_name", "-l", help="Layer name, default: [project_name]_layer")
@click.option("--region", "-r", default="ap-northeast-2", help="AWS region")
@click.option(
    "--project_name", "-n", help="project name, default: [django project name]"
)
@click.pass_context
def no_update_allinone(context, profile, layer_name, region, project_name):
    """Like allinone, but stops before touching zappa_settings.json."""
    context.invoke(installpkgs)
    context.invoke(makelayer)
    context.invoke(
        publishlayer,
        profile=profile,
        layer_name=layer_name,
        region=region,
        project_name=project_name,
    )
# Script entry point: dispatch to the click group.
if __name__ == "__main__":
    cli()
# Zappa Bot Manage
### How to Create a New Release
```bash
python3 setup.py sdist bdist_wheel
python3 -m pip install --user --upgrade twine
twine upload --skip-existing dist/*
```
### To Do
- [x] Repo Name
- `zappa_manage`
- [x] Figure out how to publish to `edX.org` repo
- Change url in `setup.py`
- [x] Ask Cory about ...
- `zappa_manage/tests/` : Is it necessary? If so, here's the [guide](https://python-packaging.readthedocs.io/en/latest/testing.html). Otherwise, delete the directory.
- No need for tests at the moment.
- `bin/` : Does this have CLI usage? If so, here's the [guide](https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html). Otherwise, delete the repository.
- It does have CLI usage since it's used when deploying Lambda environments in GoCD.
| zappa-manage | /zappa-manage-0.0.3.tar.gz/zappa-manage-0.0.3/README.md | README.md |
import boto3
import click
from pybase64 import b64encode
from asym_crypto_yaml import (
load, Encrypted, decrypt_value,
load_private_key_from_file,
load_private_key_from_string
)
def perform_deploy_lambda_envs(config_file_path, private_key_content, private_key_path, kms_key_arn, lambda_name):
    """
    Resolve the application's private key, then push its config (secrets
    re-encrypted with KMS) to the corresponding Lambda's environment.

    :config_file_path = path to config file
    :private_key_content = content of private key
    :private_key_path = path to the private key
    :kms_key_arn = arn for an aws kms_key
    :lambda_name = name of an aws lambda function
    """
    if private_key_path is not None:
        private_key = load_private_key_from_file(private_key_path)
    elif private_key_content is not None:
        # GoCD mangles the PEM when it is passed as an environment variable:
        # spaces become newlines and the BEGIN/END markers get split across
        # lines. Undo that before parsing the key.
        fixed = private_key_content.replace(' ', '\n')
        fixed = fixed.replace('-----BEGIN\nRSA\nPRIVATE\nKEY-----', '-----BEGIN RSA PRIVATE KEY-----')
        fixed = fixed.replace('-----END\nRSA\nPRIVATE\nKEY-----', '-----END RSA PRIVATE KEY-----')
        private_key = load_private_key_from_string(fixed.encode('utf-8'))
    else:
        private_key = None
    if private_key is None:
        raise ValueError('You must specify the private key either by PRIVATE_KEY ENV, or with private-key-path')
    push_config_and_secrets_to_lambda_env(config_file_path, private_key, kms_key_arn, lambda_name)
def push_config_and_secrets_to_lambda_env(config_file_path, private_key, kms_key_arn, lambda_name):
    """
    Push the application's configuration and secret (encrypted) values to
    the corresponding Lambda function's environment variables. Secrets are
    decrypted with the app's private key and re-encrypted with KMS; the
    application must KMS-decrypt them at runtime.

    :config_file_path = path to config file
    :private_key = private key of application
    :kms_key_arn = arn for an aws kms_key
    :lambda_name = name of an aws lambda function
    """
    with open(config_file_path, "r") as f:
        config = load(f)
    # An empty YAML document loads as None; normalize to an empty mapping.
    if config is None:
        config = {}
    for key, value in config.items():
        # FIX: isinstance() instead of a type() equality check (idiomatic,
        # and also matches subclasses of Encrypted).
        if isinstance(value, Encrypted):
            config[key] = kms_encrypt(kms_key_arn, decrypt_value(value, private_key))
    client = boto3.client('lambda')
    client.update_function_configuration(
        FunctionName=lambda_name,
        Environment={
            'Variables': config
        }
    )
def kms_encrypt(kms_key_arn, value):
    """
    Encrypts the value of an environment variable using AWS KMS.
    :kms_key_arn = arn for an aws kms_key
    :value = the value of an environment variable
    Returns the ciphertext as a base64-encoded string.
    """
    kms_client = boto3.client('kms')
    encrypted = kms_client.encrypt(
        KeyId=kms_key_arn,
        Plaintext=value,
    )
    # CiphertextBlob is raw bytes; base64-encode and decode to str so it
    # can be stored as a lambda environment variable value.
    return b64encode(encrypted[u'CiphertextBlob']).decode()
<p align="center">
<img src="http://i.imgur.com/oePnHJn.jpg" alt="Zappa Rocks!"/>
</p>
## Zappa - Serverless Python
[](https://travis-ci.org/zappa/Zappa)
[](https://coveralls.io/github/zappa/Zappa)
[](https://pypi.python.org/pypi/zappa)
[](https://zappateam.slack.com/)
[](https://gun.io/)
[](https://patreon.com/zappa)
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [About](#about)
- [Installation and Configuration](#installation-and-configuration)
- [Running the Initial Setup / Settings](#running-the-initial-setup--settings)
- [Basic Usage](#basic-usage)
- [Initial Deployments](#initial-deployments)
- [Updates](#updates)
- [Rollback](#rollback)
- [Scheduling](#scheduling)
- [Advanced Scheduling](#advanced-scheduling)
- [Undeploy](#undeploy)
- [Package](#package)
- [How Zappa Makes Packages](#how-zappa-makes-packages)
- [Template](#template)
- [Status](#status)
- [Tailing Logs](#tailing-logs)
- [Remote Function Invocation](#remote-function-invocation)
- [Django Management Commands](#django-management-commands)
- [SSL Certification](#ssl-certification)
- [Deploying to a Domain With AWS Certificate Manager](#deploying-to-a-domain-with-aws-certificate-manager)
- [Deploying to a Domain With a Let's Encrypt Certificate (DNS Auth)](#deploying-to-a-domain-with-a-lets-encrypt-certificate-dns-auth)
- [Deploying to a Domain With a Let's Encrypt Certificate (HTTP Auth)](#deploying-to-a-domain-with-a-lets-encrypt-certificate-http-auth)
- [Deploying to a Domain With Your Own SSL Certs](#deploying-to-a-domain-with-your-own-ssl-certs)
- [Executing in Response to AWS Events](#executing-in-response-to-aws-events)
- [Asynchronous Task Execution](#asynchronous-task-execution)
- [Catching Exceptions](#catching-exceptions)
- [Task Sources](#task-sources)
- [Direct Invocation](#direct-invocation)
- [Remote Invocations](#remote-invocations)
- [Restrictions](#restrictions)
- [Running Tasks in a VPC](#running-tasks-in-a-vpc)
- [Responses](#responses)
- [Advanced Settings](#advanced-settings)
- [YAML Settings](#yaml-settings)
- [Advanced Usage](#advanced-usage)
- [Keeping The Server Warm](#keeping-the-server-warm)
- [Serving Static Files / Binary Uploads](#serving-static-files--binary-uploads)
- [Enabling CORS](#enabling-cors)
- [Large Projects](#large-projects)
- [Enabling Bash Completion](#enabling-bash-completion)
- [Enabling Secure Endpoints on API Gateway](#enabling-secure-endpoints-on-api-gateway)
- [API Key](#api-key)
- [IAM Policy](#iam-policy)
- [API Gateway Lambda Authorizers](#api-gateway-lambda-authorizers)
- [Cognito User Pool Authorizer](#cognito-user-pool-authorizer)
- [API Gateway Resource Policy](#api-gateway-resource-policy)
- [Setting Environment Variables](#setting-environment-variables)
- [Local Environment Variables](#local-environment-variables)
- [Remote AWS Environment Variables](#remote-aws-environment-variables)
- [Remote Environment Variables](#remote-environment-variables)
- [Remote Environment Variables (via an S3 file)](#remote-environment-variables-via-an-s3-file)
- [API Gateway Context Variables](#api-gateway-context-variables)
- [Catching Unhandled Exceptions](#catching-unhandled-exceptions)
- [Using Custom AWS IAM Roles and Policies](#using-custom-aws-iam-roles-and-policies)
- [Custom AWS IAM Roles and Policies for Deployment](#custom-aws-iam-roles-and-policies-for-deployment)
- [Custom AWS IAM Roles and Policies for Execution](#custom-aws-iam-roles-and-policies-for-execution)
- [AWS X-Ray](#aws-x-ray)
- [Globally Available Server-less Architectures](#globally-available-server-less-architectures)
- [Raising AWS Service Limits](#raising-aws-service-limits)
- [Dead Letter Queues](#dead-letter-queues)
- [Unique Package ID](#unique-package-id)
- [Application Load Balancer Event Source](#application-load-balancer-event-source)
- [Endpoint Configuration](#endpoint-configuration)
- [Example Private API Gateway configuration](#example-private-api-gateway-configuration)
- [Zappa Guides](#zappa-guides)
- [Zappa in the Press](#zappa-in-the-press)
- [Sites Using Zappa](#sites-using-zappa)
- [Related Projects](#related-projects)
- [Hacks](#hacks)
- [Contributing](#contributing)
- [Using a Local Repo](#using-a-local-repo)
- [Patrons](#patrons)
- [Support / Development / Training / Consulting](#support--development--training--consulting)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## About
<p align="center">
<a href="https://htmlpreview.github.io/?https://raw.githubusercontent.com/Miserlou/Talks/master/serverless-sf/big.quickstart.html"><img src="http://i.imgur.com/c23kDNT.png?1" alt="Zappa Slides"/></a>
</p>
<p align="center">
<i>In a hurry? Click to see <a href="https://htmlpreview.github.io/?https://raw.githubusercontent.com/Miserlou/Talks/master/serverless-sf/big.quickstart.html">(now slightly out-dated) slides from Serverless SF</a>!</i>
</p>
**Zappa** makes it super easy to build and deploy server-less, event-driven Python applications (including, but not limited to, WSGI web apps) on AWS Lambda + API Gateway. Think of it as "serverless" web hosting for your Python apps. That means **infinite scaling**, **zero downtime**, **zero maintenance** - and at a fraction of the cost of your current deployments!
If you've got a Python web app (including Django and Flask apps), it's as easy as:
```
$ pip install zappa
$ zappa init
$ zappa deploy
```
and now you're server-less! _Wow!_
> What do you mean "serverless"?
Okay, so there still is a server - but it only has a _40 millisecond_ life cycle! Serverless in this case means **"without any permanent infrastructure."**
With a traditional HTTP server, the server is online 24/7, processing requests one by one as they come in. If the queue of incoming requests grows too large, some requests will time out. With Zappa, **each request is given its own virtual HTTP "server"** by Amazon API Gateway. AWS handles the horizontal scaling automatically, so no requests ever time out. Each request then calls your application from a memory cache in AWS Lambda and returns the response via Python's WSGI interface. After your app returns, the "server" dies.
Better still, with Zappa you only pay for the milliseconds of server time that you use, so it's many **orders of magnitude cheaper** than VPS/PaaS hosts like Linode or Heroku - and in most cases, it's completely free. Plus, there's no need to worry about load balancing or keeping servers online ever again.
It's great for deploying serverless microservices with frameworks like Flask and Bottle, and for hosting larger web apps and CMSes with Django. Or, you can use any WSGI-compatible app you like! You **probably don't need to change your existing applications** to use it, and you're not locked into using it.
Zappa also lets you build hybrid event-driven applications that can scale to **trillions of events** a year with **no additional effort** on your part! You also get **free SSL certificates**, **global app deployment**, **API access management**, **automatic security policy generation**, **precompiled C-extensions**, **auto keep-warms**, **oversized Lambda packages**, and **many other exclusive features**!
And finally, Zappa is **super easy to use**. You can deploy your application with a single command out of the box!
__Awesome!__
<p align="center">
<img src="http://i.imgur.com/f1PJxCQ.gif" alt="Zappa Demo Gif"/>
</p>
## Installation and Configuration
_Before you begin, make sure you are running Python 3.6/3.7/3.8 and you have a valid AWS account and your [AWS credentials file](https://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs) is properly installed._
**Zappa** can easily be installed through pip, like so:
$ pip install zappa
Please note that Zappa _**must**_ be installed into your project's [virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs/). The virtual environment name should not be the same as the Zappa project name, as this may cause errors.
_(If you use [pyenv](https://github.com/yyuu/pyenv) and love to manage virtualenvs with **pyenv-virtualenv**, you just have to call `pyenv local [your_venv_name]` and it's ready. [Conda](http://conda.pydata.org/docs/) users should comment [here](https://github.com/Miserlou/Zappa/pull/108).)_
Next, you'll need to define your local and server-side settings.
### Running the Initial Setup / Settings
**Zappa** can automatically set up your deployment settings for you with the `init` command:
$ zappa init
This will automatically detect your application type (Flask/Django - Pyramid users [see here](https://github.com/Miserlou/Zappa/issues/278#issuecomment-241917956)) and help you define your deployment configuration settings. Once you finish initialization, you'll have a file named *zappa_settings.json* in your project directory defining your basic deployment settings. It will probably look something like this for most WSGI apps:
```javascript
{
// The name of your stage
"dev": {
// The name of your S3 bucket
"s3_bucket": "lambda",
// The modular python path to your WSGI application function.
// In Flask and Bottle, this is your 'app' object.
// Flask (your_module.py):
// app = Flask()
// Bottle (your_module.py):
// app = bottle.default_app()
"app_function": "your_module.app"
}
}
```
or for Django:
```javascript
{
"dev": { // The name of your stage
"s3_bucket": "lambda", // The name of your S3 bucket
"django_settings": "your_project.settings" // The python path to your Django settings.
}
}
```
_Psst: If you're deploying a Django application with Zappa for the first time, you might want to read Edgar Roman's [Django Zappa Guide](https://edgarroman.github.io/zappa-django-guide/)._
You can define as many stages as you like - we recommend having _dev_, _staging_, and _production_.
Now, you're ready to deploy!
## Basic Usage
### Initial Deployments
Once your settings are configured, you can package and deploy your application to a stage called "production" with a single command:
$ zappa deploy production
Deploying..
Your application is now live at: https://7k6anj0k99.execute-api.us-east-1.amazonaws.com/production
And now your app is **live!** How cool is that?!
To explain what's going on, when you call `deploy`, Zappa will automatically package up your application and local virtual environment into a Lambda-compatible archive, replace any dependencies with versions with wheels compatible with lambda, set up the function handler and necessary WSGI Middleware, upload the archive to S3, create and manage the necessary Amazon IAM policies and roles, register it as a new Lambda function, create a new API Gateway resource, create WSGI-compatible routes for it, link it to the new Lambda function, and finally delete the archive from your S3 bucket. Handy!
Be aware that the default IAM role and policy created for executing Lambda applies a liberal set of permissions.
These are most likely not appropriate for production deployment of important applications. See the section
[Custom AWS IAM Roles and Policies for Execution](#custom-aws-iam-roles-and-policies-for-execution) for more detail.
### Updates
If your application has already been deployed and you only need to upload new Python code, but not touch the underlying routes, you can simply:
$ zappa update production
Updating..
Your application is now live at: https://7k6anj0k99.execute-api.us-east-1.amazonaws.com/production
This creates a new archive, uploads it to S3 and updates the Lambda function to use the new code, but doesn't touch the API Gateway routes.
### Rollback
You can also `rollback` the deployed code to a previous version by supplying the number of revisions to return to. For instance, to rollback to the version deployed 3 versions ago:
$ zappa rollback production -n 3
### Scheduling
Zappa can be used to easily schedule functions to occur on regular intervals. This provides a much nicer, maintenance-free alternative to Celery!
These functions will be packaged and deployed along with your `app_function` and called from the handler automatically.
Just list your functions and the expression to schedule them using [cron or rate syntax](http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html) in your *zappa_settings.json* file:
```javascript
{
"production": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
}],
...
}
}
```
And then:
$ zappa schedule production
And now your function will execute every minute!
If you want to cancel these, you can simply use the `unschedule` command:
$ zappa unschedule production
And now your scheduled event rules are deleted.
See the [example](example/) for more details.
#### Advanced Scheduling
Sometimes a function needs multiple expressions to describe its schedule. To set multiple expressions, simply list your functions, and the list of expressions to schedule them using [cron or rate syntax](http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html) in your *zappa_settings.json* file:
```javascript
{
"production": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expressions": ["cron(0 20-23 ? * SUN-THU *)", "cron(0 0-8 ? * MON-FRI *)"] // When to execute it (in cron or rate format)
}],
...
}
}
```
This can be used to deal with issues arising from the UTC timezone crossing midnight during business hours in your local timezone.
It should be noted that overlapping expressions will not throw a warning, and should be checked for, to prevent duplicate triggering of functions.
### Undeploy
If you need to remove the API Gateway and Lambda function that you have previously published, you can simply:
$ zappa undeploy production
You will be asked for confirmation before it executes.
If you enabled CloudWatch Logs for your API Gateway service and you don't
want to keep those logs, you can specify the `--remove-logs` argument to purge the logs for your API Gateway and your Lambda function:
$ zappa undeploy production --remove-logs
### Package
If you want to build your application package without actually uploading and registering it as a Lambda function, you can use the `package` command:
$ zappa package production
If you have a `zip` callback in your `callbacks` setting, this will also be invoked.
```javascript
{
"production": { // The name of your stage
"callbacks": {
"zip": "my_app.zip_callback"// After creating the package
}
}
}
```
You can also specify the output filename of the package with `-o`:
$ zappa package production -o my_awesome_package.zip
#### How Zappa Makes Packages
Zappa will automatically package your active virtual environment into a package which runs smoothly on AWS Lambda.
During this process, it will replace any local dependencies with AWS Lambda compatible versions. Dependencies are included in this order:
* Lambda-compatible `manylinux` wheels from a local cache
* Lambda-compatible `manylinux` wheels from PyPI
* Packages from the active virtual environment
* Packages from the local project directory
It also skips certain unnecessary files, and ignores any .py files if .pyc files are available.
In addition, Zappa will also automatically set the correct execution permissions, configure package settings, and create a unique, auditable package manifest file.
To further reduce the final package file size, you can:
* Set `slim_handler` to `True` to upload a small handler to Lambda and the rest of the package to S3. For more details, see the [merged pull request](https://github.com/Miserlou/Zappa/pull/548) and the [discussion in the original issue](https://github.com/Miserlou/Zappa/issues/510). See also: [Large Projects](#large-projects).
* Use the `exclude` setting and provide a list of regex patterns to exclude from the archive. By default, Zappa will exclude Boto, because [it's already available in the Lambda execution environment](http://docs.aws.amazon.com/lambda/latest/dg/current-supported-versions.html).
### Template
Similarly to `package`, if you only want the API Gateway CloudFormation template, use the `template` command:
$ zappa template production --l your-lambda-arn -r your-role-arn
Note that you must supply your own Lambda ARN and Role ARNs in this case, as they may not have been created for you.
You can get the JSON output directly with `--json`, and specify the output file with `--output`.
### Status
If you need to see the status of your deployment and event schedules, simply use the `status` command.
$ zappa status production
### Tailing Logs
You can watch the logs of a deployment by calling the `tail` management command.
$ zappa tail production
By default, this will show all log items. In addition to HTTP and other events, anything `print`ed to `stdout` or `stderr` will be shown in the logs.
You can use the argument `--http` to filter for HTTP requests, which will be in the Apache Common Log Format.
$ zappa tail production --http
Similarly, you can do the inverse and only show non-HTTP events and log messages:
$ zappa tail production --non-http
If you don't like the default log colors, you can turn them off with `--no-color`.
You can also limit the length of the tail with `--since`, which accepts a simple duration string:
$ zappa tail production --since 4h # 4 hours
$ zappa tail production --since 1m # 1 minute
$ zappa tail production --since 1mm # 1 month
You can filter out the contents of the logs with `--filter`, like so:
$ zappa tail production --http --filter "POST" # Only show POST HTTP requests
Note that this uses the [CloudWatch Logs filter syntax](http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
To tail logs without following (to exit immediately after displaying the end of the requested logs), pass `--disable-keep-open`:
$ zappa tail production --since 1h --disable-keep-open
### Remote Function Invocation
You can execute any function in your application directly at any time by using the `invoke` command.
For instance, suppose you have a basic application in a file called "my_app.py", and you want to invoke a function in it called "my_function". Once your application is deployed, you can invoke that function at any time by calling:
$ zappa invoke production 'my_app.my_function'
Any remote print statements made and the value the function returned will then be printed to your local console. **Nifty!**
You can also invoke interpretable Python 3.6/3.7/3.8 strings directly by using `--raw`, like so:
$ zappa invoke production "print(1 + 2 + 3)" --raw
For instance, it can come in handy if you want to create your first `superuser` on a RDS database running in a VPC (like Serverless Aurora):
$ zappa invoke staging "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser('username', 'email', 'password')" --raw
### Django Management Commands
As a convenience, Zappa can also invoke remote Django 'manage.py' commands with the `manage` command. For instance, to perform the basic Django status check:
$ zappa manage production showmigrations admin
Obviously, this only works for Django projects which have their settings properly defined.
For commands which have their own arguments, you can also pass the command in as a string, like so:
$ zappa manage production "shell --version"
Commands which require direct user input, such as `createsuperuser`, should be [replaced by commands](http://stackoverflow.com/a/26091252) which use `zappa invoke <env> --raw`.
For more Django integration, take a look at the [zappa-django-utils](https://github.com/Miserlou/zappa-django-utils) project.
_(Please note that commands which take over 30 seconds to execute may time-out preventing output from being returned - but the command may continue to run. See [this related issue](https://github.com/Miserlou/Zappa/issues/205#issuecomment-236391248) for a work-around.)_
### SSL Certification
Zappa can be deployed to custom domain names and subdomains with custom SSL certificates, Let's Encrypt certificates, and [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) (ACM) certificates.
Currently, the easiest of these to use are the AWS Certificate Manager certificates, as they are free, self-renewing, and require the least amount of work.
Once configured as described below, all of these methods use the same command:
$ zappa certify
When deploying from a CI/CD system, you can use:
$ zappa certify --yes
to skip the confirmation prompt.
#### Deploying to a Domain With AWS Certificate Manager
Amazon provides their own free alternative to Let's Encrypt called [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) (ACM). To use this service with Zappa:
1. Verify your domain in the AWS Certificate Manager console.
2. In the console, select the N. Virginia (us-east-1) region and request a certificate for your domain or subdomain (`sub.yourdomain.tld`), or request a wildcard domain (`*.yourdomain.tld`).
3. Copy the entire ARN of that certificate and place it in the Zappa setting `certificate_arn`.
4. Set your desired domain in the `domain` setting.
5. Call `$ zappa certify` to create and associate the API Gateway distribution using that certificate.
#### Deploying to a Domain With a Let's Encrypt Certificate (DNS Auth)
If you want to use Zappa on a domain with a free Let's Encrypt certificate using automatic Route 53 based DNS Authentication, you can follow [this handy guide](https://github.com/zappa/Zappa/blob/master/docs/domain_with_free_ssl_dns.md).
#### Deploying to a Domain With a Let's Encrypt Certificate (HTTP Auth)
If you want to use Zappa on a domain with a free Let's Encrypt certificate using HTTP Authentication, you can follow [this guide](https://github.com/zappa/Zappa/blob/master/docs/domain_with_free_ssl_http.md).
However, it's now far easier to use Route 53-based DNS authentication, which will allow you to use a Let's Encrypt certificate with a single `$ zappa certify` command.
#### Deploying to a Domain With Your Own SSL Certs
1. The first step is to create a custom domain and obtain your SSL cert / key / bundle.
2. Ensure you have set the `domain` setting within your Zappa settings JSON - this will avoid problems with the Base Path mapping between the Custom Domain and the API invoke URL, which gets the Stage Name appended in the URI
3. Add the paths to your SSL cert / key / bundle to the `certificate`, `certificate_key`, and `certificate_chain` settings, respectively, in your Zappa settings JSON
4. Set `route53_enabled` to `false` if you plan on using your own DNS provider, and not an AWS Route53 Hosted zone.
5. Deploy or update your app using Zappa
6. Run `$ zappa certify` to upload your certificates and register the custom domain name with your API gateway.
## Executing in Response to AWS Events
Similarly, you can have your functions execute in response to events that happen in the AWS ecosystem, such as S3 uploads, DynamoDB entries, Kinesis streams, SNS messages, and SQS queues.
In your *zappa_settings.json* file, define your [event sources](http://docs.aws.amazon.com/lambda/latest/dg/invoking-lambda-function.html) and the function you wish to execute. For instance, this will execute `your_module.process_upload_function` in response to new objects in your `my-bucket` S3 bucket. Note that `process_upload_function` must accept `event` and `context` parameters.
```javascript
{
"production": {
...
"events": [{
"function": "your_module.process_upload_function",
"event_source": {
"arn": "arn:aws:s3:::my-bucket",
"events": [
"s3:ObjectCreated:*" // Supported event types: http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#supported-notification-event-types
]
}
}],
...
}
}
```
And then:
$ zappa schedule production
And now your function will execute every time a new upload appears in your bucket!
To access the key's information in your application context, you'll want `process_upload_function` to look something like this:
```python
import boto3
s3_client = boto3.client('s3')
def process_upload_function(event, context):
"""
Process a file upload.
"""
# Get the uploaded file's information
bucket = event['Records'][0]['s3']['bucket']['name'] # Will be `my-bucket`
key = event['Records'][0]['s3']['object']['key'] # Will be the file path of whatever file was uploaded.
# Get the bytes from S3
s3_client.download_file(bucket, key, '/tmp/' + key) # Download this file to writable tmp space.
file_bytes = open('/tmp/' + key).read()
```
Similarly, for a [Simple Notification Service](https://aws.amazon.com/sns/) event:
```javascript
"events": [
{
"function": "your_module.your_function",
"event_source": {
"arn": "arn:aws:sns:::your-event-topic-arn",
"events": [
"sns:Publish"
]
}
}
]
```
Optionally you can add [SNS message filters](http://docs.aws.amazon.com/sns/latest/dg/message-filtering.html):
```javascript
"events": [
{
"function": "your_module.your_function",
"event_source": {
"arn": "arn:aws:sns:::your-event-topic-arn",
"filters": {
"interests": ["python", "aws", "zappa"],
"version": ["1.0"]
},
...
}
}
]
```
[DynamoDB](http://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) and [Kinesis](http://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) are slightly different as it is not event based but pulling from a stream:
```javascript
"events": [
{
"function": "replication.replicate_records",
"event_source": {
"arn": "arn:aws:dynamodb:us-east-1:1234554:table/YourTable/stream/2016-05-11T00:00:00.000",
"starting_position": "TRIM_HORIZON", // Supported values: TRIM_HORIZON, LATEST
"batch_size": 50, // Max: 1000
"enabled": true // Default is false
}
}
]
```
[SQS](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) is also pulling messages from a stream. At this time, [only "Standard" queues can trigger lambda events, not "FIFO" queues](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html). Read the AWS Documentation carefully since Lambda calls the SQS DeleteMessage API on your behalf once your function completes successfully.
```javascript
"events": [
{
"function": "your_module.process_messages",
"event_source": {
"arn": "arn:aws:sqs:us-east-1:12341234:your-queue-name-arn",
"batch_size": 10, // Max: 10. Use 1 to trigger immediate processing
"enabled": true // Default is false
}
}
]
```
For configuring Lex Bot's intent triggered events:
```javascript
"bot_events": [
{
"function": "lexbot.handlers.book_appointment.handler",
"event_source": {
"arn": "arn:aws:lex:us-east-1:01234123123:intent:TestLexEventNames:$LATEST", // optional. In future it will be used to configure the intent
"intent":"intentName", // name of the bot event configured
"invocation_source":"DialogCodeHook", // either FulfillmentCodeHook or DialogCodeHook
}
}
]
```
Events can also take keyword arguments:
```javascript
"events": [
{
"function": "your_module.your_recurring_function", // The function to execute
"kwargs": {"key": "val", "key2": "val2"}, // Keyword arguments to pass. These are available in the event
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
}
]
```
To get the keyword arguments you will need to look inside the event dictionary:
```python
def your_recurring_function(event, context):
my_kwargs = event.get("kwargs") # dict of kwargs given in zappa_settings file
```
You can find more [example event sources here](http://docs.aws.amazon.com/lambda/latest/dg/eventsources.html).
## Asynchronous Task Execution
Zappa also now offers the ability to seamlessly execute functions asynchronously in a completely separate AWS Lambda instance!
For example, if you have a Flask API for ordering a pie, you can call your `bake` function seamlessly in a completely separate Lambda instance by using the `zappa.asynchronous.task` decorator like so:
```python
from flask import Flask
from zappa.asynchronous import task
app = Flask(__name__)
@task
def make_pie():
""" This takes a long time! """
ingredients = get_ingredients()
pie = bake(ingredients)
deliver(pie)
@app.route('/api/order/pie')
def order_pie():
""" This returns immediately! """
make_pie()
return "Your pie is being made!"
```
And that's it! Your API response will return immediately, while the `make_pie` function executes in a completely different Lambda instance.
When calls to @task decorated functions or the zappa.asynchronous.run command occur outside of Lambda, such as your local dev environment,
the functions will execute immediately and locally. The zappa asynchronous functionality only works
when in the Lambda environment or when specifying [Remote Invocations](https://github.com/zappa/zappa#remote-invocations).
### Catching Exceptions
Putting a try..except block on an asynchronous task like this:
```python
@task
def make_pie():
try:
ingredients = get_ingredients()
pie = bake(ingredients)
deliver(pie)
except Fault as error:
"""send an email"""
...
return Response('Web services down', status=503)
```
will cause an email to be sent twice for the same error. See [asynchronous retries at AWS](https://docs.aws.amazon.com/lambda/latest/dg/retries-on-errors.html). To work around this side-effect, and have the fault handler execute only once, change the return value to:
```python
@task
def make_pie():
try:
"""code block"""
except Fault as error:
"""send an email"""
...
return {} #or return True
```
### Task Sources
By default, this feature uses direct AWS Lambda invocation. You can instead use AWS Simple Notification Service as the task event source by using the `task_sns` decorator, like so:
```python
from zappa.asynchronous import task_sns
@task_sns
```
Using SNS also requires setting the following settings in your `zappa_settings`:
```javascript
{
"dev": {
..
"async_source": "sns", // Source of async tasks. Defaults to "lambda"
"async_resources": true, // Create the SNS topic to use. Defaults to true.
..
}
}
```
This will automatically create and subscribe to the SNS topic the code will use when you call the `zappa schedule` command.
Using SNS will also return a message ID in case you need to track your invocations.
### Direct Invocation
You can also use this functionality without a decorator by passing your function to `zappa.asynchronous.run`, like so:
```python
from zappa.asynchronous import run
run(your_function, args, kwargs) # Using Lambda
run(your_function, args, kwargs, service='sns') # Using SNS
```
### Remote Invocations
By default, Zappa will use lambda's current function name and current AWS region. If you wish to invoke a lambda with
a different function name/region or invoke your lambda from outside of lambda, you must specify the
`remote_aws_lambda_function_name` and `remote_aws_region` arguments so that the application knows which function and
region to use. For example, if some part of our pizza making application had to live on an EC2 instance, but we
wished to call the make_pie() function on its own Lambda instance, we would do it as follows:
```python
@task(remote_aws_lambda_function_name='pizza-pie-prod', remote_aws_region='us-east-1')
def make_pie():
""" This takes a long time! """
ingredients = get_ingredients()
pie = bake(ingredients)
deliver(pie)
```
If those task() parameters were not used, then EC2 would execute the function locally. These same
`remote_aws_lambda_function_name` and `remote_aws_region` arguments can be used on the zappa.asynchronous.run() function as well.
### Restrictions
The following restrictions to this feature apply:
* Functions must have a clean import path -- i.e. no closures, lambdas, or methods.
* `args` and `kwargs` must be JSON-serializable.
* The JSON-serialized arguments must be within the size limits for Lambda (256K) or SNS (256K) events.
All of this code is still backwards-compatible with non-Lambda environments - it simply executes in a blocking fashion and returns the result.
### Running Tasks in a VPC
If you're running Zappa in a Virtual Private Cloud (VPC), you'll need to configure your subnets to allow your lambda to communicate with services inside your VPC as well as the public Internet. A minimal setup requires two subnets.
In __subnet-a__:
* Create a NAT
* Create an Internet gateway
* In the route table, create a route pointing the Internet gateway to 0.0.0.0/0.
In __subnet-b__:
* Place your lambda function
* In the route table, create a route pointing the NAT that belongs to __subnet-a__ to 0.0.0.0/0.
You can place your lambda in multiple subnets that are configured the same way as __subnet-b__ for high availability.
Some helpful resources are [this tutorial](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Tutorials.WebServerDB.CreateVPC.html), [this other tutorial](https://gist.github.com/reggi/dc5f2620b7b4f515e68e46255ac042a7) and [this AWS doc page](http://docs.aws.amazon.com/lambda/latest/dg/vpc.html#vpc-internet).
### Responses
It is possible to capture the responses of Asynchronous tasks.
Zappa uses DynamoDB as the backend for these.
To capture responses, you must configure a `async_response_table` in `zappa_settings`. This is the DynamoDB table name. Then, when decorating with `@task`, pass `capture_response=True`.
Async responses are assigned a `response_id`. This is returned as a property of the `LambdaAsyncResponse` (or `SnsAsyncResponse`) object that is returned by the `@task` decorator.
Example:
```python
from zappa.asynchronous import task, get_async_response
from flask import Flask, make_response, abort, url_for, redirect, request, jsonify
from time import sleep
app = Flask(__name__)
@app.route('/payload')
def payload():
delay = request.args.get('delay', 60)
x = longrunner(delay)
return redirect(url_for('response', response_id=x.response_id))
@app.route('/async-response/<response_id>')
def response(response_id):
response = get_async_response(response_id)
if response is None:
abort(404)
if response['status'] == 'complete':
return jsonify(response['response'])
sleep(5)
return "Not yet ready. Redirecting.", 302, {
'Content-Type': 'text/plain; charset=utf-8',
'Location': url_for('response', response_id=response_id, backoff=5),
'X-redirect-reason': "Not yet ready.",
}
@task(capture_response=True)
def longrunner(delay):
sleep(float(delay))
return {'MESSAGE': "It took {} seconds to generate this.".format(delay)}
```
## Advanced Settings
There are other settings that you can define in your local settings
to change Zappa's behavior. Use these at your own risk!
```javascript
{
"dev": {
"alb_enabled": false, // enable provisioning of application load balancing resources. If set to true, you _must_ fill out the alb_vpc_config option as well.
"alb_vpc_config": {
"CertificateArn": "your_acm_certificate_arn", // ACM certificate ARN for ALB
"SubnetIds": [], // list of subnets for ALB
"SecurityGroupIds": [] // list of security groups for ALB
},
"api_key_required": false, // enable securing API Gateway endpoints with x-api-key header (default False)
"api_key": "your_api_key_id", // optional, use an existing API key. The option "api_key_required" must be true to apply
"apigateway_enabled": true, // Set to false if you don't want to create an API Gateway resource. Default true.
"apigateway_description": "My funky application!", // Define a custom description for the API Gateway console. Default None.
"assume_policy": "my_assume_policy.json", // optional, IAM assume policy JSON file
"attach_policy": "my_attach_policy.json", // optional, IAM attach policy JSON file
"apigateway_policy": "my_apigateway_policy.json", // optional, API Gateway resource policy JSON file
"async_source": "sns", // Source of async tasks. Defaults to "lambda"
"async_resources": true, // Create the SNS topic and DynamoDB table to use. Defaults to true.
"async_response_table": "your_dynamodb_table_name", // the DynamoDB table name to use for captured async responses; defaults to None (can't capture)
"async_response_table_read_capacity": 1, // DynamoDB table read capacity; defaults to 1
"async_response_table_write_capacity": 1, // DynamoDB table write capacity; defaults to 1
"aws_endpoint_urls": { "aws_service_name": "endpoint_url" }, // a dictionary of endpoint_urls that emulate the appropriate service. Usually used for testing, for instance with `localstack`.
"aws_environment_variables" : {"your_key": "your_value"}, // A dictionary of environment variables that will be available to your deployed app via AWS Lambdas native environment variables. See also "environment_variables" and "remote_env" . Default {}.
"aws_kms_key_arn": "your_aws_kms_key_arn", // Your AWS KMS Key ARN
"aws_region": "aws-region-name", // optional, uses region set in profile or environment variables if not set here,
"binary_support": true, // Enable automatic MIME-type based response encoding through API Gateway. Default true.
"callbacks": { // Call custom functions during the local Zappa deployment/update process
"settings": "my_app.settings_callback", // After loading the settings
"zip": "my_app.zip_callback", // After creating the package
"post": "my_app.post_callback", // After command has executed
},
"cache_cluster_enabled": false, // Use APIGW cache cluster (default False)
"cache_cluster_size": 0.5, // APIGW Cache Cluster size (default 0.5)
"cache_cluster_ttl": 300, // APIGW Cache Cluster time-to-live (default 300)
"cache_cluster_encrypted": false, // Whether or not APIGW Cache Cluster encrypts data (default False)
"certificate": "my_cert.crt", // SSL certificate file location. Used to manually certify a custom domain
"certificate_key": "my_key.key", // SSL key file location. Used to manually certify a custom domain
"certificate_chain": "my_cert_chain.pem", // SSL certificate chain file location. Used to manually certify a custom domain
"certificate_arn": "arn:aws:acm:us-east-1:1234512345:certificate/aaaa-bbb-cccc-dddd", // ACM certificate ARN (needs to be in us-east-1 region).
"cloudwatch_log_level": "OFF", // Enables/configures a level of logging for the given staging. Available options: "OFF", "INFO", "ERROR", default "OFF".
"cloudwatch_data_trace": false, // Logs all data about received events. Default false.
"cloudwatch_metrics_enabled": false, // Additional metrics for the API Gateway. Default false.
"cognito": { // for Cognito event triggers
"user_pool": "user-pool-id", // User pool ID from AWS Cognito
"triggers": [{
"source": "PreSignUp_SignUp", // triggerSource from http://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html#cognito-user-pools-lambda-trigger-syntax-pre-signup
"function": "my_app.pre_signup_function"
}]
},
"context_header_mappings": { "HTTP_header_name": "API_Gateway_context_variable" }, // A dictionary mapping HTTP header names to API Gateway context variables
"cors": false, // Enable Cross-Origin Resource Sharing. Default false. If true, simulates the "Enable CORS" button on the API Gateway console. Can also be a dictionary specifying lists of "allowed_headers", "allowed_methods", and string of "allowed_origin"
"dead_letter_arn": "arn:aws:<sns/sqs>:::my-topic/queue", // Optional Dead Letter configuration for when Lambda async invoke fails thrice
"debug": true, // Print Zappa configuration errors tracebacks in the 500. Default true.
"delete_local_zip": true, // Delete the local zip archive after code updates. Default true.
"delete_s3_zip": true, // Delete the s3 zip archive. Default true.
"django_settings": "your_project.production_settings", // The modular path to your Django project's settings. For Django projects only.
"domain": "yourapp.yourdomain.com", // Required if you're using a domain
"base_path": "your-base-path", // Optional base path for API gateway custom domain base path mapping. Default None. Not supported for use with Application Load Balancer event sources.
"environment_variables": {"your_key": "your_value"}, // A dictionary of environment variables that will be available to your deployed app. See also "remote_env" and "aws_environment_variables". Default {}.
"events": [
{ // Recurring events
"function": "your_module.your_recurring_function", // The function to execute
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
},
{ // AWS Reactive events
"function": "your_module.your_reactive_function", // The function to execute
"event_source": {
"arn": "arn:aws:s3:::my-bucket", // The ARN of this event source
"events": [
"s3:ObjectCreated:*" // The specific event to execute in response to.
]
}
}
],
        "endpoint_configuration": ["EDGE", "REGIONAL", "PRIVATE"],  // Specify APIGateway endpoint None (default) or list `EDGE`, `REGIONAL`, `PRIVATE`
"exception_handler": "your_module.report_exception", // function that will be invoked in case Zappa sees an unhandled exception raised from your code
"exclude": ["*.gz", "*.rar"], // A list of regex patterns to exclude from the archive. To exclude boto3 and botocore (available in an older version on Lambda), add "boto3*" and "botocore*".
"extends": "stage_name", // Duplicate and extend another stage's settings. For example, `dev-asia` could extend from `dev-common` with a different `s3_bucket` value.
"extra_permissions": [{ // Attach any extra permissions to this policy. Default None
"Effect": "Allow",
"Action": ["rekognition:*"], // AWS Service ARN
"Resource": "*"
}],
"iam_authorization": false, // optional, use IAM to require request signing. Default false. Note that enabling this will override the authorizer configuration.
"include": ["your_special_library_to_load_at_handler_init"], // load special libraries into PYTHONPATH at handler init that certain modules cannot find on path
"authorizer": {
"function": "your_module.your_auth_function", // Local function to run for token validation. For more information about the function see below.
"arn": "arn:aws:lambda:<region>:<account_id>:function:<function_name>", // Existing Lambda function to run for token validation.
"result_ttl": 300, // Optional. Default 300. The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches authorizer results. Currently, the maximum TTL value is 3600 seconds.
"token_header": "Authorization", // Optional. Default 'Authorization'. The name of a custom authorization header containing the token that clients submit as part of their requests.
"validation_expression": "^Bearer \\w+$", // Optional. A validation expression for the incoming token, specify a regular expression.
},
"keep_warm": true, // Create CloudWatch events to keep the server warm. Default true. To remove, set to false and then `unschedule`.
"keep_warm_expression": "rate(4 minutes)", // How often to execute the keep-warm, in cron and rate format. Default 4 minutes.
"lambda_description": "Your Description", // However you want to describe your project for the AWS console. Default "Zappa Deployment".
"lambda_handler": "your_custom_handler", // The name of Lambda handler. Default: handler.lambda_handler
"layers": ["arn:aws:lambda:<region>:<account_id>:layer:<layer_name>:<layer_version>"], // optional lambda layers
"lambda_concurrency": 10, // Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level. Default is None.
"lets_encrypt_key": "s3://your-bucket/account.key", // Let's Encrypt account key path. Can either be an S3 path or a local file path.
"log_level": "DEBUG", // Set the Zappa log level. Can be one of CRITICAL, ERROR, WARNING, INFO and DEBUG. Default: DEBUG
"manage_roles": true, // Have Zappa automatically create and define IAM execution roles and policies. Default true. If false, you must define your own IAM Role and role_name setting.
"memory_size": 512, // Lambda function memory in MB. Default 512.
"num_retained_versions":null, // Indicates the number of old versions to retain for the lambda. If absent, keeps all the versions of the function.
"payload_compression": true, // Whether or not to enable API gateway payload compression (default: true)
"payload_minimum_compression_size": 0, // The threshold size (in bytes) below which payload compression will not be applied (default: 0)
"prebuild_script": "your_module.your_function", // Function to execute before uploading code
"profile_name": "your-profile-name", // AWS profile credentials to use. Default 'default'. Removing this setting will use the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables instead.
"project_name": "MyProject", // The name of the project as it appears on AWS. Defaults to a slugified `pwd`.
"remote_env": "s3://my-project-config-files/filename.json", // optional file in s3 bucket containing a flat json object which will be used to set custom environment variables.
"role_name": "MyLambdaRole", // Name of Zappa execution role. Default <project_name>-<env>-ZappaExecutionRole. To use a different, pre-existing policy, you must also set manage_roles to false.
"role_arn": "arn:aws:iam::12345:role/app-ZappaLambdaExecutionRole", // ARN of Zappa execution role. Default to None. To use a different, pre-existing policy, you must also set manage_roles to false. This overrides role_name. Use with temporary credentials via GetFederationToken.
"route53_enabled": true, // Have Zappa update your Route53 Hosted Zones when certifying with a custom domain. Default true.
"runtime": "python3.6", // Python runtime to use on Lambda. Can be one of "python3.6", "python3.7" or "python3.8". Defaults to whatever the current Python being used is.
"s3_bucket": "dev-bucket", // Zappa zip bucket,
"slim_handler": false, // Useful if project >50M. Set true to just upload a small handler to Lambda and load actual project from S3 at runtime. Default false.
"settings_file": "~/Projects/MyApp/settings/dev_settings.py", // Server side settings file location,
"tags": { // Attach additional tags to AWS Resources
"Key": "Value", // Example Key and value
"Key2": "Value2",
},
"timeout_seconds": 30, // Maximum lifespan for the Lambda function (default 30, max 900.)
"touch": true, // GET the production URL upon initial deployment (default True)
"touch_path": "/", // The endpoint path to GET when checking the initial deployment (default "/")
"use_precompiled_packages": true, // If possible, use C-extension packages which have been pre-compiled for AWS Lambda. Default true.
"vpc_config": { // Optional Virtual Private Cloud (VPC) configuration for Lambda function
"SubnetIds": [ "subnet-12345678" ], // Note: not all availability zones support Lambda!
"SecurityGroupIds": [ "sg-12345678" ]
},
"xray_tracing": false // Optional, enable AWS X-Ray tracing on your lambda function.
}
}
```
#### YAML Settings
If you prefer YAML over JSON, you can also use a `zappa_settings.yml`, like so:
```yaml
---
dev:
app_function: your_module.your_app
s3_bucket: your-code-bucket
events:
- function: your_module.your_function
event_source:
arn: arn:aws:s3:::your-event-bucket
events:
- s3:ObjectCreated:*
```
You can also supply a custom settings file at any time with the `-s` argument, ex:
```
$ zappa deploy dev -s my-custom-settings.yml
```
Similarly, you can supply a `zappa_settings.toml` file:
```toml
[dev]
app_function = "your_module.your_app"
s3_bucket = "your-code-bucket"
```
## Advanced Usage
### Keeping The Server Warm
Zappa will automatically set up a regularly occurring execution of your application in order to keep the Lambda function warm. This can be disabled via the `keep_warm` setting.
#### Serving Static Files / Binary Uploads
Zappa is now able to serve and receive binary files, as detected by their MIME-type.
However, generally Zappa is designed for running your application code, not for serving static web assets. If you plan on serving custom static assets in your web application (CSS/JavaScript/images/etc.,), you'll likely want to use a combination of AWS S3 and AWS CloudFront.
Your web application framework will likely be able to handle this for you automatically. For Flask, there is [Flask-S3](https://github.com/e-dard/flask-s3), and for Django, there is [Django-Storages](https://django-storages.readthedocs.io/en/latest/).
Similarly, you may want to design your application so that static binary uploads go [directly to S3](http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/browser-examples.html#Uploading_a_local_file_using_the_File_API), which then triggers an event response defined in your `events` setting! That's thinking serverlessly!
### Enabling CORS
The simplest way to enable CORS (Cross-Origin Resource Sharing) for your Zappa application is to set `cors` to `true` in your Zappa settings file and update, which is the equivalent of pushing the "Enable CORS" button in the AWS API Gateway console. This is disabled by default, but you may wish to enable it for APIs which are accessed from other domains, etc.
You can also simply handle CORS directly in your application. Your web framework will probably have an extension to do this, such as [django-cors-headers](https://github.com/ottoyiu/django-cors-headers) or [Flask-CORS](https://github.com/corydolphin/flask-cors). Using these will make your code more portable.
### Large Projects
AWS currently limits Lambda zip sizes to 50 megabytes. If your project is larger than that, set `slim_handler: true` in your `zappa_settings.json`. In this case, your fat application package will be replaced with a small handler-only package. The handler file then pulls the rest of the large project down from S3 at run time! The initial load of the large project may add to startup overhead, but the difference should be minimal on a warm lambda function. Note that this will also eat into the storage space of your application function. Note that AWS currently [limits](https://docs.aws.amazon.com/lambda/latest/dg/limits.html) the `/tmp` directory storage to 512 MB, so your project must still be smaller than that.
### Enabling Bash Completion
Bash completion can be enabled by adding the following to your .bashrc:
```bash
eval "$(register-python-argcomplete zappa)"
```
`register-python-argcomplete` is provided by the argcomplete Python package. If this package was installed in a virtualenv
then the command must be run there. Alternatively you can execute:
activate-global-python-argcomplete --dest=- > file
The file's contents should then be sourced in e.g. ~/.bashrc.
### Enabling Secure Endpoints on API Gateway
#### API Key
You can use the `api_key_required` setting to generate an API key to all the routes of your API Gateway. The process is as follows:
1. Deploy/redeploy (update won't work) and write down the *id* for the key that has been created
2. Go to AWS console > Amazon API Gateway and
* select "API Keys" and find the key *value* (for example `key_value`)
* select "Usage Plans", create a new usage plan and link the API Key and the API that Zappa has created for you
3. Send a request where you pass the key value as a header called `x-api-key` to access the restricted endpoints (for example with curl: `curl --header "x-api-key: key_value"`). Note that without the x-api-key header, you will receive a 403.
#### IAM Policy
You can enable IAM-based (v4 signing) authorization on an API by setting the `iam_authorization` setting to `true`. Your API will then require signed requests and access can be controlled via [IAM policy](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-iam-policy-examples.html). Unsigned requests will receive a 403 response, as will requesters who are not authorized to access the API. Enabling this will override the Authorizer configuration (see below).
#### API Gateway Lambda Authorizers
If you deploy an API endpoint with Zappa, you can take advantage of [API Gateway Lambda Authorizers](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html) to implement a token-based authentication - all you need to do is to provide a function to create the required output, Zappa takes care of the rest. A good start for the function is the [AWS Labs blueprint example](https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints/blob/master/blueprints/python/api-gateway-authorizer-python.py).
If you are wondering for what you would use an Authorizer, here are some potential use cases:
1. Call out to OAuth provider
2. Decode a JWT token inline
3. Lookup in a self-managed DB (for example DynamoDB)
Zappa can be configured to call a function inside your code to do the authorization, or to call some other existing lambda function (which lets you share the authorizer between multiple lambdas). You control the behavior by specifying either the `arn` or `function_name` values in the `authorizer` settings block.
For example, to get the Cognito identity, add this to a `zappa_settings.yaml`:
```yaml
context_header_mappings:
user_id: authorizer.user_id
```
Which can now be accessed in Flask like this:
```python
from flask import request
@route('/hello')
def hello_world():
print(request.headers.get('user_id'))
```
#### Cognito User Pool Authorizer
You can also use AWS Cognito User Pool Authorizer by adding:
```javascript
{
"authorizer": {
"type": "COGNITO_USER_POOLS",
"provider_arns": [
"arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}"
]
}
}
```
#### API Gateway Resource Policy
You can also use API Gateway Resource Policies. Example of IP Whitelisting:
```javascript
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"1.2.3.4/32"
]
}
}
}
]
}
```
### Setting Environment Variables
#### Local Environment Variables
If you want to set local environment variables for a deployment stage, you can simply set them in your `zappa_settings.json`:
```javascript
{
"dev": {
...
"environment_variables": {
"your_key": "your_value"
}
},
...
}
```
You can then access these inside your application with:
```python
import os
your_value = os.environ.get('your_key')
```
If your project needs to be aware of the type of environment you're deployed to, you'll also be able to get `SERVERTYPE` (AWS Lambda), `FRAMEWORK` (Zappa), `PROJECT` (your project name) and `STAGE` (_dev_, _production_, etc.) variables at any time.
#### Remote AWS Environment Variables
If you want to use native AWS Lambda environment variables you can use the `aws_environment_variables` configuration setting. These are useful as you can easily change them via the AWS Lambda console or cli at runtime. They are also useful for storing sensitive credentials and to take advantage of KMS encryption of environment variables.
During development, you can add your Zappa defined variables to your locally running app by, for example, using the below (for Django, to manage.py).
```python
if 'SERVERTYPE' in os.environ and os.environ['SERVERTYPE'] == 'AWS Lambda':
import json
import os
json_data = open('zappa_settings.json')
env_vars = json.load(json_data)['dev']['environment_variables']
for key, val in env_vars.items():
os.environ[key] = val
```
#### Remote Environment Variables
Any environment variables that you have set outside of Zappa (via AWS Lambda console or cli) will remain as they are when running `update`, unless they are also in `aws_environment_variables`, in which case the remote value will be overwritten by the one in the settings file. If you are using KMS-encrypted AWS environment variables, you can set your KMS Key ARN in the `aws_kms_key_arn` setting. Make sure that the values you set are encrypted in such case.
_Note: if you rely on these as well as `environment_variables`, and you have the same key names, then those in `environment_variables` will take precedence as they are injected in the lambda handler._
#### Remote Environment Variables (via an S3 file)
_S3 remote environment variables were added to Zappa before AWS introduced native environment variables for Lambda (via the console and cli). Before going down this route, check if the above makes more sense for your use case._
If you want to use remote environment variables to configure your application (which is especially useful for things like sensitive credentials), you can create a file and place it in an S3 bucket to which your Zappa application has access. To do this, add the `remote_env` key to zappa_settings pointing to a file containing a flat JSON object, so that each key-value pair on the object will be set as an environment variable and value whenever a new lambda instance spins up.
For example, to ensure your application has access to the database credentials without storing them in your version control, you can add a file to S3 with the connection string and load it into the lambda environment using the `remote_env` configuration setting.
super-secret-config.json (uploaded to my-config-bucket):
```javascript
{
"DB_CONNECTION_STRING": "super-secret:database"
}
```
zappa_settings.json:
```javascript
{
"dev": {
...
"remote_env": "s3://my-config-bucket/super-secret-config.json",
},
...
}
```
Now in your application you can use:
```python
import os
db_string = os.environ.get('DB_CONNECTION_STRING')
```
### API Gateway Context Variables
If you want to map an API Gateway context variable (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html) to an HTTP header you can set up the mapping in `zappa_settings.json`:
```javascript
{
"dev": {
...
"context_header_mappings": {
"HTTP_header_name": "API_Gateway_context_variable"
}
},
...
}
```
For example, if you want to expose the $context.identity.cognitoIdentityId variable as the HTTP header CognitoIdentityId, and $context.stage as APIStage, you would have:
```javascript
{
"dev": {
...
"context_header_mappings": {
"CognitoIdentityId": "identity.cognitoIdentityId",
"APIStage": "stage"
}
},
...
}
```
### Catching Unhandled Exceptions
By default, if an _unhandled_ exception happens in your code, Zappa will just print the stacktrace into a CloudWatch log. If you wish to use an external reporting tool to take note of those exceptions, you can use the `exception_handler` configuration option.
zappa_settings.json:
```javascript
{
"dev": {
...
"exception_handler": "your_module.unhandled_exceptions",
},
...
}
```
The function has to accept three arguments: exception, event, and context:
your_module.py
```python
def unhandled_exceptions(e, event, context):
send_to_raygun(e, event) # gather data you need and send
return True # Prevent invocation retry
```
You may still need a similar exception handler inside your application, this is just a way to catch exception which happen at the Zappa/WSGI layer (typically event-based invocations, misconfigured settings, bad Lambda packages, and permissions issues).
By default, AWS Lambda will attempt to retry an event based (non-API Gateway, e.g. CloudWatch) invocation if an exception has been thrown. However, you can prevent this by returning True, as in the example above, so that Zappa will not re-raise the uncaught exception, thus preventing AWS Lambda from retrying the current invocation.
### Using Custom AWS IAM Roles and Policies
#### Custom AWS IAM Roles and Policies for Deployment
You can specify which _local_ profile to use for deploying your Zappa application by defining
the `profile_name` setting, which will correspond to a profile in your AWS credentials file.
#### Custom AWS IAM Roles and Policies for Execution
The default IAM policy created by Zappa for executing the Lambda is very permissive.
It grants access to all actions for
all resources for types CloudWatch, S3, Kinesis, SNS, SQS, DynamoDB, and Route53; lambda:InvokeFunction
for all Lambda resources; Put to all X-Ray resources; and all Network Interface operations to all EC2
resources. While this allows most Lambdas to work correctly with no extra permissions, it is
generally not an acceptable set of permissions for most continuous integration pipelines or
production deployments. Instead, you will probably want to manually manage your IAM policies.
To manually define the policy of your Lambda execution role, you must set *manage_roles* to false and define
either the *role_name* or *role_arn* in your Zappa settings file.
```javascript
{
"dev": {
...
"manage_roles": false, // Disable Zappa client managing roles.
"role_name": "MyLambdaRole", // Name of your Zappa execution role. Optional, default: <project_name>-<env>-ZappaExecutionRole.
"role_arn": "arn:aws:iam::12345:role/app-ZappaLambdaExecutionRole", // ARN of your Zappa execution role. Optional.
...
},
...
}
```
Ongoing discussion about the minimum policy requirements necessary for a Zappa deployment [can be found here](https://github.com/Miserlou/Zappa/issues/244).
A more robust solution to managing these entitlements will likely be implemented soon.
To add permissions to the default Zappa execution policy, use the `extra_permissions` setting:
```javascript
{
"dev": {
...
"extra_permissions": [{ // Attach any extra permissions to this policy.
"Effect": "Allow",
"Action": ["rekognition:*"], // AWS Service ARN
"Resource": "*"
}]
},
...
}
```
### AWS X-Ray
Zappa can enable [AWS X-Ray](https://aws.amazon.com/xray/) support on your function with a configuration setting:
```javascript
{
"dev": {
...
"xray_tracing": true
},
...
}
```
This will enable it on the Lambda function and allow you to instrument your code with X-Ray.
For example, with Flask:
```python
from aws_xray_sdk.core import xray_recorder
app = Flask(__name__)
xray_recorder.configure(service='my_app_name')
@route('/hello')
@xray_recorder.capture('hello')
def hello_world:
return 'Hello'
```
You may use the capture decorator to create subsegments around functions, or `xray_recorder.begin_subsegment('subsegment_name')` and `xray_recorder.end_subsegment()` within a function. The official [X-Ray documentation for Python](http://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/) has more information on how to use this with your code.
Note that you may create subsegments in your code but an exception will be raised if you try to create a segment, as it is [created by the lambda worker](https://github.com/aws/aws-xray-sdk-python/issues/2). This also means that if you use Flask you must not use the [XRayMiddleware the documentation suggests](https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-python-middleware.html).
### Globally Available Server-less Architectures
<p align="center">
<a href="https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0"><img src="http://i.imgur.com/oR61Qau.png" alt="Global Zappa Slides"/></a>
</p>
<p align="center">
<i>Click to see <a href="https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0">slides from ServerlessConf London</a>!</i>
</p>
During the `init` process, you will be given the option to deploy your application "globally." This will allow you to deploy your application to all available AWS regions simultaneously in order to provide a consistent global speed, increased redundancy, data isolation, and legal compliance. You can also choose to deploy only to "primary" locations, the AWS regions with `-1` in their names.
To learn more about these capabilities, see [these slides](https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0) from ServerlessConf London.
### Raising AWS Service Limits
Out of the box, AWS sets a limit of [1000 concurrent executions](http://docs.aws.amazon.com/lambda/latest/dg/limits.html) for your functions. If you start to breach these limits, you may start to see errors like `ClientError: An error occurred (LimitExceededException) when calling the PutTargets.."` or something similar.
To avoid this, you can file a [service ticket](https://console.aws.amazon.com/support/home#/) with Amazon to raise your limits up to the many tens of thousands of concurrent executions which you may need. This is a fairly common practice with Amazon, designed to prevent you from accidentally creating extremely expensive bug reports. So, before raising your service limits, make sure that you don't have any rogue scripts which could accidentally create tens of thousands of parallel executions that you don't want to pay for.
### Dead Letter Queues
If you want to utilise [AWS Lambda's Dead Letter Queue feature](http://docs.aws.amazon.com/lambda/latest/dg/dlq.html) simply add the key `dead_letter_arn`, with the value being the complete ARN to the corresponding SNS topic or SQS queue in your `zappa_settings.json`.
You must have already created the corresponding SNS/SQS topic/queue, and the Lambda function execution role must have been provisioned with read/publish/sendMessage access to the DLQ resource.
### Unique Package ID
For monitoring of different deployments, a unique UUID for each package is available in `package_info.json` in the root directory of your application's package. You can use this information or a hash of this file for such things as tracking errors across different deployments, monitoring status of deployments and other such things on services such as Sentry and New Relic. The package will contain:
```json
{
"build_platform": "darwin",
"build_user": "frank",
"build_time": "1509732511",
"uuid": "9c2df9e6-30f4-4c0a-ac4d-4ecb51831a74"
}
```
### Application Load Balancer Event Source
Zappa can be used to handle events triggered by Application Load Balancers (ALB). This can be useful in a few circumstances:
- Since API Gateway has a hard limit of 30 seconds before timing out, you can use an ALB for longer running requests.
- API Gateway is billed per-request; therefore, costs can become excessive with high throughput services. ALBs pricing model makes much more sense financially if you're expecting a lot of traffic to your Lambda.
- ALBs can be placed within a VPC, which may make more sense for private endpoints than using API Gateway's private model (using AWS PrivateLink).
Like API Gateway, Zappa can automatically provision ALB resources for you. You'll need to add the following to your `zappa_settings`:
```
"alb_enabled": true,
"alb_vpc_config": {
"CertificateArn": "arn:aws:acm:us-east-1:[your-account-id]:certificate/[certificate-id]",
"SubnetIds": [
// Here, you'll want to provide a list of subnets for your ALB, eg. 'subnet-02a58266'
],
"SecurityGroupIds": [
// And here, a list of security group IDs, eg. 'sg-fbacb791'
]
}
```
More information on using ALB as an event source for Lambda can be found [here](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html).
*An important note*: right now, Zappa will provision ONE lambda to ONE load balancer, which means using `base_path` along with ALB configuration is currently unsupported.
### Endpoint Configuration
API Gateway can be configured to be only accessible in a VPC. To enable this; [configure your VPC to support](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-private-apis.html) then set the `endpoint_configuration` to `PRIVATE` and set up Resource Policy on the API Gateway. A note about this; if you're using a private endpoint, Zappa won't be able to tell if the API is returning a successful status code upon deploy or update, so you'll have to check it manually to ensure your setup is working properly.
For full list of options for endpoint configuration refer to [API Gateway EndpointConfiguration documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigateway-restapi-endpointconfiguration.html)
#### Example Private API Gateway configuration
zappa_settings.json:
```json
{
"dev": {
...
"endpoint_configuration": ["PRIVATE"],
"apigateway_policy": "apigateway_resource_policy.json",
...
},
...
}
```
apigateway_resource_policy.json:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*",
"Condition": {
"StringNotEquals": {
"aws:sourceVpc": "{{vpcID}}" // UPDATE ME
}
}
},
{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*"
}
]
}
```
## Zappa Guides
* [Django-Zappa tutorial (screencast)](https://www.youtube.com/watch?v=plUrbPN0xc8&feature=youtu.be).
* [Using Django-Zappa, Part 1](https://serverlesscode.com/post/zappa-wsgi-for-python/).
* [Using Django-Zappa, Part 2: VPCs](https://serverlesscode.com/post/zappa-wsgi-for-python-pt-2/).
* [Building Serverless Microservices with Zappa and Flask](https://gun.io/blog/serverless-microservices-with-zappa-and-flask/)
* [Zappa で Hello World するまで (Japanese)](http://qiita.com/satoshi_iwashita/items/505492193317819772c7)
* [How to Deploy Zappa with CloudFront, RDS and VPC](https://jinwright.net/how-deploy-serverless-wsgi-app-using-zappa/)
* [Secure 'Serverless' File Uploads with AWS Lambda, S3, and Zappa](http://blog.stratospark.com/secure-serverless-file-uploads-with-aws-lambda-s3-zappa.html)
* [Deploy a Serverless WSGI App using Zappa, CloudFront, RDS, and VPC](https://docs.google.com/presentation/d/1aYeOMgQl4V_fFgT5VNoycdXtob1v6xVUWlyxoTEiTw0/edit#slide=id.p)
* [AWS: Deploy Alexa Ask Skills with Flask-Ask and Zappa](https://developer.amazon.com/blogs/post/8e8ad73a-99e9-4c0f-a7b3-60f92287b0bf/New-Alexa-Tutorial-Deploy-Flask-Ask-Skills-to-AWS-Lambda-with-Zappa)
* [Guide to using Django with Zappa](https://edgarroman.github.io/zappa-django-guide/)
* [Zappa and LambCI](https://seancoates.com/blogs/zappa-and-lambci/)
* [Building A Serverless Image Processing SaaS using Zappa](https://medium.com/99serverless/building-a-serverless-image-processing-saas-9ef68b594076)
* [Serverless Slack Slash Commands with Python and Zappa](https://renzo.lucioni.xyz/serverless-slash-commands-with-python/)
* [Bringing Tokusatsu to AWS using Python, Flask, Zappa and Contentful](https://www.contentful.com/blog/2018/03/07/bringing-tokusatsu-to-aws-using-python-flask-zappa-and-contentful/)
* [AWS Summit 2018 Seoul - Zappa와 함께하는 Serverless Microservice](https://www.slideshare.net/YunSeopSong/zappa-serverless-microservice-94410308/)
* [Book - Building Serverless Python Web Services with Zappa](https://github.com/PacktPublishing/Building-Serverless-Python-Web-Services-with-Zappa)
* [Vider sa flask dans une lambda](http://free_zed.gitlab.io/articles/2019/11/vider-sa-flask-dans-une-lambda/)[French]
* _Your guide here?_
## Zappa in the Press
* _[Zappa Serves Python, Minus the Servers](http://www.infoworld.com/article/3031665/application-development/zappa-serves-python-web-apps-minus-the-servers.html)_
* _[Zappa lyfter serverlösa applikationer med Python](http://computersweden.idg.se/2.2683/1.649895/zappa-lyfter-python)_
* _[Interview: Rich Jones on Zappa](https://serverlesscode.com/post/rich-jones-interview-django-zappa/)_
* [Top 10 Python Libraries of 2016](https://tryolabs.com/blog/2016/12/20/top-10-python-libraries-of-2016/)
## Sites Using Zappa
* [Mailchimp Signup Utility](https://github.com/sasha42/Mailchimp-utility) - A microservice for adding people to a mailing list via API.
* [Zappa Slack Inviter](https://github.com/Miserlou/zappa-slack-inviter) - A tiny, server-less service for inviting new users to your Slack channel.
* [Serverless Image Host](https://github.com/Miserlou/serverless-imagehost) - A thumbnailing service with Flask, Zappa and Pillow.
* [Zappa BitTorrent Tracker](https://github.com/Miserlou/zappa-bittorrent-tracker) - An experimental server-less BitTorrent tracker. Work in progress.
* [JankyGlance](https://github.com/Miserlou/JankyGlance) - A server-less Yahoo! Pipes replacement.
* [LambdaMailer](https://github.com/tryolabs/lambda-mailer) - A server-less endpoint for processing a contact form.
* [Voter Registration Microservice](https://topics.arlingtonva.us/2016/11/voter-registration-search-microservice/) - Official backup to the Virginia Department of Elections portal.
* [FreePoll Online](https://www.freepoll.online) - A simple and awesome way for groups to make decisions.
* [PasteOfCode](https://paste.ofcode.org/) - A Zappa-powered paste bin.
* And many more, including banks, governments, startups, enterprises and schools!
Are you using Zappa? Let us know and we'll list your site here!
## Related Projects
* [Mackenzie](http://github.com/Miserlou/Mackenzie) - AWS Lambda Infection Toolkit
* [NoDB](https://github.com/Miserlou/NoDB) - A simple, server-less, Pythonic object store based on S3.
* [zappa-cms](http://github.com/Miserlou/zappa-cms) - A tiny server-less CMS for busy hackers. Work in progress.
* [zappa-django-utils](https://github.com/Miserlou/zappa-django-utils) - Utility commands to help Django deployments.
* [flask-ask](https://github.com/johnwheeler/flask-ask) - A framework for building Amazon Alexa applications. Uses Zappa for deployments.
* [zappa-file-widget](https://github.com/anush0247/zappa-file-widget) - A Django plugin for supporting binary file uploads in Django on Zappa.
* [zops](https://github.com/bjinwright/zops) - Utilities for teams and continuous integrations using Zappa.
* [cookiecutter-mobile-backend](https://github.com/narfman0/cookiecutter-mobile-backend/) - A `cookiecutter` Django project with Zappa and S3 uploads support.
* [zappa-examples](https://github.com/narfman0/zappa-examples/) - Flask, Django, image uploads, and more!
* [zappa-hug-example](https://github.com/mcrowson/zappa-hug-example) - Example of a Hug application using Zappa.
* [Zappa Docker Image](https://github.com/danielwhatmuff/zappa) - A Docker image for running Zappa locally, based on Lambda Docker.
* [zappa-dashing](https://github.com/nikos/zappa-dashing) - Monitor your AWS environment (health/metrics) with Zappa and CloudWatch.
* [s3env](https://github.com/cameronmaske/s3env) - Manipulate a remote Zappa environment variable key/value JSON object file in an S3 bucket through the CLI.
* [zappa_resize_image_on_fly](https://github.com/wobeng/zappa_resize_image_on_fly) - Resize images on the fly using Flask, Zappa, Pillow, and OpenCV-python.
* [zappa-ffmpeg](https://github.com/ubergarm/zappa-ffmpeg) - Run ffmpeg inside a lambda for serverless transformations.
* [gdrive-lambda](https://github.com/richiverse/gdrive-lambda) - pass json data to a csv file for end users who use Gdrive across the organization.
* [travis-build-repeat](https://github.com/bcongdon/travis-build-repeat) - Repeat TravisCI builds to avoid stale test results.
* [wunderskill-alexa-skill](https://github.com/mcrowson/wunderlist-alexa-skill) - An Alexa skill for adding to a Wunderlist.
* [xrayvision](https://github.com/mathom/xrayvision) - Utilities and wrappers for using AWS X-Ray with Zappa.
* [terraform-aws-zappa](https://github.com/dpetzold/terraform-aws-zappa) - Terraform modules for creating a VPC, RDS instance, ElastiCache Redis and CloudFront Distribution for use with Zappa.
* [zappa-sentry](https://github.com/jneves/zappa-sentry) - Integration with Zappa and Sentry
* [IOpipe](https://github.com/iopipe/iopipe-python#zappa) - Monitor, profile and analyze your Zappa apps.
## Hacks
Zappa goes quite far beyond what Lambda and API Gateway were ever intended to handle. As a result, there are quite a few hacks in here that allow it to work. Some of those include, but aren't limited to..
* Using VTL to map body, headers, method, params and query strings into JSON, and then turning that into valid WSGI.
* Attaching response codes to response bodies, Base64 encoding the whole thing, using that as a regex to route the response code, decoding the body in VTL, and mapping the response body to that.
* Packing and _Base58_ encoding multiple cookies into a single cookie because we can only map one kind.
* Forcing the case permutations of "Set-Cookie" in order to return multiple headers at the same time.
* Turning cookie-setting 301/302 responses into 200 responses with HTML redirects, because we have no way to set headers on redirects.
## Contributing
This project is still young, so there is still plenty to be done. Contributions are more than welcome!
Please file tickets for discussion before submitting patches. Pull requests should target `master` and should leave Zappa in a "shippable" state if merged.
If you are adding a non-trivial amount of new code, please include a functioning test in your PR. For AWS calls, we use the `placebo` library, which you can learn to use [in their README](https://github.com/garnaat/placebo#usage-as-a-decorator). The test suite will be run by [Travis CI](https://travis-ci.org/zappa/Zappa) once you open a pull request.
Please include the GitHub issue or pull request URL that has discussion related to your changes as a comment in the code ([example](https://github.com/zappa/Zappa/blob/fae2925431b820eaedf088a632022e4120a29f89/zappa/zappa.py#L241-L243)). This greatly helps for project maintainability, as it allows us to trace back use cases and explain decision making. Similarly, please make sure that you meet all of the requirements listed in the [pull request template](https://raw.githubusercontent.com/zappa/Zappa/master/.github/PULL_REQUEST_TEMPLATE.md).
Please feel free to work on any open ticket, especially any ticket marked with the "help-wanted" label. If you get stuck or want to discuss an issue further, please join [our Slack channel](https://zappateam.slack.com/), where you'll find a community of smart and interesting people working diligently on hard problems.
Zappa does not intend to conform to PEP8; please isolate your commits so that changes to functionality are kept separate from changes made by your linter.
#### Using a Local Repo
To use the git HEAD, you *probably can't* use `pip install -e `. Instead, you should clone the repo to your machine and then `pip install /path/to/zappa/repo` or `ln -s /path/to/zappa/repo/zappa zappa` in your local project.
## Patrons
If you or your company uses **Zappa**, please consider giving what you can to support the ongoing development of the project!
You can become a patron by **[visiting our Patreon page](https://patreon.com/zappa)**.
Zappa is currently supported by these awesome individuals and companies:
* Nathan Lawrence
* LaunchLab
* Sean Paley
* Theo Chitayat
* George Sibble
* Joe Weiss
* Nik Bora
* Zerong Toby Wang
* Gareth E
* Matt Jackson
* Sean Coates
* Alexander Loschilov
* Korey Peters
* Joe Weiss
* Kimmo Parvianen-Jalanko
* Patrick Agin
* Roberto Martinez
* Charles Dimino
* Doug Beney
* Dan "The Man" Gayle
* Juancito
* Will Childs-Klein
* Efi Merdler Kravitz
* **Philippe Trounev**
Thank you very, very much!
## Support / Development / Training / Consulting
Do you need help with..
* Porting existing Flask and Django applications to Zappa?
* Building new applications and services that scale infinitely?
* Reducing your operations and hosting costs?
* Adding new custom features into Zappa?
* Training your team to use AWS and other server-less paradigms?
Good news! We're currently available for remote and on-site consulting for small, large and enterprise teams. Please contact <[email protected]> with your needs and let's work together!
<br />
<p align="center">
<a href="https://gun.io"><img src="http://i.imgur.com/M7wJipR.png" alt="Made by Gun.io"/></a>
</p>
| zappa-mathking | /zappa-mathking-0.52.4.tar.gz/zappa-mathking-0.52.4/README.md | README.md |
import atexit
import base64
import binascii
import copy
import hashlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
from urllib.request import urlopen
import requests
# Staging
# Amazon doesn't accept these though.
# DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
# Production
# Base URL of the ACME (Let's Encrypt) endpoint used for all CA requests below.
DEFAULT_CA = "https://acme-v02.api.letsencrypt.org"
# Module-level logger; a StreamHandler is attached so messages are emitted
# even when the host application has not configured logging.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
def get_cert_and_update_domain(
    zappa_instance,
    lambda_name,
    api_stage,
    domain=None,
    manual=False,
):
    """
    Main cert installer path.

    Generates a domain key and CSR, obtains a signed certificate from
    Let's Encrypt, then either installs it on the API Gateway custom domain
    (creating or updating the domain as needed) or, when ``manual`` is True,
    prints the certificate materials so the user can install them by hand.

    Returns True on success, False on any failure (the exception is printed).
    """
    try:
        create_domain_key()
        create_domain_csr(domain)
        get_cert(zappa_instance)
        create_chained_certificate()

        with open('{}/signed.crt'.format(gettempdir())) as f:
            certificate_body = f.read()

        with open('{}/domain.key'.format(gettempdir())) as f:
            certificate_private_key = f.read()

        with open('{}/intermediate.pem'.format(gettempdir())) as f:
            certificate_chain = f.read()

        if not manual:
            if domain:
                if not zappa_instance.get_domain_name(domain):
                    zappa_instance.create_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage
                    )
                    print("Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part.")
                else:
                    zappa_instance.update_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage
                    )
        else:
            # Typos fixed: these messages previously read "Cerificate".
            print("Certificate body:\n")
            print(certificate_body)

            print("\nCertificate private key:\n")
            print(certificate_private_key)

            print("\nCertificate chain:\n")
            print(certificate_chain)

    except Exception as e:
        print(e)
        return False

    return True
def create_domain_key():
    """Generate a 2048-bit RSA private key and write it to <tmp>/domain.key."""
    # subprocess.DEVNULL replaces an open(os.devnull) handle that was
    # previously opened and never closed (resource leak).
    out = subprocess.check_output(['openssl', 'genrsa', '2048'],
                                  stderr=subprocess.DEVNULL)
    with open(os.path.join(gettempdir(), 'domain.key'), 'wb') as f:
        f.write(out)
def create_domain_csr(domain):
    """
    Create a SHA-256 certificate signing request for ``domain`` (as the CN)
    using the key in <tmp>/domain.key, and write it to <tmp>/domain.csr.
    """
    subj = "/CN=" + domain
    cmd = [
        'openssl', 'req',
        '-new',
        '-sha256',
        '-key', os.path.join(gettempdir(), 'domain.key'),
        '-subj', subj
    ]
    # subprocess.DEVNULL replaces an open(os.devnull) handle that was
    # previously opened and never closed (resource leak).
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    with open(os.path.join(gettempdir(), 'domain.csr'), 'wb') as f:
        f.write(out)
def create_chained_certificate():
    """
    Fetch the Let's Encrypt X3 intermediate certificate and write both
    <tmp>/intermediate.pem (the intermediate alone) and <tmp>/chained.pem
    (the signed certificate followed by the intermediate).
    """
    # Use a context manager so the signed-cert handle is closed; the previous
    # open(...).read() leaked the file object.
    with open(os.path.join(gettempdir(), 'signed.crt'), 'rb') as f:
        signed_crt = f.read()

    cross_cert_url = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
    cert = requests.get(cross_cert_url)
    with open(os.path.join(gettempdir(), 'intermediate.pem'), 'wb') as intermediate_pem:
        intermediate_pem.write(cert.content)

    with open(os.path.join(gettempdir(), 'chained.pem'), 'wb') as chained_pem:
        chained_pem.write(signed_crt)
        chained_pem.write(cert.content)
def parse_account_key():
    """
    Parse account key to get public key.

    Returns the raw ``openssl rsa -text`` output (bytes) for
    <tmp>/account.key; ``get_boulder_header`` extracts the modulus and
    exponent from it.
    """
    LOGGER.info("Parsing account key...")
    cmd = [
        'openssl', 'rsa',
        '-in', os.path.join(gettempdir(), 'account.key'),
        '-noout',
        '-text'
    ]
    # subprocess.DEVNULL replaces an open(os.devnull) handle that was
    # previously opened and never closed (resource leak).
    return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
def parse_csr():
    """
    Parse certificate signing request for domains.

    Returns the set of domain names found in the CSR's Common Name and in
    any ``DNS:`` Subject Alternative Name entries.
    """
    LOGGER.info("Parsing CSR...")
    cmd = [
        'openssl', 'req',
        '-in', os.path.join(gettempdir(), 'domain.csr'),
        '-noout',
        '-text'
    ]
    # subprocess.DEVNULL replaces an open(os.devnull) handle that was
    # previously opened and never closed (resource leak).
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    domains = set([])
    # Common Name, e.g. "Subject: CN = example.com"
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8'))
    if common_name is not None:
        domains.add(common_name.group(1))
    # SAN block, e.g. "DNS:example.com, DNS:www.example.com"
    subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL)
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])
    return domains
def get_boulder_header(key_bytes):
    """
    Use regular expressions to find crypto values from the parsed account
    key, and return a JWS header we can send to our Boulder instance.
    """
    key_text = key_bytes.decode('utf8')
    match = re.search(
        r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
        key_text, re.MULTILINE | re.DOTALL)
    pub_hex, pub_exp = match.groups()

    # Hex-encode the public exponent, zero-padded to an even digit count
    # so it unhexlifies cleanly.
    exp_hex = "{0:x}".format(int(pub_exp))
    if len(exp_hex) % 2:
        exp_hex = "0" + exp_hex

    # Strip the whitespace and colon separators openssl prints in the modulus.
    modulus_hex = re.sub(r"(\s|:)", "", pub_hex)

    return {
        "alg": "RS256",
        "jwk": {
            "e": _b64(binascii.unhexlify(exp_hex.encode("utf-8"))),
            "kty": "RSA",
            "n": _b64(binascii.unhexlify(modulus_hex.encode("utf-8"))),
        },
    }
def register_account():
    """
    Agree to the Let's Encrypt Terms of Service by registering the account.

    A 201 means newly registered, a 409 means the account already exists;
    anything else raises ValueError.
    """
    LOGGER.info("Registering account...")
    code, result = _send_signed_request(DEFAULT_CA + "/acme/new-reg", {
        "resource": "new-reg",
        "agreement": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
    })
    if code == 201:  # pragma: no cover
        LOGGER.info("Registered!")
        return
    if code == 409:  # pragma: no cover
        LOGGER.info("Already registered!")
        return
    raise ValueError("Error registering: {0} {1}".format(code, result))  # pragma: no cover
def get_cert(zappa_instance, log=LOGGER, CA=DEFAULT_CA):
    """
    Call LE to get a new signed CA.

    For every domain in the CSR, performs a dns-01 challenge: requests a
    challenge from the CA, publishes the challenge digest as a TXT record
    via Route 53 (through ``zappa_instance``), waits for DNS propagation,
    asks the CA to verify, then removes the record. Finally signs the CSR
    and writes the PEM-encoded certificate. Returns True.

    Raises ValueError if a challenge cannot be requested or triggered, or if
    no hosted zone is found for a domain.
    """
    out = parse_account_key()
    header = get_boulder_header(out)
    accountkey_json = json.dumps(header['jwk'], sort_keys=True, separators=(',', ':'))
    thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())

    # find domains
    domains = parse_csr()
    # get the certificate domains and expiration
    register_account()

    # verify each domain
    for domain in domains:
        log.info("Verifying {0}...".format(domain))

        # get new challenge
        code, result = _send_signed_request(CA + "/acme/new-authz", {
            "resource": "new-authz",
            "identifier": {"type": "dns", "value": domain},
        })
        if code != 201:
            raise ValueError("Error requesting challenges: {0} {1}".format(code, result))

        challenge = [ch for ch in json.loads(result.decode('utf8'))['challenges'] if ch['type'] == "dns-01"][0]
        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
        keyauthorization = "{0}.{1}".format(token, thumbprint).encode('utf-8')

        # sha256_b64
        digest = _b64(hashlib.sha256(keyauthorization).digest())

        zone_id = zappa_instance.get_hosted_zone_id_for_domain(domain)
        if not zone_id:
            raise ValueError("Could not find Zone ID for: " + domain)
        zappa_instance.set_dns_challenge_txt(zone_id, domain, digest)  # resp is unused

        print("Waiting for DNS to propagate..")

        # Route 53 propagation is not instantaneous; this is an empirical
        # wait before asking the CA to verify. (Removed a dead, commented-out
        # in-loop `import time` that shadowed the module-level import.)
        time.sleep(45)

        # notify challenge are met
        code, result = _send_signed_request(challenge['uri'], {
            "resource": "challenge",
            "keyAuthorization": keyauthorization.decode('utf-8'),
        })
        if code != 202:
            raise ValueError("Error triggering challenge: {0} {1}".format(code, result))

        # wait for challenge to be verified
        verify_challenge(challenge['uri'])

        # Challenge verified, clean up R53
        zappa_instance.remove_dns_challenge_txt(zone_id, domain, digest)

    # Sign
    result = sign_certificate()
    # Encode to PEM format
    encode_certificate(result)

    return True
def verify_challenge(uri):
    """
    Loop until our challenge is verified, else fail.

    Polls the challenge URI: sleeps two seconds while the status is
    "pending", returns when it is "valid", and raises ValueError for any
    other status or on a request failure.
    """
    while True:
        try:
            resp = urlopen(uri)
            challenge_status = json.loads(resp.read().decode('utf8'))
        except IOError as e:
            # Only HTTPError carries .code and a readable body; a plain
            # URLError (e.g. connection refused) does not, and the old code's
            # unconditional e.code/e.read() raised AttributeError and hid
            # the real failure.
            code = getattr(e, 'code', None)
            try:
                detail = json.loads(e.read().decode('utf8'))
            except (AttributeError, ValueError):
                detail = str(e)
            raise ValueError("Error checking challenge: {0} {1}".format(code, detail))
        if challenge_status['status'] == "pending":
            time.sleep(2)
        elif challenge_status['status'] == "valid":
            LOGGER.info("Domain verified!")
            break
        else:
            raise ValueError("Domain challenge did not pass: {0}".format(
                challenge_status))
def sign_certificate():
    """
    Get the new certificate.
    Returns the signed bytes (DER) from the CA; raises ValueError if the
    CA does not answer 201.
    """
    LOGGER.info("Signing certificate...")
    cmd = [
        'openssl', 'req',
        '-in', os.path.join(gettempdir(), 'domain.csr'),
        '-outform', 'DER'
    ]
    # subprocess.DEVNULL replaces an open(os.devnull) handle that was
    # previously opened and never closed (resource leak).
    csr_der = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    code, result = _send_signed_request(DEFAULT_CA + "/acme/new-cert", {
        "resource": "new-cert",
        "csr": _b64(csr_der),
    })
    if code != 201:
        raise ValueError("Error signing certificate: {0} {1}".format(code, result))
    LOGGER.info("Certificate signed!")

    return result
def encode_certificate(result):
    """
    Encode cert bytes to PEM encoded cert file.

    Writes the DER ``result`` bytes to <tmp>/signed.crt as a PEM block,
    with the base64 payload wrapped at 64 columns. Returns True.
    """
    cert_body = """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
        "\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64)))
    # Context manager guarantees the file is flushed and closed even on error
    # (previously opened/closed by hand).
    with open("{}/signed.crt".format(gettempdir()), "w") as signed_crt:
        signed_crt.write(cert_body)

    return True
##
# Request Utility
##
def _b64(b):
"""
Helper function base64 encode for jose spec
"""
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
def _send_signed_request(url, payload):
    """
    Helper function to make signed requests to Boulder.

    Builds a JWS-style request body: the payload plus a "protected" header
    (the account JWK header with a fresh anti-replay nonce), both
    base64url-encoded and signed with the account key via openssl.

    Returns a ``(status_code, body)`` tuple. On a request failure the code
    may be None and the body is the error's text.
    """
    payload64 = _b64(json.dumps(payload).encode('utf8'))

    out = parse_account_key()
    header = get_boulder_header(out)

    protected = copy.deepcopy(header)
    # A fresh nonce is required on every request to prevent replays.
    protected["nonce"] = urlopen(DEFAULT_CA + "/directory").headers['Replay-Nonce']
    protected64 = _b64(json.dumps(protected).encode('utf8'))

    # Sign "<protected>.<payload>" with the account's private key.
    cmd = [
        'openssl', 'dgst',
        '-sha256',
        '-sign', os.path.join(gettempdir(), 'account.key')
    ]
    proc = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode('utf8'))
    if proc.returncode != 0: # pragma: no cover
        raise IOError("OpenSSL Error: {0}".format(err))

    data = json.dumps({
        "header": header, "protected": protected64,
        "payload": payload64, "signature": _b64(out),
    })

    try:
        resp = urlopen(url, data.encode('utf8'))
        return resp.getcode(), resp.read()
    except IOError as e:
        # HTTPError exposes .code and .read(); other IOErrors fall back to
        # None and the stringified exception.
        return getattr(e, "code", None), getattr(e, "read", e.__str__)()
##
# Temporary Directory Utility
##
# Cached path of the lazily-created temporary directory (None until first use).
__tempdir = None


def gettempdir():
    """
    Lazily creates a temporary directory in a secure manner. When Python exits,
    or the cleanup() function is called, the directory is erased.
    """
    global __tempdir
    if __tempdir is None:
        __tempdir = tempfile.mkdtemp()
    return __tempdir
@atexit.register
def cleanup():
    """
    Delete any temporary files (the lazily-created temp directory, if any).
    Registered with atexit so it also runs on interpreter shutdown.
    """
    global __tempdir
    if __tempdir is None:
        return
    shutil.rmtree(__tempdir)
    __tempdir = None
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
    """
    Permute all casings of a given string.

    A pretty algorithm, via @Amber
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if not input_string:
        yield ""
        return

    head, tail = input_string[:1], input_string[1:]
    if head.lower() == head.upper():
        # Non-cased character (digit, punctuation): only one spelling.
        for suffix in all_casings(tail):
            yield head + suffix
    else:
        for suffix in all_casings(tail):
            yield head.lower() + suffix
            yield head.upper() + suffix
class ZappaWSGIMiddleware:
    """
    Middleware functions necessary for a Zappa deployment.

    Most hacks have now been removed except for Set-Cookie permutation.
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        """
        We must case-mangle the Set-Cookie header name or AWS will use only a
        single one of these headers.
        """

        def encode_response(status, headers, exc_info=None):
            """
            Lowercase every 'Set-Cookie' header name and move those headers
            to the end; all non-cookie headers are passed through unharmed.

            Related: https://github.com/Miserlou/Zappa/issues/1965
            """
            passthrough = []
            lowered_cookies = []
            for header in headers:
                name = header[0]
                if type(name) is str and name.lower() == 'set-cookie':
                    lowered_cookies.append((name.lower(), header[1]))
                else:
                    passthrough.append(header)
            return start_response(status, passthrough + lowered_cookies, exc_info)

        # Call the application with our modifier
        response = self.application(environ, encode_response)

        # Return the response as a WSGI-safe iterator
        return ClosingIterator(response)
import base64
import collections
import datetime
import importlib
import inspect
import json
import logging
import os
import sys
import tarfile
import traceback
from builtins import str
import boto3
from werkzeug.wrappers import Response
# This file may be copied into a project's root,
# so handle both scenarios.
try:
from zappa.middleware import ZappaWSGIMiddleware
from zappa.utilities import merge_headers, parse_s3_url
from zappa.wsgi import common_log, create_wsgi_request
except ImportError as e: # pragma: no cover
from .middleware import ZappaWSGIMiddleware
from .utilities import merge_headers, parse_s3_url
from .wsgi import common_log, create_wsgi_request
# Set up logging
# Configure the root logger with a default handler and raise its level to
# INFO so application and library messages are emitted by default.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class LambdaHandler:
    """
    Singleton that performs one-time Lambda container setup (settings module,
    sys.path, environment variables, the WSGI application) and then routes
    every incoming Lambda event to the appropriate handler.

    Pattern provided by @benbangert.
    """
    __instance = None
    settings = None        # the imported zappa_settings module
    settings_name = None
    session = None         # optional boto3 Session (mainly injected by tests)

    # Application
    app_module = None      # imported module that holds the WSGI app
    wsgi_app = None        # ZappaWSGIMiddleware-wrapped WSGI callable
    trailing_slash = False # True for Django, which wants trailing slashes kept

    def __new__(cls, settings_name="zappa_settings", session=None):
        """Singleton instance to avoid repeat setup"""
        if LambdaHandler.__instance is None:
            print("Instancing..")
            LambdaHandler.__instance = object.__new__(cls)
        return LambdaHandler.__instance

    def __init__(self, settings_name="zappa_settings", session=None):
        # We haven't cached our settings yet, load the settings and app.
        if not self.settings:
            # Loading settings from a python module
            self.settings = importlib.import_module(settings_name)
            self.settings_name = settings_name
            self.session = session

            # Custom log level
            if self.settings.LOG_LEVEL:
                level = logging.getLevelName(self.settings.LOG_LEVEL)
                logger.setLevel(level)

            # Optionally pull extra environment variables from a JSON file on S3.
            remote_env = getattr(self.settings, 'REMOTE_ENV', None)
            remote_bucket, remote_file = parse_s3_url(remote_env)
            if remote_bucket and remote_file:
                self.load_remote_settings(remote_bucket, remote_file)

            # Let the system know that this will be a Lambda/Zappa/Stack
            os.environ["SERVERTYPE"] = "AWS Lambda"
            os.environ["FRAMEWORK"] = "Zappa"
            try:
                os.environ["PROJECT"] = self.settings.PROJECT_NAME
                os.environ["STAGE"] = self.settings.API_STAGE
            except Exception:  # pragma: no cover
                pass

            # Set any locally defined env vars
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            for key in self.settings.ENVIRONMENT_VARIABLES.keys():
                os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key]

            # # Pulling from S3 if given a zip path
            # project_archive_path = getattr(self.settings, 'ARCHIVE_PATH', None)
            # if project_archive_path:
            #     self.load_remote_project_archive(project_archive_path)

            # /tmp is the only writable filesystem on Lambda.
            project_folder = '/tmp/{0!s}'.format(self.settings.PROJECT_NAME)
            # Add to project path
            sys.path.insert(0, project_folder)

            # Change working directory to project folder
            # Related: https://github.com/Miserlou/Zappa/issues/702
            os.chdir(project_folder)

            # Load compiled library to the PythonPath
            # checks if we are the slim_handler since this is not needed otherwise
            # https://github.com/Miserlou/Zappa/issues/776
            is_slim_handler = getattr(self.settings, 'SLIM_HANDLER', False)
            if is_slim_handler:
                included_libraries = getattr(self.settings, 'INCLUDE', [])
                try:
                    from ctypes import cdll, util
                    for library in included_libraries:
                        try:
                            cdll.LoadLibrary(os.path.join(os.getcwd(), library))
                        except OSError:
                            print("Failed to find library: {}...right filename?".format(library))
                except ImportError:
                    # (Fixed typo: message previously said "cytpes".)
                    print("Failed to import ctypes library")

            # This is a non-WSGI application
            # https://github.com/Miserlou/Zappa/pull/748
            if not hasattr(self.settings, 'APP_MODULE') and not self.settings.DJANGO_SETTINGS:
                self.app_module = None
                wsgi_app_function = None
            # This is probably a normal WSGI app (Or django with overloaded wsgi application)
            # https://github.com/Miserlou/Zappa/issues/1164
            elif hasattr(self.settings, 'APP_MODULE'):
                if self.settings.DJANGO_SETTINGS:
                    sys.path.append('/var/task')
                    from django.conf import \
                        ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE
                    # add the Lambda root path into the sys.path
                    self.trailing_slash = True
                    os.environ[SETTINGS_ENVIRONMENT_VARIABLE] = self.settings.DJANGO_SETTINGS
                else:
                    self.trailing_slash = False

                # The app module
                self.app_module = importlib.import_module(self.settings.APP_MODULE)

                # The application
                wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
            # Django gets special treatment.
            else:
                try:  # Support both for tests
                    from zappa.ext.django_zappa import get_django_wsgi
                except ImportError:  # pragma: no cover
                    from django_zappa_app import get_django_wsgi

                # Get the Django WSGI app from our extension
                wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
                self.trailing_slash = True

            self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function)

    def load_remote_project_archive(self, project_zip_path):
        """
        Puts the project files from S3 in /tmp and adds to path
        """
        project_folder = '/tmp/{0!s}'.format(self.settings.PROJECT_NAME)
        if not os.path.isdir(project_folder):
            # The project folder doesn't exist in this cold lambda, get it from S3
            if not self.session:
                boto_session = boto3.Session()
            else:
                boto_session = self.session

            # Download zip file from S3
            remote_bucket, remote_file = parse_s3_url(project_zip_path)
            s3 = boto_session.resource('s3')
            archive_on_s3 = s3.Object(remote_bucket, remote_file).get()

            # NOTE(review): extractall() trusts member paths inside the archive;
            # acceptable here because the archive comes from our own deployment
            # bucket, but do not reuse this for untrusted tarballs.
            with tarfile.open(fileobj=archive_on_s3['Body'], mode="r|gz") as t:
                t.extractall(project_folder)

        # Add to project path
        sys.path.insert(0, project_folder)

        # Change working directory to project folder
        # Related: https://github.com/Miserlou/Zappa/issues/702
        os.chdir(project_folder)
        return True

    def load_remote_settings(self, remote_bucket, remote_file):
        """
        Attempt to read a file from s3 containing a flat json object. Adds each
        key->value pair as environment variables. Helpful for keeping
        sensitive or stage-specific configuration variables in s3 instead of
        version control.
        """
        if not self.session:
            boto_session = boto3.Session()
        else:
            boto_session = self.session

        s3 = boto_session.resource('s3')
        try:
            remote_env_object = s3.Object(remote_bucket, remote_file).get()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print('Could not load remote settings file.', e)
            return

        try:
            content = remote_env_object['Body'].read()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print('Exception while reading remote settings file.', e)
            return

        try:
            settings_dict = json.loads(content)
        except (ValueError, TypeError):  # pragma: no cover
            print('Failed to parse remote settings!')
            return

        # add each key-value to environment - overwrites existing keys!
        for key, value in settings_dict.items():
            if self.settings.LOG_LEVEL == "DEBUG":
                print('Adding {} -> {} to environment'.format(
                    key,
                    value
                ))
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            try:
                os.environ[str(key)] = value
            except Exception:
                if self.settings.LOG_LEVEL == "DEBUG":
                    print("Environment variable keys must be non-unicode!")

    @staticmethod
    def import_module_and_get_function(whole_function):
        """
        Given a modular path to a function, import that module
        and return the function.
        """
        module, function = whole_function.rsplit('.', 1)
        app_module = importlib.import_module(module)
        app_function = getattr(app_module, function)
        return app_function

    @classmethod
    def lambda_handler(cls, event, context):  # pragma: no cover
        """Class-level Lambda entry point: build/reuse the singleton and dispatch."""
        handler = cls()
        exception_handler = handler.settings.EXCEPTION_HANDLER
        try:
            return handler.handler(event, context)
        except Exception as ex:
            exception_processed = cls._process_exception(exception_handler=exception_handler,
                                                         event=event, context=context, exception=ex)
            if not exception_processed:
                # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry
                # an event execution in case of failure.
                raise

    @classmethod
    def _process_exception(cls, exception_handler, event, context, exception):
        """Run the user-configured exception handler, if any.

        Returns whatever the handler returns (truthy means "fully processed,
        do not re-raise"); False when no handler is set or the handler fails.
        """
        exception_processed = False
        if exception_handler:
            try:
                handler_function = cls.import_module_and_get_function(exception_handler)
                exception_processed = handler_function(exception, event, context)
            except Exception as cex:
                logger.error(msg='Failed to process exception via custom handler.')
                print(cex)
        return exception_processed

    @staticmethod
    def run_function(app_function, event, context):
        """
        Given a function and event context,
        detect signature and execute, returning any result.
        """
        # getargspec does not support python 3 method with type hints
        # Related issue: https://github.com/Miserlou/Zappa/issues/1452
        if hasattr(inspect, "getfullargspec"):  # Python 3
            args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(app_function)
        else:  # Python 2
            args, varargs, keywords, defaults = inspect.getargspec(app_function)
        num_args = len(args)
        if num_args == 0:
            result = app_function(event, context) if varargs else app_function()
        elif num_args == 1:
            result = app_function(event, context) if varargs else app_function(event)
        elif num_args == 2:
            result = app_function(event, context)
        else:
            raise RuntimeError("Function signature is invalid. Expected a function that accepts at most "
                               "2 arguments or varargs.")
        return result

    def get_function_for_aws_event(self, record):
        """
        Get the associated function to execute for a triggered AWS event

        Support S3, SNS, DynamoDB, kinesis and SQS events
        """
        # S3 events created by Zappa encode the function name in the
        # configurationId after a colon.
        if 's3' in record:
            if ':' in record['s3']['configurationId']:
                return record['s3']['configurationId'].split(':')[-1]

        arn = None
        if 'Sns' in record:
            try:
                message = json.loads(record['Sns']['Message'])
                if message.get('command'):
                    return message['command']
            except ValueError:
                pass
            arn = record['Sns'].get('TopicArn')
        elif 'dynamodb' in record or 'kinesis' in record:
            arn = record.get('eventSourceARN')
        elif 'eventSource' in record and record.get('eventSource') == 'aws:sqs':
            arn = record.get('eventSourceARN')
        elif 's3' in record:
            # S3 record without a ':' in its configurationId: fall back to the
            # bucket ARN lookup.
            arn = record['s3']['bucket']['arn']

        if arn:
            return self.settings.AWS_EVENT_MAPPING.get(arn)

        return None

    def get_function_from_bot_intent_trigger(self, event):
        """
        For the given event build ARN and return the configured function
        """
        intent = event.get('currentIntent')
        if intent:
            intent = intent.get('name')
            if intent:
                # Mapping key is "<intent name>:<invocation source>".
                return self.settings.AWS_BOT_EVENT_MAPPING.get(
                    "{}:{}".format(intent, event.get('invocationSource'))
                )

    def get_function_for_cognito_trigger(self, trigger):
        """
        Get the associated function to execute for a cognito trigger
        """
        print("get_function_for_cognito_trigger", self.settings.COGNITO_TRIGGER_MAPPING, trigger, self.settings.COGNITO_TRIGGER_MAPPING.get(trigger))
        return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)

    def handler(self, event, context):
        """
        An AWS Lambda function which parses specific API Gateway input into a
        WSGI request, feeds it to our WSGI app, processes the response, and returns
        that back to the API Gateway.
        """
        settings = self.settings

        # If in DEBUG mode, log all raw incoming events.
        if settings.DEBUG:
            logger.debug('Zappa Event: {}'.format(event))

        # Set any API Gateway defined Stage Variables
        # as env vars
        if event.get('stageVariables'):
            for key in event['stageVariables'].keys():
                os.environ[str(key)] = event['stageVariables'][key]

        # This is the result of a keep alive, recertify
        # or scheduled event.
        if event.get('detail-type') == 'Scheduled Event':

            whole_function = event['resources'][0].split('/')[-1].split('-')[-1]

            # This is a scheduled function.
            if '.' in whole_function:
                app_function = self.import_module_and_get_function(whole_function)

                # Execute the function!
                return self.run_function(app_function, event, context)

            # Else, let this execute as it were.

        # This is a direct command invocation.
        elif event.get('command', None):

            whole_function = event['command']
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            print("Result of %s:" % whole_function)
            print(result)
            return result

        # This is a direct, raw python invocation.
        # It's _extremely_ important we don't allow this event source
        # to be overridden by unsanitized, non-admin user input.
        elif event.get('raw_command', None):

            # SECURITY: exec() of the event payload is intentional here (the
            # `zappa invoke --raw` feature) and must only ever be reachable by
            # deployment admins.
            raw_command = event['raw_command']
            exec(raw_command)
            return

        # This is a Django management command invocation.
        elif event.get('manage', None):

            from django.core import management

            try:  # Support both for tests
                from zappa.ext.django_zappa import get_django_wsgi
            except ImportError as e:  # pragma: no cover
                from django_zappa_app import get_django_wsgi

            # Get the Django WSGI app from our extension
            # We don't actually need the function,
            # but we do need to do all of the required setup for it.
            app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)

            # Couldn't figure out how to get the value into stdout with StringIO..
            # Read the log for now. :[]
            management.call_command(*event['manage'].split(' '))
            return {}

        # This is an AWS-event triggered invocation.
        elif event.get('Records', None):

            records = event.get('Records')
            result = None
            whole_function = self.get_function_for_aws_event(records[0])
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # this is an AWS-event triggered from Lex bot's intent
        elif event.get('bot'):
            result = None
            whole_function = self.get_function_from_bot_intent_trigger(event)
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # This is an API Gateway authorizer event
        elif event.get('type') == 'TOKEN':
            whole_function = self.settings.AUTHORIZER_FUNCTION
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                policy = self.run_function(app_function, event, context)
                return policy
            else:
                logger.error("Cannot find a function to process the authorization request.")
                raise Exception('Unauthorized')

        # This is an AWS Cognito Trigger Event
        elif event.get('triggerSource', None):
            triggerSource = event.get('triggerSource')
            whole_function = self.get_function_for_cognito_trigger(triggerSource)
            # Cognito expects the (possibly mutated) event back when no
            # handler is configured.
            result = event
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource))
            return result

        # This is a CloudWatch event
        # Related: https://github.com/Miserlou/Zappa/issues/1924
        elif event.get('awslogs', None):
            result = None
            whole_function = '{}.{}'.format(settings.APP_MODULE, settings.APP_FUNCTION)
            app_function = self.import_module_and_get_function(whole_function)
            if app_function:
                result = self.run_function(app_function, event, context)
                logger.debug("Result of %s:" % whole_function)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # Normal web app flow
        try:
            # Timing
            time_start = datetime.datetime.now()

            # This is a normal HTTP request
            if event.get('httpMethod', None):
                script_name = ''
                is_elb_context = False
                headers = merge_headers(event)

                if event.get('requestContext', None) and event['requestContext'].get('elb', None):
                    # Related: https://github.com/Miserlou/Zappa/issues/1715
                    # inputs/outputs for lambda loadbalancer
                    # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
                    is_elb_context = True
                    # host is lower-case when forwarded from ELB
                    host = headers.get('host')
                    # TODO: pathParameters is a first-class citizen in apigateway but not available without
                    # some parsing work for ELB (is this parameter used for anything?)
                    event['pathParameters'] = ''
                else:
                    if headers:
                        host = headers.get('Host')
                    else:
                        host = None
                    logger.debug('host found: [{}]'.format(host))

                    if host:
                        if 'amazonaws.com' in host:
                            logger.debug('amazonaws found in host')
                            # The path provided in the event doesn't include the
                            # stage, so we must tell Flask to include the API
                            # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
                            script_name = '/' + settings.API_STAGE
                    else:
                        # This is a test request sent from the AWS console
                        if settings.DOMAIN:
                            # Assume the requests received will be on the specified
                            # domain. No special handling is required
                            pass
                        else:
                            # Assume the requests received will be to the
                            # amazonaws.com endpoint, so tell Flask to include the
                            # API stage
                            script_name = '/' + settings.API_STAGE

                base_path = getattr(settings, 'BASE_PATH', None)

                # Create the environment for WSGI and handle the request
                environ = create_wsgi_request(
                    event,
                    script_name=script_name,
                    base_path=base_path,
                    trailing_slash=self.trailing_slash,
                    binary_support=settings.BINARY_SUPPORT,
                    context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS
                )

                # We are always on https on Lambda, so tell our wsgi app that.
                environ['HTTPS'] = 'on'
                environ['wsgi.url_scheme'] = 'https'
                environ['lambda.context'] = context
                environ['lambda.event'] = event

                # Execute the application
                with Response.from_app(self.wsgi_app, environ) as response:
                    # This is the object we're going to return.
                    # Pack the WSGI response into our special dictionary.
                    zappa_returndict = dict()

                    # Issue #1715: ALB support. ALB responses must always include
                    # base64 encoding and status description
                    if is_elb_context:
                        zappa_returndict.setdefault('isBase64Encoded', False)
                        zappa_returndict.setdefault('statusDescription', response.status)

                    if response.data:
                        if settings.BINARY_SUPPORT and \
                                not response.mimetype.startswith("text/") \
                                and response.mimetype != "application/json":
                            zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8')
                            zappa_returndict["isBase64Encoded"] = True
                        else:
                            zappa_returndict['body'] = response.get_data(as_text=True)

                    zappa_returndict['statusCode'] = response.status_code
                    if 'headers' in event:
                        zappa_returndict['headers'] = {}
                        for key, value in response.headers:
                            zappa_returndict['headers'][key] = value
                    if 'multiValueHeaders' in event:
                        zappa_returndict['multiValueHeaders'] = {}
                        for key, value in response.headers:
                            zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key)

                    # Calculate the total response time,
                    # and log it in the Common Log format.
                    time_end = datetime.datetime.now()
                    delta = time_end - time_start
                    response_time_ms = delta.total_seconds() * 1000
                    response.content = response.data
                    common_log(environ, response, response_time=response_time_ms)

                    return zappa_returndict
        except Exception as e:  # pragma: no cover

            # Print statements are visible in the logs either way
            print(e)
            exc_info = sys.exc_info()
            message = ('An uncaught exception happened while servicing this request. '
                       'You can investigate this with the `zappa tail` command.')

            # If we didn't even build an app_module, just raise.
            if not settings.DJANGO_SETTINGS:
                try:
                    self.app_module
                except NameError as ne:
                    # Py3 fix: exceptions no longer have a `.message` attribute,
                    # so format the exception object itself.
                    message = 'Failed to import module: {}'.format(ne)

            # Call exception handler for unhandled exceptions
            exception_handler = self.settings.EXCEPTION_HANDLER
            self._process_exception(exception_handler=exception_handler,
                                    event=event, context=context, exception=e)

            # Return this unspecified exception as a 500, using template that API Gateway expects.
            content = collections.OrderedDict()
            content['statusCode'] = 500
            body = {'message': message}
            if settings.DEBUG:  # only include traceback if debug is on.
                body['traceback'] = traceback.format_exception(*exc_info)  # traceback as a list for readability.
            content['body'] = json.dumps(str(body), sort_keys=True, indent=4)
            return content
def lambda_handler(event, context):  # pragma: no cover
    """Module-level AWS Lambda entry point; defers to the LambdaHandler singleton."""
    handler_cls = LambdaHandler
    return handler_cls.lambda_handler(event, context)
def keep_warm_callback(event, context):
    """Handle the CloudWatch schedule fired when the keep_warm setting is true.

    The incoming event is deliberately replaced with an empty dict so that
    only web-app initialization runs — no real request gets routed.
    """
    lambda_handler(event={}, context=context)
import base64
import logging
import sys
from urllib.parse import urlencode
import six
from requestlogger import ApacheFormatter
from werkzeug import urls
from .utilities import merge_headers, titlecase_keys
# HTTP methods that may carry a request body. When binary support is enabled,
# bodies for these methods may arrive base64-encoded from API Gateway and are
# decoded in create_wsgi_request() before reaching the WSGI app.
BINARY_METHODS = [
    "POST",
    "PUT",
    "PATCH",
    "DELETE",
    "CONNECT",
    "OPTIONS"
]
def create_wsgi_request(event_info,
                        server_name='zappa',
                        script_name=None,
                        trailing_slash=True,
                        binary_support=False,
                        base_path=None,
                        context_header_mappings=None,
                        ):
    """
    Given some event_info via API Gateway,
    create and return a valid WSGI request environ.

    :param event_info: raw API Gateway / ALB event dict.
    :param server_name: value for the WSGI SERVER_NAME key.
    :param script_name: mount prefix (e.g. '/stage'); overridden when
        base_path is given.
    :param trailing_slash: accepted for interface compatibility; not read here.
    :param binary_support: when True, bodies of methods in BINARY_METHODS may
        be base64-decoded.
    :param base_path: optional base path to strip from the request path.
    :param context_header_mappings: mapping of header name -> dotted path into
        event_info['requestContext'] whose value is surfaced as a header.
    """
    # None (not a mutable {}) as the default avoids the shared
    # mutable-default-argument pitfall.
    if context_header_mappings is None:
        context_header_mappings = {}

    method = event_info['httpMethod']
    headers = merge_headers(event_info) or {}  # Allow for the AGW console 'Test' button to work (Pull #735)

    # API Gateway and ALB both started allowing for multi-value querystring
    # params in Nov. 2018. If there aren't multi-value params present, then
    # it acts identically to 'queryStringParameters', so we can use it as a
    # drop-in replacement.
    #
    # The one caveat here is that ALB will only include _one_ of
    # queryStringParameters _or_ multiValueQueryStringParameters, which means
    # we have to check for the existence of one and then fall back to the
    # other.
    if 'multiValueQueryStringParameters' in event_info:
        query = event_info['multiValueQueryStringParameters']
        query_string = urlencode(query, doseq=True) if query else ''
    else:
        query = event_info.get('queryStringParameters', {})
        query_string = urlencode(query) if query else ''

    if context_header_mappings:
        for key, value in context_header_mappings.items():
            # Walk the dotted path into requestContext; bail out on any
            # missing segment.
            parts = value.split('.')
            header_val = event_info['requestContext']
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val

    # Extract remote user from context if Authorizer is enabled
    remote_user = None
    if event_info['requestContext'].get('authorizer'):
        remote_user = event_info['requestContext']['authorizer'].get('principalId')
    elif event_info['requestContext'].get('identity'):
        remote_user = event_info['requestContext']['identity'].get('userArn')

    # Related: https://github.com/Miserlou/Zappa/issues/677
    #   https://github.com/Miserlou/Zappa/issues/683
    #   https://github.com/Miserlou/Zappa/issues/696
    #   https://github.com/Miserlou/Zappa/issues/836
    # https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get('isBase64Encoded', False):
            encoded_body = event_info['body']
            body = base64.b64decode(encoded_body)
        else:
            body = event_info['body']
            if isinstance(body, six.string_types):
                body = body.encode("utf-8")
    else:
        body = event_info['body']
        if isinstance(body, six.string_types):
            body = body.encode("utf-8")

    # Make header names canonical, e.g. content-type => Content-Type
    # https://github.com/Miserlou/Zappa/issues/1188
    headers = titlecase_keys(headers)

    path = urls.url_unquote(event_info['path'])
    if base_path:
        script_name = '/' + base_path

        if path.startswith(script_name):
            path = path[len(script_name):]

    x_forwarded_for = headers.get('X-Forwarded-For', '')
    if ',' in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        remote_addr = x_forwarded_for.split(', ')[-2]
    else:
        remote_addr = x_forwarded_for or '127.0.0.1'

    environ = {
        'PATH_INFO': get_wsgi_string(path),
        'QUERY_STRING': get_wsgi_string(query_string),
        'REMOTE_ADDR': remote_addr,
        'REQUEST_METHOD': method,
        'SCRIPT_NAME': get_wsgi_string(str(script_name)) if script_name else '',
        'SERVER_NAME': str(server_name),
        'SERVER_PORT': headers.get('X-Forwarded-Port', '80'),
        'SERVER_PROTOCOL': str('HTTP/1.1'),
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': headers.get('X-Forwarded-Proto', 'http'),
        'wsgi.input': body,
        'wsgi.errors': sys.stderr,
        'wsgi.multiprocess': False,
        'wsgi.multithread': False,
        'wsgi.run_once': False,
    }

    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if 'Content-Type' in headers:
            environ['CONTENT_TYPE'] = headers['Content-Type']

        # This must be Bytes or None
        environ['wsgi.input'] = six.BytesIO(body)
        if body:
            environ['CONTENT_LENGTH'] = str(len(body))
        else:
            environ['CONTENT_LENGTH'] = '0'

    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace('-', '_')
        environ[wsgi_name] = str(headers[header])

    if script_name:
        environ['SCRIPT_NAME'] = script_name

        path_info = environ['PATH_INFO']
        if script_name in path_info:
            # Bug fix: str.replace() returns a new string and the original
            # code discarded the result, making this a no-op. Assign it back,
            # stripping only the first occurrence so a path such as
            # '/dev/devices' only loses the mount prefix.
            environ['PATH_INFO'] = path_info.replace(script_name, '', 1)

    if remote_user:
        environ['REMOTE_USER'] = remote_user

    if event_info['requestContext'].get('authorizer'):
        environ['API_GATEWAY_AUTHORIZER'] = event_info['requestContext']['authorizer']

    return environ
def common_log(environ, response, response_time=None):
    """Log this request/response pair in Apache Common Log Format.

    Returns the formatted log entry after emitting it at INFO level.
    When *response_time* is given it is included in the entry.
    """
    logger = logging.getLogger()
    content_length = len(response.content)

    if response_time:
        formatter = ApacheFormatter(with_response_time=True)
        try:
            log_entry = formatter(response.status_code, environ,
                                  content_length, rt_us=response_time)
        except TypeError:
            # Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
            log_entry = formatter(response.status_code, environ,
                                  content_length, rt_ms=response_time)
    else:
        formatter = ApacheFormatter(with_response_time=False)
        log_entry = formatter(response.status_code, environ, content_length)

    logger.info(log_entry)
    return log_entry
# Related: https://github.com/Miserlou/Zappa/issues/1199
def get_wsgi_string(string, encoding='utf-8'):
    """
    Re-encode a native string into WSGI's "bytes-as-latin-1" representation.

    The text is encoded with *encoding* (UTF-8 by default) and the raw bytes
    are then reinterpreted as ISO-8859-1, as the WSGI spec requires for
    environ values.
    """
    raw_bytes = string.encode(encoding)
    return raw_bytes.decode('iso-8859-1')
import importlib
import inspect
import json
import os
import time
import uuid
from functools import update_wrapper, wraps
import boto3
import botocore
from .utilities import get_topic_name
try:
from zappa_settings import ASYNC_RESPONSE_TABLE
except ImportError:
ASYNC_RESPONSE_TABLE = None
# Declare these here so they're kept warm.
try:
    aws_session = boto3.Session()
    LAMBDA_CLIENT = aws_session.client('lambda')
    SNS_CLIENT = aws_session.client('sns')
    STS_CLIENT = aws_session.client('sts')
    DYNAMODB_CLIENT = aws_session.client('dynamodb')
except botocore.exceptions.NoRegionError as e:  # pragma: no cover
    # This can happen while testing on Travis, but it's taken care of
    # during class initialization.
    pass

##
# Response and Exception classes
##

# Maximum serialized message size accepted per dispatch channel, in bytes.
# NOTE(review): 256000 presumably mirrors AWS's 256 KB limits for async
# Lambda invocation payloads and SNS messages — confirm against current quotas.
LAMBDA_ASYNC_PAYLOAD_LIMIT = 256000
SNS_ASYNC_PAYLOAD_LIMIT = 256000
class AsyncException(Exception):  # pragma: no cover
    """Raised when an async task message cannot be dispatched
    (e.g. its serialized payload exceeds the channel's size limit)."""
    pass
class LambdaAsyncResponse:
    """
    Base Response Dispatcher class
    Can be used directly or subclassed if the method to send the message is changed.
    """
    def __init__(self, lambda_function_name=None, aws_region=None, capture_response=False, **kwargs):
        """Record the target Lambda/region and set up response-capture bookkeeping.

        Accepts an optional ``boto_session`` kwarg (mainly for tests);
        otherwise the warm module-level LAMBDA_CLIENT is used.
        """
        if kwargs.get('boto_session'):
            self.client = kwargs.get('boto_session').client('lambda')
        else:  # pragma: no cover
            self.client = LAMBDA_CLIENT
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                # Capture was requested but no DynamoDB table is configured:
                # warn and fall back to fire-and-forget.
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        self.capture_response = capture_response

    def send(self, task_path, args, kwargs):
        """
        Create the message object and pass it to the actual sender.
        """
        message = {
            'task_path': task_path,
            'capture_response': self.capture_response,
            'response_id': self.response_id,
            'args': args,
            'kwargs': kwargs
        }
        self._send(message)
        return self

    def _send(self, message):
        """
        Given a message, directly invoke the lambda function for this task.
        """
        message['command'] = 'zappa.asynchronous.route_lambda_task'
        payload = json.dumps(message).encode('utf-8')
        if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for async Lambda call")
        # 'Event' invocation type makes the call asynchronous; AWS replies
        # with HTTP 202 on successful queueing.
        self.response = self.client.invoke(
            FunctionName=self.lambda_function_name,
            InvocationType='Event',  # makes the call async
            Payload=payload
        )
        self.sent = (self.response.get('StatusCode', 0) == 202)
class SnsAsyncResponse(LambdaAsyncResponse):
    """
    Send a SNS message to a specified SNS topic
    Serialise the func path and arguments
    """
    def __init__(self, lambda_function_name=None, aws_region=None, capture_response=False, **kwargs):
        """Resolve the target topic ARN (building it via STS when not given)
        and set up response-capture bookkeeping."""
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region

        if kwargs.get('boto_session'):
            self.client = kwargs.get('boto_session').client('sns')
        else:  # pragma: no cover
            self.client = SNS_CLIENT

        if kwargs.get('arn'):
            self.arn = kwargs.get('arn')
        else:
            if kwargs.get('boto_session'):
                sts_client = kwargs.get('boto_session').client('sts')
            else:
                sts_client = STS_CLIENT
            # Build the topic ARN from the caller's account id and the
            # conventional Zappa topic name.
            AWS_ACCOUNT_ID = sts_client.get_caller_identity()['Account']
            self.arn = 'arn:aws:sns:{region}:{account}:{topic_name}'.format(
                region=self.aws_region,
                account=AWS_ACCOUNT_ID,
                topic_name=get_topic_name(self.lambda_function_name)
            )

        # Issue: https://github.com/Miserlou/Zappa/issues/1209
        # TODO: Refactor (duplicated with LambdaAsyncResponse.__init__)
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        self.capture_response = capture_response

    def _send(self, message):
        """
        Given a message, publish to this topic.
        """
        message['command'] = 'zappa.asynchronous.route_sns_task'
        # SNS's Message parameter must be a *string* (publishing bytes fails
        # botocore parameter validation), so keep the JSON as text and
        # measure its UTF-8 size against the SNS limit. (Previously this
        # checked LAMBDA_ASYNC_PAYLOAD_LIMIT; same value, wrong constant.)
        payload = json.dumps(message)
        if len(payload.encode('utf-8')) > SNS_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for SNS")
        self.response = self.client.publish(
            TargetArn=self.arn,
            Message=payload
        )
        # SNS reports success by returning a MessageId.
        self.sent = self.response.get('MessageId')
##
# Aync Routers
##
# Registry mapping the `service` argument of task()/run() to the dispatcher
# class that delivers the message via that AWS service.
ASYNC_CLASSES = {
    'lambda': LambdaAsyncResponse,
    'sns': SnsAsyncResponse,
}
def route_lambda_task(event, context):
    """Entry point for directly-invoked async Lambda tasks.

    For direct Lambda invocation the event *is* the task message
    (task_path/args/kwargs), so it is executed as-is.
    """
    return run_message(event)
def route_sns_task(event, context):
    """Entry point for SNS-delivered async tasks.

    Unpacks the first SNS record, decodes its JSON-encoded task message,
    and executes it.
    """
    first_record = event['Records'][0]
    task_message = json.loads(first_record['Sns']['Message'])
    return run_message(task_message)
def run_message(message):
    """
    Runs a function defined by a message object with keys:
    'task_path', 'args', and 'kwargs' used by lambda routing
    and a 'command' in handler.py
    """
    if message.get('capture_response', False):
        # Mark the task as in-progress so callers can poll for the result;
        # the item expires after 10 minutes via the table's TTL attribute.
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                'id': {'S': str(message['response_id'])},
                'ttl': {'N': str(int(time.time()+600))},
                'async_status': {'S': 'in progress'},
                'async_response': {'S': str(json.dumps('N/A'))},
            }
        )

    func = import_and_get_task(message['task_path'])

    if hasattr(func, 'sync'):
        # @task-decorated functions expose the original callable as `.sync`;
        # call that to avoid re-dispatching the task asynchronously.
        response = func.sync(
            *message['args'],
            **message['kwargs']
        )
    else:
        response = func(
            *message['args'],
            **message['kwargs']
        )

    if message.get('capture_response', False):
        # Store the JSON-serialized result and flip the status to complete.
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={'id': {'S': str(message['response_id'])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ':r': {'S': str(json.dumps(response))},
                ':s': {'S': 'complete'},
            },
        )

    return response
##
# Execution interfaces and classes
##
def run(func, args=None, kwargs=None, service='lambda', capture_response=False,
        remote_aws_lambda_function_name=None, remote_aws_region=None, **task_kwargs):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:

    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)

    If you want to use SNS, then do:

    zappa.asynchronous.run(func, args, kwargs, service='sns')

    and other arguments are similar to @task
    """
    # None defaults (instead of the previous `args=[], kwargs={}`) avoid the
    # shared mutable-default-argument pitfall; behavior is unchanged.
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}

    # Fall back to this Lambda's own identity when no remote target is given.
    lambda_function_name = remote_aws_lambda_function_name or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
    aws_region = remote_aws_region or os.environ.get('AWS_REGION')

    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
                                  aws_region=aws_region,
                                  capture_response=capture_response,
                                  **task_kwargs).send(task_path, args, kwargs)
# Handy:
# http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function
# However, this needs to pass inspect.getargspec() in handler.py which does not take classes
# Wrapper written to take optional arguments
# http://chase-seibert.github.io/blog/2013/12/17/python-decorator-optional-parameter.html
def task(*args, **kwargs):
    """Async task decorator: dispatch the wrapped function asynchronously.

    Args:
        func (function): the function to be wrapped
            Further requirements:
            func must be an independent top-level function.
            i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against
        capture_response (bool): when True, store the task's result in DynamoDB

    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question
    """
    # Supports both bare (@task) and parameterized (@task(service='sns'))
    # usage: in the bare form the wrapped function arrives as the only
    # positional argument.
    func = None
    if len(args) == 1 and callable(args[0]):
        func = args[0]

    if not kwargs:  # Default Values
        service = 'lambda'
        lambda_function_name_arg = None
        aws_region_arg = None

    else:  # Arguments were passed
        service = kwargs.get('service', 'lambda')
        lambda_function_name_arg = kwargs.get('remote_aws_lambda_function_name')
        aws_region_arg = kwargs.get('remote_aws_region')

    capture_response = kwargs.get('capture_response', False)

    def func_wrapper(func):

        # Dotted module path used to re-import the function on the worker side.
        task_path = get_func_task_path(func)

        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func

            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.

            Returns:
                In async mode, the object returned includes state of the dispatch.
                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
            aws_region = aws_region_arg or os.environ.get('AWS_REGION')
            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
                                                     aws_region=aws_region,
                                                     capture_response=capture_response).send(task_path, args, kwargs)
                return send_result
            else:
                # Not running on Lambda (or unknown service): degrade to a
                # plain synchronous call.
                return func(*args, **kwargs)

        update_wrapper(_run_async, func)

        # Expose dispatch metadata and the original callable (used by
        # run_message to avoid re-dispatch loops).
        _run_async.service = service
        _run_async.sync = func

        return _run_async

    # Bare decorator usage: apply immediately; otherwise return the decorator.
    return func_wrapper(func) if func else func_wrapper
def task_sns(func):
    """
    Convenience decorator: identical to @task but dispatches via SNS.
    """
    sns_wrapped = task(func, service='sns')
    return sns_wrapped
##
# Utility Functions
##
def import_and_get_task(task_path):
    """
    Resolve a dotted task path ('pkg.module.func') to the callable it names.

    Imports the containing module and returns the attribute named by the
    final path segment.
    """
    module_name, function_name = task_path.rsplit('.', 1)
    containing_module = importlib.import_module(module_name)
    return getattr(containing_module, function_name)
def get_func_task_path(func):
    """
    Build the dotted '<module>.<function>' task path for *func* via inspection.
    """
    owning_module = inspect.getmodule(func)
    return f"{owning_module.__name__}.{func.__name__}"
def get_async_response(response_id):
    """
    Fetch a stored async task result from the DynamoDB response table.

    Returns None when no record exists for *response_id*; otherwise a dict
    with the stored 'status' string and the JSON-decoded 'response' payload.
    """
    record = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE,
        Key={'id': {'S': str(response_id)}}
    )
    if 'Item' not in record:
        return None
    item = record['Item']
    return {
        'status': item['async_status']['S'],
        'response': json.loads(item['async_response']['S']),
    }
import getpass
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
import boto3
import botocore
import requests
import troposphere
import troposphere.apigateway
from botocore.exceptions import ClientError
from setuptools import find_packages
from tqdm import tqdm
from .utilities import (add_event_source, conflicts_with_a_neighbouring_module,
contains_python_files_or_subdirs, copytree,
get_topic_name, get_venv_from_python_version,
human_size, remove_event_source)
##
# Logging Config
##

logging.basicConfig(format='%(levelname)s:%(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

##
# Policies And Template Mappings
##

# IAM trust policy: which AWS services may assume the Zappa execution role.
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""

# Default (deliberately broad) permission policy attached to the execution role.
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""

# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
# Fix: 'eu-north-1' was listed twice; the duplicate has been removed.
API_GATEWAY_REGIONS = ['us-east-1', 'us-east-2',
                       'us-west-1', 'us-west-2',
                       'eu-central-1',
                       'eu-north-1',
                       'eu-west-1', 'eu-west-2', 'eu-west-3',
                       'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3',
                       'ap-southeast-1', 'ap-southeast-2',
                       'ap-east-1',
                       'ap-south-1',
                       'ca-central-1',
                       'cn-north-1',
                       'cn-northwest-1',
                       'sa-east-1',
                       'us-gov-east-1', 'us-gov-west-1']

# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
# Fix: 'eu-north-1' was listed twice; the duplicate has been removed.
LAMBDA_REGIONS = ['us-east-1', 'us-east-2',
                  'us-west-1', 'us-west-2',
                  'eu-central-1',
                  'eu-north-1',
                  'eu-west-1', 'eu-west-2', 'eu-west-3',
                  'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3',
                  'ap-southeast-1', 'ap-southeast-2',
                  'ap-east-1',
                  'ap-south-1',
                  'ca-central-1',
                  'cn-north-1',
                  'cn-northwest-1',
                  'sa-east-1',
                  'us-gov-east-1',
                  'us-gov-west-1']

# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
ZIP_EXCLUDES = [
    '*.exe', '*.DS_Store', '*.Python', '*.git', '.git/*', '*.zip', '*.tar.gz',
    '*.hg', 'pip', 'docutils*', 'setuputils*', '__pycache__/*'
]

# When using ALB as an event source for Lambdas, we need to create an alias
# to ensure that, on zappa update, the ALB doesn't lose permissions to access
# the Lambda.
# See: https://github.com/Miserlou/Zappa/pull/1730
ALB_LAMBDA_ALIAS = 'current-alb-version'
##
# Classes
##
class Zappa:
    """
    Zappa!

    Makes it easy to run Python web applications on AWS Lambda/API Gateway.
    """

    ##
    # Configurables
    ##

    # Methods created on API Gateway routes ('ANY' is API Gateway's catch-all verb).
    http_methods = ['ANY']
    # Default name for the IAM execution role (overridable via __init__).
    role_name = "ZappaLambdaExecution"
    # Placeholder for extra IAM permissions merged in by callers, if any.
    extra_permissions = None
    # IAM trust policy: services allowed to assume the execution role.
    assume_policy = ASSUME_POLICY
    # IAM permission policy attached to the execution role.
    attach_policy = ATTACH_POLICY
    # Optional API Gateway resource policy (None means no policy applied).
    apigateway_policy = None
    # Accepted values for CloudWatch logging level — presumably consumed by
    # API Gateway stage settings elsewhere; confirm against usage.
    cloudwatch_log_levels = ['OFF', 'ERROR', 'INFO']
    # Whether AWS X-Ray active tracing is enabled (set for real in __init__).
    xray_tracing = False

    ##
    # Credentials
    ##

    # boto3 session and resolved execution-role ARN; populated during
    # initialization / credential loading.
    boto_session = None
    credentials_arn = None
def __init__(self,
             boto_session=None,
             profile_name=None,
             aws_region=None,
             load_credentials=True,
             desired_role_name=None,
             desired_role_arn=None,
             runtime='python3.6',  # Detected at runtime in CLI
             tags=(),
             endpoint_urls=None,
             xray_tracing=False
             ):
    """
    Instantiate this new Zappa instance, loading any custom credentials if necessary.

    Args:
        boto_session: Existing boto3 session to reuse (a new one is resolved otherwise).
        profile_name: AWS credentials profile forwarded to load_credentials().
        aws_region: Target region; None means use boto's resolved default region.
        load_credentials: When True, resolve credentials and initialize all AWS clients.
        desired_role_name: Override for the default execution role name.
        desired_role_arn: Pre-resolved execution role ARN, skipping role lookup.
        runtime: Lambda Python runtime identifier (e.g. 'python3.6').
        tags: Tags applied to created resources.
        endpoint_urls: Optional mapping of service name -> custom endpoint URL.
        xray_tracing: Enable AWS X-Ray active tracing.
    """
    # Fix: endpoint_urls previously used a mutable default argument ({}),
    # shared across all instances. Behavior for callers is unchanged.
    if endpoint_urls is None:
        endpoint_urls = {}
    # Set aws_region to None to use the system's region instead
    if aws_region is None:
        # https://github.com/Miserlou/Zappa/issues/413
        self.aws_region = boto3.Session().region_name
        logger.debug("Set region from boto: %s", self.aws_region)
    else:
        self.aws_region = aws_region

    if desired_role_name:
        self.role_name = desired_role_name

    if desired_role_arn:
        self.credentials_arn = desired_role_arn

    self.runtime = runtime

    if self.runtime == 'python3.6':
        self.manylinux_suffix_start = 'cp36m'
    elif self.runtime == 'python3.7':
        self.manylinux_suffix_start = 'cp37m'
    else:
        # The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
        # See https://github.com/pypa/manylinux for a more detailed explanation
        self.manylinux_suffix_start = 'cp38'

    # AWS Lambda supports manylinux1/2010 and manylinux2014
    manylinux_suffixes = ("2014", "2010", "1")
    self.manylinux_wheel_file_match = re.compile(f'^.*{self.manylinux_suffix_start}-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$')
    self.manylinux_wheel_abi3_file_match = re.compile(f'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$')

    self.endpoint_urls = endpoint_urls
    self.xray_tracing = xray_tracing

    # Some common invocations, such as DB migrations,
    # can take longer than the default.
    # Note that this is set to 300s, but if connected to
    # APIGW, Lambda will max out at 30s.
    # Related: https://github.com/Miserlou/Zappa/issues/205
    long_config_dict = {
        'region_name': aws_region,
        'connect_timeout': 5,
        'read_timeout': 300
    }
    long_config = botocore.client.Config(**long_config_dict)

    if load_credentials:
        self.load_credentials(boto_session, profile_name)

        # Initialize clients
        self.s3_client = self.boto_client('s3')
        self.lambda_client = self.boto_client('lambda', config=long_config)
        self.elbv2_client = self.boto_client('elbv2')
        self.events_client = self.boto_client('events')
        self.apigateway_client = self.boto_client('apigateway')
        # AWS ACM certificates need to be created from us-east-1 to be used by API gateway
        east_config = botocore.client.Config(region_name='us-east-1')
        self.acm_client = self.boto_client('acm', config=east_config)
        self.logs_client = self.boto_client('logs')
        self.iam_client = self.boto_client('iam')
        self.iam = self.boto_resource('iam')
        self.cloudwatch = self.boto_client('cloudwatch')
        self.route53 = self.boto_client('route53')
        self.sns_client = self.boto_client('sns')
        self.cf_client = self.boto_client('cloudformation')
        self.dynamodb_client = self.boto_client('dynamodb')
        self.cognito_client = self.boto_client('cognito-idp')
        self.sts_client = self.boto_client('sts')

    self.tags = tags
    self.cf_template = troposphere.Template()
    self.cf_api_resources = []
    self.cf_parameters = {}
def configure_boto_session_method_kwargs(self, service, kw):
    """
    Inject a custom endpoint URL for *service* into boto call kwargs.

    Allows for custom endpoint urls for non-AWS (testing and bootleg cloud)
    deployments. A caller-supplied 'endpoint_url' is never overridden.

    Args:
        service: boto service name (e.g. 's3').
        kw: kwargs dict destined for boto_session.client()/resource();
            mutated in place and also returned.
    """
    # Idiom fix: `'endpoint_url' not in kw` instead of `not 'endpoint_url' in kw`.
    if service in self.endpoint_urls and 'endpoint_url' not in kw:
        kw['endpoint_url'] = self.endpoint_urls[service]
    return kw
def boto_client(self, service, *args, **kwargs):
    """Create a boto3 client for *service*, applying any configured endpoint overrides."""
    resolved_kwargs = self.configure_boto_session_method_kwargs(service, kwargs)
    return self.boto_session.client(service, *args, **resolved_kwargs)
def boto_resource(self, service, *args, **kwargs):
    """Create a boto3 resource for *service*, applying any configured endpoint overrides."""
    resolved_kwargs = self.configure_boto_session_method_kwargs(service, kwargs)
    return self.boto_session.resource(service, *args, **resolved_kwargs)
def cache_param(self, value):
    '''Returns a troposphere Ref to a value cached as a parameter.'''
    if value not in self.cf_parameters:
        # Parameter logical IDs are single characters: 'A' for the first
        # cached value, 'B' for the second, and so on.
        # NOTE(review): past 26 distinct values this runs beyond 'Z' into
        # non-alphabetic characters — confirm the number of cached
        # parameters stays small in practice.
        keyname = chr(ord('A') + len(self.cf_parameters))
        # NOTE(review): `tags=self.tags` is forwarded to troposphere.Parameter
        # — confirm the installed troposphere version accepts it.
        param = self.cf_template.add_parameter(troposphere.Parameter(
            keyname, Type="String", Default=value, tags=self.tags
        ))
        self.cf_parameters[value] = param
    # Always hand back a Ref so generated templates stay parameterized.
    return troposphere.Ref(self.cf_parameters[value])
##
# Packaging
##
def copy_editable_packages(self, egg_links, temp_package_path):
    """Copy the source trees of editable (.egg-link) installs into the package dir."""
    for link_file in egg_links:
        with open(link_file, 'rb') as fh:
            # First line of an .egg-link file is the path to the source checkout.
            source_root = fh.read().decode('utf-8').splitlines()[0].strip()
            top_level_pkgs = {name.split(".")[0] for name in find_packages(source_root, exclude=['test', 'tests'])}
            for pkg_name in top_level_pkgs:
                copytree(os.path.join(source_root, pkg_name),
                         os.path.join(temp_package_path, pkg_name),
                         metadata=False, symlinks=False)

    if temp_package_path:
        # now remove any egg-links as they will cause issues if they still exist
        for stale_link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
            os.remove(stale_link)
def get_deps_list(self, pkg_name, installed_distros=None):
    """
    Recursively collect (name, version) pairs for *pkg_name* and everything
    it requires, de-duplicated.
    """
    # https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
    # instead of `pip` is the recommended approach. The usage is nearly
    # identical.
    import pkg_resources
    if not installed_distros:
        installed_distros = pkg_resources.WorkingSet()
    target = pkg_name.lower()
    collected = []
    for distro in installed_distros:
        if distro.project_name.lower() != target:
            continue
        collected = [(distro.project_name, distro.version)]
        for requirement in distro.requires():
            collected = collected + self.get_deps_list(pkg_name=requirement.project_name,
                                                       installed_distros=installed_distros)
    return list(set(collected))  # de-dupe before returning
def create_handler_venv(self):
    """
    Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

    Returns:
        str: path to the freshly built 'handler_venv' folder.

    Raises:
        EnvironmentError: when the pip subprocess exits non-zero.
    """
    import subprocess

    # We will need the currenv venv to pull Zappa from
    current_venv = self.get_current_venv()

    # Make a new folder for the handler packages
    ve_path = os.path.join(os.getcwd(), 'handler_venv')

    # site-packages layout differs between Windows ('Lib') and POSIX ('lib/<pyver>').
    if os.sys.platform == 'win32':
        current_site_packages_dir = os.path.join(current_venv, 'Lib', 'site-packages')
        venv_site_packages_dir = os.path.join(ve_path, 'Lib', 'site-packages')
    else:
        current_site_packages_dir = os.path.join(current_venv, 'lib', get_venv_from_python_version(), 'site-packages')
        venv_site_packages_dir = os.path.join(ve_path, 'lib', get_venv_from_python_version(), 'site-packages')

    if not os.path.isdir(venv_site_packages_dir):
        os.makedirs(venv_site_packages_dir)

    # Copy zappa* to the new virtualenv
    zappa_things = [z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == 'zappa']
    for z in zappa_things:
        copytree(os.path.join(current_site_packages_dir, z), os.path.join(venv_site_packages_dir, z))

    # Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
    zappa_deps = self.get_deps_list('zappa-mathking')
    pkg_list = ['{0!s}=={1!s}'.format(dep, version) for dep, version in zappa_deps]

    # Need to manually add setuptools
    pkg_list.append('setuptools')
    command = ["pip", "install", "--quiet", "--target", venv_site_packages_dir] + pkg_list

    # This is the recommended method for installing packages if you don't
    # to depend on `setuptools`
    # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
    pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
    # Using communicate() to avoid deadlocks
    pip_process.communicate()
    pip_return_code = pip_process.returncode

    if pip_return_code:
        raise EnvironmentError("Pypi lookup failed")

    return ve_path
# staticmethod as per https://github.com/Miserlou/Zappa/issues/780
@staticmethod
def get_current_venv():
    """
    Locate the currently active virtualenv.

    Checks $VIRTUAL_ENV first, then falls back to a pyenv
    `.python-version` file; returns None when neither applies.
    """
    if 'VIRTUAL_ENV' in os.environ:
        return os.environ['VIRTUAL_ENV']

    if os.path.exists('.python-version'):  # pragma: no cover
        try:
            subprocess.check_output(['pyenv', 'help'], stderr=subprocess.STDOUT)
        except OSError:
            print("This directory seems to have pyenv's local venv, "
                  "but pyenv executable was not found.")
        with open('.python-version', 'r') as f:
            # Only the first line names the environment
            # Related: https://github.com/Miserlou/Zappa/issues/921
            env_name = f.readline().strip()
        bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
        return bin_path[:bin_path.rfind(env_name)] + env_name

    return None  # pragma: no cover
def create_lambda_zip( self,
                       prefix='lambda_package',
                       handler_file=None,
                       slim_handler=False,
                       minify=True,
                       exclude=None,
                       exclude_glob=None,
                       use_precompiled_packages=True,
                       include=None,
                       venv=None,
                       output=None,
                       disable_progress=False,
                       archive_format='zip'
                       ):
    """
    Create a Lambda-ready zip file of the current virtualenvironment and working directory.
    Returns path to that file.

    Args:
        prefix: filename prefix for the generated archive.
        handler_file: optional handler to copy to the archive root.
        slim_handler: when True, skip bundling the project's own files.
        minify: apply ZIP_EXCLUDES/exclude filters while copying.
        exclude: extra shutil.ignore_patterns-style patterns to skip.
        exclude_glob: glob patterns removed from the staged tree after copying.
        use_precompiled_packages: swap in cached manylinux wheels where available.
        include: NOTE(review) — currently unused by this method.
        venv: virtualenv to package; defaults to the active one.
        output: explicit output filename (overrides prefix/build-time naming).
        disable_progress: suppress download progress bars.
        archive_format: 'zip' or 'tarball'.
    """
    # Validate archive_format
    if archive_format not in ['zip', 'tarball']:
        raise KeyError("The archive format to create a lambda package must be zip or tarball")

    # Pip is a weird package.
    # Calling this function in some environments without this can cause.. funkiness.
    import pip

    if not venv:
        venv = self.get_current_venv()

    build_time = str(int(time.time()))
    cwd = os.getcwd()
    if not output:
        if archive_format == 'zip':
            archive_fname = prefix + '-' + build_time + '.zip'
        elif archive_format == 'tarball':
            archive_fname = prefix + '-' + build_time + '.tar.gz'
    else:
        archive_fname = output
    archive_path = os.path.join(cwd, archive_fname)

    # Files that should be excluded from the zip
    if exclude is None:
        exclude = list()

    if exclude_glob is None:
        exclude_glob = list()

    # Exclude the zip itself
    exclude.append(archive_path)

    # Make sure that 'concurrent' is always forbidden.
    # https://github.com/Miserlou/Zappa/issues/827
    if not 'concurrent' in exclude:
        exclude.append('concurrent')

    # Split a path into its components, normalized (used to compare the
    # venv folder name against the project folder name below).
    def splitpath(path):
        parts = []
        (path, tail) = os.path.split(path)
        while path and tail:
            parts.append(tail)
            (path, tail) = os.path.split(path)
        parts.append(os.path.join(path, tail))
        return list(map(os.path.normpath, parts))[::-1]
    split_venv = splitpath(venv)
    split_cwd = splitpath(cwd)

    # Ideally this should be avoided automatically,
    # but this serves as an okay stop-gap measure.
    if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
        print(
            "Warning! Your project and virtualenv have the same name! You may want "
            "to re-create your venv with a new name, or explicitly define a "
            "'project_name', as this may cause errors."
        )

    # First, do the project..
    temp_project_path = tempfile.mkdtemp(prefix='zappa-project')

    if not slim_handler:
        # Slim handler does not take the project files.
        if minify:
            # Related: https://github.com/Miserlou/Zappa/issues/744
            excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
            copytree(cwd, temp_project_path, metadata=False, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
        else:
            copytree(cwd, temp_project_path, metadata=False, symlinks=False)
        for glob_path in exclude_glob:
            for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                try:
                    os.remove(path)
                except OSError:  # is a directory
                    shutil.rmtree(path)

    # If a handler_file is supplied, copy that to the root of the package,
    # because that's where AWS Lambda looks for it. It can't be inside a package.
    if handler_file:
        filename = handler_file.split(os.sep)[-1]
        shutil.copy(handler_file, os.path.join(temp_project_path, filename))

    # Create and populate package ID file and write to temp project path
    package_info = {}
    package_info['uuid'] = str(uuid.uuid4())
    package_info['build_time'] = build_time
    package_info['build_platform'] = os.sys.platform
    package_info['build_user'] = getpass.getuser()
    # TODO: Add git head and info?

    # Ex, from @scoates:
    # def _get_git_branch():
    #     chdir(DIR)
    #     out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    #     lambci_branch = environ.get('LAMBCI_BRANCH', None)
    #     if out == "HEAD" and lambci_branch:
    #         out += " lambci:{}".format(lambci_branch)
    #     return out

    # def _get_git_hash():
    #     chdir(DIR)
    #     return check_output(['git', 'rev-parse', 'HEAD']).strip()

    # def _get_uname():
    #     return check_output(['uname', '-a']).strip()

    # def _get_user():
    #     return check_output(['whoami']).strip()

    # def set_id_info(zappa_cli):
    #     build_info = {
    #         'branch': _get_git_branch(),
    #         'hash': _get_git_hash(),
    #         'build_uname': _get_uname(),
    #         'build_user': _get_user(),
    #         'build_time': datetime.datetime.utcnow().isoformat(),
    #     }
    #     with open(path.join(DIR, 'id_info.json'), 'w') as f:
    #         json.dump(build_info, f)
    #     return True

    package_id_file = open(os.path.join(temp_project_path, 'package_info.json'), 'w')
    dumped = json.dumps(package_info, indent=4)
    try:
        package_id_file.write(dumped)
    except TypeError:  # This is a Python 2/3 issue. TODO: Make pretty!
        package_id_file.write(str(dumped))
    package_id_file.close()

    # Then, do site site-packages..
    egg_links = []
    temp_package_path = tempfile.mkdtemp(prefix='zappa-packages')
    if os.sys.platform == 'win32':
        site_packages = os.path.join(venv, 'Lib', 'site-packages')
    else:
        site_packages = os.path.join(venv, 'lib', get_venv_from_python_version(), 'site-packages')
    egg_links.extend(glob.glob(os.path.join(site_packages, '*.egg-link')))

    if minify:
        excludes = ZIP_EXCLUDES + exclude
        copytree(site_packages, temp_package_path, metadata=False, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
    else:
        copytree(site_packages, temp_package_path, metadata=False, symlinks=False)

    # We may have 64-bin specific packages too.
    site_packages_64 = os.path.join(venv, 'lib64', get_venv_from_python_version(), 'site-packages')
    if os.path.exists(site_packages_64):
        egg_links.extend(glob.glob(os.path.join(site_packages_64, '*.egg-link')))
        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(site_packages_64, temp_package_path, metadata = False, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
        else:
            copytree(site_packages_64, temp_package_path, metadata = False, symlinks=False)

    if egg_links:
        self.copy_editable_packages(egg_links, temp_package_path)

    copy_tree(temp_package_path, temp_project_path, update=True)

    # Then the pre-compiled packages..
    if use_precompiled_packages:
        print("Downloading and installing dependencies..")
        installed_packages = self.get_installed_packages(site_packages, site_packages_64)

        try:
            for installed_package_name, installed_package_version in installed_packages.items():
                cached_wheel_path = self.get_cached_manylinux_wheel(installed_package_name, installed_package_version, disable_progress)
                if cached_wheel_path:
                    # Otherwise try to use manylinux packages from PyPi..
                    # Related: https://github.com/Miserlou/Zappa/issues/398
                    shutil.rmtree(os.path.join(temp_project_path, installed_package_name), ignore_errors=True)
                    with zipfile.ZipFile(cached_wheel_path) as zfile:
                        zfile.extractall(temp_project_path)

        except Exception as e:
            print(e)
            # XXX - What should we do here?

    # Cleanup
    for glob_path in exclude_glob:
        for path in glob.glob(os.path.join(temp_project_path, glob_path)):
            try:
                os.remove(path)
            except OSError:  # is a directory
                shutil.rmtree(path)

    # Then archive it all up..
    if archive_format == 'zip':
        print("Packaging project as zip.")

        try:
            compression_method = zipfile.ZIP_DEFLATED
        except ImportError:  # pragma: no cover
            compression_method = zipfile.ZIP_STORED
        archivef = zipfile.ZipFile(archive_path, 'w', compression_method)

    elif archive_format == 'tarball':
        print("Packaging project as gzipped tarball.")
        archivef = tarfile.open(archive_path, 'w|gz')

    for root, dirs, files in os.walk(temp_project_path):

        for filename in files:

            # Skip .pyc files for Django migrations
            # https://github.com/Miserlou/Zappa/issues/436
            # https://github.com/Miserlou/Zappa/issues/464
            if filename[-4:] == '.pyc' and root[-10:] == 'migrations':
                continue

            # If there is a .pyc file in this package,
            # we can skip the python source code as we'll just
            # use the compiled bytecode anyway..
            if filename[-3:] == '.py' and root[-10:] != 'migrations':
                abs_filname = os.path.join(root, filename)
                abs_pyc_filename = abs_filname + 'c'
                if os.path.isfile(abs_pyc_filename):

                    # but only if the pyc is older than the py,
                    # otherwise we'll deploy outdated code!
                    py_time = os.stat(abs_filname).st_mtime
                    pyc_time = os.stat(abs_pyc_filename).st_mtime

                    if pyc_time > py_time:
                        continue

            # Make sure that the files are all correctly chmodded
            # Related: https://github.com/Miserlou/Zappa/issues/484
            # Related: https://github.com/Miserlou/Zappa/issues/682
            os.chmod(os.path.join(root, filename), 0o755)

            if archive_format == 'zip':
                # Actually put the file into the proper place in the zip
                # Related: https://github.com/Miserlou/Zappa/pull/716
                zipi = zipfile.ZipInfo(os.path.join(root.replace(temp_project_path, '').lstrip(os.sep), filename))
                zipi.create_system = 3
                zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
                with open(os.path.join(root, filename), 'rb') as f:
                    archivef.writestr(zipi, f.read(), compression_method)

            elif archive_format == 'tarball':
                tarinfo = tarfile.TarInfo(os.path.join(root.replace(temp_project_path, '').lstrip(os.sep), filename))
                tarinfo.mode = 0o755

                stat = os.stat(os.path.join(root, filename))
                tarinfo.mtime = stat.st_mtime
                tarinfo.size = stat.st_size
                with open(os.path.join(root, filename), 'rb') as f:
                    archivef.addfile(tarinfo, f)

        # Create python init file if it does not exist
        # Only do that if there are sub folders or python files and does not conflict with a neighbouring module
        # Related: https://github.com/Miserlou/Zappa/issues/766
        if not contains_python_files_or_subdirs(root):
            # if the directory does not contain any .py file at any level, we can skip the rest
            dirs[:] = [d for d in dirs if d != root]
        else:
            if '__init__.py' not in files and not conflicts_with_a_neighbouring_module(root):
                tmp_init = os.path.join(temp_project_path, '__init__.py')
                open(tmp_init, 'a').close()
                os.chmod(tmp_init, 0o755)

                # NOTE(review): arcname joins the relative root with itself,
                # which looks suspicious — preserved as-is; confirm the
                # intended archive layout for synthesized __init__.py files.
                arcname = os.path.join(root.replace(temp_project_path, ''),
                                       os.path.join(root.replace(temp_project_path, ''), '__init__.py'))
                if archive_format == 'zip':
                    archivef.write(tmp_init, arcname)
                elif archive_format == 'tarball':
                    archivef.add(tmp_init, arcname)

    # And, we're done!
    archivef.close()

    # Trash the temp directory
    shutil.rmtree(temp_project_path)
    shutil.rmtree(temp_package_path)
    if os.path.isdir(venv) and slim_handler:
        # Remove the temporary handler venv folder
        shutil.rmtree(venv)

    return archive_fname
@staticmethod
def get_installed_packages(site_packages, site_packages_64):
    """
    Map the lower-cased name of each installed package Zappa cares about
    to its version.

    A distribution is kept when a directory entry with its name exists in
    either site-packages location, or when it is installed directly into
    one of those locations.
    """
    import pkg_resources

    dir_entries = []
    for packages_dir in (site_packages, site_packages_64):
        if os.path.isdir(packages_dir):
            dir_entries += os.listdir(packages_dir)
    keep_names = {entry.lower() for entry in dir_entries}
    known_locations = [site_packages.lower(), site_packages_64.lower()]

    result = {}
    for dist in pkg_resources.WorkingSet():
        dist_name = dist.project_name.lower()
        if dist_name in keep_names or dist.location.lower() in known_locations:
            result[dist_name] = dist.version
    return result
@staticmethod
def download_url_with_progress(url, stream, disable_progress):
    """
    Stream *url* into *stream* (any writable io object) in 1 KiB chunks,
    displaying a tqdm progress bar unless *disable_progress* is set.
    """
    resp = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 2)), stream=True)
    resp.raw.decode_content = True

    total_bytes = int(resp.headers.get('Content-Length', 0))
    progress = tqdm(unit="B", unit_scale=True, total=total_bytes, disable=disable_progress)
    for chunk in resp.iter_content(chunk_size=1024):
        if not chunk:
            continue
        progress.update(len(chunk))
        stream.write(chunk)

    progress.close()
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
    """
    Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.

    Returns the local wheel path, or None when no suitable manylinux wheel
    exists (or the download was not a valid zip).
    """
    cached_wheels_dir = os.path.join(tempfile.gettempdir(), 'cached_wheels')

    if not os.path.isdir(cached_wheels_dir):
        os.makedirs(cached_wheels_dir)
    else:
        # Check if we already have a cached copy
        # Bug fix: `re.UNICODE` was previously passed positionally, landing in
        # re.sub's `count` parameter; it must be passed via `flags=`.
        wheel_name = re.sub(r"[^\w\d.]+", "_", package_name, flags=re.UNICODE)
        wheel_file = f'{wheel_name}-{package_version}-*_x86_64.whl'
        wheel_path = os.path.join(cached_wheels_dir, wheel_file)

        for pathname in glob.iglob(wheel_path):
            if re.match(self.manylinux_wheel_file_match, pathname) or re.match(self.manylinux_wheel_abi3_file_match, pathname):
                print(f" - {package_name}=={package_version}: Using locally cached manylinux wheel")
                return pathname

    # The file is not cached, download it.
    wheel_url, filename = self.get_manylinux_wheel_url(package_name, package_version)
    if not wheel_url:
        return None

    wheel_path = os.path.join(cached_wheels_dir, filename)
    print(f" - {package_name}=={package_version}: Downloading")
    with open(wheel_path, 'wb') as f:
        self.download_url_with_progress(wheel_url, f, disable_progress)

    if not zipfile.is_zipfile(wheel_path):
        return None

    return wheel_path
def get_manylinux_wheel_url(self, package_name, package_version):
    """
    For a given package name, returns a (download_url, filename) pair for a
    compatible manylinux wheel, else returns (None, None).

    Related: https://github.com/Miserlou/Zappa/issues/398
    Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae

    This function downloads metadata JSON of `package_name` from Pypi
    and examines if the package has a manylinux wheel. This function
    also caches the JSON file so that we don't have to poll Pypi
    every time.
    """
    cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), 'cached_pypi_info')
    if not os.path.isdir(cached_pypi_info_dir):
        os.makedirs(cached_pypi_info_dir)

    # Even though the metadata is for the package, we save it in a
    # filename that includes the package's version. This helps in
    # invalidating the cached file if the user moves to a different
    # version of the package.
    # Related: https://github.com/Miserlou/Zappa/issues/899
    json_file = '{0!s}-{1!s}.json'.format(package_name, package_version)
    json_file_path = os.path.join(cached_pypi_info_dir, json_file)
    if os.path.exists(json_file_path):
        with open(json_file_path, 'rb') as metafile:
            data = json.load(metafile)
    else:
        url = 'https://pypi.python.org/pypi/{}/json'.format(package_name)
        try:
            res = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 1.5)))
            data = res.json()
        except Exception:  # pragma: no cover
            # Network or parse failure: treat as "no wheel available".
            # (Previously bound the exception to an unused variable.)
            return None, None
        with open(json_file_path, 'wb') as metafile:
            jsondata = json.dumps(data)
            metafile.write(bytes(jsondata, "utf-8"))

    if package_version not in data['releases']:
        return None, None

    for f in data['releases'][package_version]:
        # One combined check: the platform-specific and abi3 patterns
        # previously lived in duplicated if/elif branches returning the same.
        if re.match(self.manylinux_wheel_file_match, f['filename']) \
                or re.match(self.manylinux_wheel_abi3_file_match, f['filename']):
            return f['url'], f['filename']
    return None, None
##
# S3
##
def upload_to_s3(self, source_path, bucket_name, disable_progress=False):
    r"""
    Given a file, upload it to S3.
    Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).

    Creates the bucket (and tags it) if head_bucket fails.
    Returns True on success, false on failure.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError:
        # This is really stupid S3 quirk. Technically, us-east-1 one has no S3,
        # it's actually "US Standard", or something.
        # More here: https://github.com/boto/boto3/issues/125
        if self.aws_region == 'us-east-1':
            self.s3_client.create_bucket(
                Bucket=bucket_name,
            )
        else:
            self.s3_client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': self.aws_region},
            )

        if self.tags:
            tags = {
                'TagSet': [{'Key': key, 'Value': self.tags[key]} for key in self.tags.keys()]
            }
            self.s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)

    # Refuse to upload missing or empty artifacts.
    if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
        print("Problem with source file {}".format(source_path))
        return False

    # Upload under the bare filename, not the full local path.
    dest_path = os.path.split(source_path)[1]
    try:
        source_size = os.stat(source_path).st_size
        print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
        progress = tqdm(total=float(os.path.getsize(source_path)), unit_scale=True, unit='B', disable=disable_progress)

        # Attempt to upload to S3 using the S3 meta client with the progress bar.
        # If we're unable to do that, try one more time using a session client,
        # which cannot use the progress bar.
        # Related: https://github.com/boto/boto3/issues/611
        try:
            self.s3_client.upload_file(
                source_path, bucket_name, dest_path,
                Callback=progress.update
            )
        except Exception as e:  # pragma: no cover
            self.s3_client.upload_file(source_path, bucket_name, dest_path)

        progress.close()
    except (KeyboardInterrupt, SystemExit):  # pragma: no cover
        raise
    except Exception as e:  # pragma: no cover
        print(e)
        return False
    return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
    """
    Copy an object to a new key within the same S3 bucket.

    Returns True on success, False when the bucket is missing or the
    copy call fails.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:  # pragma: no cover
        # A 404 from head_bucket means the bucket itself does not exist.
        if int(e.response['Error']['Code']) == 404:
            return False

    copy_source = {
        "Bucket": bucket_name,
        "Key": src_file_name
    }
    try:
        self.s3_client.copy(
            CopySource=copy_source,
            Bucket=bucket_name,
            Key=dst_file_name
        )
        return True
    except botocore.exceptions.ClientError:  # pragma: no cover
        return False
def remove_from_s3(self, file_name, bucket_name):
"""
Given a file name and a bucket, remove it from S3.
There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3.
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
return False
try:
self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError): # pragma: no cover
return False
##
# Lambda
##
    def create_lambda_function( self,
                                bucket=None,
                                function_name=None,
                                handler=None,
                                s3_key=None,
                                description='Zappa Deployment',
                                timeout=30,
                                memory_size=512,
                                publish=True,
                                vpc_config=None,
                                dead_letter_config=None,
                                runtime='python3.6',
                                aws_environment_variables=None,
                                aws_kms_key_arn=None,
                                xray_tracing=False,
                                local_zip=None,
                                use_alb=False,
                                layers=None,
                                concurrency=None,
                            ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, register that Lambda function.

        The code is taken either from ``local_zip`` (uploaded inline) or from
        ``bucket``/``s3_key``. Optionally creates an ALB alias, applies tags,
        and reserves concurrency. Returns the new function's ARN.
        """
        # Normalize None/falsy arguments before building the API request.
        if not vpc_config:
            vpc_config = {}
        if not dead_letter_config:
            dead_letter_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not aws_kms_key_arn:
            aws_kms_key_arn = ''
        if not layers:
            layers = []
        kwargs = dict(
            FunctionName=function_name,
            Runtime=runtime,
            Role=self.credentials_arn,
            Handler=handler,
            Description=description,
            Timeout=timeout,
            MemorySize=memory_size,
            Publish=publish,
            VpcConfig=vpc_config,
            DeadLetterConfig=dead_letter_config,
            Environment={'Variables': aws_environment_variables},
            KMSKeyArn=aws_kms_key_arn,
            # NOTE(review): this reads self.xray_tracing, so the xray_tracing
            # parameter above is currently unused -- confirm which is intended.
            TracingConfig={
                'Mode': 'Active' if self.xray_tracing else 'PassThrough'
            },
            Layers=layers
        )
        # Code source: an inline zip takes precedence over an S3 location.
        if local_zip:
            kwargs['Code'] = {
                'ZipFile': local_zip
            }
        else:
            kwargs['Code'] = {
                'S3Bucket': bucket,
                'S3Key': s3_key
            }
        response = self.lambda_client.create_function(**kwargs)
        resource_arn = response['FunctionArn']
        version = response['Version']
        # If we're using an ALB, let's create an alias mapped to the newly
        # created function. This allows clean, no downtime association when
        # using application load balancers as an event source.
        # See: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        if use_alb:
            self.lambda_client.create_alias(
                FunctionName=resource_arn,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        # None means "no reservation"; 0 is a valid throttle-everything value.
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=resource_arn,
                ReservedConcurrentExecutions=concurrency,
            )
        return resource_arn
    def update_lambda_function(self, bucket, function_name, s3_key=None, publish=True, local_zip=None, num_revisions=None, concurrency=None):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, update that Lambda function's code.
        Optionally, delete previous versions if they exceed the optional limit.

        Also refreshes the ALB alias (when one exists), updates reserved
        concurrency, and returns the function's ARN.
        """
        print("Updating Lambda function code..")
        kwargs = dict(
            FunctionName=function_name,
            Publish=publish
        )
        # The replacement code comes either from an inline zip or from S3.
        if local_zip:
            kwargs['ZipFile'] = local_zip
        else:
            kwargs['S3Bucket'] = bucket
            kwargs['S3Key'] = s3_key
        response = self.lambda_client.update_function_code(**kwargs)
        resource_arn = response['FunctionArn']
        version = response['Version']
        # If the lambda has an ALB alias, let's update the alias
        # to point to the newest version of the function. We have to use a GET
        # here, as there's no HEAD-esque call to retrieve metadata about a
        # function alias.
        # Related: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        try:
            response = self.lambda_client.get_alias(
                FunctionName=function_name,
                Name=ALB_LAMBDA_ALIAS,
            )
            alias_exists = True
        except botocore.exceptions.ClientError as e: # pragma: no cover
            if "ResourceNotFoundException" not in e.response["Error"]["Code"]:
                raise e
            alias_exists = False
        if alias_exists:
            self.lambda_client.update_alias(
                FunctionName=function_name,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        # Reserve concurrency when requested, otherwise drop any reservation.
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=function_name,
                ReservedConcurrentExecutions=concurrency,
            )
        else:
            self.lambda_client.delete_function_concurrency(
                FunctionName=function_name
            )
        if num_revisions:
            # Find the existing revision IDs for the given function
            # Related: https://github.com/Miserlou/Zappa/issues/1402
            versions_in_lambda = []
            # list_versions_by_function pages its results; follow NextMarker.
            versions = self.lambda_client.list_versions_by_function(FunctionName=function_name)
            for version in versions['Versions']:
                versions_in_lambda.append(version['Version'])
            while 'NextMarker' in versions:
                versions = self.lambda_client.list_versions_by_function(FunctionName=function_name,Marker=versions['NextMarker'])
                for version in versions['Versions']:
                    versions_in_lambda.append(version['Version'])
            versions_in_lambda.remove('$LATEST')
            # Delete older revisions if their number exceeds the specified limit
            for version in versions_in_lambda[::-1][num_revisions:]:
                self.lambda_client.delete_function(FunctionName=function_name,Qualifier=version)
        return resource_arn
def update_lambda_configuration( self,
lambda_arn,
function_name,
handler,
description='Zappa Deployment',
timeout=30,
memory_size=512,
publish=True,
vpc_config=None,
runtime='python3.6',
aws_environment_variables=None,
aws_kms_key_arn=None,
layers=None,
project_folder=None
):
"""
Given an existing function ARN, update the configuration variables.
"""
print("Updating Lambda function configuration..")
if not vpc_config:
vpc_config = {}
if not self.credentials_arn:
self.get_credentials_arn()
if not aws_kms_key_arn:
aws_kms_key_arn = ''
if not aws_environment_variables:
aws_environment_variables = {}
if not layers:
layers = []
# Check if there are any remote aws lambda env vars so they don't get trashed.
# https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
lambda_aws_config = self.lambda_client.get_function_configuration(FunctionName=function_name)
if "Environment" in lambda_aws_config:
lambda_aws_environment_variables = lambda_aws_config["Environment"].get("Variables", {})
# Append keys that are remote but not in settings file
for key, value in lambda_aws_environment_variables.items():
if key not in aws_environment_variables:
aws_environment_variables[key] = value
response = self.lambda_client.update_function_configuration(
FunctionName=function_name,
Runtime=runtime,
Role=self.credentials_arn,
Handler=handler,
Description=description,
Timeout=timeout,
MemorySize=memory_size,
VpcConfig=vpc_config,
Environment={'Variables': aws_environment_variables},
KMSKeyArn=aws_kms_key_arn,
TracingConfig={
'Mode': 'Active' if self.xray_tracing else 'PassThrough'
},
Layers=layers,
FileSystemConfigs=[
{
'Arn': 'arn:aws:elasticfilesystem:ap-northeast-2:755784673120:access-point/fsap-0742b8ba7aec1fd88',
'LocalMountPath': project_folder
},
],
)
resource_arn = response['FunctionArn']
if self.tags:
self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
return resource_arn
def invoke_lambda_function( self,
function_name,
payload,
invocation_type='Event',
log_type='Tail',
client_context=None,
qualifier=None
):
"""
Directly invoke a named Lambda function with a payload.
Returns the response.
"""
return self.lambda_client.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
LogType=log_type,
Payload=payload
)
    def rollback_lambda_function_version(self, function_name, versions_back=1, publish=True):
        """
        Rollback the lambda function code 'versions_back' number of revisions.

        Downloads the older version's deployment package over HTTPS (via
        ``requests``, from the pre-signed URL AWS returns) and re-uploads it
        as the latest code. Returns the Function ARN, or False when there are
        not enough revisions or the download fails.
        """
        response = self.lambda_client.list_versions_by_function(FunctionName=function_name)
        # Take into account $LATEST
        if len(response['Versions']) < versions_back + 1:
            print("We do not have {} revisions. Aborting".format(str(versions_back)))
            return False
        # Numeric revisions only, newest first.
        revisions = [int(revision['Version']) for revision in response['Versions'] if revision['Version'] != '$LATEST']
        revisions.sort(reverse=True)
        response = self.lambda_client.get_function(FunctionName='function:{}:{}'.format(function_name, revisions[versions_back]))
        # 'Code.Location' is a pre-signed URL for the stored zip.
        response = requests.get(response['Code']['Location'])
        if response.status_code != 200:
            print("Failed to get version {} of {} code".format(versions_back, function_name))
            return False
        response = self.lambda_client.update_function_code(FunctionName=function_name, ZipFile=response.content, Publish=publish) # pragma: no cover
        return response['FunctionArn']
def get_lambda_function(self, function_name):
"""
Returns the lambda function ARN, given a name
This requires the "lambda:GetFunction" role.
"""
response = self.lambda_client.get_function(
FunctionName=function_name)
return response['Configuration']['FunctionArn']
def get_lambda_function_versions(self, function_name):
"""
Simply returns the versions available for a Lambda function, given a function name.
"""
try:
response = self.lambda_client.list_versions_by_function(
FunctionName=function_name
)
return response.get('Versions', [])
except Exception:
return []
def delete_lambda_function(self, function_name):
"""
Given a function name, delete it from AWS Lambda.
Returns the response.
"""
print("Deleting Lambda function..")
return self.lambda_client.delete_function(
FunctionName=function_name,
)
##
# Application load balancer
##
    def deploy_lambda_alb( self,
                            lambda_arn,
                            lambda_name,
                            alb_vpc_config,
                            timeout
                        ):
        """
        The `zappa deploy` functionality for ALB infrastructure.

        Creates, in order: the load balancer, a lambda target group, the
        permission letting the ALB invoke the function's alias, the target
        registration, and an HTTPS listener on port 443.
        """
        # Validate the user-supplied ALB settings before touching AWS.
        if not alb_vpc_config:
            raise EnvironmentError('When creating an ALB, alb_vpc_config must be filled out in zappa_settings.')
        if 'SubnetIds' not in alb_vpc_config:
            raise EnvironmentError('When creating an ALB, you must supply two subnets in different availability zones.')
        if 'SecurityGroupIds' not in alb_vpc_config:
            alb_vpc_config["SecurityGroupIds"] = []
        if not alb_vpc_config.get('CertificateArn'):
            raise EnvironmentError('When creating an ALB, you must supply a CertificateArn for the HTTPS listener.')
        # Related: https://github.com/Miserlou/Zappa/issues/1856
        if 'Scheme' not in alb_vpc_config:
            alb_vpc_config["Scheme"] = "internet-facing"
        print("Deploying ALB infrastructure...")
        # Create load balancer
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
        kwargs = dict(
            Name=lambda_name,
            Subnets=alb_vpc_config["SubnetIds"],
            SecurityGroups=alb_vpc_config["SecurityGroupIds"],
            Scheme=alb_vpc_config["Scheme"],
            # TODO: Tags might be a useful means of stock-keeping zappa-generated assets.
            #Tags=[],
            Type="application",
            # TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.
            IpAddressType="ipv4"
        )
        response = self.elbv2_client.create_load_balancer(**kwargs)
        if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
            raise EnvironmentError("Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(repr(response)))
        if response["LoadBalancers"][0]['State']['Code'] == 'failed':
            raise EnvironmentError("Failure to create application load balancer. Response reported a failed state: {}".format(response["LoadBalancers"][0]['State']['Reason']))
        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
        # NOTE(review): load_balancer_vpc is captured but never used below.
        load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]
        # Block until the ALB is actually usable before configuring it.
        waiter = self.elbv2_client.get_waiter('load_balancer_available')
        print('Waiting for load balancer [{}] to become active..'.format(load_balancer_arn))
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        # Match the lambda timeout on the load balancer.
        self.elbv2_client.modify_load_balancer_attributes(
            LoadBalancerArn=load_balancer_arn,
            Attributes=[{
                'Key': 'idle_timeout.timeout_seconds',
                'Value': str(timeout)
            }]
        )
        # Create/associate target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
        kwargs = dict(
            Name=lambda_name,
            TargetType="lambda",
            # TODO: Add options for health checks
        )
        response = self.elbv2_client.create_target_group(**kwargs)
        if not(response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
            raise EnvironmentError("Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(repr(response)))
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
        # Enable multi-value headers by default.
        response = self.elbv2_client.modify_target_group_attributes(
            TargetGroupArn=target_group_arn,
            Attributes=[
                {
                    'Key': 'lambda.multi_value_headers.enabled',
                    'Value': 'true'
                },
            ]
        )
        # Allow execute permissions from target group to lambda.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
        kwargs = dict(
            Action="lambda:InvokeFunction",
            FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
            Principal="elasticloadbalancing.amazonaws.com",
            SourceArn=target_group_arn,
            StatementId=lambda_name
        )
        response = self.lambda_client.add_permission(**kwargs)
        # Register target group to lambda association.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
        kwargs = dict(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}]
        )
        response = self.elbv2_client.register_targets(**kwargs)
        # Bind listener to load balancer with default rule to target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
        kwargs = dict(
            # TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
            Certificates=[{"CertificateArn": alb_vpc_config['CertificateArn']}],
            DefaultActions=[{
                "Type": "forward",
                "TargetGroupArn": target_group_arn,
            }],
            LoadBalancerArn=load_balancer_arn,
            Protocol="HTTPS",
            # TODO: Add option for custom ports
            Port=443,
            # TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
        )
        response = self.elbv2_client.create_listener(**kwargs)
        print("ALB created with DNS: {}".format(load_balancer_dns))
        print("Note it may take several minutes for load balancer to become available.")
    def undeploy_lambda_alb(self, lambda_name):
        """
        The `zappa undeploy` functionality for ALB infrastructure.

        Tears down, in order: the ALB->lambda invoke permission, the load
        balancer (listener first, to avoid ResourceInUse errors), and
        finally the lambda target group. Each step tolerates the resource
        already being gone.
        """
        print("Undeploying ALB infrastructure...")
        # Locate and delete alb/lambda permissions
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
            self.lambda_client.remove_permission(
                FunctionName=lambda_name,
                StatementId=lambda_name
            )
        except botocore.exceptions.ClientError as e: # pragma: no cover
            if "ResourceNotFoundException" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete load balancer
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
            response = self.elbv2_client.describe_load_balancers(
                Names=[lambda_name]
            )
            if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
                raise EnvironmentError("Failure to locate/delete ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
            load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
            response = self.elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
            if not(response["Listeners"]):
                print('No listeners found.')
            elif len(response["Listeners"]) > 1:
                raise EnvironmentError("Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
            else:
                listener_arn = response["Listeners"][0]["ListenerArn"]
                # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
                # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
                response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
            # Remove the load balancer and wait for completion
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
            response = self.elbv2_client.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
            waiter = self.elbv2_client.get_waiter('load_balancers_deleted')
            print('Waiting for load balancer [{}] to be deleted..'.format(lambda_name))
            waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        except botocore.exceptions.ClientError as e: # pragma: no cover
            print(e.response["Error"]["Code"])
            if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete target group
        try:
            # Locate the lambda ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
            response = self.lambda_client.get_function(FunctionName=lambda_name)
            lambda_arn = response["Configuration"]["FunctionArn"]
            # Locate the target group ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
            response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
            if not(response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
                raise EnvironmentError("Failure to locate/delete ALB target group named [{}]. Response was: {}".format(lambda_name, repr(response)))
            target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
            # Deregister targets and wait for completion
            self.elbv2_client.deregister_targets(
                TargetGroupArn=target_group_arn,
                Targets=[{"Id": lambda_arn}]
            )
            waiter = self.elbv2_client.get_waiter('target_deregistered')
            print('Waiting for target [{}] to be deregistered...'.format(lambda_name))
            waiter.wait(
                TargetGroupArn=target_group_arn,
                Targets=[{"Id": lambda_arn}],
                WaiterConfig={"Delay": 3}
            )
            # Remove the target group
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
            self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
        except botocore.exceptions.ClientError as e: # pragma: no cover
            print(e.response["Error"]["Code"])
            if "TargetGroupNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
##
# API Gateway
##
def create_api_gateway_routes( self,
lambda_arn,
api_name=None,
api_key_required=False,
authorization_type='NONE',
authorizer=None,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Create the API Gateway for this Zappa deployment.
Returns the new RestAPI CF resource.
"""
restapi = troposphere.apigateway.RestApi('Api')
restapi.Name = api_name or lambda_arn.split(':')[-1]
if not description:
description = 'Created automatically by Zappa.'
restapi.Description = description
endpoint_configuration = [] if endpoint_configuration is None else endpoint_configuration
if self.boto_session.region_name == "us-gov-west-1":
endpoint_configuration.append("REGIONAL")
if endpoint_configuration:
endpoint = troposphere.apigateway.EndpointConfiguration()
endpoint.Types = list(set(endpoint_configuration))
restapi.EndpointConfiguration = endpoint
if self.apigateway_policy:
restapi.Policy = json.loads(self.apigateway_policy)
self.cf_template.add_resource(restapi)
root_id = troposphere.GetAtt(restapi, 'RootResourceId')
invocation_prefix = "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
invocations_uri = 'arn:' + invocation_prefix + ':apigateway:' + self.boto_session.region_name + ':lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'
##
# The Resources
##
authorizer_resource = None
if authorizer:
authorizer_lambda_arn = authorizer.get('arn', lambda_arn)
lambda_uri = 'arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations'.format(
invocation_prefix=invocation_prefix,
region_name=self.boto_session.region_name,
lambda_arn=authorizer_lambda_arn
)
authorizer_resource = self.create_authorizer(
restapi, lambda_uri, authorizer
)
self.create_and_setup_methods( restapi,
root_id,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
0
)
if cors_options:
self.create_and_setup_cors( restapi,
root_id,
invocations_uri,
0,
cors_options
)
resource = troposphere.apigateway.Resource('ResourceAnyPathSlashed')
self.cf_api_resources.append(resource.title)
resource.RestApiId = troposphere.Ref(restapi)
resource.ParentId = root_id
resource.PathPart = "{proxy+}"
self.cf_template.add_resource(resource)
self.create_and_setup_methods( restapi,
resource,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
1
) # pragma: no cover
if cors_options:
self.create_and_setup_cors( restapi,
resource,
invocations_uri,
1,
cors_options
) # pragma: no cover
return restapi
def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get('validation_expression', None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = "method.request.header.%s" % authorizer.get('token_header', 'Authorization')
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = identity_validation_expression
if authorizer_type == 'TOKEN':
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get('result_ttl', 300)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == 'COGNITO_USER_POOLS':
authorizer_resource.ProviderARNs = authorizer.get('provider_arns')
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource
def create_and_setup_methods(
self,
restapi,
resource,
api_key_required,
uri,
authorization_type,
authorizer_resource,
depth
):
"""
Set up the methods, integration responses and method responses for a given API Gateway resource.
"""
for method_name in self.http_methods:
method = troposphere.apigateway.Method(method_name + str(depth))
method.RestApiId = troposphere.Ref(restapi)
if type(resource) is troposphere.apigateway.Resource:
method.ResourceId = troposphere.Ref(resource)
else:
method.ResourceId = resource
method.HttpMethod = method_name.upper()
method.AuthorizationType = authorization_type
if authorizer_resource:
method.AuthorizerId = troposphere.Ref(authorizer_resource)
method.ApiKeyRequired = api_key_required
method.MethodResponses = []
self.cf_template.add_resource(method)
self.cf_api_resources.append(method.title)
if not self.credentials_arn:
self.get_credentials_arn()
credentials = self.credentials_arn # This must be a Role ARN
integration = troposphere.apigateway.Integration()
integration.CacheKeyParameters = []
integration.CacheNamespace = 'none'
integration.Credentials = credentials
integration.IntegrationHttpMethod = 'POST'
integration.IntegrationResponses = []
integration.PassthroughBehavior = 'NEVER'
integration.Type = 'AWS_PROXY'
integration.Uri = uri
method.Integration = integration
def create_and_setup_cors(self, restapi, resource, uri, depth, config):
"""
Set up the methods, integration responses and method responses for a given API Gateway resource.
"""
if config is True:
config = {}
method_name = "OPTIONS"
method = troposphere.apigateway.Method(method_name + str(depth))
method.RestApiId = troposphere.Ref(restapi)
if type(resource) is troposphere.apigateway.Resource:
method.ResourceId = troposphere.Ref(resource)
else:
method.ResourceId = resource
method.HttpMethod = method_name.upper()
method.AuthorizationType = "NONE"
method_response = troposphere.apigateway.MethodResponse()
method_response.ResponseModels = {
"application/json": "Empty"
}
response_headers = {
"Access-Control-Allow-Headers": "'%s'" % ",".join(config.get(
"allowed_headers", ["Content-Type", "X-Amz-Date",
"Authorization", "X-Api-Key",
"X-Amz-Security-Token"])),
"Access-Control-Allow-Methods": "'%s'" % ",".join(config.get(
"allowed_methods", ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"])),
"Access-Control-Allow-Origin": "'%s'" % config.get(
"allowed_origin", "*")
}
method_response.ResponseParameters = {
"method.response.header.%s" % key: True for key in response_headers
}
method_response.StatusCode = "200"
method.MethodResponses = [
method_response
]
self.cf_template.add_resource(method)
self.cf_api_resources.append(method.title)
integration = troposphere.apigateway.Integration()
integration.Type = 'MOCK'
integration.PassthroughBehavior = 'NEVER'
integration.RequestTemplates = {
"application/json": "{\"statusCode\": 200}"
}
integration_response = troposphere.apigateway.IntegrationResponse()
integration_response.ResponseParameters = {
"method.response.header.%s" % key: value for key, value in response_headers.items()
}
integration_response.ResponseTemplates = {
"application/json": ""
}
integration_response.StatusCode = "200"
integration.IntegrationResponses = [
integration_response
]
integration.Uri = uri
method.Integration = integration
def deploy_api_gateway( self,
api_id,
stage_name,
stage_description="",
description="",
cache_cluster_enabled=False,
cache_cluster_size='0.5',
variables=None,
cloudwatch_log_level='OFF',
cloudwatch_data_trace=False,
cloudwatch_metrics_enabled=False,
cache_cluster_ttl=300,
cache_cluster_encrypted=False
):
"""
Deploy the API Gateway!
Return the deployed API URL.
"""
print("Deploying API Gateway..")
self.apigateway_client.create_deployment(
restApiId=api_id,
stageName=stage_name,
stageDescription=stage_description,
description=description,
cacheClusterEnabled=cache_cluster_enabled,
cacheClusterSize=cache_cluster_size,
variables=variables or {}
)
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
self.apigateway_client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)
]
)
return "https://{}.execute-api.{}.amazonaws.com/{}".format(api_id, self.boto_session.region_name, stage_name)
def add_binary_support(self, api_id, cors=False):
"""
Add binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" not in response or "*/*" not in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': "add",
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# fix for issue 699 and 1035, cors+binary support don't work together
# go through each resource and update the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": "CONVERT_TO_TEXT"
}
]
)
def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'remove',
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": ""
}
]
)
def add_api_compression(self, api_id, min_compression_size):
"""
Add Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
'value': str(min_compression_size)
}
]
)
def remove_api_compression(self, api_id):
"""
Remove Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
}
]
)
def get_api_keys(self, api_id, stage_name):
"""
Generator that allows to iterate per API keys associated to an api_id and a stage_name.
"""
response = self.apigateway_client.get_api_keys(limit=500)
stage_key = '{}/{}'.format(api_id, stage_name)
for api_key in response.get('items'):
if stage_key in api_key.get('stageKeys'):
yield api_key.get('id')
def create_api_key(self, api_id, stage_name):
"""
Create new API key and link it with an api_id and a stage_name
"""
response = self.apigateway_client.create_api_key(
name='{}_{}'.format(stage_name, api_id),
description='Api Key for {}'.format(api_id),
enabled=True,
stageKeys=[
{
'restApiId': '{}'.format(api_id),
'stageName': '{}'.format(stage_name)
},
]
)
print('Created a new x-api-key: {}'.format(response['id']))
def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(
limit=1,
nameQuery='{}_{}'.format(stage_name, api_id)
)
for api_key in response.get('items'):
self.apigateway_client.delete_api_key(
apiKey="{}".format(api_key['id'])
)
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
"""
Add api stage to Api key
"""
self.apigateway_client.update_api_key(
apiKey=api_key,
patchOperations=[
{
'op': 'add',
'path': '/stages',
'value': '{}/{}'.format(api_id, stage_name)
}
]
)
def get_patch_op(self, keypath, value, op='replace'):
"""
Return an object that describes a change of configuration on the given staging.
Setting will be applied on all available HTTP methods.
"""
if isinstance(value, bool):
value = str(value).lower()
return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value}
def get_rest_apis(self, project_name):
"""
Generator that allows to iterate per every available apis.
"""
all_apis = self.apigateway_client.get_rest_apis(
limit=500
)
for api in all_apis['items']:
if api['name'] != project_name:
continue
yield api
    def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
        """
        Delete a deployed REST API Gateway.

        Removes the custom-domain base path mapping first (when a domain is
        given), then tears the API down via its CloudFormation stack,
        falling back to direct API deletion for pre-CF deployments.
        """
        print("Deleting API Gateway..")
        api_id = self.get_api_id(lambda_name)
        if domain_name:
            # XXX - Remove Route53 smartly here?
            # XXX - This doesn't raise, but doesn't work either.
            try:
                # '(none)' is API Gateway's sentinel for the empty base path.
                self.apigateway_client.delete_base_path_mapping(
                    domainName=domain_name,
                    basePath='(none)' if base_path is None else base_path
                )
            except Exception as e:
                # We may not have actually set up the domain.
                pass
        was_deleted = self.delete_stack(lambda_name, wait=True)
        if not was_deleted:
            # try erasing it with the older method
            for api in self.get_rest_apis(lambda_name):
                self.apigateway_client.delete_rest_api(
                    restApiId=api['id']
                )
def update_stage_config(self,
                        project_name,
                        stage_name,
                        cloudwatch_log_level,
                        cloudwatch_data_trace,
                        cloudwatch_metrics_enabled
                        ):
    """
    Update CloudWatch metrics configuration.

    Applies the given logging/metrics settings to the named stage of
    every REST API belonging to this project.

    :param cloudwatch_log_level: one of self.cloudwatch_log_levels;
        invalid values fall back to 'OFF'.
    :param cloudwatch_data_trace: bool, full request/response logging.
    :param cloudwatch_metrics_enabled: bool, detailed metrics.
    """
    # Fall back to OFF rather than sending an invalid level to the API.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = 'OFF'
    for api in self.get_rest_apis(project_name):
        self.apigateway_client.update_stage(
            restApiId=api['id'],
            stageName=stage_name,
            patchOperations=[
                self.get_patch_op('logging/loglevel', cloudwatch_log_level),
                self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
                self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
            ]
        )
def update_cognito(self, lambda_name, user_pool, lambda_configs, lambda_arn):
LambdaConfig = {}
for config in lambda_configs:
LambdaConfig[config] = lambda_arn
description = self.cognito_client.describe_user_pool(UserPoolId=user_pool)
description_kwargs = {}
for key, value in description['UserPool'].items():
if key in ('UserPoolId', 'Policies', 'AutoVerifiedAttributes', 'SmsVerificationMessage',
'EmailVerificationMessage', 'EmailVerificationSubject', 'VerificationMessageTemplate',
'SmsAuthenticationMessage', 'MfaConfiguration', 'DeviceConfiguration',
'EmailConfiguration', 'SmsConfiguration', 'UserPoolTags',
'AdminCreateUserConfig'):
description_kwargs[key] = value
elif key == 'LambdaConfig':
for lckey, lcvalue in value.items():
if lckey in LambdaConfig:
value[lckey] = LambdaConfig[lckey]
print("value", value)
description_kwargs[key] = value
if 'LambdaConfig' not in description_kwargs:
description_kwargs['LambdaConfig'] = LambdaConfig
if 'TemporaryPasswordValidityDays' in description_kwargs['Policies']['PasswordPolicy']:
description_kwargs['AdminCreateUserConfig'].pop(
'UnusedAccountValidityDays', None)
if 'UnusedAccountValidityDays' in description_kwargs['AdminCreateUserConfig']:
description_kwargs['Policies']['PasswordPolicy']\
['TemporaryPasswordValidityDays'] = description_kwargs['AdminCreateUserConfig'].pop(
'UnusedAccountValidityDays', None)
result = self.cognito_client.update_user_pool(UserPoolId=user_pool, **description_kwargs)
if result['ResponseMetadata']['HTTPStatusCode'] != 200:
print("Cognito: Failed to update user pool", result)
# Now we need to add a policy to the IAM that allows cognito access
result = self.create_event_permission(lambda_name,
'cognito-idp.amazonaws.com',
'arn:aws:cognito-idp:{}:{}:userpool/{}'.
format(self.aws_region,
self.sts_client.get_caller_identity().get('Account'),
user_pool)
)
if result['ResponseMetadata']['HTTPStatusCode'] != 201:
print("Cognito: Failed to update lambda permission", result)
def delete_stack(self, name, wait=False):
    """
    Delete the CF stack managed by Zappa.

    Only deletes stacks carrying a ZappaProject tag matching ``name``.
    Returns True if a deletion was started, False otherwise.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
    except Exception:  # pragma: no cover
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any failure here means there is nothing to delete.
        print('No Zappa stack named {0}'.format(name))
        return False
    tags = {x['Key']: x['Value'] for x in stack['Tags']}
    if tags.get('ZappaProject') == name:
        self.cf_client.delete_stack(StackName=name)
        if wait:
            waiter = self.cf_client.get_waiter('stack_delete_complete')
            print('Waiting for stack {0} to be deleted..'.format(name))
            waiter.wait(StackName=name)
        return True
    else:
        # Refuse to delete a stack this project doesn't own.
        print('ZappaProject tag not found on {0}, doing nothing'.format(name))
        return False
def create_stack_template(self,
                          lambda_arn,
                          lambda_name,
                          api_key_required,
                          iam_authorization,
                          authorizer,
                          cors_options=None,
                          description=None,
                          endpoint_configuration=None
                          ):
    """
    Build the entire CF stack.
    Just used for the API Gateway, but could be expanded in the future.

    Returns the populated troposphere Template (also kept on
    self.cf_template).
    """
    auth_type = "NONE"
    if iam_authorization and authorizer:
        # IAM auth and a custom authorizer are mutually exclusive; prefer IAM.
        # Fix: logger.warn is a deprecated alias — use logger.warning.
        logger.warning("Both IAM Authorization and Authorizer are specified, this is not possible. "
                       "Setting Auth method to IAM Authorization")
        authorizer = None
        auth_type = "AWS_IAM"
    elif iam_authorization:
        auth_type = "AWS_IAM"
    elif authorizer:
        auth_type = authorizer.get("type", "CUSTOM")
    # build a fresh template
    self.cf_template = troposphere.Template()
    self.cf_template.add_description('Automatically generated with Zappa')
    self.cf_api_resources = []
    self.cf_parameters = {}
    # Populates self.cf_template / self.cf_api_resources as a side effect
    # (the returned rest api object itself is not needed here).
    self.create_api_gateway_routes(
        lambda_arn,
        api_name=lambda_name,
        api_key_required=api_key_required,
        authorization_type=auth_type,
        authorizer=authorizer,
        cors_options=cors_options,
        description=description,
        endpoint_configuration=endpoint_configuration
    )
    return self.cf_template
def update_stack(self, name, working_bucket, wait=False, update_only=False, disable_progress=False):
    """
    Update or create the CF stack managed by Zappa.

    Serializes self.cf_template to a timestamped JSON file, uploads it to
    ``working_bucket``, then creates or updates the stack from that URL.
    When ``wait`` is True, polls until the stack reaches a terminal state,
    showing per-resource progress. The template file is removed from disk
    and from S3 afterwards.

    :raises EnvironmentError: when the stack rolls back or is deleted
        while waiting.
    """
    capabilities = []
    # Timestamped template name, so repeated runs don't collide.
    template = name + '-template-' + str(int(time.time())) + '.json'
    with open(template, 'wb') as out:
        out.write(bytes(self.cf_template.to_json(indent=None, separators=(',', ':')), "utf-8"))
    self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
    # GovCloud buckets live on a region-specific S3 endpoint.
    if self.boto_session.region_name == "us-gov-west-1":
        url = 'https://s3-us-gov-west-1.amazonaws.com/{0}/{1}'.format(working_bucket, template)
    else:
        url = 'https://s3.amazonaws.com/{0}/{1}'.format(working_bucket, template)
    # Re-tag the stack, forcing the ZappaProject tag to this project name.
    tags = [{'Key': key, 'Value': self.tags[key]}
            for key in self.tags.keys()
            if key != 'ZappaProject']
    tags.append({'Key': 'ZappaProject', 'Value': name})
    update = True
    try:
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        # Stack doesn't exist yet, so this is a create.
        update = False
    if update_only and not update:
        print('CloudFormation stack missing, re-deploy to enable updates')
        return
    if not update:
        self.cf_client.create_stack(StackName=name,
                                    Capabilities=capabilities,
                                    TemplateURL=url,
                                    Tags=tags)
        print('Waiting for stack {0} to create (this can take a bit)..'.format(name))
    else:
        try:
            self.cf_client.update_stack(StackName=name,
                                        Capabilities=capabilities,
                                        TemplateURL=url,
                                        Tags=tags)
            print('Waiting for stack {0} to update..'.format(name))
        except botocore.client.ClientError as e:
            if e.response['Error']['Message'] == 'No updates are to be performed.':
                # Nothing changed; skip the wait loop below.
                wait = False
            else:
                raise
    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator('list_stack_resources')
        progress = tqdm(total=total_resources, unit='res', disable=disable_progress)
        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result['Stacks']:
                continue  # might need to wait a bit
            if result['Stacks'][0]['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
                break
            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result['Stacks'][0]['StackStatus'] in [
                'DELETE_COMPLETE',
                'DELETE_IN_PROGRESS',
                'ROLLBACK_IN_PROGRESS',
                'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
                'UPDATE_ROLLBACK_COMPLETE'
            ]:
                raise EnvironmentError("Stack creation failed. "
                                       "Please check your CloudFormation console. "
                                       "You may also need to `undeploy`.")
            # Count completed resources to drive the progress bar.
            count = 0
            for result in sr.paginate(StackName=name):
                done = (1 for x in result['StackResourceSummaries']
                        if 'COMPLETE' in x['ResourceStatus'])
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
                current_resources = count
        progress.close()
    try:
        os.remove(template)
    except OSError:
        pass
    self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
    """
    Return the Outputs of the named CloudFormation stack as a dict
    (OutputKey -> OutputValue), or an empty dict if the stack cannot
    be described.
    """
    try:
        described = self.cf_client.describe_stacks(StackName=name)
        outputs = described['Stacks'][0]['Outputs']
    except botocore.client.ClientError:
        return {}
    return {entry['OutputKey']: entry['OutputValue'] for entry in outputs}
def get_api_url(self, lambda_name, stage_name):
    """
    Given a lambda_name and stage_name, return a valid API URL,
    or None when no API id can be resolved.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        return None
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def get_api_id(self, lambda_name):
    """
    Given a lambda_name, return the API id.

    First asks CloudFormation for the stack's 'Api' resource; falls back
    to scanning API Gateway by name for pre-CloudFormation deployments.
    Returns None when no API can be found.
    """
    try:
        response = self.cf_client.describe_stack_resource(StackName=lambda_name,
                                                          LogicalResourceId='Api')
        return response['StackResourceDetail'].get('PhysicalResourceId', None)
    except:  # pragma: no cover
        try:
            # Try the old method (project was probably made on an older, non CF version)
            response = self.apigateway_client.get_rest_apis(limit=500)
            for item in response['items']:
                if item['name'] == lambda_name:
                    return item['id']
            logger.exception('Could not get API ID.')
            return None
        except:  # pragma: no cover
            # We don't even have an API deployed. That's okay!
            return None
def create_domain_name(self,
                       domain_name,
                       certificate_name,
                       certificate_body=None,
                       certificate_private_key=None,
                       certificate_chain=None,
                       certificate_arn=None,
                       lambda_name=None,
                       stage=None,
                       base_path=None):
    """
    Creates the API GW domain and returns the resulting DNS name.

    :param domain_name: custom domain to create in API Gateway.
    :param certificate_name: display name for the certificate.
    :param certificate_body: PEM body (Let's Encrypt / custom certs).
    :param certificate_private_key: PEM private key (Let's Encrypt / custom).
    :param certificate_chain: PEM chain (Let's Encrypt / custom).
    :param certificate_arn: ARN of an existing ACM certificate; used
        instead of the PEM material when given.
    :param lambda_name: deployed lambda whose API gets the base path mapping.
    :param stage: API Gateway stage to map.
    :param base_path: base path for the mapping; None maps the domain root.
    :raises LookupError: if no API is deployed for lambda_name.
    """
    # This is a Let's Encrypt or custom certificate
    if not certificate_arn:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateBody=certificate_body,
            certificatePrivateKey=certificate_private_key,
            certificateChain=certificate_chain
        )
    # This is an AWS ACM-hosted Certificate
    else:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateArn=certificate_arn
        )
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        raise LookupError("No API URL to certify found - did you deploy?")
    self.apigateway_client.create_base_path_mapping(
        domainName=domain_name,
        basePath='' if base_path is None else base_path,
        restApiId=api_id,
        stage=stage
    )
    return agw_response['distributionDomainName']
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation

    UPSERTs an alias A record when the domain is the zone apex,
    otherwise a CNAME pointing at the API Gateway distribution.
    Returns the change_resource_record_sets response.
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)
    # Route53 zone names carry a trailing dot; strip it for comparison.
    is_apex = self.route53.get_hosted_zone(Id=zone_id)['HostedZone']['Name'][:-1] == domain_name
    if is_apex:
        # Apex domains can't be CNAMEs — use an alias A record instead.
        record_set = {
            'Name': domain_name,
            'Type': 'A',
            'AliasTarget': {
                'HostedZoneId': 'Z2FDTNDATAQYW2',  # This is a magic value that means "CloudFront"
                'DNSName': dns_name,
                'EvaluateTargetHealth': False
            }
        }
    else:
        record_set = {
            'Name': domain_name,
            'Type': 'CNAME',
            'ResourceRecords': [
                {
                    'Value': dns_name
                }
            ],
            'TTL': 60
        }
    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]
    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    response = self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            'Changes': [
                {
                    'Action': 'UPSERT',
                    'ResourceRecordSet': record_set
                }
            ]
        }
    )
    return response
def update_domain_name(self,
                       domain_name,
                       certificate_name=None,
                       certificate_body=None,
                       certificate_private_key=None,
                       certificate_chain=None,
                       certificate_arn=None,
                       lambda_name=None,
                       stage=None,
                       route53=True,
                       base_path=None):
    """
    This updates your certificate information for an existing domain,
    with similar arguments to boto's update_domain_name API Gateway api.
    It returns the resulting new domain information including the new certificate's ARN
    if created during this process.
    Previously, this method involved downtime that could take up to 40 minutes
    because the API Gateway api only allowed this by deleting, and then creating it.
    Related issues: https://github.com/Miserlou/Zappa/issues/590
    https://github.com/Miserlou/Zappa/issues/588
    https://github.com/Miserlou/Zappa/pull/458
    https://github.com/Miserlou/Zappa/issues/882
    https://github.com/Miserlou/Zappa/pull/883
    """
    print("Updating domain name!")
    # NOTE(review): assumes certificate_name is a str — the default None
    # would raise a TypeError on this concatenation; confirm callers
    # always supply it. The timestamp suffix keeps names unique.
    certificate_name = certificate_name + str(time.time())
    # NOTE(review): the response is unused — presumably this call serves
    # to fail fast when the domain doesn't exist; verify.
    api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    if not certificate_arn \
            and certificate_body and certificate_private_key and certificate_chain:
        # Import the supplied PEM material into ACM to obtain an ARN.
        acm_certificate = self.acm_client.import_certificate(Certificate=certificate_body,
                                                             PrivateKey=certificate_private_key,
                                                             CertificateChain=certificate_chain)
        certificate_arn = acm_certificate['CertificateArn']
    self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)
    return self.apigateway_client.update_domain_name(domainName=domain_name,
                                                     patchOperations=[
                                                         {"op": "replace",
                                                          "path": "/certificateName",
                                                          "value": certificate_name},
                                                         {"op": "replace",
                                                          "path": "/certificateArn",
                                                          "value": certificate_arn}
                                                     ])
def update_domain_base_path_mapping(self, domain_name, lambda_name, stage, base_path):
    """
    Update domain base path mapping on API Gateway if it was changed

    Finds the mapping for this API/stage on the domain and patches its
    base path; creates the mapping if none exists yet.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        print("Warning! Can't update base path mapping!")
        return
    base_path_mappings = self.apigateway_client.get_base_path_mappings(domainName=domain_name)
    found = False
    for base_path_mapping in base_path_mappings.get('items', []):
        if base_path_mapping['restApiId'] == api_id and base_path_mapping['stage'] == stage:
            found = True
            # NOTE(review): API Gateway reports an empty base path as
            # '(none)', so comparing it against None/'' may always look
            # changed — confirm against live API behavior.
            if base_path_mapping['basePath'] != base_path:
                self.apigateway_client.update_base_path_mapping(domainName=domain_name,
                                                                basePath=base_path_mapping['basePath'],
                                                                patchOperations=[
                                                                    {"op": "replace",
                                                                     "path": "/basePath",
                                                                     "value": '' if base_path is None else base_path}
                                                                ])
    if not found:
        self.apigateway_client.create_base_path_mapping(
            domainName=domain_name,
            basePath='' if base_path is None else base_path,
            restApiId=api_id,
            stage=stage
        )
def get_all_zones(self):
    """Same behaviour of list_host_zones, but transparently handling pagination."""
    collected = []
    page = self.route53.list_hosted_zones(MaxItems='100')
    while page['IsTruncated']:
        collected.extend(page['HostedZones'])
        page = self.route53.list_hosted_zones(Marker=page['NextMarker'], MaxItems='100')
    # Pick up the final (non-truncated) page as well.
    collected.extend(page['HostedZones'])
    return {'HostedZones': collected}
def get_domain_name(self, domain_name, route53=True):
    """
    Scan our hosted zones for the record of a given name.
    Returns the record entry, else None.

    When ``route53`` is False, only checks that the API Gateway domain
    exists and returns True/None accordingly.
    """
    # Make sure api gateway domain is present
    try:
        self.apigateway_client.get_domain_name(domainName=domain_name)
    except Exception:
        return None
    if not route53:
        return True
    try:
        zones = self.get_all_zones()
        for zone in zones['HostedZones']:
            records = self.route53.list_resource_record_sets(HostedZoneId=zone['Id'])
            for record in records['ResourceRecordSets']:
                # Route53 names carry a trailing dot; strip for comparison.
                if record['Type'] in ('CNAME', 'A') and record['Name'][:-1] == domain_name:
                    return record
    except Exception as e:
        return None
    ##
    # Old, automatic logic.
    # If re-introduced, should be moved to a new function.
    # Related ticket: https://github.com/Miserlou/Zappa/pull/458
    ##
    # We may be in a position where Route53 doesn't have a domain, but the API Gateway does.
    # We need to delete this before we can create the new Route53.
    # try:
    #     api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    #     self.apigateway_client.delete_domain_name(domainName=domain_name)
    # except Exception:
    #     pass
    return None
##
# IAM
##
def get_credentials_arn(self):
    """
    Resolve the configured IAM role and cache its ARN on the instance.
    Returns the (role, arn) pair.
    """
    role = self.iam.Role(self.role_name)
    arn = role.arn
    self.credentials_arn = arn
    return role, arn
def create_iam_roles(self):
    """
    Create and defines the IAM roles and policies necessary for Zappa.
    If the IAM role already exists, it will be updated if necessary.

    Returns (credentials_arn, updated) where ``updated`` reports whether
    anything was created or changed.
    """
    attach_policy_obj = json.loads(self.attach_policy)
    assume_policy_obj = json.loads(self.assume_policy)
    if self.extra_permissions:
        # Merge user-supplied extra statements into the attach policy.
        for permission in self.extra_permissions:
            attach_policy_obj['Statement'].append(dict(permission))
        self.attach_policy = json.dumps(attach_policy_obj)
    updated = False
    # Create the role if needed
    try:
        role, credentials_arn = self.get_credentials_arn()
    except botocore.client.ClientError:
        print("Creating " + self.role_name + " IAM Role..")
        role = self.iam.create_role(
            RoleName=self.role_name,
            AssumeRolePolicyDocument=self.assume_policy
        )
        self.credentials_arn = role.arn
        updated = True
    # create or update the role's policies if needed
    policy = self.iam.RolePolicy(self.role_name, 'zappa-permissions')
    try:
        if policy.policy_document != attach_policy_obj:
            print("Updating zappa-permissions policy on " + self.role_name + " IAM Role.")
            policy.put(PolicyDocument=self.attach_policy)
            updated = True
    except botocore.client.ClientError:
        # Reading the policy failed, so it doesn't exist yet — create it.
        print("Creating zappa-permissions policy on " + self.role_name + " IAM Role.")
        policy.put(PolicyDocument=self.attach_policy)
        updated = True
    # NOTE(review): both the full document AND the principal service set
    # must differ before the assume-role policy is rewritten — confirm
    # 'and' (rather than 'or') is the intended logic here.
    if role.assume_role_policy_document != assume_policy_obj and \
            set(role.assume_role_policy_document['Statement'][0]['Principal']['Service']) != set(assume_policy_obj['Statement'][0]['Principal']['Service']):
        print("Updating assume role policy on " + self.role_name + " IAM Role.")
        self.iam_client.update_assume_role_policy(
            RoleName=self.role_name,
            PolicyDocument=self.assume_policy
        )
        updated = True
    return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
    """
    Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.
    """
    try:
        policy_response = self.lambda_client.get_policy(
            FunctionName=lambda_name
        )
        if policy_response['ResponseMetadata']['HTTPStatusCode'] == 200:
            # The policy document arrives as a JSON string.
            statement = json.loads(policy_response['Policy'])['Statement']
            for s in statement:
                delete_response = self.lambda_client.remove_permission(
                    FunctionName=lambda_name,
                    StatementId=s['Sid']
                )
                if delete_response['ResponseMetadata']['HTTPStatusCode'] != 204:
                    logger.error('Failed to delete an obsolete policy statement: {}'.format(policy_response))
        else:
            logger.debug('Failed to load Lambda function policy: {}'.format(policy_response))
    except ClientError as e:
        # A missing policy is expected on the first deploy; anything else
        # is worth surfacing in the error log.
        if e.args[0].find('ResourceNotFoundException') > -1:
            logger.debug('No policy found, must be first run.')
        else:
            logger.error('Unexpected client error {}'.format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
    """
    Create permissions to link to an event.
    Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html

    :param lambda_name: function to grant invoke access on.
    :param principal: service principal, e.g. 'events.amazonaws.com'.
    :param source_arn: ARN of the event source being allowed.
    :returns: the add_permission response, or None on failure.
    """
    logger.debug('Adding new permission to invoke Lambda function: {}'.format(lambda_name))
    # A random statement id keeps repeated grants from colliding.
    permission_response = self.lambda_client.add_permission(
        FunctionName=lambda_name,
        StatementId=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)),
        Action='lambda:InvokeFunction',
        Principal=principal,
        SourceArn=source_arn,
    )
    if permission_response['ResponseMetadata']['HTTPStatusCode'] != 201:
        print('Problem creating permission to invoke Lambda function')
        return None  # XXX: Raise?
    return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.
    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
    Expressions can be in rate or cron format:
    http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    """
    # The stream sources - DynamoDB, Kinesis and SQS - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ['dynamodb', 'kinesis', 'sqs']
    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"
    # Start from a clean slate: remove previously scheduled rules/permissions.
    self.unschedule_events(lambda_name=lambda_name, lambda_arn=lambda_arn, events=events,
                           excluded_source_services=pull_services)
    for event in events:
        function = event['function']
        expression = event.get('expression', None)  # single expression
        expressions = event.get('expressions', None)  # multiple expression
        kwargs = event.get('kwargs', {})  # optional dict of keyword arguments for the event
        event_source = event.get('event_source', None)
        description = event.get('description', function)
        # - If 'cron' or 'rate' in expression, use ScheduleExpression
        # - Else, use EventPattern
        #   - ex https://github.com/awslabs/aws-lambda-ddns-function
        if not self.credentials_arn:
            self.get_credentials_arn()
        if expression:
            expressions = [expression]  # same code for single and multiple expression
        if expressions:
            for index, expression in enumerate(expressions):
                name = self.get_scheduled_event_name(event, function, lambda_name, index)
                # if it's possible that we truncated name, generate a unique, shortened name
                # https://github.com/Miserlou/Zappa/issues/970
                if len(name) >= 64:
                    rule_name = self.get_hashed_rule_name(event, function, lambda_name)
                else:
                    rule_name = name
                rule_response = self.events_client.put_rule(
                    Name=rule_name,
                    ScheduleExpression=expression,
                    State='ENABLED',
                    Description=description,
                    RoleArn=self.credentials_arn
                )
                if 'RuleArn' in rule_response:
                    logger.debug('Rule created. ARN {}'.format(rule_response['RuleArn']))
                # Specific permissions are necessary for any trigger to work.
                self.create_event_permission(lambda_name, 'events.amazonaws.com', rule_response['RuleArn'])
                # Overwriting the input, supply the original values and add kwargs
                input_template = '{"time": <time>, ' \
                                 '"detail-type": <detail-type>, ' \
                                 '"source": <source>,' \
                                 '"account": <account>, ' \
                                 '"region": <region>,' \
                                 '"detail": <detail>, ' \
                                 '"version": <version>,' \
                                 '"resources": <resources>,' \
                                 '"id": <id>,' \
                                 '"kwargs": %s' \
                                 '}' % json.dumps(kwargs)
                # Create the CloudWatch event ARN for this function.
                # https://github.com/Miserlou/Zappa/issues/359
                target_response = self.events_client.put_targets(
                    Rule=rule_name,
                    Targets=[
                        {
                            'Id': 'Id' + ''.join(random.choice(string.digits) for _ in range(12)),
                            'Arn': lambda_arn,
                            'InputTransformer': {
                                'InputPathsMap': {
                                    'time': '$.time',
                                    'detail-type': '$.detail-type',
                                    'source': '$.source',
                                    'account': '$.account',
                                    'region': '$.region',
                                    'detail': '$.detail',
                                    'version': '$.version',
                                    'resources': '$.resources',
                                    'id': '$.id'
                                },
                                'InputTemplate': input_template
                            }
                        }
                    ]
                )
                if target_response['ResponseMetadata']['HTTPStatusCode'] == 200:
                    print("Scheduled {} with expression {}!".format(rule_name, expression))
                else:
                    print("Problem scheduling {} with expression {}.".format(rule_name, expression))
        elif event_source:
            service = self.service_from_arn(event_source['arn'])
            if service not in pull_services:
                # Push services need an explicit invoke permission per event.
                svc = ','.join(event['event_source']['events'])
                self.create_event_permission(
                    lambda_name,
                    service + '.amazonaws.com',
                    event['event_source']['arn']
                )
            else:
                svc = service
            rule_response = add_event_source(
                event_source,
                lambda_arn,
                function,
                self.boto_session
            )
            if rule_response == 'successful':
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == 'failed':
                print("Problem creating {} event schedule for {}!".format(svc, function))
            elif rule_response == 'exists':
                print("{} event schedule for {} already exists - Nothing to do here.".format(svc, function))
            elif rule_response == 'dryrun':
                print("Dryrun for creating {} event schedule for {}!!".format(svc, function))
        else:
            # NOTE(review): 'name' is only bound if a previous iteration took
            # the expressions branch — this may raise NameError otherwise; confirm.
            print("Could not create event {} - Please define either an expression or an event source".format(name))
@staticmethod
def get_scheduled_event_name(event, function, lambda_name, index=0):
    """
    Derive the CloudWatch rule name for a scheduled event.

    A custom event name is suffixed with the function name (the handler
    locates the function by that suffix), and expressions past the first
    are prefixed with their index to keep rule names unique.
    """
    base = event.get('name', function)
    if base != function:
        # a custom event name has been provided; keep the function name
        # as a postfix so zappa's handler can still locate the function.
        base = '{}-{}'.format(base, function)
    if index:
        # to ensure unique cloudwatch rule names for multiple expressions
        # Related: https://github.com/Miserlou/Zappa/pull/1051
        base = '{}-{}'.format(index, base)
    # prefix scheduled event names with lambda name. So we can look them up later via the prefix.
    return Zappa.get_event_name(lambda_name, base)
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return '{prefix:.{width}}-{postfix}'.format(prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name)[:64]
@staticmethod
def get_hashed_rule_name(event, function, lambda_name):
    """
    Returns an AWS-valid CloudWatch rule name using a digest of the event name,
    lambda name, and function.
    This allows support for rule names that may be longer than the 64 char limit.
    """
    event_name = event.get('name', function)
    digest_input = '{}-{}'.format(lambda_name, event_name).encode('UTF-8')
    name_hash = hashlib.sha1(digest_input).hexdigest()
    return Zappa.get_event_name(name_hash, function)
def delete_rule(self, rule_name):
    """
    Delete a CWE rule.
    This deletes them, but they will still show up in the AWS console.
    Annoying.
    """
    logger.debug('Deleting existing rule {}'.format(rule_name))
    # All targets must be removed before
    # we can actually delete the rule.
    try:
        targets = self.events_client.list_targets_by_rule(Rule=rule_name)
    except botocore.exceptions.ClientError as e:
        # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
        error_code = e.response['Error']['Code']
        if error_code == 'AccessDeniedException':
            raise
        else:
            # The rule is already gone (or never existed); nothing to delete.
            logger.debug('No target found for this rule: {} {}'.format(rule_name, e.args[0]))
            return
    if 'Targets' in targets and targets['Targets']:
        self.events_client.remove_targets(Rule=rule_name, Ids=[x['Id'] for x in targets['Targets']])
    else:  # pragma: no cover
        logger.debug('No target to delete')
    # Delete our rule.
    self.events_client.delete_rule(Name=rule_name)
def get_event_rule_names_for_lambda(self, lambda_arn):
    """
    Get all of the rule names associated with a lambda function,
    transparently following pagination tokens.
    """
    page = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn)
    names = page['RuleNames']
    while 'NextToken' in page:
        page = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn,
                                                            NextToken=page['NextToken'])
        names.extend(page['RuleNames'])
    return names
def get_event_rules_for_lambda(self, lambda_arn):
    """
    Get all of the rule details associated with this function.
    """
    rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    return [self.events_client.describe_rule(Name=rule_name)
            for rule_name in rule_names]
def unschedule_events(self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None):
    """
    Given a list of events, unschedule these CloudWatch Events.
    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
    """
    # Fix: this docstring previously appeared after the first statement,
    # where it was a no-op string literal rather than the function docstring.
    excluded_source_services = excluded_source_services or []
    self._clear_policy(lambda_name)
    rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    for rule_name in rule_names:
        self.delete_rule(rule_name)
        print('Unscheduled ' + rule_name + '.')
    non_cwe = [e for e in events if 'event_source' in e]
    for event in non_cwe:
        # TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
        # them no matter what.
        # These are non CWE event sources.
        function = event['function']
        name = event.get('name', function)
        event_source = event.get('event_source', function)
        service = self.service_from_arn(event_source['arn'])
        # DynamoDB and Kinesis streams take quite a while to setup after they are created and do not need to be
        # re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
        # update or zappa schedule.
        if service not in excluded_source_services:
            remove_event_source(
                event_source,
                lambda_arn,
                function,
                self.boto_session
            )
            print("Removed event {}{}.".format(
                name,
                " ({})".format(str(event_source['events'])) if 'events' in event_source else '')
            )
##
# Async / SNS
##
def create_async_sns_topic(self, lambda_name, lambda_arn):
    """
    Create the SNS-based async topic.

    Creates the topic, subscribes the Lambda to it, grants SNS invoke
    permission, and registers the topic as an event source routed to
    zappa's async task handler. Returns the topic ARN.
    """
    topic_name = get_topic_name(lambda_name)
    # Create SNS topic
    topic_arn = self.sns_client.create_topic(
        Name=topic_name)['TopicArn']
    # Create subscription
    self.sns_client.subscribe(
        TopicArn=topic_arn,
        Protocol='lambda',
        Endpoint=lambda_arn
    )
    # Add Lambda permission for SNS to invoke function
    self.create_event_permission(
        lambda_name=lambda_name,
        principal='sns.amazonaws.com',
        source_arn=topic_arn
    )
    # Add rule for SNS topic as a event source
    add_event_source(
        event_source={
            "arn": topic_arn,
            "events": ["sns:Publish"]
        },
        lambda_arn=lambda_arn,
        target_function="zappa.asynchronous.route_task",
        boto_session=self.boto_session
    )
    return topic_arn
def remove_async_sns_topic(self, lambda_name):
    """
    Remove the async SNS topic.

    Deletes every subscribed topic whose ARN contains this lambda's
    async topic name; returns the list of deleted topic ARNs.
    """
    topic_name = get_topic_name(lambda_name)
    removed = []
    subscriptions = self.sns_client.list_subscriptions()['Subscriptions']
    for subscription in subscriptions:
        arn = subscription['TopicArn']
        if topic_name in arn:
            self.sns_client.delete_topic(TopicArn=arn)
            removed.append(arn)
    return removed
##
# Async / DynamoDB
##
def _set_async_dynamodb_table_ttl(self, table_name):
self.dynamodb_client.update_time_to_live(
TableName=table_name,
TimeToLiveSpecification={
'Enabled': True,
'AttributeName': 'ttl'
}
)
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
    """
    Create the DynamoDB table for async task return values

    Returns (created, table_description); ``created`` is False when the
    table already existed.
    """
    try:
        dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
        return False, dynamodb_table
    # catch this exception (triggered if the table doesn't exist)
    except botocore.exceptions.ClientError:
        # Single 'id' hash key — async responses are looked up by task id.
        dynamodb_table = self.dynamodb_client.create_table(
            AttributeDefinitions=[
                {
                    'AttributeName': 'id',
                    'AttributeType': 'S'
                }
            ],
            TableName=table_name,
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': read_capacity,
                'WriteCapacityUnits': write_capacity
            }
        )
        if dynamodb_table:
            try:
                self._set_async_dynamodb_table_ttl(table_name)
            except botocore.exceptions.ClientError:
                # this fails because the operation is async, so retry
                time.sleep(10)
                self._set_async_dynamodb_table_ttl(table_name)
    return True, dynamodb_table
def remove_async_dynamodb_table(self, table_name):
    """Delete the DynamoDB table that stores async task return values."""
    self.dynamodb_client.delete_table(TableName=table_name)
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern='', limit=10000, start_time=0):
    """
    Fetch the CloudWatch logs for a given Lambda name.

    :param filter_pattern: CloudWatch Logs filter pattern string.
    :param limit: maximum events per filter_log_events page.
    :param start_time: epoch seconds to start from (0 = everything).
    :returns: events sorted by timestamp, across all pagination pages.
    """
    log_name = '/aws/lambda/' + lambda_name
    streams = self.logs_client.describe_log_streams(
        logGroupName=log_name,
        descending=True,
        orderBy='LastEventTime'
    )
    all_streams = streams['logStreams']
    all_names = [stream['logStreamName'] for stream in all_streams]
    # Amazon uses millisecond epoch for some reason.
    # Thanks, Jeff.
    # Fix: convert once, OUTSIDE the pagination loop — the previous code
    # re-multiplied start_time by 1000 on every page fetched, silently
    # skewing the window whenever results were paginated.
    start_time = start_time * 1000
    end_time = int(time.time()) * 1000
    events = []
    response = {}
    while not response or 'nextToken' in response:
        extra_args = {}
        if 'nextToken' in response:
            extra_args['nextToken'] = response['nextToken']
        response = self.logs_client.filter_log_events(
            logGroupName=log_name,
            logStreamNames=all_names,
            startTime=start_time,
            endTime=end_time,
            filterPattern=filter_pattern,
            limit=limit,
            interleaved=True,  # Does this actually improve performance?
            **extra_args
        )
        if response and 'events' in response:
            events += response['events']
    return sorted(events, key=lambda k: k['timestamp'])
def remove_log_group(self, group_name):
    """
    Delete the named CloudWatch log group.

    Failures (missing group, insufficient permissions) are printed,
    not raised — removal is best-effort.
    """
    print("Removing log group: {}".format(group_name))
    try:
        self.logs_client.delete_log_group(logGroupName=group_name)
    except botocore.exceptions.ClientError as e:
        print("Couldn't remove '{}' because of: {}".format(group_name, e))
def remove_lambda_function_logs(self, lambda_function_name):
    """
    Remove all logs that are assigned to a given lambda function id.
    """
    log_group = '/aws/lambda/{}'.format(lambda_function_name)
    self.remove_log_group(log_group)
def remove_api_gateway_logs(self, project_name):
    """
    Remove the execution log groups of every stage of every REST API
    belonging to the given project.
    """
    for rest_api in self.get_rest_apis(project_name):
        api_id = rest_api['id']
        stages = self.apigateway_client.get_stages(restApiId=api_id)['item']
        for stage in stages:
            group = 'API-Gateway-Execution-Logs_{}/{}'.format(api_id, stage['stageName'])
            self.remove_log_group(group)
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
    """
    Look up the Route 53 hosted zone id whose name best matches *domain*.
    """
    return self.get_best_match_zone(self.get_all_zones(), domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [zone for zone in all_zones['HostedZones'] if not zone['Config']['PrivateZone']]
zones = {zone['Name'][:-1]: zone['Id'] for zone in public_zones if zone['Name'][:-1] in domain}
if zones:
keys = max(zones.keys(), key=lambda a: len(a)) # get longest key -- best match.
return zones[keys]
else:
return None
def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
    """
    Create or update the ACME DNS-01 TXT challenge record in Route 53.
    """
    print("Setting DNS challenge..")
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch=self.get_dns_challenge_change_batch('UPSERT', domain, txt_challenge),
    )
def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
    """
    Delete the ACME DNS-01 TXT challenge record from Route 53.
    """
    print("Deleting DNS challenge..")
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch=self.get_dns_challenge_change_batch('DELETE', domain, txt_challenge),
    )
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challenge, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': '_acme-challenge.{0}'.format(domain),
'Type': 'TXT',
'TTL': 60,
'ResourceRecords': [{
'Value': '"{0}"'.format(txt_challenge)
}]
}
}]
}
##
# Utility
##
def shell(self):
    """
    Spawn a PDB shell.
    """
    # Local import: pdb is only needed when a developer asks for a debug shell.
    import pdb
    pdb.set_trace()
def load_credentials(self, boto_session=None, profile_name=None):
    """
    Load AWS credentials.

    An optional boto_session can be provided, but that's usually for testing.

    An optional profile_name can be provided for config files that have multiple sets
    of credentials.
    """
    if boto_session:
        # Respect the caller-supplied session, and adopt its region
        # in case it differs from ours.
        logger.debug("Using provided boto session: %s", boto_session)
        self.boto_session = boto_session
        self.aws_region = self.boto_session.region_name
    else:
        # Automatically load credentials from config or environment.
        if profile_name:
            # If provided, use the supplied profile name.
            self.boto_session = boto3.Session(profile_name=profile_name, region_name=self.aws_region)
        elif os.environ.get('AWS_ACCESS_KEY_ID') and os.environ.get('AWS_SECRET_ACCESS_KEY'):
            session_kw = {
                "aws_access_key_id": os.environ.get('AWS_ACCESS_KEY_ID'),
                "aws_secret_access_key": os.environ.get('AWS_SECRET_ACCESS_KEY'),
                "region_name": os.environ.get('AWS_DEFAULT_REGION') or self.aws_region,
            }

            # If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
            token = os.environ.get("AWS_SESSION_TOKEN")
            if token:
                session_kw["aws_session_token"] = token

            self.boto_session = boto3.Session(**session_kw)
        else:
            self.boto_session = boto3.Session(region_name=self.aws_region)

        logger.debug("Loaded boto session from config: %s", boto_session)

    if self.boto_session.region_name not in LAMBDA_REGIONS:
        print("Warning! AWS Lambda may not be available in this AWS Region!")

    if self.boto_session.region_name not in API_GATEWAY_REGIONS:
        print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(':')[2] | zappa-mathking | /zappa-mathking-0.52.4.tar.gz/zappa-mathking-0.52.4/zappa/core.py | core.py |
import calendar
import datetime
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from urllib.parse import urlparse
import botocore
import durationpy
from past.builtins import basestring
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.

    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.

    :param src: source directory (or an egg-link file; handled below)
    :param dst: destination directory; created if it does not exist
    :param metadata: also copy permissions / timestamps when True
    :param symlinks: recreate symlinks rather than copying their targets
    :param ignore: optional callable with the same contract as shutil.copytree's
    """
    def copy_file(src, dst, item):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)

        if symlinks and os.path.islink(s):  # pragma: no cover
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                except (AttributeError, NotImplementedError, OSError):
                    # lchmod not available on this platform (it is missing on
                    # most Linux builds). This used to be a bare `except:`,
                    # which also swallowed KeyboardInterrupt/SystemExit.
                    pass
        elif os.path.isdir(s):
            copytree(s, d, metadata, symlinks, ignore)
        else:
            shutil.copy2(s, d) if metadata else shutil.copy(s, d)

    try:
        lst = os.listdir(src)
        if not os.path.exists(dst):
            os.makedirs(dst)
        if metadata:
            shutil.copystat(src, dst)
    except NotADirectoryError:  # egg-link files
        copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
        return

    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]

    for item in lst:
        copy_file(src, dst, item)
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path).
    """
    if not url:
        return '', ''
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip('/')
def human_size(num, suffix='B'):
    """
    Convert bytes length to a human-readable version
    """
    prefixes = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi')
    idx = 0
    # Scale down by 1024 until we fit, or run out of binary prefixes.
    while abs(num) >= 1024.0 and idx < len(prefixes) - 1:
        num /= 1024.0
        idx += 1
    if abs(num) < 1024.0:
        return "{0:3.1f}{1!s}{2!s}".format(num, prefixes[idx], suffix)
    # Larger than Zi: fall back to Yi.
    num /= 1024.0
    return "{0:.1f}{1!s}{2!s}".format(num, 'Yi', suffix)
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.

    The input uses an extended version of Go's duration string syntax
    (e.g. "3h", "100000s") and is interpreted as an offset into the past
    from now. Returns 0 when the string cannot be parsed.
    """
    # Cleanup of the original: removed a stray semicolon, an unused exception
    # variable, and a dead `if ts: return ts` branch that could never run
    # (the try block already returned on success).
    try:
        delta = durationpy.from_str(timestring)
        past = datetime.datetime.utcnow() - delta
        return calendar.timegm(past.timetuple())
    except Exception:
        # Unparseable input: mirror the historical behavior and return 0.
        return 0
##
# `init` related
##
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.
    """
    matches = []
    cwd = os.getcwd()
    for root, _dirs, filenames in os.walk(cwd):
        for filename in fnmatch.filter(filenames, '*settings.py'):
            full = os.path.join(root, filename)
            # Skip anything that lives inside an installed package.
            if 'site-packages' in full:
                continue
            relative = full.replace(cwd, '')
            module = relative.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
            matches.append(module)
    return matches
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.
    """
    matches = []
    cwd = os.getcwd()
    for root, _dirs, filenames in os.walk(cwd):
        for filename in fnmatch.filter(filenames, '*.py'):
            full = os.path.join(root, filename)
            # Skip anything that lives inside an installed package.
            if 'site-packages' in full:
                continue

            with io.open(full, 'r', encoding='utf-8') as f:
                lines = f.readlines()

            for line in lines:
                app = None

                # Kind of janky..
                if '= Flask(' in line:
                    app = line.split('= Flask(')[0].strip()
                if '=Flask(' in line:
                    app = line.split('=Flask(')[0].strip()

                if not app:
                    continue

                relative = full.replace(cwd, '')
                module = relative.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
                matches.append(module + '.' + app)

    return matches
def get_venv_from_python_version():
    """Return the venv interpreter directory name for the running Python, e.g. 'python3.8'."""
    major, minor = sys.version_info[:2]
    return 'python{}.{}'.format(major, minor)
def get_runtime_from_python_version():
    """
    Map the running interpreter to an AWS Lambda runtime identifier.

    Raises ValueError on Python 2; anything newer than 3.7 maps to 'python3.8'.
    """
    if sys.version_info[0] < 3:
        raise ValueError("Python 2.x is no longer supported.")
    minor = sys.version_info[1]
    if minor <= 6:
        return 'python3.6'
    if minor <= 7:
        return 'python3.7'
    return 'python3.8'
##
# Async Tasks
##
def get_topic_name(lambda_name):
    """ Topic name generation """
    # One SNS topic per deployed Lambda, keyed by function name.
    topic_name = '%s-zappa-async' % lambda_name
    return topic_name
##
# Event sources / Kappa
##
def get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary item, a session and a lambda_arn,
    hack into Kappa's Gibson, create out an object we can call
    to schedule this event, and return the event source.
    """
    # Imported lazily so that merely importing this module does not require kappa.
    import kappa.awsclient
    import kappa.event_source.base
    import kappa.event_source.cloudwatch
    import kappa.event_source.dynamodb_stream
    import kappa.event_source.kinesis
    import kappa.event_source.s3
    import kappa.event_source.sns
    import kappa.function
    import kappa.policy
    import kappa.restapi
    import kappa.role

    # Minimal stand-ins for kappa's Context/Function objects; only the
    # attributes assigned further below are ever set on them.
    class PseudoContext:
        def __init__(self):
            return

    class PseudoFunction:
        def __init__(self):
            return

    # Mostly adapted from kappa - will probably be replaced by kappa support
    class SqsEventSource(kappa.event_source.base.EventSource):
        def __init__(self, context, config):
            super().__init__(context, config)
            self._lambda = kappa.awsclient.create_client(
                'lambda', context.session)

        def _get_uuid(self, function):
            # Returns the UUID of the first mapping between this queue ARN
            # and the function, or None if no mapping exists.
            uuid = None
            response = self._lambda.call(
                'list_event_source_mappings',
                FunctionName=function.name,
                EventSourceArn=self.arn)
            LOG.debug(response)
            if len(response['EventSourceMappings']) > 0:
                uuid = response['EventSourceMappings'][0]['UUID']
            return uuid

        def add(self, function):
            try:
                response = self._lambda.call(
                    'create_event_source_mapping',
                    FunctionName=function.name,
                    EventSourceArn=self.arn,
                    BatchSize=self.batch_size,
                    Enabled=self.enabled
                )
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to add event source')

        def enable(self, function):
            self._config['enabled'] = True
            try:
                response = self._lambda.call(
                    'update_event_source_mapping',
                    UUID=self._get_uuid(function),
                    Enabled=self.enabled
                )
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to enable event source')

        def disable(self, function):
            self._config['enabled'] = False
            try:
                # NOTE(review): unlike enable(), this passes FunctionName
                # rather than UUID=self._get_uuid(function) to
                # update_event_source_mapping -- looks inconsistent; confirm
                # against the Lambda API before relying on disable().
                response = self._lambda.call(
                    'update_event_source_mapping',
                    FunctionName=function.name,
                    Enabled=self.enabled
                )
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to disable event source')

        def update(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        'update_event_source_mapping',
                        BatchSize=self.batch_size,
                        Enabled=self.enabled,
                        FunctionName=function.arn)
                    LOG.debug(response)
                except Exception:
                    LOG.exception('Unable to update event source')

        def remove(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                response = self._lambda.call(
                    'delete_event_source_mapping',
                    UUID=uuid)
                LOG.debug(response)
            return response

        def status(self, function):
            # Returns the mapping description dict, or None when the mapping
            # does not exist (missing UUID or ClientError from the API).
            response = None
            LOG.debug('getting status for event source %s', self.arn)
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        'get_event_source_mapping',
                        UUID=self._get_uuid(function))
                    LOG.debug(response)
                except botocore.exceptions.ClientError:
                    LOG.debug('event source %s does not exist', self.arn)
                    response = None
            else:
                LOG.debug('No UUID for event source %s', self.arn)
            return response

    # SNS event source that additionally applies a subscription FilterPolicy
    # when one is present in the config.
    class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
        @property
        def filters(self):
            return self._config.get('filters')

        def add_filters(self, function):
            try:
                subscription = self.exists(function)
                if subscription:
                    response = self._sns.call(
                        'set_subscription_attributes',
                        SubscriptionArn=subscription['SubscriptionArn'],
                        AttributeName='FilterPolicy',
                        AttributeValue=json.dumps(self.filters)
                    )
                    kappa.event_source.sns.LOG.debug(response)
            except Exception:
                kappa.event_source.sns.LOG.exception('Unable to add filters for SNS topic %s', self.arn)

        def add(self, function):
            super().add(function)
            if self.filters:
                self.add_filters(function)

    event_source_map = {
        'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': kappa.event_source.kinesis.KinesisEventSource,
        's3': kappa.event_source.s3.S3EventSource,
        'sns': ExtendedSnsEventSource,
        'sqs': SqsEventSource,
        'events': kappa.event_source.cloudwatch.CloudWatchEventSource
    }

    # The service name is the third colon-field of the event source ARN.
    arn = event_source['arn']
    _, _, svc, _ = arn.split(':', 3)

    event_source_func = event_source_map.get(svc, None)
    if not event_source_func:
        raise ValueError('Unknown event source: {0}'.format(arn))

    # Monkey-patch kappa's notification-id generator to pass the name through
    # unchanged, so ids stay stable across deployments.
    def autoreturn(self, function_name):
        return function_name

    event_source_func._make_notification_id = autoreturn

    ctx = PseudoContext()
    ctx.session = boto_session

    funk = PseudoFunction()
    funk.name = lambda_arn

    # Kappa 0.6.0 requires this nasty hacking,
    # hopefully we can remove at least some of this soon.
    # Kappa 0.7.0 introduces a whole host over other changes we don't
    # really want, so we're stuck here for a little while.

    # Related: https://github.com/Miserlou/Zappa/issues/684
    #          https://github.com/Miserlou/Zappa/issues/688
    #          https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
    if svc == 's3':
        split_arn = lambda_arn.split(':')
        arn_front = ':'.join(split_arn[:-1])
        arn_back = split_arn[-1]
        ctx.environment = arn_back
        funk.arn = arn_front
        funk.name = ':'.join([arn_back, target_function])
    else:
        funk.arn = lambda_arn

    funk._context = ctx

    event_source_obj = event_source_func(ctx, event_source)

    return event_source_obj, ctx, funk
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and add the event source.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
    # TODO: Detect changes in config and refine exists algorithm
    if dry:
        return 'dryrun'
    if event_source_obj.status(funk):
        return 'exists'
    event_source_obj.add(funk)
    return 'successful' if event_source_obj.status(funk) else 'failed'
def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and remove the event source.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)

    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn

    if dry:
        return event_source_obj
    return event_source_obj.remove(funk)
def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    source_obj, _ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
    return source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.

    Returns True is updateable, else False.
    """
    # Local import keeps 'requests' optional for callers that never check.
    import requests

    pypi_url = 'https://pypi.org/pypi/Zappa/json'
    resp = requests.get(pypi_url, timeout=1.5)
    latest_version = resp.json()['info']['version']
    return this_version != latest_version
class InvalidAwsLambdaName(Exception):
    """Exception: proposed AWS Lambda name is invalid.

    Raised by validate_name() when a candidate function name is not a
    non-empty string of [a-zA-Z0-9-_] within the allowed length.
    """
    pass
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.

    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix.
    maxlen: max allowed length for name without prefix and suffix.
        The value 80 was calculated from prefix with longest known region name
        and assuming that no alias or version would be longer than `$LATEST`.

    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    # This codebase is Python-3 only (get_runtime_from_python_version raises on
    # 2.x), so check against `str` directly instead of past.builtins.basestring.
    # The old check admitted `bytes` only for re.match to crash with TypeError.
    if not isinstance(name, str):
        msg = "Name must be of type string"
        raise InvalidAwsLambdaName(msg)
    if len(name) > maxlen:
        msg = "Name is longer than {maxlen} characters."
        raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
    if len(name) == 0:
        msg = "Name must not be empty string."
        raise InvalidAwsLambdaName(msg)
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
        raise InvalidAwsLambdaName(msg)
    return name
def contains_python_files_or_subdirs(folder):
    """
    Checks (recursively) if the directory contains .py or .pyc files

    :param folder: directory path to scan
    :return: True if any file under `folder` ends in .py or .pyc
    """
    # os.walk() is already recursive, so a single pass over `folder` suffices.
    # The previous implementation additionally re-walked each bare subdirectory
    # *name* (resolved relative to the current working directory), which was
    # both redundant and incorrectly rooted.
    for _root, _dirs, files in os.walk(folder):
        if any(filename.endswith(('.py', '.pyc')) for filename in files):
            return True
    return False
def conflicts_with_a_neighbouring_module(directory_path):
    """
    Checks if a directory lies in the same directory as a .py file with the same name.
    """
    parent_dir, dir_name = os.path.split(os.path.normpath(directory_path))
    # A sibling file "<dir_name>.py" would shadow (or be shadowed by) the package.
    return (dir_name + '.py') in os.listdir(parent_dir)
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all keys titlecased.
    """
    titlecased = {}
    for key, value in d.items():
        titlecased[key.title()] = value
    return titlecased
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
    """
    Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
    """
    # Length must be between 3 and 63 characters inclusive.
    if not 3 <= len(name) <= 63:
        return False
    # No uppercase characters, no underscores.
    if any(ch.isupper() for ch in name):
        return False
    if '_' in name:
        return False
    # Must start with a lowercase letter or a digit.
    if not (name[0].islower() or name[0].isdigit()):
        return False
    # Every dot-separated label must be non-empty and begin/end with a
    # lowercase letter or a digit.
    labels = name.split('.')
    for label in labels:
        if not label:
            return False
        if not (label[0].islower() or label[0].isdigit()):
            return False
        if not (label[-1].islower() or label[-1].isdigit()):
            return False
    # Must not be formatted like an IP address (every label all-numeric).
    if all(label.isdigit() for label in labels):
        return False
    return True
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single = event.get('headers') or {}
    multi = (event.get('multiValueHeaders') or {}).copy()
    # Promote single-value-only headers into the multi-value dict, then
    # flatten every entry into one comma-joined string.
    for name, value in single.items():
        if name not in multi:
            multi[name] = [value]
    return {name: ', '.join(values) for name, values in multi.items()}
import argparse
import base64
import collections
import importlib
import inspect
import logging
import os
import pkgutil
import random
import re
import string
import sys
import tarfile
import tempfile
import time
import zipfile
from builtins import bytes, input
from datetime import datetime, timedelta
import argcomplete
import boto3
import botocore
import click
import hjson as json
import pkg_resources
import requests
import slugify
import toml
import yaml
from click import BaseCommand, Context
from click.exceptions import ClickException
from click.globals import push_context
from dateutil import parser
from past.builtins import basestring
from .core import API_GATEWAY_REGIONS, Zappa, logger
from .utilities import (InvalidAwsLambdaName, check_new_version_available,
detect_django_settings, detect_flask_apps,
get_runtime_from_python_version,
get_venv_from_python_version, human_size,
is_valid_bucket_name, parse_s3_url,
string_to_timestamp, validate_name)
# NOTE(review): zappa_settings keys that receive special treatment by the CLI;
# the consuming code is not visible in this chunk -- confirm usage sites.
CUSTOM_SETTINGS = [
    'apigateway_policy',
    'assume_policy',
    'attach_policy',
    'aws_region',
    'delete_local_zip',
    'delete_s3_zip',
    'exclude',
    'exclude_glob',
    'extra_permissions',
    'include',
    'role_name',
    'touch',
]

# Shown to users when boto credentials appear to be missing/misconfigured.
BOTO3_CONFIG_DOCS_URL = 'https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration'
##
# Main Input Processing
##
class ZappaCLI:
    """
    ZappaCLI object is responsible for loading the settings,
    handling the input arguments and executing the calls to the core library.
    """

    # CLI
    vargs = None  # vars() of the parsed argparse namespace
    command = None  # the subcommand being executed (deploy, update, ...)
    stage_env = None  # the stage name supplied on the command line

    # Zappa settings
    zappa = None  # the core Zappa object, created once settings are loaded
    zappa_settings = None  # parsed contents of the zappa_settings file
    load_credentials = True  # disabled for the offline 'package' command
    disable_progress = False

    # Specific settings
    # NOTE(review): these are class-level defaults overwritten per stage by
    # load_settings(); in particular `tags = []` is a MUTABLE class attribute
    # shared across instances until reassigned -- confirm it is always
    # replaced (never mutated in place) before relying on it.
    api_stage = None
    app_function = None
    aws_region = None
    debug = None
    prebuild_script = None
    project_name = None
    profile_name = None
    lambda_arn = None
    lambda_name = None
    lambda_description = None
    lambda_concurrency = None
    s3_bucket_name = None
    settings_file = None
    zip_path = None
    handler_path = None
    vpc_config = None
    memory_size = None
    use_apigateway = None
    lambda_handler = None
    django_settings = None
    manage_roles = True
    exception_handler = None
    environment_variables = None
    authorizer = None
    xray_tracing = False
    aws_kms_key_arn = ''
    context_header_mappings = None
    tags = []
    layers = None

    # Valid characters for a stage name used as part of env var names.
    stage_name_env_pattern = re.compile('^[a-zA-Z0-9_]+$')
def __init__(self):
    """Initialize the per-stage settings-override store."""
    self._stage_config_overrides = {}  # change using self.override_stage_config_setting(key, val)
@property
def stage_config(self):
    """
    A shortcut property for settings of a stage.

    Resolves the 'extends' chain (child keys win over parent keys), applies
    the legacy 'delete_zip' -> 'delete_local_zip' alias, and finally applies
    any forced overrides registered for the current stage.
    """

    def resolve(stage, seen=None):
        # Walk the 'extends' chain recursively, guarding against cycles.
        seen = [] if seen is None else seen
        if stage in seen:
            raise RuntimeError(stage + " has already been extended to these settings. "
                               "There is a circular extends within the settings file.")
        seen.append(stage)

        try:
            own_settings = dict(self.zappa_settings[stage].copy())
        except KeyError:
            raise ClickException("Cannot extend settings for undefined stage '" + stage + "'.")

        parent_stage = self.zappa_settings[stage].get('extends', None)
        if not parent_stage:
            return own_settings
        merged = resolve(stage=parent_stage, seen=seen)
        merged.update(own_settings)
        return merged

    settings = resolve(stage=self.api_stage)

    # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
    if 'delete_zip' in settings:
        settings['delete_local_zip'] = settings.get('delete_zip')

    settings.update(self.stage_config_overrides)

    return settings
@property
def stage_config_overrides(self):
    """
    Returns zappa_settings we forcefully override for the current stage
    set by `self.override_stage_config_setting(key, value)`
    """
    all_overrides = getattr(self, '_stage_config_overrides', {})
    return all_overrides.get(self.api_stage, {})
def override_stage_config_setting(self, key, val):
    """
    Forcefully override a setting set by zappa_settings (for the current stage only)

    :param key: settings key
    :param val: value
    """
    # Create the store lazily; instances constructed via __new__ may lack it.
    if not hasattr(self, '_stage_config_overrides'):
        self._stage_config_overrides = {}
    self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val
def handle(self, argv=None):
    """
    Main function.

    Parses command, load settings and dispatches accordingly.

    :param argv: optional argument list (defaults to sys.argv); passing one
        explicitly is mainly useful for tests.
    """
    desc = ('Zappa - Deploy Python applications to AWS Lambda'
            ' and API Gateway.\n')
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        '-v', '--version', action='version',
        version=pkg_resources.get_distribution("zappa-mathking").version,
        help='Print the zappa version'
    )
    parser.add_argument(
        '--color', default='auto', choices=['auto','never','always']
    )

    # Shared parent parser: every stage-aware subcommand inherits these flags.
    env_parser = argparse.ArgumentParser(add_help=False)
    me_group = env_parser.add_mutually_exclusive_group()
    all_help = ('Execute this command for all of our defined '
                'Zappa stages.')
    me_group.add_argument('--all', action='store_true', help=all_help)
    me_group.add_argument('stage_env', nargs='?')

    group = env_parser.add_argument_group()
    group.add_argument(
        '-a', '--app_function', help='The WSGI application function.'
    )
    group.add_argument(
        '-s', '--settings_file', help='The path to a Zappa settings file.'
    )
    group.add_argument(
        '-q', '--quiet', action='store_true', help='Silence all output.'
    )
    # https://github.com/Miserlou/Zappa/issues/407
    # Moved when 'template' command added.
    # Fuck Terraform.
    group.add_argument(
        '-j', '--json', action='store_true', help='Make the output of this command be machine readable.'
    )
    # https://github.com/Miserlou/Zappa/issues/891
    group.add_argument(
        '--disable_progress', action='store_true', help='Disable progress bars.'
    )
    group.add_argument(
        "--no_venv", action="store_true", help="Skip venv check."
    )

    ##
    # Certify
    ##
    subparsers = parser.add_subparsers(title='subcommands', dest='command')
    cert_parser = subparsers.add_parser(
        'certify', parents=[env_parser],
        help='Create and install SSL certificate'
    )
    cert_parser.add_argument(
        '--manual', action='store_true',
        help=("Gets new Let's Encrypt certificates, but prints them to console."
              "Does not update API Gateway domains.")
    )
    cert_parser.add_argument(
        '-y', '--yes', action='store_true', help='Auto confirm yes.'
    )

    ##
    # Deploy
    ##
    deploy_parser = subparsers.add_parser(
        'deploy', parents=[env_parser], help='Deploy application.'
    )
    deploy_parser.add_argument(
        '-z', '--zip', help='Deploy Lambda with specific local or S3 hosted zip package'
    )

    ##
    # Init
    ##
    init_parser = subparsers.add_parser('init', help='Initialize Zappa app.')

    ##
    # Package
    ##
    package_parser = subparsers.add_parser(
        'package', parents=[env_parser], help='Build the application zip package locally.'
    )
    package_parser.add_argument(
        '-o', '--output', help='Name of file to output the package to.'
    )

    ##
    # Template
    ##
    template_parser = subparsers.add_parser(
        'template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.'
    )
    template_parser.add_argument(
        '-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.'
    )
    template_parser.add_argument(
        '-r', '--role-arn', required=True, help='ARN of the Role to template with.'
    )
    template_parser.add_argument(
        '-o', '--output', help='Name of file to output the template to.'
    )

    ##
    # Invocation
    ##
    invoke_parser = subparsers.add_parser(
        'invoke', parents=[env_parser],
        help='Invoke remote function.'
    )
    invoke_parser.add_argument(
        '--raw', action='store_true',
        help=('When invoking remotely, invoke this python as a string,'
              ' not as a modular path.')
    )
    invoke_parser.add_argument(
        '--no-color', action='store_true',
        help=("Don't color the output")
    )
    invoke_parser.add_argument('command_rest')

    ##
    # Manage
    ##
    manage_parser = subparsers.add_parser(
        'manage',
        help='Invoke remote Django manage.py commands.'
    )
    rest_help = ("Command in the form of <env> <command>. <env> is not "
                 "required if --all is specified")
    manage_parser.add_argument('--all', action='store_true', help=all_help)
    manage_parser.add_argument('command_rest', nargs='+', help=rest_help)
    manage_parser.add_argument(
        '--no-color', action='store_true',
        help=("Don't color the output")
    )
    # This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
    # https://github.com/Miserlou/Zappa/issues/1002
    manage_parser.add_argument(
        '-s', '--settings_file', help='The path to a Zappa settings file.'
    )

    ##
    # Rollback
    ##
    def positive_int(s):
        """ Ensure an arg is positive """
        i = int(s)
        if i < 0:
            msg = "This argument must be positive (got {})".format(s)
            raise argparse.ArgumentTypeError(msg)
        return i

    rollback_parser = subparsers.add_parser(
        'rollback', parents=[env_parser],
        help='Rollback deployed code to a previous version.'
    )
    rollback_parser.add_argument(
        '-n', '--num-rollback', type=positive_int, default=1,
        help='The number of versions to rollback.'
    )

    ##
    # Scheduling
    ##
    subparsers.add_parser(
        'schedule', parents=[env_parser],
        help='Schedule functions to occur at regular intervals.'
    )

    ##
    # Status
    ##
    subparsers.add_parser(
        'status', parents=[env_parser],
        help='Show deployment status and event schedules.'
    )

    ##
    # Log Tailing
    ##
    tail_parser = subparsers.add_parser(
        'tail', parents=[env_parser], help='Tail deployment logs.'
    )
    tail_parser.add_argument(
        '--no-color', action='store_true',
        help="Don't color log tail output."
    )
    tail_parser.add_argument(
        '--http', action='store_true',
        help='Only show HTTP requests in tail output.'
    )
    tail_parser.add_argument(
        '--non-http', action='store_true',
        help='Only show non-HTTP requests in tail output.'
    )
    tail_parser.add_argument(
        '--since', type=str, default="100000s",
        help="Only show lines since a certain timeframe."
    )
    tail_parser.add_argument(
        '--filter', type=str, default="",
        help="Apply a filter pattern to the logs."
    )
    tail_parser.add_argument(
        '--force-color', action='store_true',
        help='Force coloring log tail output even if coloring support is not auto-detected. (example: piping)'
    )
    tail_parser.add_argument(
        '--disable-keep-open', action='store_true',
        help="Exit after printing the last available log, rather than keeping the log open."
    )

    ##
    # Undeploy
    ##
    undeploy_parser = subparsers.add_parser(
        'undeploy', parents=[env_parser], help='Undeploy application.'
    )
    undeploy_parser.add_argument(
        '--remove-logs', action='store_true',
        help=('Removes log groups of api gateway and lambda task'
              ' during the undeployment.'),
    )
    undeploy_parser.add_argument(
        '-y', '--yes', action='store_true', help='Auto confirm yes.'
    )

    ##
    # Unschedule
    ##
    subparsers.add_parser('unschedule', parents=[env_parser],
                          help='Unschedule functions.')

    ##
    # Updating
    ##
    update_parser = subparsers.add_parser(
        'update', parents=[env_parser], help='Update deployed application.'
    )
    update_parser.add_argument(
        '-z', '--zip', help='Update Lambda with specific local or S3 hosted zip package'
    )
    update_parser.add_argument(
        '-n', '--no-upload', help="Update configuration where appropriate, but don't upload new code"
    )

    ##
    # Debug
    ##
    subparsers.add_parser(
        'shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.'
    )

    argcomplete.autocomplete(parser)
    args = parser.parse_args(argv)
    self.vargs = vars(args)

    if args.color == 'never':
        disable_click_colors()
    elif args.color == 'always':
        #TODO: Support aggressive coloring like "--force-color" on all commands
        pass
    elif args.color == 'auto':
        pass

    # Parse the input
    # NOTE(rmoe): Special case for manage command
    # The manage command can't have both stage_env and command_rest
    # arguments. Since they are both positional arguments argparse can't
    # differentiate the two. This causes problems when used with --all.
    # (e.g. "manage --all showmigrations admin" argparse thinks --all has
    # been specified AND that stage_env='showmigrations')
    # By having command_rest collect everything but --all we can split it
    # apart here instead of relying on argparse.
    if not args.command:
        parser.print_help()
        return

    if args.command == 'manage' and not self.vargs.get('all'):
        self.stage_env = self.vargs['command_rest'].pop(0)
    else:
        self.stage_env = self.vargs.get('stage_env')

    if args.command == 'package':
        # 'package' is purely local, so skip AWS credential loading.
        self.load_credentials = False

    self.command = args.command

    self.disable_progress = self.vargs.get('disable_progress')
    if self.vargs.get('quiet'):
        self.silence()

    # We don't have any settings yet, so make those first!
    # (Settings-based interactions will fail
    # before a project has been initialized.)
    if self.command == 'init':
        self.init()
        return

    # Make sure there isn't a new version available
    if not self.vargs.get('json'):
        self.check_for_update()

    # Load and Validate Settings File
    self.load_settings_file(self.vargs.get('settings_file'))

    # Should we execute this for all stages, or just one?
    all_stages = self.vargs.get('all')
    stages = []

    if all_stages: # All stages!
        stages = self.zappa_settings.keys()
    else: # Just one env.
        if not self.stage_env:
            # If there's only one stage defined in the settings,
            # use that as the default.
            if len(self.zappa_settings.keys()) == 1:
                stages.append(list(self.zappa_settings.keys())[0])
            else:
                parser.error("Please supply a stage to interact with.")
        else:
            stages.append(self.stage_env)

    for stage in stages:
        try:
            self.dispatch_command(self.command, stage)
        except ClickException as e:
            # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
            e.show()
            sys.exit(e.exit_code)
    def dispatch_command(self, command, stage):
        """
        Given a command to execute and a stage to execute it in,
        dispatch to the matching handler method on this instance.

        Args:
            command: CLI subcommand name (e.g. 'deploy', 'update', 'tail').
            stage: Stage/environment name this invocation operates on.

        Side effects:
            Sets ``self.api_stage``, may set ``self.app_function``, loads
            stage settings, and exits the process on settings errors.
        """
        self.api_stage = stage
        # 'status' and 'manage' may need machine-readable output, so the
        # human-facing banner is suppressed for them (and for --json).
        if command not in ['status', 'manage']:
            if not self.vargs.get('json', None):
                click.echo("Calling " + click.style(command, fg="green", bold=True) + " for stage " +
                           click.style(self.api_stage, bold=True) + ".." )
        # Explicitly define the app function.
        # Related: https://github.com/Miserlou/Zappa/issues/832
        if self.vargs.get('app_function', None):
            self.app_function = self.vargs['app_function']
        # Load our settings, based on api_stage.
        try:
            self.load_settings(self.vargs.get('settings_file'))
        except ValueError as e:
            # Some exceptions carry a legacy .message attribute; prefer it.
            if hasattr(e, 'message'):
                print("Error: {}".format(e.message))
            else:
                print(str(e))
            sys.exit(-1)
        self.callback('settings')
        # Hand it off.
        # NOTE: 'deploy', 'package' and 'template' are plain `if`s (not part of
        # the elif chain below); this is harmless because `command` can only
        # match one branch, but new branches should join the elif chain.
        if command == 'deploy': # pragma: no cover
            self.deploy(self.vargs['zip'])
        if command == 'package': # pragma: no cover
            self.package(self.vargs['output'])
        if command == 'template': # pragma: no cover
            self.template( self.vargs['lambda_arn'],
                           self.vargs['role_arn'],
                           output=self.vargs['output'],
                           json=self.vargs['json']
                         )
        elif command == 'update': # pragma: no cover
            self.update(self.vargs['zip'], self.vargs['no_upload'])
        elif command == 'rollback': # pragma: no cover
            self.rollback(self.vargs['num_rollback'])
        elif command == 'invoke': # pragma: no cover
            if not self.vargs.get('command_rest'):
                print("Please enter the function to invoke.")
                return
            self.invoke(
                self.vargs['command_rest'],
                raw_python=self.vargs['raw'],
                no_color=self.vargs['no_color'],
            )
        elif command == 'manage': # pragma: no cover
            if not self.vargs.get('command_rest'):
                print("Please enter the management command to invoke.")
                return
            if not self.django_settings:
                print("This command is for Django projects only!")
                print("If this is a Django project, please define django_settings in your zappa_settings.")
                return
            command_tail = self.vargs.get('command_rest')
            if len(command_tail) > 1:
                # Multiple words: join them into a single management command string.
                command = " ".join(command_tail) # ex: zappa manage dev "shell --version"
            else:
                command = command_tail[0] # ex: zappa manage dev showmigrations admin
            self.invoke(
                command,
                command="manage",
                no_color=self.vargs['no_color'],
            )
        elif command == 'tail': # pragma: no cover
            self.tail(
                colorize=(not self.vargs['no_color']),
                http=self.vargs['http'],
                non_http=self.vargs['non_http'],
                since=self.vargs['since'],
                filter_pattern=self.vargs['filter'],
                force_colorize=self.vargs['force_color'] or None,
                keep_open=not self.vargs['disable_keep_open']
            )
        elif command == 'undeploy': # pragma: no cover
            self.undeploy(
                no_confirm=self.vargs['yes'],
                remove_logs=self.vargs['remove_logs']
            )
        elif command == 'schedule': # pragma: no cover
            self.schedule()
        elif command == 'unschedule': # pragma: no cover
            self.unschedule()
        elif command == 'status': # pragma: no cover
            self.status(return_json=self.vargs['json'])
        elif command == 'certify': # pragma: no cover
            self.certify(
                no_confirm=self.vargs['yes'],
                manual=self.vargs['manual']
            )
        elif command == 'shell': # pragma: no cover
            self.shell()
##
# The Commands
##
def package(self, output=None):
"""
Only build the package
"""
# Make sure we're in a venv.
self.check_venv()
# force not to delete the local zip
self.override_stage_config_setting('delete_local_zip', False)
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Create the Lambda Zip
self.create_package(output)
self.callback('zip')
size = human_size(os.path.getsize(self.zip_path))
click.echo(click.style("Package created", fg="green", bold=True) + ": " + click.style(self.zip_path, bold=True) + " (" + size + ")")
def template(self, lambda_arn, role_arn, output=None, json=False):
"""
Only build the template file.
"""
if not lambda_arn:
raise ClickException("Lambda ARN is required to template.")
if not role_arn:
raise ClickException("Role ARN is required to template.")
self.zappa.credentials_arn = role_arn
# Create the template!
template = self.zappa.create_stack_template(
lambda_arn=lambda_arn,
lambda_name=self.lambda_name,
api_key_required=self.api_key_required,
iam_authorization=self.iam_authorization,
authorizer=self.authorizer,
cors_options=self.cors,
description=self.apigateway_description,
endpoint_configuration=self.endpoint_configuration
)
if not output:
template_file = self.lambda_name + '-template-' + str(int(time.time())) + '.json'
else:
template_file = output
with open(template_file, 'wb') as out:
out.write(bytes(template.to_json(indent=None, separators=(',',':')), "utf-8"))
if not json:
click.echo(click.style("Template created", fg="green", bold=True) + ": " + click.style(template_file, bold=True))
else:
with open(template_file, 'r') as out:
print(out.read())
    def deploy(self, source_zip=None):
        """
        Package your project, upload it to S3, register the Lambda function
        and create the API Gateway routes.

        Args:
            source_zip: Optional pre-built package. May be an 's3://' URL or
                a local file path; when given, the packaging/upload steps
                are skipped entirely.

        Raises:
            ClickException: If already deployed, IAM role management fails,
                or any S3 upload/copy step fails.
        """
        if not source_zip:
            # Make sure we're in a venv.
            self.check_venv()
            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()
            # Make sure this isn't already deployed.
            deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
            if len(deployed_versions) > 0:
                raise ClickException("This application is " + click.style("already deployed", fg="red") +
                                     " - did you mean to call " + click.style("update", bold=True) + "?")
            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError as ce:
                    raise ClickException(
                        click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!\n" +
                        "You may " + click.style("lack the necessary AWS permissions", bold=True) +
                        " to automatically manage a Zappa execution role.\n" +
                        click.style("Exception reported by AWS:", bold=True) + format(ce) + '\n' +
                        "To fix this, see here: " +
                        click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True)
                        + '\n')
            # Create the Lambda Zip
            self.create_package()
            self.callback('zip')
            # Upload it to S3
            success = self.zappa.upload_to_s3(
                self.zip_path, self.s3_bucket_name, disable_progress=self.disable_progress)
            if not success: # pragma: no cover
                raise ClickException("Unable to upload to S3. Quitting.")
            # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
            if self.stage_config.get('slim_handler', False):
                # https://github.com/Miserlou/Zappa/issues/510
                success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name, disable_progress=self.disable_progress)
                if not success: # pragma: no cover
                    raise ClickException("Unable to upload handler to S3. Quitting.")
                # Copy the project zip to the current project zip
                current_project_name = '{0!s}_{1!s}_current_project.tar.gz'.format(self.api_stage, self.project_name)
                success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name,
                                                bucket_name=self.s3_bucket_name)
                if not success: # pragma: no cover
                    raise ClickException("Unable to copy the zip to be the current project. Quitting.")
                handler_file = self.handler_path
            else:
                handler_file = self.zip_path
        # If the function already exists, reuse its ARN instead of creating it.
        # Fixes https://github.com/Miserlou/Zappa/issues/613
        try:
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name)
        except botocore.client.ClientError:
            # Register the Lambda function with that zip as the source
            # You'll also need to define the path to your lambda_handler code.
            kwargs = dict(
                handler=self.lambda_handler,
                description=self.lambda_description,
                vpc_config=self.vpc_config,
                dead_letter_config=self.dead_letter_config,
                timeout=self.timeout_seconds,
                memory_size=self.memory_size,
                runtime=self.runtime,
                aws_environment_variables=self.aws_environment_variables,
                aws_kms_key_arn=self.aws_kms_key_arn,
                use_alb=self.use_alb,
                layers=self.layers,
                concurrency=self.lambda_concurrency,
            )
            # Choose the code source: caller-provided S3 URL, caller-provided
            # local zip, or the package we just uploaded above.
            if source_zip and source_zip.startswith('s3://'):
                bucket, key_name = parse_s3_url(source_zip)
                kwargs['function_name'] = self.lambda_name
                kwargs['bucket'] = bucket
                kwargs['s3_key'] = key_name
            elif source_zip and not source_zip.startswith('s3://'):
                with open(source_zip, mode='rb') as fh:
                    byte_stream = fh.read()
                kwargs['function_name'] = self.lambda_name
                kwargs['local_zip'] = byte_stream
            else:
                kwargs['function_name'] = self.lambda_name
                kwargs['bucket'] = self.s3_bucket_name
                kwargs['s3_key'] = handler_file
            self.lambda_arn = self.zappa.create_lambda_function(**kwargs)
        # Schedule events for this deployment
        self.schedule()
        endpoint_url = ''
        deployment_string = click.style("Deployment complete", fg="green", bold=True) + "!"
        if self.use_alb:
            kwargs = dict(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                alb_vpc_config=self.alb_vpc_config,
                timeout=self.timeout_seconds
            )
            self.zappa.deploy_lambda_alb(**kwargs)
        if self.use_apigateway:
            # Create and configure the API Gateway
            template = self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                disable_progress=self.disable_progress
            )
            api_id = self.zappa.get_api_id(self.lambda_name)
            # Add binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
            # Add payload compression
            if self.stage_config.get('payload_compression', True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get('payload_minimum_compression_size', 0))
            # Deploy the API!
            endpoint_url = self.deploy_api_gateway(api_id)
            deployment_string = deployment_string + ": {}".format(endpoint_url)
            # Create/link API key
            if self.api_key_required:
                if self.api_key is None:
                    self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
                else:
                    self.zappa.add_api_stage_to_api_key(api_key=self.api_key, api_id=api_id, stage_name=self.api_stage)
            # Warm the new endpoint with an initial request, unless disabled.
            if self.stage_config.get('touch', True):
                self.touch_endpoint(endpoint_url)
        # Finally, delete the local copy our zip package
        if not source_zip:
            if self.stage_config.get('delete_local_zip', True):
                self.remove_local_zip()
        # Remove the project zip from S3.
        if not source_zip:
            self.remove_uploaded_zip()
        self.callback('post')
        click.echo(deployment_string)
    def update(self, source_zip=None, no_upload=False):
        """
        Repackage and update the function code.

        Args:
            source_zip: Optional pre-built package ('s3://' URL or local
                path); when given, packaging and upload are skipped.
            no_upload: When True, update configuration/stack only, without
                building or uploading a new package.

        Side effects:
            Besides AWS updates, this also downloads the current project
            archive from S3, extracts it to an EFS mount path, prepends it
            to sys.path and chdirs into it (see notes at the end).
        """
        if not source_zip:
            # Make sure we're in a venv.
            self.check_venv()
            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()
            # Temporary version check
            try:
                # Cutoff timestamp: deployments older than this predate a
                # breaking Zappa change and must be redeployed, not updated.
                updated_time = 1472581018
                function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
                conf = function_response['Configuration']
                last_updated = parser.parse(conf['LastModified'])
                last_updated_unix = time.mktime(last_updated.timetuple())
            except botocore.exceptions.BotoCoreError as e:
                click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0])
                sys.exit(-1)
            except Exception as e:
                click.echo(click.style("Warning!", fg="red") + " Couldn't get function " + self.lambda_name +
                           " in " + self.zappa.aws_region + " - have you deployed yet?")
                sys.exit(-1)
            if last_updated_unix <= updated_time:
                click.echo(click.style("Warning!", fg="red") +
                           " You may have upgraded Zappa since deploying this application. You will need to " +
                           click.style("redeploy", bold=True) + " for this deployment to work properly!")
            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError:
                    click.echo(click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!")
                    click.echo("You may " + click.style("lack the necessary AWS permissions", bold=True) +
                               " to automatically manage a Zappa execution role.")
                    click.echo("To fix this, see here: " +
                               click.style("https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                                           bold=True))
                    sys.exit(-1)
            # Create the Lambda Zip,
            if not no_upload:
                self.create_package()
                self.callback('zip')
            # Upload it to S3
            if not no_upload:
                success = self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name, disable_progress=self.disable_progress)
                if not success: # pragma: no cover
                    raise ClickException("Unable to upload project to S3. Quitting.")
                # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
                if self.stage_config.get('slim_handler', False):
                    # https://github.com/Miserlou/Zappa/issues/510
                    success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name, disable_progress=self.disable_progress)
                    if not success: # pragma: no cover
                        raise ClickException("Unable to upload handler to S3. Quitting.")
                    # Copy the project zip to the current project zip
                    current_project_name = '{0!s}_{1!s}_current_project.tar.gz'.format(self.api_stage, self.project_name)
                    success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name,
                                                    bucket_name=self.s3_bucket_name)
                    if not success: # pragma: no cover
                        raise ClickException("Unable to copy the zip to be the current project. Quitting.")
                    handler_file = self.handler_path
                else:
                    handler_file = self.zip_path
        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        kwargs = dict(
            bucket=self.s3_bucket_name,
            function_name=self.lambda_name,
            num_revisions=self.num_retained_versions,
            concurrency=self.lambda_concurrency,
        )
        # Choose the code source, mirroring deploy(): caller S3 URL,
        # caller local zip, or the package uploaded above.
        if source_zip and source_zip.startswith('s3://'):
            bucket, key_name = parse_s3_url(source_zip)
            kwargs.update(dict(
                bucket=bucket,
                s3_key=key_name
            ))
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        elif source_zip and not source_zip.startswith('s3://'):
            with open(source_zip, mode='rb') as fh:
                byte_stream = fh.read()
            kwargs['local_zip'] = byte_stream
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        else:
            if not no_upload:
                kwargs['s3_key'] = handler_file
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        # Remove the uploaded zip from S3, because it is now registered..
        if not source_zip and not no_upload:
            self.remove_uploaded_zip()
        # NOTE(review): EFS mount path for the extracted project — assumes an
        # EFS access point is mounted at /mnt/tmp; confirm in infra config.
        project_folder = '/mnt/tmp/{0!s}'.format(self.project_name)
        # Update the configuration, in case there are changes.
        self.lambda_arn = self.zappa.update_lambda_configuration(
            lambda_arn=self.lambda_arn,
            function_name=self.lambda_name,
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            aws_environment_variables=self.aws_environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn,
            layers=self.layers,
            project_folder=project_folder
        )
        # Finally, delete the local copy our zip package
        if not source_zip and not no_upload:
            if self.stage_config.get('delete_local_zip', True):
                self.remove_local_zip()
        if self.use_apigateway:
            self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                update_only=True,
                disable_progress=self.disable_progress)
            api_id = self.zappa.get_api_id(self.lambda_name)
            # Update binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
            else:
                self.zappa.remove_binary_support(api_id=api_id, cors=self.cors)
            if self.stage_config.get('payload_compression', True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get('payload_minimum_compression_size', 0))
            else:
                self.zappa.remove_api_compression(api_id=api_id)
            # It looks a bit like we might actually be using this just to get the URL,
            # but we're also updating a few of the APIGW settings.
            endpoint_url = self.deploy_api_gateway(api_id)
            if self.stage_config.get('domain', None):
                endpoint_url = self.stage_config.get('domain')
        else:
            endpoint_url = None
        self.schedule()
        # Update any cognito pool with the lambda arn
        # do this after schedule as schedule clears the lambda policy and we need to add one
        self.update_cognito_triggers()
        self.callback('post')
        if endpoint_url and 'https://' not in endpoint_url:
            endpoint_url = 'https://' + endpoint_url
        if self.base_path:
            endpoint_url += '/' + self.base_path
        deployed_string = "Your updated Zappa deployment is " + click.style("live", fg='green', bold=True) + "!"
        if self.use_apigateway:
            deployed_string = deployed_string + ": " + click.style("{}".format(endpoint_url), bold=True)
            api_url = None
            if endpoint_url and 'amazonaws.com' not in endpoint_url:
                # A custom domain is in use; show the raw API Gateway URL too.
                api_url = self.zappa.get_api_url(
                    self.lambda_name,
                    self.api_stage)
                if endpoint_url != api_url:
                    deployed_string = deployed_string + " (" + api_url + ")"
            if self.stage_config.get('touch', True):
                if api_url:
                    self.touch_endpoint(api_url)
                elif endpoint_url:
                    self.touch_endpoint(endpoint_url)
        # NOTE(review): fork-specific step — pull the current project archive
        # from S3 and extract it to the EFS folder, then mutate sys.path and
        # the process CWD. This permanently changes this CLI process's working
        # directory; confirm downstream code expects that.
        print("s3 to efs downloading.......")
        boto_session = boto3.Session()
        project_zip_path = "s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz".format(self.s3_bucket_name, self.api_stage, self.project_name)
        remote_bucket, remote_file = parse_s3_url(project_zip_path)
        s3 = boto_session.resource('s3')
        archive_on_s3 = s3.Object(remote_bucket, remote_file).get()
        with tarfile.open(fileobj=archive_on_s3['Body'], mode="r|gz") as t:
            t.extractall(project_folder)
        # Add to project path
        sys.path.insert(0, project_folder)
        # Change working directory to project folder
        # Related: https://github.com/Miserlou/Zappa/issues/702
        os.chdir(project_folder)
        print("s3 to efs end!!")
        click.echo(deployed_string)
def rollback(self, revision):
"""
Rollsback the currently deploy lambda code to a previous revision.
"""
print("Rolling back..")
self.zappa.rollback_lambda_function_version(
self.lambda_name, versions_back=revision)
print("Done!")
    def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False, force_colorize=False):
        """
        Tail this function's logs.
        if keep_open, do so repeatedly, printing any new logs

        Args:
            since: Human-friendly start time (parsed by string_to_timestamp).
            filter_pattern: CloudWatch Logs filter pattern.
            limit: Max log events fetched per poll.
            keep_open: Poll forever (Ctrl-C to stop) instead of one fetch.
            colorize/http/non_http/force_colorize: Passed to print_logs.
        """
        try:
            since_stamp = string_to_timestamp(since)
            last_since = since_stamp
            while True:
                # NOTE: every poll fetches from the original since_stamp and
                # filters client-side below; already-seen events are dropped
                # by the timestamp comparison, not by the fetch window.
                new_logs = self.zappa.fetch_logs(
                    self.lambda_name,
                    start_time=since_stamp,
                    limit=limit,
                    filter_pattern=filter_pattern,
                )
                new_logs = [ e for e in new_logs if e['timestamp'] > last_since ]
                self.print_logs(new_logs, colorize, http, non_http, force_colorize)
                if not keep_open:
                    break
                if new_logs:
                    last_since = new_logs[-1]['timestamp']
                time.sleep(1)
        except KeyboardInterrupt: # pragma: no cover
            # Die gracefully
            try:
                sys.exit(0)
            except SystemExit:
                # sys.exit was intercepted (e.g. by a wrapper); force exit
                # with the conventional 130 (terminated by SIGINT) code.
                os._exit(130)
    def undeploy(self, no_confirm=False, remove_logs=False):
        """
        Tear down an existing deployment.

        Args:
            no_confirm: Skip the interactive "are you sure" prompt.
            remove_logs: Also delete API Gateway and Lambda CloudWatch logs.
        """
        if not no_confirm: # pragma: no cover
            confirm = input("Are you sure you want to undeploy? [y/n] ")
            if confirm != 'y':
                return
        if self.use_alb:
            self.zappa.undeploy_lambda_alb(self.lambda_name)
        if self.use_apigateway:
            if remove_logs:
                self.zappa.remove_api_gateway_logs(self.lambda_name)
            domain_name = self.stage_config.get('domain', None)
            base_path = self.stage_config.get('base_path', None)
            # Only remove the api key when not specified
            if self.api_key_required and self.api_key is None:
                api_id = self.zappa.get_api_id(self.lambda_name)
                self.zappa.remove_api_key(api_id, self.api_stage)
            gateway_id = self.zappa.undeploy_api_gateway(
                self.lambda_name,
                domain_name=domain_name,
                base_path=base_path
            )
        self.unschedule() # removes event triggers, including warm up event.
        self.zappa.delete_lambda_function(self.lambda_name)
        if remove_logs:
            self.zappa.remove_lambda_function_logs(self.lambda_name)
        click.echo(click.style("Done", fg="green", bold=True) + "!")
def update_cognito_triggers(self):
"""
Update any cognito triggers
"""
if self.cognito:
user_pool = self.cognito.get('user_pool')
triggers = self.cognito.get('triggers', [])
lambda_configs = set()
for trigger in triggers:
lambda_configs.add(trigger['source'].split('_')[0])
self.zappa.update_cognito(self.lambda_name, user_pool, lambda_configs, self.lambda_arn)
    def schedule(self):
        """
        Given a list of functions and a schedule to execute them,
        set up regular execution.

        Also provisions the optional async-task SNS topic and DynamoDB
        response table when configured for this stage.
        """
        events = self.stage_config.get('events', [])
        if events:
            if not isinstance(events, list): # pragma: no cover
                print("Events must be supplied as a list.")
                return
            for event in events:
                # Warn if a scheduled function name shadows something else.
                self.collision_warning(event.get('function'))
        if self.stage_config.get('keep_warm', True):
            if not events:
                events = []
            keep_warm_rate = self.stage_config.get('keep_warm_expression', "rate(4 minutes)")
            # Inject the synthetic keep-warm event alongside user events.
            events.append({'name': 'zappa-keep-warm',
                           'function': 'handler.keep_warm_callback',
                           'expression': keep_warm_rate,
                           'description': 'Zappa Keep Warm - {}'.format(self.lambda_name)})
        if events:
            try:
                function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            except botocore.exceptions.ClientError as e: # pragma: no cover
                click.echo(click.style("Function does not exist", fg="yellow") + ", please " +
                           click.style("deploy", bold=True) + "first. Ex:" +
                           click.style("zappa deploy {}.".format(self.api_stage), bold=True))
                sys.exit(-1)
            print("Scheduling..")
            self.zappa.schedule_events(
                lambda_arn=function_response['Configuration']['FunctionArn'],
                lambda_name=self.lambda_name,
                events=events
            )
        # Add async tasks SNS
        if self.stage_config.get('async_source', None) == 'sns' \
           and self.stage_config.get('async_resources', True):
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name)
            topic_arn = self.zappa.create_async_sns_topic(
                lambda_name=self.lambda_name,
                lambda_arn=self.lambda_arn
            )
            click.echo('SNS Topic created: %s' % topic_arn)
        # Add async tasks DynamoDB
        table_name = self.stage_config.get('async_response_table', False)
        read_capacity = self.stage_config.get('async_response_table_read_capacity', 1)
        write_capacity = self.stage_config.get('async_response_table_write_capacity', 1)
        if table_name and self.stage_config.get('async_resources', True):
            created, response_table = self.zappa.create_async_dynamodb_table(
                table_name, read_capacity, write_capacity)
            if created:
                click.echo('DynamoDB table created: %s' % table_name)
            else:
                click.echo('DynamoDB table exists: %s' % table_name)
                # An existing table is not resized; just warn on a mismatch.
                provisioned_throughput = response_table['Table']['ProvisionedThroughput']
                if provisioned_throughput['ReadCapacityUnits'] != read_capacity or \
                   provisioned_throughput['WriteCapacityUnits'] != write_capacity:
                    click.echo(click.style(
                        "\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(table_name),
                        fg='red'
                    ))
    def unschedule(self):
        """
        Given a list of scheduled functions,
        tear down their regular execution.

        Also removes the async-task SNS topic when configured.
        """
        # Run even if events are not defined to remove previously existing ones (thus default to []).
        events = self.stage_config.get('events', [])
        if not isinstance(events, list): # pragma: no cover
            print("Events must be supplied as a list.")
            return
        function_arn = None
        try:
            function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            function_arn = function_response['Configuration']['FunctionArn']
        except botocore.exceptions.ClientError as e: # pragma: no cover
            raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
                                 "Proceeding to unschedule CloudWatch based events.".format(self.api_stage))
        print("Unscheduling..")
        self.zappa.unschedule_events(
            lambda_name=self.lambda_name,
            lambda_arn=function_arn,
            events=events,
        )
        # Remove async task SNS
        if self.stage_config.get('async_source', None) == 'sns' \
           and self.stage_config.get('async_resources', True):
            removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo('SNS Topic removed: %s' % ', '.join(removed_arns))
    def invoke(self, function_name, raw_python=False, command=None, no_color=False):
        """
        Invoke a remote function.

        Args:
            function_name: Modular function path, raw Python string, or a
                Django management command string, depending on mode.
            raw_python: Treat function_name as raw Python to execute.
            command: Payload key override (e.g. 'manage' for Django).
            no_color: Print the raw base64-decoded log without formatting.

        Raises:
            ClickException: If the remote invocation reported a FunctionError.
        """
        # There are three likely scenarios for 'command' here:
        #   command, which is a modular function path
        #   raw_command, which is a string of python to execute directly
        #   manage, which is a Django-specific management command invocation
        key = command if command is not None else 'command'
        if raw_python:
            command = {'raw_command': function_name}
        else:
            command = {key: function_name}
        # Can't use hjson
        import json as json
        response = self.zappa.invoke_lambda_function(
            self.lambda_name,
            json.dumps(command),
            invocation_type='RequestResponse',
        )
        if 'LogResult' in response:
            if no_color:
                print(base64.b64decode(response['LogResult']))
            else:
                # Decode, normalize whitespace, then apply ANSI coloring.
                decoded = base64.b64decode(response['LogResult']).decode()
                formatted = self.format_invoke_command(decoded)
                colorized = self.colorize_invoke_command(formatted)
                print(colorized)
        else:
            print(response)
        # For a successful request FunctionError is not in response.
        # https://github.com/Miserlou/Zappa/pull/1254/
        if 'FunctionError' in response:
            raise ClickException(
                "{} error occurred while invoking command.".format(response['FunctionError'])
            )
def format_invoke_command(self, string):
"""
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
"""
string = string.replace('\\n', '\n')
formated_response = ''
for line in string.splitlines():
if line.startswith('REPORT'):
line = line.replace('\t', '\n')
if line.startswith('[DEBUG]'):
line = line.replace('\t', ' ')
formated_response += line + '\n'
formated_response = formated_response.replace('\n\n', '\n')
return formated_response
    def colorize_invoke_command(self, string):
        """
        Apply various heuristics to return a colorized version the invoke
        command string. If these fail, simply return the string in plaintext.

        Inspired by colorize_log_entry().
        """
        final_string = string
        try:
            # Line headers
            try:
                for token in ['START', 'END', 'REPORT', '[DEBUG]']:
                    if token in final_string:
                        # Plain tokens get wrapped in brackets; '[DEBUG]'
                        # already has them, so it is styled as-is and its
                        # regex pattern must be escaped (brackets are a
                        # character class otherwise).
                        format_string = '[{}]'
                        # match whole words only
                        pattern = r'\b{}\b'
                        if token == '[DEBUG]':
                            format_string = '{}'
                            pattern = re.escape(token)
                        repl = click.style(
                            format_string.format(token),
                            bold=True,
                            fg='cyan'
                        )
                        final_string = re.sub(
                            pattern.format(token), repl, final_string
                        )
            except Exception: # pragma: no cover
                pass
            # Green bold Tokens
            try:
                for token in [
                    'Zappa Event:',
                    'RequestId:',
                    'Version:',
                    'Duration:',
                    'Billed',
                    'Memory Size:',
                    'Max Memory Used:'
                ]:
                    if token in final_string:
                        final_string = final_string.replace(token, click.style(
                            token,
                            bold=True,
                            fg='green'
                        ))
            except Exception: # pragma: no cover
                pass
            # UUIDs
            # Heuristic: any whitespace-delimited token with exactly four
            # dashes and alphanumerics otherwise is treated as a UUID.
            for token in final_string.replace('\t', ' ').split(' '):
                try:
                    if token.count('-') == 4 and token.replace('-', '').isalnum():
                        final_string = final_string.replace(
                            token,
                            click.style(token, fg='magenta')
                        )
                except Exception: # pragma: no cover
                    pass
            return final_string
        except Exception:
            # Any unexpected failure falls back to the uncolored input.
            return string
    def status(self, return_json=False):
        """
        Describe the status of the current deployment.

        Args:
            return_json: Emit a machine-readable JSON document instead of
                the human-formatted table.

        Returns:
            True on success.

        Raises:
            ClickException: If no Lambda function exists for this stage.
        """
        def tabular_print(title, value):
            """
            Convenience function for printing formatted table items.
            """
            click.echo('%-*s%s' % (32, click.style("\t" + title, fg='green') + ':', str(value)))
            return
        # Lambda Env Details
        lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if not lambda_versions:
            raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" %
                                             (self.lambda_name, self.zappa.aws_region), fg='red'))
        status_dict = collections.OrderedDict()
        status_dict["Lambda Versions"] = len(lambda_versions)
        function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
        conf = function_response['Configuration']
        self.lambda_arn = conf['FunctionArn']
        status_dict["Lambda Name"] = self.lambda_name
        status_dict["Lambda ARN"] = self.lambda_arn
        status_dict["Lambda Role ARN"] = conf['Role']
        status_dict["Lambda Handler"] = conf['Handler']
        status_dict["Lambda Code Size"] = conf['CodeSize']
        status_dict["Lambda Version"] = conf['Version']
        status_dict["Lambda Last Modified"] = conf['LastModified']
        status_dict["Lambda Memory Size"] = conf['MemorySize']
        status_dict["Lambda Timeout"] = conf['Timeout']
        status_dict["Lambda Runtime"] = conf['Runtime']
        if 'VpcConfig' in conf.keys():
            status_dict["Lambda VPC ID"] = conf.get('VpcConfig', {}).get('VpcId', 'Not assigned')
        else:
            status_dict["Lambda VPC ID"] = None
        # Calculated statistics
        # Best-effort CloudWatch metrics: any failure is treated as zero.
        try:
            function_invocations = self.zappa.cloudwatch.get_metric_statistics(
                Namespace='AWS/Lambda',
                MetricName='Invocations',
                StartTime=datetime.utcnow()-timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1440,
                Statistics=['Sum'],
                Dimensions=[{'Name': 'FunctionName',
                             'Value': '{}'.format(self.lambda_name)}]
            )['Datapoints'][0]['Sum']
        except Exception as e:
            function_invocations = 0
        try:
            function_errors = self.zappa.cloudwatch.get_metric_statistics(
                Namespace='AWS/Lambda',
                MetricName='Errors',
                StartTime=datetime.utcnow()-timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1440,
                Statistics=['Sum'],
                Dimensions=[{'Name': 'FunctionName',
                             'Value': '{}'.format(self.lambda_name)}]
            )['Datapoints'][0]['Sum']
        except Exception as e:
            function_errors = 0
        try:
            error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
        except:
            # Division by zero when there were no invocations.
            error_rate = "Error calculating"
        status_dict["Invocations (24h)"] = int(function_invocations)
        status_dict["Errors (24h)"] = int(function_errors)
        status_dict["Error Rate (24h)"] = error_rate
        # URLs
        if self.use_apigateway:
            api_url = self.zappa.get_api_url(
                self.lambda_name,
                self.api_stage)
            status_dict["API Gateway URL"] = api_url
            # Api Keys
            api_id = self.zappa.get_api_id(self.lambda_name)
            for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
                status_dict["API Gateway x-api-key"] = api_key
            # There literally isn't a better way to do this.
            # AWS provides no way to tie a APIGW domain name to its Lambda function.
            domain_url = self.stage_config.get('domain', None)
            base_path = self.stage_config.get('base_path', None)
            if domain_url:
                status_dict["Domain URL"] = 'https://' + domain_url
                if base_path:
                    status_dict["Domain URL"] += '/' + base_path
            else:
                status_dict["Domain URL"] = "None Supplied"
        # Scheduled Events
        event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
        status_dict["Num. Event Rules"] = len(event_rules)
        if len(event_rules) > 0:
            status_dict['Events'] = []
        for rule in event_rules:
            event_dict = {}
            rule_name = rule['Name']
            event_dict["Event Rule Name"] = rule_name
            event_dict["Event Rule Schedule"] = rule.get('ScheduleExpression', None)
            event_dict["Event Rule State"] = rule.get('State', None).title()
            event_dict["Event Rule ARN"] = rule.get('Arn', None)
            status_dict['Events'].append(event_dict)
        if return_json:
            # Putting the status in machine readable format
            # https://github.com/Miserlou/Zappa/issues/407
            print(json.dumpsJSON(status_dict))
        else:
            click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
            for k, v in status_dict.items():
                if k == 'Events':
                    # Events are a list of dicts
                    for event in v:
                        for item_k, item_v in event.items():
                            tabular_print(item_k, item_v)
                else:
                    tabular_print(k, v)
        # TODO: S3/SQS/etc. type events?
        return True
def check_stage_name(self, stage_name):
"""
Make sure the stage name matches the AWS-allowed pattern
(calls to apigateway_client.create_deployment, will fail with error
message "ClientError: An error occurred (BadRequestException) when
calling the CreateDeployment operation: Stage name only allows
a-zA-Z0-9_" if the pattern does not match)
"""
if self.stage_name_env_pattern.match(stage_name):
return True
raise ValueError("AWS requires stage name to match a-zA-Z0-9_")
def check_environment(self, environment):
"""
Make sure the environment contains only strings
(since putenv needs a string)
"""
non_strings = []
for (k,v) in environment.items():
if not isinstance(v, basestring):
non_strings.append(k)
if non_strings:
raise ValueError("The following environment variables are not strings: {}".format(", ".join(non_strings)))
else:
return True
def init(self, settings_file="zappa_settings.json"):
    """
    Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.

    This should probably be broken up into few separate components once it's stable.
    Testing these inputs requires monkeypatching with mock, which isn't pretty.

    :param settings_file: filename checked for prior existence; note that the
        generated settings are always written to "zappa_settings.json" in the cwd.
    """
    # Make sure we're in a venv.
    self.check_venv()

    # Ensure that we don't already have a zappa_settings file.
    if os.path.isfile(settings_file):
        raise ClickException("This project already has a " + click.style("{0!s} file".format(settings_file), fg="red", bold=True) + "!")

    # Explain system.
    click.echo(click.style("""\n███████╗ █████╗ ██████╗ ██████╗ █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
███╔╝ ███████║██████╔╝██████╔╝███████║
███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║ ██║██║ ██║ ██║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""", fg='green', bold=True))

    click.echo(click.style("Welcome to ", bold=True) + click.style("Zappa", fg='green', bold=True) + click.style("!\n", bold=True))
    click.echo(click.style("Zappa", bold=True) + " is a system for running server-less Python web applications"
               " on AWS Lambda and AWS API Gateway.")
    click.echo("This `init` command will help you create and configure your new Zappa deployment.")
    click.echo("Let's get started!\n")

    # Create Env
    # Loop until the user supplies a stage name matching AWS's a-zA-Z0-9_ constraint.
    while True:
        click.echo("Your Zappa configuration can support multiple production stages, like '" +
                   click.style("dev", bold=True) + "', '" + click.style("staging", bold=True) + "', and '" +
                   click.style("production", bold=True) + "'.")
        env = input("What do you want to call this environment (default 'dev'): ") or "dev"
        try:
            self.check_stage_name(env)
            break
        except ValueError:
            click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))

    # Detect AWS profiles and regions
    # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack
    session = botocore.session.Session()
    config = session.full_config
    profiles = config.get("profiles", {})
    profile_names = list(profiles.keys())
    click.echo("\nAWS Lambda and API Gateway are only available in certain regions. "\
               "Let's check to make sure you have a profile set up in one that will work.")

    if not profile_names:
        profile_name, profile = None, None
        click.echo("We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}"
                   .format(click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)))
    elif len(profile_names) == 1:
        profile_name = profile_names[0]
        profile = profiles[profile_name]
        click.echo("Okay, using profile {}!".format(click.style(profile_name, bold=True)))
    else:
        # Several profiles exist: suggest "default" when present, else the first one.
        if "default" in profile_names:
            default_profile = [p for p in profile_names if p == "default"][0]
        else:
            default_profile = profile_names[0]

        while True:
            profile_name = input("We found the following profiles: {}, and {}. "\
                                 "Which would you like us to use? (default '{}'): "
                                 .format(
                                     ', '.join(profile_names[:-1]),
                                     profile_names[-1],
                                     default_profile
                                 )) or default_profile
            if profile_name in profiles:
                profile = profiles[profile_name]
                break
            else:
                click.echo("Please enter a valid name for your AWS profile.")

    profile_region = profile.get("region") if profile else None

    # Create Bucket
    click.echo("\nYour Zappa deployments will need to be uploaded to a " + click.style("private S3 bucket", bold=True) + ".")
    click.echo("If you don't have a bucket yet, we'll create one for you too.")
    default_bucket = "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9))
    # Loop until a bucket name passes S3 naming validation.
    while True:
        bucket = input("What do you want to call your bucket? (default '%s'): " % default_bucket) or default_bucket

        if is_valid_bucket_name(bucket):
            break

        click.echo(click.style("Invalid bucket name!", bold=True))
        click.echo("S3 buckets must be named according to the following rules:")
        click.echo("""* Bucket names must be unique across all existing bucket names in Amazon S3.
* Bucket names must comply with DNS naming conventions.
* Bucket names must be at least 3 and no more than 63 characters long.
* Bucket names must not contain uppercase characters or underscores.
* Bucket names must start with a lowercase letter or number.
* Bucket names must be a series of one or more labels. Adjacent labels are separated
by a single period (.). Bucket names can contain lowercase letters, numbers, and
hyphens. Each label must start and end with a lowercase letter or a number.
* Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL
wildcard certificate only matches buckets that don't contain periods. To work around
this, use HTTP or write your own certificate verification logic. We recommend that
you do not use periods (".") in bucket names when using virtual hosted–style buckets.
""")

    # Detect Django/Flask
    try:  # pragma: no cover
        import django
        has_django = True
    except ImportError as e:
        has_django = False

    try:  # pragma: no cover
        import flask
        has_flask = True
    except ImportError as e:
        has_flask = False

    print('')
    # App-specific
    if has_django:  # pragma: no cover
        click.echo("It looks like this is a " + click.style("Django", bold=True) + " application!")
        click.echo("What is the " + click.style("module path", bold=True) + " to your projects's Django settings?")
        django_settings = None

        matches = detect_django_settings()
        while django_settings in [None, '']:
            if matches:
                click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
                django_settings = input("Where are your project's settings? (default '%s'): " % matches[0]) or matches[0]
            else:
                click.echo("(This will likely be something like 'your_project.settings')")
                django_settings = input("Where are your project's settings?: ")
        # Strip quote characters the user may have pasted along with the path.
        django_settings = django_settings.replace("'", "")
        django_settings = django_settings.replace('"', "")
    else:
        matches = None
        if has_flask:
            click.echo("It looks like this is a " + click.style("Flask", bold=True) + " application.")
            matches = detect_flask_apps()
        click.echo("What's the " + click.style("modular path", bold=True) + " to your app's function?")
        click.echo("This will likely be something like 'your_module.app'.")
        app_function = None
        while app_function in [None, '']:
            if matches:
                click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
                app_function = input("Where is your app's function? (default '%s'): " % matches[0]) or matches[0]
            else:
                app_function = input("Where is your app's function?: ")
        # Strip quote characters the user may have pasted along with the path.
        app_function = app_function.replace("'", "")
        app_function = app_function.replace('"', "")

    # TODO: Create VPC?
    # Memory size? Time limit?
    # Domain? LE keys? Region?
    # 'Advanced Settings' mode?

    # Globalize
    click.echo("\nYou can optionally deploy to " + click.style("all available regions", bold=True) + " in order to provide fast global service.")
    click.echo("If you are using Zappa for the first time, you probably don't want to do this!")
    global_deployment = False
    while True:
        global_type = input("Would you like to deploy this application " + click.style("globally", bold=True) + "? (default 'n') [y/n/(p)rimary]: ")
        if not global_type:
            break
        if global_type.lower() in ["y", "yes", "p", "primary"]:
            global_deployment = True
            break
        if global_type.lower() in ["n", "no"]:
            global_deployment = False
            break

    # The given environment name
    zappa_settings = {
        env: {
            'profile_name': profile_name,
            's3_bucket': bucket,
            'runtime': get_venv_from_python_version(),
            'project_name': self.get_project_name()
        }
    }

    if profile_region:
        zappa_settings[env]['aws_region'] = profile_region

    if has_django:
        zappa_settings[env]['django_settings'] = django_settings
    else:
        zappa_settings[env]['app_function'] = app_function

    # Global Region Deployment
    if global_deployment:
        additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region]
        # Create additional stages
        if global_type.lower() in ["p", "primary"]:
            # "Primary" regions are the '-1' ones (e.g. us-east-1, eu-west-1).
            additional_regions = [r for r in additional_regions if '-1' in r]

        for region in additional_regions:
            env_name = env + '_' + region.replace('-', '_')
            g_env = {
                env_name: {
                    'extends': env,
                    'aws_region': region
                }
            }
            zappa_settings.update(g_env)

    import json as json  # hjson is fine for loading, not fine for writing.
    zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)

    click.echo("\nOkay, here's your " + click.style("zappa_settings.json", bold=True) + ":\n")
    click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))

    confirm = input("\nDoes this look " + click.style("okay", bold=True, fg="green") + "? (default 'y') [y/n]: ") or 'yes'
    # NOTE(review): confirm[0] is a single character, so the 'yes'/'YES'
    # entries below can never match; only 'y'/'Y' are effective.
    if confirm[0] not in ['y', 'Y', 'yes', 'YES']:
        click.echo("" + click.style("Sorry", bold=True, fg='red') + " to hear that! Please init again.")
        return

    # Write
    with open("zappa_settings.json", "w") as zappa_settings_file:
        zappa_settings_file.write(zappa_settings_json)

    if global_deployment:
        click.echo("\n" + click.style("Done", bold=True) + "! You can also " + click.style("deploy all", bold=True) + " by executing:\n")
        click.echo(click.style("\t$ zappa deploy --all", bold=True))

        click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
        click.echo(click.style("\t$ zappa update --all", bold=True))
    else:
        click.echo("\n" + click.style("Done", bold=True) + "! Now you can " + click.style("deploy", bold=True) + " your Zappa application by executing:\n")
        click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))

        click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
        click.echo(click.style("\t$ zappa update %s" % env, bold=True))

    click.echo("\nTo learn more, check out our project page on " + click.style("GitHub", bold=True) +
               " here: " + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True))
    click.echo("and stop by our " + click.style("Slack", bold=True) + " channel here: " +
               click.style("https://zappateam.slack.com", fg="cyan", bold=True))
    click.echo("\nEnjoy!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
    return
def certify(self, no_confirm=True, manual=False):
    """
    Register or update a domain certificate for this env.

    Three certificate sources are supported, selected by the stage config:
    Let's Encrypt (``lets_encrypt_key``), custom supplied files
    (``certificate``/``certificate_key``/``certificate_chain``), or an
    existing ACM certificate (``certificate_arn``).

    :param no_confirm: when False, interactively ask before proceeding.
    :param manual: passed through to the Let's Encrypt flow.
    """
    if not self.domain:
        raise ClickException("Can't certify a domain without " + click.style("domain", fg="red", bold=True) + " configured!")

    if not no_confirm:  # pragma: no cover
        confirm = input("Are you sure you want to certify? [y/n] ")
        if confirm != 'y':
            return

    # Make sure this isn't already deployed.
    deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
    if len(deployed_versions) == 0:
        raise ClickException("This application " + click.style("isn't deployed yet", fg="red") +
                             " - did you mean to call " + click.style("deploy", bold=True) + "?")

    account_key_location = self.stage_config.get('lets_encrypt_key', None)
    cert_location = self.stage_config.get('certificate', None)
    cert_key_location = self.stage_config.get('certificate_key', None)
    cert_chain_location = self.stage_config.get('certificate_chain', None)
    cert_arn = self.stage_config.get('certificate_arn', None)
    base_path = self.stage_config.get('base_path', None)

    # These are sensitive
    certificate_body = None
    certificate_private_key = None
    certificate_chain = None

    # Prepare for custom Let's Encrypt
    if not cert_location and not cert_arn:
        if not account_key_location:
            raise ClickException("Can't certify a domain without " + click.style("lets_encrypt_key", fg="red", bold=True) +
                                 " or " + click.style("certificate", fg="red", bold=True)+
                                 " or " + click.style("certificate_arn", fg="red", bold=True) + " configured!")

        # Get install account_key to /tmp/account_key.pem
        from .letsencrypt import gettempdir
        if account_key_location.startswith('s3://'):
            # The account key may itself live in S3.
            bucket, key_name = parse_s3_url(account_key_location)
            self.zappa.s3_client.download_file(bucket, key_name, os.path.join(gettempdir(), 'account.key'))
        else:
            from shutil import copyfile
            copyfile(account_key_location, os.path.join(gettempdir(), 'account.key'))

    # Prepare for Custom SSL
    elif not account_key_location and not cert_arn:
        if not cert_location or not cert_key_location or not cert_chain_location:
            raise ClickException("Can't certify a domain without " +
                                 click.style("certificate, certificate_key and certificate_chain", fg="red", bold=True) + " configured!")

        # Read the supplied certificates.
        with open(cert_location) as f:
            certificate_body = f.read()

        with open(cert_key_location) as f:
            certificate_private_key = f.read()

        with open(cert_chain_location) as f:
            certificate_chain = f.read()

    click.echo("Certifying domain " + click.style(self.domain, fg="green", bold=True) + "..")

    # Get cert and update domain.

    # Let's Encrypt
    if not cert_location and not cert_arn:
        from .letsencrypt import get_cert_and_update_domain
        cert_success = get_cert_and_update_domain(
            self.zappa,
            self.lambda_name,
            self.api_stage,
            self.domain,
            manual
        )

    # Custom SSL / ACM
    else:
        route53 = self.stage_config.get('route53_enabled', True)
        if not self.zappa.get_domain_name(self.domain, route53=route53):
            # No API Gateway domain yet: create one and (optionally) point Route 53 at it.
            dns_name = self.zappa.create_domain_name(
                domain_name=self.domain,
                certificate_name=self.domain + "-Zappa-Cert",
                certificate_body=certificate_body,
                certificate_private_key=certificate_private_key,
                certificate_chain=certificate_chain,
                certificate_arn=cert_arn,
                lambda_name=self.lambda_name,
                stage=self.api_stage,
                base_path=base_path
            )
            if route53:
                self.zappa.update_route53_records(self.domain, dns_name)
            print("Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be "
                  "created and propagated through AWS, but it requires no further work on your part.")
        else:
            # Domain already exists: just rotate the certificate on it.
            self.zappa.update_domain_name(
                domain_name=self.domain,
                certificate_name=self.domain + "-Zappa-Cert",
                certificate_body=certificate_body,
                certificate_private_key=certificate_private_key,
                certificate_chain=certificate_chain,
                certificate_arn=cert_arn,
                lambda_name=self.lambda_name,
                stage=self.api_stage,
                route53=route53,
                base_path=base_path
            )

        cert_success = True

    if cert_success:
        click.echo("Certificate " + click.style("updated", fg="green", bold=True) + "!")
    else:
        click.echo(click.style("Failed", fg="red", bold=True) + " to generate or install certificate! :(")
        click.echo("\n==============\n")
        shamelessly_promote()
##
# Shell
##
def shell(self):
    """
    Spawn a debug shell.

    Prints a reminder that the shell is local (not remote on Lambda) and then
    delegates to the core Zappa object's shell.
    """
    notice = (
        click.style("NOTICE!", fg="yellow", bold=True)
        + " This is a "
        + click.style("local", fg="green", bold=True)
        + " shell, inside a "
        + click.style("Zappa", bold=True)
        + " object!"
    )
    click.echo(notice)
    self.zappa.shell()
    return
##
# Utility
##
def callback(self, position):
    """
    Allows the execution of custom code between creation of the zip file and deployment to AWS.

    :param position: key into the stage's "callbacks" setting; its value must be
        a dotted "module.function" path. The resolved function is called with
        this CLI object as its single argument.
    :return: None
    :raises ClickException: if the callback module or function cannot be resolved.
    """
    callbacks = self.stage_config.get('callbacks', {})
    callback = callbacks.get(position)

    if callback:
        (mod_path, cb_func_name) = callback.rsplit('.', 1)

        try:  # Prefer callback in working directory
            if mod_path.count('.') >= 1:  # Callback function is nested in a folder
                (mod_folder_path, mod_name) = mod_path.rsplit('.', 1)
                mod_folder_path_fragments = mod_folder_path.split('.')
                working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
            else:
                mod_name = mod_path
                working_dir = os.getcwd()

            # NOTE(review): importer.find_module/.load_module are deprecated in
            # newer Pythons; if find_module returns None the AttributeError is
            # deliberately caught below to fall through to import_module.
            working_dir_importer = pkgutil.get_importer(working_dir)
            module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)

        except (ImportError, AttributeError):

            try:  # Callback func might be in virtualenv
                module_ = importlib.import_module(mod_path)
            except ImportError:  # pragma: no cover
                raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                    "import {position} callback ".format(position=position),
                    bold=True) + 'module: "{mod_path}"'.format(mod_path=click.style(mod_path, bold=True)))

        if not hasattr(module_, cb_func_name):  # pragma: no cover
            raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                "find {position} callback ".format(position=position), bold=True) + 'function: "{cb_func_name}" '.format(
                cb_func_name=click.style(cb_func_name, bold=True)) + 'in module "{mod_path}"'.format(mod_path=mod_path))

        cb_func = getattr(module_, cb_func_name)
        cb_func(self)  # Call the function passing self
def check_for_update(self):
    """
    Print a warning if there's a new Zappa version available.

    Any failure (no network, package metadata missing, etc.) is swallowed
    and printed — an update check must never break a real command.
    """
    try:
        installed_version = pkg_resources.require("zappa")[0].version
        if not check_new_version_available(installed_version):
            return
        click.echo(
            click.style("Important!", fg="yellow", bold=True)
            + " A new version of "
            + click.style("Zappa", bold=True)
            + " is available!"
        )
        click.echo("Upgrade with: " + click.style("pip install zappa --upgrade", bold=True))
        click.echo(
            "Visit the project page on GitHub to see the latest changes: "
            + click.style("https://github.com/Zappa/Zappa", bold=True)
        )
    except Exception as e:  # pragma: no cover
        print(e)
    return
def load_settings(self, settings_file=None, session=None):
    """
    Load the local zappa_settings file.

    An existing boto session can be supplied, though this is likely for testing purposes.

    Populates the many per-stage attributes on self and constructs the core
    Zappa object.

    :param settings_file: optional explicit path; otherwise auto-detected.
    :param session: optional pre-built boto session (mainly for tests).
    :return: the loaded Zappa object (also stored on self.zappa).
    """
    # Ensure we're passed a valid settings file.
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException("Please configure your zappa_settings file.")

    # Load up file
    self.load_settings_file(settings_file)

    # Make sure that the stages are valid names:
    for stage_name in self.zappa_settings.keys():
        try:
            self.check_stage_name(stage_name)
        except ValueError:
            raise ValueError("API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(stage_name))

    # Make sure that this stage is our settings
    if self.api_stage not in self.zappa_settings.keys():
        raise ClickException("Please define stage '{0!s}' in your Zappa settings.".format(self.api_stage))

    # We need a working title for this project. Use one if supplied, else cwd dirname.
    if 'project_name' in self.stage_config:  # pragma: no cover
        # If the name is invalid, this will throw an exception with message up stack
        self.project_name = validate_name(self.stage_config['project_name'])
    else:
        self.project_name = self.get_project_name()

    # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
    # Assume that we already have have validated the name beforehand.
    # Related: https://github.com/Miserlou/Zappa/pull/664
    #          https://github.com/Miserlou/Zappa/issues/678
    # And various others from Slack.
    self.lambda_name = slugify.slugify(self.project_name + '-' + self.api_stage)

    # Load stage-specific settings
    # A random bucket name is generated when 's3_bucket' is not configured.
    self.s3_bucket_name = self.stage_config.get('s3_bucket', "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9)))
    self.vpc_config = self.stage_config.get('vpc_config', {})
    self.memory_size = self.stage_config.get('memory_size', 512)
    self.app_function = self.stage_config.get('app_function', None)
    self.exception_handler = self.stage_config.get('exception_handler', None)
    self.aws_region = self.stage_config.get('aws_region', None)
    self.debug = self.stage_config.get('debug', True)
    self.prebuild_script = self.stage_config.get('prebuild_script', None)
    self.profile_name = self.stage_config.get('profile_name', None)
    self.log_level = self.stage_config.get('log_level', "DEBUG")
    self.domain = self.stage_config.get('domain', None)
    self.base_path = self.stage_config.get('base_path', None)
    self.timeout_seconds = self.stage_config.get('timeout_seconds', 30)
    dead_letter_arn = self.stage_config.get('dead_letter_arn', '')
    self.dead_letter_config = {'TargetArn': dead_letter_arn} if dead_letter_arn else {}
    self.cognito = self.stage_config.get('cognito', None)
    self.num_retained_versions = self.stage_config.get('num_retained_versions', None)

    # Check for valid values of num_retained_versions
    if self.num_retained_versions is not None and type(self.num_retained_versions) is not int:
        raise ClickException("Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s" % type(self.num_retained_versions))
    elif type(self.num_retained_versions) is int and self.num_retained_versions < 1:
        raise ClickException("The value for num_retained_versions in the zappa_settings.json should be greater than 0.")

    # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
    # https://github.com/Miserlou/Zappa/issues/490
    # https://github.com/Miserlou/Zappa/issues/493
    self.use_apigateway = self.stage_config.get('use_apigateway', True)
    if self.use_apigateway:
        self.use_apigateway = self.stage_config.get('apigateway_enabled', True)
        self.apigateway_description = self.stage_config.get('apigateway_description', None)

    self.lambda_handler = self.stage_config.get('lambda_handler', 'handler.lambda_handler')
    # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
    self.remote_env_bucket = self.stage_config.get('remote_env_bucket', None)
    self.remote_env_file = self.stage_config.get('remote_env_file', None)
    self.remote_env = self.stage_config.get('remote_env', None)
    self.settings_file = self.stage_config.get('settings_file', None)
    self.django_settings = self.stage_config.get('django_settings', None)
    self.manage_roles = self.stage_config.get('manage_roles', True)
    self.binary_support = self.stage_config.get('binary_support', True)
    self.api_key_required = self.stage_config.get('api_key_required', False)
    self.api_key = self.stage_config.get('api_key')
    self.endpoint_configuration = self.stage_config.get('endpoint_configuration', None)
    self.iam_authorization = self.stage_config.get('iam_authorization', False)
    self.cors = self.stage_config.get("cors", False)
    self.lambda_description = self.stage_config.get('lambda_description', "Zappa Deployment")
    self.lambda_concurrency = self.stage_config.get('lambda_concurrency', None)
    self.environment_variables = self.stage_config.get('environment_variables', {})
    self.aws_environment_variables = self.stage_config.get('aws_environment_variables', {})
    # Raises ValueError if any env value is not a string.
    self.check_environment(self.environment_variables)
    self.authorizer = self.stage_config.get('authorizer', {})
    self.runtime = self.stage_config.get('runtime', get_runtime_from_python_version())
    self.aws_kms_key_arn = self.stage_config.get('aws_kms_key_arn', '')
    self.context_header_mappings = self.stage_config.get('context_header_mappings', {})
    self.xray_tracing = self.stage_config.get('xray_tracing', False)
    self.desired_role_arn = self.stage_config.get('role_arn')
    self.layers = self.stage_config.get('layers', None)

    # Load ALB-related settings
    self.use_alb = self.stage_config.get('alb_enabled', False)
    self.alb_vpc_config = self.stage_config.get('alb_vpc_config', {})

    # Additional tags
    self.tags = self.stage_config.get('tags', {})

    desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole"
    self.zappa = Zappa(boto_session=session,
                       profile_name=self.profile_name,
                       aws_region=self.aws_region,
                       load_credentials=self.load_credentials,
                       desired_role_name=desired_role_name,
                       desired_role_arn=self.desired_role_arn,
                       runtime=self.runtime,
                       tags=self.tags,
                       endpoint_urls=self.stage_config.get('aws_endpoint_urls', {}),
                       xray_tracing=self.xray_tracing
                       )

    for setting in CUSTOM_SETTINGS:
        if setting in self.stage_config:
            setting_val = self.stage_config[setting]
            # Read the policy file contents.
            if setting.endswith('policy'):
                with open(setting_val, 'r') as f:
                    setting_val = f.read()
            setattr(self.zappa, setting, setting_val)

    if self.app_function:
        self.collision_warning(self.app_function)
        if self.app_function[-3:] == '.py':
            click.echo(click.style("Warning!", fg="red", bold=True) +
                       " Your app_function is pointing to a " + click.style("file and not a function", bold=True) +
                       "! It should probably be something like 'my_file.app', not 'my_file.py'!")

    return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
    """
    Return zappa_settings path as JSON or YAML (or TOML), as appropriate.

    Looks for <settings_name> with a .json, .toml, .yml or .yaml extension,
    preferring them in that order.

    :param settings_name: base filename (without extension) to search for.
    :raises ClickException: when no candidate file exists.
    """
    # Candidate filenames, listed in order of preference.
    preference = [settings_name + suffix for suffix in (".json", ".toml", ".yml", ".yaml")]
    found = [candidate for candidate in preference if os.path.isfile(candidate)]
    if not found:
        raise ClickException("Please configure a zappa_settings file or call `zappa init`.")
    return found[0]
def load_settings_file(self, settings_file=None):
    """
    Load our settings file.

    Parses the settings file as YAML, TOML or JSON (by extension) and stores
    the result on self.zappa_settings.

    :param settings_file: optional explicit path; otherwise auto-detected.
    :raises ClickException: when the file does not exist.
    :raises ValueError: when the file cannot be parsed.
    """
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException("Please configure your zappa_settings file or call `zappa init`.")

    # Pick the parser from the file extension; JSON is the fallback.
    _, extension = os.path.splitext(settings_file)
    if extension in ('.yml', '.yaml'):
        flavor, parse = "YAML", yaml.safe_load
    elif extension == '.toml':
        flavor, parse = "TOML", toml.load
    else:
        flavor, parse = "JSON", json.load

    with open(settings_file) as stream:
        try:
            self.zappa_settings = parse(stream)
        except ValueError:  # pragma: no cover
            raise ValueError("Unable to load the Zappa settings {0}. It may be malformed.".format(flavor))
def create_package(self, output=None):
    """
    Ensure that the package can be properly configured,
    and then create it.

    Builds the Lambda deployment archive(s) and appends a generated
    zappa_settings.py into the archive that will actually handle requests.

    :param output: optional explicit path for the generated archive.
    """
    # Create the Lambda zip package (includes project and virtualenvironment)
    # Also define the path the handler file so it can be copied to the zip
    # root for Lambda.
    current_file = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    handler_file = os.sep.join(current_file.split(os.sep)[0:]) + os.sep + 'handler.py'

    # Create the zip file(s)
    if self.stage_config.get('slim_handler', False):
        # Create two zips. One with the application and the other with just the handler.
        # https://github.com/Miserlou/Zappa/issues/510
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
            exclude=self.stage_config.get('exclude', []),
            exclude_glob=self.stage_config.get('exclude_glob', []),
            disable_progress=self.disable_progress,
            archive_format='tarball'
        )

        # Make sure the normal venv is not included in the handler's zip
        exclude = self.stage_config.get('exclude', [])
        cur_venv = self.zappa.get_current_venv()
        exclude.append(cur_venv.split('/')[-1])
        self.handler_path = self.zappa.create_lambda_zip(
            prefix='handler_{0!s}'.format(self.lambda_name),
            venv=self.zappa.create_handler_venv(),
            handler_file=handler_file,
            slim_handler=True,
            exclude=exclude,
            exclude_glob=self.stage_config.get('exclude_glob', []),
            output=output,
            disable_progress=self.disable_progress
        )
    else:
        # This could be python3.6 optimized.
        exclude = self.stage_config.get(
            'exclude', [
                "boto3",
                "dateutil",
                "botocore",
                "s3transfer",
                "concurrent"
            ])

        # Create a single zip that has the handler and application
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            handler_file=handler_file,
            use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
            exclude=exclude,
            exclude_glob=self.stage_config.get('exclude_glob', []),
            output=output,
            disable_progress=self.disable_progress
        )

        # Warn if this is too large for Lambda.
        file_stats = os.stat(self.zip_path)
        # 52428800 bytes == 50 MiB, the direct-upload limit.
        if file_stats.st_size > 52428800:  # pragma: no cover
            print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. '
                  'Try setting "slim_handler" to true in your Zappa settings file.\n\n')

    # Throw custom settings into the zip that handles requests
    if self.stage_config.get('slim_handler', False):
        handler_zip = self.handler_path
    else:
        handler_zip = self.zip_path

    with zipfile.ZipFile(handler_zip, 'a') as lambda_zip:

        # Build the generated zappa_settings.py source, line by line.
        settings_s = "# Generated by Zappa\n"

        if self.app_function:
            if '.' not in self.app_function:  # pragma: no cover
                raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." +
                                     " It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.")
            app_module, app_function = self.app_function.rsplit('.', 1)
            settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function)

        if self.exception_handler:
            settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
        else:
            settings_s += "EXCEPTION_HANDLER=None\n"

        if self.debug:
            settings_s = settings_s + "DEBUG=True\n"
        else:
            settings_s = settings_s + "DEBUG=False\n"

        settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))

        if self.binary_support:
            settings_s = settings_s + "BINARY_SUPPORT=True\n"
        else:
            settings_s = settings_s + "BINARY_SUPPORT=False\n"

        head_map_dict = {}
        head_map_dict.update(dict(self.context_header_mappings))
        settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(
            head_map_dict
        )

        # If we're on a domain, we don't need to define the /<<env>> in
        # the WSGI PATH
        if self.domain:
            settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
        else:
            settings_s = settings_s + "DOMAIN=None\n"

        if self.base_path:
            settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
        else:
            settings_s = settings_s + "BASE_PATH=None\n"

        # Pass through remote config bucket and path
        if self.remote_env:
            settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(
                self.remote_env
            )
        # DEPRECATED. use remove_env instead
        elif self.remote_env_bucket and self.remote_env_file:
            settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                self.remote_env_bucket, self.remote_env_file
            )

        # Local envs
        env_dict = {}
        if self.aws_region:
            env_dict['AWS_REGION'] = self.aws_region
        env_dict.update(dict(self.environment_variables))

        # Environment variable keys must be ascii
        # https://github.com/Miserlou/Zappa/issues/604
        # https://github.com/Miserlou/Zappa/issues/998
        try:
            env_dict = dict((k.encode('ascii').decode('ascii'), v) for (k, v) in env_dict.items())
        except Exception:
            raise ValueError("Environment variable keys must be ascii.")

        settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(
            env_dict
        )

        # We can be environment-aware
        settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
        settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))

        if self.settings_file:
            settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file))
        else:
            settings_s = settings_s + "SETTINGS_FILE=None\n"

        if self.django_settings:
            settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings))
        else:
            settings_s = settings_s + "DJANGO_SETTINGS=None\n"

        # If slim handler, path to project zip
        if self.stage_config.get('slim_handler', False):
            settings_s += "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                self.s3_bucket_name, self.api_stage, self.project_name)

            # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
            # and tell the handler we are the slim_handler
            # https://github.com/Miserlou/Zappa/issues/776
            settings_s += "SLIM_HANDLER=True\n"

            include = self.stage_config.get('include', [])
            if len(include) >= 1:
                settings_s += "INCLUDE=" + str(include) + '\n'

        # AWS Events function mapping
        event_mapping = {}
        events = self.stage_config.get('events', [])
        for event in events:
            arn = event.get('event_source', {}).get('arn')
            function = event.get('function')
            if arn and function:
                event_mapping[arn] = function
        settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)

        # Map Lex bot events
        bot_events = self.stage_config.get('bot_events', [])
        bot_events_mapping = {}
        for bot_event in bot_events:
            event_source = bot_event.get('event_source', {})
            intent = event_source.get('intent')
            invocation_source = event_source.get('invocation_source')
            function = bot_event.get('function')
            if intent and invocation_source and function:
                bot_events_mapping[str(intent) + ':' + str(invocation_source)] = function

        settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(bot_events_mapping)

        # Map cognito triggers
        cognito_trigger_mapping = {}
        cognito_config = self.stage_config.get('cognito', {})
        triggers = cognito_config.get('triggers', [])
        for trigger in triggers:
            source = trigger.get('source')
            function = trigger.get('function')
            if source and function:
                cognito_trigger_mapping[source] = function
        settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(cognito_trigger_mapping)

        # Authorizer config
        authorizer_function = self.authorizer.get('function', None)
        if authorizer_function:
            settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)

        # Copy our Django app into root of our package.
        # It doesn't work otherwise.
        if self.django_settings:
            base = __file__.rsplit(os.sep, 1)[0]
            django_py = ''.join(os.path.join(base, 'ext', 'django_zappa.py'))
            lambda_zip.write(django_py, 'django_zappa_app.py')

        # async response
        async_response_table = self.stage_config.get('async_response_table', '')
        settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)

        # Lambda requires a specific chmod
        temp_settings = tempfile.NamedTemporaryFile(delete=False)
        os.chmod(temp_settings.name, 0o644)
        temp_settings.write(bytes(settings_s, "utf-8"))
        temp_settings.close()
        lambda_zip.write(temp_settings.name, 'zappa_settings.py')
        os.unlink(temp_settings.name)
def remove_local_zip(self):
    """
    Remove the locally built deployment zip (and the slim handler zip, if any).

    Honors the 'delete_local_zip' stage setting (default: True). On a
    filesystem error we still abort with exit code -1, but now emit the
    reason first instead of dying silently.
    """
    if self.stage_config.get('delete_local_zip', True):
        try:
            # The main application package.
            if os.path.isfile(self.zip_path):
                os.remove(self.zip_path)
            # The separate slim handler package, when one was built.
            if self.handler_path and os.path.isfile(self.handler_path):
                os.remove(self.handler_path)
        except Exception as e:  # pragma: no cover
            # Previously this exited with no output at all, which made
            # failures impossible to diagnose. Keep the exit code, add context.
            sys.stderr.write("Failed to remove local zip: {}\n".format(e))
            sys.exit(-1)
def remove_uploaded_zip(self):
    """
    Delete deployment artifacts from S3 once Lambda has registered them.
    """
    # Nothing to do when the stage opts out of S3 cleanup.
    if not self.stage_config.get('delete_s3_zip', True):
        return
    # The uploaded application package is registered with Lambda now.
    self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
    if self.stage_config.get('slim_handler', False):
        # Slim handler deployments uploaded a separate handler zip as well;
        # remove it too (the project archive itself is managed elsewhere).
        self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
    """
    Clean up generated artifacts when the command terminates.

    Invoked on every exit path: SystemExit, KeyboardInterrupt, and any
    other unhandled exception.
    """
    if not self.zip_path:
        return
    # Removing the uploaded zip talks to S3, which requires credentials;
    # only commands that actually loaded them may attempt it.
    if self.load_credentials:
        self.remove_uploaded_zip()
    self.remove_local_zip()
def print_logs(self, logs, colorize=True, http=False, non_http=False, force_colorize=None):
    """
    Parse, filter and print logs to the console.

    Args:
        logs: iterable of dicts with 'timestamp' and 'message' keys.
        colorize: when False (and force_colorize unset), emit plain text.
        http: only show entries that look like HTTP common-log lines.
        non_http: only show entries that do NOT look like HTTP lines.
            Ignored when `http` is also set (matches historic precedence).
        force_colorize: passed through to click to force color on or off.
    """
    for log in logs:
        timestamp = log['timestamp']
        message = log['message']

        # Lambda lifecycle bookkeeping lines aren't application output.
        if "START RequestId" in message:
            continue
        if "REPORT RequestId" in message:
            continue
        if "END RequestId" in message:
            continue

        # Strip once, instead of re-stripping in every branch as before.
        stripped = message.strip()

        # Apply the HTTP/non-HTTP filter once, shared by both output modes.
        # `http` wins over `non_http` when both are set, as in the original
        # if/elif chain.
        if http:
            if not self.is_http_log_entry(stripped):
                continue
        elif non_http:
            if self.is_http_log_entry(stripped):
                continue

        if not colorize and not force_colorize:
            print("[" + str(timestamp) + "] " + stripped)
        else:
            click.echo(
                click.style("[", fg='cyan')
                + click.style(str(timestamp), bold=True)
                + click.style("]", fg='cyan')
                + self.colorize_log_entry(stripped),
                color=force_colorize,
            )
def is_http_log_entry(self, string):
    """
    Heuristically decide whether a log line is an HTTP common-log entry.

    A line qualifies when it is not a debug "Zappa Event" dump and it
    contains at least one token shaped like a dotted-quad IP address.
    """
    # Debug event dumps are never HTTP entries.
    if 'Zappa Event' in string:
        return False

    # Scan whitespace-separated tokens for something like "1.2.3.4".
    for candidate in string.replace('\t', ' ').split(' '):
        try:
            if candidate.count('.') == 3 and candidate.replace('.', '').isnumeric():
                return True
        except Exception:  # pragma: no cover
            continue
    return False
def get_project_name(self):
    """
    Derive a default project name from the current working directory.

    The directory's basename is slugified and truncated to 15 characters.
    # NOTE(review): the 15-char cap presumably keeps derived AWS resource
    # names within service limits — confirm before changing.
    """
    directory_name = os.getcwd().split(os.sep)[-1]
    return slugify.slugify(directory_name)[:15]
def colorize_log_entry(self, string):
    """
    Apply various heuristics to return a colorized version of a string.
    If these fail, simply return the string in plaintext.

    Heuristics applied, in order: bracketed tokens (log levels cyan,
    others bold), double-quoted strings (yellow), UUID-shaped tokens
    (magenta), dotted-quad IPs (red), common status codes (green/red),
    the "Zappa Event:" prefix (green), and date-ish tab-separated tokens
    (green). Everything is best-effort: each stage is wrapped so one bad
    token cannot break the whole line.
    """
    final_string = string
    try:
        # First, do stuff in square brackets
        inside_squares = re.findall(r'\[([^]]*)\]', string)
        for token in inside_squares:
            if token in ['CRITICAL', 'ERROR', 'WARNING', 'DEBUG', 'INFO', 'NOTSET']:
                final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, fg='cyan', bold=True) + click.style("]", fg='cyan'))
            else:
                final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, bold=True) + click.style("]", fg='cyan'))

        # Then do quoted strings
        quotes = re.findall(r'"[^"]*"', string)
        for token in quotes:
            final_string = final_string.replace(token, click.style(token, fg="yellow"))

        # And UUIDs
        # (the IP and status-code passes below reuse the same token loop)
        for token in final_string.replace('\t', ' ').split(' '):
            try:
                if token.count('-') == 4 and token.replace('-', '').isalnum():
                    final_string = final_string.replace(token, click.style(token, fg="magenta"))
            except Exception:  # pragma: no cover
                pass

            # And IP addresses
            try:
                if token.count('.') == 3 and token.replace('.', '').isnumeric():
                    final_string = final_string.replace(token, click.style(token, fg="red"))
            except Exception:  # pragma: no cover
                pass

            # And status codes
            try:
                if token in ['200']:
                    final_string = final_string.replace(token, click.style(token, fg="green"))
                if token in ['400', '401', '403', '404', '405', '500']:
                    final_string = final_string.replace(token, click.style(token, fg="red"))
            except Exception:  # pragma: no cover
                pass

        # And Zappa Events
        try:
            if "Zappa Event:" in final_string:
                final_string = final_string.replace("Zappa Event:", click.style("Zappa Event:", bold=True, fg="green"))
        except Exception:  # pragma: no cover
            pass

        # And dates
        # NOTE(review): any tab-separated token dateutil can parse gets
        # colored, which can be overly eager (it parses bare numbers too).
        for token in final_string.split('\t'):
            try:
                is_date = parser.parse(token)
                final_string = final_string.replace(token, click.style(token, fg="green"))
            except Exception:  # pragma: no cover
                pass

        # Collapse tabs and double spaces, and ensure a leading space
        # so entries align after the timestamp prefix.
        final_string = final_string.replace('\t', ' ').replace('  ', ' ')
        if final_string[0] != ' ':
            final_string = ' ' + final_string

        return final_string
    except Exception as e:  # pragma: no cover
        return string
def execute_prebuild_script(self):
    """
    Parse and execute the prebuild_script from the zappa_settings.

    The setting is a dotted path "package.module.function". We first try
    to resolve it relative to the current working directory; if that
    fails, we fall back to a normal import (e.g. the function lives in
    an installed package inside the virtualenv).
    """
    (pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1)

    try:  # Prefer prebuild script in working directory
        if pb_mod_path.count('.') >= 1:  # Prebuild script func is nested in a folder
            (mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1)
            mod_folder_path_fragments = mod_folder_path.split('.')
            working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
        else:
            mod_name = pb_mod_path
            working_dir = os.getcwd()

        working_dir_importer = pkgutil.get_importer(working_dir)
        # NOTE(review): importer.find_module() is deprecated in modern
        # Python in favor of find_spec(); confirm supported versions
        # before migrating.
        module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)

    except (ImportError, AttributeError):
        try:  # Prebuild func might be in virtualenv
            module_ = importlib.import_module(pb_mod_path)
        except ImportError:  # pragma: no cover
            raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                "import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format(
                pb_mod_path=click.style(pb_mod_path, bold=True)))

    if not hasattr(module_, pb_func):  # pragma: no cover
        raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
            "find prebuild script ", bold=True) + 'function: "{pb_func}" '.format(
            pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format(
            pb_mod_path=pb_mod_path))

    prebuild_function = getattr(module_, pb_func)
    prebuild_function()  # Call the function
def collision_warning(self, item):
    """
    Warn when a user-supplied dotted path shadows a Zappa core module.

    Used for app functions and event handler paths.
    """
    reserved_prefixes = (
        "zappa.", "wsgi.", "middleware.", "handler.", "util.", "letsencrypt.", "cli."
    )
    for prefix in reserved_prefixes:
        if not item.startswith(prefix):
            continue
        click.echo(click.style("Warning!", fg="red", bold=True) +
                   " You may have a namespace collision between " +
                   click.style(item, bold=True) +
                   " and " +
                   click.style(prefix, bold=True) +
                   "! You may want to rename that file.")
def deploy_api_gateway(self, api_id):
    """
    Deploy the API Gateway stage for this project and return its URL.

    All tunables come from the stage configuration, with the same
    defaults API Gateway documents for new stages.
    """
    stage = self.stage_config
    endpoint_url = self.zappa.deploy_api_gateway(
        api_id=api_id,
        stage_name=self.api_stage,
        cache_cluster_enabled=stage.get('cache_cluster_enabled', False),
        cache_cluster_size=str(stage.get('cache_cluster_size', .5)),
        cloudwatch_log_level=stage.get('cloudwatch_log_level', 'OFF'),
        cloudwatch_data_trace=stage.get('cloudwatch_data_trace', False),
        cloudwatch_metrics_enabled=stage.get('cloudwatch_metrics_enabled', False),
        cache_cluster_ttl=stage.get('cache_cluster_ttl', 300),
        cache_cluster_encrypted=stage.get('cache_cluster_encrypted', False)
    )
    return endpoint_url
def check_venv(self):
    """ Ensure we're inside a virtualenv. """
    # Explicit opt-out via --no-venv.
    if self.vargs and self.vargs.get("no_venv"):
        return
    # `init` runs before settings exist, so fall back to the classmethod.
    venv = self.zappa.get_current_venv() if self.zappa else Zappa.get_current_venv()
    if venv:
        return
    raise ClickException(
        click.style("Zappa", bold=True) + " requires an " + click.style("active virtual environment", bold=True, fg="red") + "!\n" +
        "Learn more about virtual environments here: " + click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan"))
def silence(self):
    """
    Route all stdout and stderr to the null device.

    The previous stream objects are intentionally not closed, since they
    may be the process's real stdout/stderr.
    """
    # One shared handle serves both streams; opening os.devnull twice, as
    # this used to do, leaked an extra file descriptor per call.
    devnull = open(os.devnull, 'w')
    sys.stdout = devnull
    sys.stderr = devnull
def touch_endpoint(self, endpoint_url):
    """
    Test the deployed endpoint with a GET request.

    Raises ClickException when the endpoint keeps answering with a 5xx
    status after the retry budget is exhausted.
    """
    # Private APIGW endpoints most likely can't be reached by a deployer
    # unless they're connected to the VPC by VPN. Instead of trying to
    # connect to the service, print a warning and let the user know
    # to check it manually.
    # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565
    if 'PRIVATE' in self.stage_config.get('endpoint_configuration', []):
        print(
            click.style("Warning!", fg="yellow", bold=True) +
            " Since you're deploying a private API Gateway endpoint,"
            " Zappa cannot determine if your function is returning "
            " a correct status code. You should check your API's response"
            " manually before considering this deployment complete."
        )
        return

    touch_path = self.stage_config.get('touch_path', '/')
    req = requests.get(endpoint_url + touch_path)

    # Really large packages can take 60-90 seconds to become ready, during
    # which API Gateway answers 504. Retry up to 4 more times while we keep
    # seeing 504, stopping early on any other status.
    if req.status_code == 504:
        for _ in range(5):
            req = requests.get(endpoint_url + touch_path)
            if req.status_code != 504:
                break

    if req.status_code >= 500:
        raise ClickException(click.style("Warning!", fg="red", bold=True) +
                             " Status check on the deployed lambda failed." +
                             " A GET request to '" + touch_path + "' yielded a " +
                             click.style(str(req.status_code), fg="red", bold=True) + " response code.")
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """
    # Small helpers so each echo line stays readable.
    def green(word):
        return click.style(word, fg='green', bold=True)

    def bold(word):
        return click.style(word, bold=True)

    def cyan(word):
        return click.style(word, fg='cyan', bold=True)

    click.echo("Need " + green("help") + "? Found a " + green("bug") + "? Let us " + green("know") + "! :D")
    click.echo("File bug reports on " + bold("GitHub") + " here: " + cyan("https://github.com/Zappa/Zappa"))
    click.echo("And join our " + bold("Slack") + " channel here: " + cyan("https://zappateam.slack.com"))
    click.echo("Love!,")
    click.echo(" ~ Team " + bold("Zappa") + "!")
def disable_click_colors():
    """
    Set a Click context where colors are disabled. Creates a throwaway BaseCommand
    to play nicely with the Context constructor.
    The intended side-effect here is that click.echo() checks this context and will
    suppress colors.
    https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39
    """
    # The command name is never shown; it only satisfies Context's signature.
    ctx = Context(BaseCommand('AllYourBaseAreBelongToUs'))
    ctx.color = False
    # Push onto click's global context stack so subsequent echo() calls see it.
    push_context(ctx)
def handle():  # pragma: no cover
    """
    Main program execution handler.

    Fix: `cli` used to be referenced in the except blocks even when
    `ZappaCLI()` itself raised, producing an UnboundLocalError that masked
    the real error. It is now pre-initialized and guarded.
    """
    cli = None
    try:
        cli = ZappaCLI()
        sys.exit(cli.handle())
    except SystemExit as e:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(e.code)

    except KeyboardInterrupt:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(130)
    except Exception as e:
        if cli:
            cli.on_exit()

        click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(")
        click.echo("\n==============\n")
        import traceback
        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()

        sys.exit(-1)
# Allow invoking the CLI module directly as a script.
if __name__ == '__main__':  # pragma: no cover
    handle()
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
    """
    Permute all casings of a given string.

    A pretty algorithm, via @Amber
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if not input_string:
        yield ""
        return

    head, tail = input_string[:1], input_string[1:]
    # Characters with no case distinction (digits, punctuation) contribute a
    # single branch; cased characters fork into lower/upper variants.
    if head.lower() == head.upper():
        for suffix in all_casings(tail):
            yield head + suffix
    else:
        for suffix in all_casings(tail):
            yield head.lower() + suffix
            yield head.upper() + suffix
class ZappaWSGIMiddleware:
    """
    Middleware functions necessary for a Zappa deployment.

    Most hacks have now been removed except for Set-Cookie permutation.
    """

    def __init__(self, application):
        # The wrapped WSGI application.
        self.application = application

    def __call__(self, environ, start_response):
        """
        We must case-mangle the Set-Cookie header name or AWS will use only a
        single one of these headers.
        """

        def encode_response(status, headers, exc_info=None):
            """
            Lowercase every 'Set-Cookie' header name and move those headers to
            the end of the list; all other headers pass through unharmed.
            Related: https://github.com/Miserlou/Zappa/issues/1965
            """
            def is_set_cookie(header):
                name = header[0]
                return (type(name) == str) and (name.lower() == "set-cookie")

            passthrough = [header for header in headers if not is_set_cookie(header)]
            cookies = [(header[0].lower(), header[1]) for header in headers if is_set_cookie(header)]
            return start_response(status, passthrough + cookies, exc_info)

        # Call the application with our modifier
        response = self.application(environ, encode_response)

        # Return the response as a WSGI-safe iterator
        return ClosingIterator(response)
import base64
import collections
import datetime
import importlib
import inspect
import json
import logging
import os
import sys
import traceback
from builtins import str
from types import ModuleType
from typing import Tuple
from werkzeug.wrappers import Response
# This file may be copied into a project's root,
# so handle both scenarios.
try:
from zappa_packer.middleware import ZappaWSGIMiddleware
from zappa_packer.utilities import DEFAULT_TEXT_MIMETYPES, merge_headers
from zappa_packer.wsgi import common_log, create_wsgi_request
except ImportError: # pragma: no cover
from .middleware import ZappaWSGIMiddleware
from .utilities import DEFAULT_TEXT_MIMETYPES, merge_headers
from .wsgi import common_log, create_wsgi_request
# Set up logging
logging.basicConfig()
logger = logging.getLogger()
# Default to INFO; LambdaHandler.__init__ may raise/lower this via
# settings.LOG_LEVEL once settings are loaded.
logger.setLevel(logging.INFO)
class LambdaHandler:
    """
    Singleton for avoiding duplicate setup.
    Pattern provided by @benbangert.

    One instance is shared per Lambda execution environment so the
    settings module, app import, and WSGI wrapper are only built once.
    """

    __instance = None
    settings = None
    settings_name = None
    session = None

    # Application
    app_module = None
    wsgi_app = None
    trailing_slash = False

    def __new__(cls, settings_name="zappa_settings", session=None):
        """Singleton instance to avoid repeat setup"""
        if LambdaHandler.__instance is None:
            print("Instancing..")
            LambdaHandler.__instance = object.__new__(cls)
        return LambdaHandler.__instance

    def __init__(self, settings_name="zappa_settings", session=None):
        """
        Load the settings module and build the WSGI application exactly
        once; repeat calls on the singleton are no-ops.
        """
        # We haven't cached our settings yet, load the settings and app.
        if not self.settings:
            # Loading settings from a python module
            self.settings = importlib.import_module(settings_name)
            self.settings_name = settings_name
            self.session = session

            # Custom log level
            if self.settings.LOG_LEVEL:
                level = logging.getLevelName(self.settings.LOG_LEVEL)
                logger.setLevel(level)

            # Let the system know that this will be a Lambda/Zappa/Stack
            os.environ["SERVERTYPE"] = "AWS Lambda"
            os.environ["FRAMEWORK"] = "Zappa"
            try:
                os.environ["PROJECT"] = self.settings.PROJECT_NAME
                os.environ["STAGE"] = self.settings.API_STAGE
            except Exception:  # pragma: no cover
                pass

            # Set any locally defined env vars
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            for key in self.settings.ENVIRONMENT_VARIABLES.keys():
                os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key]

            # Pulling from S3 if given a zip path
            # We avoid using the boto3 dependency here.
            # project_archive_path = getattr(self.settings, "ARCHIVE_PATH", None)
            # if project_archive_path:
            #     self.load_remote_project_archive(project_archive_path)

            # Load compiled library to the PythonPath
            # checks if we are the slim_handler since this is not needed otherwise
            # https://github.com/Miserlou/Zappa/issues/776
            is_slim_handler = getattr(self.settings, "SLIM_HANDLER", False)
            if is_slim_handler:
                included_libraries = getattr(self.settings, "INCLUDE", [])
                try:
                    from ctypes import cdll

                    for library in included_libraries:
                        try:
                            cdll.LoadLibrary(os.path.join(os.getcwd(), library))
                        except OSError:
                            print("Failed to find library: {}...right filename?".format(library))
                except ImportError:
                    print("Failed to import cytpes library")

            # This is a non-WSGI application
            # https://github.com/Miserlou/Zappa/pull/748
            if not hasattr(self.settings, "APP_MODULE") and not self.settings.DJANGO_SETTINGS:
                self.app_module = None
                wsgi_app_function = None
            # This is probably a normal WSGI app (Or django with overloaded wsgi application)
            # https://github.com/Miserlou/Zappa/issues/1164
            elif hasattr(self.settings, "APP_MODULE"):
                if self.settings.DJANGO_SETTINGS:
                    sys.path.append("/var/task")
                    from django.conf import (
                        ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE,
                    )

                    # add the Lambda root path into the sys.path
                    self.trailing_slash = True
                    os.environ[SETTINGS_ENVIRONMENT_VARIABLE] = self.settings.DJANGO_SETTINGS
                else:
                    self.trailing_slash = False

                # The app module
                self.app_module = importlib.import_module(self.settings.APP_MODULE)

                # The application
                wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
            # Django gets special treatment.
            else:
                try:  # Support both for tests
                    from zappa.ext.django_zappa import get_django_wsgi
                except ImportError:  # pragma: no cover
                    from django_zappa_app import get_django_wsgi

                # Get the Django WSGI app from our extension
                wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
                self.trailing_slash = True

            self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function)

    @staticmethod
    def import_module_and_get_function(whole_function):
        """
        Given a modular path to a function, import that module
        and return the function.
        """
        module, function = whole_function.rsplit(".", 1)
        app_module = importlib.import_module(module)
        app_function = getattr(app_module, function)
        return app_function

    @classmethod
    def lambda_handler(cls, event, context):  # pragma: no cover
        # `global_handler` is set at module import time when
        # INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT is set; otherwise lazily
        # construct (or reuse) the singleton here.
        handler = global_handler or cls()
        exception_handler = handler.settings.EXCEPTION_HANDLER
        try:
            return handler.handler(event, context)
        except Exception as ex:
            exception_processed = cls._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=ex,
            )
            if not exception_processed:
                # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry
                # an event execution in case of failure.
                raise

    @classmethod
    def _process_exception(cls, exception_handler, event, context, exception):
        """
        Run the user-configured exception handler, if any. Returns True
        when the handler reports the exception as fully processed.
        """
        exception_processed = False
        if exception_handler:
            try:
                handler_function = cls.import_module_and_get_function(exception_handler)
                exception_processed = handler_function(exception, event, context)
            except Exception as cex:
                logger.error(msg="Failed to process exception via custom handler.")
                print(cex)
        return exception_processed

    @staticmethod
    def _process_response_body(response: Response, settings: ModuleType) -> Tuple[str, bool]:
        """
        Perform Response body encoding/decoding

        Related: https://github.com/zappa/Zappa/issues/908
        API Gateway requires binary data be base64 encoded:
        https://aws.amazon.com/blogs/compute/handling-binary-data-using-amazon-api-gateway-http-apis/

        When BINARY_SUPPORT is enabled the body is base64 encoded in the following cases:
            - Content-Encoding defined, commonly used to specify compression (br/gzip/deflate/etc)
              https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
              Content like this must be transmitted as b64.
            - Response assumed binary when Response.mimetype does
              not start with an entry defined in 'handle_as_text_mimetypes'
        """
        encode_body_as_base64 = False
        if settings.BINARY_SUPPORT:
            handle_as_text_mimetypes = DEFAULT_TEXT_MIMETYPES
            additional_text_mimetypes = getattr(settings, "ADDITIONAL_TEXT_MIMETYPES", None)
            if additional_text_mimetypes:
                handle_as_text_mimetypes += tuple(additional_text_mimetypes)

            if response.headers.get("Content-Encoding"):  # Assume br/gzip/deflate/etc encoding
                encode_body_as_base64 = True
            # werkzeug Response.mimetype: lowercase without parameters
            # https://werkzeug.palletsprojects.com/en/2.2.x/wrappers/#werkzeug.wrappers.Request.mimetype
            elif not response.mimetype.startswith(handle_as_text_mimetypes):
                encode_body_as_base64 = True

        if encode_body_as_base64:
            body = base64.b64encode(response.data).decode("utf8")
        else:
            # response.data decoded by werkzeug
            # https://werkzeug.palletsprojects.com/en/2.2.x/wrappers/#werkzeug.wrappers.Request.get_data
            body = response.get_data(as_text=True)

        return body, encode_body_as_base64

    @staticmethod
    def run_function(app_function, event, context):
        """
        Given a function and event context,
        detect signature and execute, returning any result.
        """
        # getargspec does not support python 3 method with type hints
        # Related issue: https://github.com/Miserlou/Zappa/issues/1452
        if hasattr(inspect, "getfullargspec"):  # Python 3
            args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(app_function)
        else:  # Python 2
            args, varargs, keywords, defaults = inspect.getargspec(app_function)
        num_args = len(args)
        if num_args == 0:
            result = app_function(event, context) if varargs else app_function()
        elif num_args == 1:
            result = app_function(event, context) if varargs else app_function(event)
        elif num_args == 2:
            result = app_function(event, context)
        else:
            raise RuntimeError(
                "Function signature is invalid. Expected a function that accepts at most " "2 arguments or varargs."
            )
        return result

    def get_function_for_aws_event(self, record):
        """
        Get the associated function to execute for a triggered AWS event

        Support S3, SNS, DynamoDB, kinesis and SQS events
        """
        if "s3" in record:
            if ":" in record["s3"]["configurationId"]:
                return record["s3"]["configurationId"].split(":")[-1]

        arn = None
        if "Sns" in record:
            try:
                message = json.loads(record["Sns"]["Message"])
                if message.get("command"):
                    return message["command"]
            except ValueError:
                pass
            arn = record["Sns"].get("TopicArn")
        elif "dynamodb" in record or "kinesis" in record:
            arn = record.get("eventSourceARN")
        elif "eventSource" in record and record.get("eventSource") == "aws:sqs":
            arn = record.get("eventSourceARN")
        elif "s3" in record:
            arn = record["s3"]["bucket"]["arn"]

        if arn:
            return self.settings.AWS_EVENT_MAPPING.get(arn)

        return None

    def get_function_from_bot_intent_trigger(self, event):
        """
        For the given event build ARN and return the configured function
        """
        intent = event.get("currentIntent")
        if intent:
            intent = intent.get("name")
            if intent:
                return self.settings.AWS_BOT_EVENT_MAPPING.get("{}:{}".format(intent, event.get("invocationSource")))

    def get_function_for_cognito_trigger(self, trigger):
        """
        Get the associated function to execute for a cognito trigger
        """
        print(
            "get_function_for_cognito_trigger",
            self.settings.COGNITO_TRIGGER_MAPPING,
            trigger,
            self.settings.COGNITO_TRIGGER_MAPPING.get(trigger),
        )
        return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)

    def handler(self, event, context):
        """
        An AWS Lambda function which parses specific API Gateway input into a
        WSGI request, feeds it to our WSGI app, processes the response, and returns
        that back to the API Gateway.

        Non-HTTP invocations (scheduled events, direct commands, AWS event
        sources, Lex bots, authorizers, Cognito triggers, CloudWatch log
        subscriptions) are dispatched before the web-app flow below.
        """
        settings = self.settings

        # If in DEBUG mode, log all raw incoming events.
        if settings.DEBUG:
            logger.debug("Zappa Event: {}".format(event))

        # Set any API Gateway defined Stage Variables
        # as env vars
        if event.get("stageVariables"):
            for key in event["stageVariables"].keys():
                os.environ[str(key)] = event["stageVariables"][key]

        # This is the result of a keep alive, recertify
        # or scheduled event.
        if event.get("detail-type") == "Scheduled Event":
            whole_function = event["resources"][0].split("/")[-1].split("-")[-1]

            # This is a scheduled function.
            if "." in whole_function:
                app_function = self.import_module_and_get_function(whole_function)

                # Execute the function!
                return self.run_function(app_function, event, context)

            # Else, let this execute as it were.

        # This is a direct command invocation.
        elif event.get("command", None):
            whole_function = event["command"]
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            print("Result of %s:" % whole_function)
            print(result)
            return result

        # This is a direct, raw python invocation.
        # It's _extremely_ important we don't allow this event source
        # to be overridden by unsanitized, non-admin user input.
        elif event.get("raw_command", None):
            raw_command = event["raw_command"]
            exec(raw_command)
            return

        # This is a Django management command invocation.
        elif event.get("manage", None):
            from django.core import management

            try:  # Support both for tests
                from zappa.ext.django_zappa import get_django_wsgi
            except ImportError:  # pragma: no cover
                from django_zappa_app import get_django_wsgi

            # Get the Django WSGI app from our extension
            # We don't actually need the function,
            # but we do need to do all of the required setup for it.
            app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)

            # Couldn't figure out how to get the value into stdout with StringIO..
            # Read the log for now. :[]
            management.call_command(*event["manage"].split(" "))
            return {}

        # This is an AWS-event triggered invocation.
        elif event.get("Records", None):
            records = event.get("Records")
            result = None
            whole_function = self.get_function_for_aws_event(records[0])
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # this is an AWS-event triggered from Lex bot's intent
        elif event.get("bot"):
            result = None
            whole_function = self.get_function_from_bot_intent_trigger(event)
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # This is an API Gateway authorizer event
        elif event.get("type") == "TOKEN":
            whole_function = self.settings.AUTHORIZER_FUNCTION
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                policy = self.run_function(app_function, event, context)
                return policy
            else:
                logger.error("Cannot find a function to process the authorization request.")
                raise Exception("Unauthorized")

        # This is an AWS Cognito Trigger Event
        elif event.get("triggerSource", None):
            triggerSource = event.get("triggerSource")
            whole_function = self.get_function_for_cognito_trigger(triggerSource)
            result = event
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource))
            return result

        # This is a CloudWatch event
        # Related: https://github.com/Miserlou/Zappa/issues/1924
        elif event.get("awslogs", None):
            result = None
            whole_function = "{}.{}".format(settings.APP_MODULE, settings.APP_FUNCTION)
            app_function = self.import_module_and_get_function(whole_function)
            if app_function:
                result = self.run_function(app_function, event, context)
                logger.debug("Result of %s:" % whole_function)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # Normal web app flow
        try:
            # Timing
            time_start = datetime.datetime.now()

            # This is a normal HTTP request
            if event.get("httpMethod", None):
                script_name = ""
                is_elb_context = False
                headers = merge_headers(event)

                if event.get("requestContext", None) and event["requestContext"].get("elb", None):
                    # Related: https://github.com/Miserlou/Zappa/issues/1715
                    # inputs/outputs for lambda loadbalancer
                    # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
                    is_elb_context = True
                    # host is lower-case when forwarded from ELB
                    host = headers.get("host")
                    # TODO: pathParameters is a first-class citizen in apigateway but not available without
                    # some parsing work for ELB (is this parameter used for anything?)
                    event["pathParameters"] = ""
                else:
                    if headers:
                        host = headers.get("Host")
                    else:
                        host = None

                logger.debug("host found: [{}]".format(host))

                if host:
                    if "amazonaws.com" in host:
                        logger.debug("amazonaws found in host")
                        # The path provided in the event doesn't include the
                        # stage, so we must tell Flask to include the API
                        # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
                        script_name = "/" + settings.API_STAGE
                else:
                    # This is a test request sent from the AWS console
                    if settings.DOMAIN:
                        # Assume the requests received will be on the specified
                        # domain. No special handling is required
                        pass
                    else:
                        # Assume the requests received will be to the
                        # amazonaws.com endpoint, so tell Flask to include the
                        # API stage
                        script_name = "/" + settings.API_STAGE

                base_path = getattr(settings, "BASE_PATH", None)

                # Create the environment for WSGI and handle the request
                environ = create_wsgi_request(
                    event,
                    script_name=script_name,
                    base_path=base_path,
                    trailing_slash=self.trailing_slash,
                    binary_support=settings.BINARY_SUPPORT,
                    context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS,
                )

                # We are always on https on Lambda, so tell our wsgi app that.
                environ["HTTPS"] = "on"
                environ["wsgi.url_scheme"] = "https"
                environ["lambda.context"] = context
                environ["lambda.event"] = event

                # Execute the application
                with Response.from_app(self.wsgi_app, environ) as response:
                    # This is the object we're going to return.
                    # Pack the WSGI response into our special dictionary.
                    zappa_returndict = dict()

                    # Issue #1715: ALB support. ALB responses must always include
                    # base64 encoding and status description
                    if is_elb_context:
                        zappa_returndict.setdefault("isBase64Encoded", False)
                        zappa_returndict.setdefault("statusDescription", response.status)

                    if response.data:
                        processed_body, is_base64_encoded = self._process_response_body(response, settings=settings)
                        zappa_returndict["body"] = processed_body
                        if is_base64_encoded:
                            zappa_returndict["isBase64Encoded"] = is_base64_encoded

                    zappa_returndict["statusCode"] = response.status_code
                    if "headers" in event:
                        zappa_returndict["headers"] = {}
                        for key, value in response.headers:
                            zappa_returndict["headers"][key] = value
                    if "multiValueHeaders" in event:
                        zappa_returndict["multiValueHeaders"] = {}
                        for key, value in response.headers:
                            zappa_returndict["multiValueHeaders"][key] = response.headers.getlist(key)

                    # Calculate the total response time,
                    # and log it in the Common Log format.
                    time_end = datetime.datetime.now()
                    delta = time_end - time_start
                    response_time_ms = delta.total_seconds() * 1000
                    response.content = response.data
                    common_log(environ, response, response_time=response_time_ms)

                    return zappa_returndict
        except Exception as e:  # pragma: no cover
            # Print statements are visible in the logs either way
            print(e)
            exc_info = sys.exc_info()
            message = (
                "An uncaught exception happened while servicing this request. "
                "You can investigate this with the `zappa tail` command."
            )

            # If we didn't even build an app_module, just raise.
            if not settings.DJANGO_SETTINGS:
                try:
                    self.app_module
                except NameError as ne:
                    # NOTE(review): NameError has no `.message` attribute on
                    # Python 3, so this line would itself raise if ever hit —
                    # confirm and fix separately.
                    message = "Failed to import module: {}".format(ne.message)

            # Call exception handler for unhandled exceptions
            exception_handler = self.settings.EXCEPTION_HANDLER
            self._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=e,
            )

            # Return this unspecified exception as a 500, using template that API Gateway expects.
            content = collections.OrderedDict()
            content["statusCode"] = 500
            body = {"message": message}
            if settings.DEBUG:  # only include traceback if debug is on.
                body["traceback"] = traceback.format_exception(*exc_info)  # traceback as a list for readability.
            content["body"] = json.dumps(str(body), sort_keys=True, indent=4)
            return content
def lambda_handler(event, context):  # pragma: no cover
    # Module-level entry point referenced by the Lambda configuration;
    # delegates to LambdaHandler.lambda_handler.
    return LambdaHandler.lambda_handler(event, context)
def keep_warm_callback(event, context):
    """Method is triggered by the CloudWatch event scheduled when keep_warm setting is set to true."""
    # Invoked on a schedule purely so the Lambda container (and the web app) stays initialized.
    lambda_handler(event={}, context=context)  # overriding event with an empty one so that web app initialization will
    # be triggered.
# Optionally build the handler at import time (opt-in via environment variable)
# so the first real request does not pay the app-initialization cost.
global_handler = None
if os.environ.get("INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT"):
    global_handler = LambdaHandler()
import getpass
import glob
import json
import logging
import os
import re
import shutil
import requests
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
from typing import Optional
from setuptools import find_packages
import sys
import stat
from tqdm import tqdm
##
# Logging Config
##

# Module-level logger used for packaging progress and warnings.
logging.basicConfig(format="%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
# Glob-style patterns ultimately handed to shutil.ignore_patterns() when
# staging the project and site-packages trees for the archive.
ZIP_EXCLUDES = [
    "*.exe",
    "*.DS_Store",
    "*.Python",
    "*.git",
    ".git/*",
    "*.zip",
    "*.tar.gz",
    "*.hg",
    "pip",
    "docutils*",
    "setuputils*",
    "__pycache__/*",
]
##
# Classes
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.
    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.
    """

    def _copy_entry(source_dir, target_dir, entry_name):
        # Copy a single directory entry, dispatching on its type.
        s = os.path.join(source_dir, entry_name)
        d = os.path.join(target_dir, entry_name)
        if symlinks and os.path.islink(s):  # pragma: no cover
            # Recreate the symlink rather than copying its target.
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    mode = stat.S_IMODE(os.lstat(s).st_mode)
                    os.lchmod(d, mode)
                except Exception:
                    pass  # lchmod not available
        elif os.path.isdir(s):
            copytree(s, d, metadata, symlinks, ignore)
        elif metadata:
            shutil.copy2(s, d)
        else:
            shutil.copy(s, d)

    try:
        entries = os.listdir(src)
        if not os.path.exists(dst):
            os.makedirs(dst)
        if metadata:
            shutil.copystat(src, dst)
    except NotADirectoryError:  # src is a plain file (e.g. an egg-link), not a directory
        _copy_entry(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
        return

    if ignore:
        excluded = ignore(src, entries)
        entries = [name for name in entries if name not in excluded]

    for entry_name in entries:
        _copy_entry(src, dst, entry_name)
def contains_python_files_or_subdirs(folder):
    """
    Checks (recursively) if the directory contains .py or .pyc files

    Returns True as soon as any .py/.pyc file is found at any depth under
    `folder`, otherwise False.
    """
    # os.walk already recurses into every subdirectory, so a single pass over
    # `files` at each level is sufficient. (The previous implementation also
    # ran a nested os.walk(d) on bare directory *names*, which resolved
    # relative to the CWD instead of `root` and so inspected unrelated paths.)
    for root, dirs, files in os.walk(folder):
        if any(filename.endswith((".py", ".pyc")) for filename in files):
            return True
    return False
def conflicts_with_a_neighbouring_module(directory_path):
    """
    Checks if a directory lies in the same directory as a .py file with the same name.
    """
    # A package directory "foo" conflicts when a sibling file "foo.py" exists.
    parent_dir, dir_name = os.path.split(os.path.normpath(directory_path))
    sibling_module = dir_name + ".py"
    return sibling_module in os.listdir(parent_dir)
class Zappa:
    """
    Zappa!

    Makes it easy to run Python web applications on AWS Lambda/API Gateway.
    This class handles packaging the project and its dependencies into a
    Lambda-ready archive.
    """

    ##
    # Configurables
    ##

    http_methods = ["ANY"]  # API Gateway method(s) routed to the handler
    role_name = "ZappaLambdaExecution"  # default IAM execution role name
    extra_permissions = None
    apigateway_policy = None
    cloudwatch_log_levels = ["OFF", "ERROR", "INFO"]
    xray_tracing = False
def __init__(
self,
runtime="python3.7" # Detected at runtime in CLI
):
"""
Instantiate this new Zappa instance, loading any custom credentials if necessary.
"""
self.runtime = runtime
if self.runtime == "python3.7":
self.manylinux_suffix_start = "cp37m"
elif self.runtime == "python3.8":
# The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
# See https://github.com/pypa/manylinux for a more detailed explanation
self.manylinux_suffix_start = "cp38"
elif self.runtime == "python3.9":
self.manylinux_suffix_start = "cp39"
else:
self.manylinux_suffix_start = "cp310"
# AWS Lambda supports manylinux1/2010, manylinux2014, and manylinux_2_24
manylinux_suffixes = ("_2_24", "2014", "2010", "1")
self.manylinux_wheel_file_match = re.compile(
rf'^.*{self.manylinux_suffix_start}-(manylinux_\d+_\d+_x86_64[.])?manylinux({"|".join(manylinux_suffixes)})_x86_64[.]whl$' # noqa: E501
)
self.manylinux_wheel_abi3_file_match = re.compile(
rf'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
)
    ##
    # Packaging
    ##

    def copy_editable_packages(self, egg_links, temp_package_path):
        """
        Copy editable (pip install -e) packages into the staging directory.

        egg_links: paths to *.egg-link files found in site-packages; the first
        line of each file points at the source checkout of the editable package.
        temp_package_path: staging directory the packages are copied into.
        """
        for egg_link in egg_links:
            with open(egg_link, "rb") as df:
                egg_path = df.read().decode("utf-8").splitlines()[0].strip()
                # Copy only the top-level packages found in the checkout.
                pkgs = set([x.split(".")[0] for x in find_packages(egg_path, exclude=["test", "tests"])])
                for pkg in pkgs:
                    copytree(
                        os.path.join(egg_path, pkg),
                        os.path.join(temp_package_path, pkg),
                        metadata=False,
                        symlinks=False,
                    )

        if temp_package_path:
            # now remove any egg-links as they will cause issues if they still exist
            for link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
                os.remove(link)
    def get_deps_list(self, pkg_name, installed_distros=None):
        """
        For a given package, returns a list of required packages. Recursive.

        Returns de-duplicated (project_name, version) tuples for the package
        and its transitive dependency chain, resolved against the current
        working set (or the one passed in via `installed_distros`).
        """
        # https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
        # instead of `pip` is the recommended approach. The usage is nearly
        # identical.
        import pkg_resources

        deps = []
        if not installed_distros:
            installed_distros = pkg_resources.WorkingSet()
        for package in installed_distros:
            if package.project_name.lower() == pkg_name.lower():
                deps = [(package.project_name, package.version)]
                for req in package.requires():
                    deps += self.get_deps_list(pkg_name=req.project_name, installed_distros=installed_distros)
        return list(set(deps))  # de-dupe before returning
    def create_handler_venv(self, use_zappa_release: Optional[str] = None):
        """
        Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

        Returns the path of the new "handler_venv" folder.
        Raises EnvironmentError if the pip install step fails.
        """
        import subprocess

        # We will need the currenv venv to pull Zappa from
        current_venv = self.get_current_venv()

        # Make a new folder for the handler packages
        ve_path = os.path.join(os.getcwd(), "handler_venv")

        # site-packages layout differs between Windows and POSIX venvs.
        if os.sys.platform == "win32":
            current_site_packages_dir = os.path.join(current_venv, "Lib", "site-packages")
            venv_site_packages_dir = os.path.join(ve_path, "Lib", "site-packages")
        else:
            current_site_packages_dir = os.path.join(current_venv, "lib", self.get_venv_from_python_version(), "site-packages")
            venv_site_packages_dir = os.path.join(ve_path, "lib", self.get_venv_from_python_version(), "site-packages")

        if not os.path.isdir(venv_site_packages_dir):
            os.makedirs(venv_site_packages_dir)

        # Copy zappa* to the new virtualenv
        zappa_things = [z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == "zappa"]
        for z in zappa_things:
            copytree(
                os.path.join(current_site_packages_dir, z),
                os.path.join(venv_site_packages_dir, z),
            )

        # Use pip to download zappa's dependencies.
        # Copying from current venv causes issues with things like PyYAML that installs as yaml
        zappa_deps = self.get_deps_list("zappa")
        pkg_list = []
        for dep, version in zappa_deps:
            # allow specified zappa version for slim_handler_test
            if dep == "zappa" and use_zappa_release:
                pkg_version_str = f"{dep}=={use_zappa_release}"
            else:
                pkg_version_str = f"{dep}=={version}"
            pkg_list.append(pkg_version_str)

        # Need to manually add setuptools
        pkg_list.append("setuptools")
        command = [
            "pip",
            "install",
            "--quiet",
            "--target",
            venv_site_packages_dir,
        ] + pkg_list

        # This is the recommended method for installing packages if you don't
        # to depend on `setuptools`
        # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
        pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # Using communicate() to avoid deadlocks
        pip_process.communicate()
        pip_return_code = pip_process.returncode

        if pip_return_code:
            raise EnvironmentError("Pypi lookup failed")

        return ve_path
    # staticmethod as per https://github.com/Miserlou/Zappa/issues/780
    @staticmethod
    def get_current_venv():
        """
        Returns the path to the current virtualenv

        Resolution order: the VIRTUAL_ENV environment variable first, then the
        active pyenv version (when the `pyenv` executable is available),
        otherwise None.
        """
        if "VIRTUAL_ENV" in os.environ:
            venv = os.environ["VIRTUAL_ENV"]
            return venv

        # pyenv available check
        try:  # progma: no cover
            subprocess.check_output(["pyenv", "help"], stderr=subprocess.STDOUT)
            pyenv_available = True
        except OSError:
            # pyenv executable not found on PATH.
            pyenv_available = False

        if pyenv_available:  # progma: no cover
            # Each Python version is installed into its own directory under $(pyenv root)/versions
            # https://github.com/pyenv/pyenv#locating-pyenv-provided-python-installations
            # Related: https://github.com/zappa/Zappa/issues/1132
            pyenv_root = subprocess.check_output(["pyenv", "root"]).decode("utf-8").strip()
            pyenv_version = subprocess.check_output(["pyenv", "version-name"]).decode("utf-8").strip()
            venv = os.path.join(pyenv_root, "versions", pyenv_version)
            return venv

        return None
def get_venv_from_python_version(self):
return "python{}.{}".format(*sys.version_info)
    def create_lambda_zip(
        self,
        prefix="lambda_package",
        handler_file=None,
        slim_handler=False,
        minify=True,
        exclude=None,
        exclude_glob=None,
        use_precompiled_packages=True,
        include=None,
        venv=None,
        output=None,
        disable_progress=False,
        archive_format="zip",
    ):
        """
        Create a Lambda-ready zip file of the current virtualenvironment and working directory.
        Returns path to that file.

        NOTE(review): the `include` parameter is accepted but never referenced
        in this implementation — confirm whether it is still needed by callers.
        """
        # Validate archive_format
        if archive_format not in ["zip", "tarball"]:
            raise KeyError("The archive format to create a lambda package must be zip or tarball")

        # Pip is a weird package.
        # Calling this function in some environments without this can cause.. funkiness.
        import pip  # noqa: 547

        if not venv:
            venv = self.get_current_venv()

        build_time = str(int(time.time()))
        cwd = os.getcwd()
        if not output:
            if archive_format == "zip":
                archive_fname = prefix + "-" + build_time + ".zip"
            elif archive_format == "tarball":
                archive_fname = prefix + "-" + build_time + ".tar.gz"
        else:
            archive_fname = output
        archive_path = os.path.join(cwd, archive_fname)

        # Files that should be excluded from the zip
        if exclude is None:
            exclude = list()
        if exclude_glob is None:
            exclude_glob = list()

        # Exclude the zip itself
        exclude.append(archive_path)

        # Make sure that 'concurrent' is always forbidden.
        # https://github.com/Miserlou/Zappa/issues/827
        if "concurrent" not in exclude:
            exclude.append("concurrent")

        def splitpath(path):
            # Split a path into its normalized components, root-first.
            parts = []
            (path, tail) = os.path.split(path)
            while path and tail:
                parts.append(tail)
                (path, tail) = os.path.split(path)
            parts.append(os.path.join(path, tail))
            return list(map(os.path.normpath, parts))[::-1]

        split_venv = splitpath(venv)
        split_cwd = splitpath(cwd)

        # Ideally this should be avoided automatically,
        # but this serves as an okay stop-gap measure.
        if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
            print(
                "Warning! Your project and virtualenv have the same name! You may want "
                "to re-create your venv with a new name, or explicitly define a "
                "'project_name', as this may cause errors."
            )

        # First, do the project..
        temp_project_path = tempfile.mkdtemp(prefix="zappa-project")

        if not slim_handler:
            # Slim handler does not take the project files.
            if minify:
                # Related: https://github.com/Miserlou/Zappa/issues/744
                excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
                copytree(
                    cwd,
                    temp_project_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(cwd, temp_project_path, metadata=False, symlinks=False)
            for glob_path in exclude_glob:
                for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                    try:
                        os.remove(path)
                    except OSError:  # is a directory
                        shutil.rmtree(path)

        # If a handler_file is supplied, copy that to the root of the package,
        # because that's where AWS Lambda looks for it. It can't be inside a package.
        if handler_file:
            filename = handler_file.split(os.sep)[-1]
            shutil.copy(handler_file, os.path.join(temp_project_path, filename))

        # Create and populate package ID file and write to temp project path
        package_info = {}
        package_info["uuid"] = str(uuid.uuid4())
        package_info["build_time"] = build_time
        package_info["build_platform"] = os.sys.platform
        package_info["build_user"] = getpass.getuser()
        # TODO: Add git head and info?

        # Ex, from @scoates:
        # def _get_git_branch():
        #     chdir(DIR)
        #     out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
        #     lambci_branch = environ.get('LAMBCI_BRANCH', None)
        #     if out == "HEAD" and lambci_branch:
        #         out += " lambci:{}".format(lambci_branch)
        #     return out

        # def _get_git_hash():
        #     chdir(DIR)
        #     return check_output(['git', 'rev-parse', 'HEAD']).strip()

        # def _get_uname():
        #     return check_output(['uname', '-a']).strip()

        # def _get_user():
        #     return check_output(['whoami']).strip()

        # def set_id_info(zappa_cli):
        #     build_info = {
        #         'branch': _get_git_branch(),
        #         'hash': _get_git_hash(),
        #         'build_uname': _get_uname(),
        #         'build_user': _get_user(),
        #         'build_time': datetime.datetime.utcnow().isoformat(),
        #     }
        #     with open(path.join(DIR, 'id_info.json'), 'w') as f:
        #         json.dump(build_info, f)
        #     return True

        package_id_file = open(os.path.join(temp_project_path, "package_info.json"), "w")
        dumped = json.dumps(package_info, indent=4)
        try:
            package_id_file.write(dumped)
        except TypeError:  # This is a Python 2/3 issue. TODO: Make pretty!
            package_id_file.write(str(dumped))
        package_id_file.close()

        # Then, do site site-packages..
        egg_links = []
        temp_package_path = tempfile.mkdtemp(prefix="zappa-packages")
        if os.sys.platform == "win32":
            site_packages = os.path.join(venv, "Lib", "site-packages")
        else:
            site_packages = os.path.join(venv, "lib", self.get_venv_from_python_version(), "site-packages")
        egg_links.extend(glob.glob(os.path.join(site_packages, "*.egg-link")))

        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(
                site_packages,
                temp_package_path,
                metadata=False,
                symlinks=False,
                ignore=shutil.ignore_patterns(*excludes),
            )
        else:
            copytree(site_packages, temp_package_path, metadata=False, symlinks=False)

        # We may have 64-bin specific packages too.
        site_packages_64 = os.path.join(venv, "lib64", self.get_venv_from_python_version(), "site-packages")
        if os.path.exists(site_packages_64):
            egg_links.extend(glob.glob(os.path.join(site_packages_64, "*.egg-link")))
            if minify:
                excludes = ZIP_EXCLUDES + exclude
                copytree(
                    site_packages_64,
                    temp_package_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(site_packages_64, temp_package_path, metadata=False, symlinks=False)

        if egg_links:
            self.copy_editable_packages(egg_links, temp_package_path)

        # Merge the site-packages staging area over the project staging area.
        copy_tree(temp_package_path, temp_project_path, update=True)

        # Then the pre-compiled packages..
        if use_precompiled_packages:
            print("Downloading and installing dependencies..")
            installed_packages = self.get_installed_packages(site_packages, site_packages_64)

            try:
                for (
                    installed_package_name,
                    installed_package_version,
                ) in installed_packages.items():
                    cached_wheel_path = self.get_cached_manylinux_wheel(
                        installed_package_name,
                        installed_package_version,
                        disable_progress,
                    )
                    if cached_wheel_path:
                        # Otherwise try to use manylinux packages from PyPi..
                        # Related: https://github.com/Miserlou/Zappa/issues/398
                        shutil.rmtree(
                            os.path.join(temp_project_path, installed_package_name),
                            ignore_errors=True,
                        )
                        with zipfile.ZipFile(cached_wheel_path) as zfile:
                            zfile.extractall(temp_project_path)
            except Exception as e:
                print(e)
                # XXX - What should we do here?

        # Cleanup
        for glob_path in exclude_glob:
            for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                try:
                    os.remove(path)
                except OSError:  # is a directory
                    shutil.rmtree(path)

        # Then archive it all up..
        if archive_format == "zip":
            print("Packaging project as zip.")
            try:
                compression_method = zipfile.ZIP_DEFLATED
            except ImportError:  # pragma: no cover
                compression_method = zipfile.ZIP_STORED
            archivef = zipfile.ZipFile(archive_path, "w", compression_method)
        elif archive_format == "tarball":
            print("Packaging project as gzipped tarball.")
            archivef = tarfile.open(archive_path, "w|gz")

        for root, dirs, files in os.walk(temp_project_path):
            for filename in files:
                # Skip .pyc files for Django migrations
                # https://github.com/Miserlou/Zappa/issues/436
                # https://github.com/Miserlou/Zappa/issues/464
                if filename[-4:] == ".pyc" and root[-10:] == "migrations":
                    continue

                # If there is a .pyc file in this package,
                # we can skip the python source code as we'll just
                # use the compiled bytecode anyway..
                if filename[-3:] == ".py" and root[-10:] != "migrations":
                    abs_filname = os.path.join(root, filename)
                    abs_pyc_filename = abs_filname + "c"
                    if os.path.isfile(abs_pyc_filename):
                        # but only if the pyc is older than the py,
                        # otherwise we'll deploy outdated code!
                        py_time = os.stat(abs_filname).st_mtime
                        pyc_time = os.stat(abs_pyc_filename).st_mtime
                        if pyc_time > py_time:
                            continue

                # Make sure that the files are all correctly chmodded
                # Related: https://github.com/Miserlou/Zappa/issues/484
                # Related: https://github.com/Miserlou/Zappa/issues/682
                os.chmod(os.path.join(root, filename), 0o755)

                if archive_format == "zip":
                    # Actually put the file into the proper place in the zip
                    # Related: https://github.com/Miserlou/Zappa/pull/716
                    zipi = zipfile.ZipInfo(os.path.join(root.replace(temp_project_path, "").lstrip(os.sep), filename))
                    zipi.create_system = 3
                    zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.writestr(zipi, f.read(), compression_method)
                elif archive_format == "tarball":
                    tarinfo = tarfile.TarInfo(os.path.join(root.replace(temp_project_path, "").lstrip(os.sep), filename))
                    tarinfo.mode = 0o755
                    # NOTE(review): this local `stat` shadows the module-level
                    # `import stat` for the rest of this scope — harmless here
                    # (the module is not used again below), but worth renaming.
                    stat = os.stat(os.path.join(root, filename))
                    tarinfo.mtime = stat.st_mtime
                    tarinfo.size = stat.st_size
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.addfile(tarinfo, f)

            # Create python init file if it does not exist
            # Only do that if there are sub folders or python files and does not conflict with a neighbouring module
            # Related: https://github.com/Miserlou/Zappa/issues/766
            if not contains_python_files_or_subdirs(root):
                # if the directory does not contain any .py file at any level, we can skip the rest
                # NOTE(review): `d` is a directory basename while `root` is an
                # absolute path, so this filter appears to never remove
                # anything — confirm the intended pruning behavior.
                dirs[:] = [d for d in dirs if d != root]
            else:
                if "__init__.py" not in files and not conflicts_with_a_neighbouring_module(root):
                    tmp_init = os.path.join(temp_project_path, "__init__.py")
                    open(tmp_init, "a").close()
                    os.chmod(tmp_init, 0o755)

                    arcname = os.path.join(
                        root.replace(temp_project_path, ""),
                        os.path.join(root.replace(temp_project_path, ""), "__init__.py"),
                    )
                    if archive_format == "zip":
                        archivef.write(tmp_init, arcname)
                    elif archive_format == "tarball":
                        archivef.add(tmp_init, arcname)

        # And, we're done!
        archivef.close()

        # Trash the temp directory
        shutil.rmtree(temp_project_path)
        shutil.rmtree(temp_package_path)
        if os.path.isdir(venv) and slim_handler:
            # Remove the temporary handler venv folder
            shutil.rmtree(venv)

        return archive_fname
    @staticmethod
    def get_installed_packages(site_packages, site_packages_64):
        """
        Returns a dict of installed packages that Zappa cares about.

        Maps lowercased project names to versions for every distribution in
        the current working set that either appears in the given
        site-packages directory listings or is installed into one of them.
        """
        import pkg_resources

        package_to_keep = []
        if os.path.isdir(site_packages):
            package_to_keep += os.listdir(site_packages)
        if os.path.isdir(site_packages_64):
            package_to_keep += os.listdir(site_packages_64)

        package_to_keep = [x.lower() for x in package_to_keep]

        installed_packages = {
            package.project_name.lower(): package.version
            for package in pkg_resources.WorkingSet()
            if package.project_name.lower() in package_to_keep
            or package.location.lower() in [site_packages.lower(), site_packages_64.lower()]
        }
        return installed_packages
    @staticmethod
    def download_url_with_progress(url, stream, disable_progress):
        """
        Downloads a given url in chunks and writes to the provided stream (can be any io stream).
        Displays the progress bar for the download.
        """
        # Request timeout (seconds) is configurable via the PIP_TIMEOUT environment variable.
        resp = requests.get(url, timeout=float(os.environ.get("PIP_TIMEOUT", 2)), stream=True)
        resp.raw.decode_content = True

        progress = tqdm(
            unit="B",
            unit_scale=True,
            total=int(resp.headers.get("Content-Length", 0)),
            disable=disable_progress,
        )
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk:
                progress.update(len(chunk))
                stream.write(chunk)

        progress.close()
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), "cached_wheels")
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
else:
# Check if we already have a cached copy
wheel_name = re.sub(r"[^\w\d.]+", "_", package_name, re.UNICODE)
wheel_file = f"{wheel_name}-{package_version}-*_x86_64.whl"
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
for pathname in glob.iglob(wheel_path):
if re.match(self.manylinux_wheel_file_match, pathname) or re.match(
self.manylinux_wheel_abi3_file_match, pathname
):
print(f" - {package_name}=={package_version}: Using locally cached manylinux wheel")
return pathname
# The file is not cached, download it.
wheel_url, filename = self.get_manylinux_wheel_url(package_name, package_version)
if not wheel_url:
return None
wheel_path = os.path.join(cached_wheels_dir, filename)
print(f" - {package_name}=={package_version}: Downloading")
with open(wheel_path, "wb") as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
return wheel_path
    def get_manylinux_wheel_url(self, package_name, package_version):
        """
        For a given package name, returns a link to the download URL,
        else returns None.

        Returns a (url, filename) tuple, or (None, None) when the package has
        no matching manylinux wheel or PyPI could not be reached.

        Related: https://github.com/Miserlou/Zappa/issues/398
        Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae

        This function downloads metadata JSON of `package_name` from Pypi
        and examines if the package has a manylinux wheel. This function
        also caches the JSON file so that we don't have to poll Pypi
        every time.
        """
        cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), "cached_pypi_info")
        if not os.path.isdir(cached_pypi_info_dir):
            os.makedirs(cached_pypi_info_dir)

        # Even though the metadata is for the package, we save it in a
        # filename that includes the package's version. This helps in
        # invalidating the cached file if the user moves to a different
        # version of the package.
        # Related: https://github.com/Miserlou/Zappa/issues/899
        json_file = "{0!s}-{1!s}.json".format(package_name, package_version)
        json_file_path = os.path.join(cached_pypi_info_dir, json_file)
        if os.path.exists(json_file_path):
            with open(json_file_path, "rb") as metafile:
                data = json.load(metafile)
        else:
            url = "https://pypi.python.org/pypi/{}/json".format(package_name)
            try:
                res = requests.get(url, timeout=float(os.environ.get("PIP_TIMEOUT", 1.5)))
                data = res.json()
            except Exception:  # pragma: no cover
                return None, None
            with open(json_file_path, "wb") as metafile:
                jsondata = json.dumps(data)
                metafile.write(bytes(jsondata, "utf-8"))

        # When "releases" is present this tests dict membership (keys); the
        # list default only applies when the key is missing entirely.
        if package_version not in data.get("releases", []):
            logger.warning(f"package_version({package_version}) not found in {package_name} metafile={json_file_path}")
            return None, None

        for f in data["releases"][package_version]:
            if re.match(self.manylinux_wheel_file_match, f["filename"]):
                return f["url"], f["filename"]
            elif re.match(self.manylinux_wheel_abi3_file_match, f["filename"]):
                return f["url"], f["filename"]
        return None, None
import base64
import logging
import sys
from io import BytesIO
from urllib.parse import unquote, urlencode
from .utilities import ApacheNCSAFormatter, merge_headers, titlecase_keys
# HTTP methods whose request bodies may be binary (and thus base64-encoded by API Gateway).
BINARY_METHODS = ["POST", "PUT", "PATCH", "DELETE", "CONNECT", "OPTIONS"]
def create_wsgi_request(
    event_info,
    server_name="zappa",
    script_name=None,
    trailing_slash=True,
    binary_support=False,
    base_path=None,
    context_header_mappings={},
):
    """
    Given some event_info via API Gateway,
    create and return a valid WSGI request environ.

    NOTE(review): `trailing_slash` is accepted but never used in this body;
    `context_header_mappings={}` is a mutable default argument (safe only as
    long as it is never mutated — it is only read here).
    """
    method = event_info.get("httpMethod", None)
    headers = merge_headers(event_info) or {}  # Allow for the AGW console 'Test' button to work (Pull #735)

    # API Gateway and ALB both started allowing for multi-value querystring
    # params in Nov. 2018. If there aren't multi-value params present, then
    # it acts identically to 'queryStringParameters', so we can use it as a
    # drop-in replacement.
    #
    # The one caveat here is that ALB will only include _one_ of
    # queryStringParameters _or_ multiValueQueryStringParameters, which means
    # we have to check for the existence of one and then fall back to the
    # other.
    if "multiValueQueryStringParameters" in event_info:
        query = event_info["multiValueQueryStringParameters"]
        query_string = urlencode(query, doseq=True) if query else ""
    else:
        query = event_info.get("queryStringParameters", {})
        query_string = urlencode(query) if query else ""
    query_string = unquote(query_string)

    # Copy values out of the Lambda request context into headers, following
    # dotted lookup paths such as "authorizer.principalId".
    if context_header_mappings:
        for key, value in context_header_mappings.items():
            parts = value.split(".")
            header_val = event_info["requestContext"]
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val

    # Related: https://github.com/Miserlou/Zappa/issues/677
    # https://github.com/Miserlou/Zappa/issues/683
    # https://github.com/Miserlou/Zappa/issues/696
    # https://github.com/Miserlou/Zappa/issues/836
    # https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get("isBase64Encoded", False):
            encoded_body = event_info["body"]
            body = base64.b64decode(encoded_body)
        else:
            body = event_info["body"]
            if isinstance(body, str):
                body = body.encode("utf-8")
    else:
        body = event_info["body"]
        if isinstance(body, str):
            body = body.encode("utf-8")

    # Make header names canonical, e.g. content-type => Content-Type
    # https://github.com/Miserlou/Zappa/issues/1188
    headers = titlecase_keys(headers)

    path = unquote(event_info["path"])
    if base_path:
        script_name = "/" + base_path

        if path.startswith(script_name):
            path = path[len(script_name) :]

    x_forwarded_for = headers.get("X-Forwarded-For", "")
    if "," in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        addresses = [addr.strip() for addr in x_forwarded_for.split(",")]
        remote_addr = addresses[-2]
    else:
        remote_addr = x_forwarded_for or "127.0.0.1"

    environ = {
        "PATH_INFO": get_wsgi_string(path),
        "QUERY_STRING": get_wsgi_string(query_string),
        "REMOTE_ADDR": remote_addr,
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": get_wsgi_string(str(script_name)) if script_name else "",
        "SERVER_NAME": str(server_name),
        "SERVER_PORT": headers.get("X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": str("HTTP/1.1"),
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": headers.get("X-Forwarded-Proto", "http"),
        # This must be Bytes or None
        # - https://docs.djangoproject.com/en/4.2/releases/4.2/#miscellaneous
        # - https://wsgi.readthedocs.io/en/latest/definitions.html#envvar-wsgi.input
        # > Manually instantiated WSGIRequest objects must be provided
        # > a file-like object for wsgi.input.
        "wsgi.input": BytesIO(body),
        "wsgi.errors": sys.stderr,
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
    }

    # Systems calling the Lambda (other than API Gateway) may not provide the field requestContext
    # Extract remote_user, authorizer if Authorizer is enabled
    remote_user = None
    if "requestContext" in event_info:
        authorizer = event_info["requestContext"].get("authorizer", None)
        if authorizer:
            remote_user = authorizer.get("principalId")
            environ["API_GATEWAY_AUTHORIZER"] = authorizer
        elif event_info["requestContext"].get("identity"):
            remote_user = event_info["requestContext"]["identity"].get("userArn")

    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if "Content-Type" in headers:
            environ["CONTENT_TYPE"] = headers["Content-Type"]

        if body:
            environ["CONTENT_LENGTH"] = str(len(body))
        else:
            environ["CONTENT_LENGTH"] = "0"

    # Expose every request header under the WSGI HTTP_* convention.
    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace("-", "_")
        environ[wsgi_name] = str(headers[header])

    if script_name:
        environ["SCRIPT_NAME"] = script_name
        path_info = environ["PATH_INFO"]

        if script_name in path_info:
            # NOTE(review): str.replace returns a new string and the result is
            # discarded here, so PATH_INFO is left unchanged. Confirm whether
            # stripping script_name from PATH_INFO was intended before fixing.
            environ["PATH_INFO"].replace(script_name, "")

    if remote_user:
        environ["REMOTE_USER"] = remote_user

    return environ
def common_log(environ, response, response_time=None):
    """
    Given the WSGI environ and the response,
    log this event in Common Log Format.
    """
    logger = logging.getLogger()

    # Pick the formatter variant based on whether we have timing data.
    formatter = ApacheNCSAFormatter(with_response_time=bool(response_time))
    if response_time:
        log_entry = formatter(
            response.status_code,
            environ,
            len(response.content),
            rt_us=response_time,
        )
    else:
        log_entry = formatter(response.status_code, environ, len(response.content))

    logger.info(log_entry)
    return log_entry
# Related: https://github.com/Miserlou/Zappa/issues/1199
def get_wsgi_string(string, encoding="utf-8"):
    """
    Returns wsgi-compatible string

    WSGI environ strings must be latin-1 decodable; re-decode the encoded
    bytes as ISO-8859-1 so arbitrary unicode survives the round trip.
    """
    raw_bytes = string.encode(encoding)
    return raw_bytes.decode("iso-8859-1")
import datetime
import logging
from typing import Any, Callable
LOG = logging.getLogger(__name__)  # module-level logger for this module

##
# Settings / Packaging
##

# mimetypes starting with entries defined here are considered as TEXT when BINARY_SUPPORT is True.
# - Additional TEXT mimetypes may be defined with the 'ADDITIONAL_TEXT_MIMETYPES' setting.
DEFAULT_TEXT_MIMETYPES = (
    "text/",
    "application/json",  # RFC 4627
    "application/javascript",  # RFC 4329
    "application/ecmascript",  # RFC 4329
    "application/xml",  # RFC 3023
    "application/xml-external-parsed-entity",  # RFC 3023
    "application/xml-dtd",  # RFC 3023
    "image/svg+xml",  # RFC 3023
)
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single_valued = event.get("headers") or {}
    merged = (event.get("multiValueHeaders") or {}).copy()

    # Promote any single-value header that has no multi-value counterpart.
    for name, value in single_valued.items():
        merged.setdefault(name, [value])

    # Collapse each value list into one comma-separated string.
    return {name: ", ".join(values) for name, values in merged.items()}
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all keys titlecased.
    """
    result = {}
    for key, value in d.items():
        result[key.title()] = value
    return result
class ApacheNCSAFormatters:
    """
    Formatters producing Apache/NCSA "combined" access-log lines:

        %h %l %u %t "%r" %>s %b "%{Referer}i" "%{User-agent}i"

    %h: remote host, %l: remote logname, %u: authenticated user,
    %t: time the request was received (with GMT offset),
    %r: first line of the request, %>s: final status,
    %b: response size in bytes (excluding headers),
    plus the Referer and User-agent request headers.

    Refer to:
    https://httpd.apache.org/docs/current/en/mod/mod_log_config.html
    """

    @staticmethod
    def format_log(status_code: int, environ: dict, content_length: int, **kwargs) -> str:
        """Build one combined-format log line from a WSGI environ."""
        # An alternate environ key (e.g. a forwarded-for header) may be used
        # as the host field via the 'ip_header' kwarg.
        ip_header = kwargs.get("ip_header", None)
        if ip_header:
            host = environ.get(ip_header, "")
        else:
            host = environ.get("REMOTE_ADDR", "")

        timestamp = datetime.datetime.now(datetime.timezone.utc).strftime("%d/%b/%Y:%H:%M:%S %z")

        query = environ.get("QUERY_STRING", "")
        target = environ.get("PATH_INFO", "")
        if query:
            target = f"{target}?{query}"
        request_line = "{} {} {}".format(
            environ.get("REQUEST_METHOD", ""),
            target,
            environ.get("SERVER_PROTOCOL", ""),
        )

        referer = environ.get("HTTP_REFERER", "")
        agent = environ.get("HTTP_USER_AGENT", "")

        # Remote logname and user are not tracked here; log them as "-".
        return f'{host} - - [{timestamp}] "{request_line}" {status_code} {content_length} "{referer}" "{agent}"'

    @staticmethod
    def format_log_with_response_time(*args, **kwargs) -> str:
        """
        Same as format_log, with Apache's %T/%D response-time suffix appended
        when kwargs includes the response time in microseconds as 'rt_us'.
        %T is the time taken in seconds, %D in microseconds.
        """
        entry = ApacheNCSAFormatters.format_log(*args, **kwargs)
        rt_us = kwargs.get("rt_us", None)
        if rt_us:
            rt_seconds = int(rt_us / 1_000_000)
            entry = f"{entry} {rt_seconds}/{rt_us}"
        return entry
def ApacheNCSAFormatter(with_response_time: bool = True) -> Callable:
    """A factory that returns the wanted formatter"""
    return (
        ApacheNCSAFormatters.format_log_with_response_time
        if with_response_time
        else ApacheNCSAFormatters.format_log
    )
import argparse
import click
import os
import re
import sys
import subprocess
import inspect
import hjson as json
import tempfile
import zipfile
from past.builtins import basestring
import slugify
from .zappa import Zappa
from click import Context, BaseCommand
from click.exceptions import ClickException
from click.globals import push_context
class InvalidAwsLambdaName(Exception):
    """Exception: proposed AWS Lambda name is invalid"""
    # Raised by ZappaPackager.validate_name() when a function name fails the
    # AWS length or character-set rules checked there.
    pass
def human_size(num, suffix="B"):
    """
    Render a byte count with binary (1024-based) unit prefixes,
    e.g. 1536 -> '1.5KiB'.
    """
    value = num
    for prefix in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(value) < 1024.0:
            return "{0:3.1f}{1!s}{2!s}".format(value, prefix, suffix)
        value /= 1024.0
    # Anything that survives all divisions is in the yobibyte range.
    return "{0:.1f}{1!s}{2!s}".format(value, "Yi", suffix)
class ZappaPackager:
    """
    ZappaPackager object is responsible for loading the settings,
    handling the input arguments and executing the calls to the core library.

    Fixes over the previous revision:
    * get_current_venv() is now a @staticmethod — it was defined without
      `self` but invoked as `self.get_current_venv()` in package(), which
      raised TypeError.
    * validate_name() checks against `str` instead of the Python-2
      `basestring` shim from past.builtins (Python 2 is rejected by
      get_runtime_from_python_version() anyway).
    """

    # CLI
    vargs = None
    command = None
    stage_env = None

    # Zappa settings
    zappa = None
    zappa_settings = None
    load_credentials = True
    disable_progress = False

    # Specific settings
    api_stage = None
    app_function = None
    aws_region = None
    debug = None
    prebuild_script = None
    project_name = None
    profile_name = None
    lambda_arn = None
    lambda_name = None
    lambda_description = None
    lambda_concurrency = None
    s3_bucket_name = None
    settings_file = None
    zip_path = None
    handler_path = None
    vpc_config = None
    memory_size = None
    use_apigateway = None
    lambda_handler = None
    django_settings = None
    manage_roles = True
    exception_handler = None
    environment_variables = None
    authorizer = None
    xray_tracing = False
    aws_kms_key_arn = ''
    context_header_mappings = None
    tags = []
    layers = None

    # AWS only allows these characters in API Gateway stage names.
    stage_name_env_pattern = re.compile('^[a-zA-Z0-9_]+$')

    def __init__(self):
        self._stage_config_overrides = {}  # change using self.override_stage_config_setting(key, val)

    @property
    def stage_config_overrides(self):
        """
        Returns zappa_settings we forcefully override for the current stage
        set by `self.override_stage_config_setting(key, value)`
        """
        return getattr(self, "_stage_config_overrides", {}).get(self.api_stage, {})

    @property
    def stage_config(self):
        """
        A shortcut property for settings of a stage.
        """

        def get_stage_setting(stage, extended_stages=None):
            # Recursively resolve the 'extends' chain, guarding against cycles.
            if extended_stages is None:
                extended_stages = []
            if stage in extended_stages:
                raise RuntimeError(stage + " has already been extended to these settings. "
                                           "There is a circular extends within the settings file.")
            extended_stages.append(stage)
            try:
                stage_settings = dict(self.zappa_settings[stage].copy())
            except KeyError:
                raise ClickException("Cannot extend settings for undefined stage '" + stage + "'.")
            extends_stage = self.zappa_settings[stage].get('extends', None)
            if not extends_stage:
                return stage_settings
            extended_settings = get_stage_setting(stage=extends_stage, extended_stages=extended_stages)
            # The extending stage's own keys win over the inherited ones.
            extended_settings.update(stage_settings)
            return extended_settings

        settings = get_stage_setting(stage=self.api_stage)
        # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
        if 'delete_zip' in settings:
            settings['delete_local_zip'] = settings.get('delete_zip')
        settings.update(self.stage_config_overrides)
        return settings

    def handle(self, argv=None):
        """
        Main function.
        Parses command, load settings and dispatches accordingly.
        """
        desc = ('Zappa - Deploy Python applications to AWS Lambda'
                ' and API Gateway.\n')
        parser = argparse.ArgumentParser(description=desc)
        env_parser = argparse.ArgumentParser(add_help=False)
        subparsers = parser.add_subparsers(title='subcommands', dest='command')
        self.load_settings_file()
        args = parser.parse_args(argv)
        self.vargs = vars(args)
        # Parse the input
        # NOTE(rmoe): Special case for manage command
        # The manage command can't have both stage_env and command_rest
        # arguments. Since they are both positional arguments argparse can't
        # differentiate the two. This causes problems when used with --all.
        # (e.g. "manage --all showmigrations admin" argparse thinks --all has
        # been specified AND that stage_env='showmigrations')
        # By having command_rest collect everything but --all we can split it
        # apart here instead of relying on argparse.
        self.load_credentials = False
        # Load and Validate Settings File
        self.load_settings_file(self.vargs.get("settings_file"))
        # Should we execute this for all stages, or just one?
        all_stages = self.vargs.get("all")
        stages = []
        if all_stages:  # All stages!
            stages = self.zappa_settings.keys()
        else:  # Just one env.
            if not self.stage_env:
                # If there's only one stage defined in the settings,
                # use that as the default.
                if len(self.zappa_settings.keys()) == 1:
                    stages.append(list(self.zappa_settings.keys())[0])
                else:
                    parser.error("Please supply a stage to interact with.")
            else:
                stages.append(self.stage_env)
        for stage in stages:
            try:
                # NOTE(review): self.command / self.stage_env are never populated
                # from the parsed args in this trimmed-down CLI — confirm the
                # argument wiring before relying on them.
                self.dispatch_command(self.command, stage)
            except ClickException as e:
                # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
                e.show()
                sys.exit(e.exit_code)

    def dispatch_command(self, command, stage):
        """
        Given a command to execute and stage,
        execute that command.
        """
        self.check_stage_name(stage)
        self.api_stage = stage
        # Explicitly define the app function.
        # Related: https://github.com/Miserlou/Zappa/issues/832
        if self.vargs.get("app_function", None):
            self.app_function = self.vargs["app_function"]
        # Load our settings, based on api_stage.
        try:
            self.load_settings(self.vargs.get("settings_file"))
        except ValueError as e:
            if hasattr(e, "message"):
                print("Error: {}".format(e.message))
            else:
                print(str(e))
            sys.exit(-1)
        self.create_package("deployment.zip")

    def validate_name(self, name, maxlen=80):
        """Validate name for AWS Lambda function.
        name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix.
        maxlen: max allowed length for name without prefix and suffix.
        The value 80 was calculated from prefix with longest known region name
        and assuming that no alias or version would be longer than `$LATEST`.
        Based on AWS Lambda spec
        http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html
        Return: the name
        Raise: InvalidAwsLambdaName, if the name is invalid.
        """
        # `str` replaces the Python-2 `basestring` shim previously imported
        # from past.builtins; this module only supports Python 3.
        if not isinstance(name, str):
            msg = "Name must be of type string"
            raise InvalidAwsLambdaName(msg)
        if len(name) > maxlen:
            msg = "Name is longer than {maxlen} characters."
            raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
        if len(name) == 0:
            msg = "Name must not be empty string."
            raise InvalidAwsLambdaName(msg)
        if not re.match("^[a-zA-Z0-9-_]+$", name):
            msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
            raise InvalidAwsLambdaName(msg)
        return name

    def get_runtime_from_python_version(self):
        """Map the running interpreter's version to an AWS Lambda runtime string."""
        if sys.version_info[0] < 3:
            raise ValueError("Python 2.x is no longer supported.")
        else:
            if sys.version_info[1] <= 7:
                return "python3.7"
            elif sys.version_info[1] <= 8:
                return "python3.8"
            elif sys.version_info[1] <= 9:
                return "python3.9"
            else:
                return "python3.10"

    @staticmethod
    def get_current_venv():
        """
        Returns the path to the current virtualenv
        """
        # @staticmethod fix: this was previously defined without `self` yet
        # called as self.get_current_venv() from package(), raising TypeError.
        if "VIRTUAL_ENV" in os.environ:
            venv = os.environ["VIRTUAL_ENV"]
            return venv
        # pyenv available check
        try:  # progma: no cover
            subprocess.check_output(["pyenv", "help"], stderr=subprocess.STDOUT)
            pyenv_available = True
        except OSError:
            pyenv_available = False
        if pyenv_available:  # progma: no cover
            # Each Python version is installed into its own directory under $(pyenv root)/versions
            # https://github.com/pyenv/pyenv#locating-pyenv-provided-python-installations
            # Related: https://github.com/zappa/Zappa/issues/1132
            pyenv_root = subprocess.check_output(["pyenv", "root"]).decode("utf-8").strip()
            pyenv_version = subprocess.check_output(["pyenv", "version-name"]).decode("utf-8").strip()
            venv = os.path.join(pyenv_root, "versions", pyenv_version)
            return venv
        return None

    def create_package(self, output=None):
        """
        Ensure that the package can be properly configured,
        and then create it.
        """
        # Create the Lambda zip package (includes project and virtualenvironment)
        # Also define the path the handler file so it can be copied to the zip
        # root for Lambda.
        current_file = os.path.dirname(os.path.abspath(
            inspect.getfile(inspect.currentframe())))
        handler_file = os.sep.join(current_file.split(os.sep)[0:]) + os.sep + 'handler.py'
        # Create the zip file(s)
        # This could be python3.6 optimized.
        exclude = self.stage_config.get(
            'exclude', [
                "boto3",
                "dateutil",
                "botocore",
                "s3transfer",
                "concurrent"
            ])
        # Create a single zip that has the handler and application
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            handler_file=handler_file,
            use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
            exclude=exclude,
            exclude_glob=self.stage_config.get('exclude_glob', []),
            output=output,
            disable_progress=self.disable_progress
        )
        # Warn if this is too large for Lambda.
        file_stats = os.stat(self.zip_path)
        if file_stats.st_size > 52428800:  # pragma: no cover
            print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. '
                  'Try setting "slim_handler" to true in your Zappa settings file.\n\n')
        # Throw custom settings into the zip that handles requests
        if self.stage_config.get('slim_handler', False):
            handler_zip = self.handler_path
        else:
            handler_zip = self.zip_path
        with zipfile.ZipFile(handler_zip, 'a') as lambda_zip:
            settings_s = "# Generated by Zappa\n"
            if self.app_function:
                if '.' not in self.app_function:  # pragma: no cover
                    raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." +
                                         " It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.")
                app_module, app_function = self.app_function.rsplit('.', 1)
                settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function)
            if self.exception_handler:
                settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
            else:
                settings_s += "EXCEPTION_HANDLER=None\n"
            if self.debug:
                settings_s = settings_s + "DEBUG=True\n"
            else:
                settings_s = settings_s + "DEBUG=False\n"
            settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))
            if self.binary_support:
                settings_s = settings_s + "BINARY_SUPPORT=True\n"
            else:
                settings_s = settings_s + "BINARY_SUPPORT=False\n"
            head_map_dict = {}
            head_map_dict.update(dict(self.context_header_mappings))
            settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(
                head_map_dict
            )
            # If we're on a domain, we don't need to define the /<<env>> in
            # the WSGI PATH
            if self.domain:
                settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
            else:
                settings_s = settings_s + "DOMAIN=None\n"
            if self.base_path:
                settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
            else:
                settings_s = settings_s + "BASE_PATH=None\n"
            # Pass through remote config bucket and path
            if self.remote_env:
                settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(
                    self.remote_env
                )
            # DEPRECATED. use remove_env instead
            elif self.remote_env_bucket and self.remote_env_file:
                settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                    self.remote_env_bucket, self.remote_env_file
                )
            # Local envs
            env_dict = {}
            if self.aws_region:
                env_dict['AWS_REGION'] = self.aws_region
            env_dict.update(dict(self.environment_variables))
            # Environment variable keys must be ascii
            # https://github.com/Miserlou/Zappa/issues/604
            # https://github.com/Miserlou/Zappa/issues/998
            try:
                env_dict = dict((k.encode('ascii').decode('ascii'), v) for (k, v) in env_dict.items())
            except Exception:
                raise ValueError("Environment variable keys must be ascii.")
            settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(
                env_dict
            )
            # We can be environment-aware
            settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
            settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
            if self.settings_file:
                settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file))
            else:
                settings_s = settings_s + "SETTINGS_FILE=None\n"
            if self.django_settings:
                settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings))
            else:
                settings_s = settings_s + "DJANGO_SETTINGS=None\n"
            # If slim handler, path to project zip
            if self.stage_config.get('slim_handler', False):
                settings_s += "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                    self.s3_bucket_name, self.api_stage, self.project_name)
                # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
                # and tell the handler we are the slim_handler
                # https://github.com/Miserlou/Zappa/issues/776
                settings_s += "SLIM_HANDLER=True\n"
                include = self.stage_config.get('include', [])
                if len(include) >= 1:
                    settings_s += "INCLUDE=" + str(include) + '\n'
            # AWS Events function mapping
            event_mapping = {}
            events = self.stage_config.get('events', [])
            for event in events:
                arn = event.get('event_source', {}).get('arn')
                function = event.get('function')
                if arn and function:
                    event_mapping[arn] = function
            settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
            # Map Lex bot events
            bot_events = self.stage_config.get('bot_events', [])
            bot_events_mapping = {}
            for bot_event in bot_events:
                event_source = bot_event.get('event_source', {})
                intent = event_source.get('intent')
                invocation_source = event_source.get('invocation_source')
                function = bot_event.get('function')
                if intent and invocation_source and function:
                    bot_events_mapping[str(intent) + ':' + str(invocation_source)] = function
            settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(bot_events_mapping)
            # Map cognito triggers
            cognito_trigger_mapping = {}
            cognito_config = self.stage_config.get('cognito', {})
            triggers = cognito_config.get('triggers', [])
            for trigger in triggers:
                source = trigger.get('source')
                function = trigger.get('function')
                if source and function:
                    cognito_trigger_mapping[source] = function
            settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(cognito_trigger_mapping)
            # Authorizer config
            authorizer_function = self.authorizer.get('function', None)
            if authorizer_function:
                settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
            # Copy our Django app into root of our package.
            # It doesn't work otherwise.
            if self.django_settings:
                base = __file__.rsplit(os.sep, 1)[0]
                django_py = ''.join(os.path.join(base, 'ext', 'django_zappa.py'))
                lambda_zip.write(django_py, 'django_zappa_app.py')
            # async response
            async_response_table = self.stage_config.get('async_response_table', '')
            settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)
            # Lambda requires a specific chmod
            temp_settings = tempfile.NamedTemporaryFile(delete=False)
            os.chmod(temp_settings.name, 0o644)
            temp_settings.write(bytes(settings_s, "utf-8"))
            temp_settings.close()
            lambda_zip.write(temp_settings.name, 'zappa_settings.py')
            os.unlink(temp_settings.name)

    def get_json_settings(self, settings_name="zappa_settings"):
        """
        Return zappa_settings path as JSON
        """
        zs_json = settings_name + ".json"
        # Must have at least one
        if not os.path.isfile(zs_json):
            raise ClickException("Please configure a zappa_settings file or call `zappa init`.")
        # Prefer JSON
        if os.path.isfile(zs_json):
            settings_file = zs_json
        else:
            raise ClickException("Please configure a zappa_settings file or call `zappa init`. JSON file could not be found.")
        return settings_file

    def load_settings_file(self, settings_file=None):
        """
        Load our settings file.
        """
        if not settings_file:
            settings_file = self.get_json_settings()
        if not os.path.isfile(settings_file):
            raise ClickException("Please configure your zappa_settings file or call `zappa init`.")
        path, ext = os.path.splitext(settings_file)
        if ext == '.json':
            with open(settings_file) as json_file:
                try:
                    self.zappa_settings = json.load(json_file)
                except ValueError:  # pragma: no cover
                    raise ValueError("Unable to load the Zappa settings JSON. It may be malformed.")
        else:
            raise ValueError("File needs to be in JSON format. It may be malformed.")

    def check_stage_name(self, stage_name):
        """
        Make sure the stage name matches the AWS-allowed pattern
        (calls to apigateway_client.create_deployment, will fail with error
        message "ClientError: An error occurred (BadRequestException) when
        calling the CreateDeployment operation: Stage name only allows
        a-zA-Z0-9_" if the pattern does not match)
        """
        if self.stage_name_env_pattern.match(stage_name):
            return True
        raise ValueError("AWS requires stage name to match a-zA-Z0-9_")

    def get_project_name(self):
        # Slugified current directory name, truncated to 15 characters.
        return slugify.slugify(os.getcwd().split(os.sep)[-1])[:15]

    def check_environment(self, environment):
        """
        Make sure the environment contains only strings
        (since putenv needs a string)
        """
        non_strings = []
        for k, v in environment.items():
            if not isinstance(v, str):
                non_strings.append(k)
        if non_strings:
            raise ValueError("The following environment variables are not strings: {}".format(", ".join(non_strings)))
        else:
            return True

    def collision_warning(self, item):
        """
        Given a string, print a warning if this could
        collide with a Zappa core package module.
        Use for app functions and events.
        """
        namespace_collisions = [
            "zappa.",
            "wsgi.",
            "middleware.",
            "handler.",
            "util.",
            "letsencrypt.",
            "cli.",
        ]
        for namespace_collision in namespace_collisions:
            if item.startswith(namespace_collision):
                click.echo(
                    click.style("Warning!", fg="red", bold=True)
                    + " You may have a namespace collision between "
                    + click.style(item, bold=True)
                    + " and "
                    + click.style(namespace_collision, bold=True)
                    + "! You may want to rename that file."
                )

    def load_settings(self, settings_file=None, session=None):
        """
        Load the local zappa_settings file.
        Returns the loaded Zappa object.
        """
        # Ensure we're passed a valid settings file.
        if not settings_file:
            settings_file = self.get_json_settings()
        if not os.path.isfile(settings_file):
            raise ClickException("Please configure your zappa_settings file.")
        # Load up file
        self.load_settings_file(settings_file)
        # Make sure that the stages are valid names:
        for stage_name in self.zappa_settings.keys():
            try:
                self.check_stage_name(stage_name)
            except ValueError:
                raise ValueError("API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(stage_name))
        # Make sure that this stage is our settings
        if self.api_stage not in self.zappa_settings.keys():
            raise ClickException("Please define stage '{0!s}' in your Zappa settings.".format(self.api_stage))
        # We need a working title for this project. Use one if supplied, else cwd dirname.
        if 'project_name' in self.stage_config:  # pragma: no cover
            # If the name is invalid, this will throw an exception with message up stack
            self.project_name = self.validate_name(self.stage_config['project_name'])
        else:
            self.project_name = self.get_project_name()
        # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
        # Assume that we already have have validated the name beforehand.
        # Related: https://github.com/Miserlou/Zappa/pull/664
        # https://github.com/Miserlou/Zappa/issues/678
        # And various others from Slack.
        self.lambda_name = slugify.slugify(self.project_name + '-' + self.api_stage)
        # Load stage-specific settings
        self.vpc_config = self.stage_config.get('vpc_config', {})
        self.memory_size = self.stage_config.get('memory_size', 512)
        self.app_function = self.stage_config.get('app_function', None)
        self.exception_handler = self.stage_config.get('exception_handler', None)
        self.aws_region = self.stage_config.get('aws_region', None)
        self.debug = self.stage_config.get('debug', True)
        self.prebuild_script = self.stage_config.get('prebuild_script', None)
        self.profile_name = self.stage_config.get('profile_name', None)
        self.log_level = self.stage_config.get('log_level', "DEBUG")
        self.domain = self.stage_config.get('domain', None)
        self.base_path = self.stage_config.get('base_path', None)
        self.timeout_seconds = self.stage_config.get('timeout_seconds', 30)
        dead_letter_arn = self.stage_config.get('dead_letter_arn', '')
        self.dead_letter_config = {'TargetArn': dead_letter_arn} if dead_letter_arn else {}
        self.cognito = self.stage_config.get('cognito', None)
        self.num_retained_versions = self.stage_config.get('num_retained_versions', None)
        # Check for valid values of num_retained_versions
        if self.num_retained_versions is not None and type(self.num_retained_versions) is not int:
            raise ClickException("Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s" % type(self.num_retained_versions))
        elif type(self.num_retained_versions) is int and self.num_retained_versions < 1:
            raise ClickException("The value for num_retained_versions in the zappa_settings.json should be greater than 0.")
        # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
        # https://github.com/Miserlou/Zappa/issues/490
        # https://github.com/Miserlou/Zappa/issues/493
        self.use_apigateway = self.stage_config.get('use_apigateway', True)
        if self.use_apigateway:
            self.use_apigateway = self.stage_config.get('apigateway_enabled', True)
            self.apigateway_description = self.stage_config.get('apigateway_description', None)
        self.lambda_handler = self.stage_config.get('lambda_handler', 'handler.lambda_handler')
        # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
        self.remote_env_bucket = self.stage_config.get('remote_env_bucket', None)
        self.remote_env_file = self.stage_config.get('remote_env_file', None)
        self.remote_env = self.stage_config.get('remote_env', None)
        self.settings_file = self.stage_config.get('settings_file', None)
        self.django_settings = self.stage_config.get('django_settings', None)
        self.manage_roles = self.stage_config.get('manage_roles', True)
        self.binary_support = self.stage_config.get('binary_support', True)
        self.api_key_required = self.stage_config.get('api_key_required', False)
        self.api_key = self.stage_config.get('api_key')
        self.endpoint_configuration = self.stage_config.get('endpoint_configuration', None)
        self.iam_authorization = self.stage_config.get('iam_authorization', False)
        self.cors = self.stage_config.get("cors", False)
        self.lambda_description = self.stage_config.get('lambda_description', "Zappa Deployment")
        self.lambda_concurrency = self.stage_config.get('lambda_concurrency', None)
        self.environment_variables = self.stage_config.get('environment_variables', {})
        self.aws_environment_variables = self.stage_config.get('aws_environment_variables', {})
        self.check_environment(self.environment_variables)
        self.authorizer = self.stage_config.get('authorizer', {})
        self.runtime = self.stage_config.get('runtime', self.get_runtime_from_python_version())
        self.aws_kms_key_arn = self.stage_config.get('aws_kms_key_arn', '')
        self.context_header_mappings = self.stage_config.get('context_header_mappings', {})
        self.xray_tracing = self.stage_config.get('xray_tracing', False)
        self.desired_role_arn = self.stage_config.get('role_arn')
        self.layers = self.stage_config.get('layers', None)
        # Load ALB-related settings
        self.use_alb = self.stage_config.get('alb_enabled', False)
        self.alb_vpc_config = self.stage_config.get('alb_vpc_config', {})
        # Additional tags
        self.tags = self.stage_config.get('tags', {})
        self.zappa = Zappa(runtime=self.runtime)
        if self.app_function:
            self.collision_warning(self.app_function)
            if self.app_function[-3:] == '.py':
                click.echo(click.style("Warning!", fg="red", bold=True) +
                           " Your app_function is pointing to a " + click.style("file and not a function", bold=True) +
                           "! It should probably be something like 'my_file.app', not 'my_file.py'!")
        return self.zappa

    def package(self, output=None):
        """
        Only build the package
        """
        # Make sure we're in a venv.
        self.get_current_venv()
        # Create the Lambda Zip
        self.create_package(output)
        # NOTE(review): `callback` is not defined on this class — confirm the
        # post-zip callback hook is provided elsewhere before relying on this.
        self.callback('zip')
        size = human_size(os.path.getsize(self.zip_path))
        click.echo(click.style("Package created", fg="green", bold=True) + ": " + click.style(self.zip_path, bold=True) + " (" + size + ")")
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """
    def emphasize(text):
        # Shared styling for the highlighted words in the first line.
        return click.style(text, fg='green', bold=True)

    click.echo("Need " + emphasize("help") + "? Found a " + emphasize("bug") +
               "? Let us " + emphasize("know") + "! :D")
    click.echo("File bug reports on " + click.style("GitHub", bold=True) + " here: "
               + click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True))
    click.echo("And join our " + click.style("Slack", bold=True) + " channel here: "
               + click.style("https://zappateam.slack.com", fg='cyan', bold=True))
    click.echo("Love!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
def disable_click_colors():
    """
    Install a Click context whose color flag is off.
    click.echo() consults the current context, so pushing a context with
    ``color = False`` suppresses ANSI colors globally. The throwaway
    BaseCommand exists only to satisfy the Context constructor.
    https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39
    """
    no_color_ctx = Context(BaseCommand('AllYourBaseAreBelongToUs'))
    no_color_ctx.color = False
    push_context(no_color_ctx)
def handle():  # pragma: no cover
    """
    Main program execution handler.
    """
    try:
        sys.exit(ZappaPackager().handle())
    except SystemExit as e:  # pragma: no cover
        sys.exit(e.code)
    except KeyboardInterrupt:  # pragma: no cover
        # 130 is the conventional exit status for SIGINT.
        sys.exit(130)
    except Exception:
        click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(")
        click.echo("\n==============\n")
        import traceback
        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()
        sys.exit(-1)
if __name__ == '__main__':  # pragma: no cover
    # Entry point when the module is executed directly rather than via an
    # installed console script.
    handle()
# zappa_scheduler
## Description
A db driven way to run tasks at a future point in time, or at a regular interval, for Django Zappa projects (https://github.com/Miserlou/Zappa).
## Installation
```
pip install zappa_scheduler
```
To check for tasks every 4 minutes, add the below to zappa_settings.json:
```json
{
"dev": {
"keep_warm": false,
"events": [
{
"function": "zappa_scheduler.zappa_check.now",
"expression": "rate(4 minutes)"
}
]
}
}
```
## Usage
The API is currently low level: you create your tasks and save them directly to the database.
```python
def test_function(_arg1, _arg2, _kwarg1=1, _kwarg2=2):
return _arg1, _arg2, _kwarg1, _kwarg2
call_later = CallLater()
call_later.function = test_function
call_later.args = (3, 4) # for the above function
call_later.kwargs = {'_kwarg1': 11, '_kwarg2': 22} # for the above function
call_later.time_to_run = timezone.now() + timedelta(minutes=8)
call_later.save()
```
You can also repeatedly call your task:
```python
call_later_twice.every = timedelta(seconds=1)
call_later_twice.repeat = 2
```
There are 2 types of failure:
- If a task fails to run, it is run on the next checking event. By default, there are 3 attempts to run a function.
- If a task takes too long to run, it is again run on the next checking event. By default, there are 3 retries.
...the task is labelled as problematic after repeated failures.
| zappa-scheduler | /zappa_scheduler-1.0.7.tar.gz/zappa_scheduler-1.0.7/README.md | README.md |
import inspect
import json
from datetime import timedelta
from logging import getLogger
import pytz
from dateutil.parser import parse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.serializers import serialize
from django.utils import timezone
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from picklefield import PickledObjectField
logger = getLogger(__name__)
# used for testing purposes
# Canonical event strings: run()/check_now() return or log these exact values,
# and the test suite asserts against them. Keys are internal identifiers;
# values are the strings actually emitted.
events = {
    'called_and_destroyed': 'deleted after 1 call',
    'called': 'called',
    'called_and_expired': 'called_and_expired',
    'will_be_called_in_future_again': 'call in future',
    'failed to run so must be rerun': 'failed to run so rerun',
    'waiting': 'still waiting to run',
    'failed to run before expired': 'call later function failed to run within allotted time!',
    'error calling pickled function': 'error calling pickled function, or problem with polling',
    'expired function was eventually called': 'expired yet called',
    'repeatedly failed': 'repeatedly timed out, given up!',
}
# Maximum time (in seconds) a scheduled call is given to finish before
# check_now() treats it as timed out.
MAX_TIME = 60 * 10  # 10 minutes


def far_future_fail_timeout():
    """Default watchdog deadline: effectively 'never' until the task is scheduled."""
    return timezone.now() + timedelta(days=365)


def realistic_timeout(time_threshold):
    """Watchdog deadline for a task about to run: time_threshold + MAX_TIME seconds."""
    # Bug fix: the previous `timedelta(MAX_TIME)` passed MAX_TIME as the first
    # positional argument of timedelta, which is *days* — yielding a 600-day
    # deadline instead of the intended 10 minutes.
    return time_threshold + timedelta(seconds=MAX_TIME)
@python_2_unicode_compatible
class CallLater(models.Model):
    """
    A persisted deferred call: `function` is pickled together with its
    args/kwargs and executed by the periodic checker once `time_to_run`
    has passed (optionally repeated `repeat` times, `every` apart).
    """
    name = models.CharField(max_length=64, default='', editable=True, verbose_name=u'additional lookup field')
    time_to_run = models.DateTimeField(default=timezone.now)
    time_to_stop = models.DateTimeField(null=True, blank=True)
    function = PickledObjectField()
    args = PickledObjectField(null=True)
    kwargs = PickledObjectField(null=True)
    repeat = models.PositiveIntegerField(default=1)
    every = models.DurationField(null=True, blank=True)
    # Watchdog deadline: far in the future until the task is scheduled, then
    # reset by preprocess_instance() just before each execution attempt.
    when_check_if_failed = models.DateTimeField(default=far_future_fail_timeout)
    retries = models.PositiveIntegerField(default=3)
    timeout_retries = models.PositiveIntegerField(default=2)
    problem = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        # (<django.db.models.fields.PositiveIntegerField>,) is returned if self.repeat not set
        # I assume i must save() model before default value is given
        # `!= 1` replaces the original `is not 1`: identity comparison against
        # an int literal depends on CPython's small-int caching and raises a
        # SyntaxWarning on Python 3.8+.
        if self.every is None and type(self.repeat) is int and self.repeat != 1:
            raise ValidationError('you must set a repeat time (via every=[timedelta]) if you want a function called many times'
                                  ' (each time after current time + repeat')
        super(CallLater, self).save(*args, **kwargs)

    def check_individual(self):
        # Run this single task immediately, bypassing the periodic check_now() sweep.
        preprocess_instance(self, timezone.now())
# used for testing
def to_check(time_threshold=None):
    """
    Summarize how many tasks are currently due and how many have timed out,
    as a human-readable string (consumed by the test suite).
    """
    # Bug fix: the previous signature used `time_threshold=timezone.now()`,
    # which is evaluated once at import time, so every later call silently
    # compared against a stale timestamp. Resolve "now" at call time instead.
    if time_threshold is None:
        time_threshold = timezone.now()
    to_run = CallLater.objects.filter(time_to_run__lte=time_threshold,
                                      when_check_if_failed__gt=time_threshold,
                                      problem=False).count()
    timedout_again_to_run = CallLater.objects.filter(when_check_if_failed__lte=time_threshold,
                                                     problem=False).count()
    return 'to_run=' + str(to_run) + ', timedout_again_to_run=' + str(timedout_again_to_run)
def check_now(timenow=None):
    """
    Periodic sweep (wired to the Zappa schedule): execute every task whose
    time_to_run has passed, and retry or give up on tasks whose watchdog
    deadline (when_check_if_failed) expired without completing.
    """
    if timenow is None:
        timenow = timezone.now()
    # what happens if there is a huge number of items and all cant be run within 30 time period?
    # perhaps batch off groups of x to zappa.async
    # Due tasks that have not yet hit their watchdog deadline.
    for to_run in CallLater.objects.filter(time_to_run__lte=timenow,
                                           when_check_if_failed__gt=timenow,
                                           problem=False):
        preprocess_instance(to_run, timenow)
    # Tasks whose previous attempt never finished before its deadline.
    for timedout_again_to_run in CallLater.objects.filter(when_check_if_failed__lte=timenow,
                                                          problem=False):
        if timedout_again_to_run.timeout_retries == 0:
            # Out of timeout retries: flag as problematic so both sweeps above
            # exclude it from now on, and log that we gave up.
            timedout_again_to_run.problem = True
            timedout_again_to_run.save()
            log_error(events['repeatedly failed'], timedout_again_to_run)
            continue
        else:
            # Burn one timeout retry and attempt the task again.
            log_error(events['failed to run before expired'], timedout_again_to_run)
            timedout_again_to_run.timeout_retries -= 1
            preprocess_instance(timedout_again_to_run, timenow)
def preprocess_instance(to_run, time_threshold):
    """
    Arm the watchdog for one task, then execute it.
    The deadline is persisted *before* run() so that a crash mid-execution
    still leaves a timestamp check_now() can later detect as a timeout.
    """
    to_run.when_check_if_failed = realistic_timeout(time_threshold)
    to_run.save()
    # NOTE(review): this passes a naive "%Y-%m-%d %H:%M:%S" string while
    # test_run() passes a timezone-aware isoformat() string — confirm run()'s
    # parse() is meant to accept both formats.
    run(to_run.id, time_threshold.strftime("%Y-%m-%d %H:%M:%S"))
def log_error(message, instance):
    """Log *message* together with a JSON snapshot of the model *instance*.

    Best-effort: the opaque pickled ``function`` field is replaced with a
    readable "module.name(signature)" summary when possible.
    """
    # serialising model instance
    # https://stackoverflow.com/questions/757022/how-do-you-serialize-a-model-instance-in-django
    record = json.loads(serialize('json', [instance, ]))[0]
    try:
        # replace long String64 function with useful info
        func = instance.function
        summary = str(inspect.signature(func))
        if hasattr(func, 'args'):
            summary = summary + ', args=' + str(func.args)
        if hasattr(func, 'kwargs'):
            summary = summary + ', kwargs=' + str(func.kwargs)
        record['fields']['function'] = str(func.__module__ + "." + func.__name__ + summary)
    except Exception:
        # Summary is cosmetic only -- keep the raw serialized value on failure.
        pass
    logger.error(message + ' ' + json.dumps(record))
def test_run(call_later, time_threshold):
    """Test helper: invoke run() directly with an ISO-8601 timestamp."""
    return run(call_later.id, time_threshold.astimezone().isoformat())
# using id to avoid pickle issues
# @task
def run(call_later_id, time_threshold_txt):
    """Load the CallLater row by id and execute its stored function.

    :param call_later_id: primary key of the CallLater instance to run.
    :param time_threshold_txt: the scheduling timestamp as a string
        parseable by dateutil (used to compute the next run time).
    :return: one of the ``events`` status strings (used by tests), or None.
    """
    time_threshold = parse(time_threshold_txt)
    try:
        call_later = CallLater.objects.get(id=call_later_id)
    except CallLater.DoesNotExist:
        # BUG FIX: this branch previously called log_error(..., call_later),
        # but `call_later` is unbound here, raising UnboundLocalError and
        # masking the real condition. Log the id directly instead.
        logger.error(events['expired function was eventually called'] +
                     ' id=' + str(call_later_id))
        return
    # args/kwargs fields may be absent or empty -- fall back to empty values.
    try:
        _args = call_later.args or ()
    except AttributeError:
        _args = ()
    try:
        _kwargs = call_later.kwargs or {}
    except AttributeError:
        _kwargs = {}
    # attempt to call the function here
    try:
        call_later.function(*_args, **_kwargs)
    except TypeError as e:
        # has been manually deleted
        # NOTE(review): this also swallows TypeErrors raised inside the
        # target function itself -- confirm this is intentional.
        pass
    except Exception as e:
        if call_later.retries == 0:
            call_later.problem = True
            # BUG FIX: the `problem` flag was previously set but never
            # persisted, so the row kept being retried. Save it.
            call_later.save()
            log_error(events['error calling pickled function'] + str(e), call_later)
            return
        call_later.retries -= 1
        call_later.save()
        return
    if call_later.repeat <= 1:
        # One-shot (or final repetition): remove the row.
        call_later.delete()
        return events['called_and_destroyed']  # for testing purposes
    # I assume i must save() model before default value is given
    if type(call_later.time_to_stop) != tuple and call_later.time_to_stop is not None\
            and call_later.time_to_stop <= time_threshold:
        call_later.delete()
        return events['called_and_expired']
    if call_later.every is not None:
        call_later.repeat -= 1
        time_to_run = time_threshold + call_later.every
        # Make the next run time timezone-aware if the arithmetic left it naive.
        if time_to_run.tzinfo is None or time_to_run.tzinfo.utcoffset(time_to_run) is None:
            time_to_run = pytz.timezone(settings.TIME_ZONE).localize(time_to_run)
        call_later.time_to_run = time_to_run
        call_later.when_check_if_failed = far_future_fail_timeout()
        call_later.save()
        return events['will_be_called_in_future_again']
    return events['called']
# zappa-secrets-manager
A package that will aid in deploying python projects via zappa while using AWS Secrets Manager. This package makes it easier to develop a project using only a few simple changes in order to swap between development and production
## Available Options
PROJECT_NAME (required) - The name of the project part of the key to look up on AWS Secrets Manager
ENV_PATH (required) - The path to the env file that you want to load when developing
EXTRA_ENVS_TO_SET (optional - defaults to []) - A list of two part tuples detailing any extra environment variables that you want to set
ENV_FILE_NAME (optional - defaults to "development.env") - The name of your local environment file
REGION_NAME (optional - defaults to "eu-west-2") - The region to get the AWS Secrets from
## Usage
Add a call to "load_secrets" somewhere in your project initialisation script, preferably before any project specific settings are initialised.
## Full Django Example
In `<project_name>/__init__.py` add the following.
.. code:: python
# This should be the full path to your local environment file (excluding the file itself)
ENV_PATH = '<the full path to your local environment file>'
# Any extra environment settings you wish to set. The second element of each tuple will get str formatted
# with your PROJECT_NAME and the STAGE from zappa in order to create a lookup on AWS Secrets Manager
EXTRA_ENVS_TO_SET = [('DATABASE_PASSWORD', '{PROJECT_NAME}/{STAGE}/db/{PROJECT_NAME}'),]
load_secrets(PROJECT_NAME='myproject',
ENV_PATH=ENV_PATH,
EXTRA_ENVS_TO_SET=EXTRA_ENVS_TO_SET,
ENV_FILE_NAME='testing.env',
REGION_NAME='us-west-1')
## How it works
Zappa Secrets Manager allows you to easily swap between a production environment on the server with only minimal changes to your local codebase. By default zappa will fail closed - so you need to actively go out of your way to accidentally get production env variables on your local system.
1) zappa-secrets-manager checks for the existence of a zappa STAGE environment variable. This will always be there if you deploy with zappa. If this exists and there is no local "development.env" file then it will use the relevant AWS credentials to obtain the AWS Secrets for anything with a key of 'myproject/<stage>'. It then loads these secrets into your environment for use in your project.
2) If STAGE is not set OR if there is a local "development.env" file then it will load the contents of that file into your environment for use in your project
3) If neither STAGE nor "development.env" exists then the system fails with a RuntimeError
## WARNING
**DO NOT COMMIT YOUR LOCAL DEVELOPMENT.ENV FILE**
## How to structure your AWS Secrets Key Names
Zappa Secrets Manager will by default load any secrets that are stored in <PROJECT_NAME>/<STAGE> on the appropriate REGION_NAME into your environment.
For any values in EXTRA_ENVS_TO_SET you can structure your key names however you want. Zappa Secrets Manager will string format them to include the zappa STAGE and the PROJECT_NAME variables so you can do dynamic lookups.
## Example
Given an ENV_PATH of "path-to-env-file", a PROJECT_NAME of "my_project", and a zappa STAGE of "prod", the following will happen:
1. Check to see if "path-to-env-file/development.env" exists. If it does then it loads the environment variables out of that.
2. If it doesn't exist then it loads all the secrets on the eu-west-2 region with the secret name "my_project/prod" into your environment.
| zappa-secrets-manager | /zappa_secrets_manager-0.5.3.tar.gz/zappa_secrets_manager-0.5.3/README.rst | README.rst |
import json
import os
from dotenv import load_dotenv
from .get_secrets import get_secret
def load_secrets(PROJECT_NAME,
                 ENV_PATH,
                 EXTRA_ENVS_TO_SET=None,
                 ENV_FILE_NAME='development.env',
                 REGION_NAME=None):
    """
    Load secrets into the process environment.

    On a server (zappa's STAGE variable set, no local env file) the secrets
    stored under PROJECT_NAME/STAGE on AWS Secrets Manager are loaded;
    locally, the contents of ENV_PATH/ENV_FILE_NAME are loaded instead.

    :param PROJECT_NAME: The name of the project part of the key to lookup
    on AWS Secrets Manager
    :param ENV_PATH: The path to the local environment file
    :param EXTRA_ENVS_TO_SET: A list of tuples detailing extra environment
    variables to set
    :param ENV_FILE_NAME: The name of the local environment file
    :param REGION_NAME: The region name
    :return:
    """
    stage = os.environ.get('STAGE', None)
    if EXTRA_ENVS_TO_SET is None:
        EXTRA_ENVS_TO_SET = []
    env_file = os.path.join(ENV_PATH, ENV_FILE_NAME)
    env_file_exists = os.path.exists(env_file)
    if stage is not None and not env_file_exists:
        # As we only deploy via zappa, the STAGE environment variable will
        # be there, so its presence tells us we are on a server. Never go
        # into here if the local env file exists.
        pairs = [
            ('API_KEYS', '{PROJECT_NAME}/{STAGE}'.format(
                PROJECT_NAME=PROJECT_NAME,
                STAGE=stage)),
        ]
        for name_tpl, key_tpl in EXTRA_ENVS_TO_SET:
            pairs.append(
                (name_tpl.format(STAGE=stage, PROJECT_NAME=PROJECT_NAME),
                 key_tpl.format(STAGE=stage, PROJECT_NAME=PROJECT_NAME))
            )
        for env_name, secret_name in pairs:
            loaded_secret = get_secret(secret_name,
                                       region_name=REGION_NAME)
            if loaded_secret is None:
                continue
            payload = json.loads(loaded_secret)
            if 'password' in payload:
                os.environ[env_name] = payload['password']
            else:
                for api_name, secret in payload.items():
                    os.environ[api_name] = secret
    elif env_file_exists:
        # We don't deploy the development.env - use that for handling
        # development specific settings (local API keys etc)
        load_dotenv(env_file)
    else:
        raise RuntimeError('Running application and "{0}" '
                           'was not found. If running on a server ensure '
                           'that the "STAGE" environment variable is '
                           'set'.format(env_file))
# zappa-sentry
Sentry handler configuration with defaults for zappa lambdas.
# Installation
`pip install zappa_sentry`
# Zappa configuration
Setup your sentry DSN as the value of environment variable `SENTRY_DSN`, either on the `zappa_setting.json` file or in any of the other methods on https://github.com/miserlou/zappa/#setting-environment-variables
Then you can setup the `zappa_sentry.unhandled_exceptions` handler.
Example:
```
{
"dev": {
...
"environment_variables": {
"SENTRY_DSN": "https://*key*:*pass*@sentry.io/*project*",
...
},
"exception_handler": "zappa_sentry.unhandled_exceptions",
...
},
...
}
```
And that's all. Deploy your zappa function and you should see any errors appearing on sentry.
If you want the exception handler to catch the exception and report it without re-raising, just replace `zappa_sentry.unhandled_exceptions` with `zappa_sentry.capture_exceptions`. This version won't let the exceptions propagate.
# Adding extra information
Just add it to the scope as normal for the new sentry-sdk: https://docs.sentry.io/enriching-error-data/context/?platform=python
| zappa-sentry | /zappa_sentry-0.4.1.tar.gz/zappa_sentry-0.4.1/README.md | README.md |
<p align="center">
<img src="http://i.imgur.com/oePnHJn.jpg" alt="Zappa Rocks!"/>
</p>
## Zappa - Serverless Python
[](https://github.com/zappa/Zappa/actions/workflows/ci.yaml)
[](https://coveralls.io/github/zappa/Zappa)
[](https://pypi.python.org/pypi/zappa)
[](https://zappateam.slack.com/)
[](https://gun.io/)
[](https://patreon.com/zappa)
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [About](#about)
- [Installation and Configuration](#installation-and-configuration)
- [Running the Initial Setup / Settings](#running-the-initial-setup--settings)
- [Basic Usage](#basic-usage)
- [Initial Deployments](#initial-deployments)
- [Updates](#updates)
- [Docker Workflows](#docker-workflows)
- [Rollback](#rollback)
- [Scheduling](#scheduling)
- [Advanced Scheduling](#advanced-scheduling)
- [Undeploy](#undeploy)
- [Package](#package)
- [How Zappa Makes Packages](#how-zappa-makes-packages)
- [Template](#template)
- [Status](#status)
- [Tailing Logs](#tailing-logs)
- [Remote Function Invocation](#remote-function-invocation)
- [Django Management Commands](#django-management-commands)
- [SSL Certification](#ssl-certification)
- [Deploying to a Domain With AWS Certificate Manager](#deploying-to-a-domain-with-aws-certificate-manager)
- [Deploying to a Domain With a Let's Encrypt Certificate (DNS Auth)](#deploying-to-a-domain-with-a-lets-encrypt-certificate-dns-auth)
- [Deploying to a Domain With a Let's Encrypt Certificate (HTTP Auth)](#deploying-to-a-domain-with-a-lets-encrypt-certificate-http-auth)
- [Deploying to a Domain With Your Own SSL Certs](#deploying-to-a-domain-with-your-own-ssl-certs)
- [Executing in Response to AWS Events](#executing-in-response-to-aws-events)
- [Asynchronous Task Execution](#asynchronous-task-execution)
- [Catching Exceptions](#catching-exceptions)
- [Task Sources](#task-sources)
- [Direct Invocation](#direct-invocation)
- [Remote Invocations](#remote-invocations)
- [Restrictions](#restrictions)
- [Running Tasks in a VPC](#running-tasks-in-a-vpc)
- [Responses](#responses)
- [Advanced Settings](#advanced-settings)
- [YAML Settings](#yaml-settings)
- [Advanced Usage](#advanced-usage)
- [Keeping The Server Warm](#keeping-the-server-warm)
- [Serving Static Files / Binary Uploads](#serving-static-files--binary-uploads)
- [Enabling CORS](#enabling-cors)
- [Large Projects](#large-projects)
- [Enabling Bash Completion](#enabling-bash-completion)
- [Enabling Secure Endpoints on API Gateway](#enabling-secure-endpoints-on-api-gateway)
- [API Key](#api-key)
- [IAM Policy](#iam-policy)
- [API Gateway Lambda Authorizers](#api-gateway-lambda-authorizers)
- [Cognito User Pool Authorizer](#cognito-user-pool-authorizer)
- [API Gateway Resource Policy](#api-gateway-resource-policy)
- [Setting Environment Variables](#setting-environment-variables)
- [Local Environment Variables](#local-environment-variables)
- [Remote AWS Environment Variables](#remote-aws-environment-variables)
- [Remote Environment Variables](#remote-environment-variables)
- [Remote Environment Variables (via an S3 file)](#remote-environment-variables-via-an-s3-file)
- [API Gateway Context Variables](#api-gateway-context-variables)
- [Catching Unhandled Exceptions](#catching-unhandled-exceptions)
- [Using Custom AWS IAM Roles and Policies](#using-custom-aws-iam-roles-and-policies)
- [Custom AWS IAM Roles and Policies for Deployment](#custom-aws-iam-roles-and-policies-for-deployment)
- [Custom AWS IAM Roles and Policies for Execution](#custom-aws-iam-roles-and-policies-for-execution)
- [AWS X-Ray](#aws-x-ray)
- [Globally Available Server-less Architectures](#globally-available-server-less-architectures)
- [Raising AWS Service Limits](#raising-aws-service-limits)
- [Dead Letter Queues](#dead-letter-queues)
- [Unique Package ID](#unique-package-id)
- [Application Load Balancer Event Source](#application-load-balancer-event-source)
- [Endpoint Configuration](#endpoint-configuration)
- [Example Private API Gateway configuration](#example-private-api-gateway-configuration)
- [Cold Starts (Experimental)](#cold-starts-experimental)
- [Zappa Guides](#zappa-guides)
- [Zappa in the Press](#zappa-in-the-press)
- [Sites Using Zappa](#sites-using-zappa)
- [Related Projects](#related-projects)
- [Hacks](#hacks)
- [Contributing](#contributing)
- [Using a Local Repo](#using-a-local-repo)
- [Patrons](#patrons)
- [Support / Development / Training / Consulting](#support--development--training--consulting)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## About
<p align="center">
<a href="https://htmlpreview.github.io/?https://raw.githubusercontent.com/Miserlou/Talks/master/serverless-sf/big.quickstart.html"><img src="http://i.imgur.com/c23kDNT.png?1" alt="Zappa Slides"/></a>
</p>
<p align="center">
<i>In a hurry? Click to see <a href="https://htmlpreview.github.io/?https://raw.githubusercontent.com/Miserlou/Talks/master/serverless-sf/big.quickstart.html">(now slightly out-dated) slides from Serverless SF</a>!</i>
</p>
**Zappa** makes it super easy to build and deploy server-less, event-driven Python applications (including, but not limited to, WSGI web apps) on AWS Lambda + API Gateway. Think of it as "serverless" web hosting for your Python apps. That means **infinite scaling**, **zero downtime**, **zero maintenance** - and at a fraction of the cost of your current deployments!
If you've got a Python web app (including Django and Flask apps), it's as easy as:
```
$ pip install zappa
$ zappa init
$ zappa deploy
```
and now you're server-less! _Wow!_
> What do you mean "serverless"?
Okay, so there still is a server - but it only has a _40 millisecond_ life cycle! Serverless in this case means **"without any permanent infrastructure."**
With a traditional HTTP server, the server is online 24/7, processing requests one by one as they come in. If the queue of incoming requests grows too large, some requests will time out. With Zappa, **each request is given its own virtual HTTP "server"** by Amazon API Gateway. AWS handles the horizontal scaling automatically, so no requests ever time out. Each request then calls your application from a memory cache in AWS Lambda and returns the response via Python's WSGI interface. After your app returns, the "server" dies.
Better still, with Zappa you only pay for the milliseconds of server time that you use, so it's many **orders of magnitude cheaper** than VPS/PaaS hosts like Linode or Heroku - and in most cases, it's completely free. Plus, there's no need to worry about load balancing or keeping servers online ever again.
It's great for deploying serverless microservices with frameworks like Flask and Bottle, and for hosting larger web apps and CMSes with Django. Or, you can use any WSGI-compatible app you like! You **probably don't need to change your existing applications** to use it, and you're not locked into using it.
Zappa also lets you build hybrid event-driven applications that can scale to **trillions of events** a year with **no additional effort** on your part! You also get **free SSL certificates**, **global app deployment**, **API access management**, **automatic security policy generation**, **precompiled C-extensions**, **auto keep-warms**, **oversized Lambda packages**, and **many other exclusive features**!
And finally, Zappa is **super easy to use**. You can deploy your application with a single command out of the box!
__Awesome!__
<p align="center">
<img src="http://i.imgur.com/f1PJxCQ.gif" alt="Zappa Demo Gif"/>
</p>
## Installation and Configuration
_Before you begin, make sure you are running Python 3.6/3.7/3.8 and you have a valid AWS account and your [AWS credentials file](https://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs) is properly installed._
**Zappa** can easily be installed through pip, like so:
$ pip install zappa
Please note that Zappa _**must**_ be installed into your project's [virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs/). The virtual environment name should not be the same as the Zappa project name, as this may cause errors.
_(If you use [pyenv](https://github.com/yyuu/pyenv) and love to manage virtualenvs with **pyenv-virtualenv**, you just have to call `pyenv local [your_venv_name]` and it's ready. [Conda](http://conda.pydata.org/docs/) users should comment [here](https://github.com/Miserlou/Zappa/pull/108).)_
Next, you'll need to define your local and server-side settings.
### Running the Initial Setup / Settings
**Zappa** can automatically set up your deployment settings for you with the `init` command:
$ zappa init
This will automatically detect your application type (Flask/Django - Pyramid users [see here](https://github.com/Miserlou/Zappa/issues/278#issuecomment-241917956)) and help you define your deployment configuration settings. Once you finish initialization, you'll have a file named *zappa_settings.json* in your project directory defining your basic deployment settings. It will probably look something like this for most WSGI apps:
```javascript
{
// The name of your stage
"dev": {
// The name of your S3 bucket
"s3_bucket": "lambda",
// The modular python path to your WSGI application function.
// In Flask and Bottle, this is your 'app' object.
// Flask (your_module.py):
// app = Flask()
// Bottle (your_module.py):
// app = bottle.default_app()
"app_function": "your_module.app"
}
}
```
or for Django:
```javascript
{
"dev": { // The name of your stage
"s3_bucket": "lambda", // The name of your S3 bucket
"django_settings": "your_project.settings" // The python path to your Django settings.
}
}
```
_Psst: If you're deploying a Django application with Zappa for the first time, you might want to read Edgar Roman's [Django Zappa Guide](https://edgarroman.github.io/zappa-django-guide/)._
You can define as many stages as your like - we recommend having _dev_, _staging_, and _production_.
Now, you're ready to deploy!
## Basic Usage
### Initial Deployments
Once your settings are configured, you can package and deploy your application to a stage called "production" with a single command:
$ zappa deploy production
Deploying..
Your application is now live at: https://7k6anj0k99.execute-api.us-east-1.amazonaws.com/production
And now your app is **live!** How cool is that?!
To explain what's going on, when you call `deploy`, Zappa will automatically package up your application and local virtual environment into a Lambda-compatible archive, replace any dependencies with versions with wheels compatible with lambda, set up the function handler and necessary WSGI Middleware, upload the archive to S3, create and manage the necessary Amazon IAM policies and roles, register it as a new Lambda function, create a new API Gateway resource, create WSGI-compatible routes for it, link it to the new Lambda function, and finally delete the archive from your S3 bucket. Handy!
Be aware that the default IAM role and policy created for executing Lambda applies a liberal set of permissions.
These are most likely not appropriate for production deployment of important applications. See the section
[Custom AWS IAM Roles and Policies for Execution](#custom-aws-iam-roles-and-policies-for-execution) for more detail.
### Updates
If your application has already been deployed and you only need to upload new Python code, but not touch the underlying routes, you can simply:
$ zappa update production
Updating..
Your application is now live at: https://7k6anj0k99.execute-api.us-east-1.amazonaws.com/production
This creates a new archive, uploads it to S3 and updates the Lambda function to use the new code, but doesn't touch the API Gateway routes.
#### Docker Workflows
In [version 0.53.0](https://github.com/zappa/Zappa/blob/master/CHANGELOG.md), support was added to deploy & update Lambda functions using Docker. Refer to [the blog post](https://ianwhitestone.work/zappa-serverless-docker/) for more details about how to leverage this functionality, and when you may want to.
### Rollback
You can also `rollback` the deployed code to a previous version by supplying the number of revisions to return to. For instance, to rollback to the version deployed 3 versions ago:
$ zappa rollback production -n 3
### Scheduling
Zappa can be used to easily schedule functions to occur on regular intervals. This provides a much nicer, maintenance-free alternative to Celery!
These functions will be packaged and deployed along with your `app_function` and called from the handler automatically.
Just list your functions and the expression to schedule them using [cron or rate syntax](http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html) in your *zappa_settings.json* file:
```javascript
{
"production": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
}],
...
}
}
```
And then:
$ zappa schedule production
And now your function will execute every minute!
If you want to cancel these, you can simply use the `unschedule` command:
$ zappa unschedule production
And now your scheduled event rules are deleted.
See the [example](example/) for more details.
#### Advanced Scheduling
Sometimes a function needs multiple expressions to describe its schedule. To set multiple expressions, simply list your functions, and the list of expressions to schedule them using [cron or rate syntax](http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html) in your *zappa_settings.json* file:
```javascript
{
"production": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expressions": ["cron(0 20-23 ? * SUN-THU *)", "cron(0 0-8 ? * MON-FRI *)"] // When to execute it (in cron or rate format)
}],
...
}
}
```
This can be used to deal with issues arising from the UTC timezone crossing midnight during business hours in your local timezone.
It should be noted that overlapping expressions will not throw a warning, and should be checked for, to prevent duplicate triggering of functions.
### Undeploy
If you need to remove the API Gateway and Lambda function that you have previously published, you can simply:
$ zappa undeploy production
You will be asked for confirmation before it executes.
If you enabled CloudWatch Logs for your API Gateway service and you don't
want to keep those logs, you can specify the `--remove-logs` argument to purge the logs for your API Gateway and your Lambda function:
$ zappa undeploy production --remove-logs
### Package
If you want to build your application package without actually uploading and registering it as a Lambda function, you can use the `package` command:
$ zappa package production
If you have a `zip` callback in your `callbacks` setting, this will also be invoked.
```javascript
{
"production": { // The name of your stage
"callbacks": {
"zip": "my_app.zip_callback"// After creating the package
}
}
}
```
You can also specify the output filename of the package with `-o`:
$ zappa package production -o my_awesome_package.zip
#### How Zappa Makes Packages
Zappa will automatically package your active virtual environment into a package which runs smoothly on AWS Lambda.
During this process, it will replace any local dependencies with AWS Lambda compatible versions. Dependencies are included in this order:
* Lambda-compatible `manylinux` wheels from a local cache
* Lambda-compatible `manylinux` wheels from PyPI
* Packages from the active virtual environment
* Packages from the local project directory
It also skips certain unnecessary files, and ignores any .py files if .pyc files are available.
In addition, Zappa will also automatically set the correct execution permissions, configure package settings, and create a unique, auditable package manifest file.
To further reduce the final package file size, you can:
* Set `slim_handler` to `True` to upload a small handler to Lambda and the rest of the package to S3. For more details, see the [merged pull request](https://github.com/Miserlou/Zappa/pull/548) and the [discussion in the original issue](https://github.com/Miserlou/Zappa/issues/510). See also: [Large Projects](#large-projects).
* Use the `exclude` setting and provide a list of regex patterns to exclude from the archive. By default, Zappa will exclude Boto, because [it's already available in the Lambda execution environment](http://docs.aws.amazon.com/lambda/latest/dg/current-supported-versions.html).
### Template
Similarly to `package`, if you only want the API Gateway CloudFormation template, use the `template` command:
$ zappa template production --l your-lambda-arn -r your-role-arn
Note that you must supply your own Lambda ARN and Role ARNs in this case, as they may not have been created for you.
You can get the JSON output directly with `--json`, and specify the output file with `--output`.
### Status
If you need to see the status of your deployment and event schedules, simply use the `status` command.
$ zappa status production
### Tailing Logs
You can watch the logs of a deployment by calling the `tail` management command.
$ zappa tail production
By default, this will show all log items. In addition to HTTP and other events, anything `print`ed to `stdout` or `stderr` will be shown in the logs.
You can use the argument `--http` to filter for HTTP requests, which will be in the Apache Common Log Format.
$ zappa tail production --http
Similarly, you can do the inverse and only show non-HTTP events and log messages:
$ zappa tail production --non-http
If you don't like the default log colors, you can turn them off with `--no-color`.
You can also limit the length of the tail with `--since`, which accepts a simple duration string:
$ zappa tail production --since 4h # 4 hours
$ zappa tail production --since 1m # 1 minute
$ zappa tail production --since 1mm # 1 month
You can filter out the contents of the logs with `--filter`, like so:
$ zappa tail production --http --filter "POST" # Only show POST HTTP requests
Note that this uses the [CloudWatch Logs filter syntax](http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
To tail logs without following (to exit immediately after displaying the end of the requested logs), pass `--disable-keep-open`:
$ zappa tail production --since 1h --disable-keep-open
### Remote Function Invocation
You can execute any function in your application directly at any time by using the `invoke` command.
For instance, suppose you have a basic application in a file called "my_app.py", and you want to invoke a function in it called "my_function". Once your application is deployed, you can invoke that function at any time by calling:
$ zappa invoke production 'my_app.my_function'
Any remote print statements made and the value the function returned will then be printed to your local console. **Nifty!**
You can also invoke interpretable Python 3.6/3.7/3.8 strings directly by using `--raw`, like so:
$ zappa invoke production "print(1 + 2 + 3)" --raw
For instance, it can come in handy if you want to create your first `superuser` on a RDS database running in a VPC (like Serverless Aurora):
$ zappa invoke staging "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser('username', 'email', 'password')" --raw
### Django Management Commands
As a convenience, Zappa can also invoke remote Django 'manage.py' commands with the `manage` command. For instance, to perform the basic Django status check:
$ zappa manage production showmigrations admin
Obviously, this only works for Django projects which have their settings properly defined.
For commands which have their own arguments, you can also pass the command in as a string, like so:
$ zappa manage production "shell --version"
Commands which require direct user input, such as `createsuperuser`, should be [replaced by commands](http://stackoverflow.com/a/26091252) which use `zappa invoke <env> --raw`.
For more Django integration, take a look at the [zappa-django-utils](https://github.com/Miserlou/zappa-django-utils) project.
### SSL Certification
Zappa can be deployed to custom domain names and subdomains with custom SSL certificates, Let's Encrypt certificates, and [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) (ACM) certificates.
Currently, the easiest of these to use are the AWS Certificate Manager certificates, as they are free, self-renewing, and require the least amount of work.
Once configured as described below, all of these methods use the same command:
$ zappa certify
When deploying from a CI/CD system, you can use:
$ zappa certify --yes
to skip the confirmation prompt.
#### Deploying to a Domain With AWS Certificate Manager
Amazon provides their own free alternative to Let's Encrypt called [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) (ACM). To use this service with Zappa:
1. Verify your domain in the AWS Certificate Manager console.
2. In the console, select the N. Virginia (us-east-1) region and request a certificate for your domain or subdomain (`sub.yourdomain.tld`), or request a wildcard domain (`*.yourdomain.tld`).
3. Copy the entire ARN of that certificate and place it in the Zappa setting `certificate_arn`.
4. Set your desired domain in the `domain` setting.
5. Call `$ zappa certify` to create and associate the API Gateway distribution using that certificate.
#### Deploying to a Domain With a Let's Encrypt Certificate (DNS Auth)
If you want to use Zappa on a domain with a free Let's Encrypt certificate using automatic Route 53 based DNS Authentication, you can follow [this handy guide](https://github.com/zappa/Zappa/blob/master/docs/domain_with_free_ssl_dns.md).
#### Deploying to a Domain With a Let's Encrypt Certificate (HTTP Auth)
If you want to use Zappa on a domain with a free Let's Encrypt certificate using HTTP Authentication, you can follow [this guide](https://github.com/zappa/Zappa/blob/master/docs/domain_with_free_ssl_http.md).
However, it's now far easier to use Route 53-based DNS authentication, which will allow you to use a Let's Encrypt certificate with a single `$ zappa certify` command.
#### Deploying to a Domain With Your Own SSL Certs
1. The first step is to create a custom domain and obtain your SSL cert / key / bundle.
2. Ensure you have set the `domain` setting within your Zappa settings JSON - this will avoid problems with the Base Path mapping between the Custom Domain and the API invoke URL, which gets the Stage Name appended in the URI
3. Add the paths to your SSL cert / key / bundle to the `certificate`, `certificate_key`, and `certificate_chain` settings, respectively, in your Zappa settings JSON
4. Set `route53_enabled` to `false` if you plan on using your own DNS provider, and not an AWS Route53 Hosted zone.
5. Deploy or update your app using Zappa
6. Run `$ zappa certify` to upload your certificates and register the custom domain name with your API gateway.
## Executing in Response to AWS Events
Similarly, you can have your functions execute in response to events that happen in the AWS ecosystem, such as S3 uploads, DynamoDB entries, Kinesis streams, SNS messages, and SQS queues.
In your *zappa_settings.json* file, define your [event sources](http://docs.aws.amazon.com/lambda/latest/dg/invoking-lambda-function.html) and the function you wish to execute. For instance, this will execute `your_module.process_upload_function` in response to new objects in your `my-bucket` S3 bucket. Note that `process_upload_function` must accept `event` and `context` parameters.
```javascript
{
"production": {
...
"events": [{
"function": "your_module.process_upload_function",
"event_source": {
"arn": "arn:aws:s3:::my-bucket",
"events": [
"s3:ObjectCreated:*" // Supported event types: http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#supported-notification-event-types
]
}
}],
...
}
}
```
And then:
$ zappa schedule production
And now your function will execute every time a new upload appears in your bucket!
To access the key's information in your application context, you'll want `process_upload_function` to look something like this:
```python
import boto3
s3_client = boto3.client('s3')
def process_upload_function(event, context):
    """
    Process a file upload.

    :param event: S3 event payload; the first record describes the new object.
    :param context: Lambda context object (unused here).
    """
    # Get the uploaded file's information
    bucket = event['Records'][0]['s3']['bucket']['name'] # Will be `my-bucket`
    key = event['Records'][0]['s3']['object']['key'] # Will be the file path of whatever file was uploaded.
    # NOTE: keys in S3 event records arrive URL-encoded (spaces become '+');
    # decode with urllib.parse.unquote_plus if your keys can contain such characters.
    # Get the bytes from S3
    s3_client.download_file(bucket, key, '/tmp/' + key) # Download this file to writable tmp space.
    # Read in binary mode (uploads may not be text) and close the handle promptly,
    # since warm Lambda containers are reused between invocations.
    with open('/tmp/' + key, 'rb') as f:
        file_bytes = f.read()
```
Similarly, for a [Simple Notification Service](https://aws.amazon.com/sns/) event:
```javascript
"events": [
{
"function": "your_module.your_function",
"event_source": {
"arn": "arn:aws:sns:::your-event-topic-arn",
"events": [
"sns:Publish"
]
}
}
]
```
Optionally you can add [SNS message filters](http://docs.aws.amazon.com/sns/latest/dg/message-filtering.html):
```javascript
"events": [
{
"function": "your_module.your_function",
"event_source": {
"arn": "arn:aws:sns:::your-event-topic-arn",
"filters": {
"interests": ["python", "aws", "zappa"],
"version": ["1.0"]
},
...
}
}
]
```
[DynamoDB](http://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) and [Kinesis](http://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) are slightly different as it is not event based but pulling from a stream:
```javascript
"events": [
{
"function": "replication.replicate_records",
"event_source": {
"arn": "arn:aws:dynamodb:us-east-1:1234554:table/YourTable/stream/2016-05-11T00:00:00.000",
"starting_position": "TRIM_HORIZON", // Supported values: TRIM_HORIZON, LATEST
"batch_size": 50, // Max: 1000
"enabled": true // Default is false
}
}
]
```
[SQS](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) is also pulling messages from a stream. At this time, [only "Standard" queues can trigger lambda events, not "FIFO" queues](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html). Read the AWS Documentation carefully since Lambda calls the SQS DeleteMessage API on your behalf once your function completes successfully.
```javascript
"events": [
{
"function": "your_module.process_messages",
"event_source": {
"arn": "arn:aws:sqs:us-east-1:12341234:your-queue-name-arn",
"batch_size": 10, // Max: 10. Use 1 to trigger immediate processing
"enabled": true // Default is false
}
}
]
```
For configuring Lex Bot's intent triggered events:
```javascript
"bot_events": [
{
"function": "lexbot.handlers.book_appointment.handler",
"event_source": {
"arn": "arn:aws:lex:us-east-1:01234123123:intent:TestLexEventNames:$LATEST", // optional. In future it will be used to configure the intent
"intent":"intentName", // name of the bot event configured
"invocation_source":"DialogCodeHook", // either FulfillmentCodeHook or DialogCodeHook
}
}
]
```
Events can also take keyword arguments:
```javascript
"events": [
{
"function": "your_module.your_recurring_function", // The function to execute
"kwargs": {"key": "val", "key2": "val2"}, // Keyword arguments to pass. These are available in the event
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
}
]
```
To get the keyword arguments you will need to look inside the event dictionary:
```python
def your_recurring_function(event, context):
    """Scheduled task handler: pull the keyword arguments configured in zappa_settings."""
    my_kwargs = event.get("kwargs")  # dict of kwargs given in zappa_settings file
```
You can find more [example event sources here](http://docs.aws.amazon.com/lambda/latest/dg/eventsources.html).
## Asynchronous Task Execution
Zappa also now offers the ability to seamlessly execute functions asynchronously in a completely separate AWS Lambda instance!
For example, if you have a Flask API for ordering a pie, you can call your `bake` function seamlessly in a completely separate Lambda instance by using the `zappa.asynchronous.task` decorator like so:
```python
from flask import Flask
from zappa.asynchronous import task
app = Flask(__name__)
@task
def make_pie():
    """ This takes a long time! """
    # Same pipeline, written as one nested call:
    # gather the ingredients, bake them, then deliver the finished pie.
    deliver(bake(get_ingredients()))
@app.route('/api/order/pie')
def order_pie():
    """ This returns immediately! """
    # Dispatched to a separate Lambda instance; does not block this request.
    make_pie()
    return "Your pie is being made!"
```
And that's it! Your API response will return immediately, while the `make_pie` function executes in a completely different Lambda instance.
When calls to @task decorated functions or the zappa.asynchronous.run command occur outside of Lambda, such as your local dev environment,
the functions will execute immediately and locally. The zappa asynchronous functionality only works
when in the Lambda environment or when specifying [Remote Invocations](https://github.com/zappa/zappa#remote-invocations).
### Catching Exceptions
Putting a try..except block on an asynchronous task like this:
```python
@task
def make_pie():
    try:
        ingredients = get_ingredients()
        pie = bake(ingredients)
        deliver(pie)
    except Fault as error:
        """send an email"""
        ...
        # Returning a Response object here makes the Lambda invocation look failed,
        # so AWS retries it and this fault handler runs again (duplicate email).
        return Response('Web services down', status=503)
```
will cause an email to be sent twice for the same error. See [asynchronous retries at AWS](https://docs.aws.amazon.com/lambda/latest/dg/retries-on-errors.html). To work around this side-effect, and have the fault handler execute only once, change the return value to:
```python
@task
def make_pie():
    try:
        """code block"""
    except Fault as error:
        """send an email"""
        ...
        # Returning a plain serializable value marks the invocation successful,
        # so AWS performs no retry and the fault handler executes only once.
        return {} #or return True
```
### Task Sources
By default, this feature uses direct AWS Lambda invocation. You can instead use AWS Simple Notification Service as the task event source by using the `task_sns` decorator, like so:
```python
from zappa.asynchronous import task_sns
@task_sns
```
Using SNS also requires setting the following settings in your `zappa_settings`:
```javascript
{
"dev": {
..
"async_source": "sns", // Source of async tasks. Defaults to "lambda"
"async_resources": true, // Create the SNS topic to use. Defaults to true.
..
}
}
```
This will automatically create and subscribe to the SNS topic the code will use when you call the `zappa schedule` command.
Using SNS will also return a message ID in case you need to track your invocations.
### Direct Invocation
You can also use this functionality without a decorator by passing your function to `zappa.asynchronous.run`, like so:
```python
from zappa.asynchronous import run
run(your_function, args, kwargs) # Using Lambda
run(your_function, args, kwargs, service='sns') # Using SNS
```
### Remote Invocations
By default, Zappa will use lambda's current function name and current AWS region. If you wish to invoke a lambda with
a different function name/region or invoke your lambda from outside of lambda, you must specify the
`remote_aws_lambda_function_name` and `remote_aws_region` arguments so that the application knows which function and
region to use. For example, if some part of our pizza making application had to live on an EC2 instance, but we
wished to call the make_pie() function on its own Lambda instance, we would do it as follows:
```python
@task(remote_aws_lambda_function_name='pizza-pie-prod', remote_aws_region='us-east-1')
def make_pie():
    """ This takes a long time! """
    # Identical workflow to the local version, collapsed into a single expression.
    deliver(bake(get_ingredients()))
```
If those task() parameters were not used, then EC2 would execute the function locally. These same
`remote_aws_lambda_function_name` and `remote_aws_region` arguments can be used on the zappa.asynchronous.run() function as well.
### Restrictions
The following restrictions to this feature apply:
* Functions must have a clean import path -- i.e. no closures, lambdas, or methods.
* `args` and `kwargs` must be JSON-serializable.
* The JSON-serialized arguments must be within the size limits for Lambda (256K) or SNS (256K) events.
All of this code is still backwards-compatible with non-Lambda environments - it simply executes in a blocking fashion and returns the result.
### Running Tasks in a VPC
If you're running Zappa in a Virtual Private Cloud (VPC), you'll need to configure your subnets to allow your lambda to communicate with services inside your VPC as well as the public Internet. A minimal setup requires two subnets.
In __subnet-a__:
* Create a NAT
* Create an Internet gateway
* In the route table, create a route pointing the Internet gateway to 0.0.0.0/0.
In __subnet-b__:
* Place your lambda function
* In the route table, create a route pointing the NAT that belongs to __subnet-a__ to 0.0.0.0/0.
You can place your lambda in multiple subnets that are configured the same way as __subnet-b__ for high availability.
Some helpful resources are [this tutorial](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Tutorials.WebServerDB.CreateVPC.html), [this other tutorial](https://gist.github.com/reggi/dc5f2620b7b4f515e68e46255ac042a7) and [this AWS doc page](http://docs.aws.amazon.com/lambda/latest/dg/vpc.html#vpc-internet).
### Responses
It is possible to capture the responses of Asynchronous tasks.
Zappa uses DynamoDB as the backend for these.
To capture responses, you must configure a `async_response_table` in `zappa_settings`. This is the DynamoDB table name. Then, when decorating with `@task`, pass `capture_response=True`.
Async responses are assigned a `response_id`. This is returned as a property of the `LambdaAsyncResponse` (or `SnsAsyncResponse`) object that is returned by the `@task` decorator.
Example:
```python
from zappa.asynchronous import task, get_async_response
from flask import Flask, make_response, abort, url_for, redirect, request, jsonify
from time import sleep
app = Flask(__name__)
@app.route('/payload')
def payload():
    """Kick off the long-running task, then send the client to its status page."""
    delay = request.args.get('delay', 60)
    task_handle = longrunner(delay)
    return redirect(url_for('response', response_id=task_handle.response_id))
@app.route('/async-response/<response_id>')
def response(response_id):
    """Poll for a captured async result, redirecting the client until it is ready."""
    result = get_async_response(response_id)
    if result is None:
        abort(404)
    if result['status'] == 'complete':
        return jsonify(result['response'])
    # Not done yet -- wait briefly, then bounce the client back around.
    sleep(5)
    headers = {
        'Content-Type': 'text/plain; charset=utf-8',
        'Location': url_for('response', response_id=response_id, backoff=5),
        'X-redirect-reason': "Not yet ready.",
    }
    return "Not yet ready. Redirecting.", 302, headers
@task(capture_response=True)
def longrunner(delay):
    """Sleep for *delay* seconds, then return a message reporting the delay."""
    sleep(float(delay))
    message = "It took {} seconds to generate this.".format(delay)
    return {'MESSAGE': message}
```
## Advanced Settings
There are other settings that you can define in your local settings
to change Zappa's behavior. Use these at your own risk!
```javascript
{
"dev": {
"alb_enabled": false, // enable provisioning of application load balancing resources. If set to true, you _must_ fill out the alb_vpc_config option as well.
"alb_vpc_config": {
"CertificateArn": "your_acm_certificate_arn", // ACM certificate ARN for ALB
"SubnetIds": [], // list of subnets for ALB
"SecurityGroupIds": [] // list of security groups for ALB
},
"api_key_required": false, // enable securing API Gateway endpoints with x-api-key header (default False)
"api_key": "your_api_key_id", // optional, use an existing API key. The option "api_key_required" must be true to apply
"apigateway_enabled": true, // Set to false if you don't want to create an API Gateway resource. Default true.
"apigateway_description": "My funky application!", // Define a custom description for the API Gateway console. Default None.
"assume_policy": "my_assume_policy.json", // optional, IAM assume policy JSON file
"attach_policy": "my_attach_policy.json", // optional, IAM attach policy JSON file
"apigateway_policy": "my_apigateway_policy.json", // optional, API Gateway resource policy JSON file
"async_source": "sns", // Source of async tasks. Defaults to "lambda"
"async_resources": true, // Create the SNS topic and DynamoDB table to use. Defaults to true.
"async_response_table": "your_dynamodb_table_name", // the DynamoDB table name to use for captured async responses; defaults to None (can't capture)
"async_response_table_read_capacity": 1, // DynamoDB table read capacity; defaults to 1
"async_response_table_write_capacity": 1, // DynamoDB table write capacity; defaults to 1
"aws_endpoint_urls": { "aws_service_name": "endpoint_url" }, // a dictionary of endpoint_urls that emulate the appropriate service. Usually used for testing, for instance with `localstack`.
"aws_environment_variables" : {"your_key": "your_value"}, // A dictionary of environment variables that will be available to your deployed app via AWS Lambdas native environment variables. See also "environment_variables" and "remote_env" . Default {}.
"aws_kms_key_arn": "your_aws_kms_key_arn", // Your AWS KMS Key ARN
"aws_region": "aws-region-name", // optional, uses region set in profile or environment variables if not set here,
"binary_support": true, // Enable automatic MIME-type based response encoding through API Gateway. Default true.
"callbacks": { // Call custom functions during the local Zappa deployment/update process
"settings": "my_app.settings_callback", // After loading the settings
"zip": "my_app.zip_callback", // After creating the package
"post": "my_app.post_callback", // After command has executed
},
"cache_cluster_enabled": false, // Use APIGW cache cluster (default False)
"cache_cluster_size": 0.5, // APIGW Cache Cluster size (default 0.5)
"cache_cluster_ttl": 300, // APIGW Cache Cluster time-to-live (default 300)
"cache_cluster_encrypted": false, // Whether or not APIGW Cache Cluster encrypts data (default False)
"certificate": "my_cert.crt", // SSL certificate file location. Used to manually certify a custom domain
"certificate_key": "my_key.key", // SSL key file location. Used to manually certify a custom domain
"certificate_chain": "my_cert_chain.pem", // SSL certificate chain file location. Used to manually certify a custom domain
"certificate_arn": "arn:aws:acm:us-east-1:1234512345:certificate/aaaa-bbb-cccc-dddd", // ACM certificate ARN (needs to be in us-east-1 region).
"cloudwatch_log_level": "OFF", // Enables/configures a level of logging for the given staging. Available options: "OFF", "INFO", "ERROR", default "OFF".
"cloudwatch_data_trace": false, // Logs all data about received events. Default false.
"cloudwatch_metrics_enabled": false, // Additional metrics for the API Gateway. Default false.
"cognito": { // for Cognito event triggers
"user_pool": "user-pool-id", // User pool ID from AWS Cognito
"triggers": [{
"source": "PreSignUp_SignUp", // triggerSource from http://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html#cognito-user-pools-lambda-trigger-syntax-pre-signup
"function": "my_app.pre_signup_function"
}]
},
"context_header_mappings": { "HTTP_header_name": "API_Gateway_context_variable" }, // A dictionary mapping HTTP header names to API Gateway context variables
"cors": false, // Enable Cross-Origin Resource Sharing. Default false. If true, simulates the "Enable CORS" button on the API Gateway console. Can also be a dictionary specifying lists of "allowed_headers", "allowed_methods", and string of "allowed_origin"
"dead_letter_arn": "arn:aws:<sns/sqs>:::my-topic/queue", // Optional Dead Letter configuration for when Lambda async invoke fails thrice
"debug": true, // Print Zappa configuration errors tracebacks in the 500. Default true.
"delete_local_zip": true, // Delete the local zip archive after code updates. Default true.
"delete_s3_zip": true, // Delete the s3 zip archive. Default true.
"django_settings": "your_project.production_settings", // The modular path to your Django project's settings. For Django projects only.
"domain": "yourapp.yourdomain.com", // Required if you're using a domain
"base_path": "your-base-path", // Optional base path for API gateway custom domain base path mapping. Default None. Not supported for use with Application Load Balancer event sources.
"environment_variables": {"your_key": "your_value"}, // A dictionary of environment variables that will be available to your deployed app. See also "remote_env" and "aws_environment_variables". Default {}.
"events": [
{ // Recurring events
"function": "your_module.your_recurring_function", // The function to execute
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
},
{ // AWS Reactive events
"function": "your_module.your_reactive_function", // The function to execute
"event_source": {
"arn": "arn:aws:s3:::my-bucket", // The ARN of this event source
"events": [
"s3:ObjectCreated:*" // The specific event to execute in response to.
]
}
}
],
"endpoint_configuration": ["EDGE", "REGIONAL", "PRIVATE"], // Specify APIGateway endpoint None (default) or list `EDGE`, `REGION`, `PRIVATE`
"exception_handler": "your_module.report_exception", // function that will be invoked in case Zappa sees an unhandled exception raised from your code
"exclude": ["*.gz", "*.rar"], // A list of regex patterns to exclude from the archive. To exclude boto3 and botocore (available in an older version on Lambda), add "boto3*" and "botocore*".
"extends": "stage_name", // Duplicate and extend another stage's settings. For example, `dev-asia` could extend from `dev-common` with a different `s3_bucket` value.
"extra_permissions": [{ // Attach any extra permissions to this policy. Default None
"Effect": "Allow",
"Action": ["rekognition:*"], // AWS Service ARN
"Resource": "*"
}],
"iam_authorization": false, // optional, use IAM to require request signing. Default false. Note that enabling this will override the authorizer configuration.
"include": ["your_special_library_to_load_at_handler_init"], // load special libraries into PYTHONPATH at handler init that certain modules cannot find on path
"authorizer": {
"function": "your_module.your_auth_function", // Local function to run for token validation. For more information about the function see below.
"arn": "arn:aws:lambda:<region>:<account_id>:function:<function_name>", // Existing Lambda function to run for token validation.
"result_ttl": 300, // Optional. Default 300. The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches authorizer results. Currently, the maximum TTL value is 3600 seconds.
"token_header": "Authorization", // Optional. Default 'Authorization'. The name of a custom authorization header containing the token that clients submit as part of their requests.
"validation_expression": "^Bearer \\w+$", // Optional. A validation expression for the incoming token, specify a regular expression.
},
"keep_warm": true, // Create CloudWatch events to keep the server warm. Default true. To remove, set to false and then `unschedule`.
"keep_warm_expression": "rate(4 minutes)", // How often to execute the keep-warm, in cron and rate format. Default 4 minutes.
"lambda_description": "Your Description", // However you want to describe your project for the AWS console. Default "Zappa Deployment".
"lambda_handler": "your_custom_handler", // The name of Lambda handler. Default: handler.lambda_handler
"layers": ["arn:aws:lambda:<region>:<account_id>:layer:<layer_name>:<layer_version>"], // optional lambda layers
"lambda_concurrency": 10, // Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level. Default is None.
"lets_encrypt_key": "s3://your-bucket/account.key", // Let's Encrypt account key path. Can either be an S3 path or a local file path.
"log_level": "DEBUG", // Set the Zappa log level. Can be one of CRITICAL, ERROR, WARNING, INFO and DEBUG. Default: DEBUG
"manage_roles": true, // Have Zappa automatically create and define IAM execution roles and policies. Default true. If false, you must define your own IAM Role and role_name setting.
"memory_size": 512, // Lambda function memory in MB. Default 512.
"num_retained_versions":null, // Indicates the number of old versions to retain for the lambda. If absent, keeps all the versions of the function.
"payload_compression": true, // Whether or not to enable API gateway payload compression (default: true)
"payload_minimum_compression_size": 0, // The threshold size (in bytes) below which payload compression will not be applied (default: 0)
"prebuild_script": "your_module.your_function", // Function to execute before uploading code
"profile_name": "your-profile-name", // AWS profile credentials to use. Default 'default'. Removing this setting will use the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables instead.
"project_name": "MyProject", // The name of the project as it appears on AWS. Defaults to a slugified `pwd`.
"remote_env": "s3://my-project-config-files/filename.json", // optional file in s3 bucket containing a flat json object which will be used to set custom environment variables.
"role_name": "MyLambdaRole", // Name of Zappa execution role. Default <project_name>-<env>-ZappaExecutionRole. To use a different, pre-existing policy, you must also set manage_roles to false.
"role_arn": "arn:aws:iam::12345:role/app-ZappaLambdaExecutionRole", // ARN of Zappa execution role. Default to None. To use a different, pre-existing policy, you must also set manage_roles to false. This overrides role_name. Use with temporary credentials via GetFederationToken.
"route53_enabled": true, // Have Zappa update your Route53 Hosted Zones when certifying with a custom domain. Default true.
"runtime": "python3.6", // Python runtime to use on Lambda. Can be one of "python3.6", "python3.7" or "python3.8". Defaults to whatever the current Python being used is.
"s3_bucket": "dev-bucket", // Zappa zip bucket,
"slim_handler": false, // Useful if project >50M. Set true to just upload a small handler to Lambda and load actual project from S3 at runtime. Default false.
"settings_file": "~/Projects/MyApp/settings/dev_settings.py", // Server side settings file location,
"tags": { // Attach additional tags to AWS Resources
"Key": "Value", // Example Key and value
"Key2": "Value2",
},
"timeout_seconds": 30, // Maximum lifespan for the Lambda function (default 30, max 900.)
"touch": true, // GET the production URL upon initial deployment (default True)
"touch_path": "/", // The endpoint path to GET when checking the initial deployment (default "/")
"use_precompiled_packages": true, // If possible, use C-extension packages which have been pre-compiled for AWS Lambda. Default true.
"vpc_config": { // Optional Virtual Private Cloud (VPC) configuration for Lambda function
"SubnetIds": [ "subnet-12345678" ], // Note: not all availability zones support Lambda!
"SecurityGroupIds": [ "sg-12345678" ]
},
"xray_tracing": false // Optional, enable AWS X-Ray tracing on your lambda function.
}
}
```
#### YAML Settings
If you prefer YAML over JSON, you can also use a `zappa_settings.yml`, like so:
```yaml
---
dev:
app_function: your_module.your_app
s3_bucket: your-code-bucket
events:
- function: your_module.your_function
event_source:
arn: arn:aws:s3:::your-event-bucket
events:
- s3:ObjectCreated:*
```
You can also supply a custom settings file at any time with the `-s` argument, ex:
```
$ zappa deploy dev -s my-custom-settings.yml
```
Similarly, you can supply a `zappa_settings.toml` file:
```toml
[dev]
app_function = "your_module.your_app"
s3_bucket = "your-code-bucket"
```
## Advanced Usage
### Keeping The Server Warm
Zappa will automatically set up a regularly occurring execution of your application in order to keep the Lambda function warm. This can be disabled via the `keep_warm` setting.
#### Serving Static Files / Binary Uploads
Zappa is now able to serve and receive binary files, as detected by their MIME-type.
However, generally Zappa is designed for running your application code, not for serving static web assets. If you plan on serving custom static assets in your web application (CSS/JavaScript/images/etc.,), you'll likely want to use a combination of AWS S3 and AWS CloudFront.
Your web application framework will likely be able to handle this for you automatically. For Flask, there is [Flask-S3](https://github.com/e-dard/flask-s3), and for Django, there is [Django-Storages](https://django-storages.readthedocs.io/en/latest/).
Similarly, you may want to design your application so that static binary uploads go [directly to S3](http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/browser-examples.html#Uploading_a_local_file_using_the_File_API), which then triggers an event response defined in your `events` setting! That's thinking serverlessly!
### Enabling CORS
The simplest way to enable CORS (Cross-Origin Resource Sharing) for your Zappa application is to set `cors` to `true` in your Zappa settings file and update, which is the equivalent of pushing the "Enable CORS" button in the AWS API Gateway console. This is disabled by default, but you may wish to enable it for APIs which are accessed from other domains, etc.
You can also simply handle CORS directly in your application. Your web framework will probably have an extension to do this, such as [django-cors-headers](https://github.com/ottoyiu/django-cors-headers) or [Flask-CORS](https://github.com/corydolphin/flask-cors). Using these will make your code more portable.
### Large Projects
AWS currently limits Lambda zip sizes to 50 megabytes. If your project is larger than that, set `slim_handler: true` in your `zappa_settings.json`. In this case, your fat application package will be replaced with a small handler-only package. The handler file then pulls the rest of the large project down from S3 at run time! The initial load of the large project may add to startup overhead, but the difference should be minimal on a warm lambda function. Note that this will also eat into the storage space of your application function. Note that AWS currently [limits](https://docs.aws.amazon.com/lambda/latest/dg/limits.html) the `/tmp` directory storage to 512 MB, so your project must still be smaller than that.
### Enabling Bash Completion
Bash completion can be enabled by adding the following to your .bashrc:
```bash
eval "$(register-python-argcomplete zappa)"
```
`register-python-argcomplete` is provided by the argcomplete Python package. If this package was installed in a virtualenv
then the command must be run there. Alternatively you can execute:
activate-global-python-argcomplete --dest=- > file
The file's contents should then be sourced in e.g. ~/.bashrc.
### Enabling Secure Endpoints on API Gateway
#### API Key
You can use the `api_key_required` setting to generate an API key to all the routes of your API Gateway. The process is as follows:
1. Deploy/redeploy (update won't work) and write down the *id* for the key that has been created
2. Go to AWS console > Amazon API Gateway and
* select "API Keys" and find the key *value* (for example `key_value`)
* select "Usage Plans", create a new usage plan and link the API Key and the API that Zappa has created for you
3. Send a request where you pass the key value as a header called `x-api-key` to access the restricted endpoints (for example with curl: `curl --header "x-api-key: key_value"`). Note that without the x-api-key header, you will receive a 403.
#### IAM Policy
You can enable IAM-based (v4 signing) authorization on an API by setting the `iam_authorization` setting to `true`. Your API will then require signed requests and access can be controlled via [IAM policy](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-iam-policy-examples.html). Unsigned requests will receive a 403 response, as will requesters who are not authorized to access the API. Enabling this will override the Authorizer configuration (see below).
#### API Gateway Lambda Authorizers
If you deploy an API endpoint with Zappa, you can take advantage of [API Gateway Lambda Authorizers](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html) to implement a token-based authentication - all you need to do is to provide a function to create the required output, Zappa takes care of the rest. A good start for the function is the [AWS Labs blueprint example](https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints/blob/master/blueprints/python/api-gateway-authorizer-python.py).
If you are wondering for what you would use an Authorizer, here are some potential use cases:
1. Call out to OAuth provider
2. Decode a JWT token inline
3. Lookup in a self-managed DB (for example DynamoDB)
Zappa can be configured to call a function inside your code to do the authorization, or to call some other existing lambda function (which lets you share the authorizer between multiple lambdas). You control the behavior by specifying either the `arn` or `function_name` values in the `authorizer` settings block.
For example, to get the Cognito identity, add this to a `zappa_settings.yaml`:
```yaml
context_header_mappings:
user_id: authorizer.user_id
```
Which can now be accessed in Flask like this:
```python
from flask import request
# Flask routes are registered through the app instance's route decorator,
# and a def always needs a parameter list -- the original snippet had neither.
@app.route('/hello')
def hello_world():
    # user_id was injected as a request header via context_header_mappings above.
    print(request.headers.get('user_id'))
```
#### Cognito User Pool Authorizer
You can also use AWS Cognito User Pool Authorizer by adding:
```javascript
{
"authorizer": {
"type": "COGNITO_USER_POOLS",
"provider_arns": [
"arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}"
]
}
}
```
#### API Gateway Resource Policy
You can also use API Gateway Resource Policies. Example of IP Whitelisting:
```javascript
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"1.2.3.4/32"
]
}
}
}
]
}
```
### Setting Environment Variables
#### Local Environment Variables
If you want to set local environment variables for a deployment stage, you can simply set them in your `zappa_settings.json`:
```javascript
{
"dev": {
...
"environment_variables": {
"your_key": "your_value"
}
},
...
}
```
You can then access these inside your application with:
```python
import os
your_value = os.environ.get('your_key')
```
If your project needs to be aware of the type of environment you're deployed to, you'll also be able to get `SERVERTYPE` (AWS Lambda), `FRAMEWORK` (Zappa), `PROJECT` (your project name) and `STAGE` (_dev_, _production_, etc.) variables at any time.
#### Remote AWS Environment Variables
If you want to use native AWS Lambda environment variables you can use the `aws_environment_variables` configuration setting. These are useful as you can easily change them via the AWS Lambda console or cli at runtime. They are also useful for storing sensitive credentials and to take advantage of KMS encryption of environment variables.
During development, you can add your Zappa defined variables to your locally running app by, for example, using the below (for Django, to manage.py).
```python
if 'SERVERTYPE' in os.environ and os.environ['SERVERTYPE'] == 'AWS Lambda':
    # Running on Lambda via Zappa: mirror the stage's settings-file
    # environment variables into this process as well.
    import json
    import os
    # Use a context manager so the settings file handle is closed after loading
    # (the original left the file object open for the life of the process).
    with open('zappa_settings.json') as json_data:
        env_vars = json.load(json_data)['dev']['environment_variables']
    for key, val in env_vars.items():
        os.environ[key] = val
```
#### Remote Environment Variables
Any environment variables that you have set outside of Zappa (via AWS Lambda console or cli) will remain as they are when running `update`, unless they are also in `aws_environment_variables`, in which case the remote value will be overwritten by the one in the settings file. If you are using KMS-encrypted AWS environment variables, you can set your KMS Key ARN in the `aws_kms_key_arn` setting. Make sure that the values you set are encrypted in such case.
_Note: if you rely on these as well as `environment_variables`, and you have the same key names, then those in `environment_variables` will take precedence as they are injected in the lambda handler._
#### Remote Environment Variables (via an S3 file)
_S3 remote environment variables were added to Zappa before AWS introduced native environment variables for Lambda (via the console and cli). Before going down this route, check if the above makes more sense for your use case._
If you want to use remote environment variables to configure your application (which is especially useful for things like sensitive credentials), you can create a file and place it in an S3 bucket to which your Zappa application has access. To do this, add the `remote_env` key to zappa_settings pointing to a file containing a flat JSON object, so that each key-value pair on the object will be set as an environment variable and value whenever a new lambda instance spins up.
For example, to ensure your application has access to the database credentials without storing them in your version control, you can add a file to S3 with the connection string and load it into the lambda environment using the `remote_env` configuration setting.
super-secret-config.json (uploaded to my-config-bucket):
```javascript
{
"DB_CONNECTION_STRING": "super-secret:database"
}
```
zappa_settings.json:
```javascript
{
"dev": {
...
"remote_env": "s3://my-config-bucket/super-secret-config.json",
},
...
}
```
Now in your application you can use:
```python
import os
db_string = os.environ.get('DB_CONNECTION_STRING')
```
### API Gateway Context Variables
If you want to map an API Gateway context variable (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html) to an HTTP header you can set up the mapping in `zappa_settings.json`:
```javascript
{
"dev": {
...
"context_header_mappings": {
"HTTP_header_name": "API_Gateway_context_variable"
}
},
...
}
```
For example, if you want to expose the $context.identity.cognitoIdentityId variable as the HTTP header CognitoIdentityId, and $context.stage as APIStage, you would have:
```javascript
{
"dev": {
...
"context_header_mappings": {
"CognitoIdentityId": "identity.cognitoIdentityId",
"APIStage": "stage"
}
},
...
}
```
### Catching Unhandled Exceptions
By default, if an _unhandled_ exception happens in your code, Zappa will just print the stacktrace into a CloudWatch log. If you wish to use an external reporting tool to take note of those exceptions, you can use the `exception_handler` configuration option.
zappa_settings.json:
```javascript
{
"dev": {
...
"exception_handler": "your_module.unhandled_exceptions",
},
...
}
```
The function has to accept three arguments: exception, event, and context:
your_module.py
```python
def unhandled_exceptions(e, event, context):
send_to_raygun(e, event) # gather data you need and send
return True # Prevent invocation retry
```
You may still need a similar exception handler inside your application, this is just a way to catch exception which happen at the Zappa/WSGI layer (typically event-based invocations, misconfigured settings, bad Lambda packages, and permissions issues).
By default, AWS Lambda will attempt to retry an event-based (non-API Gateway, e.g. CloudWatch) invocation if an exception has been thrown. However, you can prevent this by returning True, as in the example above, so that Zappa will not re-raise the uncaught exception, thus preventing AWS Lambda from retrying the current invocation.
### Using Custom AWS IAM Roles and Policies
#### Custom AWS IAM Roles and Policies for Deployment
You can specify which _local_ profile to use for deploying your Zappa application by defining
the `profile_name` setting, which will correspond to a profile in your AWS credentials file.
#### Custom AWS IAM Roles and Policies for Execution
The default IAM policy created by Zappa for executing the Lambda is very permissive.
It grants access to all actions for
all resources for types CloudWatch, S3, Kinesis, SNS, SQS, DynamoDB, and Route53; lambda:InvokeFunction
for all Lambda resources; Put to all X-Ray resources; and all Network Interface operations to all EC2
resources. While this allows most Lambdas to work correctly with no extra permissions, it is
generally not an acceptable set of permissions for most continuous integration pipelines or
production deployments. Instead, you will probably want to manually manage your IAM policies.
To manually define the policy of your Lambda execution role, you must set *manage_roles* to false and define
either the *role_name* or *role_arn* in your Zappa settings file.
```javascript
{
"dev": {
...
"manage_roles": false, // Disable Zappa client managing roles.
"role_name": "MyLambdaRole", // Name of your Zappa execution role. Optional, default: <project_name>-<env>-ZappaExecutionRole.
"role_arn": "arn:aws:iam::12345:role/app-ZappaLambdaExecutionRole", // ARN of your Zappa execution role. Optional.
...
},
...
}
```
Ongoing discussion about the minimum policy requirements necessary for a Zappa deployment [can be found here](https://github.com/Miserlou/Zappa/issues/244).
A more robust solution to managing these entitlements will likely be implemented soon.
To add permissions to the default Zappa execution policy, use the `extra_permissions` setting:
```javascript
{
"dev": {
...
"extra_permissions": [{ // Attach any extra permissions to this policy.
"Effect": "Allow",
"Action": ["rekognition:*"], // AWS Service ARN
"Resource": "*"
}]
},
...
}
```
### AWS X-Ray
Zappa can enable [AWS X-Ray](https://aws.amazon.com/xray/) support on your function with a configuration setting:
```javascript
{
"dev": {
...
"xray_tracing": true
},
...
}
```
This will enable it on the Lambda function and allow you to instrument your code with X-Ray.
For example, with Flask:
```python
from aws_xray_sdk.core import xray_recorder
app = Flask(__name__)
xray_recorder.configure(service='my_app_name')
@route('/hello')
@xray_recorder.capture('hello')
def hello_world():
return 'Hello'
```
You may use the capture decorator to create subsegments around functions, or `xray_recorder.begin_subsegment('subsegment_name')` and `xray_recorder.end_subsegment()` within a function. The official [X-Ray documentation for Python](http://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/) has more information on how to use this with your code.
Note that you may create subsegments in your code but an exception will be raised if you try to create a segment, as it is [created by the lambda worker](https://github.com/aws/aws-xray-sdk-python/issues/2). This also means that if you use Flask you must not use the [XRayMiddleware the documentation suggests](https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-python-middleware.html).
### Globally Available Server-less Architectures
<p align="center">
<a href="https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0"><img src="http://i.imgur.com/oR61Qau.png" alt="Global Zappa Slides"/></a>
</p>
<p align="center">
<i>Click to see <a href="https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0">slides from ServerlessConf London</a>!</i>
</p>
During the `init` process, you will be given the option to deploy your application "globally." This will allow you to deploy your application to all available AWS regions simultaneously in order to provide a consistent global speed, increased redundancy, data isolation, and legal compliance. You can also choose to deploy only to "primary" locations, the AWS regions with `-1` in their names.
To learn more about these capabilities, see [these slides](https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0) from ServerlessConf London.
### Raising AWS Service Limits
Out of the box, AWS sets a limit of [1000 concurrent executions](http://docs.aws.amazon.com/lambda/latest/dg/limits.html) for your functions. If you start to breach these limits, you may start to see errors like `ClientError: An error occurred (LimitExceededException) when calling the PutTargets.."` or something similar.
To avoid this, you can file a [service ticket](https://console.aws.amazon.com/support/home#/) with Amazon to raise your limits up to the many tens of thousands of concurrent executions which you may need. This is a fairly common practice with Amazon, designed to prevent you from accidentally creating extremely expensive bug reports. So, before raising your service limits, make sure that you don't have any rogue scripts which could accidentally create tens of thousands of parallel executions that you don't want to pay for.
### Dead Letter Queues
If you want to utilise [AWS Lambda's Dead Letter Queue feature](http://docs.aws.amazon.com/lambda/latest/dg/dlq.html) simply add the key `dead_letter_arn`, with the value being the complete ARN to the corresponding SNS topic or SQS queue in your `zappa_settings.json`.
You must have already created the corresponding SNS/SQS topic/queue, and the Lambda function execution role must have been provisioned with read/publish/sendMessage access to the DLQ resource.
### Unique Package ID
For monitoring of different deployments, a unique UUID for each package is available in `package_info.json` in the root directory of your application's package. You can use this information or a hash of this file for such things as tracking errors across different deployments, monitoring status of deployments and other such things on services such as Sentry and New Relic. The package will contain:
```json
{
"build_platform": "darwin",
"build_user": "frank",
"build_time": "1509732511",
"uuid": "9c2df9e6-30f4-4c0a-ac4d-4ecb51831a74"
}
```
### Application Load Balancer Event Source
Zappa can be used to handle events triggered by Application Load Balancers (ALB). This can be useful in a few circumstances:
- Since API Gateway has a hard limit of 30 seconds before timing out, you can use an ALB for longer running requests.
- API Gateway is billed per-request; therefore, costs can become excessive with high throughput services. ALBs pricing model makes much more sense financially if you're expecting a lot of traffic to your Lambda.
- ALBs can be placed within a VPC, which may make more sense for private endpoints than using API Gateway's private model (using AWS PrivateLink).
Like API Gateway, Zappa can automatically provision ALB resources for you. You'll need to add the following to your `zappa_settings`:
```
"alb_enabled": true,
"alb_vpc_config": {
"CertificateArn": "arn:aws:acm:us-east-1:[your-account-id]:certificate/[certificate-id]",
"SubnetIds": [
// Here, you'll want to provide a list of subnets for your ALB, eg. 'subnet-02a58266'
],
"SecurityGroupIds": [
// And here, a list of security group IDs, eg. 'sg-fbacb791'
]
}
```
More information on using ALB as an event source for Lambda can be found [here](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html).
*An important note*: right now, Zappa will provision ONE lambda to ONE load balancer, which means using `base_path` along with ALB configuration is currently unsupported.
### Endpoint Configuration
API Gateway can be configured to be only accessible in a VPC. To enable this; [configure your VPC to support](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-private-apis.html) then set the `endpoint_configuration` to `PRIVATE` and set up Resource Policy on the API Gateway. A note about this; if you're using a private endpoint, Zappa won't be able to tell if the API is returning a successful status code upon deploy or update, so you'll have to check it manually to ensure your setup is working properly.
For full list of options for endpoint configuration refer to [API Gateway EndpointConfiguration documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigateway-restapi-endpointconfiguration.html)
#### Example Private API Gateway configuration
zappa_settings.json:
```json
{
"dev": {
...
"endpoint_configuration": ["PRIVATE"],
"apigateway_policy": "apigateway_resource_policy.json",
...
},
...
}
```
apigateway_resource_policy.json:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*",
"Condition": {
"StringNotEquals": {
"aws:sourceVpc": "{{vpcID}}" // UPDATE ME
}
}
},
{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*"
}
]
}
```
### Cold Starts (Experimental)
Lambda may provide additional resources than provisioned during cold start initialization. Set `INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT=True` to instantiate the lambda handler on import. This is an experimental feature - if startup time is critical, look into using Provisioned Concurrency.
## Zappa Guides
* [Django-Zappa tutorial (screencast)](https://www.youtube.com/watch?v=plUrbPN0xc8&feature=youtu.be).
* [Using Django-Zappa, Part 1](https://serverlesscode.com/post/zappa-wsgi-for-python/).
* [Using Django-Zappa, Part 2: VPCs](https://serverlesscode.com/post/zappa-wsgi-for-python-pt-2/).
* [Building Serverless Microservices with Zappa and Flask](https://gun.io/blog/serverless-microservices-with-zappa-and-flask/)
* [Zappa で Hello World するまで (Japanese)](http://qiita.com/satoshi_iwashita/items/505492193317819772c7)
* [How to Deploy Zappa with CloudFront, RDS and VPC](https://jinwright.net/how-deploy-serverless-wsgi-app-using-zappa/)
* [Secure 'Serverless' File Uploads with AWS Lambda, S3, and Zappa](http://blog.stratospark.com/secure-serverless-file-uploads-with-aws-lambda-s3-zappa.html)
* [Deploy a Serverless WSGI App using Zappa, CloudFront, RDS, and VPC](https://docs.google.com/presentation/d/1aYeOMgQl4V_fFgT5VNoycdXtob1v6xVUWlyxoTEiTw0/edit#slide=id.p)
* [AWS: Deploy Alexa Ask Skills with Flask-Ask and Zappa](https://developer.amazon.com/blogs/post/8e8ad73a-99e9-4c0f-a7b3-60f92287b0bf/New-Alexa-Tutorial-Deploy-Flask-Ask-Skills-to-AWS-Lambda-with-Zappa)
* [Guide to using Django with Zappa](https://edgarroman.github.io/zappa-django-guide/)
* [Zappa and LambCI](https://seancoates.com/blogs/zappa-and-lambci/)
* [Building A Serverless Image Processing SaaS using Zappa](https://medium.com/99serverless/building-a-serverless-image-processing-saas-9ef68b594076)
* [Serverless Slack Slash Commands with Python and Zappa](https://renzo.lucioni.xyz/serverless-slash-commands-with-python/)
* [Bringing Tokusatsu to AWS using Python, Flask, Zappa and Contentful](https://www.contentful.com/blog/2018/03/07/bringing-tokusatsu-to-aws-using-python-flask-zappa-and-contentful/)
* [AWS Summit 2018 Seoul - Zappa와 함께하는 Serverless Microservice](https://www.slideshare.net/YunSeopSong/zappa-serverless-microservice-94410308/)
* [Book - Building Serverless Python Web Services with Zappa](https://github.com/PacktPublishing/Building-Serverless-Python-Web-Services-with-Zappa)
* [Vider sa flask dans une lambda](http://free_zed.gitlab.io/articles/2019/11/vider-sa-flask-dans-une-lambda/)[French]
* _Your guide here?_
## Zappa in the Press
* _[Zappa Serves Python, Minus the Servers](http://www.infoworld.com/article/3031665/application-development/zappa-serves-python-web-apps-minus-the-servers.html)_
* _[Zappa lyfter serverlösa applikationer med Python](http://computersweden.idg.se/2.2683/1.649895/zappa-lyfter-python)_
* _[Interview: Rich Jones on Zappa](https://serverlesscode.com/post/rich-jones-interview-django-zappa/)_
* [Top 10 Python Libraries of 2016](https://tryolabs.com/blog/2016/12/20/top-10-python-libraries-of-2016/)
## Sites Using Zappa
* [Mailchimp Signup Utility](https://github.com/sasha42/Mailchimp-utility) - A microservice for adding people to a mailing list via API.
* [Zappa Slack Inviter](https://github.com/Miserlou/zappa-slack-inviter) - A tiny, server-less service for inviting new users to your Slack channel.
* [Serverless Image Host](https://github.com/Miserlou/serverless-imagehost) - A thumbnailing service with Flask, Zappa and Pillow.
* [Zappa BitTorrent Tracker](https://github.com/Miserlou/zappa-bittorrent-tracker) - An experimental server-less BitTorrent tracker. Work in progress.
* [JankyGlance](https://github.com/Miserlou/JankyGlance) - A server-less Yahoo! Pipes replacement.
* [LambdaMailer](https://github.com/tryolabs/lambda-mailer) - A server-less endpoint for processing a contact form.
* [Voter Registration Microservice](https://topics.arlingtonva.us/2016/11/voter-registration-search-microservice/) - Official backup to the Virginia Department of Elections portal.
* [FreePoll Online](https://www.freepoll.online) - A simple and awesome way for groups to make decisions.
* [PasteOfCode](https://paste.ofcode.org/) - A Zappa-powered paste bin.
* And many more, including banks, governments, startups, enterprises and schools!
Are you using Zappa? Let us know and we'll list your site here!
## Related Projects
* [Mackenzie](http://github.com/Miserlou/Mackenzie) - AWS Lambda Infection Toolkit
* [NoDB](https://github.com/Miserlou/NoDB) - A simple, server-less, Pythonic object store based on S3.
* [zappa-cms](http://github.com/Miserlou/zappa-cms) - A tiny server-less CMS for busy hackers. Work in progress.
* [zappa-django-utils](https://github.com/Miserlou/zappa-django-utils) - Utility commands to help Django deployments.
* [flask-ask](https://github.com/johnwheeler/flask-ask) - A framework for building Amazon Alexa applications. Uses Zappa for deployments.
* [zappa-file-widget](https://github.com/anush0247/zappa-file-widget) - A Django plugin for supporting binary file uploads in Django on Zappa.
* [zops](https://github.com/bjinwright/zops) - Utilities for teams and continuous integrations using Zappa.
* [cookiecutter-mobile-backend](https://github.com/narfman0/cookiecutter-mobile-backend/) - A `cookiecutter` Django project with Zappa and S3 uploads support.
* [zappa-examples](https://github.com/narfman0/zappa-examples/) - Flask, Django, image uploads, and more!
* [zappa-hug-example](https://github.com/mcrowson/zappa-hug-example) - Example of a Hug application using Zappa.
* [Zappa Docker Image](https://github.com/danielwhatmuff/zappa) - A Docker image for running Zappa locally, based on Lambda Docker.
* [zappa-dashing](https://github.com/nikos/zappa-dashing) - Monitor your AWS environment (health/metrics) with Zappa and CloudWatch.
* [s3env](https://github.com/cameronmaske/s3env) - Manipulate a remote Zappa environment variable key/value JSON object file in an S3 bucket through the CLI.
* [zappa_resize_image_on_fly](https://github.com/wobeng/zappa_resize_image_on_fly) - Resize images on the fly using Flask, Zappa, Pillow, and OpenCV-python.
* [zappa-ffmpeg](https://github.com/ubergarm/zappa-ffmpeg) - Run ffmpeg inside a lambda for serverless transformations.
* [gdrive-lambda](https://github.com/richiverse/gdrive-lambda) - pass json data to a csv file for end users who use Gdrive across the organization.
* [travis-build-repeat](https://github.com/bcongdon/travis-build-repeat) - Repeat TravisCI builds to avoid stale test results.
* [wunderskill-alexa-skill](https://github.com/mcrowson/wunderlist-alexa-skill) - An Alexa skill for adding to a Wunderlist.
* [xrayvision](https://github.com/mathom/xrayvision) - Utilities and wrappers for using AWS X-Ray with Zappa.
* [terraform-aws-zappa](https://github.com/dpetzold/terraform-aws-zappa) - Terraform modules for creating a VPC, RDS instance, ElastiCache Redis and CloudFront Distribution for use with Zappa.
* [zappa-sentry](https://github.com/jneves/zappa-sentry) - Integration with Zappa and Sentry
* [IOpipe](https://github.com/iopipe/iopipe-python#zappa) - Monitor, profile and analyze your Zappa apps.
## Hacks
Zappa goes quite far beyond what Lambda and API Gateway were ever intended to handle. As a result, there are quite a few hacks in here that allow it to work. Some of those include, but aren't limited to..
* Using VTL to map body, headers, method, params and query strings into JSON, and then turning that into valid WSGI.
* Attaching response codes to response bodies, Base64 encoding the whole thing, using that as a regex to route the response code, decoding the body in VTL, and mapping the response body to that.
* Packing and _Base58_ encoding multiple cookies into a single cookie because we can only map one kind.
* Forcing the case permutations of "Set-Cookie" in order to return multiple headers at the same time.
* Turning cookie-setting 301/302 responses into 200 responses with HTML redirects, because we have no way to set headers on redirects.
## Contributing
This project is still young, so there is still plenty to be done. Contributions are more than welcome!
Please file tickets for discussion before submitting patches. Pull requests should target `master` and should leave Zappa in a "shippable" state if merged.
If you are adding a non-trivial amount of new code, please include a functioning test in your PR. For AWS calls, we use the `placebo` library, which you can learn to use [in their README](https://github.com/garnaat/placebo#usage-as-a-decorator). The test suite will be run by [Travis CI](https://travis-ci.org/zappa/Zappa) once you open a pull request.
Please include the GitHub issue or pull request URL that has discussion related to your changes as a comment in the code ([example](https://github.com/zappa/Zappa/blob/fae2925431b820eaedf088a632022e4120a29f89/zappa/zappa.py#L241-L243)). This greatly helps for project maintainability, as it allows us to trace back use cases and explain decision making. Similarly, please make sure that you meet all of the requirements listed in the [pull request template](https://raw.githubusercontent.com/zappa/Zappa/master/.github/PULL_REQUEST_TEMPLATE.md).
Please feel free to work on any open ticket, especially any ticket marked with the "help-wanted" label. If you get stuck or want to discuss an issue further, please join [our Slack channel](https://zappateam.slack.com/), where you'll find a community of smart and interesting people working diligently on hard problems.
Zappa does not intend to conform to PEP8; isolate your commits so that changes to functionality are not mixed with changes made by your linter.
#### Using a Local Repo
To use the git HEAD, you *probably can't* use `pip install -e `. Instead, you should clone the repo to your machine and then `pip install /path/to/zappa/repo` or `ln -s /path/to/zappa/repo/zappa zappa` in your local project.
## Patrons
If you or your company uses **Zappa**, please consider giving what you can to support the ongoing development of the project!
You can become a patron by **[visiting our Patreon page](https://patreon.com/zappa)**.
Zappa is currently supported by these awesome individuals and companies:
* Nathan Lawrence
* LaunchLab
* Sean Paley
* Theo Chitayat
* George Sibble
* Joe Weiss
* Nik Bora
* Zerong Toby Wang
* Gareth E
* Matt Jackson
* Sean Coates
* Alexander Loschilov
* Korey Peters
* Joe Weiss
* Kimmo Parvianen-Jalanko
* Patrick Agin
* Roberto Martinez
* Charles Dimino
* Doug Beney
* Dan "The Man" Gayle
* Juancito
* Will Childs-Klein
* Efi Merdler Kravitz
* **Philippe Trounev**
Thank you very, very much!
## Support / Development / Training / Consulting
Do you need help with..
* Porting existing Flask and Django applications to Zappa?
* Building new applications and services that scale infinitely?
* Reducing your operations and hosting costs?
* Adding new custom features into Zappa?
* Training your team to use AWS and other server-less paradigms?
Good news! We're currently available for remote and on-site consulting for small, large and enterprise teams. Please contact <[email protected]> with your needs and let's work together!
<br />
<p align="center">
<a href="https://gun.io"><img src="http://i.imgur.com/M7wJipR.png" alt="Made by Gun.io"/></a>
</p>
| zappa-teamturing | /zappa-teamturing-0.54.12.tar.gz/zappa-teamturing-0.54.12/README.md | README.md |
import atexit
import base64
import binascii
import copy
import hashlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
from tempfile import gettempdir
from urllib.error import HTTPError
from urllib.request import urlopen

import requests
# Staging
# Amazon doesn't accept these though.
# DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
# Production
# Base URL of the Let's Encrypt ACME CA used by every request below.
DEFAULT_CA = "https://acme-v02.api.letsencrypt.org"
# Module-level logger with an explicit stream handler so progress messages
# are visible even if the host application never configures logging.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
def get_cert_and_update_domain(
    zappa_instance,
    lambda_name,
    api_stage,
    domain=None,
    manual=False,
):
    """
    Main cert installer path.

    Generates a Let's Encrypt certificate for ``domain`` and, unless
    ``manual`` is True, installs it on the API Gateway custom domain
    (creating the domain name mapping first if it does not exist yet).
    With ``manual=True`` the certificate material is printed so the
    operator can install it elsewhere.

    Returns True on success, False on any failure (the exception is
    printed rather than propagated so the CLI can exit cleanly).
    """
    try:
        # Generate key material and a CSR, run the ACME flow, and build
        # the full certificate chain in the temp directory.
        create_domain_key()
        create_domain_csr(domain)
        get_cert(zappa_instance)
        create_chained_certificate()

        with open(os.path.join(gettempdir(), "signed.crt")) as f:
            certificate_body = f.read()

        with open(os.path.join(gettempdir(), "domain.key")) as f:
            certificate_private_key = f.read()

        with open(os.path.join(gettempdir(), "intermediate.pem")) as f:
            certificate_chain = f.read()

        if not manual:
            if domain:
                if not zappa_instance.get_domain_name(domain):
                    zappa_instance.create_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
                    print(
                        "Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part."
                    )
                else:
                    zappa_instance.update_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
        else:
            # Typo fix: these previously read "Cerificate".
            print("Certificate body:\n")
            print(certificate_body)
            print("\nCertificate private key:\n")
            print(certificate_private_key)
            print("\nCertificate chain:\n")
            print(certificate_chain)
    except Exception as e:
        print(e)
        return False

    return True
def create_domain_key():
    """
    Generate a 2048-bit RSA private key and write it to <tmp>/domain.key.
    """
    # subprocess.DEVNULL discards stderr without leaking an open file
    # handle (the previous open(os.devnull) was never closed).
    key_pem = subprocess.check_output(
        ["openssl", "genrsa", "2048"], stderr=subprocess.DEVNULL
    )
    with open(os.path.join(gettempdir(), "domain.key"), "wb") as f:
        f.write(key_pem)
def create_domain_csr(domain):
    """
    Create a SHA-256 certificate signing request for ``domain`` using the
    previously generated <tmp>/domain.key, writing it to <tmp>/domain.csr.
    """
    subject = "/CN=" + domain
    cmd = [
        "openssl",
        "req",
        "-new",
        "-sha256",
        "-key",
        os.path.join(gettempdir(), "domain.key"),
        "-subj",
        subject,
    ]
    # subprocess.DEVNULL discards stderr without leaking an open handle.
    csr_pem = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    with open(os.path.join(gettempdir(), "domain.csr"), "wb") as f:
        f.write(csr_pem)
def create_chained_certificate():
    """
    Build the certificate chain files in the temp directory.

    Writes:
      * intermediate.pem -- the Let's Encrypt cross-signed intermediate
      * chained.pem      -- our signed certificate followed by the intermediate
    """
    # "with" closes the handle (it was previously leaked).
    with open(os.path.join(gettempdir(), "signed.crt"), "rb") as f:
        signed_crt = f.read()

    cross_cert_url = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
    cert = requests.get(cross_cert_url)
    # Fail loudly instead of silently writing an HTML error page into the chain.
    cert.raise_for_status()

    with open(os.path.join(gettempdir(), "intermediate.pem"), "wb") as intermediate_pem:
        intermediate_pem.write(cert.content)

    with open(os.path.join(gettempdir(), "chained.pem"), "wb") as chained_pem:
        chained_pem.write(signed_crt)
        chained_pem.write(cert.content)
def parse_account_key():
    """
    Parse the ACME account key (<tmp>/account.key) and return openssl's
    textual dump of it as bytes.
    """
    LOGGER.info("Parsing account key...")
    cmd = [
        "openssl",
        "rsa",
        "-in",
        os.path.join(gettempdir(), "account.key"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL discards stderr without leaking an open handle.
    return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
def parse_csr():
    """
    Parse <tmp>/domain.csr and return the set of domains it covers.

    Collects the subject common name plus every DNS entry found in the
    X509v3 Subject Alternative Name extension.
    """
    LOGGER.info("Parsing CSR...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL discards stderr without leaking an open handle.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    csr_text = out.decode("utf8")

    domains = set()
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", csr_text)
    if common_name is not None:
        domains.add(common_name.group(1))

    subject_alt_names = re.search(
        r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
        csr_text,
        re.MULTILINE | re.DOTALL,
    )
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])

    return domains
def get_boulder_header(key_bytes):
    """
    Extract the RSA modulus and public exponent from openssl's textual key
    dump and package them as the JWS protected header expected by Boulder.
    """
    match = re.search(
        r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
        key_bytes.decode("utf8"),
        re.MULTILINE | re.DOTALL,
    )
    modulus_hex, exponent_dec = match.groups()

    # Hex-encode the exponent, left-padding to an even number of digits so
    # it can be decoded back into bytes.
    exponent_hex = "{0:x}".format(int(exponent_dec))
    if len(exponent_hex) % 2:
        exponent_hex = "0{0}".format(exponent_hex)

    # Strip whitespace and colon separators from openssl's modulus dump.
    modulus_clean = re.sub(r"(\s|:)", "", modulus_hex)

    return {
        "alg": "RS256",
        "jwk": {
            "e": _b64(binascii.unhexlify(exponent_hex.encode("utf-8"))),
            "kty": "RSA",
            "n": _b64(binascii.unhexlify(modulus_clean.encode("utf-8"))),
        },
    }
def register_account():
    """
    Register the account key with Let's Encrypt and agree to the TOS.

    A 201 response means the key was newly registered; a 409 means it was
    already registered. Anything else raises ValueError.
    """
    LOGGER.info("Registering account...")
    code, result = _send_signed_request(
        DEFAULT_CA + "/acme/new-reg",
        {
            "resource": "new-reg",
            "agreement": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
        },
    )
    if code == 201:  # pragma: no cover
        LOGGER.info("Registered!")
        return
    if code == 409:  # pragma: no cover
        LOGGER.info("Already registered!")
        return
    raise ValueError("Error registering: {0} {1}".format(code, result))  # pragma: no cover
def get_cert(zappa_instance, log=LOGGER, CA=DEFAULT_CA):
    """
    Call LE to get a new signed CA.

    Runs the ACME dns-01 flow for every domain found in the CSR: registers
    the account, requests a challenge per domain, publishes the challenge
    digest as a DNS TXT record via ``zappa_instance``, waits for the CA to
    verify it, then has the CA sign the CSR and writes the PEM-encoded
    certificate to the temp directory.

    Returns True on success; raises ValueError on any ACME error.
    """
    out = parse_account_key()
    header = get_boulder_header(out)
    # The account key's JWK thumbprint ties key authorizations to our account.
    accountkey_json = json.dumps(header["jwk"], sort_keys=True, separators=(",", ":"))
    thumbprint = _b64(hashlib.sha256(accountkey_json.encode("utf8")).digest())
    # find domains covered by the CSR (CN + SANs)
    domains = parse_csr()
    # ensure the account is registered before requesting challenges
    register_account()
    # verify each domain
    for domain in domains:
        log.info("Verifying {0}...".format(domain))
        # get new challenge
        code, result = _send_signed_request(
            CA + "/acme/new-authz",
            {
                "resource": "new-authz",
                "identifier": {"type": "dns", "value": domain},
            },
        )
        if code != 201:
            raise ValueError(
                "Error requesting challenges: {0} {1}".format(code, result)
            )
        # Only the dns-01 challenge type is used here; an IndexError is
        # raised if the CA offers none -- TODO confirm that is intended.
        challenge = [
            ch
            for ch in json.loads(result.decode("utf8"))["challenges"]
            if ch["type"] == "dns-01"
        ][0]
        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge["token"])
        keyauthorization = "{0}.{1}".format(token, thumbprint).encode("utf-8")
        # sha256_b64: this digest is the value of the DNS TXT record
        digest = _b64(hashlib.sha256(keyauthorization).digest())
        zone_id = zappa_instance.get_hosted_zone_id_for_domain(domain)
        if not zone_id:
            raise ValueError("Could not find Zone ID for: " + domain)
        zappa_instance.set_dns_challenge_txt(zone_id, domain, digest)  # resp is unused
        print("Waiting for DNS to propagate..")
        # Fixed wait for the TXT record to propagate; 45s is a heuristic --
        # polling the resolver would be more robust.
        time.sleep(45)
        # notify the CA that the challenge is ready to be checked
        code, result = _send_signed_request(
            challenge["uri"],
            {
                "resource": "challenge",
                "keyAuthorization": keyauthorization.decode("utf-8"),
            },
        )
        if code != 202:
            raise ValueError("Error triggering challenge: {0} {1}".format(code, result))
        # block until the CA reports the challenge as valid
        verify_challenge(challenge["uri"])
        # Challenge verified, clean up the Route 53 TXT record
        zappa_instance.remove_dns_challenge_txt(zone_id, domain, digest)
    # Sign the CSR now that every domain is verified
    result = sign_certificate()
    # Encode the DER result to PEM format on disk
    encode_certificate(result)
    return True
def verify_challenge(uri):
    """
    Loop until our challenge is verified, else fail.

    Polls *uri* every two seconds while the status is "pending"; returns
    once it is "valid" and raises ValueError for any other status or for
    an HTTP error while polling.
    """
    while True:
        try:
            response = urlopen(uri)
            challenge_status = json.loads(response.read().decode("utf8"))
        except IOError as e:
            raise ValueError(
                "Error checking challenge: {0} {1}".format(
                    e.code, json.loads(e.read().decode("utf8"))
                )
            )
        status = challenge_status["status"]
        if status == "valid":
            LOGGER.info("Domain verified!")
            return
        if status != "pending":
            raise ValueError(
                "Domain challenge did not pass: {0}".format(challenge_status)
            )
        time.sleep(2)
def sign_certificate():
    """
    Get the new certificate.
    Returns the signed bytes.

    Exports the CSR in DER form via the ``openssl`` CLI, submits it to the
    ACME ``new-cert`` endpoint, and returns the raw certificate bytes.
    Raises ValueError if Boulder does not answer with HTTP 201.
    """
    LOGGER.info("Signing certificate...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-outform",
        "DER",
    ]
    # Fix: the previous `devnull = open(os.devnull, "wb")` handle was never
    # closed; subprocess.DEVNULL discards stderr without leaking a descriptor.
    csr_der = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    code, result = _send_signed_request(
        DEFAULT_CA + "/acme/new-cert",
        {
            "resource": "new-cert",
            "csr": _b64(csr_der),
        },
    )
    if code != 201:
        raise ValueError("Error signing certificate: {0} {1}".format(code, result))
    LOGGER.info("Certificate signed!")
    return result
def encode_certificate(result):
    """
    Encode cert bytes to PEM encoded cert file.

    Wraps the base64 of the DER bytes at 64 columns, adds PEM armor, and
    writes the result to ``<tempdir>/signed.crt``. Always returns True.

    Fix: the output file is now written inside a ``with`` block, so the
    handle is closed even if the write raises.
    """
    cert_body = (
        """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
            "\n".join(textwrap.wrap(base64.b64encode(result).decode("utf8"), 64))
        )
    )
    with open("{}/signed.crt".format(gettempdir()), "w") as signed_crt:
        signed_crt.write(cert_body)
    return True
##
# Request Utility
##
def _b64(b):
"""
Helper function base64 encode for jose spec
"""
return base64.urlsafe_b64encode(b).decode("utf8").replace("=", "")
def _send_signed_request(url, payload):
    """
    Helper function to make signed requests to Boulder

    Signs the JOSE payload with the account key via the ``openssl`` CLI and
    POSTs it to *url*. Returns an ``(http_code, response_body)`` tuple; on
    IOError the error code and body (or the stringified error) are returned
    instead of raising.
    """
    payload64 = _b64(json.dumps(payload).encode("utf8"))
    out = parse_account_key()
    header = get_boulder_header(out)
    protected = copy.deepcopy(header)
    # Every ACME request needs a fresh anti-replay nonce from the directory.
    protected["nonce"] = urlopen(DEFAULT_CA + "/directory").headers["Replay-Nonce"]
    protected64 = _b64(json.dumps(protected).encode("utf8"))
    cmd = [
        "openssl",
        "dgst",
        "-sha256",
        "-sign",
        os.path.join(gettempdir(), "account.key"),
    ]
    proc = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # Sign "<protected>.<payload>" with the account key (JWS signing input).
    out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode("utf8"))
    if proc.returncode != 0:  # pragma: no cover
        raise IOError("OpenSSL Error: {0}".format(err))
    data = json.dumps(
        {
            "header": header,
            "protected": protected64,
            "payload": payload64,
            "signature": _b64(out),
        }
    )
    try:
        resp = urlopen(url, data.encode("utf8"))
        return resp.getcode(), resp.read()
    except IOError as e:
        # HTTPError exposes .code/.read(); other IOErrors fall back to str(e).
        return getattr(e, "code", None), getattr(e, "read", e.__str__)()
##
# Temporary Directory Utility
##
# Lazily-created temp directory path, managed by gettempdir() and cleanup().
__tempdir = None
def gettempdir():
    """
    Lazily creates a temporary directory in a secure manner. When Python exits,
    or the cleanup() function is called, the directory is erased.
    """
    global __tempdir
    if __tempdir is None:
        # First call in this process: create and cache the directory.
        __tempdir = tempfile.mkdtemp()
    return __tempdir
@atexit.register
def cleanup():
    """
    Delete any temporary files.

    Registered with atexit; safe to call more than once because the cached
    path is reset to None after removal.
    """
    global __tempdir
    if __tempdir is None:
        return
    shutil.rmtree(__tempdir)
    __tempdir = None
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
    """
    Permute all casings of a given string.

    A pretty algorithm, via @Amber
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if not input_string:
        yield ""
        return
    head = input_string[:1]
    tail = input_string[1:]
    if head.lower() == head.upper():
        # Caseless character (digit, punctuation): only one variant.
        variants = (head,)
    else:
        variants = (head.lower(), head.upper())
    for suffix in all_casings(tail):
        for variant in variants:
            yield variant + suffix
class ZappaWSGIMiddleware:
    """
    Middleware functions necessary for a Zappa deployment.
    Most hacks have now been removed except for Set-Cookie permutation.
    """

    def __init__(self, application):
        # The wrapped WSGI application.
        self.application = application

    def __call__(self, environ, start_response):
        """
        We must case-mangle the Set-Cookie header name or AWS will use only a
        single one of these headers.
        """

        def encode_response(status, headers, exc_info=None):
            """
            Lowercase every 'Set-Cookie' header name and move those headers to
            the end of the list; all other headers pass through untouched.
            Related: https://github.com/Miserlou/Zappa/issues/1965
            """
            plain_headers = []
            cookie_headers = []
            for header in headers:
                name = header[0]
                if (type(name) == str) and (name.lower() == "set-cookie"):
                    cookie_headers.append((name.lower(), header[1]))
                else:
                    plain_headers.append(header)
            return start_response(status, plain_headers + cookie_headers, exc_info)

        # Run the wrapped app with our header-mangling start_response.
        response = self.application(environ, encode_response)

        # Return the response as a WSGI-safe iterator.
        return ClosingIterator(response)
import base64
import collections
import datetime
import importlib
import inspect
import json
import logging
import os
import sys
import tarfile
import traceback
from builtins import str
import boto3
from werkzeug.wrappers import Response
# This file may be copied into a project's root,
# so handle both scenarios.
try:
from zappa.middleware import ZappaWSGIMiddleware
from zappa.utilities import merge_headers, parse_s3_url
from zappa.wsgi import common_log, create_wsgi_request
except ImportError as e: # pragma: no cover
from .middleware import ZappaWSGIMiddleware
from .utilities import merge_headers, parse_s3_url
from .wsgi import common_log, create_wsgi_request
# Set up logging
# basicConfig attaches a default stderr handler; in Lambda this is what
# routes records to CloudWatch Logs.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class LambdaHandler:
    """
    Singleton for avoiding duplicate setup.
    Pattern provided by @benbangert.
    """

    # Cached singleton instance (see __new__).
    __instance = None
    # Loaded zappa_settings module and the name it was imported under.
    settings = None
    settings_name = None
    # Optional boto3 session, injectable for tests.
    session = None

    # Application
    app_module = None
    wsgi_app = None
    trailing_slash = False
def __new__(cls, settings_name="zappa_settings", session=None):
"""Singleton instance to avoid repeat setup"""
if LambdaHandler.__instance is None:
print("Instancing..")
LambdaHandler.__instance = object.__new__(cls)
return LambdaHandler.__instance
def __init__(self, settings_name="zappa_settings", session=None):
# We haven't cached our settings yet, load the settings and app.
if not self.settings:
# Loading settings from a python module
self.settings = importlib.import_module(settings_name)
self.settings_name = settings_name
self.session = session
# Custom log level
if self.settings.LOG_LEVEL:
level = logging.getLevelName(self.settings.LOG_LEVEL)
logger.setLevel(level)
remote_env = getattr(self.settings, "REMOTE_ENV", None)
remote_bucket, remote_file = parse_s3_url(remote_env)
if remote_bucket and remote_file:
self.load_remote_settings(remote_bucket, remote_file)
# Let the system know that this will be a Lambda/Zappa/Stack
os.environ["SERVERTYPE"] = "AWS Lambda"
os.environ["FRAMEWORK"] = "Zappa"
try:
os.environ["PROJECT"] = self.settings.PROJECT_NAME
os.environ["STAGE"] = self.settings.API_STAGE
except Exception: # pragma: no cover
pass
# Set any locally defined env vars
# Environment variable keys can't be Unicode
# https://github.com/Miserlou/Zappa/issues/604
for key in self.settings.ENVIRONMENT_VARIABLES.keys():
os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key]
# Pulling from S3 if given a zip path
project_archive_path = getattr(self.settings, "ARCHIVE_PATH", None)
if project_archive_path:
self.load_remote_project_archive(project_archive_path)
# Load compiled library to the PythonPath
# checks if we are the slim_handler since this is not needed otherwise
# https://github.com/Miserlou/Zappa/issues/776
is_slim_handler = getattr(self.settings, "SLIM_HANDLER", False)
if is_slim_handler:
included_libraries = getattr(self.settings, "INCLUDE", [])
try:
from ctypes import cdll, util
for library in included_libraries:
try:
cdll.LoadLibrary(os.path.join(os.getcwd(), library))
except OSError:
print(
"Failed to find library: {}...right filename?".format(
library
)
)
except ImportError:
print("Failed to import cytpes library")
# This is a non-WSGI application
# https://github.com/Miserlou/Zappa/pull/748
if (
not hasattr(self.settings, "APP_MODULE")
and not self.settings.DJANGO_SETTINGS
):
self.app_module = None
wsgi_app_function = None
# This is probably a normal WSGI app (Or django with overloaded wsgi application)
# https://github.com/Miserlou/Zappa/issues/1164
elif hasattr(self.settings, "APP_MODULE"):
if self.settings.DJANGO_SETTINGS:
sys.path.append("/var/task")
from django.conf import (
ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE,
)
# add the Lambda root path into the sys.path
self.trailing_slash = True
os.environ[
SETTINGS_ENVIRONMENT_VARIABLE
] = self.settings.DJANGO_SETTINGS
else:
self.trailing_slash = False
# The app module
self.app_module = importlib.import_module(self.settings.APP_MODULE)
# The application
wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
# Django gets special treatment.
else:
try: # Support both for tests
from zappa.ext.django_zappa import get_django_wsgi
except ImportError: # pragma: no cover
from django_zappa_app import get_django_wsgi
# Get the Django WSGI app from our extension
wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
self.trailing_slash = True
self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function)
    def load_remote_project_archive(self, project_zip_path):
        """
        Puts the project files from S3 in /tmp and adds to path

        Downloads the gzipped tarball at *project_zip_path* (an s3:// URL)
        into /tmp/<project>, prepends that folder to sys.path, and chdirs
        into it. Skips the download when the folder already exists (warm
        container). Always returns True.
        """
        project_folder = "/tmp/{0!s}".format(self.settings.PROJECT_NAME)
        if not os.path.isdir(project_folder):
            # The project folder doesn't exist in this cold lambda, get it from S3
            if not self.session:
                boto_session = boto3.Session()
            else:
                boto_session = self.session
            # Download zip file from S3
            remote_bucket, remote_file = parse_s3_url(project_zip_path)
            s3 = boto_session.resource("s3")
            archive_on_s3 = s3.Object(remote_bucket, remote_file).get()
            # NOTE(review): extractall() trusts archive member paths; this is
            # acceptable only because the archive is produced by Zappa itself
            # at deploy time — confirm no untrusted archives can reach here.
            with tarfile.open(fileobj=archive_on_s3["Body"], mode="r|gz") as t:
                t.extractall(project_folder)
        # Add to project path
        sys.path.insert(0, project_folder)
        # Change working directory to project folder
        # Related: https://github.com/Miserlou/Zappa/issues/702
        os.chdir(project_folder)
        return True
    def load_remote_settings(self, remote_bucket, remote_file):
        """
        Attempt to read a file from s3 containing a flat json object. Adds each
        key->value pair as environment variables. Helpful for keeping
        sensitive or stage-specific configuration variables in s3 instead of
        version control.
        """
        if not self.session:
            boto_session = boto3.Session()
        else:
            boto_session = self.session
        s3 = boto_session.resource("s3")
        # Deliberately best-effort from here on: a missing or malformed remote
        # settings file must not crash handler startup.
        try:
            remote_env_object = s3.Object(remote_bucket, remote_file).get()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Could not load remote settings file.", e)
            return
        try:
            content = remote_env_object["Body"].read()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Exception while reading remote settings file.", e)
            return
        try:
            settings_dict = json.loads(content)
        except (ValueError, TypeError):  # pragma: no cover
            print("Failed to parse remote settings!")
            return
        # add each key-value to environment - overwrites existing keys!
        for key, value in settings_dict.items():
            if self.settings.LOG_LEVEL == "DEBUG":
                print("Adding {} -> {} to environment".format(key, value))
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            try:
                os.environ[str(key)] = value
            except Exception:
                if self.settings.LOG_LEVEL == "DEBUG":
                    print("Environment variable keys must be non-unicode!")
@staticmethod
def import_module_and_get_function(whole_function):
"""
Given a modular path to a function, import that module
and return the function.
"""
module, function = whole_function.rsplit(".", 1)
app_module = importlib.import_module(module)
app_function = getattr(app_module, function)
return app_function
    @classmethod
    def lambda_handler(cls, event, context):  # pragma: no cover
        # Use the eagerly-created module-level handler when
        # INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT was set; otherwise build
        # (or fetch) the singleton now.
        handler = global_handler or cls()
        exception_handler = handler.settings.EXCEPTION_HANDLER
        try:
            return handler.handler(event, context)
        except Exception as ex:
            exception_processed = cls._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=ex,
            )
            if not exception_processed:
                # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry
                # an event execution in case of failure.
                raise
@classmethod
def _process_exception(cls, exception_handler, event, context, exception):
exception_processed = False
if exception_handler:
try:
handler_function = cls.import_module_and_get_function(exception_handler)
exception_processed = handler_function(exception, event, context)
except Exception as cex:
logger.error(msg="Failed to process exception via custom handler.")
print(cex)
return exception_processed
@staticmethod
def run_function(app_function, event, context):
"""
Given a function and event context,
detect signature and execute, returning any result.
"""
# getargspec does not support python 3 method with type hints
# Related issue: https://github.com/Miserlou/Zappa/issues/1452
if hasattr(inspect, "getfullargspec"): # Python 3
args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(
app_function
)
else: # Python 2
args, varargs, keywords, defaults = inspect.getargspec(app_function)
num_args = len(args)
if num_args == 0:
result = app_function(event, context) if varargs else app_function()
elif num_args == 1:
result = app_function(event, context) if varargs else app_function(event)
elif num_args == 2:
result = app_function(event, context)
else:
raise RuntimeError(
"Function signature is invalid. Expected a function that accepts at most "
"2 arguments or varargs."
)
return result
def get_function_for_aws_event(self, record):
"""
Get the associated function to execute for a triggered AWS event
Support S3, SNS, DynamoDB, kinesis and SQS events
"""
if "s3" in record:
if ":" in record["s3"]["configurationId"]:
return record["s3"]["configurationId"].split(":")[-1]
arn = None
if "Sns" in record:
try:
message = json.loads(record["Sns"]["Message"])
if message.get("command"):
return message["command"]
except ValueError:
pass
arn = record["Sns"].get("TopicArn")
elif "dynamodb" in record or "kinesis" in record:
arn = record.get("eventSourceARN")
elif "eventSource" in record and record.get("eventSource") == "aws:sqs":
arn = record.get("eventSourceARN")
elif "s3" in record:
arn = record["s3"]["bucket"]["arn"]
if arn:
return self.settings.AWS_EVENT_MAPPING.get(arn)
return None
def get_function_from_bot_intent_trigger(self, event):
"""
For the given event build ARN and return the configured function
"""
intent = event.get("currentIntent")
if intent:
intent = intent.get("name")
if intent:
return self.settings.AWS_BOT_EVENT_MAPPING.get(
"{}:{}".format(intent, event.get("invocationSource"))
)
def get_function_for_cognito_trigger(self, trigger):
"""
Get the associated function to execute for a cognito trigger
"""
print(
"get_function_for_cognito_trigger",
self.settings.COGNITO_TRIGGER_MAPPING,
trigger,
self.settings.COGNITO_TRIGGER_MAPPING.get(trigger),
)
return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)
def handler(self, event, context):
"""
An AWS Lambda function which parses specific API Gateway input into a
WSGI request, feeds it to our WSGI app, processes the response, and returns
that back to the API Gateway.
"""
settings = self.settings
# If in DEBUG mode, log all raw incoming events.
if settings.DEBUG:
logger.debug("Zappa Event: {}".format(event))
# Set any API Gateway defined Stage Variables
# as env vars
if event.get("stageVariables"):
for key in event["stageVariables"].keys():
os.environ[str(key)] = event["stageVariables"][key]
# This is the result of a keep alive, recertify
# or scheduled event.
if event.get("detail-type") == "Scheduled Event":
whole_function = event["resources"][0].split("/")[-1].split("-")[-1]
# This is a scheduled function.
if "." in whole_function:
app_function = self.import_module_and_get_function(whole_function)
# Execute the function!
return self.run_function(app_function, event, context)
# Else, let this execute as it were.
# This is a direct command invocation.
elif event.get("command", None):
whole_function = event["command"]
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
print("Result of %s:" % whole_function)
print(result)
return result
# This is a direct, raw python invocation.
# It's _extremely_ important we don't allow this event source
# to be overridden by unsanitized, non-admin user input.
elif event.get("raw_command", None):
raw_command = event["raw_command"]
exec(raw_command)
return
# This is a Django management command invocation.
elif event.get("manage", None):
from django.core import management
try: # Support both for tests
from zappa.ext.django_zappa import get_django_wsgi
except ImportError as e: # pragma: no cover
from django_zappa_app import get_django_wsgi
# Get the Django WSGI app from our extension
# We don't actually need the function,
# but we do need to do all of the required setup for it.
app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
# Couldn't figure out how to get the value into stdout with StringIO..
# Read the log for now. :[]
management.call_command(*event["manage"].split(" "))
return {}
# This is an AWS-event triggered invocation.
elif event.get("Records", None):
records = event.get("Records")
result = None
whole_function = self.get_function_for_aws_event(records[0])
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# this is an AWS-event triggered from Lex bot's intent
elif event.get("bot"):
result = None
whole_function = self.get_function_from_bot_intent_trigger(event)
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# This is an API Gateway authorizer event
elif event.get("type") == "TOKEN":
whole_function = self.settings.AUTHORIZER_FUNCTION
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
policy = self.run_function(app_function, event, context)
return policy
else:
logger.error(
"Cannot find a function to process the authorization request."
)
raise Exception("Unauthorized")
# This is an AWS Cognito Trigger Event
elif event.get("triggerSource", None):
triggerSource = event.get("triggerSource")
whole_function = self.get_function_for_cognito_trigger(triggerSource)
result = event
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error(
"Cannot find a function to handle cognito trigger {}".format(
triggerSource
)
)
return result
# This is a CloudWatch event
# Related: https://github.com/Miserlou/Zappa/issues/1924
elif event.get("awslogs", None):
result = None
whole_function = "{}.{}".format(settings.APP_MODULE, settings.APP_FUNCTION)
app_function = self.import_module_and_get_function(whole_function)
if app_function:
result = self.run_function(app_function, event, context)
logger.debug("Result of %s:" % whole_function)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# Normal web app flow
try:
# Timing
time_start = datetime.datetime.now()
# This is a normal HTTP request
if event.get("httpMethod", None):
script_name = ""
is_elb_context = False
headers = merge_headers(event)
if event.get("requestContext", None) and event["requestContext"].get(
"elb", None
):
# Related: https://github.com/Miserlou/Zappa/issues/1715
# inputs/outputs for lambda loadbalancer
# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
is_elb_context = True
# host is lower-case when forwarded from ELB
host = headers.get("host")
# TODO: pathParameters is a first-class citizen in apigateway but not available without
# some parsing work for ELB (is this parameter used for anything?)
event["pathParameters"] = ""
else:
if headers:
host = headers.get("Host")
else:
host = None
logger.debug("host found: [{}]".format(host))
if host:
if "amazonaws.com" in host:
logger.debug("amazonaws found in host")
# The path provided in th event doesn't include the
# stage, so we must tell Flask to include the API
# stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
script_name = "/" + settings.API_STAGE
else:
# This is a test request sent from the AWS console
if settings.DOMAIN:
# Assume the requests received will be on the specified
# domain. No special handling is required
pass
else:
# Assume the requests received will be to the
# amazonaws.com endpoint, so tell Flask to include the
# API stage
script_name = "/" + settings.API_STAGE
base_path = getattr(settings, "BASE_PATH", None)
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(
event,
script_name=script_name,
base_path=base_path,
trailing_slash=self.trailing_slash,
binary_support=settings.BINARY_SUPPORT,
context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS,
)
# We are always on https on Lambda, so tell our wsgi app that.
environ["HTTPS"] = "on"
environ["wsgi.url_scheme"] = "https"
environ["lambda.context"] = context
environ["lambda.event"] = event
# Execute the application
with Response.from_app(self.wsgi_app, environ) as response:
# This is the object we're going to return.
# Pack the WSGI response into our special dictionary.
zappa_returndict = dict()
# Issue #1715: ALB support. ALB responses must always include
# base64 encoding and status description
if is_elb_context:
zappa_returndict.setdefault("isBase64Encoded", False)
zappa_returndict.setdefault(
"statusDescription", response.status
)
if response.data:
if (
settings.BINARY_SUPPORT
and not response.mimetype.startswith("text/")
and response.mimetype != "application/json"
):
zappa_returndict["body"] = base64.b64encode(
response.data
).decode("utf-8")
zappa_returndict["isBase64Encoded"] = True
else:
zappa_returndict["body"] = response.get_data(as_text=True)
zappa_returndict["statusCode"] = response.status_code
if "headers" in event:
zappa_returndict["headers"] = {}
for key, value in response.headers:
zappa_returndict["headers"][key] = value
if "multiValueHeaders" in event:
zappa_returndict["multiValueHeaders"] = {}
for key, value in response.headers:
zappa_returndict["multiValueHeaders"][
key
] = response.headers.getlist(key)
# Calculate the total response time,
# and log it in the Common Log format.
time_end = datetime.datetime.now()
delta = time_end - time_start
response_time_ms = delta.total_seconds() * 1000
response.content = response.data
common_log(environ, response, response_time=response_time_ms)
return zappa_returndict
except Exception as e: # pragma: no cover
# Print statements are visible in the logs either way
print(e)
exc_info = sys.exc_info()
message = (
"An uncaught exception happened while servicing this request. "
"You can investigate this with the `zappa tail` command."
)
# If we didn't even build an app_module, just raise.
if not settings.DJANGO_SETTINGS:
try:
self.app_module
except NameError as ne:
message = "Failed to import module: {}".format(ne.message)
# Call exception handler for unhandled exceptions
exception_handler = self.settings.EXCEPTION_HANDLER
self._process_exception(
exception_handler=exception_handler,
event=event,
context=context,
exception=e,
)
# Return this unspecified exception as a 500, using template that API Gateway expects.
content = collections.OrderedDict()
content["statusCode"] = 500
body = {"message": message}
if settings.DEBUG: # only include traceback if debug is on.
body["traceback"] = traceback.format_exception(
*exc_info
) # traceback as a list for readability.
content["body"] = json.dumps(str(body), sort_keys=True, indent=4)
return content
def lambda_handler(event, context):  # pragma: no cover
    # Module-level entry point configured as the Lambda handler; delegates to
    # the (possibly pre-instantiated) singleton LambdaHandler.
    return LambdaHandler.lambda_handler(event, context)
def keep_warm_callback(event, context):
    """Method is triggered by the CloudWatch event scheduled when keep_warm setting is set to true."""
    # Running the normal handler with an empty event performs the full app
    # setup, which is what keeps the container warm.
    lambda_handler(
        event={}, context=context
    )  # overriding event with an empty one so that web app initialization will
    # be triggered.
# Optional eager instantiation: building the handler at import time moves the
# cold-start cost into the Lambda init phase.
global_handler = None
if os.environ.get("INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT"):
    global_handler = LambdaHandler()
import base64
import logging
import sys
from urllib.parse import urlencode
import six
from requestlogger import ApacheFormatter
from werkzeug import urls
from .utilities import merge_headers, titlecase_keys
# HTTP methods that may carry a request body and therefore need binary
# (base64) handling when binary_support is enabled.
BINARY_METHODS = ["POST", "PUT", "PATCH", "DELETE", "CONNECT", "OPTIONS"]
def create_wsgi_request(
    event_info,
    server_name="zappa",
    script_name=None,
    trailing_slash=True,
    binary_support=False,
    base_path=None,
    context_header_mappings=None,
):
    """
    Given some event_info via API Gateway,
    create and return a valid WSGI request environ.

    :param event_info: raw API Gateway / ALB event dict.
    :param server_name: value for the WSGI SERVER_NAME key.
    :param script_name: optional mount prefix (e.g. "/<stage>") for the app.
    :param trailing_slash: kept for interface compatibility; unused here.
    :param binary_support: when True, bodies for binary-capable methods are
        base64-decoded if the event flags them as encoded.
    :param base_path: optional base path to strip from the request path.
    :param context_header_mappings: mapping of header name -> dotted path into
        ``event_info["requestContext"]`` to surface as request headers.
        Fix: the default was a shared mutable ``{}``; ``None`` behaves
        identically because the mapping is only ever read.
    """
    method = event_info["httpMethod"]
    headers = (
        merge_headers(event_info) or {}
    )  # Allow for the AGW console 'Test' button to work (Pull #735)
    """
    API Gateway and ALB both started allowing for multi-value querystring
    params in Nov. 2018. If there aren't multi-value params present, then
    it acts identically to 'queryStringParameters', so we can use it as a
    drop-in replacement.
    The one caveat here is that ALB will only include _one_ of
    queryStringParameters _or_ multiValueQueryStringParameters, which means
    we have to check for the existence of one and then fall back to the
    other.
    """
    if "multiValueQueryStringParameters" in event_info:
        query = event_info["multiValueQueryStringParameters"]
        query_string = urlencode(query, doseq=True) if query else ""
    else:
        query = event_info.get("queryStringParameters", {})
        query_string = urlencode(query) if query else ""
    if context_header_mappings:
        for key, value in context_header_mappings.items():
            # Walk the dotted path into requestContext; bail out on any
            # missing segment.
            parts = value.split(".")
            header_val = event_info["requestContext"]
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val
    # Extract remote user from context if Authorizer is enabled
    remote_user = None
    if event_info["requestContext"].get("authorizer"):
        remote_user = event_info["requestContext"]["authorizer"].get("principalId")
    elif event_info["requestContext"].get("identity"):
        remote_user = event_info["requestContext"]["identity"].get("userArn")
    # Related: https://github.com/Miserlou/Zappa/issues/677
    # https://github.com/Miserlou/Zappa/issues/683
    # https://github.com/Miserlou/Zappa/issues/696
    # https://github.com/Miserlou/Zappa/issues/836
    # https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get("isBase64Encoded", False):
            encoded_body = event_info["body"]
            body = base64.b64decode(encoded_body)
        else:
            body = event_info["body"]
            if isinstance(body, six.string_types):
                body = body.encode("utf-8")
    else:
        body = event_info["body"]
        if isinstance(body, six.string_types):
            body = body.encode("utf-8")
    # Make header names canonical, e.g. content-type => Content-Type
    # https://github.com/Miserlou/Zappa/issues/1188
    headers = titlecase_keys(headers)
    path = urls.url_unquote(event_info["path"])
    if base_path:
        script_name = "/" + base_path
        if path.startswith(script_name):
            path = path[len(script_name) :]
    x_forwarded_for = headers.get("X-Forwarded-For", "")
    if "," in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        remote_addr = x_forwarded_for.split(", ")[-2]
    else:
        remote_addr = x_forwarded_for or "127.0.0.1"
    environ = {
        "PATH_INFO": get_wsgi_string(path),
        "QUERY_STRING": get_wsgi_string(query_string),
        "REMOTE_ADDR": remote_addr,
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": get_wsgi_string(str(script_name)) if script_name else "",
        "SERVER_NAME": str(server_name),
        "SERVER_PORT": headers.get("X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": str("HTTP/1.1"),
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": headers.get("X-Forwarded-Proto", "http"),
        "wsgi.input": body,
        "wsgi.errors": sys.stderr,
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
    }
    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if "Content-Type" in headers:
            environ["CONTENT_TYPE"] = headers["Content-Type"]
        # This must be Bytes or None
        environ["wsgi.input"] = six.BytesIO(body)
        if body:
            environ["CONTENT_LENGTH"] = str(len(body))
        else:
            environ["CONTENT_LENGTH"] = "0"
    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace("-", "_")
        environ[wsgi_name] = str(headers[header])
    if script_name:
        environ["SCRIPT_NAME"] = script_name
        path_info = environ["PATH_INFO"]
        if script_name in path_info:
            # NOTE(review): str.replace returns a new string and the result is
            # discarded, so this line is a no-op and PATH_INFO keeps the script
            # name. Left unchanged pending confirmation of the intended
            # behavior, since assigning it would alter deployed routing.
            environ["PATH_INFO"].replace(script_name, "")
    if remote_user:
        environ["REMOTE_USER"] = remote_user
    if event_info["requestContext"].get("authorizer"):
        environ["API_GATEWAY_AUTHORIZER"] = event_info["requestContext"]["authorizer"]
    return environ
def common_log(environ, response, response_time=None):
    """
    Given the WSGI environ and the response,
    log this event in Common Log Format.

    Returns the formatted log entry after emitting it at INFO level.
    """
    logger = logging.getLogger()
    if not response_time:
        formatter = ApacheFormatter(with_response_time=False)
        log_entry = formatter(response.status_code, environ, len(response.content))
    else:
        formatter = ApacheFormatter(with_response_time=True)
        try:
            log_entry = formatter(
                response.status_code,
                environ,
                len(response.content),
                rt_us=response_time,
            )
        except TypeError:
            # Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
            log_entry = formatter(
                response.status_code,
                environ,
                len(response.content),
                rt_ms=response_time,
            )
    logger.info(log_entry)
    return log_entry
# Related: https://github.com/Miserlou/Zappa/issues/1199
def get_wsgi_string(string, encoding="utf-8"):
    """
    Returns wsgi-compatible string

    Encodes *string* with *encoding* and re-decodes the bytes as latin-1,
    which is how PEP 3333 expects environ values to be carried.
    """
    raw_bytes = string.encode(encoding)
    return raw_bytes.decode("iso-8859-1")
import importlib
import inspect
import json
import os
import time
import uuid
from functools import update_wrapper, wraps
import boto3
import botocore
from .utilities import get_topic_name
try:
from zappa_settings import ASYNC_RESPONSE_TABLE
except ImportError:
ASYNC_RESPONSE_TABLE = None
# Declare these here so they're kept warm.
# Module-level clients are created once per Lambda container and reused
# across invocations.
try:
    aws_session = boto3.Session()
    LAMBDA_CLIENT = aws_session.client("lambda")
    SNS_CLIENT = aws_session.client("sns")
    STS_CLIENT = aws_session.client("sts")
    DYNAMODB_CLIENT = aws_session.client("dynamodb")
except botocore.exceptions.NoRegionError as e:  # pragma: no cover
    # This can happen while testing on Travis, but it's taken care of
    # during class initialization.
    pass
##
# Response and Exception classes
##
# Direct Lambda "Event" invocations and SNS messages are both capped at
# 256 KB payloads.
LAMBDA_ASYNC_PAYLOAD_LIMIT = 256000
SNS_ASYNC_PAYLOAD_LIMIT = 256000
class AsyncException(Exception):  # pragma: no cover
    """Simple exception class for async tasks."""

    pass
class LambdaAsyncResponse:
    """
    Base Response Dispatcher class
    Can be used directly or subclassed if the method to send the message is changed.
    """

    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """
        Set up the Lambda client and, when requested, a response-capture id.

        :param lambda_function_name: name of the Lambda function to invoke.
        :param aws_region: region of the target function.
        :param capture_response: when True, allocate a response_id under which
            the task result can be stored (requires async_response_table to be
            configured in the settings; otherwise capture is disabled).
        :param kwargs: may carry ``boto_session`` to override the warm client.
        """
        if kwargs.get("boto_session"):
            self.client = kwargs.get("boto_session").client("lambda")
        else:  # pragma: no cover
            self.client = LAMBDA_CLIENT
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        self.capture_response = capture_response

    def send(self, task_path, args, kwargs):
        """
        Create the message object and pass it to the actual sender.
        """
        message = {
            "task_path": task_path,
            "capture_response": self.capture_response,
            "response_id": self.response_id,
            "args": args,
            "kwargs": kwargs,
        }
        self._send(message)
        return self

    def _send(self, message):
        """
        Given a message, directly invoke the lambda function for this task.
        """
        message["command"] = "zappa.asynchronous.route_lambda_task"
        payload = json.dumps(message)
        # NOTE(review): len() counts characters, not bytes, so payloads with
        # multi-byte characters could exceed the service limit — confirm.
        if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for async Lambda call")
        self.response = self.client.invoke(
            FunctionName=self.lambda_function_name,
            InvocationType="Event",  # makes the call async
            Payload=payload,
        )
        # Lambda acknowledges async ("Event") invocations with HTTP 202.
        self.sent = self.response.get("StatusCode", 0) == 202
class SnsAsyncResponse(LambdaAsyncResponse):
    """
    Send a SNS message to a specified SNS topic
    Serialise the func path and arguments
    """

    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """
        Resolve the SNS client and the target topic ARN.

        The ARN may be supplied directly via the ``arn`` kwarg; otherwise it
        is derived from the caller's AWS account id and the Zappa topic name
        for *lambda_function_name*.
        """
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region
        if kwargs.get("boto_session"):
            self.client = kwargs.get("boto_session").client("sns")
        else:  # pragma: no cover
            self.client = SNS_CLIENT
        if kwargs.get("arn"):
            self.arn = kwargs.get("arn")
        else:
            if kwargs.get("boto_session"):
                sts_client = kwargs.get("boto_session").client("sts")
            else:
                sts_client = STS_CLIENT
            AWS_ACCOUNT_ID = sts_client.get_caller_identity()["Account"]
            self.arn = "arn:aws:sns:{region}:{account}:{topic_name}".format(
                region=self.aws_region,
                account=AWS_ACCOUNT_ID,
                topic_name=get_topic_name(self.lambda_function_name),
            )
        # Issue: https://github.com/Miserlou/Zappa/issues/1209
        # TODO: Refactor
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        # Assign once, after any MISCONFIGURED downgrade above (the previous
        # code redundantly assigned this attribute twice).
        self.capture_response = capture_response

    def _send(self, message):
        """
        Given a message, publish to this topic.

        Raises:
            AsyncException: if the serialised payload exceeds the SNS
                message size limit.
        """
        message["command"] = "zappa.asynchronous.route_sns_task"
        payload = json.dumps(message)
        # Fix: compare against the SNS limit; the previous code mistakenly
        # used LAMBDA_ASYNC_PAYLOAD_LIMIT here (both are currently 256000,
        # so observable behavior is unchanged).
        if len(payload) > SNS_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for SNS")
        self.response = self.client.publish(TargetArn=self.arn, Message=payload)
        self.sent = self.response.get("MessageId")
##
# Async Routers
##
# Maps the `service` argument of run()/task() to its dispatcher class.
ASYNC_CLASSES = {
    "lambda": LambdaAsyncResponse,
    "sns": SnsAsyncResponse,
}
def route_lambda_task(event, context):
    """
    Deserialises the message from event passed to zappa.handler.run_function
    imports the function, calls the function with args
    """
    # A direct Lambda "Event" invocation delivers the message as the event
    # object itself.
    return run_message(event)
def route_sns_task(event, context):
    """
    Gets SNS Message, deserialises the message,
    imports the function, calls the function with args
    """
    # SNS wraps the published payload as a JSON string inside the first record.
    first_record = event["Records"][0]
    decoded_message = json.loads(first_record["Sns"]["Message"])
    return run_message(decoded_message)
def run_message(message):
    """
    Runs a function defined by a message object with keys:
    'task_path', 'args', and 'kwargs' used by lambda routing
    and a 'command' in handler.py
    """
    capture = message.get("capture_response", False)
    if capture:
        # Write a placeholder row first so pollers can observe the task as
        # running before it completes.
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                "id": {"S": str(message["response_id"])},
                "ttl": {"N": str(int(time.time() + 600))},
                "async_status": {"S": "in progress"},
                "async_response": {"S": str(json.dumps("N/A"))},
            },
        )
    task_func = import_and_get_task(message["task_path"])
    # Prefer the undecorated implementation when the target was wrapped by @task.
    target = task_func.sync if hasattr(task_func, "sync") else task_func
    result = target(*message["args"], **message["kwargs"])
    if capture:
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={"id": {"S": str(message["response_id"])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ":r": {"S": str(json.dumps(result))},
                ":s": {"S": "complete"},
            },
        )
    return result
##
# Execution interfaces and classes
##
def run(
    func,
    args=None,
    kwargs=None,
    service="lambda",
    capture_response=False,
    remote_aws_lambda_function_name=None,
    remote_aws_region=None,
    **task_kwargs
):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:
    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)
    If you want to use SNS, then do:
    zappa.asynchronous.run(func, args, kwargs, service='sns')
    and other arguments are similar to @task

    Returns the dispatcher instance returned by its send() call.
    """
    # Fix: use None sentinels instead of the previous mutable default
    # arguments ([] / {}), which are shared between calls.
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
        "AWS_LAMBDA_FUNCTION_NAME"
    )
    aws_region = remote_aws_region or os.environ.get("AWS_REGION")
    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](
        lambda_function_name=lambda_function_name,
        aws_region=aws_region,
        capture_response=capture_response,
        **task_kwargs
    ).send(task_path, args, kwargs)
# Handy:
# http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function
# However, this needs to pass inspect.getargspec() in handler.py which does not take classes
# Wrapper written to take optional arguments
# http://chase-seibert.github.io/blog/2013/12/17/python-decorator-optional-parameter.html
def task(*args, **kwargs):
    """Async task decorator: makes the decorated function run asynchronously.
    Args:
        func (function): the function to be wrapped
            Further requirements:
            func must be an independent top-level function.
            i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against
        capture_response (bool): when True, persist the task's return value to
            the async response DynamoDB table
    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question
    """
    func = None
    # Bare usage (@task): the wrapped function arrives as the single
    # positional argument. Parameterised usage (@task(...)) passes only kwargs.
    if len(args) == 1 and callable(args[0]):
        func = args[0]
    if not kwargs:  # Default Values
        service = "lambda"
        lambda_function_name_arg = None
        aws_region_arg = None
    else:  # Arguments were passed
        service = kwargs.get("service", "lambda")
        lambda_function_name_arg = kwargs.get("remote_aws_lambda_function_name")
        aws_region_arg = kwargs.get("remote_aws_region")
    capture_response = kwargs.get("capture_response", False)
    def func_wrapper(func):
        # Importable "module.function" path, computed once at decoration time.
        task_path = get_func_task_path(func)
        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func
            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.
            Returns:
                In async mode, the object returned includes state of the dispatch.
                For instance
                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get(
                "AWS_LAMBDA_FUNCTION_NAME"
            )
            aws_region = aws_region_arg or os.environ.get("AWS_REGION")
            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](
                    lambda_function_name=lambda_function_name,
                    aws_region=aws_region,
                    capture_response=capture_response,
                ).send(task_path, args, kwargs)
                return send_result
            else:
                return func(*args, **kwargs)
        update_wrapper(_run_async, func)
        # Expose the dispatch service and the original synchronous function.
        _run_async.service = service
        _run_async.sync = func
        return _run_async
    # Support both @task (func already captured) and @task(...) usage.
    return func_wrapper(func) if func else func_wrapper
def task_sns(func):
    """
    SNS-based task dispatcher. Functions the same way as task()
    """
    sns_wrapped = task(func, service="sns")
    return sns_wrapped
##
# Utility Functions
##
def import_and_get_task(task_path):
    """
    Given a modular path to a function, import that module
    and return the function.
    """
    module_name, function_name = task_path.rsplit(".", 1)
    target_module = importlib.import_module(module_name)
    return getattr(target_module, function_name)
def get_func_task_path(func):
    """
    Format the modular task path for a function via inspection.
    """
    containing_module = inspect.getmodule(func)
    return "{module_path}.{func_name}".format(
        module_path=containing_module.__name__, func_name=func.__name__
    )
def get_async_response(response_id):
    """
    Get the response from the async table
    """
    lookup = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE, Key={"id": {"S": str(response_id)}}
    )
    item = lookup.get("Item")
    if item is None:
        # No row for this id (not captured, expired, or still unknown).
        return None
    return {
        "status": item["async_status"]["S"],
        "response": json.loads(item["async_response"]["S"]),
    }
import getpass
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
import boto3
import botocore
import requests
import troposphere
import troposphere.apigateway
from botocore.exceptions import ClientError
from setuptools import find_packages
from tqdm import tqdm
from .utilities import (
add_event_source,
conflicts_with_a_neighbouring_module,
contains_python_files_or_subdirs,
copytree,
get_topic_name,
get_venv_from_python_version,
human_size,
remove_event_source,
)
##
# Logging Config
##
# Module-level logger; INFO by default so deployment progress is visible.
logging.basicConfig(format="%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##
# Policies And Template Mappings
##
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
# Regions in which API Gateway is available. Fix: the previous list contained
# "eu-north-1" twice; every region now appears exactly once.
API_GATEWAY_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
# Regions in which AWS Lambda is available. Fix: the previous list contained
# "eu-north-1" twice; every region now appears exactly once.
LAMBDA_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
# Glob patterns pruned from the deployment archive to keep it small.
ZIP_EXCLUDES = [
    "*.exe",
    "*.DS_Store",
    "*.Python",
    "*.git",
    ".git/*",
    "*.zip",
    "*.tar.gz",
    "*.hg",
    "pip",
    "docutils*",
    "setuputils*",  # NOTE(review): looks like a typo for "setuptools*" — confirm before changing
    "__pycache__/*",
]
# When using ALB as an event source for Lambdas, we need to create an alias
# to ensure that, on zappa update, the ALB doesn't lose permissions to access
# the Lambda.
# See: https://github.com/Miserlou/Zappa/pull/1730
ALB_LAMBDA_ALIAS = "current-alb-version"
##
# Classes
##
class Zappa:
    """
    Zappa!
    Makes it easy to run Python web applications on AWS Lambda/API Gateway.
    """

    ##
    # Configurables
    ##
    # HTTP methods proxied through API Gateway ("ANY" catches all verbs).
    http_methods = ["ANY"]
    # Default name of the IAM execution role Zappa creates/uses.
    role_name = "ZappaLambdaExecution"
    # Optional extra IAM statements merged into the attach policy.
    extra_permissions = None
    # IAM trust / permissions policy documents (JSON strings).
    assume_policy = ASSUME_POLICY
    attach_policy = ATTACH_POLICY
    # Optional resource policy applied to the API Gateway API.
    apigateway_policy = None
    # Valid values for the API Gateway CloudWatch logging level.
    cloudwatch_log_levels = ["OFF", "ERROR", "INFO"]
    # Whether AWS X-Ray tracing is enabled.
    xray_tracing = False
    ##
    # Credentials
    ##
    # Populated during initialization: boto3 session and execution-role ARN.
    boto_session = None
    credentials_arn = None
def __init__(
self,
boto_session=None,
profile_name=None,
aws_region=None,
load_credentials=True,
desired_role_name=None,
desired_role_arn=None,
runtime="python3.6", # Detected at runtime in CLI
tags=(),
endpoint_urls={},
xray_tracing=False,
):
"""
Instantiate this new Zappa instance, loading any custom credentials if necessary.
"""
# Set aws_region to None to use the system's region instead
if aws_region is None:
# https://github.com/Miserlou/Zappa/issues/413
self.aws_region = boto3.Session().region_name
logger.debug("Set region from boto: %s", self.aws_region)
else:
self.aws_region = aws_region
if desired_role_name:
self.role_name = desired_role_name
if desired_role_arn:
self.credentials_arn = desired_role_arn
self.runtime = runtime
if self.runtime == "python3.6":
self.manylinux_suffix_start = "cp36m"
elif self.runtime == "python3.7":
self.manylinux_suffix_start = "cp37m"
elif self.runtime == "python3.8":
# The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
# See https://github.com/pypa/manylinux for a more detailed explanation
self.manylinux_suffix_start = "cp38"
else:
self.manylinux_suffix_start = "cp39"
# AWS Lambda supports manylinux1/2010, manylinux2014, and manylinux_2_24
manylinux_suffixes = ("_2_24", "2014", "2010", "1")
self.manylinux_wheel_file_match = re.compile(
f'^.*{self.manylinux_suffix_start}-(manylinux_\d+_\d+_x86_64[.])?manylinux({"|".join(manylinux_suffixes)})_x86_64[.]whl$'
)
self.manylinux_wheel_abi3_file_match = re.compile(
f'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
)
self.endpoint_urls = endpoint_urls
self.xray_tracing = xray_tracing
# Some common invocations, such as DB migrations,
# can take longer than the default.
# Config used for direct invocations of Lambda functions from the Zappa CLI.
# Note that the maximum configurable Lambda function execution time (15 minutes)
# is longer than the maximum timeout configurable in API Gateway (30 seconds).
# Related: https://github.com/Miserlou/Zappa/issues/205
long_config_dict = {
"region_name": aws_region,
"connect_timeout": 5,
"read_timeout": 900,
}
long_config = botocore.client.Config(**long_config_dict)
if load_credentials:
self.load_credentials(boto_session, profile_name)
# Initialize clients
self.s3_client = self.boto_client("s3")
self.lambda_client = self.boto_client("lambda", config=long_config)
self.elbv2_client = self.boto_client("elbv2")
self.events_client = self.boto_client("events")
self.apigateway_client = self.boto_client("apigateway")
# AWS ACM certificates need to be created from us-east-1 to be used by API gateway
east_config = botocore.client.Config(region_name="us-east-1")
self.acm_client = self.boto_client("acm", config=east_config)
self.logs_client = self.boto_client("logs")
self.iam_client = self.boto_client("iam")
self.iam = self.boto_resource("iam")
self.cloudwatch = self.boto_client("cloudwatch")
self.route53 = self.boto_client("route53")
self.sns_client = self.boto_client("sns")
self.cf_client = self.boto_client("cloudformation")
self.dynamodb_client = self.boto_client("dynamodb")
self.cognito_client = self.boto_client("cognito-idp")
self.sts_client = self.boto_client("sts")
self.tags = tags
self.cf_template = troposphere.Template()
self.cf_api_resources = []
self.cf_parameters = {}
def configure_boto_session_method_kwargs(self, service, kw):
"""Allow for custom endpoint urls for non-AWS (testing and bootleg cloud) deployments"""
if service in self.endpoint_urls and not "endpoint_url" in kw:
kw["endpoint_url"] = self.endpoint_urls[service]
return kw
def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
    def cache_param(self, value):
        """Returns a troposphere Ref to a value cached as a parameter."""
        if value not in self.cf_parameters:
            # Parameter logical IDs are generated as "A", "B", "C", ...
            # NOTE(review): chr(ord("A") + n) walks past "Z" into punctuation
            # after 26 cached values — confirm templates never need more.
            keyname = chr(ord("A") + len(self.cf_parameters))
            param = self.cf_template.add_parameter(
                troposphere.Parameter(
                    keyname, Type="String", Default=value, tags=self.tags
                )
            )
            self.cf_parameters[value] = param
        return troposphere.Ref(self.cf_parameters[value])
##
# Packaging
##
    def copy_editable_packages(self, egg_links, temp_package_path):
        """Copy packages installed in editable mode (``pip install -e``) into
        *temp_package_path*, resolving each ``.egg-link`` to its source checkout."""
        for egg_link in egg_links:
            with open(egg_link, "rb") as df:
                # The first line of an .egg-link file is the path to the
                # project's source directory.
                egg_path = df.read().decode("utf-8").splitlines()[0].strip()
                pkgs = set(
                    [
                        x.split(".")[0]
                        for x in find_packages(egg_path, exclude=["test", "tests"])
                    ]
                )
                for pkg in pkgs:
                    copytree(
                        os.path.join(egg_path, pkg),
                        os.path.join(temp_package_path, pkg),
                        metadata=False,
                        symlinks=False,
                    )
        if temp_package_path:
            # now remove any egg-links as they will cause issues if they still exist
            for link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
                os.remove(link)
def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(
pkg_name=req.project_name, installed_distros=installed_distros
)
return list(set(deps)) # de-dupe before returning
    def create_handler_venv(self):
        """
        Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

        Returns the path of the new handler venv folder.
        """
        import subprocess

        # We will need the current venv to pull Zappa from
        current_venv = self.get_current_venv()
        # Make a new folder for the handler packages
        ve_path = os.path.join(os.getcwd(), "handler_venv")
        # Windows venvs use "Lib"; POSIX venvs use "lib/pythonX.Y".
        if os.sys.platform == "win32":
            current_site_packages_dir = os.path.join(
                current_venv, "Lib", "site-packages"
            )
            venv_site_packages_dir = os.path.join(ve_path, "Lib", "site-packages")
        else:
            current_site_packages_dir = os.path.join(
                current_venv, "lib", get_venv_from_python_version(), "site-packages"
            )
            venv_site_packages_dir = os.path.join(
                ve_path, "lib", get_venv_from_python_version(), "site-packages"
            )
        if not os.path.isdir(venv_site_packages_dir):
            os.makedirs(venv_site_packages_dir)
        # Copy zappa* to the new virtualenv
        zappa_things = [
            z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == "zappa"
        ]
        for z in zappa_things:
            copytree(
                os.path.join(current_site_packages_dir, z),
                os.path.join(venv_site_packages_dir, z),
            )
        # Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
        zappa_deps = self.get_deps_list("zappa")
        pkg_list = ["{0!s}=={1!s}".format(dep, version) for dep, version in zappa_deps]
        # Need to manually add setuptools
        pkg_list.append("setuptools")
        command = [
            "pip",
            "install",
            "--quiet",
            "--target",
            venv_site_packages_dir,
        ] + pkg_list
        # This is the recommended method for installing packages if you don't
        # want to depend on `setuptools`
        # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
        pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # Using communicate() to avoid deadlocks
        pip_process.communicate()
        pip_return_code = pip_process.returncode
        if pip_return_code:
            raise EnvironmentError("Pypi lookup failed")
        return ve_path
    # staticmethod as per https://github.com/Miserlou/Zappa/issues/780
    @staticmethod
    def get_current_venv():
        """
        Returns the path to the current virtualenv, or None when not running
        inside one (neither VIRTUAL_ENV nor a pyenv .python-version is set).
        """
        if "VIRTUAL_ENV" in os.environ:
            venv = os.environ["VIRTUAL_ENV"]
        elif os.path.exists(".python-version"):  # pragma: no cover
            # pyenv-managed environment: derive the venv path from the
            # interpreter pyenv reports for the local .python-version.
            try:
                subprocess.check_output(["pyenv", "help"], stderr=subprocess.STDOUT)
            except OSError:
                print(
                    "This directory seems to have pyenv's local venv, "
                    "but pyenv executable was not found."
                )
            with open(".python-version", "r") as f:
                # minor fix in how .python-version is read
                # Related: https://github.com/Miserlou/Zappa/issues/921
                env_name = f.readline().strip()
            bin_path = subprocess.check_output(["pyenv", "which", "python"]).decode(
                "utf-8"
            )
            # Truncate ".../<env_name>/bin/python" back down to the env root.
            venv = bin_path[: bin_path.rfind(env_name)] + env_name
        else:  # pragma: no cover
            return None
        return venv
    def create_lambda_zip(
        self,
        prefix="lambda_package",
        handler_file=None,
        slim_handler=False,
        minify=True,
        exclude=None,
        exclude_glob=None,
        use_precompiled_packages=True,
        include=None,
        venv=None,
        output=None,
        disable_progress=False,
        archive_format="zip",
    ):
        """
        Create a Lambda-ready zip file of the current virtualenvironment and working directory.
        Returns path to that file.

        The archive contains: the project directory (unless slim_handler),
        the venv's site-packages (minus ZIP_EXCLUDES/exclude patterns when
        minify is set), and — when use_precompiled_packages is True — any
        cached manylinux wheels substituted in place of locally built ones.
        """
        # Validate archive_format
        if archive_format not in ["zip", "tarball"]:
            raise KeyError(
                "The archive format to create a lambda package must be zip or tarball"
            )
        # Pip is a weird package.
        # Calling this function in some environments without this can cause.. funkiness.
        import pip

        if not venv:
            venv = self.get_current_venv()
        build_time = str(int(time.time()))
        cwd = os.getcwd()
        if not output:
            if archive_format == "zip":
                archive_fname = prefix + "-" + build_time + ".zip"
            elif archive_format == "tarball":
                archive_fname = prefix + "-" + build_time + ".tar.gz"
        else:
            archive_fname = output
        archive_path = os.path.join(cwd, archive_fname)
        # Files that should be excluded from the zip
        if exclude is None:
            exclude = list()
        if exclude_glob is None:
            exclude_glob = list()
        # Exclude the zip itself
        exclude.append(archive_path)
        # Make sure that 'concurrent' is always forbidden.
        # https://github.com/Miserlou/Zappa/issues/827
        if not "concurrent" in exclude:
            exclude.append("concurrent")

        # Split a filesystem path into its normalized components.
        def splitpath(path):
            parts = []
            (path, tail) = os.path.split(path)
            while path and tail:
                parts.append(tail)
                (path, tail) = os.path.split(path)
            parts.append(os.path.join(path, tail))
            return list(map(os.path.normpath, parts))[::-1]

        split_venv = splitpath(venv)
        split_cwd = splitpath(cwd)
        # Ideally this should be avoided automatically,
        # but this serves as an okay stop-gap measure.
        if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
            print(
                "Warning! Your project and virtualenv have the same name! You may want "
                "to re-create your venv with a new name, or explicitly define a "
                "'project_name', as this may cause errors."
            )
        # First, do the project..
        temp_project_path = tempfile.mkdtemp(prefix="zappa-project")
        if not slim_handler:
            # Slim handler does not take the project files.
            if minify:
                # Related: https://github.com/Miserlou/Zappa/issues/744
                excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
                copytree(
                    cwd,
                    temp_project_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(cwd, temp_project_path, metadata=False, symlinks=False)
            for glob_path in exclude_glob:
                for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                    try:
                        os.remove(path)
                    except OSError:  # is a directory
                        shutil.rmtree(path)
        # If a handler_file is supplied, copy that to the root of the package,
        # because that's where AWS Lambda looks for it. It can't be inside a package.
        if handler_file:
            filename = handler_file.split(os.sep)[-1]
            shutil.copy(handler_file, os.path.join(temp_project_path, filename))
        # Create and populate package ID file and write to temp project path
        package_info = {}
        package_info["uuid"] = str(uuid.uuid4())
        package_info["build_time"] = build_time
        package_info["build_platform"] = os.sys.platform
        package_info["build_user"] = getpass.getuser()
        # TODO: Add git head and info?
        # Ex, from @scoates:
        # def _get_git_branch():
        #     chdir(DIR)
        #     out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
        #     lambci_branch = environ.get('LAMBCI_BRANCH', None)
        #     if out == "HEAD" and lambci_branch:
        #         out += " lambci:{}".format(lambci_branch)
        #     return out
        # def _get_git_hash():
        #     chdir(DIR)
        #     return check_output(['git', 'rev-parse', 'HEAD']).strip()
        # def _get_uname():
        #     return check_output(['uname', '-a']).strip()
        # def _get_user():
        #     return check_output(['whoami']).strip()
        # def set_id_info(zappa_cli):
        #     build_info = {
        #         'branch': _get_git_branch(),
        #         'hash': _get_git_hash(),
        #         'build_uname': _get_uname(),
        #         'build_user': _get_user(),
        #         'build_time': datetime.datetime.utcnow().isoformat(),
        #     }
        #     with open(path.join(DIR, 'id_info.json'), 'w') as f:
        #         json.dump(build_info, f)
        #     return True
        package_id_file = open(
            os.path.join(temp_project_path, "package_info.json"), "w"
        )
        dumped = json.dumps(package_info, indent=4)
        try:
            package_id_file.write(dumped)
        except TypeError:  # This is a Python 2/3 issue. TODO: Make pretty!
            package_id_file.write(str(dumped))
        package_id_file.close()
        # Then, do the site-packages..
        egg_links = []
        temp_package_path = tempfile.mkdtemp(prefix="zappa-packages")
        if os.sys.platform == "win32":
            site_packages = os.path.join(venv, "Lib", "site-packages")
        else:
            site_packages = os.path.join(
                venv, "lib", get_venv_from_python_version(), "site-packages"
            )
        egg_links.extend(glob.glob(os.path.join(site_packages, "*.egg-link")))
        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(
                site_packages,
                temp_package_path,
                metadata=False,
                symlinks=False,
                ignore=shutil.ignore_patterns(*excludes),
            )
        else:
            copytree(site_packages, temp_package_path, metadata=False, symlinks=False)
        # We may have 64-bit specific packages too.
        site_packages_64 = os.path.join(
            venv, "lib64", get_venv_from_python_version(), "site-packages"
        )
        if os.path.exists(site_packages_64):
            egg_links.extend(glob.glob(os.path.join(site_packages_64, "*.egg-link")))
            if minify:
                excludes = ZIP_EXCLUDES + exclude
                copytree(
                    site_packages_64,
                    temp_package_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(
                    site_packages_64, temp_package_path, metadata=False, symlinks=False
                )
        if egg_links:
            self.copy_editable_packages(egg_links, temp_package_path)
        copy_tree(temp_package_path, temp_project_path, update=True)
        # Then the pre-compiled packages..
        if use_precompiled_packages:
            print("Downloading and installing dependencies..")
            installed_packages = self.get_installed_packages(
                site_packages, site_packages_64
            )
            try:
                for (
                    installed_package_name,
                    installed_package_version,
                ) in installed_packages.items():
                    cached_wheel_path = self.get_cached_manylinux_wheel(
                        installed_package_name,
                        installed_package_version,
                        disable_progress,
                    )
                    if cached_wheel_path:
                        # Otherwise try to use manylinux packages from PyPi..
                        # Related: https://github.com/Miserlou/Zappa/issues/398
                        shutil.rmtree(
                            os.path.join(temp_project_path, installed_package_name),
                            ignore_errors=True,
                        )
                        with zipfile.ZipFile(cached_wheel_path) as zfile:
                            zfile.extractall(temp_project_path)
            except Exception as e:
                print(e)
                # XXX - What should we do here?
        # Cleanup
        for glob_path in exclude_glob:
            for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                try:
                    os.remove(path)
                except OSError:  # is a directory
                    shutil.rmtree(path)
        # Then archive it all up..
        if archive_format == "zip":
            print("Packaging project as zip.")
            try:
                compression_method = zipfile.ZIP_DEFLATED
            except ImportError:  # pragma: no cover
                compression_method = zipfile.ZIP_STORED
            archivef = zipfile.ZipFile(archive_path, "w", compression_method)
        elif archive_format == "tarball":
            print("Packaging project as gzipped tarball.")
            archivef = tarfile.open(archive_path, "w|gz")
        for root, dirs, files in os.walk(temp_project_path):
            for filename in files:
                # Skip .pyc files for Django migrations
                # https://github.com/Miserlou/Zappa/issues/436
                # https://github.com/Miserlou/Zappa/issues/464
                if filename[-4:] == ".pyc" and root[-10:] == "migrations":
                    continue
                # If there is a .pyc file in this package,
                # we can skip the python source code as we'll just
                # use the compiled bytecode anyway..
                if filename[-3:] == ".py" and root[-10:] != "migrations":
                    abs_filname = os.path.join(root, filename)
                    abs_pyc_filename = abs_filname + "c"
                    if os.path.isfile(abs_pyc_filename):
                        # but only if the pyc is older than the py,
                        # otherwise we'll deploy outdated code!
                        py_time = os.stat(abs_filname).st_mtime
                        pyc_time = os.stat(abs_pyc_filename).st_mtime
                        if pyc_time > py_time:
                            continue
                # Make sure that the files are all correctly chmodded
                # Related: https://github.com/Miserlou/Zappa/issues/484
                # Related: https://github.com/Miserlou/Zappa/issues/682
                os.chmod(os.path.join(root, filename), 0o755)
                if archive_format == "zip":
                    # Actually put the file into the proper place in the zip
                    # Related: https://github.com/Miserlou/Zappa/pull/716
                    zipi = zipfile.ZipInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    zipi.create_system = 3
                    zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.writestr(zipi, f.read(), compression_method)
                elif archive_format == "tarball":
                    tarinfo = tarfile.TarInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    tarinfo.mode = 0o755
                    stat = os.stat(os.path.join(root, filename))
                    tarinfo.mtime = stat.st_mtime
                    tarinfo.size = stat.st_size
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.addfile(tarinfo, f)
            # Create python init file if it does not exist
            # Only do that if there are sub folders or python files and does not conflict with a neighbouring module
            # Related: https://github.com/Miserlou/Zappa/issues/766
            if not contains_python_files_or_subdirs(root):
                # if the directory does not contain any .py file at any level, we can skip the rest
                dirs[:] = [d for d in dirs if d != root]
            else:
                if (
                    "__init__.py" not in files
                    and not conflicts_with_a_neighbouring_module(root)
                ):
                    tmp_init = os.path.join(temp_project_path, "__init__.py")
                    open(tmp_init, "a").close()
                    os.chmod(tmp_init, 0o755)
                    arcname = os.path.join(
                        root.replace(temp_project_path, ""),
                        os.path.join(
                            root.replace(temp_project_path, ""), "__init__.py"
                        ),
                    )
                    if archive_format == "zip":
                        archivef.write(tmp_init, arcname)
                    elif archive_format == "tarball":
                        archivef.add(tmp_init, arcname)
        # And, we're done!
        archivef.close()
        # Trash the temp directory
        shutil.rmtree(temp_project_path)
        shutil.rmtree(temp_package_path)
        if os.path.isdir(venv) and slim_handler:
            # Remove the temporary handler venv folder
            shutil.rmtree(venv)
        return archive_fname
@staticmethod
def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages)
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64)
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {
package.project_name.lower(): package.version
for package in pkg_resources.WorkingSet()
if package.project_name.lower() in package_to_keep
or package.location.lower()
in [site_packages.lower(), site_packages_64.lower()]
}
return installed_packages
@staticmethod
def download_url_with_progress(url, stream, disable_progress):
"""
Downloads a given url in chunks and writes to the provided stream (can be any io stream).
Displays the progress bar for the download.
"""
resp = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 2)), stream=True
)
resp.raw.decode_content = True
progress = tqdm(
unit="B",
unit_scale=True,
total=int(resp.headers.get("Content-Length", 0)),
disable=disable_progress,
)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
progress.update(len(chunk))
stream.write(chunk)
progress.close()
def get_cached_manylinux_wheel(
self, package_name, package_version, disable_progress=False
):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), "cached_wheels")
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
else:
# Check if we already have a cached copy
wheel_name = re.sub("[^\w\d.]+", "_", package_name, re.UNICODE)
wheel_file = f"{wheel_name}-{package_version}-*_x86_64.whl"
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
for pathname in glob.iglob(wheel_path):
if re.match(self.manylinux_wheel_file_match, pathname) or re.match(
self.manylinux_wheel_abi3_file_match, pathname
):
print(
f" - {package_name}=={package_version}: Using locally cached manylinux wheel"
)
return pathname
# The file is not cached, download it.
wheel_url, filename = self.get_manylinux_wheel_url(
package_name, package_version
)
if not wheel_url:
return None
wheel_path = os.path.join(cached_wheels_dir, filename)
print(f" - {package_name}=={package_version}: Downloading")
with open(wheel_path, "wb") as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
return wheel_path
def get_manylinux_wheel_url(self, package_name, package_version):
"""
For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
"""
cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), "cached_pypi_info")
if not os.path.isdir(cached_pypi_info_dir):
os.makedirs(cached_pypi_info_dir)
# Even though the metadata is for the package, we save it in a
# filename that includes the package's version. This helps in
# invalidating the cached file if the user moves to a different
# version of the package.
# Related: https://github.com/Miserlou/Zappa/issues/899
json_file = "{0!s}-{1!s}.json".format(package_name, package_version)
json_file_path = os.path.join(cached_pypi_info_dir, json_file)
if os.path.exists(json_file_path):
with open(json_file_path, "rb") as metafile:
data = json.load(metafile)
else:
url = "https://pypi.python.org/pypi/{}/json".format(package_name)
try:
res = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 1.5))
)
data = res.json()
except Exception as e: # pragma: no cover
return None, None
with open(json_file_path, "wb") as metafile:
jsondata = json.dumps(data)
metafile.write(bytes(jsondata, "utf-8"))
if package_version not in data["releases"]:
return None, None
for f in data["releases"][package_version]:
if re.match(self.manylinux_wheel_file_match, f["filename"]):
return f["url"], f["filename"]
elif re.match(self.manylinux_wheel_abi3_file_match, f["filename"]):
return f["url"], f["filename"]
return None, None
##
# S3
##
    def upload_to_s3(self, source_path, bucket_name, disable_progress=False):
        r"""
        Given a file, upload it to S3.
        Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).
        Returns True on success, false on failure.

        Creates (and optionally tags) the bucket if it does not exist yet.
        The object key is the basename of `source_path`.
        """
        # head_bucket raises when the bucket is missing (or inaccessible);
        # in that case we create it on the fly.
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError:
            # This is really stupid S3 quirk. Technically, us-east-1 one has no S3,
            # it's actually "US Standard", or something.
            # More here: https://github.com/boto/boto3/issues/125
            if self.aws_region == "us-east-1":
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                )
            else:
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={"LocationConstraint": self.aws_region},
                )
            # Only tag freshly created buckets; existing buckets are left alone.
            if self.tags:
                tags = {
                    "TagSet": [
                        {"Key": key, "Value": self.tags[key]}
                        for key in self.tags.keys()
                    ]
                }
                self.s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)

        # Refuse to upload a missing or empty package.
        if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
            print("Problem with source file {}".format(source_path))
            return False

        dest_path = os.path.split(source_path)[1]
        try:
            source_size = os.stat(source_path).st_size
            print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
            progress = tqdm(
                total=float(os.path.getsize(source_path)),
                unit_scale=True,
                unit="B",
                disable=disable_progress,
            )
            # Attempt to upload to S3 using the S3 meta client with the progress bar.
            # If we're unable to do that, try one more time using a session client,
            # which cannot use the progress bar.
            # Related: https://github.com/boto/boto3/issues/611
            try:
                self.s3_client.upload_file(
                    source_path, bucket_name, dest_path, Callback=progress.update
                )
            except Exception as e:  # pragma: no cover
                self.s3_client.upload_file(source_path, bucket_name, dest_path)

            progress.close()
        except (KeyboardInterrupt, SystemExit):  # pragma: no cover
            raise
        except Exception as e:  # pragma: no cover
            print(e)
            return False
        return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
copy_src = {"Bucket": bucket_name, "Key": src_file_name}
try:
self.s3_client.copy(
CopySource=copy_src, Bucket=bucket_name, Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False
def remove_from_s3(self, file_name, bucket_name):
"""
Given a file name and a bucket, remove it from S3.
There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3.
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
try:
self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
except (
botocore.exceptions.ParamValidationError,
botocore.exceptions.ClientError,
): # pragma: no cover
return False
##
# Lambda
##
    def create_lambda_function(
        self,
        bucket=None,
        function_name=None,
        handler=None,
        s3_key=None,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        dead_letter_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        xray_tracing=False,
        local_zip=None,
        use_alb=False,
        layers=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, register that Lambda function.

        The code source is chosen in priority order: docker_image_uri, then
        local_zip, then the (bucket, s3_key) pair. Blocks until the new
        function reaches the Active state and returns its ARN.
        """
        # Normalize optional arguments (avoids mutable defaults).
        if not vpc_config:
            vpc_config = {}
        if not dead_letter_config:
            dead_letter_config = {}
        if not self.credentials_arn:
            # Lazily resolve the IAM role used as the execution role.
            self.get_credentials_arn()
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not layers:
            layers = []
        kwargs = dict(
            FunctionName=function_name,
            Role=self.credentials_arn,
            Description=description,
            Timeout=timeout,
            MemorySize=memory_size,
            Publish=publish,
            VpcConfig=vpc_config,
            DeadLetterConfig=dead_letter_config,
            Environment={"Variables": aws_environment_variables},
            KMSKeyArn=aws_kms_key_arn,
            # NOTE(review): this reads self.xray_tracing rather than the
            # `xray_tracing` parameter, which is otherwise unused -- confirm
            # that is intentional.
            TracingConfig={"Mode": "Active" if self.xray_tracing else "PassThrough"},
            Layers=layers,
        )
        # Zip-based functions need a runtime and handler; Image-based ones do not.
        if not docker_image_uri:
            kwargs["Runtime"] = runtime
            kwargs["Handler"] = handler
            kwargs["PackageType"] = "Zip"
        if docker_image_uri:
            kwargs["Code"] = {"ImageUri": docker_image_uri}
            # default is ZIP. override to Image for container support
            kwargs["PackageType"] = "Image"
            # The create function operation times out when this is '' (the default)
            # So just remove it from the kwargs if it is not specified
            if aws_kms_key_arn == "":
                kwargs.pop("KMSKeyArn")
        elif local_zip:
            kwargs["Code"] = {"ZipFile": local_zip}
        else:
            kwargs["Code"] = {"S3Bucket": bucket, "S3Key": s3_key}
        response = self.lambda_client.create_function(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If we're using an ALB, let's create an alias mapped to the newly
        # created function. This allows clean, no downtime association when
        # using application load balancers as an event source.
        # See: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        if use_alb:
            self.lambda_client.create_alias(
                FunctionName=resource_arn,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=resource_arn,
                ReservedConcurrentExecutions=concurrency,
            )
        # Wait for lambda to become active, otherwise many operations will fail
        self.wait_until_lambda_function_is_active(function_name)
        return resource_arn
    def update_lambda_function(
        self,
        bucket,
        function_name,
        s3_key=None,
        publish=True,
        local_zip=None,
        num_revisions=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, update that Lambda function's code.
        Optionally, delete previous versions if they exceed the optional limit.

        Also refreshes the ALB alias (if one exists), applies or removes
        reserved concurrency, and waits for the update to complete.
        Returns the function ARN.
        """
        print("Updating Lambda function code..")
        # Code source priority: docker image, then local zip, then S3 object.
        kwargs = dict(FunctionName=function_name, Publish=publish)
        if docker_image_uri:
            kwargs["ImageUri"] = docker_image_uri
        elif local_zip:
            kwargs["ZipFile"] = local_zip
        else:
            kwargs["S3Bucket"] = bucket
            kwargs["S3Key"] = s3_key
        response = self.lambda_client.update_function_code(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If the lambda has an ALB alias, let's update the alias
        # to point to the newest version of the function. We have to use a GET
        # here, as there's no HEAD-esque call to retrieve metadata about a
        # function alias.
        # Related: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        try:
            response = self.lambda_client.get_alias(
                FunctionName=function_name,
                Name=ALB_LAMBDA_ALIAS,
            )
            alias_exists = True
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # Anything other than "alias not found" is a real error.
            if "ResourceNotFoundException" not in e.response["Error"]["Code"]:
                raise e
            alias_exists = False
        if alias_exists:
            self.lambda_client.update_alias(
                FunctionName=function_name,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        # concurrency=None means "no reserved concurrency": clear any
        # previously configured limit rather than leaving it in place.
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=function_name,
                ReservedConcurrentExecutions=concurrency,
            )
        else:
            self.lambda_client.delete_function_concurrency(FunctionName=function_name)
        if num_revisions:
            # Find the existing revision IDs for the given function
            # Related: https://github.com/Miserlou/Zappa/issues/1402
            versions_in_lambda = []
            versions = self.lambda_client.list_versions_by_function(
                FunctionName=function_name
            )
            for version in versions["Versions"]:
                versions_in_lambda.append(version["Version"])
            # Follow pagination until all versions are collected.
            while "NextMarker" in versions:
                versions = self.lambda_client.list_versions_by_function(
                    FunctionName=function_name, Marker=versions["NextMarker"]
                )
                for version in versions["Versions"]:
                    versions_in_lambda.append(version["Version"])
            versions_in_lambda.remove("$LATEST")
            # Delete older revisions if their number exceeds the specified limit
            for version in versions_in_lambda[::-1][num_revisions:]:
                self.lambda_client.delete_function(
                    FunctionName=function_name, Qualifier=version
                )
        self.wait_until_lambda_function_is_updated(function_name)
        return resource_arn
    def update_lambda_configuration(
        self,
        lambda_arn,
        function_name,
        handler,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        layers=None,
        wait=True,
    ):
        """
        Given an existing function ARN, update the configuration variables.

        Remote environment variables that are not present in the local
        settings are preserved. Handler/Runtime/Layers are only sent for
        zip-based (non-Image) functions. Returns the function ARN.

        NOTE(review): the `lambda_arn` and `publish` parameters are not read
        in this method -- confirm they are kept only for interface
        compatibility.
        """
        print("Updating Lambda function configuration..")
        # Normalize optional arguments (avoids mutable defaults).
        if not vpc_config:
            vpc_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not layers:
            layers = []
        if wait:
            # Wait until function is ready, otherwise expected keys will be missing from 'lambda_aws_config'.
            self.wait_until_lambda_function_is_updated(function_name)
        # Check if there are any remote aws lambda env vars so they don't get trashed.
        # https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
        lambda_aws_config = self.lambda_client.get_function_configuration(
            FunctionName=function_name
        )
        if "Environment" in lambda_aws_config:
            lambda_aws_environment_variables = lambda_aws_config["Environment"].get(
                "Variables", {}
            )
            # Append keys that are remote but not in settings file
            for key, value in lambda_aws_environment_variables.items():
                if key not in aws_environment_variables:
                    aws_environment_variables[key] = value
        kwargs = {
            "FunctionName": function_name,
            "Role": self.credentials_arn,
            "Description": description,
            "Timeout": timeout,
            "MemorySize": memory_size,
            "VpcConfig": vpc_config,
            "Environment": {"Variables": aws_environment_variables},
            "KMSKeyArn": aws_kms_key_arn,
            "TracingConfig": {"Mode": "Active" if self.xray_tracing else "PassThrough"},
        }
        # Image-based functions have no Handler/Runtime/Layers to update.
        if lambda_aws_config["PackageType"] != "Image":
            kwargs.update(
                {
                    "Handler": handler,
                    "Runtime": runtime,
                    "Layers": layers,
                }
            )
        response = self.lambda_client.update_function_configuration(**kwargs)
        resource_arn = response["FunctionArn"]
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        return resource_arn
def invoke_lambda_function(
self,
function_name,
payload,
invocation_type="Event",
log_type="Tail",
client_context=None,
qualifier=None,
):
"""
Directly invoke a named Lambda function with a payload.
Returns the response.
"""
return self.lambda_client.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
LogType=log_type,
Payload=payload,
)
    def rollback_lambda_function_version(
        self, function_name, versions_back=1, publish=True
    ):
        """
        Rollback the lambda function code 'versions_back' number of revisions.

        Downloads the deployment package of the selected older version and
        re-uploads it as the current code. Returns the Function ARN on
        success, or False when there are not enough revisions or the old
        package cannot be fetched.

        Raises:
            NotImplementedError: for Docker (Image) based deployments, which
                have no downloadable zip to roll back to.
        """
        response = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
        # https://github.com/Miserlou/Zappa/pull/2192
        if (
            len(response.get("Versions", [])) > 1
            and response["Versions"][-1]["PackageType"] == "Image"
        ):
            raise NotImplementedError(
                "Zappa's rollback functionality is not available for Docker based deployments"
            )
        # Take into account $LATEST
        if len(response["Versions"]) < versions_back + 1:
            print("We do not have {} revisions. Aborting".format(str(versions_back)))
            return False
        # Numeric revision numbers, newest first ($LATEST excluded).
        revisions = [
            int(revision["Version"])
            for revision in response["Versions"]
            if revision["Version"] != "$LATEST"
        ]
        revisions.sort(reverse=True)
        response = self.lambda_client.get_function(
            FunctionName="function:{}:{}".format(
                function_name, revisions[versions_back]
            )
        )
        # Download the selected revision's deployment package from the
        # pre-signed URL returned by get_function.
        response = requests.get(response["Code"]["Location"])
        if response.status_code != 200:
            print(
                "Failed to get version {} of {} code".format(
                    versions_back, function_name
                )
            )
            return False
        response = self.lambda_client.update_function_code(
            FunctionName=function_name, ZipFile=response.content, Publish=publish
        )  # pragma: no cover
        return response["FunctionArn"]
def wait_until_lambda_function_is_active(self, function_name):
"""
Wait until lambda State=Active
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#waiters
waiter = self.lambda_client.get_waiter("function_active")
print(f"Waiting for lambda function [{function_name}] to become active...")
waiter.wait(FunctionName=function_name)
def wait_until_lambda_function_is_updated(self, function_name):
"""
Wait until lambda LastUpdateStatus=Successful
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#waiters
waiter = self.lambda_client.get_waiter("function_updated")
print(f"Waiting for lambda function [{function_name}] to be updated...")
waiter.wait(FunctionName=function_name)
def get_lambda_function(self, function_name):
"""
Returns the lambda function ARN, given a name
This requires the "lambda:GetFunction" role.
"""
response = self.lambda_client.get_function(FunctionName=function_name)
return response["Configuration"]["FunctionArn"]
def get_lambda_function_versions(self, function_name):
"""
Simply returns the versions available for a Lambda function, given a function name.
"""
try:
response = self.lambda_client.list_versions_by_function(
FunctionName=function_name
)
return response.get("Versions", [])
except Exception:
return []
def delete_lambda_function(self, function_name):
"""
Given a function name, delete it from AWS Lambda.
Returns the response.
"""
print("Deleting Lambda function..")
return self.lambda_client.delete_function(
FunctionName=function_name,
)
##
# Application load balancer
##
    def deploy_lambda_alb(self, lambda_arn, lambda_name, alb_vpc_config, timeout):
        """
        The `zappa deploy` functionality for ALB infrastructure.

        Creates, in order: the load balancer, a lambda target group,
        an invoke permission for ELB, the target registration, and finally
        an HTTPS listener forwarding to the target group.

        Args:
            lambda_arn: ARN of the deployed Lambda function.
            lambda_name: Name used for the ALB, target group and statement id.
            alb_vpc_config: Dict with SubnetIds, SecurityGroupIds,
                CertificateArn and optional Scheme.
            timeout: Lambda timeout, mirrored as the ALB idle timeout.

        Raises:
            EnvironmentError: on missing/invalid configuration or unexpected
                AWS responses.
        """
        # Validate configuration before touching any AWS resources.
        if not alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, alb_vpc_config must be filled out in zappa_settings."
            )
        if "SubnetIds" not in alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, you must supply two subnets in different availability zones."
            )
        if "SecurityGroupIds" not in alb_vpc_config:
            alb_vpc_config["SecurityGroupIds"] = []
        if not alb_vpc_config.get("CertificateArn"):
            raise EnvironmentError(
                "When creating an ALB, you must supply a CertificateArn for the HTTPS listener."
            )

        # Related: https://github.com/Miserlou/Zappa/issues/1856
        if "Scheme" not in alb_vpc_config:
            alb_vpc_config["Scheme"] = "internet-facing"

        print("Deploying ALB infrastructure...")

        # Create load balancer
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
        kwargs = dict(
            Name=lambda_name,
            Subnets=alb_vpc_config["SubnetIds"],
            SecurityGroups=alb_vpc_config["SecurityGroupIds"],
            Scheme=alb_vpc_config["Scheme"],
            # TODO: Tags might be a useful means of stock-keeping zappa-generated assets.
            # Tags=[],
            Type="application",
            # TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.
            IpAddressType="ipv4",
        )
        response = self.elbv2_client.create_load_balancer(**kwargs)
        if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        if response["LoadBalancers"][0]["State"]["Code"] == "failed":
            raise EnvironmentError(
                "Failure to create application load balancer. Response reported a failed state: {}".format(
                    response["LoadBalancers"][0]["State"]["Reason"]
                )
            )
        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
        load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]
        waiter = self.elbv2_client.get_waiter("load_balancer_available")

        print(
            "Waiting for load balancer [{}] to become active..".format(
                load_balancer_arn
            )
        )
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})

        # Match the lambda timeout on the load balancer.
        self.elbv2_client.modify_load_balancer_attributes(
            LoadBalancerArn=load_balancer_arn,
            Attributes=[{"Key": "idle_timeout.timeout_seconds", "Value": str(timeout)}],
        )

        # Create/associate target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
        kwargs = dict(
            Name=lambda_name,
            TargetType="lambda",
            # TODO: Add options for health checks
        )
        response = self.elbv2_client.create_target_group(**kwargs)
        if not (response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]

        # Enable multi-value headers by default.
        response = self.elbv2_client.modify_target_group_attributes(
            TargetGroupArn=target_group_arn,
            Attributes=[
                {"Key": "lambda.multi_value_headers.enabled", "Value": "true"},
            ],
        )

        # Allow execute permissions from target group to lambda.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
        kwargs = dict(
            Action="lambda:InvokeFunction",
            FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
            Principal="elasticloadbalancing.amazonaws.com",
            SourceArn=target_group_arn,
            StatementId=lambda_name,
        )
        response = self.lambda_client.add_permission(**kwargs)

        # Register target group to lambda association.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
        kwargs = dict(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}],
        )
        response = self.elbv2_client.register_targets(**kwargs)

        # Bind listener to load balancer with default rule to target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
        kwargs = dict(
            # TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
            Certificates=[{"CertificateArn": alb_vpc_config["CertificateArn"]}],
            DefaultActions=[
                {
                    "Type": "forward",
                    "TargetGroupArn": target_group_arn,
                }
            ],
            LoadBalancerArn=load_balancer_arn,
            Protocol="HTTPS",
            # TODO: Add option for custom ports
            Port=443,
            # TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
        )
        response = self.elbv2_client.create_listener(**kwargs)
        print("ALB created with DNS: {}".format(load_balancer_dns))
        print("Note it may take several minutes for load balancer to become available.")
    def undeploy_lambda_alb(self, lambda_name):
        """
        The `zappa undeploy` functionality for ALB infrastructure.

        Removes, in order: the ELB invoke permission on the function, the
        listener + load balancer, and finally the lambda target group.
        Each stage tolerates "not found" errors so a partially deployed
        stack can still be cleaned up.
        """
        print("Undeploying ALB infrastructure...")

        # Locate and delete alb/lambda permissions
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
            self.lambda_client.remove_permission(
                FunctionName=lambda_name, StatementId=lambda_name
            )
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            if "ResourceNotFoundException" in e.response["Error"]["Code"]:
                pass
            else:
                raise e

        # Locate and delete load balancer
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
            response = self.elbv2_client.describe_load_balancers(Names=[lambda_name])
            if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
            response = self.elbv2_client.describe_listeners(
                LoadBalancerArn=load_balancer_arn
            )
            if not (response["Listeners"]):
                print("No listeners found.")
            elif len(response["Listeners"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            else:
                listener_arn = response["Listeners"][0]["ListenerArn"]
                # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
                # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
                response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
            # Remove the load balancer and wait for completion
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
            response = self.elbv2_client.delete_load_balancer(
                LoadBalancerArn=load_balancer_arn
            )
            waiter = self.elbv2_client.get_waiter("load_balancers_deleted")
            print("Waiting for load balancer [{}] to be deleted..".format(lambda_name))
            waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e

        # Locate and delete target group
        try:
            # Locate the lambda ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
            response = self.lambda_client.get_function(FunctionName=lambda_name)
            lambda_arn = response["Configuration"]["FunctionArn"]
            # Locate the target group ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
            response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
            if not (response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB target group named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
            # Deregister targets and wait for completion
            self.elbv2_client.deregister_targets(
                TargetGroupArn=target_group_arn, Targets=[{"Id": lambda_arn}]
            )
            waiter = self.elbv2_client.get_waiter("target_deregistered")
            print("Waiting for target [{}] to be deregistered...".format(lambda_name))
            waiter.wait(
                TargetGroupArn=target_group_arn,
                Targets=[{"Id": lambda_arn}],
                WaiterConfig={"Delay": 3},
            )
            # Remove the target group
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
            self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            if "TargetGroupNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
##
# API Gateway
##
    def create_api_gateway_routes(
        self,
        lambda_arn,
        api_name=None,
        api_key_required=False,
        authorization_type="NONE",
        authorizer=None,
        cors_options=None,
        description=None,
        endpoint_configuration=None,
    ):
        """
        Create the API Gateway for this Zappa deployment.
        Returns the new RestAPI CF resource.

        Builds a CloudFormation template (via troposphere) containing the
        RestApi, an optional Authorizer, methods on the root resource and on
        a greedy "{proxy+}" resource, plus optional CORS OPTIONS methods.
        """
        restapi = troposphere.apigateway.RestApi("Api")
        restapi.Name = api_name or lambda_arn.split(":")[-1]
        if not description:
            description = "Created automatically by Zappa."
        restapi.Description = description
        endpoint_configuration = (
            [] if endpoint_configuration is None else endpoint_configuration
        )
        # GovCloud only supports REGIONAL endpoints.
        if self.boto_session.region_name == "us-gov-west-1":
            endpoint_configuration.append("REGIONAL")
        if endpoint_configuration:
            endpoint = troposphere.apigateway.EndpointConfiguration()
            endpoint.Types = list(set(endpoint_configuration))
            restapi.EndpointConfiguration = endpoint
        if self.apigateway_policy:
            restapi.Policy = json.loads(self.apigateway_policy)
        self.cf_template.add_resource(restapi)

        root_id = troposphere.GetAtt(restapi, "RootResourceId")
        # GovCloud uses a distinct ARN partition.
        invocation_prefix = (
            "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
        )
        invocations_uri = (
            "arn:"
            + invocation_prefix
            + ":apigateway:"
            + self.boto_session.region_name
            + ":lambda:path/2015-03-31/functions/"
            + lambda_arn
            + "/invocations"
        )

        ##
        # The Resources
        ##
        authorizer_resource = None
        if authorizer:
            # The authorizer may use its own lambda; default to the app lambda.
            authorizer_lambda_arn = authorizer.get("arn", lambda_arn)
            lambda_uri = "arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations".format(
                invocation_prefix=invocation_prefix,
                region_name=self.boto_session.region_name,
                lambda_arn=authorizer_lambda_arn,
            )
            authorizer_resource = self.create_authorizer(
                restapi, lambda_uri, authorizer
            )

        # Depth 0: methods directly on the API root.
        self.create_and_setup_methods(
            restapi,
            root_id,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            0,
        )

        if cors_options:
            self.create_and_setup_cors(
                restapi, root_id, invocations_uri, 0, cors_options
            )

        # Depth 1: greedy proxy resource catching every other path.
        resource = troposphere.apigateway.Resource("ResourceAnyPathSlashed")
        self.cf_api_resources.append(resource.title)
        resource.RestApiId = troposphere.Ref(restapi)
        resource.ParentId = root_id
        resource.PathPart = "{proxy+}"
        self.cf_template.add_resource(resource)

        self.create_and_setup_methods(
            restapi,
            resource,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            1,
        )  # pragma: no cover

        if cors_options:
            self.create_and_setup_cors(
                restapi, resource, invocations_uri, 1, cors_options
            )  # pragma: no cover
        return restapi
def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get("validation_expression", None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = (
"method.request.header.%s" % authorizer.get("token_header", "Authorization")
)
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = (
identity_validation_expression
)
if authorizer_type == "TOKEN":
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get(
"result_ttl", 300
)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == "COGNITO_USER_POOLS":
authorizer_resource.ProviderARNs = authorizer.get("provider_arns")
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource
    def create_and_setup_methods(
        self,
        restapi,
        resource,
        api_key_required,
        uri,
        authorization_type,
        authorizer_resource,
        depth,
    ):
        """
        Set up the methods, integration responses and method responses for a given API Gateway resource.

        One Method (named "<http_method><depth>") is created per entry in
        self.http_methods, each wired to the lambda via an AWS_PROXY
        integration. `resource` may be a troposphere Resource or a raw
        resource id (e.g. the root resource GetAtt).
        """
        for method_name in self.http_methods:
            method = troposphere.apigateway.Method(method_name + str(depth))
            method.RestApiId = troposphere.Ref(restapi)
            # The root resource id is passed as-is; child resources by Ref.
            if type(resource) is troposphere.apigateway.Resource:
                method.ResourceId = troposphere.Ref(resource)
            else:
                method.ResourceId = resource
            method.HttpMethod = method_name.upper()
            method.AuthorizationType = authorization_type
            if authorizer_resource:
                method.AuthorizerId = troposphere.Ref(authorizer_resource)
            method.ApiKeyRequired = api_key_required
            method.MethodResponses = []
            self.cf_template.add_resource(method)
            self.cf_api_resources.append(method.title)

            if not self.credentials_arn:
                self.get_credentials_arn()
            credentials = self.credentials_arn  # This must be a Role ARN

            integration = troposphere.apigateway.Integration()
            integration.CacheKeyParameters = []
            integration.CacheNamespace = "none"
            integration.Credentials = credentials
            integration.IntegrationHttpMethod = "POST"
            integration.IntegrationResponses = []
            integration.PassthroughBehavior = "NEVER"
            integration.Type = "AWS_PROXY"
            integration.Uri = uri
            method.Integration = integration
def create_and_setup_cors(self, restapi, resource, uri, depth, config):
    """
    Set up an OPTIONS method with a MOCK integration implementing the CORS
    preflight response for a given API Gateway resource.

    config: either True (use all defaults) or a dict that may contain
        'allowed_headers', 'allowed_methods' and 'allowed_origin'.
    """
    # `cors: true` in settings means "all defaults".
    if config is True:
        config = {}
    method_name = "OPTIONS"
    method = troposphere.apigateway.Method(method_name + str(depth))
    method.RestApiId = troposphere.Ref(restapi)
    if type(resource) is troposphere.apigateway.Resource:
        method.ResourceId = troposphere.Ref(resource)
    else:
        # Not a troposphere object; pass the reference through as-is.
        method.ResourceId = resource
    method.HttpMethod = method_name.upper()
    # Preflight requests are unauthenticated by definition.
    method.AuthorizationType = "NONE"
    method_response = troposphere.apigateway.MethodResponse()
    method_response.ResponseModels = {"application/json": "Empty"}
    # Values are wrapped in single quotes: API Gateway treats them as static
    # mapping expressions, not JSONPath lookups.
    response_headers = {
        "Access-Control-Allow-Headers": "'%s'"
        % ",".join(
            config.get(
                "allowed_headers",
                [
                    "Content-Type",
                    "X-Amz-Date",
                    "Authorization",
                    "X-Api-Key",
                    "X-Amz-Security-Token",
                ],
            )
        ),
        "Access-Control-Allow-Methods": "'%s'"
        % ",".join(
            config.get(
                "allowed_methods",
                ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"],
            )
        ),
        "Access-Control-Allow-Origin": "'%s'" % config.get("allowed_origin", "*"),
    }
    # Declare each header on the method response so the integration response
    # below is allowed to populate it.
    method_response.ResponseParameters = {
        "method.response.header.%s" % key: True for key in response_headers
    }
    method_response.StatusCode = "200"
    method.MethodResponses = [method_response]
    self.cf_template.add_resource(method)
    self.cf_api_resources.append(method.title)
    # MOCK integration: API Gateway answers the preflight itself without
    # invoking the Lambda.
    integration = troposphere.apigateway.Integration()
    integration.Type = "MOCK"
    integration.PassthroughBehavior = "NEVER"
    integration.RequestTemplates = {"application/json": '{"statusCode": 200}'}
    integration_response = troposphere.apigateway.IntegrationResponse()
    integration_response.ResponseParameters = {
        "method.response.header.%s" % key: value
        for key, value in response_headers.items()
    }
    integration_response.ResponseTemplates = {"application/json": ""}
    integration_response.StatusCode = "200"
    integration.IntegrationResponses = [integration_response]
    integration.Uri = uri
    method.Integration = integration
def deploy_api_gateway(
    self,
    api_id,
    stage_name,
    stage_description="",
    description="",
    cache_cluster_enabled=False,
    cache_cluster_size="0.5",
    variables=None,
    cloudwatch_log_level="OFF",
    cloudwatch_data_trace=False,
    cloudwatch_metrics_enabled=False,
    cache_cluster_ttl=300,
    cache_cluster_encrypted=False,
):
    """
    Deploy the API Gateway!

    Creates a deployment for `api_id` on `stage_name`, applies the
    stage-wide logging/metrics/caching settings, and returns the
    deployed API URL.
    """
    print("Deploying API Gateway..")
    self.apigateway_client.create_deployment(
        restApiId=api_id,
        stageName=stage_name,
        stageDescription=stage_description,
        description=description,
        cacheClusterEnabled=cache_cluster_enabled,
        cacheClusterSize=cache_cluster_size,
        variables=variables or {},
    )
    # Unknown/unsupported levels fall back to logging disabled.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = "OFF"
    # get_patch_op builds "/*/*/<key>" paths, so each setting applies to
    # every resource and method of the stage.
    self.apigateway_client.update_stage(
        restApiId=api_id,
        stageName=stage_name,
        patchOperations=[
            self.get_patch_op("logging/loglevel", cloudwatch_log_level),
            self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
            self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
            self.get_patch_op("caching/ttlInSeconds", str(cache_cluster_ttl)),
            self.get_patch_op("caching/dataEncrypted", cache_cluster_encrypted),
        ],
    )
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def add_binary_support(self, api_id, cors=False):
    """
    Add binary support ("*/*" binary media type) to the REST API.

    When `cors` is set, every OPTIONS mock integration is also switched to
    CONVERT_TO_TEXT, since binary support and CORS conflict otherwise.
    """
    response = self.apigateway_client.get_rest_api(restApiId=api_id)
    # Only patch when "*/*" isn't already registered.
    if (
        "binaryMediaTypes" not in response
        or "*/*" not in response["binaryMediaTypes"]
    ):
        # "~1" is the JSON-Pointer escape for "/", so this path targets
        # the "*/*" media type.
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[{"op": "add", "path": "/binaryMediaTypes/*~1*"}],
        )
    if cors:
        # fix for issue 699 and 1035, cors+binary support don't work together
        # go through each resource and update the contentHandling type
        response = self.apigateway_client.get_resources(restApiId=api_id)
        resource_ids = [
            item["id"]
            for item in response["items"]
            if "OPTIONS" in item.get("resourceMethods", {})
        ]
        for resource_id in resource_ids:
            self.apigateway_client.update_integration(
                restApiId=api_id,
                resourceId=resource_id,
                httpMethod="OPTIONS",
                patchOperations=[
                    {
                        "op": "replace",
                        "path": "/contentHandling",
                        "value": "CONVERT_TO_TEXT",
                    }
                ],
            )
def remove_binary_support(self, api_id, cors=False):
    """
    Remove binary support ("*/*" binary media type) from the REST API.

    When `cors` is set, the contentHandling override previously applied to
    each OPTIONS integration is reset (empty value) as well.
    """
    response = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
        # "~1" is the JSON-Pointer escape for "/", so this removes "*/*".
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[{"op": "remove", "path": "/binaryMediaTypes/*~1*"}],
        )
    if cors:
        # go through each resource and change the contentHandling type
        response = self.apigateway_client.get_resources(restApiId=api_id)
        resource_ids = [
            item["id"]
            for item in response["items"]
            if "OPTIONS" in item.get("resourceMethods", {})
        ]
        for resource_id in resource_ids:
            self.apigateway_client.update_integration(
                restApiId=api_id,
                resourceId=resource_id,
                httpMethod="OPTIONS",
                patchOperations=[
                    # Empty value resets contentHandling to passthrough.
                    {"op": "replace", "path": "/contentHandling", "value": ""}
                ],
            )
def add_api_compression(self, api_id, min_compression_size):
    """
    Add Rest API compression.

    min_compression_size: minimum response body size (bytes, stringified
    for the patch operation) at which compression kicks in.
    """
    self.apigateway_client.update_rest_api(
        restApiId=api_id,
        patchOperations=[
            {
                "op": "replace",
                "path": "/minimumCompressionSize",
                "value": str(min_compression_size),
            }
        ],
    )
def remove_api_compression(self, api_id):
    """
    Remove Rest API compression.

    Replacing /minimumCompressionSize with no value disables compression
    per API Gateway's patch semantics.
    """
    self.apigateway_client.update_rest_api(
        restApiId=api_id,
        patchOperations=[
            {
                "op": "replace",
                "path": "/minimumCompressionSize",
            }
        ],
    )
def get_api_keys(self, api_id, stage_name):
    """
    Generator that allows to iterate per API keys associated to an api_id and a stage_name.

    Yields the id of every API key whose stage keys contain the
    "<api_id>/<stage_name>" pair.
    """
    stage_key = "{}/{}".format(api_id, stage_name)
    # Paginate instead of a single get_api_keys(limit=500) call, so accounts
    # with more than 500 API keys are fully covered.
    paginator = self.apigateway_client.get_paginator("get_api_keys")
    for page in paginator.paginate(limit=500):
        for api_key in page.get("items", []):
            if stage_key in api_key.get("stageKeys"):
                yield api_key.get("id")
def create_api_key(self, api_id, stage_name):
    """
    Create new API key and link it with an api_id and a stage_name.

    The key is named "<stage_name>_<api_id>", which remove_api_key relies
    on to find it later.
    """
    response = self.apigateway_client.create_api_key(
        name="{}_{}".format(stage_name, api_id),
        description="Api Key for {}".format(api_id),
        enabled=True,
        stageKeys=[
            {
                "restApiId": "{}".format(api_id),
                "stageName": "{}".format(stage_name),
            },
        ],
    )
    print("Created a new x-api-key: {}".format(response["id"]))
def remove_api_key(self, api_id, stage_name):
    """
    Remove a generated API key for api_id and stage_name.

    Looks up the key by the "<stage_name>_<api_id>" name that
    create_api_key assigned (first match only, limit=1).
    """
    response = self.apigateway_client.get_api_keys(
        limit=1, nameQuery="{}_{}".format(stage_name, api_id)
    )
    for api_key in response.get("items"):
        self.apigateway_client.delete_api_key(apiKey="{}".format(api_key["id"]))
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
    """
    Add api stage to Api key.

    Associates the "<api_id>/<stage_name>" stage with an existing key so
    requests to that stage may use it.
    """
    self.apigateway_client.update_api_key(
        apiKey=api_key,
        patchOperations=[
            {
                "op": "add",
                "path": "/stages",
                "value": "{}/{}".format(api_id, stage_name),
            }
        ],
    )
def get_patch_op(self, keypath, value, op="replace"):
    """
    Return an object that describes a change of configuration on the given staging.
    Setting will be applied on all available HTTP methods.
    """
    # API Gateway expects booleans as lowercase strings ("true"/"false").
    patch_value = str(value).lower() if isinstance(value, bool) else value
    return {
        "op": op,
        "path": "/*/*/{}".format(keypath),
        "value": patch_value,
    }
def get_rest_apis(self, project_name):
    """
    Generator that allows to iterate per every available apis.

    Yields each REST API whose name equals `project_name`.
    """
    # Paginate instead of a single get_rest_apis(limit=500) call, so
    # accounts with more than 500 APIs are fully covered.
    paginator = self.apigateway_client.get_paginator("get_rest_apis")
    for page in paginator.paginate(limit=500):
        for api in page["items"]:
            if api["name"] != project_name:
                continue
            yield api
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
    """
    Delete a deployed REST API Gateway.

    Removes the custom-domain base path mapping (best effort), deletes the
    CloudFormation stack, and falls back to deleting matching REST APIs
    directly for deployments made before the CF-based workflow.
    """
    print("Deleting API Gateway..")
    if domain_name:
        # XXX - Remove Route53 smartly here?
        # XXX - This doesn't raise, but doesn't work either.
        try:
            # API Gateway represents an empty base path as "(none)".
            self.apigateway_client.delete_base_path_mapping(
                domainName=domain_name,
                basePath="(none)" if base_path is None else base_path,
            )
        except Exception:
            # We may not have actually set up the domain; best effort only.
            pass
    was_deleted = self.delete_stack(lambda_name, wait=True)
    if not was_deleted:
        # try erasing it with the older method
        for api in self.get_rest_apis(lambda_name):
            self.apigateway_client.delete_rest_api(restApiId=api["id"])
def update_stage_config(
    self,
    project_name,
    stage_name,
    cloudwatch_log_level,
    cloudwatch_data_trace,
    cloudwatch_metrics_enabled,
):
    """
    Update CloudWatch metrics configuration.

    Applies logging/metrics settings to `stage_name` on every REST API
    named `project_name`.
    """
    # Unknown/unsupported levels fall back to logging disabled.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = "OFF"
    for api in self.get_rest_apis(project_name):
        self.apigateway_client.update_stage(
            restApiId=api["id"],
            stageName=stage_name,
            patchOperations=[
                self.get_patch_op("logging/loglevel", cloudwatch_log_level),
                self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
                self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
            ],
        )
def update_cognito(self, lambda_name, user_pool, lambda_configs, lambda_arn):
    """
    Point a Cognito user pool's Lambda triggers at our deployed function.

    lambda_configs: iterable of LambdaConfig trigger names (e.g.
        "PreSignUp") to map to `lambda_arn`.
    Also grants cognito-idp permission to invoke the Lambda.
    """
    LambdaConfig = {}
    for config in lambda_configs:
        LambdaConfig[config] = lambda_arn
    description = self.cognito_client.describe_user_pool(UserPoolId=user_pool)
    description_kwargs = {}
    for key, value in description["UserPool"].items():
        if key in (
            "UserPoolId",
            "Policies",
            "AutoVerifiedAttributes",
            "SmsVerificationMessage",
            "EmailVerificationMessage",
            "EmailVerificationSubject",
            "VerificationMessageTemplate",
            "SmsAuthenticationMessage",
            "MfaConfiguration",
            "DeviceConfiguration",
            "EmailConfiguration",
            "SmsConfiguration",
            "UserPoolTags",
            "AdminCreateUserConfig",
        ):
            # Settings update_user_pool accepts; carried over unchanged.
            description_kwargs[key] = value
        elif key == "LambdaConfig":
            # Overwrite only the triggers we manage; keep any others intact.
            for lckey in value:
                if lckey in LambdaConfig:
                    value[lckey] = LambdaConfig[lckey]
            description_kwargs[key] = value
    if "LambdaConfig" not in description_kwargs:
        description_kwargs["LambdaConfig"] = LambdaConfig
    # Cognito rejects pools carrying both TemporaryPasswordValidityDays and
    # the legacy UnusedAccountValidityDays; keep exactly one of them.
    if (
        "TemporaryPasswordValidityDays"
        in description_kwargs["Policies"]["PasswordPolicy"]
    ):
        description_kwargs["AdminCreateUserConfig"].pop(
            "UnusedAccountValidityDays", None
        )
    if "UnusedAccountValidityDays" in description_kwargs["AdminCreateUserConfig"]:
        # Migrate the legacy setting to its modern equivalent.
        description_kwargs["Policies"]["PasswordPolicy"][
            "TemporaryPasswordValidityDays"
        ] = description_kwargs["AdminCreateUserConfig"].pop(
            "UnusedAccountValidityDays", None
        )
    result = self.cognito_client.update_user_pool(
        UserPoolId=user_pool, **description_kwargs
    )
    if result["ResponseMetadata"]["HTTPStatusCode"] != 200:
        print("Cognito: Failed to update user pool", result)
    # Now we need to add a policy to the IAM that allows cognito access
    result = self.create_event_permission(
        lambda_name,
        "cognito-idp.amazonaws.com",
        "arn:aws:cognito-idp:{}:{}:userpool/{}".format(
            self.aws_region,
            self.sts_client.get_caller_identity().get("Account"),
            user_pool,
        ),
    )
    if result["ResponseMetadata"]["HTTPStatusCode"] != 201:
        print("Cognito: Failed to update lambda permission", result)
def delete_stack(self, name, wait=False):
    """
    Delete the CF stack managed by Zappa.

    Returns True when deletion was initiated, False when the stack does not
    exist or is not tagged as this Zappa project.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)["Stacks"][0]
    except:  # pragma: no cover
        print("No Zappa stack named {0}".format(name))
        return False
    tags = {x["Key"]: x["Value"] for x in stack["Tags"]}
    # Only delete stacks we created (tagged ZappaProject=<name>).
    if tags.get("ZappaProject") == name:
        self.cf_client.delete_stack(StackName=name)
        if wait:
            waiter = self.cf_client.get_waiter("stack_delete_complete")
            print("Waiting for stack {0} to be deleted..".format(name))
            waiter.wait(StackName=name)
        return True
    else:
        print("ZappaProject tag not found on {0}, doing nothing".format(name))
        return False
def create_stack_template(
    self,
    lambda_arn,
    lambda_name,
    api_key_required,
    iam_authorization,
    authorizer,
    cors_options=None,
    description=None,
    endpoint_configuration=None,
):
    """
    Build the entire CF stack.
    Just used for the API Gateway, but could be expanded in the future.

    Returns the populated troposphere Template (also kept on
    self.cf_template).
    """
    auth_type = "NONE"
    if iam_authorization and authorizer:
        # The two schemes are mutually exclusive; IAM wins.
        logger.warning(
            "Both IAM Authorization and Authorizer are specified, this is not possible. "
            "Setting Auth method to IAM Authorization"
        )
        authorizer = None
        auth_type = "AWS_IAM"
    elif iam_authorization:
        auth_type = "AWS_IAM"
    elif authorizer:
        auth_type = authorizer.get("type", "CUSTOM")
    # build a fresh template
    self.cf_template = troposphere.Template()
    self.cf_template.set_description("Automatically generated with Zappa")
    self.cf_api_resources = []
    self.cf_parameters = {}
    # Populates self.cf_template as a side effect.
    self.create_api_gateway_routes(
        lambda_arn,
        api_name=lambda_name,
        api_key_required=api_key_required,
        authorization_type=auth_type,
        authorizer=authorizer,
        cors_options=cors_options,
        description=description,
        endpoint_configuration=endpoint_configuration,
    )
    return self.cf_template
def update_stack(
    self,
    name,
    working_bucket,
    wait=False,
    update_only=False,
    disable_progress=False,
):
    """
    Update or create the CF stack managed by Zappa.

    Serializes self.cf_template to a timestamped JSON file, uploads it to
    `working_bucket`, then creates or updates the stack from that URL.
    With `wait`, polls until the stack reaches a terminal state, driving a
    progress bar from completed resource counts. Raises EnvironmentError
    when the stack rolls back or is deleted.
    """
    capabilities = []
    # Unique, timestamped template name so repeated deploys don't collide.
    template = name + "-template-" + str(int(time.time())) + ".json"
    with open(template, "wb") as out:
        out.write(
            bytes(
                self.cf_template.to_json(indent=None, separators=(",", ":")),
                "utf-8",
            )
        )
    # CloudFormation reads the template from S3, not the local file.
    self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
    if self.boto_session.region_name == "us-gov-west-1":
        url = "https://s3-us-gov-west-1.amazonaws.com/{0}/{1}".format(
            working_bucket, template
        )
    else:
        url = "https://s3.amazonaws.com/{0}/{1}".format(working_bucket, template)
    tags = [
        {"Key": key, "Value": self.tags[key]}
        for key in self.tags.keys()
        if key != "ZappaProject"
    ]
    # The ZappaProject tag always reflects the current stack name.
    tags.append({"Key": "ZappaProject", "Value": name})
    update = True
    try:
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        # Stack doesn't exist yet; fall through to create.
        update = False
    if update_only and not update:
        print("CloudFormation stack missing, re-deploy to enable updates")
        return
    if not update:
        self.cf_client.create_stack(
            StackName=name, Capabilities=capabilities, TemplateURL=url, Tags=tags
        )
        print(
            "Waiting for stack {0} to create (this can take a bit)..".format(name)
        )
    else:
        try:
            self.cf_client.update_stack(
                StackName=name,
                Capabilities=capabilities,
                TemplateURL=url,
                Tags=tags,
            )
            print("Waiting for stack {0} to update..".format(name))
        except botocore.client.ClientError as e:
            if e.response["Error"]["Message"] == "No updates are to be performed.":
                # A no-op update isn't an error; just skip the wait below.
                wait = False
            else:
                raise
    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator("list_stack_resources")
        progress = tqdm(total=total_resources, unit="res", disable=disable_progress)
        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result["Stacks"]:
                continue  # might need to wait a bit
            if result["Stacks"][0]["StackStatus"] in [
                "CREATE_COMPLETE",
                "UPDATE_COMPLETE",
            ]:
                break
            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result["Stacks"][0]["StackStatus"] in [
                "DELETE_COMPLETE",
                "DELETE_IN_PROGRESS",
                "ROLLBACK_IN_PROGRESS",
                "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
                "UPDATE_ROLLBACK_COMPLETE",
            ]:
                raise EnvironmentError(
                    "Stack creation failed. "
                    "Please check your CloudFormation console. "
                    "You may also need to `undeploy`."
                )
            # Count completed resources to drive the progress bar.
            count = 0
            for result in sr.paginate(StackName=name):
                done = (
                    1
                    for x in result["StackResourceSummaries"]
                    if "COMPLETE" in x["ResourceStatus"]
                )
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
                current_resources = count
        progress.close()
    # Clean up the local and uploaded template copies.
    try:
        os.remove(template)
    except OSError:
        pass
    self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
    """
    Given a name, describes CloudFront stacks and returns dict of the stack Outputs
    , else returns an empty dict.
    """
    try:
        outputs = self.cf_client.describe_stacks(StackName=name)["Stacks"][0][
            "Outputs"
        ]
    except botocore.client.ClientError:
        # Stack doesn't exist (or can't be described): nothing to report.
        return {}
    return {entry["OutputKey"]: entry["OutputValue"] for entry in outputs}
def get_api_url(self, lambda_name, stage_name):
    """
    Given a lambda_name and stage_name, return a valid API URL.

    Returns None when no API id can be resolved for the lambda.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        return None
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def get_api_id(self, lambda_name):
    """
    Given a lambda_name, return the API id.

    Looks up the "Api" logical resource in the project's CloudFormation
    stack; falls back to scanning REST APIs by name for pre-CF deployments.
    Returns None when nothing is found.
    """
    try:
        # The API is normally a CF stack resource with logical id "Api".
        response = self.cf_client.describe_stack_resource(
            StackName=lambda_name, LogicalResourceId="Api"
        )
        return response["StackResourceDetail"].get("PhysicalResourceId", None)
    except:  # pragma: no cover
        try:
            # Try the old method (project was probably made on an older, non CF version)
            response = self.apigateway_client.get_rest_apis(limit=500)
            for item in response["items"]:
                if item["name"] == lambda_name:
                    return item["id"]
            logger.exception("Could not get API ID.")
            return None
        except:  # pragma: no cover
            # We don't even have an API deployed. That's okay!
            return None
def create_domain_name(
    self,
    domain_name,
    certificate_name,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    base_path=None,
):
    """
    Creates the API GW domain and returns the resulting DNS name.

    Either pass `certificate_arn` (ACM) or the body/private key/chain
    triple (Let's Encrypt / custom cert). Also maps `base_path` on the new
    domain to the lambda's deployed API stage.

    Raises LookupError when the lambda has no deployed API to map.
    """
    # This is a Let's Encrypt or custom certificate
    if not certificate_arn:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateBody=certificate_body,
            certificatePrivateKey=certificate_private_key,
            certificateChain=certificate_chain,
        )
    # This is an AWS ACM-hosted Certificate
    else:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateArn=certificate_arn,
        )
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        raise LookupError("No API URL to certify found - did you deploy?")
    self.apigateway_client.create_base_path_mapping(
        domainName=domain_name,
        # Empty string maps the domain root.
        basePath="" if base_path is None else base_path,
        restApiId=api_id,
        stage=stage,
    )
    return agw_response["distributionDomainName"]
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation.

    Upserts either an ALIAS A record (when `domain_name` is the zone apex,
    where CNAMEs are not allowed) or a CNAME pointing at `dns_name`.
    Returns the change_resource_record_sets response.
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)
    # Zone names come back with a trailing dot; strip it for comparison.
    is_apex = (
        self.route53.get_hosted_zone(Id=zone_id)["HostedZone"]["Name"][:-1]
        == domain_name
    )
    if is_apex:
        record_set = {
            "Name": domain_name,
            "Type": "A",
            "AliasTarget": {
                "HostedZoneId": "Z2FDTNDATAQYW2",  # This is a magic value that means "CloudFront"
                "DNSName": dns_name,
                "EvaluateTargetHealth": False,
            },
        }
    else:
        record_set = {
            "Name": domain_name,
            "Type": "CNAME",
            "ResourceRecords": [{"Value": dns_name}],
            "TTL": 60,
        }
    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]
    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    response = self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [{"Action": "UPSERT", "ResourceRecordSet": record_set}]
        },
    )
    return response
def update_domain_name(
    self,
    domain_name,
    certificate_name=None,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    route53=True,
    base_path=None,
):
    """
    This updates your certificate information for an existing domain,
    with similar arguments to boto's update_domain_name API Gateway api.
    It returns the resulting new domain information including the new certificate's ARN
    if created during this process.
    Previously, this method involved downtime that could take up to 40 minutes
    because the API Gateway api only allowed this by deleting, and then creating it.
    Related issues: https://github.com/Miserlou/Zappa/issues/590
    https://github.com/Miserlou/Zappa/issues/588
    https://github.com/Miserlou/Zappa/pull/458
    https://github.com/Miserlou/Zappa/issues/882
    https://github.com/Miserlou/Zappa/pull/883

    NOTE(review): the `route53` argument is accepted but never used in this
    body — confirm whether callers rely on it.
    NOTE(review): `certificate_name` defaults to None but is concatenated
    below (would raise TypeError); callers presumably always supply it.
    """
    print("Updating domain name!")
    # Timestamp suffix keeps certificate names unique across updates.
    certificate_name = certificate_name + str(time.time())
    # NOTE(review): result is unused; presumably kept as an existence check
    # that raises early when the domain is missing — confirm.
    api_gateway_domain = self.apigateway_client.get_domain_name(
        domainName=domain_name
    )
    # Import a custom certificate into ACM when no ARN was supplied.
    if (
        not certificate_arn
        and certificate_body
        and certificate_private_key
        and certificate_chain
    ):
        acm_certificate = self.acm_client.import_certificate(
            Certificate=certificate_body,
            PrivateKey=certificate_private_key,
            CertificateChain=certificate_chain,
        )
        certificate_arn = acm_certificate["CertificateArn"]
    self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)
    return self.apigateway_client.update_domain_name(
        domainName=domain_name,
        patchOperations=[
            {
                "op": "replace",
                "path": "/certificateName",
                "value": certificate_name,
            },
            {"op": "replace", "path": "/certificateArn", "value": certificate_arn},
        ],
    )
def update_domain_base_path_mapping(
    self, domain_name, lambda_name, stage, base_path
):
    """
    Update domain base path mapping on API Gateway if it was changed.

    Finds the mapping for this api/stage on `domain_name` and patches its
    base path; creates the mapping if none exists.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        print("Warning! Can't update base path mapping!")
        return
    base_path_mappings = self.apigateway_client.get_base_path_mappings(
        domainName=domain_name
    )
    found = False
    for base_path_mapping in base_path_mappings.get("items", []):
        if (
            base_path_mapping["restApiId"] == api_id
            and base_path_mapping["stage"] == stage
        ):
            found = True
            # NOTE(review): AWS reports an empty base path as "(none)" while
            # `base_path` may be None/"" here, so this comparison can issue a
            # redundant update — confirm.
            if base_path_mapping["basePath"] != base_path:
                self.apigateway_client.update_base_path_mapping(
                    domainName=domain_name,
                    basePath=base_path_mapping["basePath"],
                    patchOperations=[
                        {
                            "op": "replace",
                            "path": "/basePath",
                            "value": "" if base_path is None else base_path,
                        }
                    ],
                )
    if not found:
        self.apigateway_client.create_base_path_mapping(
            domainName=domain_name,
            basePath="" if base_path is None else base_path,
            restApiId=api_id,
            stage=stage,
        )
def get_all_zones(self):
    """Same behaviour of list_host_zones, but transparently handling pagination."""
    hosted_zones = []
    # First page; follow NextMarker while Route53 reports truncation.
    page = self.route53.list_hosted_zones(MaxItems="100")
    hosted_zones.extend(page["HostedZones"])
    while page["IsTruncated"]:
        page = self.route53.list_hosted_zones(
            Marker=page["NextMarker"], MaxItems="100"
        )
        hosted_zones.extend(page["HostedZones"])
    return {"HostedZones": hosted_zones}
def get_domain_name(self, domain_name, route53=True):
    """
    Scan our hosted zones for the record of a given name.
    Returns the record entry, else None.

    With route53=False, only verifies the API Gateway custom domain exists
    and returns True (or None when it doesn't).
    """
    # Make sure api gateway domain is present
    try:
        self.apigateway_client.get_domain_name(domainName=domain_name)
    except Exception:
        return None
    if not route53:
        return True
    try:
        zones = self.get_all_zones()
        for zone in zones["HostedZones"]:
            # NOTE(review): list_resource_record_sets is paginated; records
            # beyond the first page are not inspected — confirm acceptable.
            records = self.route53.list_resource_record_sets(
                HostedZoneId=zone["Id"]
            )
            for record in records["ResourceRecordSets"]:
                # Record names carry a trailing dot; strip it to compare.
                if (
                    record["Type"] in ("CNAME", "A")
                    and record["Name"][:-1] == domain_name
                ):
                    return record
    except Exception as e:
        return None
    ##
    # Old, automatic logic.
    # If re-introduced, should be moved to a new function.
    # Related ticket: https://github.com/Miserlou/Zappa/pull/458
    ##
    # We may be in a position where Route53 doesn't have a domain, but the API Gateway does.
    # We need to delete this before we can create the new Route53.
    # try:
    #     api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    #     self.apigateway_client.delete_domain_name(domainName=domain_name)
    # except Exception:
    #     pass
    return None
##
# IAM
##
def get_credentials_arn(self):
    """
    Given our role name, get and set the credentials_arn.

    Returns a (role, arn) tuple and caches the ARN on self.credentials_arn.
    """
    role = self.iam.Role(self.role_name)
    arn = role.arn
    self.credentials_arn = arn
    return role, arn
def create_iam_roles(self):
    """
    Create and defines the IAM roles and policies necessary for Zappa.
    If the IAM role already exists, it will be updated if necessary.

    Returns (credentials_arn, updated) where `updated` is True when any
    role or policy was created or modified.
    """
    attach_policy_obj = json.loads(self.attach_policy)
    assume_policy_obj = json.loads(self.assume_policy)
    if self.extra_permissions:
        # User-supplied extra statements are appended to the attach policy.
        for permission in self.extra_permissions:
            attach_policy_obj["Statement"].append(dict(permission))
        self.attach_policy = json.dumps(attach_policy_obj)
    updated = False
    # Create the role if needed
    try:
        role, credentials_arn = self.get_credentials_arn()
    except botocore.client.ClientError:
        print("Creating " + self.role_name + " IAM Role..")
        role = self.iam.create_role(
            RoleName=self.role_name, AssumeRolePolicyDocument=self.assume_policy
        )
        self.credentials_arn = role.arn
        updated = True
    # create or update the role's policies if needed
    policy = self.iam.RolePolicy(self.role_name, "zappa-permissions")
    try:
        if policy.policy_document != attach_policy_obj:
            print(
                "Updating zappa-permissions policy on "
                + self.role_name
                + " IAM Role."
            )
            policy.put(PolicyDocument=self.attach_policy)
            updated = True
    except botocore.client.ClientError:
        # The policy doesn't exist yet.
        print(
            "Creating zappa-permissions policy on " + self.role_name + " IAM Role."
        )
        policy.put(PolicyDocument=self.attach_policy)
        updated = True
    # NOTE(review): this requires BOTH the whole document AND the principal
    # service set to differ before updating; an `or` may have been the
    # intent — confirm before changing.
    if role.assume_role_policy_document != assume_policy_obj and set(
        role.assume_role_policy_document["Statement"][0]["Principal"]["Service"]
    ) != set(assume_policy_obj["Statement"][0]["Principal"]["Service"]):
        print("Updating assume role policy on " + self.role_name + " IAM Role.")
        self.iam_client.update_assume_role_policy(
            RoleName=self.role_name, PolicyDocument=self.assume_policy
        )
        updated = True
    return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
    """
    Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.

    Deletes every statement currently attached to the function's resource
    policy; a missing policy (first run) is expected and ignored.
    """
    try:
        policy_response = self.lambda_client.get_policy(FunctionName=lambda_name)
        if policy_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
            statement = json.loads(policy_response["Policy"])["Statement"]
            for s in statement:
                delete_response = self.lambda_client.remove_permission(
                    FunctionName=lambda_name, StatementId=s["Sid"]
                )
                # remove_permission returns 204 No Content on success.
                if delete_response["ResponseMetadata"]["HTTPStatusCode"] != 204:
                    logger.error(
                        "Failed to delete an obsolete policy statement: {}".format(
                            policy_response
                        )
                    )
        else:
            logger.debug(
                "Failed to load Lambda function policy: {}".format(policy_response)
            )
    except ClientError as e:
        # No policy at all is expected on a first deploy.
        if e.args[0].find("ResourceNotFoundException") > -1:
            logger.debug("No policy found, must be first run.")
        else:
            logger.error("Unexpected client error {}".format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
    """
    Create permissions to link to an event.
    Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html

    Grants `principal` (e.g. "events.amazonaws.com") permission to invoke
    the function when triggered from `source_arn`. Returns the add_permission
    response, or None on failure.
    """
    logger.debug(
        "Adding new permission to invoke Lambda function: {}".format(lambda_name)
    )
    account_id: str = self.sts_client.get_caller_identity().get("Account")
    permission_response = self.lambda_client.add_permission(
        FunctionName=lambda_name,
        # Random 8-char statement id; must be unique within the policy.
        StatementId="".join(
            random.choice(string.ascii_uppercase + string.digits) for _ in range(8)
        ),
        Action="lambda:InvokeFunction",
        Principal=principal,
        SourceArn=source_arn,
        # The SourceAccount argument ensures that only the specified AWS account can invoke the lambda function.
        # This prevents a security issue where if a lambda is triggered off of s3 bucket events and the bucket is
        # deleted, another AWS account can create a bucket with the same name and potentially trigger the original
        # lambda function, since bucket names are global.
        # https://github.com/zappa/Zappa/issues/1039
        SourceAccount=account_id,
    )
    if permission_response["ResponseMetadata"]["HTTPStatusCode"] != 201:
        print("Problem creating permission to invoke Lambda function")
        return None  # XXX: Raise?
    return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.
    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
    Expressions can be in rate or cron format:
    http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    """
    # The stream sources - DynamoDB, Kinesis and SQS - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ["dynamodb", "kinesis", "sqs"]
    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"
    # Start from a clean slate so renamed/removed events don't linger.
    self.unschedule_events(
        lambda_name=lambda_name,
        lambda_arn=lambda_arn,
        events=events,
        excluded_source_services=pull_services,
    )
    for event in events:
        function = event["function"]
        expression = event.get("expression", None)  # single expression
        expressions = event.get("expressions", None)  # multiple expression
        kwargs = event.get(
            "kwargs", {}
        )  # optional dict of keyword arguments for the event
        event_source = event.get("event_source", None)
        description = event.get("description", function)
        # - If 'cron' or 'rate' in expression, use ScheduleExpression
        # - Else, use EventPattern
        #   - ex https://github.com/awslabs/aws-lambda-ddns-function
        if not self.credentials_arn:
            self.get_credentials_arn()
        if expression:
            expressions = [
                expression
            ]  # same code for single and multiple expression
        if expressions:
            for index, expression in enumerate(expressions):
                # Rule names must be unique per expression and lambda.
                rule_name = self.get_scheduled_event_name(
                    event,
                    function,
                    lambda_name,
                    index,
                )
                rule_response = self.events_client.put_rule(
                    Name=rule_name,
                    ScheduleExpression=expression,
                    State="ENABLED",
                    Description=description,
                    RoleArn=self.credentials_arn,
                )
                if "RuleArn" in rule_response:
                    logger.debug(
                        "Rule created. ARN {}".format(rule_response["RuleArn"])
                    )
                # Specific permissions are necessary for any trigger to work.
                self.create_event_permission(
                    lambda_name, "events.amazonaws.com", rule_response["RuleArn"]
                )
                # Overwriting the input, supply the original values and add kwargs
                input_template = (
                    '{"time": <time>, '
                    '"detail-type": <detail-type>, '
                    '"source": <source>,'
                    '"account": <account>, '
                    '"region": <region>,'
                    '"detail": <detail>, '
                    '"version": <version>,'
                    '"resources": <resources>,'
                    '"id": <id>,'
                    '"kwargs": %s'
                    "}" % json.dumps(kwargs)
                )
                # Create the CloudWatch event ARN for this function.
                # https://github.com/Miserlou/Zappa/issues/359
                target_response = self.events_client.put_targets(
                    Rule=rule_name,
                    Targets=[
                        {
                            # Random target id; uniqueness within the rule.
                            "Id": "Id"
                            + "".join(
                                random.choice(string.digits) for _ in range(12)
                            ),
                            "Arn": lambda_arn,
                            "InputTransformer": {
                                "InputPathsMap": {
                                    "time": "$.time",
                                    "detail-type": "$.detail-type",
                                    "source": "$.source",
                                    "account": "$.account",
                                    "region": "$.region",
                                    "detail": "$.detail",
                                    "version": "$.version",
                                    "resources": "$.resources",
                                    "id": "$.id",
                                },
                                "InputTemplate": input_template,
                            },
                        }
                    ],
                )
                if target_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
                    print(
                        "Scheduled {} with expression {}!".format(
                            rule_name, expression
                        )
                    )
                else:
                    print(
                        "Problem scheduling {} with expression {}.".format(
                            rule_name, expression
                        )
                    )
        elif event_source:
            service = self.service_from_arn(event_source["arn"])
            if service not in pull_services:
                # Push services need explicit invoke permission on the Lambda.
                svc = ",".join(event["event_source"]["events"])
                self.create_event_permission(
                    lambda_name,
                    service + ".amazonaws.com",
                    event["event_source"]["arn"],
                )
            else:
                svc = service
            rule_response = add_event_source(
                event_source, lambda_arn, function, self.boto_session
            )
            if rule_response == "successful":
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == "failed":
                print(
                    "Problem creating {} event schedule for {}!".format(
                        svc, function
                    )
                )
            elif rule_response == "exists":
                print(
                    "{} event schedule for {} already exists - Nothing to do here.".format(
                        svc, function
                    )
                )
            elif rule_response == "dryrun":
                print(
                    "Dryrun for creating {} event schedule for {}!!".format(
                        svc, function
                    )
                )
        else:
            # NOTE(review): `rule_name` is only bound in the expressions
            # branch above; reaching here first would raise NameError — confirm.
            print(
                "Could not create event {} - Please define either an expression or an event source".format(
                    rule_name,
                )
            )
def get_scheduled_event_name(self, event, function, lambda_name, index=0):
"""
Returns an AWS-valid CloudWatch rule name using a digest of the event name, lambda name, and function.
This allows support for rule names that may be longer than the 64 char limit.
"""
name = event.get("name", function)
if name != function:
# a custom event name has been provided, make sure function name is included as postfix,
# otherwise zappa's handler won't be able to locate the function.
name = "{}-{}".format(name, function)
if index:
# to ensure unique cloudwatch rule names in the case of multiple expressions
# prefix all entries bar the first with the index
# Related: https://github.com/Miserlou/Zappa/pull/1051
name = "{}-{}".format(index, name)
# prefix scheduled event names with lambda name. So we can look them up later via the prefix.
event_name = self.get_event_name(lambda_name, name)
# if it's possible that we truncated name, generate a unique, shortened name
# https://github.com/Miserlou/Zappa/issues/970
if len(event_name) >= 64:
lambda_name = self.get_hashed_lambda_name(lambda_name)
event_name = self.get_event_name(lambda_name, name)
return event_name
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return "{prefix:.{width}}-{postfix}".format(
prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name
)[:64]
@staticmethod
def get_hashed_lambda_name(lambda_name):
return hashlib.sha1(lambda_name.encode()).hexdigest()
    def delete_rule(self, rule_name):
        """
        Delete a CWE rule.
        This deletes them, but they will still show up in the AWS console.
        Annoying.

        :param rule_name: name of the CloudWatch Events rule to delete.
        """
        logger.debug("Deleting existing rule {}".format(rule_name))
        # All targets must be removed before
        # we can actually delete the rule.
        try:
            targets = self.events_client.list_targets_by_rule(Rule=rule_name)
        except botocore.exceptions.ClientError as e:
            # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
            error_code = e.response["Error"]["Code"]
            if error_code == "AccessDeniedException":
                # Permission problems must surface; anything else is treated
                # as "rule has no targets / does not exist" and we bail out.
                raise
            else:
                logger.debug(
                    "No target found for this rule: {} {}".format(rule_name, e.args[0])
                )
                return
        if "Targets" in targets and targets["Targets"]:
            self.events_client.remove_targets(
                Rule=rule_name, Ids=[x["Id"] for x in targets["Targets"]]
            )
        else:  # pragma: no cover
            logger.debug("No target to delete")
        # Delete our rule.
        self.events_client.delete_rule(Name=rule_name)
    def get_event_rule_names_for_lambda(self, lambda_arn):
        """
        Get all of the rule names associated with a lambda function.

        :param lambda_arn: ARN the rules target.
        :return: list of rule name strings, across all result pages.
        """
        response = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn)
        rule_names = response["RuleNames"]
        # Iterate when the results are paginated
        while "NextToken" in response:
            response = self.events_client.list_rule_names_by_target(
                TargetArn=lambda_arn, NextToken=response["NextToken"]
            )
            rule_names.extend(response["RuleNames"])
        return rule_names
def get_event_rules_for_lambda(self, lambda_arn):
"""
Get all of the rule details associated with this function.
"""
rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
return [self.events_client.describe_rule(Name=r) for r in rule_names]
def unschedule_events(
self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None
):
excluded_source_services = excluded_source_services or []
"""
Given a list of events, unschedule these CloudWatch Events.
'events' is a list of dictionaries, where the dict must contains the string
of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
"""
self._clear_policy(lambda_name)
rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
for rule_name in rule_names:
self.delete_rule(rule_name)
print("Unscheduled " + rule_name + ".")
non_cwe = [e for e in events if "event_source" in e]
for event in non_cwe:
# TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
# them no matter what.
# These are non CWE event sources.
function = event["function"]
name = event.get("name", function)
event_source = event.get("event_source", function)
service = self.service_from_arn(event_source["arn"])
# DynamoDB and Kinesis streams take quite a while to setup after they are created and do not need to be
# re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
# update or zappa schedule.
if service not in excluded_source_services:
remove_event_source(
event_source, lambda_arn, function, self.boto_session
)
print(
"Removed event {}{}.".format(
name,
" ({})".format(str(event_source["events"]))
if "events" in event_source
else "",
)
)
###
# Async / SNS
##
    def create_async_sns_topic(self, lambda_name, lambda_arn):
        """
        Create the SNS-based async topic.

        :param lambda_name: lambda name, used to derive the topic name.
        :param lambda_arn: ARN invoked by the topic's subscription.
        :return: the ARN of the SNS topic.
        """
        topic_name = get_topic_name(lambda_name)
        # Create SNS topic
        topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
        # Create subscription
        self.sns_client.subscribe(
            TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn
        )
        # Add Lambda permission for SNS to invoke function
        self.create_event_permission(
            lambda_name=lambda_name, principal="sns.amazonaws.com", source_arn=topic_arn
        )
        # Add rule for SNS topic as a event source
        # Routed through zappa's async dispatcher rather than the app itself.
        add_event_source(
            event_source={"arn": topic_arn, "events": ["sns:Publish"]},
            lambda_arn=lambda_arn,
            target_function="zappa.asynchronous.route_task",
            boto_session=self.boto_session,
        )
        return topic_arn
    def remove_async_sns_topic(self, lambda_name):
        """
        Remove the async SNS topic.

        :param lambda_name: lambda whose async topic should be removed.
        :return: list of topic ARNs deleted (one entry per matching subscription).
        """
        topic_name = get_topic_name(lambda_name)
        removed_arns = []
        for sub in self.sns_client.list_subscriptions()["Subscriptions"]:
            # Matching is by substring on the subscription's topic ARN.
            if topic_name in sub["TopicArn"]:
                # NOTE(review): delete_topic is issued once per matching
                # subscription, so a topic with several subscriptions is
                # deleted (and recorded) multiple times — confirm intended.
                self.sns_client.delete_topic(TopicArn=sub["TopicArn"])
                removed_arns.append(sub["TopicArn"])
        return removed_arns
###
# Async / DynamoDB
##
    def _set_async_dynamodb_table_ttl(self, table_name):
        # Enable TTL-based expiry on the async results table, keyed off each
        # item's 'ttl' attribute (presumably epoch seconds written by the
        # async result writer — confirm against that code path).
        self.dynamodb_client.update_time_to_live(
            TableName=table_name,
            TimeToLiveSpecification={"Enabled": True, "AttributeName": "ttl"},
        )
    def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
        """
        Create the DynamoDB table for async task return values

        :param table_name: name of the results table.
        :param read_capacity: provisioned read capacity units.
        :param write_capacity: provisioned write capacity units.
        :return: (created, table_description); created is False when the
            table already existed.
        """
        try:
            dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
            return False, dynamodb_table
        # catch this exception (triggered if the table doesn't exist)
        except botocore.exceptions.ClientError:
            # Simple single-attribute schema: string hash key 'id'.
            dynamodb_table = self.dynamodb_client.create_table(
                AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
                TableName=table_name,
                KeySchema=[
                    {"AttributeName": "id", "KeyType": "HASH"},
                ],
                ProvisionedThroughput={
                    "ReadCapacityUnits": read_capacity,
                    "WriteCapacityUnits": write_capacity,
                },
            )
            if dynamodb_table:
                try:
                    self._set_async_dynamodb_table_ttl(table_name)
                except botocore.exceptions.ClientError:
                    # this fails because the operation is async, so retry
                    time.sleep(10)
                    self._set_async_dynamodb_table_ttl(table_name)
            return True, dynamodb_table
    def remove_async_dynamodb_table(self, table_name):
        """
        Remove the DynamoDB Table used for async return values

        :param table_name: name of the results table to delete.
        """
        self.dynamodb_client.delete_table(TableName=table_name)
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern="", limit=10000, start_time=0):
"""
Fetch the CloudWatch logs for a given Lambda name.
"""
log_name = "/aws/lambda/" + lambda_name
streams = self.logs_client.describe_log_streams(
logGroupName=log_name, descending=True, orderBy="LastEventTime"
)
all_streams = streams["logStreams"]
all_names = [stream["logStreamName"] for stream in all_streams]
events = []
response = {}
while not response or "nextToken" in response:
extra_args = {}
if "nextToken" in response:
extra_args["nextToken"] = response["nextToken"]
# Amazon uses millisecond epoch for some reason.
# Thanks, Jeff.
start_time = start_time * 1000
end_time = int(time.time()) * 1000
response = self.logs_client.filter_log_events(
logGroupName=log_name,
logStreamNames=all_names,
startTime=start_time,
endTime=end_time,
filterPattern=filter_pattern,
limit=limit,
interleaved=True, # Does this actually improve performance?
**extra_args,
)
if response and "events" in response:
events += response["events"]
return sorted(events, key=lambda k: k["timestamp"])
    def remove_log_group(self, group_name):
        """
        Filter all log groups that match the name given in log_filter.

        :param group_name: exact CloudWatch Logs group name to delete.
        """
        print("Removing log group: {}".format(group_name))
        try:
            self.logs_client.delete_log_group(logGroupName=group_name)
        except botocore.exceptions.ClientError as e:
            # Best-effort: report and continue rather than aborting cleanup.
            print("Couldn't remove '{}' because of: {}".format(group_name, e))
    def remove_lambda_function_logs(self, lambda_function_name):
        """
        Remove all logs that are assigned to a given lambda function id.

        :param lambda_function_name: name used to build the /aws/lambda/... group.
        """
        self.remove_log_group("/aws/lambda/{}".format(lambda_function_name))
    def remove_api_gateway_logs(self, project_name):
        """
        Removed all logs that are assigned to a given rest api id.

        :param project_name: project whose REST APIs' execution logs are removed.
        """
        # One execution-log group exists per (rest api, stage) pair.
        for rest_api in self.get_rest_apis(project_name):
            for stage in self.apigateway_client.get_stages(restApiId=rest_api["id"])[
                "item"
            ]:
                self.remove_log_group(
                    "API-Gateway-Execution-Logs_{}/{}".format(
                        rest_api["id"], stage["stageName"]
                    )
                )
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
"""
Get the Hosted Zone ID for a given domain.
"""
all_zones = self.get_all_zones()
return self.get_best_match_zone(all_zones, domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [
zone
for zone in all_zones["HostedZones"]
if not zone["Config"]["PrivateZone"]
]
zones = {
zone["Name"][:-1]: zone["Id"]
for zone in public_zones
if zone["Name"][:-1] in domain
}
if zones:
keys = max(
zones.keys(), key=lambda a: len(a)
) # get longest key -- best match.
return zones[keys]
else:
return None
    def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
        """
        Set DNS challenge TXT.

        :param zone_id: Route53 hosted zone id.
        :param domain: domain the ACME challenge record is created for.
        :param txt_challenge: challenge token to publish.
        :return: the change_resource_record_sets response.
        """
        print("Setting DNS challenge..")
        resp = self.route53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch=self.get_dns_challenge_change_batch(
                "UPSERT", domain, txt_challenge
            ),
        )
        return resp
    def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
        """
        Remove DNS challenge TXT.

        :param zone_id: Route53 hosted zone id.
        :param domain: domain whose ACME challenge record is removed.
        :param txt_challenge: the previously published challenge token.
        :return: the change_resource_record_sets response.
        """
        print("Deleting DNS challenge..")
        resp = self.route53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch=self.get_dns_challenge_change_batch(
                "DELETE", domain, txt_challenge
            ),
        )
        return resp
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challenge, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": "_acme-challenge.{0}".format(domain),
"Type": "TXT",
"TTL": 60,
"ResourceRecords": [{"Value": '"{0}"'.format(txt_challenge)}],
},
}
]
}
##
# Utility
##
    def shell(self):
        """
        Spawn a PDB shell.
        """
        # Imported lazily so normal (non-debug) runs never pay for pdb.
        import pdb
        pdb.set_trace()
    def load_credentials(self, boto_session=None, profile_name=None):
        """
        Load AWS credentials.
        An optional boto_session can be provided, but that's usually for testing.
        An optional profile_name can be provided for config files that have multiple sets
        of credentials.

        Resolution order when no session is supplied: named profile, then
        explicit AWS_* environment variables, then boto3's own default chain.
        Always synchronizes self.aws_region with the resulting session.
        """
        # Automatically load credentials from config or environment
        if not boto_session:
            # If provided, use the supplied profile name.
            if profile_name:
                self.boto_session = boto3.Session(
                    profile_name=profile_name, region_name=self.aws_region
                )
            elif os.environ.get("AWS_ACCESS_KEY_ID") and os.environ.get(
                "AWS_SECRET_ACCESS_KEY"
            ):
                # Explicit env credentials beat boto's config-file chain.
                region_name = os.environ.get("AWS_DEFAULT_REGION") or self.aws_region
                session_kw = {
                    "aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID"),
                    "aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
                    "region_name": region_name,
                }
                # If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
                if os.environ.get("AWS_SESSION_TOKEN"):
                    session_kw["aws_session_token"] = os.environ.get(
                        "AWS_SESSION_TOKEN"
                    )
                self.boto_session = boto3.Session(**session_kw)
            else:
                self.boto_session = boto3.Session(region_name=self.aws_region)
            logger.debug("Loaded boto session from config: %s", boto_session)
        else:
            logger.debug("Using provided boto session: %s", boto_session)
            self.boto_session = boto_session
        # use provided session's region in case it differs
        self.aws_region = self.boto_session.region_name
        if self.boto_session.region_name not in LAMBDA_REGIONS:
            print("Warning! AWS Lambda may not be available in this AWS Region!")
        if self.boto_session.region_name not in API_GATEWAY_REGIONS:
            print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(":")[2] | zappa-teamturing | /zappa-teamturing-0.54.12.tar.gz/zappa-teamturing-0.54.12/zappa/core.py | core.py |
import calendar
import datetime
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from urllib.parse import urlparse
import botocore
import durationpy
from past.builtins import basestring
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.
    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.

    :param src: source directory (an egg-link file is handled specially).
    :param dst: destination directory, created if missing.
    :param metadata: also copy permissions/timestamps when True.
    :param symlinks: recreate symlinks instead of following them.
    :param ignore: callable matching shutil.copytree's `ignore` contract.
    """

    def copy_file(src, dst, item):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)

        if symlinks and os.path.islink(s):  # pragma: no cover
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                except (AttributeError, NotImplementedError, OSError):
                    # lchmod not available (missing on most Linux builds).
                    # FIX: was a bare `except:` which also swallowed
                    # KeyboardInterrupt and SystemExit.
                    pass
        elif os.path.isdir(s):
            copytree(s, d, metadata, symlinks, ignore)
        else:
            shutil.copy2(s, d) if metadata else shutil.copy(s, d)

    try:
        lst = os.listdir(src)
        if not os.path.exists(dst):
            os.makedirs(dst)
        if metadata:
            shutil.copystat(src, dst)
    except NotADirectoryError:  # egg-link files
        copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
        return

    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]

    for item in lst:
        copy_file(src, dst, item)
def parse_s3_url(url):
    """
    Parses S3 URL.
    Returns bucket (domain) and file (full path).
    """
    if not url:
        # Empty/None input yields empty bucket and path.
        return "", ""
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip("/")
def human_size(num, suffix="B"):
    """
    Convert bytes length to a human-readable version
    """
    units = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi")
    magnitude = 0
    # Scale down by 1024 until the value fits the current unit.
    while magnitude < len(units) and abs(num) >= 1024.0:
        num /= 1024.0
        magnitude += 1
    if magnitude < len(units):
        return "{0:3.1f}{1!s}{2!s}".format(num, units[magnitude], suffix)
    return "{0:.1f}{1!s}{2!s}".format(num, "Yi", suffix)
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.
    """
    # Uses an extended version of Go's duration string.
    try:
        delta = durationpy.from_str(timestring)
        moment = datetime.datetime.utcnow() - delta
        return calendar.timegm(moment.timetuple())
    except Exception:
        # Unparseable timestrings fall back to 0.
        return 0
##
# `init` related
##
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.

    Walks the current working directory for files matching '*settings.py',
    skipping anything under site-packages.

    :return: list of dotted module paths relative to the cwd.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, "*settings.py"):
            full = os.path.join(root, filename)
            if "site-packages" in full:
                continue
            # (a duplicated `full = os.path.join(...)` assignment was removed)
            package_path = full.replace(os.getcwd(), "")
            package_module = (
                package_path.replace(os.sep, ".").split(".", 1)[1].replace(".py", "")
            )
            matches.append(package_module)
    return matches
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.

    Scans every .py file under the cwd (excluding site-packages) for an
    assignment of the form `app = Flask(...)`.

    :return: list of dotted module paths, each ending in the app variable name.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, "*.py"):
            full = os.path.join(root, filename)
            if "site-packages" in full:
                continue
            # (a duplicated `full = os.path.join(...)` assignment was removed)
            with io.open(full, "r", encoding="utf-8") as f:
                lines = f.readlines()
            for line in lines:
                app = None
                # Kind of janky..
                if "= Flask(" in line:
                    app = line.split("= Flask(")[0].strip()
                if "=Flask(" in line:
                    app = line.split("=Flask(")[0].strip()
                if not app:
                    continue
                package_path = full.replace(os.getcwd(), "")
                package_module = (
                    package_path.replace(os.sep, ".")
                    .split(".", 1)[1]
                    .replace(".py", "")
                )
                app_module = package_module + "." + app
                matches.append(app_module)
    return matches
def get_venv_from_python_version():
    # e.g. "python3.9" for the running interpreter.
    major, minor = sys.version_info[:2]
    return "python{}.{}".format(major, minor)
def get_runtime_from_python_version():
    """
    Map the running interpreter to the closest supported AWS Lambda
    runtime identifier, raising for the unsupported Python 2.
    """
    major, minor = sys.version_info[:2]
    if major < 3:
        raise ValueError("Python 2.x is no longer supported.")
    if minor <= 6:
        return "python3.6"
    if minor <= 7:
        return "python3.7"
    if minor <= 8:
        return "python3.8"
    return "python3.9"
##
# Async Tasks
##
def get_topic_name(lambda_name):
    """Return the async SNS topic name derived from a lambda name."""
    suffix = "-zappa-async"
    return lambda_name + suffix
##
# Event sources / Kappa
##
def get_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary item, a session and a lambda_arn,
    hack into Kappa's Gibson, create out an object we can call
    to schedule this event, and return the event source.

    :param event_source: dict with at least an 'arn' key; the ARN's service
        field selects which kappa EventSource class is used.
    :param lambda_arn: ARN of the lambda the event source should invoke.
    :param target_function: zappa-level function name (used for s3 routing).
    :param boto_session: boto3 session handed to kappa's pseudo-context.
    :param dry: accepted for signature compatibility; not used here.
    :return: (event_source_obj, ctx, funk) — the kappa event source plus the
        pseudo context/function objects it operates on.
    """
    # Lazy imports: kappa is only needed when event sources are touched.
    import kappa.awsclient
    import kappa.event_source.base
    import kappa.event_source.cloudwatch
    import kappa.event_source.dynamodb_stream
    import kappa.event_source.kinesis
    import kappa.event_source.s3
    import kappa.event_source.sns
    import kappa.function
    import kappa.policy
    import kappa.restapi
    import kappa.role
    # Minimal stand-ins for kappa's Context/Function objects; attributes are
    # attached dynamically below.
    class PseudoContext:
        def __init__(self):
            return
    class PseudoFunction:
        def __init__(self):
            return
    # Mostly adapted from kappa - will probably be replaced by kappa support
    class SqsEventSource(kappa.event_source.base.EventSource):
        def __init__(self, context, config):
            super().__init__(context, config)
            self._lambda = kappa.awsclient.create_client("lambda", context.session)
        def _get_uuid(self, function):
            # Look up the existing event-source mapping (if any) by
            # function name + source ARN; UUID identifies it to AWS.
            uuid = None
            response = self._lambda.call(
                "list_event_source_mappings",
                FunctionName=function.name,
                EventSourceArn=self.arn,
            )
            LOG.debug(response)
            if len(response["EventSourceMappings"]) > 0:
                uuid = response["EventSourceMappings"][0]["UUID"]
            return uuid
        def add(self, function):
            try:
                response = self._lambda.call(
                    "create_event_source_mapping",
                    FunctionName=function.name,
                    EventSourceArn=self.arn,
                    BatchSize=self.batch_size,
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to add event source")
        def enable(self, function):
            self._config["enabled"] = True
            try:
                response = self._lambda.call(
                    "update_event_source_mapping",
                    UUID=self._get_uuid(function),
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to enable event source")
        def disable(self, function):
            self._config["enabled"] = False
            try:
                # NOTE(review): unlike enable(), this passes FunctionName and
                # no UUID to update_event_source_mapping — looks inconsistent
                # and likely fails; confirm against the boto3 Lambda API.
                response = self._lambda.call(
                    "update_event_source_mapping",
                    FunctionName=function.name,
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to disable event source")
        def update(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    # NOTE(review): the fetched uuid is not passed to the
                    # update call (FunctionName=function.arn is sent instead)
                    # — verify; update_event_source_mapping keys on UUID.
                    response = self._lambda.call(
                        "update_event_source_mapping",
                        BatchSize=self.batch_size,
                        Enabled=self.enabled,
                        FunctionName=function.arn,
                    )
                    LOG.debug(response)
                except Exception:
                    LOG.exception("Unable to update event source")
        def remove(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                response = self._lambda.call("delete_event_source_mapping", UUID=uuid)
                LOG.debug(response)
            return response
        def status(self, function):
            response = None
            LOG.debug("getting status for event source %s", self.arn)
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    # NOTE(review): _get_uuid is called a second time here
                    # even though `uuid` already holds the value.
                    response = self._lambda.call(
                        "get_event_source_mapping", UUID=self._get_uuid(function)
                    )
                    LOG.debug(response)
                except botocore.exceptions.ClientError:
                    LOG.debug("event source %s does not exist", self.arn)
                    response = None
            else:
                LOG.debug("No UUID for event source %s", self.arn)
            return response
    # SNS source extended with optional subscription filter policies
    # (from the 'filters' key of the event_source config).
    class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
        @property
        def filters(self):
            return self._config.get("filters")
        def add_filters(self, function):
            try:
                subscription = self.exists(function)
                if subscription:
                    response = self._sns.call(
                        "set_subscription_attributes",
                        SubscriptionArn=subscription["SubscriptionArn"],
                        AttributeName="FilterPolicy",
                        AttributeValue=json.dumps(self.filters),
                    )
                    kappa.event_source.sns.LOG.debug(response)
            except Exception:
                kappa.event_source.sns.LOG.exception(
                    "Unable to add filters for SNS topic %s", self.arn
                )
        def add(self, function):
            super().add(function)
            if self.filters:
                self.add_filters(function)
    # Service name (third ARN field) -> kappa event source class.
    event_source_map = {
        "dynamodb": kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
        "kinesis": kappa.event_source.kinesis.KinesisEventSource,
        "s3": kappa.event_source.s3.S3EventSource,
        "sns": ExtendedSnsEventSource,
        "sqs": SqsEventSource,
        "events": kappa.event_source.cloudwatch.CloudWatchEventSource,
    }
    arn = event_source["arn"]
    _, _, svc, _ = arn.split(":", 3)
    event_source_func = event_source_map.get(svc, None)
    if not event_source_func:
        raise ValueError("Unknown event source: {0}".format(arn))
    # Monkey-patch kappa's notification-id generator so it just echoes the
    # function name back (stable, human-readable ids).
    def autoreturn(self, function_name):
        return function_name
    event_source_func._make_notification_id = autoreturn
    ctx = PseudoContext()
    ctx.session = boto_session
    funk = PseudoFunction()
    funk.name = lambda_arn
    # Kappa 0.6.0 requires this nasty hacking,
    # hopefully we can remove at least some of this soon.
    # Kappa 0.7.0 introduces a whole host over other changes we don't
    # really want, so we're stuck here for a little while.
    # Related: https://github.com/Miserlou/Zappa/issues/684
    #          https://github.com/Miserlou/Zappa/issues/688
    #          https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
    if svc == "s3":
        # S3 sources need the ARN split so kappa can embed the environment
        # (last ARN segment) and route to the zappa-level target function.
        split_arn = lambda_arn.split(":")
        arn_front = ":".join(split_arn[:-1])
        arn_back = split_arn[-1]
        ctx.environment = arn_back
        funk.arn = arn_front
        funk.name = ":".join([arn_back, target_function])
    else:
        funk.arn = lambda_arn
    funk._context = ctx
    event_source_obj = event_source_func(ctx, event_source)
    return event_source_obj, ctx, funk
def add_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and add the event source.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    # TODO: Detect changes in config and refine exists algorithm
    if dry:
        return "dryrun"
    if event_source_obj.status(funk):
        return "exists"
    event_source_obj.add(funk)
    return "successful" if event_source_obj.status(funk) else "failed"
def remove_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and remove the event source.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn
    if dry:
        return event_source_obj
    return event_source_obj.remove(funk)
def get_event_source_status(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    source_obj, _, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    return source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.
    Returns True is updateable, else False.
    """
    # Imported lazily so offline code paths never touch requests.
    import requests

    pypi_url = "https://pypi.org/pypi/Zappa/json"
    resp = requests.get(pypi_url, timeout=1.5)
    latest_version = resp.json()["info"]["version"]
    return this_version != latest_version
class InvalidAwsLambdaName(Exception):
    """Exception: proposed AWS Lambda name is invalid"""
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.
    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix.
    maxlen: max allowed length for name without prefix and suffix.
        The value 80 was calculated from prefix with longest known region name
        and assuming that no alias or version would be longer than `$LATEST`.
    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html
    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    # `str` replaces the old past.builtins.basestring shim: this codebase is
    # Python 3 only (see the Python 2 rejection in get_runtime_from_python_version),
    # and on Python 3 the two are identical.
    if not isinstance(name, str):
        msg = "Name must be of type string"
        raise InvalidAwsLambdaName(msg)
    if len(name) > maxlen:
        msg = "Name is longer than {maxlen} characters."
        raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
    if len(name) == 0:
        msg = "Name must not be empty string."
        raise InvalidAwsLambdaName(msg)
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
        raise InvalidAwsLambdaName(msg)
    return name
def contains_python_files_or_subdirs(folder):
    """
    Checks (recursively) if the directory contains .py or .pyc files

    :param folder: directory path to scan.
    :return: True as soon as any *.py / *.pyc file is found, else False.
    """
    # os.walk is already fully recursive, so inspecting the files of each
    # visited directory covers every subdirectory too. The previous
    # implementation additionally walked each subdirectory *name* relative
    # to the current working directory (`os.walk(d)` with a bare dir name),
    # which was both redundant and wrong.
    for root, dirs, files in os.walk(folder):
        if any(filename.endswith((".py", ".pyc")) for filename in files):
            return True
    return False
def conflicts_with_a_neighbouring_module(directory_path):
    """
    Checks if a directory lies in the same directory as a .py file with the same name.
    """
    parent_dir, dir_name = os.path.split(os.path.normpath(directory_path))
    sibling_module = dir_name + ".py"
    return sibling_module in os.listdir(parent_dir)
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all keys titlecased.
    """
    titled = {}
    for key, value in d.items():
        titled[key.title()] = value
    return titled
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
    """
    Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
    """
    # Bucket names must be at least 3 and no more than 63 characters long.
    if not 3 <= len(name) <= 63:
        return False
    # Bucket names must not contain uppercase characters or underscores.
    if "_" in name or any(ch.isupper() for ch in name):
        return False
    # Bucket names must start with a lowercase letter or number.
    if not (name[0].islower() or name[0].isdigit()):
        return False
    # Bucket names are dot-separated labels; each label must start and end
    # with a lowercase letter or a number, and may not be empty.
    labels = name.split(".")
    for label in labels:
        if not label:
            return False
        if not (label[0].islower() or label[0].isdigit()):
            return False
        if not (label[-1].islower() or label[-1].isdigit()):
            return False
    # Bucket names must not be formatted as an IP address (all-digit labels).
    if all(label.isdigit() for label in labels):
        return False
    return True
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single_valued = event.get("headers") or {}
    merged = dict(event.get("multiValueHeaders") or {})
    # Single-valued headers only fill gaps; multi-valued entries win.
    for header_name, header_value in single_valued.items():
        merged.setdefault(header_name, [header_value])
    return {name: ", ".join(values) for name, values in merged.items()}
import argparse
import base64
import collections
import importlib
import inspect
import logging
import os
import pkgutil
import random
import re
import string
import sys
import tempfile
import time
import zipfile
from builtins import bytes, input
from datetime import datetime, timedelta
import argcomplete
import botocore
import click
import hjson as json
import pkg_resources
import requests
import slugify
import toml
import yaml
from click import BaseCommand, Context
from click.exceptions import ClickException
from click.globals import push_context
from dateutil import parser
from past.builtins import basestring
from .core import API_GATEWAY_REGIONS, Zappa, logger
from .utilities import (
InvalidAwsLambdaName,
check_new_version_available,
detect_django_settings,
detect_flask_apps,
get_runtime_from_python_version,
get_venv_from_python_version,
human_size,
is_valid_bucket_name,
parse_s3_url,
string_to_timestamp,
validate_name,
)
# NOTE(review): appears to enumerate zappa_settings keys that the CLI itself
# consumes rather than passing through verbatim — confirm against usage below.
CUSTOM_SETTINGS = [
    "apigateway_policy",
    "assume_policy",
    "attach_policy",
    "aws_region",
    "delete_local_zip",
    "delete_s3_zip",
    "exclude",
    "exclude_glob",
    "extra_permissions",
    "include",
    "role_name",
    "touch",
]
# NOTE(review): presumably surfaced in error messages when boto credentials
# cannot be located — verify at the call sites.
BOTO3_CONFIG_DOCS_URL = (
    "https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration"
)
##
# Main Input Processing
##
class ZappaCLI:
    """
    ZappaCLI object is responsible for loading the settings,
    handling the input arguments and executing the calls to the core library.

    Class attributes below are defaults; instances overwrite them while
    parsing arguments and loading the stage's zappa_settings.
    """
    # CLI
    vargs = None
    command = None
    stage_env = None
    # Zappa settings
    zappa = None
    zappa_settings = None
    load_credentials = True
    disable_progress = False
    # Specific settings
    api_stage = None
    app_function = None
    aws_region = None
    debug = None
    prebuild_script = None
    project_name = None
    profile_name = None
    lambda_arn = None
    lambda_name = None
    lambda_description = None
    lambda_concurrency = None
    s3_bucket_name = None
    settings_file = None
    zip_path = None
    handler_path = None
    vpc_config = None
    memory_size = None
    use_apigateway = None
    lambda_handler = None
    django_settings = None
    manage_roles = True
    exception_handler = None
    environment_variables = None
    authorizer = None
    xray_tracing = False
    aws_kms_key_arn = ""
    context_header_mappings = None
    tags = []
    layers = None
    # Stage names may only contain alphanumerics and underscores.
    stage_name_env_pattern = re.compile("^[a-zA-Z0-9_]+$")
def __init__(self):
self._stage_config_overrides = (
{}
) # change using self.override_stage_config_setting(key, val)
    @property
    def stage_config(self):
        """
        A shortcut property for settings of a stage.

        Resolves the stage's 'extends' chain recursively (the extending
        stage's keys win over the stage it extends), then applies any
        forced overrides set via override_stage_config_setting().
        """
        def get_stage_setting(stage, extended_stages=None):
            if extended_stages is None:
                extended_stages = []
            # Guard against circular 'extends' chains.
            if stage in extended_stages:
                raise RuntimeError(
                    stage + " has already been extended to these settings. "
                    "There is a circular extends within the settings file."
                )
            extended_stages.append(stage)
            try:
                stage_settings = dict(self.zappa_settings[stage].copy())
            except KeyError:
                raise ClickException(
                    "Cannot extend settings for undefined stage '" + stage + "'."
                )
            extends_stage = self.zappa_settings[stage].get("extends", None)
            if not extends_stage:
                return stage_settings
            extended_settings = get_stage_setting(
                stage=extends_stage, extended_stages=extended_stages
            )
            # The extending stage's own settings take precedence.
            extended_settings.update(stage_settings)
            return extended_settings
        settings = get_stage_setting(stage=self.api_stage)
        # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
        if "delete_zip" in settings:
            settings["delete_local_zip"] = settings.get("delete_zip")
        settings.update(self.stage_config_overrides)
        return settings
@property
def stage_config_overrides(self):
"""
Returns zappa_settings we forcefully override for the current stage
set by `self.override_stage_config_setting(key, value)`
"""
return getattr(self, "_stage_config_overrides", {}).get(self.api_stage, {})
def override_stage_config_setting(self, key, val):
"""
Forcefully override a setting set by zappa_settings (for the current stage only)
:param key: settings key
:param val: value
"""
self._stage_config_overrides = getattr(self, "_stage_config_overrides", {})
self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val
    def handle(self, argv=None):
        """
        Main function.
        Parses command, load settings and dispatches accordingly.

        :param argv: Optional argument list; argparse falls back to sys.argv
            when this is None.
        """
        desc = "Zappa - Deploy Python applications to AWS Lambda" " and API Gateway.\n"
        parser = argparse.ArgumentParser(description=desc)
        parser.add_argument(
            "-v",
            "--version",
            action="version",
            version=pkg_resources.get_distribution("zappa-teamturing").version,
            help="Print the zappa version",
        )
        parser.add_argument(
            "--color", default="auto", choices=["auto", "never", "always"]
        )
        # Shared parent parser: stage selection plus common flags, inherited by
        # most subcommands via parents=[env_parser]. --all and a positional
        # stage are mutually exclusive.
        env_parser = argparse.ArgumentParser(add_help=False)
        me_group = env_parser.add_mutually_exclusive_group()
        all_help = "Execute this command for all of our defined " "Zappa stages."
        me_group.add_argument("--all", action="store_true", help=all_help)
        me_group.add_argument("stage_env", nargs="?")
        group = env_parser.add_argument_group()
        group.add_argument(
            "-a", "--app_function", help="The WSGI application function."
        )
        group.add_argument(
            "-s", "--settings_file", help="The path to a Zappa settings file."
        )
        group.add_argument(
            "-q", "--quiet", action="store_true", help="Silence all output."
        )
        # https://github.com/Miserlou/Zappa/issues/407
        # Moved when 'template' command added.
        # Fuck Terraform.
        group.add_argument(
            "-j",
            "--json",
            action="store_true",
            help="Make the output of this command be machine readable.",
        )
        # https://github.com/Miserlou/Zappa/issues/891
        group.add_argument(
            "--disable_progress", action="store_true", help="Disable progress bars."
        )
        group.add_argument("--no_venv", action="store_true", help="Skip venv check.")
        ##
        # Certify
        ##
        subparsers = parser.add_subparsers(title="subcommands", dest="command")
        cert_parser = subparsers.add_parser(
            "certify", parents=[env_parser], help="Create and install SSL certificate"
        )
        cert_parser.add_argument(
            "--manual",
            action="store_true",
            help=(
                "Gets new Let's Encrypt certificates, but prints them to console."
                "Does not update API Gateway domains."
            ),
        )
        cert_parser.add_argument(
            "-y", "--yes", action="store_true", help="Auto confirm yes."
        )
        ##
        # Deploy
        ##
        deploy_parser = subparsers.add_parser(
            "deploy", parents=[env_parser], help="Deploy application."
        )
        deploy_parser.add_argument(
            "-z",
            "--zip",
            help="Deploy Lambda with specific local or S3 hosted zip package",
        )
        deploy_parser.add_argument(
            "-d",
            "--docker-image-uri",
            help="Deploy Lambda with a specific docker image hosted in AWS Elastic Container Registry",
        )
        ##
        # Init
        ##
        # NOTE(review): init_parser is unused after creation; add_parser() call
        # is what registers the subcommand.
        init_parser = subparsers.add_parser("init", help="Initialize Zappa app.")
        ##
        # Package
        ##
        package_parser = subparsers.add_parser(
            "package",
            parents=[env_parser],
            help="Build the application zip package locally.",
        )
        package_parser.add_argument(
            "-o", "--output", help="Name of file to output the package to."
        )
        ##
        # Template
        ##
        template_parser = subparsers.add_parser(
            "template",
            parents=[env_parser],
            help="Create a CloudFormation template for this API Gateway.",
        )
        template_parser.add_argument(
            "-l",
            "--lambda-arn",
            required=True,
            help="ARN of the Lambda function to template to.",
        )
        template_parser.add_argument(
            "-r", "--role-arn", required=True, help="ARN of the Role to template with."
        )
        template_parser.add_argument(
            "-o", "--output", help="Name of file to output the template to."
        )
        ##
        # Invocation
        ##
        invoke_parser = subparsers.add_parser(
            "invoke", parents=[env_parser], help="Invoke remote function."
        )
        invoke_parser.add_argument(
            "--raw",
            action="store_true",
            help=(
                "When invoking remotely, invoke this python as a string,"
                " not as a modular path."
            ),
        )
        invoke_parser.add_argument(
            "--no-color", action="store_true", help=("Don't color the output")
        )
        invoke_parser.add_argument("command_rest")
        ##
        # Manage
        ##
        # 'manage' deliberately does NOT inherit env_parser; see the stage_env
        # special-casing after parse_args below.
        manage_parser = subparsers.add_parser(
            "manage", help="Invoke remote Django manage.py commands."
        )
        rest_help = (
            "Command in the form of <env> <command>. <env> is not "
            "required if --all is specified"
        )
        manage_parser.add_argument("--all", action="store_true", help=all_help)
        manage_parser.add_argument("command_rest", nargs="+", help=rest_help)
        manage_parser.add_argument(
            "--no-color", action="store_true", help=("Don't color the output")
        )
        # This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
        # https://github.com/Miserlou/Zappa/issues/1002
        manage_parser.add_argument(
            "-s", "--settings_file", help="The path to a Zappa settings file."
        )
        ##
        # Rollback
        ##
        def positive_int(s):
            """Ensure an arg is positive"""
            i = int(s)
            if i < 0:
                msg = "This argument must be positive (got {})".format(s)
                raise argparse.ArgumentTypeError(msg)
            return i

        rollback_parser = subparsers.add_parser(
            "rollback",
            parents=[env_parser],
            help="Rollback deployed code to a previous version.",
        )
        rollback_parser.add_argument(
            "-n",
            "--num-rollback",
            type=positive_int,
            default=1,
            help="The number of versions to rollback.",
        )
        ##
        # Scheduling
        ##
        subparsers.add_parser(
            "schedule",
            parents=[env_parser],
            help="Schedule functions to occur at regular intervals.",
        )
        ##
        # Status
        ##
        subparsers.add_parser(
            "status",
            parents=[env_parser],
            help="Show deployment status and event schedules.",
        )
        ##
        # Log Tailing
        ##
        tail_parser = subparsers.add_parser(
            "tail", parents=[env_parser], help="Tail deployment logs."
        )
        tail_parser.add_argument(
            "--no-color", action="store_true", help="Don't color log tail output."
        )
        tail_parser.add_argument(
            "--http",
            action="store_true",
            help="Only show HTTP requests in tail output.",
        )
        tail_parser.add_argument(
            "--non-http",
            action="store_true",
            help="Only show non-HTTP requests in tail output.",
        )
        tail_parser.add_argument(
            "--since",
            type=str,
            default="100000s",
            help="Only show lines since a certain timeframe.",
        )
        tail_parser.add_argument(
            "--filter", type=str, default="", help="Apply a filter pattern to the logs."
        )
        tail_parser.add_argument(
            "--force-color",
            action="store_true",
            help="Force coloring log tail output even if coloring support is not auto-detected. (example: piping)",
        )
        tail_parser.add_argument(
            "--disable-keep-open",
            action="store_true",
            help="Exit after printing the last available log, rather than keeping the log open.",
        )
        ##
        # Undeploy
        ##
        undeploy_parser = subparsers.add_parser(
            "undeploy", parents=[env_parser], help="Undeploy application."
        )
        undeploy_parser.add_argument(
            "--remove-logs",
            action="store_true",
            help=(
                "Removes log groups of api gateway and lambda task"
                " during the undeployment."
            ),
        )
        undeploy_parser.add_argument(
            "-y", "--yes", action="store_true", help="Auto confirm yes."
        )
        ##
        # Unschedule
        ##
        subparsers.add_parser(
            "unschedule", parents=[env_parser], help="Unschedule functions."
        )
        ##
        # Updating
        ##
        update_parser = subparsers.add_parser(
            "update", parents=[env_parser], help="Update deployed application."
        )
        update_parser.add_argument(
            "-z",
            "--zip",
            help="Update Lambda with specific local or S3 hosted zip package",
        )
        update_parser.add_argument(
            "-n",
            "--no-upload",
            help="Update configuration where appropriate, but don't upload new code",
        )
        update_parser.add_argument(
            "-d",
            "--docker-image-uri",
            help="Update Lambda with a specific docker image hosted in AWS Elastic Container Registry",
        )
        ##
        # Debug
        ##
        subparsers.add_parser(
            "shell",
            parents=[env_parser],
            help="A debug shell with a loaded Zappa object.",
        )
        ##
        # Python Settings File
        ##
        settings_parser = subparsers.add_parser(
            "save-python-settings-file",
            parents=[env_parser],
            help="Generate & save the Zappa settings Python file for docker deployments",
        )
        settings_parser.add_argument(
            "-o",
            "--output_path",
            help=(
                "The path to save the Zappa settings Python file. "
                "File must be named zappa_settings.py and should be saved "
                "in the same directory as the Zappa handler.py"
            ),
        )

        argcomplete.autocomplete(parser)
        args = parser.parse_args(argv)
        self.vargs = vars(args)

        if args.color == "never":
            disable_click_colors()
        elif args.color == "always":
            # TODO: Support aggressive coloring like "--force-color" on all commands
            pass
        elif args.color == "auto":
            pass

        # Parse the input
        # NOTE(rmoe): Special case for manage command
        # The manage command can't have both stage_env and command_rest
        # arguments. Since they are both positional arguments argparse can't
        # differentiate the two. This causes problems when used with --all.
        # (e.g. "manage --all showmigrations admin" argparse thinks --all has
        # been specified AND that stage_env='showmigrations')
        # By having command_rest collect everything but --all we can split it
        # apart here instead of relying on argparse.
        if not args.command:
            parser.print_help()
            return

        if args.command == "manage" and not self.vargs.get("all"):
            self.stage_env = self.vargs["command_rest"].pop(0)
        else:
            self.stage_env = self.vargs.get("stage_env")

        if args.command == "package":
            # Packaging is purely local, no AWS credentials needed.
            self.load_credentials = False

        self.command = args.command
        self.disable_progress = self.vargs.get("disable_progress")
        if self.vargs.get("quiet"):
            self.silence()

        # We don't have any settings yet, so make those first!
        # (Settings-based interactions will fail
        # before a project has been initialized.)
        if self.command == "init":
            self.init()
            return

        # Make sure there isn't a new version available
        if not self.vargs.get("json"):
            self.check_for_update()

        # Load and Validate Settings File
        self.load_settings_file(self.vargs.get("settings_file"))

        # Should we execute this for all stages, or just one?
        all_stages = self.vargs.get("all")
        stages = []
        if all_stages:  # All stages!
            stages = self.zappa_settings.keys()
        else:  # Just one env.
            if not self.stage_env:
                # If there's only one stage defined in the settings,
                # use that as the default.
                if len(self.zappa_settings.keys()) == 1:
                    stages.append(list(self.zappa_settings.keys())[0])
                else:
                    parser.error("Please supply a stage to interact with.")
            else:
                stages.append(self.stage_env)

        for stage in stages:
            try:
                self.dispatch_command(self.command, stage)
            except ClickException as e:
                # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
                e.show()
                sys.exit(e.exit_code)
    def dispatch_command(self, command, stage):
        """
        Given a command to execute and stage,
        execute that command.

        :param command: Subcommand name as parsed by `handle` (e.g. 'deploy').
        :param stage: The stage name to operate on; becomes `self.api_stage`.
        """
        self.check_stage_name(stage)
        self.api_stage = stage
        # 'status' and 'manage' produce their own output, so skip the banner.
        if command not in ["status", "manage"]:
            if not self.vargs.get("json", None):
                click.echo(
                    "Calling "
                    + click.style(command, fg="green", bold=True)
                    + " for stage "
                    + click.style(self.api_stage, bold=True)
                    + ".."
                )

        # Explicitly define the app function.
        # Related: https://github.com/Miserlou/Zappa/issues/832
        if self.vargs.get("app_function", None):
            self.app_function = self.vargs["app_function"]

        # Load our settings, based on api_stage.
        try:
            self.load_settings(self.vargs.get("settings_file"))
        except ValueError as e:
            if hasattr(e, "message"):
                print("Error: {}".format(e.message))
            else:
                print(str(e))
            sys.exit(-1)
        self.callback("settings")

        # Hand it off
        # NOTE(review): the first three branches use `if` rather than `elif`;
        # behavior is unaffected since `command` is a single value, but the
        # chain reads inconsistently with the `elif`s below.
        if command == "deploy":  # pragma: no cover
            self.deploy(self.vargs["zip"], self.vargs["docker_image_uri"])
        if command == "package":  # pragma: no cover
            self.package(self.vargs["output"])
        if command == "template":  # pragma: no cover
            self.template(
                self.vargs["lambda_arn"],
                self.vargs["role_arn"],
                output=self.vargs["output"],
                json=self.vargs["json"],
            )
        elif command == "update":  # pragma: no cover
            self.update(
                self.vargs["zip"],
                self.vargs["no_upload"],
                self.vargs["docker_image_uri"],
            )
        elif command == "rollback":  # pragma: no cover
            self.rollback(self.vargs["num_rollback"])
        elif command == "invoke":  # pragma: no cover
            if not self.vargs.get("command_rest"):
                print("Please enter the function to invoke.")
                return
            self.invoke(
                self.vargs["command_rest"],
                raw_python=self.vargs["raw"],
                no_color=self.vargs["no_color"],
            )
        elif command == "manage":  # pragma: no cover
            if not self.vargs.get("command_rest"):
                print("Please enter the management command to invoke.")
                return
            if not self.django_settings:
                print("This command is for Django projects only!")
                print(
                    "If this is a Django project, please define django_settings in your zappa_settings."
                )
                return
            # NOTE(review): `command` (the parameter) is shadowed here with the
            # management command string before being passed to invoke().
            command_tail = self.vargs.get("command_rest")
            if len(command_tail) > 1:
                command = " ".join(
                    command_tail
                )  # ex: zappa manage dev "shell --version"
            else:
                command = command_tail[0]  # ex: zappa manage dev showmigrations admin
            self.invoke(
                command,
                command="manage",
                no_color=self.vargs["no_color"],
            )
        elif command == "tail":  # pragma: no cover
            self.tail(
                colorize=(not self.vargs["no_color"]),
                http=self.vargs["http"],
                non_http=self.vargs["non_http"],
                since=self.vargs["since"],
                filter_pattern=self.vargs["filter"],
                force_colorize=self.vargs["force_color"] or None,
                keep_open=not self.vargs["disable_keep_open"],
            )
        elif command == "undeploy":  # pragma: no cover
            self.undeploy(
                no_confirm=self.vargs["yes"], remove_logs=self.vargs["remove_logs"]
            )
        elif command == "schedule":  # pragma: no cover
            self.schedule()
        elif command == "unschedule":  # pragma: no cover
            self.unschedule()
        elif command == "status":  # pragma: no cover
            self.status(return_json=self.vargs["json"])
        elif command == "certify":  # pragma: no cover
            self.certify(no_confirm=self.vargs["yes"], manual=self.vargs["manual"])
        elif command == "shell":  # pragma: no cover
            self.shell()
        elif command == "save-python-settings-file":  # pragma: no cover
            self.save_python_settings_file(self.vargs["output_path"])
##
# The Commands
##
def save_python_settings_file(self, output_path=None):
settings_path = output_path or "zappa_settings.py"
print(
"Generating Zappa settings Python file and saving to {}".format(
settings_path
)
)
if not settings_path.endswith("zappa_settings.py"):
raise ValueError("Settings file must be named zappa_settings.py")
zappa_settings_s = self.get_zappa_settings_string()
with open(settings_path, "w") as f_out:
f_out.write(zappa_settings_s)
def package(self, output=None):
"""
Only build the package
"""
# Make sure we're in a venv.
self.check_venv()
# force not to delete the local zip
self.override_stage_config_setting("delete_local_zip", False)
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Create the Lambda Zip
self.create_package(output)
self.callback("zip")
size = human_size(os.path.getsize(self.zip_path))
click.echo(
click.style("Package created", fg="green", bold=True)
+ ": "
+ click.style(self.zip_path, bold=True)
+ " ("
+ size
+ ")"
)
def template(self, lambda_arn, role_arn, output=None, json=False):
"""
Only build the template file.
"""
if not lambda_arn:
raise ClickException("Lambda ARN is required to template.")
if not role_arn:
raise ClickException("Role ARN is required to template.")
self.zappa.credentials_arn = role_arn
# Create the template!
template = self.zappa.create_stack_template(
lambda_arn=lambda_arn,
lambda_name=self.lambda_name,
api_key_required=self.api_key_required,
iam_authorization=self.iam_authorization,
authorizer=self.authorizer,
cors_options=self.cors,
description=self.apigateway_description,
endpoint_configuration=self.endpoint_configuration,
)
if not output:
template_file = (
self.lambda_name + "-template-" + str(int(time.time())) + ".json"
)
else:
template_file = output
with open(template_file, "wb") as out:
out.write(
bytes(template.to_json(indent=None, separators=(",", ":")), "utf-8")
)
if not json:
click.echo(
click.style("Template created", fg="green", bold=True)
+ ": "
+ click.style(template_file, bold=True)
)
else:
with open(template_file, "r") as out:
print(out.read())
    def deploy(self, source_zip=None, docker_image_uri=None):
        """
        Package your project, upload it to S3, register the Lambda function
        and create the API Gateway routes.

        :param source_zip: Optional prebuilt package, local path or s3:// URL.
        :param docker_image_uri: Optional ECR image URI to deploy instead of a zip.
        """
        # NOTE(review): this parses as `(not source_zip) or docker_image_uri`,
        # i.e. roles are managed unless deploying from a plain source zip with
        # no docker image — confirm that precedence is intended.
        if not source_zip or docker_image_uri:
            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError as ce:
                    raise ClickException(
                        click.style("Failed", fg="red")
                        + " to "
                        + click.style("manage IAM roles", bold=True)
                        + "!\n"
                        + "You may "
                        + click.style("lack the necessary AWS permissions", bold=True)
                        + " to automatically manage a Zappa execution role.\n"
                        + click.style("Exception reported by AWS:", bold=True)
                        + format(ce)
                        + "\n"
                        + "To fix this, see here: "
                        + click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True,
                        )
                        + "\n"
                    )

        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) > 0:
            raise ClickException(
                "This application is "
                + click.style("already deployed", fg="red")
                + " - did you mean to call "
                + click.style("update", bold=True)
                + "?"
            )

        if not source_zip and not docker_image_uri:
            # Make sure we're in a venv.
            self.check_venv()

            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()

            # Create the Lambda Zip
            self.create_package()
            self.callback("zip")

            # Upload it to S3
            success = self.zappa.upload_to_s3(
                self.zip_path,
                self.s3_bucket_name,
                disable_progress=self.disable_progress,
            )
            if not success:  # pragma: no cover
                raise ClickException("Unable to upload to S3. Quitting.")

            # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
            if self.stage_config.get("slim_handler", False):
                # https://github.com/Miserlou/Zappa/issues/510
                success = self.zappa.upload_to_s3(
                    self.handler_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException("Unable to upload handler to S3. Quitting.")

                # Copy the project zip to the current project zip
                current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                    self.api_stage, self.project_name
                )
                success = self.zappa.copy_on_s3(
                    src_file_name=self.zip_path,
                    dst_file_name=current_project_name,
                    bucket_name=self.s3_bucket_name,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to copy the zip to be the current project. Quitting."
                    )

                handler_file = self.handler_path
            else:
                handler_file = self.zip_path

        # Fixes https://github.com/Miserlou/Zappa/issues/613
        try:
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name
            )
        except botocore.client.ClientError:
            # Register the Lambda function with that zip as the source
            # You'll also need to define the path to your lambda_handler code.
            kwargs = dict(
                handler=self.lambda_handler,
                description=self.lambda_description,
                vpc_config=self.vpc_config,
                dead_letter_config=self.dead_letter_config,
                timeout=self.timeout_seconds,
                memory_size=self.memory_size,
                runtime=self.runtime,
                aws_environment_variables=self.aws_environment_variables,
                aws_kms_key_arn=self.aws_kms_key_arn,
                use_alb=self.use_alb,
                layers=self.layers,
                concurrency=self.lambda_concurrency,
            )
            kwargs["function_name"] = self.lambda_name
            # Pick the code source: docker image, s3:// zip, local zip file,
            # or the package we just uploaded.
            if docker_image_uri:
                kwargs["docker_image_uri"] = docker_image_uri
            elif source_zip and source_zip.startswith("s3://"):
                bucket, key_name = parse_s3_url(source_zip)
                kwargs["bucket"] = bucket
                kwargs["s3_key"] = key_name
            elif source_zip and not source_zip.startswith("s3://"):
                with open(source_zip, mode="rb") as fh:
                    byte_stream = fh.read()
                kwargs["local_zip"] = byte_stream
            else:
                kwargs["bucket"] = self.s3_bucket_name
                kwargs["s3_key"] = handler_file
            self.lambda_arn = self.zappa.create_lambda_function(**kwargs)

        # Schedule events for this deployment
        self.schedule()

        endpoint_url = ""
        deployment_string = (
            click.style("Deployment complete", fg="green", bold=True) + "!"
        )

        if self.use_alb:
            kwargs = dict(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                alb_vpc_config=self.alb_vpc_config,
                timeout=self.timeout_seconds,
            )
            self.zappa.deploy_lambda_alb(**kwargs)

        if self.use_apigateway:
            # Create and configure the API Gateway
            # NOTE(review): `template` is unused here; update_stack reads the
            # generated template internally — TODO confirm.
            template = self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration,
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                disable_progress=self.disable_progress,
            )
            api_id = self.zappa.get_api_id(self.lambda_name)

            # Add binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)

            # Add payload compression
            if self.stage_config.get("payload_compression", True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get(
                        "payload_minimum_compression_size", 0
                    ),
                )

            # Deploy the API!
            endpoint_url = self.deploy_api_gateway(api_id)
            deployment_string = deployment_string + ": {}".format(endpoint_url)

            # Create/link API key
            if self.api_key_required:
                if self.api_key is None:
                    self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
                else:
                    self.zappa.add_api_stage_to_api_key(
                        api_key=self.api_key, api_id=api_id, stage_name=self.api_stage
                    )

            if self.stage_config.get("touch", True):
                self.zappa.wait_until_lambda_function_is_updated(
                    function_name=self.lambda_name
                )
                self.touch_endpoint(endpoint_url)

        # Finally, delete the local copy our zip package
        if not source_zip and not docker_image_uri:
            if self.stage_config.get("delete_local_zip", True):
                self.remove_local_zip()

        # Remove the project zip from S3.
        if not source_zip and not docker_image_uri:
            self.remove_uploaded_zip()

        self.callback("post")

        click.echo(deployment_string)
    def update(self, source_zip=None, no_upload=False, docker_image_uri=None):
        """
        Repackage and update the function code.

        :param source_zip: Optional prebuilt package, local path or s3:// URL.
        :param no_upload: Update configuration only; skip uploading new code.
        :param docker_image_uri: Optional ECR image URI to update to.
        """
        if not source_zip and not docker_image_uri:
            # Make sure we're in a venv.
            self.check_venv()

            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()

            # Temporary version check
            try:
                updated_time = 1472581018
                function_response = self.zappa.lambda_client.get_function(
                    FunctionName=self.lambda_name
                )
                conf = function_response["Configuration"]
                # NOTE(review): `parser` here must be dateutil's parser from the
                # module-level imports (the argparse `parser` is local to
                # handle()) — TODO confirm.
                last_updated = parser.parse(conf["LastModified"])
                last_updated_unix = time.mktime(last_updated.timetuple())
            except botocore.exceptions.BotoCoreError as e:
                click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0])
                sys.exit(-1)
            except Exception as e:
                click.echo(
                    click.style("Warning!", fg="red")
                    + " Couldn't get function "
                    + self.lambda_name
                    + " in "
                    + self.zappa.aws_region
                    + " - have you deployed yet?"
                )
                sys.exit(-1)

            if last_updated_unix <= updated_time:
                click.echo(
                    click.style("Warning!", fg="red")
                    + " You may have upgraded Zappa since deploying this application. You will need to "
                    + click.style("redeploy", bold=True)
                    + " for this deployment to work properly!"
                )

            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError:
                    click.echo(
                        click.style("Failed", fg="red")
                        + " to "
                        + click.style("manage IAM roles", bold=True)
                        + "!"
                    )
                    click.echo(
                        "You may "
                        + click.style("lack the necessary AWS permissions", bold=True)
                        + " to automatically manage a Zappa execution role."
                    )
                    click.echo(
                        "To fix this, see here: "
                        + click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True,
                        )
                    )
                    sys.exit(-1)

            # Create the Lambda Zip,
            if not no_upload:
                self.create_package()
                self.callback("zip")

            # Upload it to S3
            if not no_upload:
                success = self.zappa.upload_to_s3(
                    self.zip_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException("Unable to upload project to S3. Quitting.")

                # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
                if self.stage_config.get("slim_handler", False):
                    # https://github.com/Miserlou/Zappa/issues/510
                    success = self.zappa.upload_to_s3(
                        self.handler_path,
                        self.s3_bucket_name,
                        disable_progress=self.disable_progress,
                    )
                    if not success:  # pragma: no cover
                        raise ClickException(
                            "Unable to upload handler to S3. Quitting."
                        )

                    # Copy the project zip to the current project zip
                    current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                        self.api_stage, self.project_name
                    )
                    success = self.zappa.copy_on_s3(
                        src_file_name=self.zip_path,
                        dst_file_name=current_project_name,
                        bucket_name=self.s3_bucket_name,
                    )
                    if not success:  # pragma: no cover
                        raise ClickException(
                            "Unable to copy the zip to be the current project. Quitting."
                        )

                    handler_file = self.handler_path
                else:
                    handler_file = self.zip_path

        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        kwargs = dict(
            bucket=self.s3_bucket_name,
            function_name=self.lambda_name,
            num_revisions=self.num_retained_versions,
            concurrency=self.lambda_concurrency,
        )
        # Pick the code source: docker image, s3:// zip, local zip file, or
        # the package we just uploaded (unless --no-upload).
        if docker_image_uri:
            kwargs["docker_image_uri"] = docker_image_uri
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        elif source_zip and source_zip.startswith("s3://"):
            bucket, key_name = parse_s3_url(source_zip)
            kwargs.update(dict(bucket=bucket, s3_key=key_name))
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        elif source_zip and not source_zip.startswith("s3://"):
            with open(source_zip, mode="rb") as fh:
                byte_stream = fh.read()
                kwargs["local_zip"] = byte_stream
                self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        else:
            if not no_upload:
                kwargs["s3_key"] = handler_file
                self.lambda_arn = self.zappa.update_lambda_function(**kwargs)

        # Remove the uploaded zip from S3, because it is now registered..
        if not source_zip and not no_upload and not docker_image_uri:
            self.remove_uploaded_zip()

        # Update the configuration, in case there are changes.
        self.lambda_arn = self.zappa.update_lambda_configuration(
            lambda_arn=self.lambda_arn,
            function_name=self.lambda_name,
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            aws_environment_variables=self.aws_environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn,
            layers=self.layers,
            wait=False,
        )

        # Finally, delete the local copy our zip package
        if not source_zip and not no_upload and not docker_image_uri:
            if self.stage_config.get("delete_local_zip", True):
                self.remove_local_zip()

        if self.use_apigateway:
            self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration,
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                update_only=True,
                disable_progress=self.disable_progress,
            )

            api_id = self.zappa.get_api_id(self.lambda_name)

            # Update binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
            else:
                self.zappa.remove_binary_support(api_id=api_id, cors=self.cors)

            if self.stage_config.get("payload_compression", True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get(
                        "payload_minimum_compression_size", 0
                    ),
                )
            else:
                self.zappa.remove_api_compression(api_id=api_id)

            # It looks a bit like we might actually be using this just to get the URL,
            # but we're also updating a few of the APIGW settings.
            endpoint_url = self.deploy_api_gateway(api_id)

            if self.stage_config.get("domain", None):
                endpoint_url = self.stage_config.get("domain")
        else:
            endpoint_url = None

        self.schedule()

        # Update any cognito pool with the lambda arn
        # do this after schedule as schedule clears the lambda policy and we need to add one
        self.update_cognito_triggers()

        self.callback("post")

        if endpoint_url and "https://" not in endpoint_url:
            endpoint_url = "https://" + endpoint_url

        if self.base_path:
            endpoint_url += "/" + self.base_path

        deployed_string = (
            "Your updated Zappa deployment is "
            + click.style("live", fg="green", bold=True)
            + "!"
        )
        if self.use_apigateway:
            deployed_string = (
                deployed_string
                + ": "
                + click.style("{}".format(endpoint_url), bold=True)
            )

            api_url = None
            if endpoint_url and "amazonaws.com" not in endpoint_url:
                api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)

                if endpoint_url != api_url:
                    deployed_string = deployed_string + " (" + api_url + ")"

            if self.stage_config.get("touch", True):
                self.zappa.wait_until_lambda_function_is_updated(
                    function_name=self.lambda_name
                )
                if api_url:
                    self.touch_endpoint(api_url)
                elif endpoint_url:
                    self.touch_endpoint(endpoint_url)

        click.echo(deployed_string)
def rollback(self, revision):
"""
Rollsback the currently deploy lambda code to a previous revision.
"""
print("Rolling back..")
self.zappa.rollback_lambda_function_version(
self.lambda_name, versions_back=revision
)
print("Done!")
def tail(
self,
since,
filter_pattern,
limit=10000,
keep_open=True,
colorize=True,
http=False,
non_http=False,
force_colorize=False,
):
"""
Tail this function's logs.
if keep_open, do so repeatedly, printing any new logs
"""
try:
since_stamp = string_to_timestamp(since)
last_since = since_stamp
while True:
new_logs = self.zappa.fetch_logs(
self.lambda_name,
start_time=since_stamp,
limit=limit,
filter_pattern=filter_pattern,
)
new_logs = [e for e in new_logs if e["timestamp"] > last_since]
self.print_logs(new_logs, colorize, http, non_http, force_colorize)
if not keep_open:
break
if new_logs:
last_since = new_logs[-1]["timestamp"]
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
# Die gracefully
try:
sys.exit(0)
except SystemExit:
os._exit(130)
def undeploy(self, no_confirm=False, remove_logs=False):
"""
Tear down an existing deployment.
"""
if not no_confirm: # pragma: no cover
confirm = input("Are you sure you want to undeploy? [y/n] ")
if confirm != "y":
return
if self.use_alb:
self.zappa.undeploy_lambda_alb(self.lambda_name)
if self.use_apigateway:
if remove_logs:
self.zappa.remove_api_gateway_logs(self.lambda_name)
domain_name = self.stage_config.get("domain", None)
base_path = self.stage_config.get("base_path", None)
# Only remove the api key when not specified
if self.api_key_required and self.api_key is None:
api_id = self.zappa.get_api_id(self.lambda_name)
self.zappa.remove_api_key(api_id, self.api_stage)
gateway_id = self.zappa.undeploy_api_gateway(
self.lambda_name, domain_name=domain_name, base_path=base_path
)
self.unschedule() # removes event triggers, including warm up event.
self.zappa.delete_lambda_function(self.lambda_name)
if remove_logs:
self.zappa.remove_lambda_function_logs(self.lambda_name)
click.echo(click.style("Done", fg="green", bold=True) + "!")
def update_cognito_triggers(self):
"""
Update any cognito triggers
"""
if self.cognito:
user_pool = self.cognito.get("user_pool")
triggers = self.cognito.get("triggers", [])
lambda_configs = set()
for trigger in triggers:
lambda_configs.add(trigger["source"].split("_")[0])
self.zappa.update_cognito(
self.lambda_name, user_pool, lambda_configs, self.lambda_arn
)
    def schedule(self):
        """
        Given a a list of functions and a schedule to execute them,
        setup up regular execution.

        Also provisions the optional async SNS topic and async-response
        DynamoDB table when configured. Exits the process if the Lambda
        function has not been deployed yet.
        """
        events = self.stage_config.get("events", [])

        if events:
            if not isinstance(events, list):  # pragma: no cover
                print("Events must be supplied as a list.")
                return

            for event in events:
                self.collision_warning(event.get("function"))

        if self.stage_config.get("keep_warm", True):
            if not events:
                events = []

            keep_warm_rate = self.stage_config.get(
                "keep_warm_expression", "rate(4 minutes)"
            )
            # The keep-warm ping is modeled as just another scheduled event.
            events.append(
                {
                    "name": "zappa-keep-warm",
                    "function": "handler.keep_warm_callback",
                    "expression": keep_warm_rate,
                    "description": "Zappa Keep Warm - {}".format(self.lambda_name),
                }
            )

        if events:
            try:
                function_response = self.zappa.lambda_client.get_function(
                    FunctionName=self.lambda_name
                )
            except botocore.exceptions.ClientError as e:  # pragma: no cover
                click.echo(
                    click.style("Function does not exist", fg="yellow")
                    + ", please "
                    + click.style("deploy", bold=True)
                    + "first. Ex:"
                    + click.style("zappa deploy {}.".format(self.api_stage), bold=True)
                )
                sys.exit(-1)

            print("Scheduling..")
            self.zappa.schedule_events(
                lambda_arn=function_response["Configuration"]["FunctionArn"],
                lambda_name=self.lambda_name,
                events=events,
            )

        # Add async tasks SNS
        if self.stage_config.get(
            "async_source", None
        ) == "sns" and self.stage_config.get("async_resources", True):
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name
            )
            topic_arn = self.zappa.create_async_sns_topic(
                lambda_name=self.lambda_name, lambda_arn=self.lambda_arn
            )
            click.echo("SNS Topic created: %s" % topic_arn)

        # Add async tasks DynamoDB
        table_name = self.stage_config.get("async_response_table", False)
        read_capacity = self.stage_config.get("async_response_table_read_capacity", 1)
        write_capacity = self.stage_config.get("async_response_table_write_capacity", 1)
        if table_name and self.stage_config.get("async_resources", True):
            created, response_table = self.zappa.create_async_dynamodb_table(
                table_name, read_capacity, write_capacity
            )
            if created:
                click.echo("DynamoDB table created: %s" % table_name)
            else:
                click.echo("DynamoDB table exists: %s" % table_name)
                # Warn (but don't fail) if the existing table's capacity
                # doesn't match the configured values.
                provisioned_throughput = response_table["Table"][
                    "ProvisionedThroughput"
                ]
                if (
                    provisioned_throughput["ReadCapacityUnits"] != read_capacity
                    or provisioned_throughput["WriteCapacityUnits"] != write_capacity
                ):
                    click.echo(
                        click.style(
                            "\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(
                                table_name
                            ),
                            fg="red",
                        )
                    )
def unschedule(self):
"""
Given a a list of scheduled functions,
tear down their regular execution.
"""
# Run even if events are not defined to remove previously existing ones (thus default to []).
events = self.stage_config.get("events", [])
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
function_arn = None
try:
function_response = self.zappa.lambda_client.get_function(
FunctionName=self.lambda_name
)
function_arn = function_response["Configuration"]["FunctionArn"]
except botocore.exceptions.ClientError as e: # pragma: no cover
raise ClickException(
"Function does not exist, you should deploy first. Ex: zappa deploy {}. "
"Proceeding to unschedule CloudWatch based events.".format(
self.api_stage
)
)
print("Unscheduling..")
self.zappa.unschedule_events(
lambda_name=self.lambda_name,
lambda_arn=function_arn,
events=events,
)
# Remove async task SNS
if self.stage_config.get(
"async_source", None
) == "sns" and self.stage_config.get("async_resources", True):
removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
click.echo("SNS Topic removed: %s" % ", ".join(removed_arns))
def invoke(self, function_name, raw_python=False, command=None, no_color=False):
"""
Invoke a remote function.
"""
# There are three likely scenarios for 'command' here:
# command, which is a modular function path
# raw_command, which is a string of python to execute directly
# manage, which is a Django-specific management command invocation
key = command if command is not None else "command"
if raw_python:
command = {"raw_command": function_name}
else:
command = {key: function_name}
# Can't use hjson
import json as json
response = self.zappa.invoke_lambda_function(
self.lambda_name,
json.dumps(command),
invocation_type="RequestResponse",
)
print(self.format_lambda_response(response, not no_color))
# For a successful request FunctionError is not in response.
# https://github.com/Miserlou/Zappa/pull/1254/
if "FunctionError" in response:
raise ClickException(
"{} error occurred while invoking command.".format(
response["FunctionError"]
)
)
def format_lambda_response(self, response, colorize=True):
if "LogResult" in response:
logresult_bytes = base64.b64decode(response["LogResult"])
try:
decoded = logresult_bytes.decode()
except UnicodeDecodeError:
return logresult_bytes
else:
if colorize and sys.stdout.isatty():
formatted = self.format_invoke_command(decoded)
return self.colorize_invoke_command(formatted)
else:
return decoded
else:
return response
def format_invoke_command(self, string):
"""
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
"""
string = string.replace("\\n", "\n")
formated_response = ""
for line in string.splitlines():
if line.startswith("REPORT"):
line = line.replace("\t", "\n")
if line.startswith("[DEBUG]"):
line = line.replace("\t", " ")
formated_response += line + "\n"
formated_response = formated_response.replace("\n\n", "\n")
return formated_response
    def colorize_invoke_command(self, string):
        """
        Apply various heuristics to return a colorized version of the invoke
        command string. If these fail, simply return the string in plaintext.
        Inspired by colorize_log_entry().
        """
        final_string = string
        try:
            # Line headers: cyan-bold the START/END/REPORT/[DEBUG] markers.
            try:
                for token in ["START", "END", "REPORT", "[DEBUG]"]:
                    if token in final_string:
                        format_string = "[{}]"
                        # match whole words only
                        pattern = r"\b{}\b"
                        if token == "[DEBUG]":
                            # "[DEBUG]" already has brackets and contains regex
                            # metacharacters, so escape it and keep it as-is.
                            format_string = "{}"
                            pattern = re.escape(token)
                        repl = click.style(
                            format_string.format(token), bold=True, fg="cyan"
                        )
                        # For the escaped "[DEBUG]" pattern there is no "{}"
                        # placeholder, so .format(token) is a no-op there.
                        final_string = re.sub(pattern.format(token), repl, final_string)
            except Exception:  # pragma: no cover
                pass
            # Green bold Tokens: highlight the well-known report field labels.
            try:
                for token in [
                    "Zappa Event:",
                    "RequestId:",
                    "Version:",
                    "Duration:",
                    "Billed",
                    "Memory Size:",
                    "Max Memory Used:",
                ]:
                    if token in final_string:
                        final_string = final_string.replace(
                            token, click.style(token, bold=True, fg="green")
                        )
            except Exception:  # pragma: no cover
                pass
            # UUIDs: anything with four hyphens whose remaining characters are
            # alphanumeric is treated as a request id and colored magenta.
            for token in final_string.replace("\t", " ").split(" "):
                try:
                    if token.count("-") == 4 and token.replace("-", "").isalnum():
                        final_string = final_string.replace(
                            token, click.style(token, fg="magenta")
                        )
                except Exception:  # pragma: no cover
                    pass
            return final_string
        except Exception:
            # Any unexpected failure: fall back to the uncolored input.
            return string
def status(self, return_json=False):
"""
Describe the status of the current deployment.
"""
def tabular_print(title, value):
"""
Convenience function for priting formatted table items.
"""
click.echo(
"%-*s%s" % (32, click.style("\t" + title, fg="green") + ":", str(value))
)
return
# Lambda Env Details
lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
if not lambda_versions:
raise ClickException(
click.style(
"No Lambda %s detected in %s - have you deployed yet?"
% (self.lambda_name, self.zappa.aws_region),
fg="red",
)
)
status_dict = collections.OrderedDict()
status_dict["Lambda Versions"] = len(lambda_versions)
function_response = self.zappa.lambda_client.get_function(
FunctionName=self.lambda_name
)
conf = function_response["Configuration"]
self.lambda_arn = conf["FunctionArn"]
status_dict["Lambda Name"] = self.lambda_name
status_dict["Lambda ARN"] = self.lambda_arn
status_dict["Lambda Role ARN"] = conf["Role"]
status_dict["Lambda Code Size"] = conf["CodeSize"]
status_dict["Lambda Version"] = conf["Version"]
status_dict["Lambda Last Modified"] = conf["LastModified"]
status_dict["Lambda Memory Size"] = conf["MemorySize"]
status_dict["Lambda Timeout"] = conf["Timeout"]
# Handler & Runtime won't be present for lambda Docker deployments
# https://github.com/Miserlou/Zappa/issues/2188
status_dict["Lambda Handler"] = conf.get("Handler", "")
status_dict["Lambda Runtime"] = conf.get("Runtime", "")
if "VpcConfig" in conf.keys():
status_dict["Lambda VPC ID"] = conf.get("VpcConfig", {}).get(
"VpcId", "Not assigned"
)
else:
status_dict["Lambda VPC ID"] = None
# Calculated statistics
try:
function_invocations = self.zappa.cloudwatch.get_metric_statistics(
Namespace="AWS/Lambda",
MetricName="Invocations",
StartTime=datetime.utcnow() - timedelta(days=1),
EndTime=datetime.utcnow(),
Period=1440,
Statistics=["Sum"],
Dimensions=[
{"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
],
)["Datapoints"][0]["Sum"]
except Exception as e:
function_invocations = 0
try:
function_errors = self.zappa.cloudwatch.get_metric_statistics(
Namespace="AWS/Lambda",
MetricName="Errors",
StartTime=datetime.utcnow() - timedelta(days=1),
EndTime=datetime.utcnow(),
Period=1440,
Statistics=["Sum"],
Dimensions=[
{"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
],
)["Datapoints"][0]["Sum"]
except Exception as e:
function_errors = 0
try:
error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
except:
error_rate = "Error calculating"
status_dict["Invocations (24h)"] = int(function_invocations)
status_dict["Errors (24h)"] = int(function_errors)
status_dict["Error Rate (24h)"] = error_rate
# URLs
if self.use_apigateway:
api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)
status_dict["API Gateway URL"] = api_url
# Api Keys
api_id = self.zappa.get_api_id(self.lambda_name)
for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
status_dict["API Gateway x-api-key"] = api_key
# There literally isn't a better way to do this.
# AWS provides no way to tie a APIGW domain name to its Lambda function.
domain_url = self.stage_config.get("domain", None)
base_path = self.stage_config.get("base_path", None)
if domain_url:
status_dict["Domain URL"] = "https://" + domain_url
if base_path:
status_dict["Domain URL"] += "/" + base_path
else:
status_dict["Domain URL"] = "None Supplied"
# Scheduled Events
event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
status_dict["Num. Event Rules"] = len(event_rules)
if len(event_rules) > 0:
status_dict["Events"] = []
for rule in event_rules:
event_dict = {}
rule_name = rule["Name"]
event_dict["Event Rule Name"] = rule_name
event_dict["Event Rule Schedule"] = rule.get("ScheduleExpression", None)
event_dict["Event Rule State"] = rule.get("State", None).title()
event_dict["Event Rule ARN"] = rule.get("Arn", None)
status_dict["Events"].append(event_dict)
if return_json:
# Putting the status in machine readable format
# https://github.com/Miserlou/Zappa/issues/407
print(json.dumpsJSON(status_dict))
else:
click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
for k, v in status_dict.items():
if k == "Events":
# Events are a list of dicts
for event in v:
for item_k, item_v in event.items():
tabular_print(item_k, item_v)
else:
tabular_print(k, v)
# TODO: S3/SQS/etc. type events?
return True
def check_stage_name(self, stage_name):
"""
Make sure the stage name matches the AWS-allowed pattern
(calls to apigateway_client.create_deployment, will fail with error
message "ClientError: An error occurred (BadRequestException) when
calling the CreateDeployment operation: Stage name only allows
a-zA-Z0-9_" if the pattern does not match)
"""
if not self.use_apigateway:
return True
if self.stage_name_env_pattern.match(stage_name):
return True
raise ValueError(
"API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(
stage_name
)
)
def check_environment(self, environment):
"""
Make sure the environment contains only strings
(since putenv needs a string)
"""
non_strings = []
for (k, v) in environment.items():
if not isinstance(v, basestring):
non_strings.append(k)
if non_strings:
raise ValueError(
"The following environment variables are not strings: {}".format(
", ".join(non_strings)
)
)
else:
return True
    def init(self, settings_file="zappa_settings.json"):
        """
        Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.
        This should probably be broken up into few separate components once it's stable.
        Testing these inputs requires monkeypatching with mock, which isn't pretty.
        """
        # Make sure we're in a venv.
        self.check_venv()
        # Ensure that we don't already have a zappa_settings file.
        if os.path.isfile(settings_file):
            raise ClickException(
                "This project already has a "
                + click.style("{0!s} file".format(settings_file), fg="red", bold=True)
                + "!"
            )
        # Explain system.
        click.echo(
            click.style(
                """\n███████╗ █████╗ ██████╗ ██████╗  █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
  ███╔╝ ███████║██████╔╝██████╔╝███████║
 ███╔╝  ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║  ██║██║     ██║     ██║  ██║
╚══════╝╚═╝  ╚═╝╚═╝     ╚═╝     ╚═╝  ╚═╝\n""",
                fg="green",
                bold=True,
            )
        )
        click.echo(
            click.style("Welcome to ", bold=True)
            + click.style("Zappa", fg="green", bold=True)
            + click.style("!\n", bold=True)
        )
        click.echo(
            click.style("Zappa", bold=True)
            + " is a system for running server-less Python web applications"
            " on AWS Lambda and AWS API Gateway."
        )
        click.echo(
            "This `init` command will help you create and configure your new Zappa deployment."
        )
        click.echo("Let's get started!\n")
        # Create Env
        # Loop until the user supplies a stage name accepted by check_stage_name().
        while True:
            click.echo(
                "Your Zappa configuration can support multiple production stages, like '"
                + click.style("dev", bold=True)
                + "', '"
                + click.style("staging", bold=True)
                + "', and '"
                + click.style("production", bold=True)
                + "'."
            )
            env = (
                input("What do you want to call this environment (default 'dev'): ")
                or "dev"
            )
            try:
                self.check_stage_name(env)
                break
            except ValueError:
                click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))
        # Detect AWS profiles and regions
        # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack
        session = botocore.session.Session()
        config = session.full_config
        profiles = config.get("profiles", {})
        profile_names = list(profiles.keys())
        click.echo(
            "\nAWS Lambda and API Gateway are only available in certain regions. "
            "Let's check to make sure you have a profile set up in one that will work."
        )
        if not profile_names:
            # No AWS credentials configured at all: warn and continue without one.
            profile_name, profile = None, None
            click.echo(
                "We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}".format(
                    click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)
                )
            )
        elif len(profile_names) == 1:
            profile_name = profile_names[0]
            profile = profiles[profile_name]
            click.echo(
                "Okay, using profile {}!".format(click.style(profile_name, bold=True))
            )
        else:
            # Several profiles available: suggest 'default' when present.
            if "default" in profile_names:
                default_profile = [p for p in profile_names if p == "default"][0]
            else:
                default_profile = profile_names[0]
            while True:
                profile_name = (
                    input(
                        "We found the following profiles: {}, and {}. "
                        "Which would you like us to use? (default '{}'): ".format(
                            ", ".join(profile_names[:-1]),
                            profile_names[-1],
                            default_profile,
                        )
                    )
                    or default_profile
                )
                if profile_name in profiles:
                    profile = profiles[profile_name]
                    break
                else:
                    click.echo("Please enter a valid name for your AWS profile.")
        profile_region = profile.get("region") if profile else None
        # Create Bucket
        click.echo(
            "\nYour Zappa deployments will need to be uploaded to a "
            + click.style("private S3 bucket", bold=True)
            + "."
        )
        click.echo("If you don't have a bucket yet, we'll create one for you too.")
        # Random 9-char suffix keeps the suggested bucket name globally unique.
        default_bucket = "zappa-" + "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
        )
        # Loop until the supplied name passes is_valid_bucket_name().
        while True:
            bucket = (
                input(
                    "What do you want to call your bucket? (default '%s'): "
                    % default_bucket
                )
                or default_bucket
            )
            if is_valid_bucket_name(bucket):
                break
            click.echo(click.style("Invalid bucket name!", bold=True))
            click.echo("S3 buckets must be named according to the following rules:")
            click.echo(
                """* Bucket names must be unique across all existing bucket names in Amazon S3.
* Bucket names must comply with DNS naming conventions.
* Bucket names must be at least 3 and no more than 63 characters long.
* Bucket names must not contain uppercase characters or underscores.
* Bucket names must start with a lowercase letter or number.
* Bucket names must be a series of one or more labels. Adjacent labels are separated
by a single period (.). Bucket names can contain lowercase letters, numbers, and
hyphens. Each label must start and end with a lowercase letter or a number.
* Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL
wildcard certificate only matches buckets that don't contain periods. To work around
this, use HTTP or write your own certificate verification logic. We recommend that
you do not use periods (".") in bucket names when using virtual hosted–style buckets.
"""
            )
        # Detect Django/Flask
        try:  # pragma: no cover
            import django
            has_django = True
        except ImportError as e:
            has_django = False
        try:  # pragma: no cover
            import flask
            has_flask = True
        except ImportError as e:
            has_flask = False
        print("")
        # App-specific
        if has_django:  # pragma: no cover
            click.echo(
                "It looks like this is a "
                + click.style("Django", bold=True)
                + " application!"
            )
            click.echo(
                "What is the "
                + click.style("module path", bold=True)
                + " to your projects's Django settings?"
            )
            django_settings = None
            matches = detect_django_settings()
            while django_settings in [None, ""]:
                if matches:
                    click.echo(
                        "We discovered: "
                        + click.style(
                            ", ".join("{}".format(i) for v, i in enumerate(matches)),
                            bold=True,
                        )
                    )
                    django_settings = (
                        input(
                            "Where are your project's settings? (default '%s'): "
                            % matches[0]
                        )
                        or matches[0]
                    )
                else:
                    click.echo(
                        "(This will likely be something like 'your_project.settings')"
                    )
                    django_settings = input("Where are your project's settings?: ")
            # Strip quote characters users often paste along with the path.
            django_settings = django_settings.replace("'", "")
            django_settings = django_settings.replace('"', "")
        else:
            matches = None
            if has_flask:
                click.echo(
                    "It looks like this is a "
                    + click.style("Flask", bold=True)
                    + " application."
                )
                matches = detect_flask_apps()
            click.echo(
                "What's the "
                + click.style("modular path", bold=True)
                + " to your app's function?"
            )
            click.echo("This will likely be something like 'your_module.app'.")
            app_function = None
            while app_function in [None, ""]:
                if matches:
                    click.echo(
                        "We discovered: "
                        + click.style(
                            ", ".join("{}".format(i) for v, i in enumerate(matches)),
                            bold=True,
                        )
                    )
                    app_function = (
                        input(
                            "Where is your app's function? (default '%s'): "
                            % matches[0]
                        )
                        or matches[0]
                    )
                else:
                    app_function = input("Where is your app's function?: ")
            # Strip quote characters users often paste along with the path.
            app_function = app_function.replace("'", "")
            app_function = app_function.replace('"', "")
        # TODO: Create VPC?
        # Memory size? Time limit?
        # Domain? LE keys? Region?
        # 'Advanced Settings' mode?
        # Globalize
        click.echo(
            "\nYou can optionally deploy to "
            + click.style("all available regions", bold=True)
            + " in order to provide fast global service."
        )
        click.echo(
            "If you are using Zappa for the first time, you probably don't want to do this!"
        )
        global_deployment = False
        while True:
            global_type = input(
                "Would you like to deploy this application "
                + click.style("globally", bold=True)
                + "? (default 'n') [y/n/(p)rimary]: "
            )
            if not global_type:
                break
            if global_type.lower() in ["y", "yes", "p", "primary"]:
                global_deployment = True
                break
            if global_type.lower() in ["n", "no"]:
                global_deployment = False
                break
        # The given environment name
        zappa_settings = {
            env: {
                "profile_name": profile_name,
                "s3_bucket": bucket,
                "runtime": get_venv_from_python_version(),
                "project_name": self.get_project_name(),
            }
        }
        if profile_region:
            zappa_settings[env]["aws_region"] = profile_region
        if has_django:
            zappa_settings[env]["django_settings"] = django_settings
        else:
            zappa_settings[env]["app_function"] = app_function
        # Global Region Deployment
        if global_deployment:
            additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region]
            # Create additional stages
            # '(p)rimary' restricts the extra stages to each region's "-1" primary.
            if global_type.lower() in ["p", "primary"]:
                additional_regions = [r for r in additional_regions if "-1" in r]
            for region in additional_regions:
                env_name = env + "_" + region.replace("-", "_")
                g_env = {env_name: {"extends": env, "aws_region": region}}
                zappa_settings.update(g_env)
        import json as json  # hjson is fine for loading, not fine for writing.
        zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)
        click.echo(
            "\nOkay, here's your "
            + click.style("zappa_settings.json", bold=True)
            + ":\n"
        )
        click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))
        confirm = (
            input(
                "\nDoes this look "
                + click.style("okay", bold=True, fg="green")
                + "? (default 'y') [y/n]: "
            )
            or "yes"
        )
        if confirm[0] not in ["y", "Y", "yes", "YES"]:
            click.echo(
                ""
                + click.style("Sorry", bold=True, fg="red")
                + " to hear that! Please init again."
            )
            return
        # Write
        with open("zappa_settings.json", "w") as zappa_settings_file:
            zappa_settings_file.write(zappa_settings_json)
        if global_deployment:
            click.echo(
                "\n"
                + click.style("Done", bold=True)
                + "! You can also "
                + click.style("deploy all", bold=True)
                + " by executing:\n"
            )
            click.echo(click.style("\t$ zappa deploy --all", bold=True))
            click.echo(
                "\nAfter that, you can "
                + click.style("update", bold=True)
                + " your application code with:\n"
            )
            click.echo(click.style("\t$ zappa update --all", bold=True))
        else:
            click.echo(
                "\n"
                + click.style("Done", bold=True)
                + "! Now you can "
                + click.style("deploy", bold=True)
                + " your Zappa application by executing:\n"
            )
            click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))
            click.echo(
                "\nAfter that, you can "
                + click.style("update", bold=True)
                + " your application code with:\n"
            )
            click.echo(click.style("\t$ zappa update %s" % env, bold=True))
        click.echo(
            "\nTo learn more, check out our project page on "
            + click.style("GitHub", bold=True)
            + " here: "
            + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True)
        )
        click.echo(
            "and stop by our "
            + click.style("Slack", bold=True)
            + " channel here: "
            + click.style("https://zappateam.slack.com", fg="cyan", bold=True)
        )
        click.echo("\nEnjoy!,")
        click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
        return
    def certify(self, no_confirm=True, manual=False):
        """
        Register or update a domain certificate for this env.

        Chooses one of three paths based on stage settings: Let's Encrypt
        (lets_encrypt_key), user-supplied certificate files (certificate,
        certificate_key, certificate_chain), or an existing ACM certificate
        (certificate_arn). Requires 'domain' to be configured and the app to
        already be deployed; raises ClickException otherwise.
        """
        if not self.domain:
            raise ClickException(
                "Can't certify a domain without "
                + click.style("domain", fg="red", bold=True)
                + " configured!"
            )
        if not no_confirm:  # pragma: no cover
            confirm = input("Are you sure you want to certify? [y/n] ")
            if confirm != "y":
                return
        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) == 0:
            raise ClickException(
                "This application "
                + click.style("isn't deployed yet", fg="red")
                + " - did you mean to call "
                + click.style("deploy", bold=True)
                + "?"
            )
        account_key_location = self.stage_config.get("lets_encrypt_key", None)
        cert_location = self.stage_config.get("certificate", None)
        cert_key_location = self.stage_config.get("certificate_key", None)
        cert_chain_location = self.stage_config.get("certificate_chain", None)
        cert_arn = self.stage_config.get("certificate_arn", None)
        base_path = self.stage_config.get("base_path", None)
        # These are sensitive
        certificate_body = None
        certificate_private_key = None
        certificate_chain = None
        # Prepare for custom Let's Encrypt
        # (no certificate files and no ACM ARN configured)
        if not cert_location and not cert_arn:
            if not account_key_location:
                raise ClickException(
                    "Can't certify a domain without "
                    + click.style("lets_encrypt_key", fg="red", bold=True)
                    + " or "
                    + click.style("certificate", fg="red", bold=True)
                    + " or "
                    + click.style("certificate_arn", fg="red", bold=True)
                    + " configured!"
                )
            # Get install account_key to /tmp/account_key.pem
            from .letsencrypt import gettempdir
            if account_key_location.startswith("s3://"):
                # The account key may live in S3; download it to the temp dir.
                bucket, key_name = parse_s3_url(account_key_location)
                self.zappa.s3_client.download_file(
                    bucket, key_name, os.path.join(gettempdir(), "account.key")
                )
            else:
                from shutil import copyfile
                copyfile(
                    account_key_location, os.path.join(gettempdir(), "account.key")
                )
        # Prepare for Custom SSL
        # (no Let's Encrypt key and no ACM ARN: expect all three cert files)
        elif not account_key_location and not cert_arn:
            if not cert_location or not cert_key_location or not cert_chain_location:
                raise ClickException(
                    "Can't certify a domain without "
                    + click.style(
                        "certificate, certificate_key and certificate_chain",
                        fg="red",
                        bold=True,
                    )
                    + " configured!"
                )
            # Read the supplied certificates.
            with open(cert_location) as f:
                certificate_body = f.read()
            with open(cert_key_location) as f:
                certificate_private_key = f.read()
            with open(cert_chain_location) as f:
                certificate_chain = f.read()
        click.echo(
            "Certifying domain "
            + click.style(self.domain, fg="green", bold=True)
            + ".."
        )
        # Get cert and update domain.
        # Let's Encrypt
        if not cert_location and not cert_arn:
            from .letsencrypt import get_cert_and_update_domain
            cert_success = get_cert_and_update_domain(
                self.zappa, self.lambda_name, self.api_stage, self.domain, manual
            )
        # Custom SSL / ACM
        else:
            route53 = self.stage_config.get("route53_enabled", True)
            if not self.zappa.get_domain_name(self.domain, route53=route53):
                # No API Gateway domain name yet: create one (and the Route 53
                # records, when enabled).
                dns_name = self.zappa.create_domain_name(
                    domain_name=self.domain,
                    certificate_name=self.domain + "-Zappa-Cert",
                    certificate_body=certificate_body,
                    certificate_private_key=certificate_private_key,
                    certificate_chain=certificate_chain,
                    certificate_arn=cert_arn,
                    lambda_name=self.lambda_name,
                    stage=self.api_stage,
                    base_path=base_path,
                )
                if route53:
                    self.zappa.update_route53_records(self.domain, dns_name)
                print(
                    "Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be "
                    "created and propagated through AWS, but it requires no further work on your part."
                )
            else:
                # Domain already exists: just update its certificate/base path.
                self.zappa.update_domain_name(
                    domain_name=self.domain,
                    certificate_name=self.domain + "-Zappa-Cert",
                    certificate_body=certificate_body,
                    certificate_private_key=certificate_private_key,
                    certificate_chain=certificate_chain,
                    certificate_arn=cert_arn,
                    lambda_name=self.lambda_name,
                    stage=self.api_stage,
                    route53=route53,
                    base_path=base_path,
                )
            cert_success = True
        if cert_success:
            click.echo(
                "Certificate " + click.style("updated", fg="green", bold=True) + "!"
            )
        else:
            click.echo(
                click.style("Failed", fg="red", bold=True)
                + " to generate or install certificate! :("
            )
            click.echo("\n==============\n")
            shamelessly_promote()
##
# Shell
##
def shell(self):
"""
Spawn a debug shell.
"""
click.echo(
click.style("NOTICE!", fg="yellow", bold=True)
+ " This is a "
+ click.style("local", fg="green", bold=True)
+ " shell, inside a "
+ click.style("Zappa", bold=True)
+ " object!"
)
self.zappa.shell()
return
##
# Utility
##
def callback(self, position):
"""
Allows the execution of custom code between creation of the zip file and deployment to AWS.
:return: None
"""
callbacks = self.stage_config.get("callbacks", {})
callback = callbacks.get(position)
if callback:
(mod_path, cb_func_name) = callback.rsplit(".", 1)
try: # Prefer callback in working directory
if mod_path.count(".") >= 1: # Callback function is nested in a folder
(mod_folder_path, mod_name) = mod_path.rsplit(".", 1)
mod_folder_path_fragments = mod_folder_path.split(".")
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(
mod_name
)
except (ImportError, AttributeError):
try: # Callback func might be in virtualenv
module_ = importlib.import_module(mod_path)
except ImportError: # pragma: no cover
raise ClickException(
click.style("Failed ", fg="red")
+ "to "
+ click.style(
"import {position} callback ".format(position=position),
bold=True,
)
+ 'module: "{mod_path}"'.format(
mod_path=click.style(mod_path, bold=True)
)
)
if not hasattr(module_, cb_func_name): # pragma: no cover
raise ClickException(
click.style("Failed ", fg="red")
+ "to "
+ click.style(
"find {position} callback ".format(position=position), bold=True
)
+ 'function: "{cb_func_name}" '.format(
cb_func_name=click.style(cb_func_name, bold=True)
)
+ 'in module "{mod_path}"'.format(mod_path=mod_path)
)
cb_func = getattr(module_, cb_func_name)
cb_func(self) # Call the function passing self
def check_for_update(self):
"""
Print a warning if there's a new Zappa version available.
"""
try:
version = pkg_resources.require("zappa")[0].version
updateable = check_new_version_available(version)
if updateable:
click.echo(
click.style("Important!", fg="yellow", bold=True)
+ " A new version of "
+ click.style("Zappa", bold=True)
+ " is available!"
)
click.echo(
"Upgrade with: "
+ click.style("pip install zappa --upgrade", bold=True)
)
click.echo(
"Visit the project page on GitHub to see the latest changes: "
+ click.style("https://github.com/Zappa/Zappa", bold=True)
)
except Exception as e: # pragma: no cover
print(e)
return
    def load_settings(self, settings_file=None, session=None):
        """
        Load the local zappa_settings file.
        An existing boto session can be supplied, though this is likely for testing purposes.
        Returns the loaded Zappa object.
        """
        # Ensure we're passed a valid settings file.
        if not settings_file:
            settings_file = self.get_json_or_yaml_settings()
        if not os.path.isfile(settings_file):
            raise ClickException("Please configure your zappa_settings file.")
        # Load up file
        self.load_settings_file(settings_file)
        # Make sure that this stage is our settings
        if self.api_stage not in self.zappa_settings.keys():
            raise ClickException(
                "Please define stage '{0!s}' in your Zappa settings.".format(
                    self.api_stage
                )
            )
        # We need a working title for this project. Use one if supplied, else cwd dirname.
        if "project_name" in self.stage_config:  # pragma: no cover
            # If the name is invalid, this will throw an exception with message up stack
            self.project_name = validate_name(self.stage_config["project_name"])
        else:
            self.project_name = self.get_project_name()
        # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
        # Assume that we already have have validated the name beforehand.
        # Related: https://github.com/Miserlou/Zappa/pull/664
        # https://github.com/Miserlou/Zappa/issues/678
        # And various others from Slack.
        self.lambda_name = slugify.slugify(self.project_name + "-" + self.api_stage)
        # Load stage-specific settings
        # Note: when no s3_bucket is configured, a random 9-char suffix makes
        # the generated default bucket name effectively unique.
        self.s3_bucket_name = self.stage_config.get(
            "s3_bucket",
            "zappa-"
            + "".join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
            ),
        )
        self.vpc_config = self.stage_config.get("vpc_config", {})
        self.memory_size = self.stage_config.get("memory_size", 512)
        self.app_function = self.stage_config.get("app_function", None)
        self.exception_handler = self.stage_config.get("exception_handler", None)
        self.aws_region = self.stage_config.get("aws_region", None)
        self.debug = self.stage_config.get("debug", True)
        self.prebuild_script = self.stage_config.get("prebuild_script", None)
        self.profile_name = self.stage_config.get("profile_name", None)
        self.log_level = self.stage_config.get("log_level", "DEBUG")
        self.domain = self.stage_config.get("domain", None)
        self.base_path = self.stage_config.get("base_path", None)
        self.timeout_seconds = self.stage_config.get("timeout_seconds", 30)
        dead_letter_arn = self.stage_config.get("dead_letter_arn", "")
        self.dead_letter_config = (
            {"TargetArn": dead_letter_arn} if dead_letter_arn else {}
        )
        self.cognito = self.stage_config.get("cognito", None)
        self.num_retained_versions = self.stage_config.get(
            "num_retained_versions", None
        )
        # Check for valid values of num_retained_versions
        if (
            self.num_retained_versions is not None
            and type(self.num_retained_versions) is not int
        ):
            raise ClickException(
                "Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s"
                % type(self.num_retained_versions)
            )
        elif type(self.num_retained_versions) is int and self.num_retained_versions < 1:
            raise ClickException(
                "The value for num_retained_versions in the zappa_settings.json should be greater than 0."
            )
        # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
        # https://github.com/Miserlou/Zappa/issues/490
        # https://github.com/Miserlou/Zappa/issues/493
        self.use_apigateway = self.stage_config.get("use_apigateway", True)
        if self.use_apigateway:
            self.use_apigateway = self.stage_config.get("apigateway_enabled", True)
            self.apigateway_description = self.stage_config.get(
                "apigateway_description", None
            )
        self.lambda_handler = self.stage_config.get(
            "lambda_handler", "handler.lambda_handler"
        )
        # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
        self.remote_env_bucket = self.stage_config.get("remote_env_bucket", None)
        self.remote_env_file = self.stage_config.get("remote_env_file", None)
        self.remote_env = self.stage_config.get("remote_env", None)
        self.settings_file = self.stage_config.get("settings_file", None)
        self.django_settings = self.stage_config.get("django_settings", None)
        self.manage_roles = self.stage_config.get("manage_roles", True)
        self.binary_support = self.stage_config.get("binary_support", True)
        self.api_key_required = self.stage_config.get("api_key_required", False)
        self.api_key = self.stage_config.get("api_key")
        self.endpoint_configuration = self.stage_config.get(
            "endpoint_configuration", None
        )
        self.iam_authorization = self.stage_config.get("iam_authorization", False)
        self.cors = self.stage_config.get("cors", False)
        self.lambda_description = self.stage_config.get(
            "lambda_description", "Zappa Deployment"
        )
        self.lambda_concurrency = self.stage_config.get("lambda_concurrency", None)
        self.environment_variables = self.stage_config.get("environment_variables", {})
        self.aws_environment_variables = self.stage_config.get(
            "aws_environment_variables", {}
        )
        # Environment variables must all be strings (putenv requirement).
        self.check_environment(self.environment_variables)
        self.authorizer = self.stage_config.get("authorizer", {})
        self.runtime = self.stage_config.get(
            "runtime", get_runtime_from_python_version()
        )
        self.aws_kms_key_arn = self.stage_config.get("aws_kms_key_arn", "")
        self.context_header_mappings = self.stage_config.get(
            "context_header_mappings", {}
        )
        self.xray_tracing = self.stage_config.get("xray_tracing", False)
        self.desired_role_arn = self.stage_config.get("role_arn")
        self.layers = self.stage_config.get("layers", None)
        # Load ALB-related settings
        self.use_alb = self.stage_config.get("alb_enabled", False)
        self.alb_vpc_config = self.stage_config.get("alb_vpc_config", {})
        # Additional tags
        self.tags = self.stage_config.get("tags", {})
        # Construct the core Zappa object from the resolved settings.
        desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole"
        self.zappa = Zappa(
            boto_session=session,
            profile_name=self.profile_name,
            aws_region=self.aws_region,
            load_credentials=self.load_credentials,
            desired_role_name=desired_role_name,
            desired_role_arn=self.desired_role_arn,
            runtime=self.runtime,
            tags=self.tags,
            endpoint_urls=self.stage_config.get("aws_endpoint_urls", {}),
            xray_tracing=self.xray_tracing,
        )
        # Copy any recognized custom settings straight onto the Zappa object.
        for setting in CUSTOM_SETTINGS:
            if setting in self.stage_config:
                setting_val = self.stage_config[setting]
                # Read the policy file contents.
                if setting.endswith("policy"):
                    with open(setting_val, "r") as f:
                        setting_val = f.read()
                setattr(self.zappa, setting, setting_val)
        if self.app_function:
            self.collision_warning(self.app_function)
            if self.app_function[-3:] == ".py":
                click.echo(
                    click.style("Warning!", fg="red", bold=True)
                    + " Your app_function is pointing to a "
                    + click.style("file and not a function", bold=True)
                    + "! It should probably be something like 'my_file.app', not 'my_file.py'!"
                )
        return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
"""
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
"""
zs_json = settings_name + ".json"
zs_yml = settings_name + ".yml"
zs_yaml = settings_name + ".yaml"
zs_toml = settings_name + ".toml"
# Must have at least one
if (
not os.path.isfile(zs_json)
and not os.path.isfile(zs_yml)
and not os.path.isfile(zs_yaml)
and not os.path.isfile(zs_toml)
):
raise ClickException(
"Please configure a zappa_settings file or call `zappa init`."
)
# Prefer JSON
if os.path.isfile(zs_json):
settings_file = zs_json
elif os.path.isfile(zs_toml):
settings_file = zs_toml
elif os.path.isfile(zs_yml):
settings_file = zs_yml
else:
settings_file = zs_yaml
return settings_file
def load_settings_file(self, settings_file=None):
"""
Load our settings file.
"""
if not settings_file:
settings_file = self.get_json_or_yaml_settings()
if not os.path.isfile(settings_file):
raise ClickException(
"Please configure your zappa_settings file or call `zappa init`."
)
path, ext = os.path.splitext(settings_file)
if ext == ".yml" or ext == ".yaml":
with open(settings_file) as yaml_file:
try:
self.zappa_settings = yaml.safe_load(yaml_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings YAML. It may be malformed."
)
elif ext == ".toml":
with open(settings_file) as toml_file:
try:
self.zappa_settings = toml.load(toml_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings TOML. It may be malformed."
)
else:
with open(settings_file) as json_file:
try:
self.zappa_settings = json.load(json_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings JSON. It may be malformed."
)
    def create_package(self, output=None):
        """
        Ensure that the package can be properly configured,
        and then create it.

        Builds either one zip containing handler + application, or — with
        ``slim_handler`` — a project tarball plus a separate handler-only
        zip. Finally embeds the generated ``zappa_settings.py`` into
        whichever archive serves requests. Sets ``self.zip_path`` (and
        ``self.handler_path`` in slim mode) as side effects.
        """
        # Create the Lambda zip package (includes project and virtualenvironment)
        # Also define the path the handler file so it can be copied to the zip
        # root for Lambda.
        current_file = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe()))
        )
        handler_file = (
            os.sep.join(current_file.split(os.sep)[0:]) + os.sep + "handler.py"
        )

        # Create the zip file(s)
        if self.stage_config.get("slim_handler", False):
            # Create two zips. One with the application and the other with just the handler.
            # https://github.com/Miserlou/Zappa/issues/510
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                use_precompiled_packages=self.stage_config.get(
                    "use_precompiled_packages", True
                ),
                exclude=self.stage_config.get("exclude", []),
                exclude_glob=self.stage_config.get("exclude_glob", []),
                disable_progress=self.disable_progress,
                archive_format="tarball",
            )

            # Make sure the normal venv is not included in the handler's zip
            exclude = self.stage_config.get("exclude", [])
            cur_venv = self.zappa.get_current_venv()
            exclude.append(cur_venv.split("/")[-1])
            self.handler_path = self.zappa.create_lambda_zip(
                prefix="handler_{0!s}".format(self.lambda_name),
                venv=self.zappa.create_handler_venv(),
                handler_file=handler_file,
                slim_handler=True,
                exclude=exclude,
                exclude_glob=self.stage_config.get("exclude_glob", []),
                output=output,
                disable_progress=self.disable_progress,
            )
        else:
            # This could be python3.6 optimized.
            exclude = self.stage_config.get(
                "exclude", ["boto3", "dateutil", "botocore", "s3transfer", "concurrent"]
            )

            # Create a single zip that has the handler and application
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                handler_file=handler_file,
                use_precompiled_packages=self.stage_config.get(
                    "use_precompiled_packages", True
                ),
                exclude=exclude,
                exclude_glob=self.stage_config.get("exclude_glob", []),
                output=output,
                disable_progress=self.disable_progress,
            )

            # Warn if this is too large for Lambda (50 MB zipped upload limit).
            file_stats = os.stat(self.zip_path)
            if file_stats.st_size > 52428800:  # pragma: no cover
                print(
                    "\n\nWarning: Application zip package is likely to be too large for AWS Lambda. "
                    'Try setting "slim_handler" to true in your Zappa settings file.\n\n'
                )

        # Throw custom settings into the zip that handles requests
        if self.stage_config.get("slim_handler", False):
            handler_zip = self.handler_path
        else:
            handler_zip = self.zip_path

        with zipfile.ZipFile(handler_zip, "a") as lambda_zip:
            settings_s = self.get_zappa_settings_string()

            # Copy our Django app into root of our package.
            # It doesn't work otherwise.
            if self.django_settings:
                base = __file__.rsplit(os.sep, 1)[0]
                django_py = "".join(os.path.join(base, "ext", "django_zappa.py"))
                lambda_zip.write(django_py, "django_zappa_app.py")

            # Lambda requires a specific chmod
            temp_settings = tempfile.NamedTemporaryFile(delete=False)
            os.chmod(temp_settings.name, 0o644)
            temp_settings.write(bytes(settings_s, "utf-8"))
            temp_settings.close()
            lambda_zip.write(temp_settings.name, "zappa_settings.py")

        os.unlink(temp_settings.name)
    def get_zappa_settings_string(self):
        """
        Render the generated ``zappa_settings.py`` module as a string.

        This module is embedded into the deployment package and imported by
        the Zappa handler at runtime; each section below emits one or more
        Python assignments, so the exact output format is a contract with
        the handler.
        """
        settings_s = "# Generated by Zappa\n"

        if self.app_function:
            if "." not in self.app_function:  # pragma: no cover
                raise ClickException(
                    "Your "
                    + click.style("app_function", fg="red", bold=True)
                    + " value is not a modular path."
                    + " It needs to be in the format `"
                    + click.style("your_module.your_app_object", bold=True)
                    + "`."
                )
            app_module, app_function = self.app_function.rsplit(".", 1)
            settings_s = (
                settings_s
                + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(
                    app_module, app_function
                )
            )

        if self.exception_handler:
            settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
        else:
            settings_s += "EXCEPTION_HANDLER=None\n"

        if self.debug:
            settings_s = settings_s + "DEBUG=True\n"
        else:
            settings_s = settings_s + "DEBUG=False\n"

        settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))

        if self.binary_support:
            settings_s = settings_s + "BINARY_SUPPORT=True\n"
        else:
            settings_s = settings_s + "BINARY_SUPPORT=False\n"

        head_map_dict = {}
        head_map_dict.update(dict(self.context_header_mappings))
        settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(head_map_dict)

        # If we're on a domain, we don't need to define the /<<env>> in
        # the WSGI PATH
        if self.domain:
            settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
        else:
            settings_s = settings_s + "DOMAIN=None\n"

        if self.base_path:
            settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
        else:
            settings_s = settings_s + "BASE_PATH=None\n"

        # Pass through remote config bucket and path
        if self.remote_env:
            settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(self.remote_env)
        # DEPRECATED. use remote_env instead
        elif self.remote_env_bucket and self.remote_env_file:
            settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                self.remote_env_bucket, self.remote_env_file
            )

        # Local envs
        env_dict = {}
        if self.aws_region:
            env_dict["AWS_REGION"] = self.aws_region
        env_dict.update(dict(self.environment_variables))

        # Environment variable keys must be ascii
        # https://github.com/Miserlou/Zappa/issues/604
        # https://github.com/Miserlou/Zappa/issues/998
        try:
            env_dict = dict(
                (k.encode("ascii").decode("ascii"), v) for (k, v) in env_dict.items()
            )
        except Exception:
            raise ValueError("Environment variable keys must be ascii.")

        settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(env_dict)

        # We can be environment-aware
        settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
        settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))

        if self.settings_file:
            settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format(
                (self.settings_file)
            )
        else:
            settings_s = settings_s + "SETTINGS_FILE=None\n"

        if self.django_settings:
            settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format(
                (self.django_settings)
            )
        else:
            settings_s = settings_s + "DJANGO_SETTINGS=None\n"

        # If slim handler, path to project zip
        if self.stage_config.get("slim_handler", False):
            settings_s += (
                "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                    self.s3_bucket_name, self.api_stage, self.project_name
                )
            )

            # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
            # and tell the handler we are the slim_handler
            # https://github.com/Miserlou/Zappa/issues/776
            settings_s += "SLIM_HANDLER=True\n"

            include = self.stage_config.get("include", [])
            if len(include) >= 1:
                settings_s += "INCLUDE=" + str(include) + "\n"

        # AWS Events function mapping: event-source ARN -> handler function.
        event_mapping = {}
        events = self.stage_config.get("events", [])
        for event in events:
            arn = event.get("event_source", {}).get("arn")
            function = event.get("function")
            if arn and function:
                event_mapping[arn] = function
        settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)

        # Map Lex bot events: "intent:invocation_source" -> handler function.
        bot_events = self.stage_config.get("bot_events", [])
        bot_events_mapping = {}
        for bot_event in bot_events:
            event_source = bot_event.get("event_source", {})
            intent = event_source.get("intent")
            invocation_source = event_source.get("invocation_source")
            function = bot_event.get("function")
            if intent and invocation_source and function:
                bot_events_mapping[
                    str(intent) + ":" + str(invocation_source)
                ] = function

        settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(
            bot_events_mapping
        )

        # Map cognito triggers: trigger source -> handler function.
        cognito_trigger_mapping = {}
        cognito_config = self.stage_config.get("cognito", {})
        triggers = cognito_config.get("triggers", [])
        for trigger in triggers:
            source = trigger.get("source")
            function = trigger.get("function")
            if source and function:
                cognito_trigger_mapping[source] = function
        settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(
            cognito_trigger_mapping
        )

        # Authorizer config
        authorizer_function = self.authorizer.get("function", None)
        if authorizer_function:
            settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)

        # async response
        async_response_table = self.stage_config.get("async_response_table", "")
        settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)

        return settings_s
def remove_local_zip(self):
"""
Remove our local zip file.
"""
if self.stage_config.get("delete_local_zip", True):
try:
if os.path.isfile(self.zip_path):
os.remove(self.zip_path)
if self.handler_path and os.path.isfile(self.handler_path):
os.remove(self.handler_path)
except Exception as e: # pragma: no cover
sys.exit(-1)
    def remove_uploaded_zip(self):
        """
        Remove the local and S3 zip file after uploading and updating.

        Controlled by the ``delete_s3_zip`` setting (default True). In slim
        mode only the handler archive is removed from S3 — the project
        tarball must stay, since the deployed handler streams it at runtime.
        """
        # Remove the uploaded zip from S3, because it is now registered..
        if self.stage_config.get("delete_s3_zip", True):
            self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
            if self.stage_config.get("slim_handler", False):
                # Need to keep the project zip as the slim handler uses it.
                self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
# Only try to remove uploaded zip if we're running a command that has loaded credentials
if self.load_credentials:
self.remove_uploaded_zip()
self.remove_local_zip()
def print_logs(
self, logs, colorize=True, http=False, non_http=False, force_colorize=None
):
"""
Parse, filter and print logs to the console.
"""
for log in logs:
timestamp = log["timestamp"]
message = log["message"]
if "START RequestId" in message:
continue
if "REPORT RequestId" in message:
continue
if "END RequestId" in message:
continue
if not colorize and not force_colorize:
if http:
if self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
elif non_http:
if not self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
else:
print("[" + str(timestamp) + "] " + message.strip())
else:
if http:
if self.is_http_log_entry(message.strip()):
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
elif non_http:
if not self.is_http_log_entry(message.strip()):
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
else:
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
def is_http_log_entry(self, string):
"""
Determines if a log entry is an HTTP-formatted log string or not.
"""
# Debug event filter
if "Zappa Event" in string:
return False
# IP address filter
for token in string.replace("\t", " ").split(" "):
try:
if token.count(".") == 3 and token.replace(".", "").isnumeric():
return True
except Exception: # pragma: no cover
pass
return False
    def get_project_name(self):
        # Default project name: the current directory name, slugified and
        # truncated to 15 characters.
        return slugify.slugify(os.getcwd().split(os.sep)[-1])[:15]
    def colorize_log_entry(self, string):
        """
        Apply various heuristics to return a colorized version of a string.
        If these fail, simply return the string in plaintext.

        The replacements are plain substring substitutions applied in
        sequence, so their order matters: bracketed tokens, quoted strings,
        then per-token UUID/IP/status-code coloring, then "Zappa Event:" and
        date-looking fields.
        """
        final_string = string
        try:
            # First, do stuff in square brackets
            inside_squares = re.findall(r"\[([^]]*)\]", string)
            for token in inside_squares:
                if token in ["CRITICAL", "ERROR", "WARNING", "DEBUG", "INFO", "NOTSET"]:
                    # Log levels get a bold cyan body as well as cyan brackets.
                    final_string = final_string.replace(
                        "[" + token + "]",
                        click.style("[", fg="cyan")
                        + click.style(token, fg="cyan", bold=True)
                        + click.style("]", fg="cyan"),
                    )
                else:
                    final_string = final_string.replace(
                        "[" + token + "]",
                        click.style("[", fg="cyan")
                        + click.style(token, bold=True)
                        + click.style("]", fg="cyan"),
                    )

            # Then do quoted strings
            quotes = re.findall(r'"[^"]*"', string)
            for token in quotes:
                final_string = final_string.replace(
                    token, click.style(token, fg="yellow")
                )

            # And UUIDs
            for token in final_string.replace("\t", " ").split(" "):
                try:
                    # Heuristic: four dashes with alphanumeric remainder.
                    if token.count("-") == 4 and token.replace("-", "").isalnum():
                        final_string = final_string.replace(
                            token, click.style(token, fg="magenta")
                        )
                except Exception:  # pragma: no cover
                    pass

                # And IP addresses
                try:
                    if token.count(".") == 3 and token.replace(".", "").isnumeric():
                        final_string = final_string.replace(
                            token, click.style(token, fg="red")
                        )
                except Exception:  # pragma: no cover
                    pass

                # And status codes
                try:
                    if token in ["200"]:
                        final_string = final_string.replace(
                            token, click.style(token, fg="green")
                        )
                    if token in ["400", "401", "403", "404", "405", "500"]:
                        final_string = final_string.replace(
                            token, click.style(token, fg="red")
                        )
                except Exception:  # pragma: no cover
                    pass

            # And Zappa Events
            try:
                if "Zappa Event:" in final_string:
                    final_string = final_string.replace(
                        "Zappa Event:",
                        click.style("Zappa Event:", bold=True, fg="green"),
                    )
            except Exception:  # pragma: no cover
                pass

            # And dates — anything dateutil can parse in a tab-separated field.
            for token in final_string.split("\t"):
                try:
                    is_date = parser.parse(token)
                    final_string = final_string.replace(
                        token, click.style(token, fg="green")
                    )
                except Exception:  # pragma: no cover
                    pass

            final_string = final_string.replace("\t", " ").replace("  ", " ")
            if final_string[0] != " ":
                final_string = " " + final_string

            return final_string
        except Exception as e:  # pragma: no cover
            return string
    def execute_prebuild_script(self):
        """
        Parse and execute the prebuild_script from the zappa_settings.

        The script path is a modular path like ``my_folder.my_module.my_func``.
        Resolution order: first relative to the current working directory,
        then as a regular importable module (e.g. installed in the venv).
        """
        (pb_mod_path, pb_func) = self.prebuild_script.rsplit(".", 1)

        try:  # Prefer prebuild script in working directory
            if (
                pb_mod_path.count(".") >= 1
            ):  # Prebuild script func is nested in a folder
                (mod_folder_path, mod_name) = pb_mod_path.rsplit(".", 1)
                mod_folder_path_fragments = mod_folder_path.split(".")
                working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
            else:
                mod_name = pb_mod_path
                working_dir = os.getcwd()

            working_dir_importer = pkgutil.get_importer(working_dir)
            # NOTE(review): find_module()/load_module() are deprecated in
            # newer Pythons — confirm against the supported runtime matrix.
            module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)

        except (ImportError, AttributeError):

            try:  # Prebuild func might be in virtualenv
                module_ = importlib.import_module(pb_mod_path)
            except ImportError:  # pragma: no cover
                raise ClickException(
                    click.style("Failed ", fg="red")
                    + "to "
                    + click.style("import prebuild script ", bold=True)
                    + 'module: "{pb_mod_path}"'.format(
                        pb_mod_path=click.style(pb_mod_path, bold=True)
                    )
                )

        if not hasattr(module_, pb_func):  # pragma: no cover
            raise ClickException(
                click.style("Failed ", fg="red")
                + "to "
                + click.style("find prebuild script ", bold=True)
                + 'function: "{pb_func}" '.format(
                    pb_func=click.style(pb_func, bold=True)
                )
                + 'in module "{pb_mod_path}"'.format(pb_mod_path=pb_mod_path)
            )

        prebuild_function = getattr(module_, pb_func)
        prebuild_function()  # Call the function
def collision_warning(self, item):
"""
Given a string, print a warning if this could
collide with a Zappa core package module.
Use for app functions and events.
"""
namespace_collisions = [
"zappa.",
"wsgi.",
"middleware.",
"handler.",
"util.",
"letsencrypt.",
"cli.",
]
for namespace_collision in namespace_collisions:
if item.startswith(namespace_collision):
click.echo(
click.style("Warning!", fg="red", bold=True)
+ " You may have a namespace collision between "
+ click.style(item, bold=True)
+ " and "
+ click.style(namespace_collision, bold=True)
+ "! You may want to rename that file."
)
def deploy_api_gateway(self, api_id):
cache_cluster_enabled = self.stage_config.get("cache_cluster_enabled", False)
cache_cluster_size = str(self.stage_config.get("cache_cluster_size", 0.5))
endpoint_url = self.zappa.deploy_api_gateway(
api_id=api_id,
stage_name=self.api_stage,
cache_cluster_enabled=cache_cluster_enabled,
cache_cluster_size=cache_cluster_size,
cloudwatch_log_level=self.stage_config.get("cloudwatch_log_level", "OFF"),
cloudwatch_data_trace=self.stage_config.get("cloudwatch_data_trace", False),
cloudwatch_metrics_enabled=self.stage_config.get(
"cloudwatch_metrics_enabled", False
),
cache_cluster_ttl=self.stage_config.get("cache_cluster_ttl", 300),
cache_cluster_encrypted=self.stage_config.get(
"cache_cluster_encrypted", False
),
)
return endpoint_url
    def check_venv(self):
        """Ensure we're inside a virtualenv; raise ClickException otherwise."""
        # `--no_venv` explicitly opts out of this check.
        if self.vargs and self.vargs.get("no_venv"):
            return
        if self.zappa:
            venv = self.zappa.get_current_venv()
        else:
            # Just for `init`, when we don't have settings yet.
            venv = Zappa.get_current_venv()
        if not venv:
            raise ClickException(
                click.style("Zappa", bold=True)
                + " requires an "
                + click.style("active virtual environment", bold=True, fg="red")
                + "!\n"
                + "Learn more about virtual environments here: "
                + click.style(
                    "http://docs.python-guide.org/en/latest/dev/virtualenvs/",
                    bold=False,
                    fg="cyan",
                )
            )
def silence(self):
"""
Route all stdout to null.
"""
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
    def touch_endpoint(self, endpoint_url):
        """
        Test the deployed endpoint with a GET request.

        Retries up to four extra times on 504 (cold start of large
        packages), then raises ClickException on any remaining 5xx.
        Private API Gateway endpoints are skipped with a warning, since
        the deployer usually cannot reach them.
        """
        # Private APIGW endpoints most likely can't be reached by a deployer
        # unless they're connected to the VPC by VPN. Instead of trying
        # connect to the service, print a warning and let the user know
        # to check it manually.
        # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565
        if "PRIVATE" in self.stage_config.get("endpoint_configuration", []):
            print(
                click.style("Warning!", fg="yellow", bold=True)
                + " Since you're deploying a private API Gateway endpoint,"
                " Zappa cannot determine if your function is returning "
                " a correct status code. You should check your API's response"
                " manually before considering this deployment complete."
            )
            return

        touch_path = self.stage_config.get("touch_path", "/")
        req = requests.get(endpoint_url + touch_path)

        # Sometimes on really large packages, it can take 60-90 secs to be
        # ready and requests will return 504 status_code until ready.
        # So, if we get a 504 status code, rerun the request up to 4 times or
        # until we don't get a 504 error
        if req.status_code == 504:
            i = 0
            status_code = 504
            while status_code == 504 and i <= 4:
                req = requests.get(endpoint_url + touch_path)
                status_code = req.status_code
                i += 1

        if req.status_code >= 500:
            raise ClickException(
                click.style("Warning!", fg="red", bold=True)
                + " Status check on the deployed lambda failed."
                + " A GET request to '"
                + touch_path
                + "' yielded a "
                + click.style(str(req.status_code), fg="red", bold=True)
                + " response code."
            )
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """
    # Build the messages up front, then echo them in order.
    messages = [
        "Need "
        + click.style("help", fg="green", bold=True)
        + "? Found a "
        + click.style("bug", fg="green", bold=True)
        + "? Let us "
        + click.style("know", fg="green", bold=True)
        + "! :D",
        "File bug reports on "
        + click.style("GitHub", bold=True)
        + " here: "
        + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True),
        "And join our "
        + click.style("Slack", bold=True)
        + " channel here: "
        + click.style("https://zappateam.slack.com", fg="cyan", bold=True),
        "Love!,",
        " ~ Team " + click.style("Zappa", bold=True) + "!",
    ]
    for message in messages:
        click.echo(message)
def disable_click_colors():
    """
    Set a Click context where colors are disabled. Creates a throwaway BaseCommand
    to play nicely with the Context constructor.
    The intended side-effect here is that click.echo() checks this context and will
    suppress colors.
    https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39
    """
    # The command name is arbitrary; the context only exists to carry
    # `color=False` on Click's internal context stack.
    ctx = Context(BaseCommand("AllYourBaseAreBelongToUs"))
    ctx.color = False
    push_context(ctx)
def handle():  # pragma: no cover
    """
    Main program execution handler.

    Runs the CLI, making sure `cli.on_exit()` cleanup fires on SystemExit,
    KeyboardInterrupt and unexpected exceptions alike.
    """
    # Initialize to None so the except handlers below don't hit a NameError
    # if ZappaCLI() itself raises before `cli` is bound.
    cli = None
    try:
        cli = ZappaCLI()
        sys.exit(cli.handle())
    except SystemExit as e:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(e.code)

    except KeyboardInterrupt:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(130)
    except Exception as e:
        if cli:
            cli.on_exit()

        click.echo(
            "Oh no! An " + click.style("error occurred", fg="red", bold=True) + "! :("
        )
        click.echo("\n==============\n")
        import traceback

        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()

        sys.exit(-1)
if __name__ == "__main__":  # pragma: no cover
    handle()
import atexit
import base64
import binascii
import copy
import hashlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
from urllib.request import urlopen
import requests
# Staging
# Amazon doesn't accept these though.
# DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"

# Production ACME endpoint used for all requests in this module.
DEFAULT_CA = "https://acme-v02.api.letsencrypt.org"

# Module-level logger with an explicit StreamHandler so progress messages
# reach the console even when the host application configured no logging.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
def get_cert_and_update_domain(
    zappa_instance,
    lambda_name,
    api_stage,
    domain=None,
    manual=False,
):
    """
    Main cert installer path.

    Generates a domain key and CSR, obtains a signed certificate from
    Let's Encrypt, then either installs it on the API Gateway custom
    domain (creating or updating it as needed) or, with ``manual=True``,
    prints the PEM material for the operator to install themselves.
    Returns True on success, False on any error (which is printed).
    """
    try:
        create_domain_key()
        create_domain_csr(domain)
        get_cert(zappa_instance)
        create_chained_certificate()

        with open("{}/signed.crt".format(gettempdir())) as f:
            certificate_body = f.read()

        with open("{}/domain.key".format(gettempdir())) as f:
            certificate_private_key = f.read()

        with open("{}/intermediate.pem".format(gettempdir())) as f:
            certificate_chain = f.read()

        if not manual:
            if domain:
                if not zappa_instance.get_domain_name(domain):
                    zappa_instance.create_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
                    print(
                        "Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part."
                    )
                else:
                    zappa_instance.update_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
        else:
            print("Cerificate body:\n")
            print(certificate_body)

            print("\nCerificate private key:\n")
            print(certificate_private_key)

            print("\nCerificate chain:\n")
            print(certificate_chain)

    except Exception as e:
        print(e)
        return False

    return True
def create_domain_key():
    """
    Generate a 2048-bit RSA private key for the domain and write it to
    ``<tempdir>/domain.key``.
    """
    # subprocess.DEVNULL avoids leaking a handle to a manually-opened os.devnull.
    out = subprocess.check_output(
        ["openssl", "genrsa", "2048"], stderr=subprocess.DEVNULL
    )
    with open(os.path.join(gettempdir(), "domain.key"), "wb") as f:
        f.write(out)
def create_domain_csr(domain):
    """
    Create a certificate signing request for *domain* (used as the CN)
    with the previously generated domain key, writing it to
    ``<tempdir>/domain.csr``.
    """
    subj = "/CN=" + domain
    cmd = [
        "openssl",
        "req",
        "-new",
        "-sha256",
        "-key",
        os.path.join(gettempdir(), "domain.key"),
        "-subj",
        subj,
    ]
    # subprocess.DEVNULL avoids leaking a handle to a manually-opened os.devnull.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    with open(os.path.join(gettempdir(), "domain.csr"), "wb") as f:
        f.write(out)
def create_chained_certificate():
    """
    Download the Let's Encrypt cross-signed intermediate certificate and
    write both ``intermediate.pem`` and the full ``chained.pem`` bundle
    (signed cert followed by the intermediate) to the temp directory.
    """
    # Read the signed cert through a context manager so the file handle
    # is closed (the previous open(...).read() leaked it).
    with open(os.path.join(gettempdir(), "signed.crt"), "rb") as signed_crt_file:
        signed_crt = signed_crt_file.read()

    cross_cert_url = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
    cert = requests.get(cross_cert_url)
    with open(os.path.join(gettempdir(), "intermediate.pem"), "wb") as intermediate_pem:
        intermediate_pem.write(cert.content)

    with open(os.path.join(gettempdir(), "chained.pem"), "wb") as chained_pem:
        chained_pem.write(signed_crt)
        chained_pem.write(cert.content)
def parse_account_key():
    """Parse account key to get public key.

    Returns the textual ``openssl rsa -text`` dump of the account key as bytes.
    """
    LOGGER.info("Parsing account key...")
    cmd = [
        "openssl",
        "rsa",
        "-in",
        os.path.join(gettempdir(), "account.key"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL avoids leaking a handle to a manually-opened os.devnull.
    return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
def parse_csr():
    """
    Parse certificate signing request for domains.

    Returns the set of domains found in the CSR's Common Name plus any
    DNS entries in the Subject Alternative Name extension.
    """
    LOGGER.info("Parsing CSR...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL avoids leaking a handle to a manually-opened os.devnull.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)

    domains = set([])
    # Common Name, e.g. 'Subject: CN = example.com'
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode("utf8"))
    if common_name is not None:
        domains.add(common_name.group(1))

    # Subject Alternative Names, e.g. 'DNS:example.com, DNS:www.example.com'
    subject_alt_names = re.search(
        r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
        out.decode("utf8"),
        re.MULTILINE | re.DOTALL,
    )
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])

    return domains
def get_boulder_header(key_bytes):
    """
    Use regular expressions to find crypto values from parsed account key,
    and return a header we can send to our Boulder instance.

    *key_bytes* is the textual ``openssl rsa -text`` dump; the RSA modulus
    and public exponent are extracted and base64url-encoded into a JWK
    per the JOSE spec.
    """
    pub_hex, pub_exp = re.search(
        r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
        key_bytes.decode("utf8"),
        re.MULTILINE | re.DOTALL,
    ).groups()
    # Hex-encode the exponent, left-padding to an even digit count so it
    # can be unhexlified.
    pub_exp = "{0:x}".format(int(pub_exp))
    pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
    header = {
        "alg": "RS256",
        "jwk": {
            "e": _b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
            "kty": "RSA",
            "n": _b64(
                binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))
            ),
        },
    }
    return header
def register_account():
    """
    Agree to LE TOS.

    Registers the account key with the ACME new-reg endpoint; both 201
    (newly created) and 409 (already registered) count as success.
    """
    LOGGER.info("Registering account...")
    code, result = _send_signed_request(
        DEFAULT_CA + "/acme/new-reg",
        {
            "resource": "new-reg",
            "agreement": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
        },
    )
    if code == 201:  # pragma: no cover
        LOGGER.info("Registered!")
    elif code == 409:  # pragma: no cover
        LOGGER.info("Already registered!")
    else:  # pragma: no cover
        raise ValueError("Error registering: {0} {1}".format(code, result))
def get_cert(zappa_instance, log=LOGGER, CA=DEFAULT_CA):
    """
    Call LE to get a new signed CA.

    Runs a dns-01 challenge for every domain in the CSR: publishes the
    challenge digest as a TXT record via *zappa_instance* (Route 53),
    waits for DNS propagation, asks the CA to verify, then signs and
    PEM-encodes the certificate into the temp directory. Returns True
    on success; raises ValueError on any protocol failure.
    """
    out = parse_account_key()
    header = get_boulder_header(out)
    # JWK thumbprint, used to build each challenge's key authorization.
    accountkey_json = json.dumps(header["jwk"], sort_keys=True, separators=(",", ":"))
    thumbprint = _b64(hashlib.sha256(accountkey_json.encode("utf8")).digest())

    # find domains
    domains = parse_csr()

    # get the certificate domains and expiration
    register_account()

    # verify each domain
    for domain in domains:
        log.info("Verifying {0}...".format(domain))

        # get new challenge
        code, result = _send_signed_request(
            CA + "/acme/new-authz",
            {
                "resource": "new-authz",
                "identifier": {"type": "dns", "value": domain},
            },
        )
        if code != 201:
            raise ValueError(
                "Error requesting challenges: {0} {1}".format(code, result)
            )

        challenge = [
            ch
            for ch in json.loads(result.decode("utf8"))["challenges"]
            if ch["type"] == "dns-01"
        ][0]
        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge["token"])
        keyauthorization = "{0}.{1}".format(token, thumbprint).encode("utf-8")

        # sha256_b64 of the key authorization — this is the TXT record value.
        digest = _b64(hashlib.sha256(keyauthorization).digest())

        zone_id = zappa_instance.get_hosted_zone_id_for_domain(domain)
        if not zone_id:
            raise ValueError("Could not find Zone ID for: " + domain)
        zappa_instance.set_dns_challenge_txt(zone_id, domain, digest)  # resp is unused

        print("Waiting for DNS to propagate..")

        # Fixed wait for the TXT record to propagate. What's optimal here?
        time.sleep(45)

        # notify challenge are met
        code, result = _send_signed_request(
            challenge["uri"],
            {
                "resource": "challenge",
                "keyAuthorization": keyauthorization.decode("utf-8"),
            },
        )
        if code != 202:
            raise ValueError("Error triggering challenge: {0} {1}".format(code, result))

        # wait for challenge to be verified
        verify_challenge(challenge["uri"])

        # Challenge verified, clean up R53
        zappa_instance.remove_dns_challenge_txt(zone_id, domain, digest)

    # Sign
    result = sign_certificate()
    # Encode to PEM format
    encode_certificate(result)

    return True
def verify_challenge(uri):
    """
    Loop until our challenge is verified, else fail.

    Polls the ACME challenge *uri* every 2 seconds while 'pending';
    returns once 'valid', raises ValueError on any other status or on
    network errors.
    """
    while True:
        try:
            resp = urlopen(uri)
            challenge_status = json.loads(resp.read().decode("utf8"))
        except IOError as e:
            # A plain URLError has no .code/.read (only HTTPError does), so
            # the original `e.code` access crashed with AttributeError for
            # connection-level failures. Degrade gracefully instead.
            code = getattr(e, "code", None)
            try:
                detail = json.loads(e.read().decode("utf8"))
            except Exception:
                detail = str(e)
            raise ValueError(
                "Error checking challenge: {0} {1}".format(code, detail)
            )
        if challenge_status["status"] == "pending":
            time.sleep(2)
        elif challenge_status["status"] == "valid":
            LOGGER.info("Domain verified!")
            break
        else:
            raise ValueError(
                "Domain challenge did not pass: {0}".format(challenge_status)
            )
def sign_certificate():
    """
    Get the new certificate.
    Returns the signed bytes (DER-encoded certificate from the CA).
    """
    LOGGER.info("Signing certificate...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-outform",
        "DER",
    ]
    # subprocess.DEVNULL avoids leaking a handle to a manually-opened os.devnull.
    csr_der = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    code, result = _send_signed_request(
        DEFAULT_CA + "/acme/new-cert",
        {
            "resource": "new-cert",
            "csr": _b64(csr_der),
        },
    )
    if code != 201:
        raise ValueError("Error signing certificate: {0} {1}".format(code, result))
    LOGGER.info("Certificate signed!")

    return result
def encode_certificate(result):
    """
    Encode cert bytes to PEM encoded cert file.

    Wraps the base64 of the DER *result* at 64 columns between the usual
    BEGIN/END CERTIFICATE markers and writes ``<tempdir>/signed.crt``.
    """
    cert_body = (
        """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
            "\n".join(textwrap.wrap(base64.b64encode(result).decode("utf8"), 64))
        )
    )
    # Use a context manager so the file handle is always closed.
    with open("{}/signed.crt".format(gettempdir()), "w") as signed_crt:
        signed_crt.write(cert_body)

    return True
##
# Request Utility
##
def _b64(b):
"""
Helper function base64 encode for jose spec
"""
return base64.urlsafe_b64encode(b).decode("utf8").replace("=", "")
def _send_signed_request(url, payload):
    """
    Helper function to make signed requests to Boulder.

    Builds a JWS envelope: signs ``protected64.payload64`` with the
    account key via openssl, then POSTs the JSON envelope to *url*.
    Returns ``(status_code, response_body)``; on HTTP errors the error
    body (or message) is returned rather than raising.
    """
    payload64 = _b64(json.dumps(payload).encode("utf8"))

    out = parse_account_key()
    header = get_boulder_header(out)

    protected = copy.deepcopy(header)
    # Anti-replay nonce, fetched fresh from the directory for each request.
    protected["nonce"] = urlopen(DEFAULT_CA + "/directory").headers["Replay-Nonce"]
    protected64 = _b64(json.dumps(protected).encode("utf8"))

    cmd = [
        "openssl",
        "dgst",
        "-sha256",
        "-sign",
        os.path.join(gettempdir(), "account.key"),
    ]
    proc = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode("utf8"))
    if proc.returncode != 0:  # pragma: no cover
        raise IOError("OpenSSL Error: {0}".format(err))

    data = json.dumps(
        {
            "header": header,
            "protected": protected64,
            "payload": payload64,
            "signature": _b64(out),
        }
    )

    try:
        resp = urlopen(url, data.encode("utf8"))
        return resp.getcode(), resp.read()
    except IOError as e:
        # HTTPError exposes .code/.read; URLError falls back to its message.
        return getattr(e, "code", None), getattr(e, "read", e.__str__)()
##
# Temporary Directory Utility
##
__tempdir = None


def gettempdir():
    """
    Return the module's private temporary directory, creating it securely on
    first use. The directory is removed by cleanup() or at interpreter exit.
    """
    global __tempdir
    if __tempdir is None:
        __tempdir = tempfile.mkdtemp()
    return __tempdir
@atexit.register
def cleanup():
    """
    Remove the lazily-created temporary directory, if one exists.
    """
    global __tempdir
    if __tempdir is None:
        return
    shutil.rmtree(__tempdir)
    __tempdir = None
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
    """
    Yield every upper/lower-case permutation of *input_string*.

    Characters without a case distinction (digits, punctuation) appear
    unchanged. A pretty algorithm, via @Amber:
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if not input_string:
        yield ""
        return
    head, tail = input_string[:1], input_string[1:]
    # A caseless character contributes a single variant; a letter contributes two.
    if head.lower() == head.upper():
        variants = [head]
    else:
        variants = [head.lower(), head.upper()]
    for suffix in all_casings(tail):
        for variant in variants:
            yield variant + suffix
class ZappaWSGIMiddleware:
    """
    Middleware functions necessary for a Zappa deployment.

    Most hacks have now been removed except for Set-Cookie permutation.
    """

    def __init__(self, application):
        # The wrapped WSGI application.
        self.application = application

    def __call__(self, environ, start_response):
        """
        We must case-mangle the Set-Cookie header name or AWS will use only a
        single one of these headers.
        """

        def encode_response(status, headers, exc_info=None):
            """
            This makes the 'set-cookie' headers name lowercase,
            all the non-cookie headers should be sent unharmed.
            Related: https://github.com/Miserlou/Zappa/issues/1965
            """
            # Fixed: use isinstance rather than type comparison so str
            # subclasses used as header names are still recognized as cookies.
            new_headers = [
                header
                for header in headers
                if not (
                    isinstance(header[0], str)
                    and header[0].lower() == "set-cookie"
                )
            ]
            cookie_headers = [
                (header[0].lower(), header[1])
                for header in headers
                if isinstance(header[0], str) and header[0].lower() == "set-cookie"
            ]
            return start_response(status, new_headers + cookie_headers, exc_info)

        # Call the application with our modifier
        response = self.application(environ, encode_response)

        # Return the response as a WSGI-safe iterator
        return ClosingIterator(response)
import base64
import collections
import datetime
import importlib
import inspect
import json
import logging
import os
import sys
import tarfile
import traceback
from builtins import str
import boto3
from werkzeug.wrappers import Response
# This file may be copied into a project's root,
# so handle both scenarios.
try:
from zappa.middleware import ZappaWSGIMiddleware
from zappa.utilities import merge_headers, parse_s3_url
from zappa.wsgi import common_log, create_wsgi_request
except ImportError as e: # pragma: no cover
from .middleware import ZappaWSGIMiddleware
from .utilities import merge_headers, parse_s3_url
from .wsgi import common_log, create_wsgi_request
# Set up logging: configure the root logger and default to INFO.
# LambdaHandler.__init__ may later override the level from settings.LOG_LEVEL.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class LambdaHandler:
    """
    Singleton for avoiding duplicate setup.

    Pattern provided by @benbangert.
    """

    __instance = None
    settings = None  # the imported settings module, cached across invocations
    settings_name = None  # name of the settings module that was imported
    session = None  # optional boto3 Session override (used by tests)

    # Application
    app_module = None  # module containing the WSGI application
    wsgi_app = None  # the application wrapped in ZappaWSGIMiddleware
    trailing_slash = False  # True for Django-style URL handling

    def __new__(cls, settings_name="zappa_settings", session=None):
        """Singleton instance to avoid repeat setup"""
        if LambdaHandler.__instance is None:
            print("Instancing..")
            LambdaHandler.__instance = object.__new__(cls)
        return LambdaHandler.__instance

    def __init__(self, settings_name="zappa_settings", session=None):
        """
        Load settings and, on first construction only, import and wrap the
        application. Later constructions are no-ops because the singleton's
        state is cached on the class.
        """
        # We haven't cached our settings yet, load the settings and app.
        if not self.settings:
            # Loading settings from a python module
            self.settings = importlib.import_module(settings_name)
            self.settings_name = settings_name
            self.session = session

            # Custom log level
            if self.settings.LOG_LEVEL:
                level = logging.getLevelName(self.settings.LOG_LEVEL)
                logger.setLevel(level)

            remote_env = getattr(self.settings, "REMOTE_ENV", None)
            remote_bucket, remote_file = parse_s3_url(remote_env)
            if remote_bucket and remote_file:
                self.load_remote_settings(remote_bucket, remote_file)

            # Let the system know that this will be a Lambda/Zappa/Stack
            os.environ["SERVERTYPE"] = "AWS Lambda"
            os.environ["FRAMEWORK"] = "Zappa"
            try:
                os.environ["PROJECT"] = self.settings.PROJECT_NAME
                os.environ["STAGE"] = self.settings.API_STAGE
            except Exception:  # pragma: no cover
                pass

            # Set any locally defined env vars
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            for key in self.settings.ENVIRONMENT_VARIABLES.keys():
                os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key]

            # Pulling from S3 if given a zip path
            project_archive_path = getattr(self.settings, "ARCHIVE_PATH", None)
            if project_archive_path:
                self.load_remote_project_archive(project_archive_path)

            # Load compiled library to the PythonPath
            # checks if we are the slim_handler since this is not needed otherwise
            # https://github.com/Miserlou/Zappa/issues/776
            is_slim_handler = getattr(self.settings, "SLIM_HANDLER", False)
            if is_slim_handler:
                included_libraries = getattr(
                    self.settings, "INCLUDE", ["libmysqlclient.so.18"]
                )
                try:
                    from ctypes import cdll, util

                    for library in included_libraries:
                        try:
                            cdll.LoadLibrary(os.path.join(os.getcwd(), library))
                        except OSError:
                            print(
                                "Failed to find library: {}...right filename?".format(
                                    library
                                )
                            )
                except ImportError:
                    # NOTE(review): "cytpes" below is a typo in a runtime
                    # message; left untouched here to keep output identical.
                    print("Failed to import cytpes library")

            # This is a non-WSGI application
            # https://github.com/Miserlou/Zappa/pull/748
            if (
                not hasattr(self.settings, "APP_MODULE")
                and not self.settings.DJANGO_SETTINGS
            ):
                self.app_module = None
                wsgi_app_function = None
            # This is probably a normal WSGI app (Or django with overloaded wsgi application)
            # https://github.com/Miserlou/Zappa/issues/1164
            elif hasattr(self.settings, "APP_MODULE"):
                if self.settings.DJANGO_SETTINGS:
                    sys.path.append("/var/task")
                    from django.conf import (
                        ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE,
                    )

                    # add the Lambda root path into the sys.path
                    self.trailing_slash = True
                    os.environ[
                        SETTINGS_ENVIRONMENT_VARIABLE
                    ] = self.settings.DJANGO_SETTINGS
                else:
                    self.trailing_slash = False

                # The app module
                self.app_module = importlib.import_module(self.settings.APP_MODULE)

                # The application
                wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
            # Django gets special treatment.
            else:
                try:  # Support both for tests
                    from zappa.ext.django_zappa import get_django_wsgi
                except ImportError:  # pragma: no cover
                    from django_zappa_app import get_django_wsgi

                # Get the Django WSGI app from our extension
                wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
                self.trailing_slash = True

            self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function)

    def load_remote_project_archive(self, project_zip_path):
        """
        Puts the project files from S3 in /tmp and adds to path
        """
        project_folder = "/tmp/{0!s}".format(self.settings.PROJECT_NAME)
        if not os.path.isdir(project_folder):
            # The project folder doesn't exist in this cold lambda, get it from S3
            if not self.session:
                boto_session = boto3.Session()
            else:
                boto_session = self.session

            # Download zip file from S3
            remote_bucket, remote_file = parse_s3_url(project_zip_path)
            s3 = boto_session.resource("s3")
            archive_on_s3 = s3.Object(remote_bucket, remote_file).get()

            # NOTE(review): extractall is path-traversal-prone on untrusted
            # archives; this one is the project's own deployment package, so
            # it is treated as trusted input.
            with tarfile.open(fileobj=archive_on_s3["Body"], mode="r|gz") as t:
                t.extractall(project_folder)

        # Add to project path
        sys.path.insert(0, project_folder)

        # Change working directory to project folder
        # Related: https://github.com/Miserlou/Zappa/issues/702
        os.chdir(project_folder)
        return True

    def load_remote_settings(self, remote_bucket, remote_file):
        """
        Attempt to read a file from s3 containing a flat json object. Adds each
        key->value pair as environment variables. Helpful for keeping
        sensitive or stage-specific configuration variables in s3 instead of
        version control.
        """
        if not self.session:
            boto_session = boto3.Session()
        else:
            boto_session = self.session

        s3 = boto_session.resource("s3")
        try:
            remote_env_object = s3.Object(remote_bucket, remote_file).get()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Could not load remote settings file.", e)
            return

        try:
            content = remote_env_object["Body"].read()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Exception while reading remote settings file.", e)
            return

        try:
            settings_dict = json.loads(content)
        except (ValueError, TypeError):  # pragma: no cover
            print("Failed to parse remote settings!")
            return

        # add each key-value to environment - overwrites existing keys!
        for key, value in settings_dict.items():
            if self.settings.LOG_LEVEL == "DEBUG":
                print("Adding {} -> {} to environment".format(key, value))
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            try:
                os.environ[str(key)] = value
            except Exception:
                if self.settings.LOG_LEVEL == "DEBUG":
                    print("Environment variable keys must be non-unicode!")

    @staticmethod
    def import_module_and_get_function(whole_function):
        """
        Given a modular path to a function, import that module
        and return the function.
        """
        module, function = whole_function.rsplit(".", 1)
        app_module = importlib.import_module(module)
        app_function = getattr(app_module, function)
        return app_function

    @classmethod
    def lambda_handler(cls, event, context):  # pragma: no cover
        """
        Class-level entry point: dispatch to the singleton's handler(),
        routing any uncaught exception through the configured
        EXCEPTION_HANDLER before deciding whether to re-raise.
        """
        handler = cls()
        exception_handler = handler.settings.EXCEPTION_HANDLER
        try:
            return handler.handler(event, context)
        except Exception as ex:
            exception_processed = cls._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=ex,
            )
            if not exception_processed:
                # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry
                # an event execution in case of failure.
                raise

    @classmethod
    def _process_exception(cls, exception_handler, event, context, exception):
        """
        Invoke the user-configured exception handler, if any.

        Returns the handler's truthy/falsy result; True suppresses the
        re-raise in lambda_handler (so Lambda will not retry the event).
        """
        exception_processed = False
        if exception_handler:
            try:
                handler_function = cls.import_module_and_get_function(exception_handler)
                exception_processed = handler_function(exception, event, context)
            except Exception as cex:
                logger.error(msg="Failed to process exception via custom handler.")
                print(cex)
        return exception_processed

    @staticmethod
    def run_function(app_function, event, context):
        """
        Given a function and event context,
        detect signature and execute, returning any result.
        """
        # getargspec does not support python 3 method with type hints
        # Related issue: https://github.com/Miserlou/Zappa/issues/1452
        if hasattr(inspect, "getfullargspec"):  # Python 3
            args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(
                app_function
            )
        else:  # Python 2
            args, varargs, keywords, defaults = inspect.getargspec(app_function)
        num_args = len(args)
        # 0/1/2 named args are supported; *args functions get (event, context).
        if num_args == 0:
            result = app_function(event, context) if varargs else app_function()
        elif num_args == 1:
            result = app_function(event, context) if varargs else app_function(event)
        elif num_args == 2:
            result = app_function(event, context)
        else:
            raise RuntimeError(
                "Function signature is invalid. Expected a function that accepts at most "
                "2 arguments or varargs."
            )
        return result

    def get_function_for_aws_event(self, record):
        """
        Get the associated function to execute for a triggered AWS event

        Support S3, SNS, DynamoDB, kinesis and SQS events
        """
        if "s3" in record:
            if ":" in record["s3"]["configurationId"]:
                return record["s3"]["configurationId"].split(":")[-1]

        arn = None
        if "Sns" in record:
            try:
                message = json.loads(record["Sns"]["Message"])
                if message.get("command"):
                    return message["command"]
            except ValueError:
                pass
            arn = record["Sns"].get("TopicArn")
        elif "dynamodb" in record or "kinesis" in record:
            arn = record.get("eventSourceARN")
        elif "eventSource" in record and record.get("eventSource") == "aws:sqs":
            arn = record.get("eventSourceARN")
        elif "s3" in record:
            # NOTE(review): unreachable when configurationId contained ":" —
            # the branch at the top already returned; kept for other s3 shapes.
            arn = record["s3"]["bucket"]["arn"]

        if arn:
            return self.settings.AWS_EVENT_MAPPING.get(arn)

        return None

    def get_function_from_bot_intent_trigger(self, event):
        """
        For the given event build ARN and return the configured function
        """
        intent = event.get("currentIntent")
        if intent:
            intent = intent.get("name")
            if intent:
                return self.settings.AWS_BOT_EVENT_MAPPING.get(
                    "{}:{}".format(intent, event.get("invocationSource"))
                )

    def get_function_for_cognito_trigger(self, trigger):
        """
        Get the associated function to execute for a cognito trigger
        """
        print(
            "get_function_for_cognito_trigger",
            self.settings.COGNITO_TRIGGER_MAPPING,
            trigger,
            self.settings.COGNITO_TRIGGER_MAPPING.get(trigger),
        )
        return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)

    def handler(self, event, context):
        """
        An AWS Lambda function which parses specific API Gateway input into a
        WSGI request, feeds it to our WSGI app, processes the response, and returns
        that back to the API Gateway.

        Non-HTTP invocations (scheduled events, direct commands, AWS event
        sources, Lex/Cognito triggers, authorizers, CloudWatch logs) are
        detected by shape and dispatched before the WSGI path runs.
        """
        settings = self.settings

        # If in DEBUG mode, log all raw incoming events.
        if settings.DEBUG:
            logger.debug("Zappa Event: {}".format(event))

        # Set any API Gateway defined Stage Variables
        # as env vars
        if event.get("stageVariables"):
            for key in event["stageVariables"].keys():
                os.environ[str(key)] = event["stageVariables"][key]

        # This is the result of a keep alive, recertify
        # or scheduled event.
        if event.get("detail-type") == "Scheduled Event":

            whole_function = event["resources"][0].split("/")[-1].split("-")[-1]

            # This is a scheduled function.
            if "." in whole_function:
                app_function = self.import_module_and_get_function(whole_function)

                # Execute the function!
                return self.run_function(app_function, event, context)

            # Else, let this execute as it were.

        # This is a direct command invocation.
        elif event.get("command", None):

            whole_function = event["command"]
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            print("Result of %s:" % whole_function)
            print(result)
            return result

        # This is a direct, raw python invocation.
        # It's _extremely_ important we don't allow this event source
        # to be overridden by unsanitized, non-admin user input.
        elif event.get("raw_command", None):

            raw_command = event["raw_command"]
            exec(raw_command)
            return

        # This is a Django management command invocation.
        elif event.get("manage", None):

            from django.core import management

            try:  # Support both for tests
                from zappa.ext.django_zappa import get_django_wsgi
            except ImportError as e:  # pragma: no cover
                from django_zappa_app import get_django_wsgi

            # Get the Django WSGI app from our extension
            # We don't actually need the function,
            # but we do need to do all of the required setup for it.
            app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)

            # Couldn't figure out how to get the value into stdout with StringIO..
            # Read the log for now. :[]
            management.call_command(*event["manage"].split(" "))
            return {}

        # This is an AWS-event triggered invocation.
        elif event.get("Records", None):

            records = event.get("Records")
            result = None
            whole_function = self.get_function_for_aws_event(records[0])
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # this is an AWS-event triggered from Lex bot's intent
        elif event.get("bot"):
            result = None
            whole_function = self.get_function_from_bot_intent_trigger(event)
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # This is an API Gateway authorizer event
        elif event.get("type") == "TOKEN":
            whole_function = self.settings.AUTHORIZER_FUNCTION
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                policy = self.run_function(app_function, event, context)
                return policy
            else:
                logger.error(
                    "Cannot find a function to process the authorization request."
                )
                raise Exception("Unauthorized")

        # This is an AWS Cognito Trigger Event
        elif event.get("triggerSource", None):
            triggerSource = event.get("triggerSource")
            whole_function = self.get_function_for_cognito_trigger(triggerSource)
            result = event
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error(
                    "Cannot find a function to handle cognito trigger {}".format(
                        triggerSource
                    )
                )
            return result

        # This is a CloudWatch event
        # Related: https://github.com/Miserlou/Zappa/issues/1924
        elif event.get("awslogs", None):
            result = None
            whole_function = "{}.{}".format(settings.APP_MODULE, settings.APP_FUNCTION)
            app_function = self.import_module_and_get_function(whole_function)
            if app_function:
                result = self.run_function(app_function, event, context)
                logger.debug("Result of %s:" % whole_function)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # Normal web app flow
        try:
            # Timing
            time_start = datetime.datetime.now()

            # This is a normal HTTP request
            if event.get("httpMethod", None):
                script_name = ""
                is_elb_context = False
                headers = merge_headers(event)

                if event.get("requestContext", None) and event["requestContext"].get(
                    "elb", None
                ):
                    # Related: https://github.com/Miserlou/Zappa/issues/1715
                    # inputs/outputs for lambda loadbalancer
                    # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
                    is_elb_context = True
                    # host is lower-case when forwarded from ELB
                    host = headers.get("host")
                    # TODO: pathParameters is a first-class citizen in apigateway but not available without
                    # some parsing work for ELB (is this parameter used for anything?)
                    event["pathParameters"] = ""
                else:
                    if headers:
                        host = headers.get("Host")
                    else:
                        host = None
                    logger.debug("host found: [{}]".format(host))

                    if host:
                        if "amazonaws.com" in host:
                            logger.debug("amazonaws found in host")
                            # The path provided in th event doesn't include the
                            # stage, so we must tell Flask to include the API
                            # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
                            script_name = "/" + settings.API_STAGE
                    else:
                        # This is a test request sent from the AWS console
                        if settings.DOMAIN:
                            # Assume the requests received will be on the specified
                            # domain. No special handling is required
                            pass
                        else:
                            # Assume the requests received will be to the
                            # amazonaws.com endpoint, so tell Flask to include the
                            # API stage
                            script_name = "/" + settings.API_STAGE

                base_path = getattr(settings, "BASE_PATH", None)

                # Create the environment for WSGI and handle the request
                environ = create_wsgi_request(
                    event,
                    script_name=script_name,
                    base_path=base_path,
                    trailing_slash=self.trailing_slash,
                    binary_support=settings.BINARY_SUPPORT,
                    context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS,
                )

                # We are always on https on Lambda, so tell our wsgi app that.
                environ["HTTPS"] = "on"
                environ["wsgi.url_scheme"] = "https"
                environ["lambda.context"] = context
                environ["lambda.event"] = event

                # Execute the application
                with Response.from_app(self.wsgi_app, environ) as response:
                    # This is the object we're going to return.
                    # Pack the WSGI response into our special dictionary.
                    zappa_returndict = dict()

                    # Issue #1715: ALB support. ALB responses must always include
                    # base64 encoding and status description
                    if is_elb_context:
                        zappa_returndict.setdefault("isBase64Encoded", False)
                        zappa_returndict.setdefault(
                            "statusDescription", response.status
                        )

                    if response.data:
                        if (
                            settings.BINARY_SUPPORT
                            and not response.mimetype.startswith("text/")
                            and response.mimetype != "application/json"
                        ):
                            zappa_returndict["body"] = base64.b64encode(
                                response.data
                            ).decode("utf-8")
                            zappa_returndict["isBase64Encoded"] = True
                        else:
                            zappa_returndict["body"] = response.get_data(as_text=True)

                    zappa_returndict["statusCode"] = response.status_code
                    if "headers" in event:
                        zappa_returndict["headers"] = {}
                        for key, value in response.headers:
                            zappa_returndict["headers"][key] = value
                    if "multiValueHeaders" in event:
                        zappa_returndict["multiValueHeaders"] = {}
                        for key, value in response.headers:
                            zappa_returndict["multiValueHeaders"][
                                key
                            ] = response.headers.getlist(key)

                    # Calculate the total response time,
                    # and log it in the Common Log format.
                    time_end = datetime.datetime.now()
                    delta = time_end - time_start
                    response_time_ms = delta.total_seconds() * 1000
                    response.content = response.data
                    common_log(environ, response, response_time=response_time_ms)

                    return zappa_returndict
        except Exception as e:  # pragma: no cover

            # Print statements are visible in the logs either way
            print(e)
            exc_info = sys.exc_info()
            message = (
                "An uncaught exception happened while servicing this request. "
                "You can investigate this with the `zappa tail` command."
            )

            # If we didn't even build an app_module, just raise.
            if not settings.DJANGO_SETTINGS:
                try:
                    self.app_module
                except NameError as ne:
                    # NOTE(review): attribute access cannot raise NameError and
                    # Exception.message no longer exists on Python 3 — this
                    # fallback looks unreachable; confirm before relying on it.
                    message = "Failed to import module: {}".format(ne.message)

            # Call exception handler for unhandled exceptions
            exception_handler = self.settings.EXCEPTION_HANDLER
            self._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=e,
            )

            # Return this unspecified exception as a 500, using template that API Gateway expects.
            content = collections.OrderedDict()
            content["statusCode"] = 500
            body = {"message": message}
            if settings.DEBUG:  # only include traceback if debug is on.
                body["traceback"] = traceback.format_exception(
                    *exc_info
                )  # traceback as a list for readability.
            content["body"] = json.dumps(str(body), sort_keys=True, indent=4)
            return content
def lambda_handler(event, context):  # pragma: no cover
    """Module-level AWS Lambda entry point; delegates to the LambdaHandler singleton."""
    return LambdaHandler.lambda_handler(event, context)
def keep_warm_callback(event, context):
    """Triggered by the scheduled CloudWatch event when the keep_warm setting is true."""
    # Override the event with an empty one so that only web-app
    # initialization is triggered.
    lambda_handler(event={}, context=context)
import base64
import io
import logging
import sys
from urllib.parse import unquote, urlencode

import six
from requestlogger import ApacheFormatter
from werkzeug import urls

from .utilities import merge_headers, titlecase_keys
# HTTP methods whose request bodies may carry binary payloads (everything that
# can have a body; GET/HEAD are excluded).
BINARY_METHODS = ["POST", "PUT", "PATCH", "DELETE", "CONNECT", "OPTIONS"]
def create_wsgi_request(
    event_info,
    server_name="zappa",
    script_name=None,
    trailing_slash=True,
    binary_support=False,
    base_path=None,
    context_header_mappings=None,
):
    """
    Given some event_info via API Gateway,
    create and return a valid WSGI request environ.

    :param event_info: the API Gateway / ALB Lambda proxy integration event.
    :param server_name: value used for SERVER_NAME in the environ.
    :param script_name: WSGI SCRIPT_NAME (e.g. "/<stage>"); may be replaced
        when base_path is given.
    :param trailing_slash: accepted for interface compatibility; not used here.
    :param binary_support: when True, bodies of non-GET methods may be
        base64-decoded binary payloads.
    :param base_path: optional path prefix stripped from the request path.
    :param context_header_mappings: optional {header: "dotted.context.path"}
        mapping copied from requestContext into the request headers.
        (Fixed: was a mutable default argument ``{}``.)
    """
    method = event_info["httpMethod"]
    headers = (
        merge_headers(event_info) or {}
    )  # Allow for the AGW console 'Test' button to work (Pull #735)

    """
    API Gateway and ALB both started allowing for multi-value querystring
    params in Nov. 2018. If there aren't multi-value params present, then
    it acts identically to 'queryStringParameters', so we can use it as a
    drop-in replacement.

    The one caveat here is that ALB will only include _one_ of
    queryStringParameters _or_ multiValueQueryStringParameters, which means
    we have to check for the existence of one and then fall back to the
    other.
    """
    if "multiValueQueryStringParameters" in event_info:
        query = event_info["multiValueQueryStringParameters"]
        query_string = urlencode(query, doseq=True) if query else ""
    else:
        query = event_info.get("queryStringParameters", {})
        query_string = urlencode(query) if query else ""

    if context_header_mappings:
        for key, value in context_header_mappings.items():
            parts = value.split(".")
            header_val = event_info["requestContext"]
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val

    # Extract remote user from context if Authorizer is enabled
    remote_user = None
    if event_info["requestContext"].get("authorizer"):
        remote_user = event_info["requestContext"]["authorizer"].get("principalId")
    elif event_info["requestContext"].get("identity"):
        remote_user = event_info["requestContext"]["identity"].get("userArn")

    # Related: https://github.com/Miserlou/Zappa/issues/677
    #          https://github.com/Miserlou/Zappa/issues/683
    #          https://github.com/Miserlou/Zappa/issues/696
    #          https://github.com/Miserlou/Zappa/issues/836
    #          https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get("isBase64Encoded", False):
            encoded_body = event_info["body"]
            body = base64.b64decode(encoded_body)
        else:
            body = event_info["body"]
            if isinstance(body, str):
                body = body.encode("utf-8")
    else:
        body = event_info["body"]
        if isinstance(body, str):
            body = body.encode("utf-8")

    # Make header names canonical, e.g. content-type => Content-Type
    # https://github.com/Miserlou/Zappa/issues/1188
    headers = titlecase_keys(headers)

    # Fixed: werkzeug.urls.url_unquote was removed in Werkzeug 2.3; the stdlib
    # unquote is equivalent for str input (utf-8, errors="replace").
    path = unquote(event_info["path"])
    if base_path:
        script_name = "/" + base_path

        if path.startswith(script_name):
            path = path[len(script_name) :]

    x_forwarded_for = headers.get("X-Forwarded-For", "")
    if "," in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        remote_addr = x_forwarded_for.split(", ")[-2]
    else:
        remote_addr = x_forwarded_for or "127.0.0.1"

    environ = {
        "PATH_INFO": get_wsgi_string(path),
        "QUERY_STRING": get_wsgi_string(query_string),
        "REMOTE_ADDR": remote_addr,
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": get_wsgi_string(str(script_name)) if script_name else "",
        "SERVER_NAME": str(server_name),
        "SERVER_PORT": headers.get("X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": str("HTTP/1.1"),
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": headers.get("X-Forwarded-Proto", "http"),
        "wsgi.input": body,
        "wsgi.errors": sys.stderr,
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
    }

    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if "Content-Type" in headers:
            environ["CONTENT_TYPE"] = headers["Content-Type"]

        # This must be Bytes or None
        environ["wsgi.input"] = io.BytesIO(body)
        if body:
            environ["CONTENT_LENGTH"] = str(len(body))
        else:
            environ["CONTENT_LENGTH"] = "0"

    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace("-", "_")
        environ[wsgi_name] = str(headers[header])

    if script_name:
        environ["SCRIPT_NAME"] = script_name
        path_info = environ["PATH_INFO"]

        if script_name in path_info:
            # Bug fix: str.replace returns a new string, but the original code
            # discarded the result, leaving the script name embedded in
            # PATH_INFO. Strip only the first occurrence so a stage name
            # appearing later in the path survives.
            environ["PATH_INFO"] = path_info.replace(script_name, "", 1)

    if remote_user:
        environ["REMOTE_USER"] = remote_user

    if event_info["requestContext"].get("authorizer"):
        environ["API_GATEWAY_AUTHORIZER"] = event_info["requestContext"]["authorizer"]

    return environ
def common_log(environ, response, response_time=None):
    """
    Log the given request/response pair in Apache Common Log Format and
    return the formatted entry.
    """
    log = logging.getLogger()
    status = response.status_code
    size = len(response.content)
    if response_time:
        formatter = ApacheFormatter(with_response_time=True)
        try:
            entry = formatter(status, environ, size, rt_us=response_time)
        except TypeError:
            # Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
            entry = formatter(status, environ, size, rt_ms=response_time)
    else:
        formatter = ApacheFormatter(with_response_time=False)
        entry = formatter(status, environ, size)
    log.info(entry)
    return entry
# Related: https://github.com/Miserlou/Zappa/issues/1199
def get_wsgi_string(string, encoding="utf-8"):
    """
    Return *string* re-decoded as ISO-8859-1, the byte-transparent
    representation WSGI expects.
    Related: https://github.com/Miserlou/Zappa/issues/1199
    """
    raw = string.encode(encoding)
    return raw.decode("iso-8859-1")
import importlib
import inspect
import json
import os
import time
import uuid
from functools import update_wrapper, wraps
import boto3
import botocore
from .utilities import get_topic_name
try:
    from zappa_settings import ASYNC_RESPONSE_TABLE
except ImportError:
    # No settings module available (e.g. during tests): response capture is
    # disabled until a table name is configured.
    ASYNC_RESPONSE_TABLE = None

# Declare these here so they're kept warm.
try:
    aws_session = boto3.Session()
    LAMBDA_CLIENT = aws_session.client("lambda")
    SNS_CLIENT = aws_session.client("sns")
    STS_CLIENT = aws_session.client("sts")
    DYNAMODB_CLIENT = aws_session.client("dynamodb")
except botocore.exceptions.NoRegionError as e:  # pragma: no cover
    # This can happen while testing on Travis, but it's taken care of
    # during class initialization.
    pass

##
# Response and Exception classes
##

# Maximum serialized payload sizes (bytes) accepted for async dispatch.
LAMBDA_ASYNC_PAYLOAD_LIMIT = 256000
SNS_ASYNC_PAYLOAD_LIMIT = 256000
class AsyncException(Exception):  # pragma: no cover
    """Raised when an asynchronous task cannot be dispatched."""
class LambdaAsyncResponse:
    """
    Base response dispatcher.

    Serialises a task invocation and sends it as a direct asynchronous
    Lambda call. Subclass and override _send() to change the transport.
    """

    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """ """
        boto_session = kwargs.get("boto_session")
        if boto_session:
            self.client = boto_session.client("lambda")
        else:  # pragma: no cover
            self.client = LAMBDA_CLIENT
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region

        # Only allocate a response id when capture is both requested and
        # actually configured; otherwise warn and disable it.
        if capture_response and ASYNC_RESPONSE_TABLE is None:
            print(
                "Warning! Attempted to capture a response without "
                "async_response_table configured in settings (you won't "
                "capture async responses)."
            )
            capture_response = False
            self.response_id = "MISCONFIGURED"
        elif capture_response:
            self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        self.capture_response = capture_response

    def send(self, task_path, args, kwargs):
        """
        Create the message object and pass it to the actual sender.
        """
        self._send(
            {
                "task_path": task_path,
                "capture_response": self.capture_response,
                "response_id": self.response_id,
                "args": args,
                "kwargs": kwargs,
            }
        )
        return self

    def _send(self, message):
        """
        Given a message, directly invoke the lambda function for this task.
        """
        message["command"] = "zappa.asynchronous.route_lambda_task"
        payload = json.dumps(message).encode("utf-8")
        if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for async Lambda call")
        self.response = self.client.invoke(
            FunctionName=self.lambda_function_name,
            InvocationType="Event",  # makes the call async
            Payload=payload,
        )
        self.sent = self.response.get("StatusCode", 0) == 202
class SnsAsyncResponse(LambdaAsyncResponse):
    """
    Send a SNS message to a specified SNS topic
    Serialise the func path and arguments
    """

    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """
        Resolve the SNS client and the target topic ARN.

        The ARN may be passed directly via ``kwargs['arn']``; otherwise it is
        derived from the caller's AWS account id and the function's topic name.
        """
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region

        if kwargs.get("boto_session"):
            self.client = kwargs.get("boto_session").client("sns")
        else:  # pragma: no cover
            self.client = SNS_CLIENT

        if kwargs.get("arn"):
            self.arn = kwargs.get("arn")
        else:
            if kwargs.get("boto_session"):
                sts_client = kwargs.get("boto_session").client("sts")
            else:
                sts_client = STS_CLIENT
            AWS_ACCOUNT_ID = sts_client.get_caller_identity()["Account"]
            self.arn = "arn:aws:sns:{region}:{account}:{topic_name}".format(
                region=self.aws_region,
                account=AWS_ACCOUNT_ID,
                topic_name=get_topic_name(self.lambda_function_name),
            )

        # Issue: https://github.com/Miserlou/Zappa/issues/1209
        # TODO: Refactor
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        # Fixed: this was previously assigned twice (once before the
        # misconfiguration check, where it could hold a stale True); a single
        # assignment after the check is sufficient and equivalent.
        self.capture_response = capture_response

    def _send(self, message):
        """
        Given a message, publish to this topic.
        """
        message["command"] = "zappa.asynchronous.route_sns_task"
        payload = json.dumps(message).encode("utf-8")
        # Fixed: compare against the SNS limit rather than the Lambda one.
        # The two constants are currently equal, so behavior is unchanged.
        if len(payload) > SNS_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for SNS")
        self.response = self.client.publish(TargetArn=self.arn, Message=payload)
        self.sent = self.response.get("MessageId")
##
# Async Routers
##
# Maps the `service` name accepted by run()/@task to its dispatcher class.
ASYNC_CLASSES = {
    "lambda": LambdaAsyncResponse,
    "sns": SnsAsyncResponse,
}
def route_lambda_task(event, context):
    """
    Handler-side router for direct async Lambda invocations.

    The invocation event itself *is* the task message passed to
    zappa.handler.run_function, so it can be dispatched as-is: import the
    target function and call it with the message's args/kwargs.
    """
    return run_message(event)
def route_sns_task(event, context):
    """
    Handler-side router for SNS-delivered tasks.

    Unwraps the first SNS record, JSON-decodes its message body, then
    imports and invokes the target function with the decoded args/kwargs.
    """
    sns_record = event["Records"][0]["Sns"]
    return run_message(json.loads(sns_record["Message"]))
def run_message(message):
    """
    Execute the task described by *message*.

    The message dict carries 'task_path', 'args' and 'kwargs' (plus a
    'command' key consumed by handler.py for routing). When
    'capture_response' is truthy, an "in progress" record is written to the
    async-response DynamoDB table before the call, and the JSON-serialised
    result is stored under 'response_id' afterwards.

    Returns the wrapped function's return value.
    """
    capture = message.get("capture_response", False)

    if capture:
        # Mark the task as started before running it (10-minute TTL).
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                "id": {"S": str(message["response_id"])},
                "ttl": {"N": str(int(time.time() + 600))},
                "async_status": {"S": "in progress"},
                "async_response": {"S": str(json.dumps("N/A"))},
            },
        )

    task = import_and_get_task(message["task_path"])
    # @task-wrapped functions expose the original callable as `.sync`;
    # call that to avoid re-dispatching asynchronously.
    runner = task.sync if hasattr(task, "sync") else task
    response = runner(*message["args"], **message["kwargs"])

    if capture:
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={"id": {"S": str(message["response_id"])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ":r": {"S": str(json.dumps(response))},
                ":s": {"S": "complete"},
            },
        )

    return response
##
# Execution interfaces and classes
##
def run(
    func,
    args=None,
    kwargs=None,
    service="lambda",
    capture_response=False,
    remote_aws_lambda_function_name=None,
    remote_aws_region=None,
    **task_kwargs
):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:

    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)

    If you want to use SNS, then do:

    zappa.asynchronous.run(func, args, kwargs, service='sns')

    and other arguments are similar to @task

    Returns the dispatcher instance's send() result.
    """
    # Fix: avoid mutable default arguments ([] / {}) shared across calls.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs

    lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
        "AWS_LAMBDA_FUNCTION_NAME"
    )
    aws_region = remote_aws_region or os.environ.get("AWS_REGION")

    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](
        lambda_function_name=lambda_function_name,
        aws_region=aws_region,
        capture_response=capture_response,
        **task_kwargs
    ).send(task_path, args, kwargs)
# Handy:
# http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function
# However, this needs to pass inspect.getargspec() in handler.py which does not take classes
# Wrapper written to take optional arguments
# http://chase-seibert.github.io/blog/2013/12/17/python-decorator-optional-parameter.html
def task(*args, **kwargs):
    """Async task decorator: dispatch the wrapped function asynchronously.

    Args:
        func (function): the function to be wrapped
            Further requirements:
            func must be an independent top-level function.
            i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against

    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question
    """
    # Supports both bare usage (@task) and parameterised usage (@task(...)).
    # In the bare case the single positional argument is the function itself.
    func = None
    if len(args) == 1 and callable(args[0]):
        func = args[0]

    if not kwargs:  # Default Values
        service = "lambda"
        lambda_function_name_arg = None
        aws_region_arg = None

    else:  # Arguments were passed
        service = kwargs.get("service", "lambda")
        lambda_function_name_arg = kwargs.get("remote_aws_lambda_function_name")
        aws_region_arg = kwargs.get("remote_aws_region")

    # .get() on an empty dict is safe, so this works for both branches above.
    capture_response = kwargs.get("capture_response", False)

    def func_wrapper(func):

        # Resolved once at decoration time, not per call.
        task_path = get_func_task_path(func)

        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func

            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.

            Returns:
                In async mode, the object returned includes state of the dispatch.
                For instance

                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get(
                "AWS_LAMBDA_FUNCTION_NAME"
            )
            aws_region = aws_region_arg or os.environ.get("AWS_REGION")

            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](
                    lambda_function_name=lambda_function_name,
                    aws_region=aws_region,
                    capture_response=capture_response,
                ).send(task_path, args, kwargs)
                return send_result
            else:
                # Not running in (or targeting) Lambda: degrade to a plain
                # synchronous call.
                return func(*args, **kwargs)

        update_wrapper(_run_async, func)

        # Expose dispatch metadata and the original (synchronous) callable;
        # run_message() uses `.sync` to avoid re-dispatching.
        _run_async.service = service
        _run_async.sync = func

        return _run_async

    # Bare decorator -> wrap immediately; parameterised -> return the wrapper.
    return func_wrapper(func) if func else func_wrapper
def task_sns(func):
    """
    SNS-based task dispatcher. Functions the same way as task(),
    i.e. shorthand for ``@task(service="sns")``.
    """
    sns_wrapped = task(func, service="sns")
    return sns_wrapped
##
# Utility Functions
##
def import_and_get_task(task_path):
    """
    Resolve a dotted path like ``package.module.function`` to the function
    object, importing the containing module as a side effect.
    """
    module_path, _, function_name = task_path.rpartition(".")
    target_module = importlib.import_module(module_path)
    return getattr(target_module, function_name)
def get_func_task_path(func):
    """
    Build the dotted ``module.function`` task path for *func* via inspection.
    """
    containing_module = inspect.getmodule(func)
    return "{module_path}.{func_name}".format(
        module_path=containing_module.__name__,
        func_name=func.__name__,
    )
def get_async_response(response_id):
    """
    Fetch the status/response record for *response_id* from the async
    response DynamoDB table.

    Returns a dict with 'status' and the JSON-decoded 'response', or None
    when no record exists for that id.
    """
    lookup = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE, Key={"id": {"S": str(response_id)}}
    )
    item = lookup.get("Item")
    if item is None:
        return None
    return {
        "status": item["async_status"]["S"],
        "response": json.loads(item["async_response"]["S"]),
    }
import getpass
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
import boto3
import botocore
import requests
import troposphere
import troposphere.apigateway
from botocore.exceptions import ClientError
from setuptools import find_packages
from tqdm import tqdm
from .utilities import (
add_event_source,
conflicts_with_a_neighbouring_module,
contains_python_files_or_subdirs,
copytree,
get_topic_name,
get_venv_from_python_version,
human_size,
remove_event_source,
)
##
# Logging Config
##

# Module-level logger; INFO by default so deployment progress is visible
# from the CLI.
logging.basicConfig(format="%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##
# Policies And Template Mappings
##
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
# Regions where API Gateway is available.
# Fix: removed the duplicate "eu-north-1" entry.
API_GATEWAY_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
# Regions where AWS Lambda is available.
# Fix: removed the duplicate "eu-north-1" entry.
LAMBDA_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# Glob patterns stripped from every deployment package -- these never need
# to ship to Lambda.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
ZIP_EXCLUDES = [
    "*.exe",
    "*.DS_Store",
    "*.Python",
    "*.git",
    ".git/*",
    "*.zip",
    "*.tar.gz",
    "*.hg",
    "pip",
    "docutils*",
    # NOTE(review): "setuputils*" looks like a typo of "setuptools*" --
    # confirm intent before changing, since changing it would start
    # excluding setuptools from packages.
    "setuputils*",
    "__pycache__/*",
]

# When using ALB as an event source for Lambdas, we need to create an alias
# to ensure that, on zappa update, the ALB doesn't lose permissions to access
# the Lambda.
# See: https://github.com/Miserlou/Zappa/pull/1730
ALB_LAMBDA_ALIAS = "current-alb-version"
##
# Classes
##
class Zappa:
    """
    Zappa!

    Makes it easy to run Python web applications on AWS Lambda/API Gateway.
    """

    ##
    # Configurables
    ##

    # HTTP methods proxied through API Gateway to the handler.
    http_methods = ["ANY"]
    # Default name for the IAM execution role; overridable via __init__.
    role_name = "ZappaLambdaExecution"
    extra_permissions = None
    # IAM policy documents (JSON strings) applied to the execution role.
    assume_policy = ASSUME_POLICY
    attach_policy = ATTACH_POLICY
    apigateway_policy = None
    cloudwatch_log_levels = ["OFF", "ERROR", "INFO"]
    xray_tracing = False

    ##
    # Credentials
    ##

    boto_session = None
    # ARN of the execution role; populated by __init__ or credential loading.
    credentials_arn = None
    def __init__(
        self,
        boto_session=None,
        profile_name=None,
        aws_region=None,
        load_credentials=True,
        desired_role_name=None,
        desired_role_arn=None,
        runtime="python3.6",  # Detected at runtime in CLI
        tags=(),
        # NOTE(review): mutable default dict -- harmless here since it is
        # never mutated, only stored, but worth confirming/cleaning up.
        endpoint_urls={},
        xray_tracing=False,
    ):
        """
        Instantiate this new Zappa instance, loading any custom credentials if necessary.
        """
        # Set aws_region to None to use the system's region instead
        if aws_region is None:
            # https://github.com/Miserlou/Zappa/issues/413
            self.aws_region = boto3.Session().region_name
            logger.debug("Set region from boto: %s", self.aws_region)
        else:
            self.aws_region = aws_region

        if desired_role_name:
            self.role_name = desired_role_name

        if desired_role_arn:
            self.credentials_arn = desired_role_arn

        self.runtime = runtime

        # Pick the CPython ABI tag used to match manylinux wheel filenames.
        if self.runtime == "python3.6":
            self.manylinux_suffix_start = "cp36m"
        elif self.runtime == "python3.7":
            self.manylinux_suffix_start = "cp37m"
        else:
            # The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
            # See https://github.com/pypa/manylinux for a more detailed explanation
            self.manylinux_suffix_start = "cp38"

        # AWS Lambda supports manylinux1/2010 and manylinux2014
        manylinux_suffixes = ("2014", "2010", "1")
        self.manylinux_wheel_file_match = re.compile(
            f'^.*{self.manylinux_suffix_start}-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
        )
        # abi3 wheels are compatible across CPython 3.x minor versions.
        self.manylinux_wheel_abi3_file_match = re.compile(
            f'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
        )

        self.endpoint_urls = endpoint_urls
        self.xray_tracing = xray_tracing

        # Some common invocations, such as DB migrations,
        # can take longer than the default.

        # Note that this is set to 300s, but if connected to
        # APIGW, Lambda will max out at 30s.
        # Related: https://github.com/Miserlou/Zappa/issues/205
        long_config_dict = {
            "region_name": aws_region,
            "connect_timeout": 5,
            "read_timeout": 300,
        }
        long_config = botocore.client.Config(**long_config_dict)

        if load_credentials:
            self.load_credentials(boto_session, profile_name)

            # Initialize clients
            self.s3_client = self.boto_client("s3")
            self.lambda_client = self.boto_client("lambda", config=long_config)
            self.elbv2_client = self.boto_client("elbv2")
            self.events_client = self.boto_client("events")
            self.apigateway_client = self.boto_client("apigateway")
            # AWS ACM certificates need to be created from us-east-1 to be used by API gateway
            east_config = botocore.client.Config(region_name="us-east-1")
            self.acm_client = self.boto_client("acm", config=east_config)
            self.logs_client = self.boto_client("logs")
            self.iam_client = self.boto_client("iam")
            self.iam = self.boto_resource("iam")
            self.cloudwatch = self.boto_client("cloudwatch")
            self.route53 = self.boto_client("route53")
            self.sns_client = self.boto_client("sns")
            self.cf_client = self.boto_client("cloudformation")
            self.dynamodb_client = self.boto_client("dynamodb")
            self.cognito_client = self.boto_client("cognito-idp")
            self.sts_client = self.boto_client("sts")

        self.tags = tags
        # CloudFormation template assembled incrementally by later calls.
        self.cf_template = troposphere.Template()
        self.cf_api_resources = []
        self.cf_parameters = {}
def configure_boto_session_method_kwargs(self, service, kw):
"""Allow for custom endpoint urls for non-AWS (testing and bootleg cloud) deployments"""
if service in self.endpoint_urls and not "endpoint_url" in kw:
kw["endpoint_url"] = self.endpoint_urls[service]
return kw
def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
    def cache_param(self, value):
        """Returns a troposphere Ref to a value cached as a parameter.

        Each distinct value gets one CloudFormation template parameter,
        re-used on subsequent calls with the same value.
        """
        if value not in self.cf_parameters:
            # Parameter names are single letters A, B, C, ...
            # NOTE(review): this breaks past 26 cached parameters (names run
            # beyond 'Z' into non-alphanumeric characters) -- confirm the
            # practical parameter count stays small.
            keyname = chr(ord("A") + len(self.cf_parameters))
            # NOTE(review): `tags` is not a documented troposphere.Parameter
            # property -- verify it is accepted/ignored as intended.
            param = self.cf_template.add_parameter(
                troposphere.Parameter(
                    keyname, Type="String", Default=value, tags=self.tags
                )
            )

            self.cf_parameters[value] = param

        return troposphere.Ref(self.cf_parameters[value])
##
# Packaging
##
    def copy_editable_packages(self, egg_links, temp_package_path):
        """
        Copy pip "editable install" packages into the staging directory.

        For each `.egg-link` file, read the real source path it points to,
        discover the top-level packages there, and copy them into
        *temp_package_path*. The egg-link files themselves are then removed
        from the staging directory.
        """
        for egg_link in egg_links:
            with open(egg_link, "rb") as df:
                # First line of an .egg-link is the package's source directory.
                egg_path = df.read().decode("utf-8").splitlines()[0].strip()
                pkgs = set(
                    [
                        x.split(".")[0]
                        for x in find_packages(egg_path, exclude=["test", "tests"])
                    ]
                )
                for pkg in pkgs:
                    copytree(
                        os.path.join(egg_path, pkg),
                        os.path.join(temp_package_path, pkg),
                        metadata=False,
                        symlinks=False,
                    )

        if temp_package_path:
            # now remove any egg-links as they will cause issues if they still exist
            for link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
                os.remove(link)
def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(
pkg_name=req.project_name, installed_distros=installed_distros
)
return list(set(deps)) # de-dupe before returning
    def create_handler_venv(self):
        """
        Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

        Returns the path to the new "handler_venv" folder.
        """
        import subprocess

        # We will need the current venv to pull Zappa from
        current_venv = self.get_current_venv()

        # Make a new folder for the handler packages
        ve_path = os.path.join(os.getcwd(), "handler_venv")

        # site-packages lives in a different location on Windows.
        if os.sys.platform == "win32":
            current_site_packages_dir = os.path.join(
                current_venv, "Lib", "site-packages"
            )
            venv_site_packages_dir = os.path.join(ve_path, "Lib", "site-packages")
        else:
            current_site_packages_dir = os.path.join(
                current_venv, "lib", get_venv_from_python_version(), "site-packages"
            )
            venv_site_packages_dir = os.path.join(
                ve_path, "lib", get_venv_from_python_version(), "site-packages"
            )

        if not os.path.isdir(venv_site_packages_dir):
            os.makedirs(venv_site_packages_dir)

        # Copy zappa* to the new virtualenv
        zappa_things = [
            z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == "zappa"
        ]
        for z in zappa_things:
            copytree(
                os.path.join(current_site_packages_dir, z),
                os.path.join(venv_site_packages_dir, z),
            )

        # Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
        zappa_deps = self.get_deps_list("zappa")
        pkg_list = ["{0!s}=={1!s}".format(dep, version) for dep, version in zappa_deps]

        # Need to manually add setuptools
        pkg_list.append("setuptools")
        command = [
            "pip",
            "install",
            "--quiet",
            "--target",
            venv_site_packages_dir,
        ] + pkg_list

        # This is the recommended method for installing packages if you don't
        # to depend on `setuptools`
        # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
        pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # Using communicate() to avoid deadlocks
        pip_process.communicate()
        pip_return_code = pip_process.returncode

        if pip_return_code:
            raise EnvironmentError("Pypi lookup failed")

        return ve_path
    # staticmethod as per https://github.com/Miserlou/Zappa/issues/780
    @staticmethod
    def get_current_venv():
        """
        Returns the path to the current virtualenv, or None when no
        virtualenv (or pyenv local version) can be detected.
        """
        if "VIRTUAL_ENV" in os.environ:
            venv = os.environ["VIRTUAL_ENV"]
        elif os.path.exists(".python-version"):  # pragma: no cover
            # pyenv-style local version file; requires the pyenv executable.
            try:
                subprocess.check_output(["pyenv", "help"], stderr=subprocess.STDOUT)
            except OSError:
                print(
                    "This directory seems to have pyenv's local venv, "
                    "but pyenv executable was not found."
                )
            with open(".python-version", "r") as f:
                # minor fix in how .python-version is read
                # Related: https://github.com/Miserlou/Zappa/issues/921
                env_name = f.readline().strip()
            bin_path = subprocess.check_output(["pyenv", "which", "python"]).decode(
                "utf-8"
            )
            # Derive the venv root by truncating the interpreter path at the
            # environment name.
            venv = bin_path[: bin_path.rfind(env_name)] + env_name
        else:  # pragma: no cover
            return None

        return venv
    def create_lambda_zip(
        self,
        prefix="lambda_package",
        handler_file=None,
        slim_handler=False,
        minify=True,
        exclude=None,
        exclude_glob=None,
        use_precompiled_packages=True,
        # NOTE(review): `include` is accepted but never used in this method --
        # confirm whether it is dead or consumed elsewhere.
        include=None,
        venv=None,
        output=None,
        disable_progress=False,
        archive_format="zip",
    ):
        """
        Create a Lambda-ready zip file of the current virtualenvironment and working directory.

        Stages the project and site-packages into temp directories, swaps in
        manylinux wheels where available, then archives everything as a zip
        or gzipped tarball.

        Returns path to that file.
        """
        # Validate archive_format
        if archive_format not in ["zip", "tarball"]:
            raise KeyError(
                "The archive format to create a lambda package must be zip or tarball"
            )

        # Pip is a weird package.
        # Calling this function in some environments without this can cause.. funkiness.
        import pip

        if not venv:
            venv = self.get_current_venv()

        build_time = str(int(time.time()))
        cwd = os.getcwd()
        if not output:
            if archive_format == "zip":
                archive_fname = prefix + "-" + build_time + ".zip"
            elif archive_format == "tarball":
                archive_fname = prefix + "-" + build_time + ".tar.gz"
        else:
            archive_fname = output
        archive_path = os.path.join(cwd, archive_fname)

        # Files that should be excluded from the zip
        if exclude is None:
            exclude = list()

        if exclude_glob is None:
            exclude_glob = list()

        # Exclude the zip itself
        exclude.append(archive_path)

        # Make sure that 'concurrent' is always forbidden.
        # https://github.com/Miserlou/Zappa/issues/827
        if not "concurrent" in exclude:
            exclude.append("concurrent")

        # Split a path into its normalized components.
        def splitpath(path):
            parts = []
            (path, tail) = os.path.split(path)
            while path and tail:
                parts.append(tail)
                (path, tail) = os.path.split(path)
            parts.append(os.path.join(path, tail))
            return list(map(os.path.normpath, parts))[::-1]

        split_venv = splitpath(venv)
        split_cwd = splitpath(cwd)

        # Ideally this should be avoided automatically,
        # but this serves as an okay stop-gap measure.
        if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
            print(
                "Warning! Your project and virtualenv have the same name! You may want "
                "to re-create your venv with a new name, or explicitly define a "
                "'project_name', as this may cause errors."
            )

        # First, do the project..
        temp_project_path = tempfile.mkdtemp(prefix="zappa-project")

        if not slim_handler:
            # Slim handler does not take the project files.
            if minify:
                # Related: https://github.com/Miserlou/Zappa/issues/744
                excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
                copytree(
                    cwd,
                    temp_project_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(cwd, temp_project_path, metadata=False, symlinks=False)
            for glob_path in exclude_glob:
                for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                    try:
                        os.remove(path)
                    except OSError:  # is a directory
                        shutil.rmtree(path)

        # If a handler_file is supplied, copy that to the root of the package,
        # because that's where AWS Lambda looks for it. It can't be inside a package.
        if handler_file:
            filename = handler_file.split(os.sep)[-1]
            shutil.copy(handler_file, os.path.join(temp_project_path, filename))

        # Create and populate package ID file and write to temp project path
        package_info = {}
        package_info["uuid"] = str(uuid.uuid4())
        package_info["build_time"] = build_time
        package_info["build_platform"] = os.sys.platform
        package_info["build_user"] = getpass.getuser()
        # TODO: Add git head and info?

        # Ex, from @scoates:
        # def _get_git_branch():
        #     chdir(DIR)
        #     out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
        #     lambci_branch = environ.get('LAMBCI_BRANCH', None)
        #     if out == "HEAD" and lambci_branch:
        #         out += " lambci:{}".format(lambci_branch)
        #     return out

        # def _get_git_hash():
        #     chdir(DIR)
        #     return check_output(['git', 'rev-parse', 'HEAD']).strip()

        # def _get_uname():
        #     return check_output(['uname', '-a']).strip()

        # def _get_user():
        #     return check_output(['whoami']).strip()

        # def set_id_info(zappa_cli):
        #     build_info = {
        #         'branch': _get_git_branch(),
        #         'hash': _get_git_hash(),
        #         'build_uname': _get_uname(),
        #         'build_user': _get_user(),
        #         'build_time': datetime.datetime.utcnow().isoformat(),
        #     }
        #     with open(path.join(DIR, 'id_info.json'), 'w') as f:
        #         json.dump(build_info, f)
        #     return True

        package_id_file = open(
            os.path.join(temp_project_path, "package_info.json"), "w"
        )
        dumped = json.dumps(package_info, indent=4)
        try:
            package_id_file.write(dumped)
        except TypeError:  # This is a Python 2/3 issue. TODO: Make pretty!
            package_id_file.write(str(dumped))
        package_id_file.close()

        # Then, do the site-packages..
        egg_links = []
        temp_package_path = tempfile.mkdtemp(prefix="zappa-packages")
        if os.sys.platform == "win32":
            site_packages = os.path.join(venv, "Lib", "site-packages")
        else:
            site_packages = os.path.join(
                venv, "lib", get_venv_from_python_version(), "site-packages"
            )
        egg_links.extend(glob.glob(os.path.join(site_packages, "*.egg-link")))

        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(
                site_packages,
                temp_package_path,
                metadata=False,
                symlinks=False,
                ignore=shutil.ignore_patterns(*excludes),
            )
        else:
            copytree(site_packages, temp_package_path, metadata=False, symlinks=False)

        # We may have 64-bin specific packages too.
        site_packages_64 = os.path.join(
            venv, "lib64", get_venv_from_python_version(), "site-packages"
        )
        if os.path.exists(site_packages_64):
            egg_links.extend(glob.glob(os.path.join(site_packages_64, "*.egg-link")))
            if minify:
                excludes = ZIP_EXCLUDES + exclude
                copytree(
                    site_packages_64,
                    temp_package_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(
                    site_packages_64, temp_package_path, metadata=False, symlinks=False
                )

        if egg_links:
            self.copy_editable_packages(egg_links, temp_package_path)

        # Merge staged packages over the staged project files.
        copy_tree(temp_package_path, temp_project_path, update=True)

        # Then the pre-compiled packages..
        if use_precompiled_packages:
            print("Downloading and installing dependencies..")
            installed_packages = self.get_installed_packages(
                site_packages, site_packages_64
            )

            try:
                for (
                    installed_package_name,
                    installed_package_version,
                ) in installed_packages.items():
                    cached_wheel_path = self.get_cached_manylinux_wheel(
                        installed_package_name,
                        installed_package_version,
                        disable_progress,
                    )
                    if cached_wheel_path:
                        # Otherwise try to use manylinux packages from PyPi..
                        # Related: https://github.com/Miserlou/Zappa/issues/398
                        shutil.rmtree(
                            os.path.join(temp_project_path, installed_package_name),
                            ignore_errors=True,
                        )
                        with zipfile.ZipFile(cached_wheel_path) as zfile:
                            zfile.extractall(temp_project_path)

            except Exception as e:
                print(e)
                # XXX - What should we do here?

        # Cleanup
        for glob_path in exclude_glob:
            for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                try:
                    os.remove(path)
                except OSError:  # is a directory
                    shutil.rmtree(path)

        # Then archive it all up..
        if archive_format == "zip":
            print("Packaging project as zip.")

            try:
                compression_method = zipfile.ZIP_DEFLATED
            except ImportError:  # pragma: no cover
                compression_method = zipfile.ZIP_STORED
            archivef = zipfile.ZipFile(archive_path, "w", compression_method)

        elif archive_format == "tarball":
            print("Packaging project as gzipped tarball.")
            archivef = tarfile.open(archive_path, "w|gz")

        for root, dirs, files in os.walk(temp_project_path):

            for filename in files:

                # Skip .pyc files for Django migrations
                # https://github.com/Miserlou/Zappa/issues/436
                # https://github.com/Miserlou/Zappa/issues/464
                if filename[-4:] == ".pyc" and root[-10:] == "migrations":
                    continue

                # If there is a .pyc file in this package,
                # we can skip the python source code as we'll just
                # use the compiled bytecode anyway..
                if filename[-3:] == ".py" and root[-10:] != "migrations":
                    abs_filname = os.path.join(root, filename)
                    abs_pyc_filename = abs_filname + "c"
                    if os.path.isfile(abs_pyc_filename):

                        # but only if the pyc is older than the py,
                        # otherwise we'll deploy outdated code!
                        py_time = os.stat(abs_filname).st_mtime
                        pyc_time = os.stat(abs_pyc_filename).st_mtime

                        if pyc_time > py_time:
                            continue

                # Make sure that the files are all correctly chmodded
                # Related: https://github.com/Miserlou/Zappa/issues/484
                # Related: https://github.com/Miserlou/Zappa/issues/682
                os.chmod(os.path.join(root, filename), 0o755)

                if archive_format == "zip":
                    # Actually put the file into the proper place in the zip
                    # Related: https://github.com/Miserlou/Zappa/pull/716
                    zipi = zipfile.ZipInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    zipi.create_system = 3
                    zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.writestr(zipi, f.read(), compression_method)
                elif archive_format == "tarball":
                    tarinfo = tarfile.TarInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    tarinfo.mode = 0o755

                    stat = os.stat(os.path.join(root, filename))
                    tarinfo.mtime = stat.st_mtime
                    tarinfo.size = stat.st_size
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.addfile(tarinfo, f)

            # Create python init file if it does not exist
            # Only do that if there are sub folders or python files and does not conflict with a neighbouring module
            # Related: https://github.com/Miserlou/Zappa/issues/766
            if not contains_python_files_or_subdirs(root):
                # if the directory does not contain any .py file at any level, we can skip the rest
                dirs[:] = [d for d in dirs if d != root]
            else:
                if (
                    "__init__.py" not in files
                    and not conflicts_with_a_neighbouring_module(root)
                ):
                    tmp_init = os.path.join(temp_project_path, "__init__.py")
                    open(tmp_init, "a").close()
                    os.chmod(tmp_init, 0o755)

                    arcname = os.path.join(
                        root.replace(temp_project_path, ""),
                        os.path.join(
                            root.replace(temp_project_path, ""), "__init__.py"
                        ),
                    )
                    if archive_format == "zip":
                        archivef.write(tmp_init, arcname)
                    elif archive_format == "tarball":
                        archivef.add(tmp_init, arcname)

        # And, we're done!
        archivef.close()

        # Trash the temp directory
        shutil.rmtree(temp_project_path)
        shutil.rmtree(temp_package_path)
        if os.path.isdir(venv) and slim_handler:
            # Remove the temporary handler venv folder
            shutil.rmtree(venv)

        return archive_fname
@staticmethod
def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages)
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64)
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {
package.project_name.lower(): package.version
for package in pkg_resources.WorkingSet()
if package.project_name.lower() in package_to_keep
or package.location.lower()
in [site_packages.lower(), site_packages_64.lower()]
}
return installed_packages
@staticmethod
def download_url_with_progress(url, stream, disable_progress):
"""
Downloads a given url in chunks and writes to the provided stream (can be any io stream).
Displays the progress bar for the download.
"""
resp = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 2)), stream=True
)
resp.raw.decode_content = True
progress = tqdm(
unit="B",
unit_scale=True,
total=int(resp.headers.get("Content-Length", 0)),
disable=disable_progress,
)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
progress.update(len(chunk))
stream.write(chunk)
progress.close()
def get_cached_manylinux_wheel(
self, package_name, package_version, disable_progress=False
):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), "cached_wheels")
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
else:
# Check if we already have a cached copy
wheel_name = re.sub("[^\w\d.]+", "_", package_name, re.UNICODE)
wheel_file = f"{wheel_name}-{package_version}-*_x86_64.whl"
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
for pathname in glob.iglob(wheel_path):
if re.match(self.manylinux_wheel_file_match, pathname) or re.match(
self.manylinux_wheel_abi3_file_match, pathname
):
print(
f" - {package_name}=={package_version}: Using locally cached manylinux wheel"
)
return pathname
# The file is not cached, download it.
wheel_url, filename = self.get_manylinux_wheel_url(
package_name, package_version
)
if not wheel_url:
return None
wheel_path = os.path.join(cached_wheels_dir, filename)
print(f" - {package_name}=={package_version}: Downloading")
with open(wheel_path, "wb") as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
return wheel_path
def get_manylinux_wheel_url(self, package_name, package_version):
"""
For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
"""
cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), "cached_pypi_info")
if not os.path.isdir(cached_pypi_info_dir):
os.makedirs(cached_pypi_info_dir)
# Even though the metadata is for the package, we save it in a
# filename that includes the package's version. This helps in
# invalidating the cached file if the user moves to a different
# version of the package.
# Related: https://github.com/Miserlou/Zappa/issues/899
json_file = "{0!s}-{1!s}.json".format(package_name, package_version)
json_file_path = os.path.join(cached_pypi_info_dir, json_file)
if os.path.exists(json_file_path):
with open(json_file_path, "rb") as metafile:
data = json.load(metafile)
else:
url = "https://pypi.python.org/pypi/{}/json".format(package_name)
try:
res = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 1.5))
)
data = res.json()
except Exception as e: # pragma: no cover
return None, None
with open(json_file_path, "wb") as metafile:
jsondata = json.dumps(data)
metafile.write(bytes(jsondata, "utf-8"))
if package_version not in data["releases"]:
return None, None
for f in data["releases"][package_version]:
if re.match(self.manylinux_wheel_file_match, f["filename"]):
return f["url"], f["filename"]
elif re.match(self.manylinux_wheel_abi3_file_match, f["filename"]):
return f["url"], f["filename"]
return None, None
##
# S3
##
    def upload_to_s3(self, source_path, bucket_name, disable_progress=False):
        r"""
        Given a file, upload it to S3.
        Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).
        Returns True on success, false on failure.

        If the bucket does not exist yet it is created (and tagged with
        ``self.tags`` when set) before the upload is attempted.
        """
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError:
            # head_bucket failed, so create the bucket.
            # This is really stupid S3 quirk. Technically, us-east-1 one has no S3,
            # it's actually "US Standard", or something.
            # More here: https://github.com/boto/boto3/issues/125
            if self.aws_region == "us-east-1":
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                )
            else:
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={"LocationConstraint": self.aws_region},
                )
            if self.tags:
                # Apply the configured tags to the newly created bucket.
                tags = {
                    "TagSet": [
                        {"Key": key, "Value": self.tags[key]}
                        for key in self.tags.keys()
                    ]
                }
                self.s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
        # Refuse to upload a missing or zero-byte package: an empty zip
        # would only produce a broken deployment downstream.
        if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
            print("Problem with source file {}".format(source_path))
            return False
        # The S3 key is just the file's basename.
        dest_path = os.path.split(source_path)[1]
        try:
            source_size = os.stat(source_path).st_size
            print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
            progress = tqdm(
                total=float(os.path.getsize(source_path)),
                unit_scale=True,
                unit="B",
                disable=disable_progress,
            )
            # Attempt to upload to S3 using the S3 meta client with the progress bar.
            # If we're unable to do that, try one more time using a session client,
            # which cannot use the progress bar.
            # Related: https://github.com/boto/boto3/issues/611
            try:
                self.s3_client.upload_file(
                    source_path, bucket_name, dest_path, Callback=progress.update
                )
            except Exception as e:  # pragma: no cover
                self.s3_client.upload_file(source_path, bucket_name, dest_path)
            progress.close()
        except (KeyboardInterrupt, SystemExit):  # pragma: no cover
            # Never swallow an explicit interrupt.
            raise
        except Exception as e:  # pragma: no cover
            print(e)
            return False
        return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
copy_src = {"Bucket": bucket_name, "Key": src_file_name}
try:
self.s3_client.copy(
CopySource=copy_src, Bucket=bucket_name, Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False
def remove_from_s3(self, file_name, bucket_name):
"""
Given a file name and a bucket, remove it from S3.
There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3.
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
try:
self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
except (
botocore.exceptions.ParamValidationError,
botocore.exceptions.ClientError,
): # pragma: no cover
return False
##
# Lambda
##
    def create_lambda_function(
        self,
        bucket=None,
        function_name=None,
        handler=None,
        s3_key=None,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        dead_letter_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        xray_tracing=False,
        local_zip=None,
        use_alb=False,
        layers=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, register that Lambda function.

        Code source is chosen in priority order: docker_image_uri (container
        image), then local_zip (inline zip bytes), then bucket/s3_key.
        Returns the new function's ARN.
        """
        # Normalize all optional collection-ish arguments to safe defaults.
        if not vpc_config:
            vpc_config = {}
        if not dead_letter_config:
            dead_letter_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not layers:
            layers = []
        kwargs = dict(
            FunctionName=function_name,
            Role=self.credentials_arn,
            Description=description,
            Timeout=timeout,
            MemorySize=memory_size,
            Publish=publish,
            VpcConfig=vpc_config,
            DeadLetterConfig=dead_letter_config,
            Environment={"Variables": aws_environment_variables},
            KMSKeyArn=aws_kms_key_arn,
            # NOTE(review): this consults self.xray_tracing, not the
            # xray_tracing parameter above (which is otherwise unused
            # here) -- confirm that is intentional.
            TracingConfig={"Mode": "Active" if self.xray_tracing else "PassThrough"},
            Layers=layers,
        )
        # Zip-packaged functions need Runtime/Handler; image-based ones must not set them.
        if not docker_image_uri:
            kwargs["Runtime"] = runtime
            kwargs["Handler"] = handler
            kwargs["PackageType"] = "Zip"
        if docker_image_uri:
            kwargs["Code"] = {"ImageUri": docker_image_uri}
            # default is ZIP. override to Image for container support
            kwargs["PackageType"] = "Image"
            # The create function operation times out when this is '' (the default)
            # So just remove it from the kwargs if it is not specified
            if aws_kms_key_arn == "":
                kwargs.pop("KMSKeyArn")
        elif local_zip:
            kwargs["Code"] = {"ZipFile": local_zip}
        else:
            kwargs["Code"] = {"S3Bucket": bucket, "S3Key": s3_key}
        response = self.lambda_client.create_function(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If we're using an ALB, let's create an alias mapped to the newly
        # created function. This allows clean, no downtime association when
        # using application load balancers as an event source.
        # See: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        if use_alb:
            self.lambda_client.create_alias(
                FunctionName=resource_arn,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        if concurrency is not None:
            # Reserve concurrent executions when explicitly requested.
            self.lambda_client.put_function_concurrency(
                FunctionName=resource_arn,
                ReservedConcurrentExecutions=concurrency,
            )
        return resource_arn
    def update_lambda_function(
        self,
        bucket,
        function_name,
        s3_key=None,
        publish=True,
        local_zip=None,
        num_revisions=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, update that Lambda function's code.
        Optionally, delete previous versions if they exceed the optional limit.

        Returns the function ARN from the update_function_code response.
        """
        print("Updating Lambda function code..")
        kwargs = dict(FunctionName=function_name, Publish=publish)
        # Code source priority: container image, then inline zip bytes,
        # then the S3 bucket/key pair.
        if docker_image_uri:
            kwargs["ImageUri"] = docker_image_uri
        elif local_zip:
            kwargs["ZipFile"] = local_zip
        else:
            kwargs["S3Bucket"] = bucket
            kwargs["S3Key"] = s3_key
        response = self.lambda_client.update_function_code(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If the lambda has an ALB alias, let's update the alias
        # to point to the newest version of the function. We have to use a GET
        # here, as there's no HEAD-esque call to retrieve metadata about a
        # function alias.
        # Related: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        try:
            response = self.lambda_client.get_alias(
                FunctionName=function_name,
                Name=ALB_LAMBDA_ALIAS,
            )
            alias_exists = True
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # Anything other than "alias not found" is a real error.
            if "ResourceNotFoundException" not in e.response["Error"]["Code"]:
                raise e
            alias_exists = False
        if alias_exists:
            self.lambda_client.update_alias(
                FunctionName=function_name,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=function_name,
                ReservedConcurrentExecutions=concurrency,
            )
        else:
            # No reservation requested: clear any existing one.
            self.lambda_client.delete_function_concurrency(FunctionName=function_name)
        if num_revisions:
            # Find the existing revision IDs for the given function
            # Related: https://github.com/Miserlou/Zappa/issues/1402
            versions_in_lambda = []
            versions = self.lambda_client.list_versions_by_function(
                FunctionName=function_name
            )
            for version in versions["Versions"]:
                versions_in_lambda.append(version["Version"])
            # Page through the full version listing.
            while "NextMarker" in versions:
                versions = self.lambda_client.list_versions_by_function(
                    FunctionName=function_name, Marker=versions["NextMarker"]
                )
                for version in versions["Versions"]:
                    versions_in_lambda.append(version["Version"])
            # $LATEST is not a deletable revision.
            versions_in_lambda.remove("$LATEST")
            # Delete older revisions if their number exceeds the specified limit
            for version in versions_in_lambda[::-1][num_revisions:]:
                self.lambda_client.delete_function(
                    FunctionName=function_name, Qualifier=version
                )
        return resource_arn
def update_lambda_configuration(
self,
lambda_arn,
function_name,
handler,
description="Zappa Deployment",
timeout=30,
memory_size=512,
publish=True,
vpc_config=None,
runtime="python3.6",
aws_environment_variables=None,
aws_kms_key_arn=None,
layers=None,
):
"""
Given an existing function ARN, update the configuration variables.
"""
print("Updating Lambda function configuration..")
if not vpc_config:
vpc_config = {}
if not self.credentials_arn:
self.get_credentials_arn()
if not aws_kms_key_arn:
aws_kms_key_arn = ""
if not aws_environment_variables:
aws_environment_variables = {}
if not layers:
layers = []
# Check if there are any remote aws lambda env vars so they don't get trashed.
# https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
lambda_aws_config = self.lambda_client.get_function_configuration(
FunctionName=function_name
)
if "Environment" in lambda_aws_config:
lambda_aws_environment_variables = lambda_aws_config["Environment"].get(
"Variables", {}
)
# Append keys that are remote but not in settings file
for key, value in lambda_aws_environment_variables.items():
if key not in aws_environment_variables:
aws_environment_variables[key] = value
kwargs = {
"FunctionName": function_name,
"Role": self.credentials_arn,
"Description": description,
"Timeout": timeout,
"MemorySize": memory_size,
"VpcConfig": vpc_config,
"Environment": {"Variables": aws_environment_variables},
"KMSKeyArn": aws_kms_key_arn,
"TracingConfig": {"Mode": "Active" if self.xray_tracing else "PassThrough"},
}
if lambda_aws_config["PackageType"] != "Image":
kwargs.update(
{
"Handler": handler,
"Runtime": runtime,
"Layers": layers,
}
)
response = self.lambda_client.update_function_configuration(**kwargs)
resource_arn = response["FunctionArn"]
if self.tags:
self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
return resource_arn
def invoke_lambda_function(
self,
function_name,
payload,
invocation_type="Event",
log_type="Tail",
client_context=None,
qualifier=None,
):
"""
Directly invoke a named Lambda function with a payload.
Returns the response.
"""
return self.lambda_client.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
LogType=log_type,
Payload=payload,
)
    def rollback_lambda_function_version(
        self, function_name, versions_back=1, publish=True
    ):
        """
        Rollback the lambda function code 'versions_back' number of revisions.
        Returns the Function ARN, or False when there are not enough
        revisions or the old code could not be fetched.
        """
        response = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
        # Rollback is unsupported for container deployments: the old code
        # cannot be re-uploaded as a zip.
        # https://github.com/Miserlou/Zappa/pull/2192
        if (
            len(response.get("Versions", [])) > 1
            and response["Versions"][-1]["PackageType"] == "Image"
        ):
            raise NotImplementedError(
                "Zappa's rollback functionality is not available for Docker based deployments"
            )
        # Take into account $LATEST
        if len(response["Versions"]) < versions_back + 1:
            print("We do not have {} revisions. Aborting".format(str(versions_back)))
            return False
        revisions = [
            int(revision["Version"])
            for revision in response["Versions"]
            if revision["Version"] != "$LATEST"
        ]
        # Newest first, so revisions[versions_back] is the target version number.
        revisions.sort(reverse=True)
        response = self.lambda_client.get_function(
            FunctionName="function:{}:{}".format(
                function_name, revisions[versions_back]
            )
        )
        # Download the old deployment package via the pre-signed URL
        # returned by get_function.
        response = requests.get(response["Code"]["Location"])
        if response.status_code != 200:
            print(
                "Failed to get version {} of {} code".format(
                    versions_back, function_name
                )
            )
            return False
        response = self.lambda_client.update_function_code(
            FunctionName=function_name, ZipFile=response.content, Publish=publish
        )  # pragma: no cover
        return response["FunctionArn"]
def is_lambda_function_ready(self, function_name):
"""
Checks if a lambda function is active and no updates are in progress.
"""
response = self.lambda_client.get_function(FunctionName=function_name)
return (
response["Configuration"]["State"] == "Active"
and response["Configuration"]["LastUpdateStatus"] != "InProgress"
)
def wait_until_lambda_function_is_ready(self, function_name):
"""
Continuously check if a lambda function is active.
For functions deployed with a docker image instead of a
ZIP package, the function can take a few seconds longer
to be created or update, so we must wait before running any status
checks against the function.
"""
show_waiting_message = True
while True:
if self.is_lambda_function_ready(function_name):
break
if show_waiting_message:
print("Waiting until lambda function is ready.")
show_waiting_message = False
time.sleep(1)
def get_lambda_function(self, function_name):
"""
Returns the lambda function ARN, given a name
This requires the "lambda:GetFunction" role.
"""
response = self.lambda_client.get_function(FunctionName=function_name)
return response["Configuration"]["FunctionArn"]
def get_lambda_function_versions(self, function_name):
"""
Simply returns the versions available for a Lambda function, given a function name.
"""
try:
response = self.lambda_client.list_versions_by_function(
FunctionName=function_name
)
return response.get("Versions", [])
except Exception:
return []
def delete_lambda_function(self, function_name):
"""
Given a function name, delete it from AWS Lambda.
Returns the response.
"""
print("Deleting Lambda function..")
return self.lambda_client.delete_function(
FunctionName=function_name,
)
##
# Application load balancer
##
    def deploy_lambda_alb(self, lambda_arn, lambda_name, alb_vpc_config, timeout):
        """
        The `zappa deploy` functionality for ALB infrastructure.

        Creates the load balancer, a lambda-type target group, grants the
        ALB invoke permission on the function's alias, registers the target,
        and binds an HTTPS listener. Raises EnvironmentError when required
        settings are missing or an AWS response is unexpected.
        """
        # Validate the required alb_vpc_config entries up front.
        if not alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, alb_vpc_config must be filled out in zappa_settings."
            )
        if "SubnetIds" not in alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, you must supply two subnets in different availability zones."
            )
        if "SecurityGroupIds" not in alb_vpc_config:
            alb_vpc_config["SecurityGroupIds"] = []
        if not alb_vpc_config.get("CertificateArn"):
            raise EnvironmentError(
                "When creating an ALB, you must supply a CertificateArn for the HTTPS listener."
            )
        # Related: https://github.com/Miserlou/Zappa/issues/1856
        if "Scheme" not in alb_vpc_config:
            alb_vpc_config["Scheme"] = "internet-facing"
        print("Deploying ALB infrastructure...")
        # Create load balancer
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
        kwargs = dict(
            Name=lambda_name,
            Subnets=alb_vpc_config["SubnetIds"],
            SecurityGroups=alb_vpc_config["SecurityGroupIds"],
            Scheme=alb_vpc_config["Scheme"],
            # TODO: Tags might be a useful means of stock-keeping zappa-generated assets.
            # Tags=[],
            Type="application",
            # TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.
            IpAddressType="ipv4",
        )
        response = self.elbv2_client.create_load_balancer(**kwargs)
        if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        if response["LoadBalancers"][0]["State"]["Code"] == "failed":
            raise EnvironmentError(
                "Failure to create application load balancer. Response reported a failed state: {}".format(
                    response["LoadBalancers"][0]["State"]["Reason"]
                )
            )
        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
        load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]
        waiter = self.elbv2_client.get_waiter("load_balancer_available")
        print(
            "Waiting for load balancer [{}] to become active..".format(
                load_balancer_arn
            )
        )
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        # Match the lambda timeout on the load balancer.
        self.elbv2_client.modify_load_balancer_attributes(
            LoadBalancerArn=load_balancer_arn,
            Attributes=[{"Key": "idle_timeout.timeout_seconds", "Value": str(timeout)}],
        )
        # Create/associate target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
        kwargs = dict(
            Name=lambda_name,
            TargetType="lambda",
            # TODO: Add options for health checks
        )
        response = self.elbv2_client.create_target_group(**kwargs)
        if not (response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
        # Enable multi-value headers by default.
        response = self.elbv2_client.modify_target_group_attributes(
            TargetGroupArn=target_group_arn,
            Attributes=[
                {"Key": "lambda.multi_value_headers.enabled", "Value": "true"},
            ],
        )
        # Allow execute permissions from target group to lambda.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
        kwargs = dict(
            Action="lambda:InvokeFunction",
            FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
            Principal="elasticloadbalancing.amazonaws.com",
            SourceArn=target_group_arn,
            StatementId=lambda_name,
        )
        response = self.lambda_client.add_permission(**kwargs)
        # Register target group to lambda association.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
        kwargs = dict(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}],
        )
        response = self.elbv2_client.register_targets(**kwargs)
        # Bind listener to load balancer with default rule to target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
        kwargs = dict(
            # TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
            Certificates=[{"CertificateArn": alb_vpc_config["CertificateArn"]}],
            DefaultActions=[
                {
                    "Type": "forward",
                    "TargetGroupArn": target_group_arn,
                }
            ],
            LoadBalancerArn=load_balancer_arn,
            Protocol="HTTPS",
            # TODO: Add option for custom ports
            Port=443,
            # TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
        )
        response = self.elbv2_client.create_listener(**kwargs)
        print("ALB created with DNS: {}".format(load_balancer_dns))
        print("Note it may take several minutes for load balancer to become available.")
    def undeploy_lambda_alb(self, lambda_name):
        """
        The `zappa undeploy` functionality for ALB infrastructure.

        Tears down, in order: the lambda invoke permission, the listener
        and load balancer, then the target group (after deregistering the
        lambda target). Each step tolerates "not found" errors so a
        partially deployed stack can still be undeployed.
        """
        print("Undeploying ALB infrastructure...")
        # Locate and delete alb/lambda permissions
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
            self.lambda_client.remove_permission(
                FunctionName=lambda_name, StatementId=lambda_name
            )
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # Permission already gone is fine; anything else is a real error.
            if "ResourceNotFoundException" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete load balancer
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
            response = self.elbv2_client.describe_load_balancers(Names=[lambda_name])
            if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
            response = self.elbv2_client.describe_listeners(
                LoadBalancerArn=load_balancer_arn
            )
            if not (response["Listeners"]):
                print("No listeners found.")
            elif len(response["Listeners"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            else:
                listener_arn = response["Listeners"][0]["ListenerArn"]
                # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
                # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
                response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
            # Remove the load balancer and wait for completion
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
            response = self.elbv2_client.delete_load_balancer(
                LoadBalancerArn=load_balancer_arn
            )
            waiter = self.elbv2_client.get_waiter("load_balancers_deleted")
            print("Waiting for load balancer [{}] to be deleted..".format(lambda_name))
            waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            # A missing load balancer just means there is nothing to delete.
            if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete target group
        try:
            # Locate the lambda ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
            response = self.lambda_client.get_function(FunctionName=lambda_name)
            lambda_arn = response["Configuration"]["FunctionArn"]
            # Locate the target group ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
            response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
            if not (response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB target group named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
            # Deregister targets and wait for completion
            self.elbv2_client.deregister_targets(
                TargetGroupArn=target_group_arn, Targets=[{"Id": lambda_arn}]
            )
            waiter = self.elbv2_client.get_waiter("target_deregistered")
            print("Waiting for target [{}] to be deregistered...".format(lambda_name))
            waiter.wait(
                TargetGroupArn=target_group_arn,
                Targets=[{"Id": lambda_arn}],
                WaiterConfig={"Delay": 3},
            )
            # Remove the target group
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
            self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            # A missing target group just means there is nothing to delete.
            if "TargetGroupNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
##
# API Gateway
##
    def create_api_gateway_routes(
        self,
        lambda_arn,
        api_name=None,
        api_key_required=False,
        authorization_type="NONE",
        authorizer=None,
        cors_options=None,
        description=None,
        endpoint_configuration=None,
    ):
        """
        Create the API Gateway for this Zappa deployment.
        Returns the new RestAPI CF resource.

        Builds the troposphere RestApi plus methods (and optional CORS and
        authorizer resources) on the root path and on a "{proxy+}" catch-all
        child resource, all added to self.cf_template.
        """
        restapi = troposphere.apigateway.RestApi("Api")
        restapi.Name = api_name or lambda_arn.split(":")[-1]
        if not description:
            description = "Created automatically by Zappa."
        restapi.Description = description
        endpoint_configuration = (
            [] if endpoint_configuration is None else endpoint_configuration
        )
        # GovCloud requires regional endpoints.
        if self.boto_session.region_name == "us-gov-west-1":
            endpoint_configuration.append("REGIONAL")
        if endpoint_configuration:
            endpoint = troposphere.apigateway.EndpointConfiguration()
            endpoint.Types = list(set(endpoint_configuration))
            restapi.EndpointConfiguration = endpoint
        if self.apigateway_policy:
            restapi.Policy = json.loads(self.apigateway_policy)
        self.cf_template.add_resource(restapi)
        root_id = troposphere.GetAtt(restapi, "RootResourceId")
        # GovCloud uses the "aws-us-gov" ARN partition.
        invocation_prefix = (
            "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
        )
        invocations_uri = (
            "arn:"
            + invocation_prefix
            + ":apigateway:"
            + self.boto_session.region_name
            + ":lambda:path/2015-03-31/functions/"
            + lambda_arn
            + "/invocations"
        )
        ##
        # The Resources
        ##
        authorizer_resource = None
        if authorizer:
            # The authorizer may use its own lambda; fall back to the main one.
            authorizer_lambda_arn = authorizer.get("arn", lambda_arn)
            lambda_uri = "arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations".format(
                invocation_prefix=invocation_prefix,
                region_name=self.boto_session.region_name,
                lambda_arn=authorizer_lambda_arn,
            )
            authorizer_resource = self.create_authorizer(
                restapi, lambda_uri, authorizer
            )
        # Methods on the root resource (depth 0).
        self.create_and_setup_methods(
            restapi,
            root_id,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            0,
        )
        if cors_options:
            self.create_and_setup_cors(
                restapi, root_id, invocations_uri, 0, cors_options
            )
        # Catch-all "{proxy+}" child resource and its methods (depth 1).
        resource = troposphere.apigateway.Resource("ResourceAnyPathSlashed")
        self.cf_api_resources.append(resource.title)
        resource.RestApiId = troposphere.Ref(restapi)
        resource.ParentId = root_id
        resource.PathPart = "{proxy+}"
        self.cf_template.add_resource(resource)
        self.create_and_setup_methods(
            restapi,
            resource,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            1,
        )  # pragma: no cover
        if cors_options:
            self.create_and_setup_cors(
                restapi, resource, invocations_uri, 1, cors_options
            )  # pragma: no cover
        return restapi
def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get("validation_expression", None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = (
"method.request.header.%s" % authorizer.get("token_header", "Authorization")
)
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = (
identity_validation_expression
)
if authorizer_type == "TOKEN":
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get(
"result_ttl", 300
)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == "COGNITO_USER_POOLS":
authorizer_resource.ProviderARNs = authorizer.get("provider_arns")
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource
    def create_and_setup_methods(
        self,
        restapi,
        resource,
        api_key_required,
        uri,
        authorization_type,
        authorizer_resource,
        depth,
    ):
        """
        Set up the methods, integration responses and method responses for a given API Gateway resource.

        One Method CF resource is created per HTTP verb in
        self.http_methods; ``depth`` is appended to the CF title so that
        root (0) and proxy (1) methods get distinct names.
        """
        for method_name in self.http_methods:
            method = troposphere.apigateway.Method(method_name + str(depth))
            method.RestApiId = troposphere.Ref(restapi)
            # `resource` is either a troposphere Resource (needs a Ref)
            # or an already-resolved resource id such as GetAtt(root).
            if type(resource) is troposphere.apigateway.Resource:
                method.ResourceId = troposphere.Ref(resource)
            else:
                method.ResourceId = resource
            method.HttpMethod = method_name.upper()
            method.AuthorizationType = authorization_type
            if authorizer_resource:
                method.AuthorizerId = troposphere.Ref(authorizer_resource)
            method.ApiKeyRequired = api_key_required
            method.MethodResponses = []
            self.cf_template.add_resource(method)
            self.cf_api_resources.append(method.title)
            if not self.credentials_arn:
                self.get_credentials_arn()
            credentials = self.credentials_arn  # This must be a Role ARN
            # AWS_PROXY integration: API Gateway always POSTs to Lambda,
            # regardless of the client-facing HTTP method.
            integration = troposphere.apigateway.Integration()
            integration.CacheKeyParameters = []
            integration.CacheNamespace = "none"
            integration.Credentials = credentials
            integration.IntegrationHttpMethod = "POST"
            integration.IntegrationResponses = []
            integration.PassthroughBehavior = "NEVER"
            integration.Type = "AWS_PROXY"
            integration.Uri = uri
            method.Integration = integration
    def create_and_setup_cors(self, restapi, resource, uri, depth, config):
        """
        Set up the methods, integration responses and method responses for a given API Gateway resource.

        Adds an OPTIONS method backed by a MOCK integration that answers
        CORS preflight requests with the configured (or default) allowed
        headers, methods and origin.
        """
        # `cors: true` in settings means "use all defaults".
        if config is True:
            config = {}
        method_name = "OPTIONS"
        method = troposphere.apigateway.Method(method_name + str(depth))
        method.RestApiId = troposphere.Ref(restapi)
        # `resource` is either a troposphere Resource (needs a Ref)
        # or an already-resolved resource id.
        if type(resource) is troposphere.apigateway.Resource:
            method.ResourceId = troposphere.Ref(resource)
        else:
            method.ResourceId = resource
        method.HttpMethod = method_name.upper()
        method.AuthorizationType = "NONE"
        method_response = troposphere.apigateway.MethodResponse()
        method_response.ResponseModels = {"application/json": "Empty"}
        # Header values must be wrapped in single quotes: API Gateway
        # treats them as static values, not mapping expressions.
        response_headers = {
            "Access-Control-Allow-Headers": "'%s'"
            % ",".join(
                config.get(
                    "allowed_headers",
                    [
                        "Content-Type",
                        "X-Amz-Date",
                        "Authorization",
                        "X-Api-Key",
                        "X-Amz-Security-Token",
                    ],
                )
            ),
            "Access-Control-Allow-Methods": "'%s'"
            % ",".join(
                config.get(
                    "allowed_methods",
                    ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"],
                )
            ),
            "Access-Control-Allow-Origin": "'%s'" % config.get("allowed_origin", "*"),
        }
        method_response.ResponseParameters = {
            "method.response.header.%s" % key: True for key in response_headers
        }
        method_response.StatusCode = "200"
        method.MethodResponses = [method_response]
        self.cf_template.add_resource(method)
        self.cf_api_resources.append(method.title)
        # MOCK integration: API Gateway answers the preflight itself,
        # without invoking the lambda.
        integration = troposphere.apigateway.Integration()
        integration.Type = "MOCK"
        integration.PassthroughBehavior = "NEVER"
        integration.RequestTemplates = {"application/json": '{"statusCode": 200}'}
        integration_response = troposphere.apigateway.IntegrationResponse()
        integration_response.ResponseParameters = {
            "method.response.header.%s" % key: value
            for key, value in response_headers.items()
        }
        integration_response.ResponseTemplates = {"application/json": ""}
        integration_response.StatusCode = "200"
        integration.IntegrationResponses = [integration_response]
        integration.Uri = uri
        method.Integration = integration
def deploy_api_gateway(
    self,
    api_id,
    stage_name,
    stage_description="",
    description="",
    cache_cluster_enabled=False,
    cache_cluster_size="0.5",
    variables=None,
    cloudwatch_log_level="OFF",
    cloudwatch_data_trace=False,
    cloudwatch_metrics_enabled=False,
    cache_cluster_ttl=300,
    cache_cluster_encrypted=False,
):
    """
    Create a deployment of the REST API on the given stage and apply the
    stage's CloudWatch/caching settings.

    Returns the public invoke URL for the deployed stage.
    """
    print("Deploying API Gateway..")

    # Fall back to OFF for any unrecognised CloudWatch log level.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = "OFF"

    # Publish the current API configuration to the requested stage.
    deployment_kwargs = {
        "restApiId": api_id,
        "stageName": stage_name,
        "stageDescription": stage_description,
        "description": description,
        "cacheClusterEnabled": cache_cluster_enabled,
        "cacheClusterSize": cache_cluster_size,
        "variables": variables or {},
    }
    self.apigateway_client.create_deployment(**deployment_kwargs)

    stage_patches = [
        self.get_patch_op("logging/loglevel", cloudwatch_log_level),
        self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
        self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
        self.get_patch_op("caching/ttlInSeconds", str(cache_cluster_ttl)),
        self.get_patch_op("caching/dataEncrypted", cache_cluster_encrypted),
    ]
    self.apigateway_client.update_stage(
        restApiId=api_id, stageName=stage_name, patchOperations=stage_patches
    )

    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def add_binary_support(self, api_id, cors=False):
    """
    Enable wildcard (*/*) binary media type support on the REST API.

    When CORS is enabled, also forces the mock OPTIONS integrations to
    CONVERT_TO_TEXT, since CORS and binary support otherwise conflict.
    """
    api = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "*/*" not in api.get("binaryMediaTypes", []):
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[{"op": "add", "path": "/binaryMediaTypes/*~1*"}],
        )

    if cors:
        # fix for issue 699 and 1035, cors+binary support don't work together
        # go through each resource and update the contentHandling type
        resources = self.apigateway_client.get_resources(restApiId=api_id)
        for item in resources["items"]:
            if "OPTIONS" not in item.get("resourceMethods", {}):
                continue
            self.apigateway_client.update_integration(
                restApiId=api_id,
                resourceId=item["id"],
                httpMethod="OPTIONS",
                patchOperations=[
                    {
                        "op": "replace",
                        "path": "/contentHandling",
                        "value": "CONVERT_TO_TEXT",
                    }
                ],
            )
def remove_binary_support(self, api_id, cors=False):
    """
    Remove wildcard (*/*) binary media type support from the REST API.

    When CORS is enabled, also clears the contentHandling override on the
    mock OPTIONS integrations that add_binary_support installed.
    """
    api = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "*/*" in api.get("binaryMediaTypes", []):
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[{"op": "remove", "path": "/binaryMediaTypes/*~1*"}],
        )

    if cors:
        # Reset contentHandling on every resource that carries an OPTIONS method.
        resources = self.apigateway_client.get_resources(restApiId=api_id)
        for item in resources["items"]:
            if "OPTIONS" not in item.get("resourceMethods", {}):
                continue
            self.apigateway_client.update_integration(
                restApiId=api_id,
                resourceId=item["id"],
                httpMethod="OPTIONS",
                patchOperations=[
                    {"op": "replace", "path": "/contentHandling", "value": ""}
                ],
            )
def add_api_compression(self, api_id, min_compression_size):
    """
    Enable payload compression on the REST API for responses of at least
    ``min_compression_size`` bytes.
    """
    patch = {
        "op": "replace",
        "path": "/minimumCompressionSize",
        "value": str(min_compression_size),
    }
    self.apigateway_client.update_rest_api(
        restApiId=api_id, patchOperations=[patch]
    )
def remove_api_compression(self, api_id):
    """
    Disable payload compression on the REST API.

    Replacing /minimumCompressionSize with no value clears the setting.
    """
    patch = {
        "op": "replace",
        "path": "/minimumCompressionSize",
    }
    self.apigateway_client.update_rest_api(
        restApiId=api_id, patchOperations=[patch]
    )
def get_api_keys(self, api_id, stage_name):
    """
    Generator that allows to iterate per API keys associated to an api_id and a stage_name.

    Yields the id of each key whose stageKeys contain "api_id/stage_name".
    """
    response = self.apigateway_client.get_api_keys(limit=500)
    stage_key = "{}/{}".format(api_id, stage_name)
    # Default to empty lists: "items" / "stageKeys" may be absent from the
    # response, and ``x in None`` would raise TypeError.
    for api_key in response.get("items", []):
        if stage_key in api_key.get("stageKeys", []):
            yield api_key.get("id")
def create_api_key(self, api_id, stage_name):
    """
    Create a new enabled API key and link it with an api_id and a stage_name.
    """
    stage_keys = [
        {
            "restApiId": "{}".format(api_id),
            "stageName": "{}".format(stage_name),
        },
    ]
    response = self.apigateway_client.create_api_key(
        name="{}_{}".format(stage_name, api_id),
        description="Api Key for {}".format(api_id),
        enabled=True,
        stageKeys=stage_keys,
    )
    print("Created a new x-api-key: {}".format(response["id"]))
def remove_api_key(self, api_id, stage_name):
    """
    Delete any API key previously generated for this api_id and stage_name.
    """
    matches = self.apigateway_client.get_api_keys(
        limit=1, nameQuery="{}_{}".format(stage_name, api_id)
    )
    for match in matches.get("items"):
        self.apigateway_client.delete_api_key(apiKey="{}".format(match["id"]))
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
    """
    Associate an additional API stage with an existing API key.
    """
    patch = {
        "op": "add",
        "path": "/stages",
        "value": "{}/{}".format(api_id, stage_name),
    }
    self.apigateway_client.update_api_key(
        apiKey=api_key, patchOperations=[patch]
    )
def get_patch_op(self, keypath, value, op="replace"):
    """
    Return an object that describes a change of configuration on the given staging.

    The ``/*/*/`` wildcard applies the setting to all resources and HTTP
    methods on the stage. Booleans are rendered as the lowercase strings
    the API expects.
    """
    rendered = str(value).lower() if isinstance(value, bool) else value
    return {"op": op, "path": "/*/*/{}".format(keypath), "value": rendered}
def get_rest_apis(self, project_name):
    """
    Generator that allows to iterate per every available apis.

    Yields only the REST APIs whose name matches ``project_name``.
    """
    response = self.apigateway_client.get_rest_apis(limit=500)
    for api in response["items"]:
        if api["name"] == project_name:
            yield api
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
    """
    Delete a deployed REST API Gateway (and its base path mapping, if any).
    """
    print("Deleting API Gateway..")

    api_id = self.get_api_id(lambda_name)

    if domain_name:
        # XXX - Remove Route53 smartly here?
        # XXX - This doesn't raise, but doesn't work either.
        mapping_kwargs = {
            "domainName": domain_name,
            "basePath": "(none)" if base_path is None else base_path,
        }
        try:
            self.apigateway_client.delete_base_path_mapping(**mapping_kwargs)
        except Exception:
            # We may not have actually set up the domain.
            pass

    # Prefer tearing the CloudFormation stack down; fall back to deleting
    # matching REST APIs directly for pre-CF deployments.
    if not self.delete_stack(lambda_name, wait=True):
        for api in self.get_rest_apis(lambda_name):
            self.apigateway_client.delete_rest_api(restApiId=api["id"])
def update_stage_config(
    self,
    project_name,
    stage_name,
    cloudwatch_log_level,
    cloudwatch_data_trace,
    cloudwatch_metrics_enabled,
):
    """
    Update CloudWatch logging/metrics configuration on every REST API
    belonging to ``project_name``.
    """
    # Fall back to OFF for any unrecognised log level.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = "OFF"

    patch_operations = [
        self.get_patch_op("logging/loglevel", cloudwatch_log_level),
        self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
        self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
    ]
    for api in self.get_rest_apis(project_name):
        self.apigateway_client.update_stage(
            restApiId=api["id"],
            stageName=stage_name,
            patchOperations=patch_operations,
        )
def update_cognito(self, lambda_name, user_pool, lambda_configs, lambda_arn):
    """
    Point a Cognito user pool's Lambda triggers at this function and grant
    cognito-idp permission to invoke it.

    ``lambda_configs`` is an iterable of Cognito LambdaConfig trigger keys
    (each one is set to ``lambda_arn``). The user pool is updated in place.
    """
    LambdaConfig = {}
    for config in lambda_configs:
        LambdaConfig[config] = lambda_arn
    description = self.cognito_client.describe_user_pool(UserPoolId=user_pool)
    description_kwargs = {}
    for key, value in description["UserPool"].items():
        # Only these keys are accepted by update_user_pool; everything
        # else returned by describe_user_pool must be dropped.
        if key in (
            "UserPoolId",
            "Policies",
            "AutoVerifiedAttributes",
            "SmsVerificationMessage",
            "EmailVerificationMessage",
            "EmailVerificationSubject",
            "VerificationMessageTemplate",
            "SmsAuthenticationMessage",
            "MfaConfiguration",
            "DeviceConfiguration",
            "EmailConfiguration",
            "SmsConfiguration",
            "UserPoolTags",
            "AdminCreateUserConfig",
        ):
            description_kwargs[key] = value
        elif key == "LambdaConfig":
            # Overwrite only the triggers we manage; keep the others.
            for lckey, lcvalue in value.items():
                if lckey in LambdaConfig:
                    value[lckey] = LambdaConfig[lckey]
            description_kwargs[key] = value
    if "LambdaConfig" not in description_kwargs:
        description_kwargs["LambdaConfig"] = LambdaConfig
    # TemporaryPasswordValidityDays (PasswordPolicy) and the legacy
    # UnusedAccountValidityDays (AdminCreateUserConfig) are mutually
    # exclusive in the update call; prefer the PasswordPolicy field.
    if (
        "TemporaryPasswordValidityDays"
        in description_kwargs["Policies"]["PasswordPolicy"]
    ):
        description_kwargs["AdminCreateUserConfig"].pop(
            "UnusedAccountValidityDays", None
        )
    if "UnusedAccountValidityDays" in description_kwargs["AdminCreateUserConfig"]:
        description_kwargs["Policies"]["PasswordPolicy"][
            "TemporaryPasswordValidityDays"
        ] = description_kwargs["AdminCreateUserConfig"].pop(
            "UnusedAccountValidityDays", None
        )
    result = self.cognito_client.update_user_pool(
        UserPoolId=user_pool, **description_kwargs
    )
    if result["ResponseMetadata"]["HTTPStatusCode"] != 200:
        print("Cognito: Failed to update user pool", result)
    # Now we need to add a policy to the IAM that allows cognito access
    result = self.create_event_permission(
        lambda_name,
        "cognito-idp.amazonaws.com",
        "arn:aws:cognito-idp:{}:{}:userpool/{}".format(
            self.aws_region,
            self.sts_client.get_caller_identity().get("Account"),
            user_pool,
        ),
    )
    # create_event_permission returns None on a non-201 response, so
    # indexing it unconditionally would raise TypeError instead of
    # reporting the failure.
    if result is None:
        print("Cognito: Failed to update lambda permission", result)
def delete_stack(self, name, wait=False):
    """
    Delete the CF stack managed by Zappa.

    Returns True when a deletion was initiated, False when no stack of
    that name exists or it is not tagged ZappaProject=name.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)["Stacks"][0]
    # A bare ``except:`` here would also swallow KeyboardInterrupt and
    # SystemExit; Exception is broad enough for the "stack missing" case.
    except Exception:  # pragma: no cover
        print("No Zappa stack named {0}".format(name))
        return False

    tags = {x["Key"]: x["Value"] for x in stack["Tags"]}
    if tags.get("ZappaProject") == name:
        self.cf_client.delete_stack(StackName=name)
        if wait:
            waiter = self.cf_client.get_waiter("stack_delete_complete")
            print("Waiting for stack {0} to be deleted..".format(name))
            waiter.wait(StackName=name)
        return True
    else:
        # Refuse to delete stacks this project does not own.
        print("ZappaProject tag not found on {0}, doing nothing".format(name))
        return False
def create_stack_template(
    self,
    lambda_arn,
    lambda_name,
    api_key_required,
    iam_authorization,
    authorizer,
    cors_options=None,
    description=None,
    endpoint_configuration=None,
):
    """
    Build the entire CF stack.
    Just used for the API Gateway, but could be expanded in the future.

    Returns the populated troposphere Template (also kept on
    ``self.cf_template``).
    """
    auth_type = "NONE"
    if iam_authorization and authorizer:
        # IAM auth wins when both are requested; the authorizer is dropped.
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            "Both IAM Authorization and Authorizer are specified, this is not possible. "
            "Setting Auth method to IAM Authorization"
        )
        authorizer = None
        auth_type = "AWS_IAM"
    elif iam_authorization:
        auth_type = "AWS_IAM"
    elif authorizer:
        auth_type = authorizer.get("type", "CUSTOM")

    # build a fresh template
    self.cf_template = troposphere.Template()
    self.cf_template.set_description("Automatically generated with Zappa")
    self.cf_api_resources = []
    self.cf_parameters = {}

    # Populates self.cf_template as a side effect.
    self.create_api_gateway_routes(
        lambda_arn,
        api_name=lambda_name,
        api_key_required=api_key_required,
        authorization_type=auth_type,
        authorizer=authorizer,
        cors_options=cors_options,
        description=description,
        endpoint_configuration=endpoint_configuration,
    )
    return self.cf_template
def update_stack(
    self,
    name,
    working_bucket,
    wait=False,
    update_only=False,
    disable_progress=False,
):
    """
    Update or create the CF stack managed by Zappa.

    The template is serialized to a timestamped JSON file, staged on S3
    (CloudFormation reads it from there), then create_stack or
    update_stack is invoked. With ``wait=True``, polls until the stack
    reaches a terminal state, showing a tqdm progress bar of completed
    resources. The local template file and the S3 copy are removed at
    the end.

    Raises EnvironmentError if the stack rolls back or is deleted.
    """
    capabilities = []

    template = name + "-template-" + str(int(time.time())) + ".json"
    with open(template, "wb") as out:
        out.write(
            bytes(
                self.cf_template.to_json(indent=None, separators=(",", ":")),
                "utf-8",
            )
        )

    self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
    # GovCloud needs a region-specific S3 endpoint.
    if self.boto_session.region_name == "us-gov-west-1":
        url = "https://s3-us-gov-west-1.amazonaws.com/{0}/{1}".format(
            working_bucket, template
        )
    else:
        url = "https://s3.amazonaws.com/{0}/{1}".format(working_bucket, template)

    # Carry over user tags, then force ZappaProject to this stack's name.
    tags = [
        {"Key": key, "Value": self.tags[key]}
        for key in self.tags.keys()
        if key != "ZappaProject"
    ]
    tags.append({"Key": "ZappaProject", "Value": name})
    update = True

    # Existence probe: describe_stacks raises when the stack is missing.
    try:
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        update = False

    if update_only and not update:
        print("CloudFormation stack missing, re-deploy to enable updates")
        return

    if not update:
        self.cf_client.create_stack(
            StackName=name, Capabilities=capabilities, TemplateURL=url, Tags=tags
        )
        print(
            "Waiting for stack {0} to create (this can take a bit)..".format(name)
        )
    else:
        try:
            self.cf_client.update_stack(
                StackName=name,
                Capabilities=capabilities,
                TemplateURL=url,
                Tags=tags,
            )
            print("Waiting for stack {0} to update..".format(name))
        except botocore.client.ClientError as e:
            # A no-op update is not an error; just skip the wait loop.
            if e.response["Error"]["Message"] == "No updates are to be performed.":
                wait = False
            else:
                raise

    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator("list_stack_resources")
        progress = tqdm(total=total_resources, unit="res", disable=disable_progress)
        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result["Stacks"]:
                continue  # might need to wait a bit

            if result["Stacks"][0]["StackStatus"] in [
                "CREATE_COMPLETE",
                "UPDATE_COMPLETE",
            ]:
                break

            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result["Stacks"][0]["StackStatus"] in [
                "DELETE_COMPLETE",
                "DELETE_IN_PROGRESS",
                "ROLLBACK_IN_PROGRESS",
                "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
                "UPDATE_ROLLBACK_COMPLETE",
            ]:
                raise EnvironmentError(
                    "Stack creation failed. "
                    "Please check your CloudFormation console. "
                    "You may also need to `undeploy`."
                )

            # Count completed resources across all pages for the progress bar.
            count = 0
            for result in sr.paginate(StackName=name):
                done = (
                    1
                    for x in result["StackResourceSummaries"]
                    if "COMPLETE" in x["ResourceStatus"]
                )
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
            current_resources = count
        progress.close()

    # Best-effort cleanup of the local template file and the S3 copy.
    try:
        os.remove(template)
    except OSError:
        pass

    self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
    """
    Given a name, describe the CloudFormation stack and return a dict of
    its Outputs; returns an empty dict when the stack can't be described.
    """
    try:
        described = self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        return {}
    stack = described["Stacks"][0]
    return {entry["OutputKey"]: entry["OutputValue"] for entry in stack["Outputs"]}
def get_api_url(self, lambda_name, stage_name):
    """
    Given a lambda_name and stage_name, return a valid API URL,
    or None when no API id can be resolved.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        return None
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def get_api_id(self, lambda_name):
    """
    Given a lambda_name, return the API id.

    Tries the CloudFormation stack resource first, then falls back to
    scanning REST APIs by name (pre-CF deployments). Returns None when
    nothing is found.
    """
    try:
        response = self.cf_client.describe_stack_resource(
            StackName=lambda_name, LogicalResourceId="Api"
        )
        return response["StackResourceDetail"].get("PhysicalResourceId", None)
    # Narrowed from a bare ``except:`` which would also swallow
    # KeyboardInterrupt/SystemExit.
    except Exception:  # pragma: no cover
        try:
            # Try the old method (project was probably made on an older, non CF version)
            response = self.apigateway_client.get_rest_apis(limit=500)
            for item in response["items"]:
                if item["name"] == lambda_name:
                    return item["id"]
            logger.exception("Could not get API ID.")
            return None
        except Exception:  # pragma: no cover
            # We don't even have an API deployed. That's okay!
            return None
def create_domain_name(
    self,
    domain_name,
    certificate_name,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    base_path=None,
):
    """
    Creates the API GW domain and returns the resulting DNS name.

    Raises LookupError when no API id can be resolved for ``lambda_name``.
    """
    create_kwargs = {
        "domainName": domain_name,
        "certificateName": certificate_name,
    }
    if certificate_arn:
        # This is an AWS ACM-hosted Certificate
        create_kwargs["certificateArn"] = certificate_arn
    else:
        # This is a Let's Encrypt or custom certificate
        create_kwargs["certificateBody"] = certificate_body
        create_kwargs["certificatePrivateKey"] = certificate_private_key
        create_kwargs["certificateChain"] = certificate_chain
    agw_response = self.apigateway_client.create_domain_name(**create_kwargs)

    api_id = self.get_api_id(lambda_name)
    if not api_id:
        raise LookupError("No API URL to certify found - did you deploy?")

    self.apigateway_client.create_base_path_mapping(
        domainName=domain_name,
        basePath="" if base_path is None else base_path,
        restApiId=api_id,
        stage=stage,
    )

    return agw_response["distributionDomainName"]
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation.

    Upserts either an ALIAS A record (zone apex) or a CNAME pointing
    ``domain_name`` at ``dns_name``.
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)

    zone_name = self.route53.get_hosted_zone(Id=zone_id)["HostedZone"]["Name"][:-1]
    if zone_name == domain_name:
        # Apex records cannot be CNAMEs; use an ALIAS A record instead.
        record_set = {
            "Name": domain_name,
            "Type": "A",
            "AliasTarget": {
                "HostedZoneId": "Z2FDTNDATAQYW2",  # This is a magic value that means "CloudFront"
                "DNSName": dns_name,
                "EvaluateTargetHealth": False,
            },
        }
    else:
        record_set = {
            "Name": domain_name,
            "Type": "CNAME",
            "ResourceRecords": [{"Value": dns_name}],
            "TTL": 60,
        }

    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]
    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [{"Action": "UPSERT", "ResourceRecordSet": record_set}]
        },
    )
def update_domain_name(
    self,
    domain_name,
    certificate_name=None,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    route53=True,
    base_path=None,
):
    """
    This updates your certificate information for an existing domain,
    with similar arguments to boto's update_domain_name API Gateway api.

    It returns the resulting new domain information including the new certificate's ARN
    if created during this process.

    Previously, this method involved downtime that could take up to 40 minutes
    because the API Gateway api only allowed this by deleting, and then creating it.

    Related issues: https://github.com/Miserlou/Zappa/issues/590
    https://github.com/Miserlou/Zappa/issues/588
    https://github.com/Miserlou/Zappa/pull/458
    https://github.com/Miserlou/Zappa/issues/882
    https://github.com/Miserlou/Zappa/pull/883
    """
    print("Updating domain name!")

    # NOTE(review): raises TypeError when certificate_name is left at its
    # None default — callers appear to always supply it; confirm.
    # The timestamp suffix keeps each uploaded certificate name unique.
    certificate_name = certificate_name + str(time.time())

    # Result unused; presumably kept for the side effect of raising when
    # the custom domain does not exist — TODO confirm.
    api_gateway_domain = self.apigateway_client.get_domain_name(
        domainName=domain_name
    )
    # When no ACM ARN is given but full certificate material is, import
    # it into ACM and use the resulting ARN.
    if (
        not certificate_arn
        and certificate_body
        and certificate_private_key
        and certificate_chain
    ):
        acm_certificate = self.acm_client.import_certificate(
            Certificate=certificate_body,
            PrivateKey=certificate_private_key,
            CertificateChain=certificate_chain,
        )
        certificate_arn = acm_certificate["CertificateArn"]

    self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)

    return self.apigateway_client.update_domain_name(
        domainName=domain_name,
        patchOperations=[
            {
                "op": "replace",
                "path": "/certificateName",
                "value": certificate_name,
            },
            {"op": "replace", "path": "/certificateArn", "value": certificate_arn},
        ],
    )
def update_domain_base_path_mapping(
    self, domain_name, lambda_name, stage, base_path
):
    """
    Update domain base path mapping on API Gateway if it was changed,
    creating it when no mapping exists for this API and stage.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        print("Warning! Can't update base path mapping!")
        return

    desired = "" if base_path is None else base_path
    mappings = self.apigateway_client.get_base_path_mappings(
        domainName=domain_name
    )

    found = False
    for mapping in mappings.get("items", []):
        if mapping["restApiId"] != api_id or mapping["stage"] != stage:
            continue
        found = True
        if mapping["basePath"] != base_path:
            self.apigateway_client.update_base_path_mapping(
                domainName=domain_name,
                basePath=mapping["basePath"],
                patchOperations=[
                    {"op": "replace", "path": "/basePath", "value": desired}
                ],
            )

    if not found:
        self.apigateway_client.create_base_path_mapping(
            domainName=domain_name,
            basePath=desired,
            restApiId=api_id,
            stage=stage,
        )
def get_all_zones(self):
    """Same behaviour of list_host_zones, but transparently handling pagination."""
    collected = []
    page = self.route53.list_hosted_zones(MaxItems="100")
    collected.extend(page["HostedZones"])
    while page["IsTruncated"]:
        page = self.route53.list_hosted_zones(
            Marker=page["NextMarker"], MaxItems="100"
        )
        collected.extend(page["HostedZones"])
    return {"HostedZones": collected}
def get_domain_name(self, domain_name, route53=True):
    """
    Scan our hosted zones for the record of a given name.
    Returns the record entry, else None.

    When ``route53`` is False, only verifies the API Gateway custom
    domain exists and returns True (or None if it doesn't).
    """
    # Make sure api gateway domain is present
    try:
        self.apigateway_client.get_domain_name(domainName=domain_name)
    except Exception:
        return None

    if not route53:
        return True

    try:
        zones = self.get_all_zones()
        for zone in zones["HostedZones"]:
            records = self.route53.list_resource_record_sets(
                HostedZoneId=zone["Id"]
            )
            for record in records["ResourceRecordSets"]:
                # Record names come back fully qualified (trailing dot),
                # hence the [:-1] before comparing.
                if (
                    record["Type"] in ("CNAME", "A")
                    and record["Name"][:-1] == domain_name
                ):
                    return record

    except Exception as e:
        # Best-effort lookup: any Route53 failure is treated as "not found".
        return None

    ##
    # Old, automatic logic.
    # If re-introduced, should be moved to a new function.
    # Related ticket: https://github.com/Miserlou/Zappa/pull/458
    ##

    # We may be in a position where Route53 doesn't have a domain, but the API Gateway does.
    # We need to delete this before we can create the new Route53.
    # try:
    #     api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    #     self.apigateway_client.delete_domain_name(domainName=domain_name)
    # except Exception:
    #     pass

    return None
##
# IAM
##
def get_credentials_arn(self):
    """
    Given our role name, look up the IAM role, cache its ARN on
    ``self.credentials_arn``, and return ``(role, arn)``.
    """
    role = self.iam.Role(self.role_name)
    arn = role.arn
    self.credentials_arn = arn
    return role, arn
def create_iam_roles(self):
    """
    Create and defines the IAM roles and policies necessary for Zappa.
    If the IAM role already exists, it will be updated if necessary.

    Returns (credentials_arn, updated) where ``updated`` reports whether
    any role or policy was created or changed.
    """
    attach_policy_obj = json.loads(self.attach_policy)
    assume_policy_obj = json.loads(self.assume_policy)

    # Fold any user-supplied extra permission statements into the policy.
    if self.extra_permissions:
        for permission in self.extra_permissions:
            attach_policy_obj["Statement"].append(dict(permission))
        self.attach_policy = json.dumps(attach_policy_obj)

    updated = False

    # Create the role if needed
    try:
        role, credentials_arn = self.get_credentials_arn()

    except botocore.client.ClientError:
        print("Creating " + self.role_name + " IAM Role..")

        role = self.iam.create_role(
            RoleName=self.role_name, AssumeRolePolicyDocument=self.assume_policy
        )
        self.credentials_arn = role.arn
        updated = True

    # create or update the role's policies if needed
    policy = self.iam.RolePolicy(self.role_name, "zappa-permissions")
    try:
        # Accessing policy_document raises ClientError if the inline
        # policy does not exist yet.
        if policy.policy_document != attach_policy_obj:
            print(
                "Updating zappa-permissions policy on "
                + self.role_name
                + " IAM Role."
            )

            policy.put(PolicyDocument=self.attach_policy)
            updated = True

    except botocore.client.ClientError:
        print(
            "Creating zappa-permissions policy on " + self.role_name + " IAM Role."
        )
        policy.put(PolicyDocument=self.attach_policy)
        updated = True

    # NOTE(review): this uses ``and`` — the assume-role policy is only
    # refreshed when BOTH the whole document and the service principal
    # set differ; confirm whether ``or`` was intended.
    if role.assume_role_policy_document != assume_policy_obj and set(
        role.assume_role_policy_document["Statement"][0]["Principal"]["Service"]
    ) != set(assume_policy_obj["Statement"][0]["Principal"]["Service"]):
        print("Updating assume role policy on " + self.role_name + " IAM Role.")
        self.iam_client.update_assume_role_policy(
            RoleName=self.role_name, PolicyDocument=self.assume_policy
        )
        updated = True

    return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
    """
    Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.
    """
    try:
        policy_response = self.lambda_client.get_policy(FunctionName=lambda_name)
        if policy_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
            statements = json.loads(policy_response["Policy"])["Statement"]
            for statement_entry in statements:
                delete_response = self.lambda_client.remove_permission(
                    FunctionName=lambda_name, StatementId=statement_entry["Sid"]
                )
                status = delete_response["ResponseMetadata"]["HTTPStatusCode"]
                if status != 204:
                    logger.error(
                        "Failed to delete an obsolete policy statement: {}".format(
                            policy_response
                        )
                    )
        else:
            logger.debug(
                "Failed to load Lambda function policy: {}".format(policy_response)
            )
    except ClientError as e:
        if "ResourceNotFoundException" in e.args[0]:
            logger.debug("No policy found, must be first run.")
        else:
            logger.error("Unexpected client error {}".format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
    """
    Create permissions to link to an event.
    Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html

    Returns the add_permission response, or None on a non-201 status.
    """
    logger.debug(
        "Adding new permission to invoke Lambda function: {}".format(lambda_name)
    )

    # Random 8-character statement id to avoid collisions.
    alphabet = string.ascii_uppercase + string.digits
    statement_id = "".join(random.choice(alphabet) for _ in range(8))

    permission_response = self.lambda_client.add_permission(
        FunctionName=lambda_name,
        StatementId=statement_id,
        Action="lambda:InvokeFunction",
        Principal=principal,
        SourceArn=source_arn,
    )

    if permission_response["ResponseMetadata"]["HTTPStatusCode"] != 201:
        print("Problem creating permission to invoke Lambda function")
        return None  # XXX: Raise?

    return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.

    Expressions can be in rate or cron format:
        http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    """
    # The stream sources - DynamoDB, Kinesis and SQS - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ["dynamodb", "kinesis", "sqs"]

    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"

    # Start from a clean slate so renamed/removed schedules don't linger.
    self.unschedule_events(
        lambda_name=lambda_name,
        lambda_arn=lambda_arn,
        events=events,
        excluded_source_services=pull_services,
    )
    for event in events:
        function = event["function"]
        expression = event.get("expression", None)  # single expression
        expressions = event.get("expressions", None)  # multiple expression
        kwargs = event.get(
            "kwargs", {}
        )  # optional dict of keyword arguments for the event
        event_source = event.get("event_source", None)
        description = event.get("description", function)

        #   - If 'cron' or 'rate' in expression, use ScheduleExpression
        #   - Else, use EventPattern
        #       - ex https://github.com/awslabs/aws-lambda-ddns-function

        if not self.credentials_arn:
            self.get_credentials_arn()

        if expression:
            expressions = [
                expression
            ]  # same code for single and multiple expression

        if expressions:
            for index, expression in enumerate(expressions):
                name = self.get_scheduled_event_name(
                    event, function, lambda_name, index
                )
                # if it's possible that we truncated name, generate a unique, shortened name
                # https://github.com/Miserlou/Zappa/issues/970
                if len(name) >= 64:
                    rule_name = self.get_hashed_rule_name(
                        event, function, lambda_name
                    )
                else:
                    rule_name = name

                rule_response = self.events_client.put_rule(
                    Name=rule_name,
                    ScheduleExpression=expression,
                    State="ENABLED",
                    Description=description,
                    RoleArn=self.credentials_arn,
                )

                if "RuleArn" in rule_response:
                    logger.debug(
                        "Rule created. ARN {}".format(rule_response["RuleArn"])
                    )

                # Specific permissions are necessary for any trigger to work.
                self.create_event_permission(
                    lambda_name, "events.amazonaws.com", rule_response["RuleArn"]
                )

                # Overwriting the input, supply the original values and add kwargs
                input_template = (
                    '{"time": <time>, '
                    '"detail-type": <detail-type>, '
                    '"source": <source>,'
                    '"account": <account>, '
                    '"region": <region>,'
                    '"detail": <detail>, '
                    '"version": <version>,'
                    '"resources": <resources>,'
                    '"id": <id>,'
                    '"kwargs": %s'
                    "}" % json.dumps(kwargs)
                )

                # Create the CloudWatch event ARN for this function.
                # https://github.com/Miserlou/Zappa/issues/359
                target_response = self.events_client.put_targets(
                    Rule=rule_name,
                    Targets=[
                        {
                            "Id": "Id"
                            + "".join(
                                random.choice(string.digits) for _ in range(12)
                            ),
                            "Arn": lambda_arn,
                            "InputTransformer": {
                                "InputPathsMap": {
                                    "time": "$.time",
                                    "detail-type": "$.detail-type",
                                    "source": "$.source",
                                    "account": "$.account",
                                    "region": "$.region",
                                    "detail": "$.detail",
                                    "version": "$.version",
                                    "resources": "$.resources",
                                    "id": "$.id",
                                },
                                "InputTemplate": input_template,
                            },
                        }
                    ],
                )

                if target_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
                    print(
                        "Scheduled {} with expression {}!".format(
                            rule_name, expression
                        )
                    )
                else:
                    print(
                        "Problem scheduling {} with expression {}.".format(
                            rule_name, expression
                        )
                    )

        elif event_source:
            service = self.service_from_arn(event_source["arn"])

            if service not in pull_services:
                svc = ",".join(event["event_source"]["events"])
                self.create_event_permission(
                    lambda_name,
                    service + ".amazonaws.com",
                    event["event_source"]["arn"],
                )
            else:
                svc = service

            rule_response = add_event_source(
                event_source, lambda_arn, function, self.boto_session
            )

            if rule_response == "successful":
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == "failed":
                print(
                    "Problem creating {} event schedule for {}!".format(
                        svc, function
                    )
                )
            elif rule_response == "exists":
                print(
                    "{} event schedule for {} already exists - Nothing to do here.".format(
                        svc, function
                    )
                )
            elif rule_response == "dryrun":
                print(
                    "Dryrun for creating {} event schedule for {}!!".format(
                        svc, function
                    )
                )
        else:
            # Bug fix: ``name`` was previously referenced here but is only
            # bound inside the expressions branch, raising NameError for an
            # event with neither an expression nor an event source.
            print(
                "Could not create event {} - Please define either an expression or an event source".format(
                    event.get("name", function)
                )
            )
@staticmethod
def get_scheduled_event_name(event, function, lambda_name, index=0):
    """
    Build the CloudWatch rule name for a scheduled event.

    The name is prefixed with the lambda name (for later lookup by
    prefix) and keeps the function name as a postfix so the handler can
    locate the target function.
    """
    base = event.get("name", function)
    if base != function:
        # A custom event name has been provided; keep the function name
        # as a postfix, otherwise zappa's handler can't locate it.
        base = "{}-{}".format(base, function)
    if index:
        # Index-prefix every entry except the first so multiple
        # expressions on one event yield unique rule names.
        # Related: https://github.com/Miserlou/Zappa/pull/1051
        base = "{}-{}".format(index, base)
    # Prefix with the lambda name so rules can later be found by prefix.
    return Zappa.get_event_name(lambda_name, base)
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return "{prefix:.{width}}-{postfix}".format(
prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name
)[:64]
@staticmethod
def get_hashed_rule_name(event, function, lambda_name):
    """
    Returns an AWS-valid CloudWatch rule name using a digest of the event name, lambda name, and function.
    This allows support for rule names that may be longer than the 64 char limit.
    """
    event_name = event.get("name", function)
    digest_source = "{}-{}".format(lambda_name, event_name)
    name_hash = hashlib.sha1(digest_source.encode("UTF-8")).hexdigest()
    return Zappa.get_event_name(name_hash, function)
def delete_rule(self, rule_name):
    """
    Delete a CWE rule.

    This deletes them, but they will still show up in the AWS console.
    Annoying.
    """
    logger.debug("Deleting existing rule {}".format(rule_name))

    # All targets must be removed before
    # we can actually delete the rule.
    try:
        targets = self.events_client.list_targets_by_rule(Rule=rule_name)
    except botocore.exceptions.ClientError as e:
        # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
        if e.response["Error"]["Code"] == "AccessDeniedException":
            raise
        logger.debug(
            "No target found for this rule: {} {}".format(rule_name, e.args[0])
        )
        return

    target_ids = [target["Id"] for target in targets.get("Targets", [])]
    if target_ids:
        self.events_client.remove_targets(Rule=rule_name, Ids=target_ids)
    else:  # pragma: no cover
        logger.debug("No target to delete")

    # Delete our rule.
    self.events_client.delete_rule(Name=rule_name)
def get_event_rule_names_for_lambda(self, lambda_arn):
    """
    Get all of the rule names associated with a lambda function.
    """
    rule_names = []
    token = None
    # Follow the NextToken pagination until the listing is exhausted.
    while True:
        kwargs = {"TargetArn": lambda_arn}
        if token is not None:
            kwargs["NextToken"] = token
        response = self.events_client.list_rule_names_by_target(**kwargs)
        rule_names.extend(response["RuleNames"])
        token = response.get("NextToken")
        if token is None:
            break
    return rule_names
def get_event_rules_for_lambda(self, lambda_arn):
    """
    Get all of the rule details associated with this function.
    """
    names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    rules = []
    for rule_name in names:
        rules.append(self.events_client.describe_rule(Name=rule_name))
    return rules
def unschedule_events(
    self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None
):
    """
    Given a list of events, unschedule these CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.

    Event sources whose service (parsed from the source ARN) appears in
    `excluded_source_services` are left in place.
    """
    # Fix: this docstring used to appear *after* the following statement, so
    # Python treated it as a bare string expression rather than a docstring.
    excluded_source_services = excluded_source_services or []

    self._clear_policy(lambda_name)

    rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    for rule_name in rule_names:
        self.delete_rule(rule_name)
        print("Unscheduled " + rule_name + ".")

    non_cwe = [e for e in events if "event_source" in e]
    for event in non_cwe:
        # TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
        # them no matter what.
        # These are non CWE event sources.
        function = event["function"]
        name = event.get("name", function)
        event_source = event.get("event_source", function)
        service = self.service_from_arn(event_source["arn"])
        # DynamoDB and Kinesis streams take quite a while to setup after they are created and do not need to be
        # re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
        # update or zappa schedule.
        if service not in excluded_source_services:
            remove_event_source(event_source, lambda_arn, function, self.boto_session)
            print(
                "Removed event {}{}.".format(
                    name,
                    " ({})".format(str(event_source["events"]))
                    if "events" in event_source
                    else "",
                )
            )
###
# Async / SNS
##
def create_async_sns_topic(self, lambda_name, lambda_arn):
    """
    Create the SNS-based async topic.

    Creates the topic, subscribes the Lambda to it, grants SNS permission to
    invoke the function, and registers the topic as an event source routed to
    zappa's async task handler. Returns the topic ARN.
    """
    topic_name = get_topic_name(lambda_name)
    # Create SNS topic
    topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
    # Create subscription
    self.sns_client.subscribe(
        TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn
    )
    # Add Lambda permission for SNS to invoke function
    self.create_event_permission(
        lambda_name=lambda_name, principal="sns.amazonaws.com", source_arn=topic_arn
    )
    # Add rule for SNS topic as a event source
    add_event_source(
        event_source={"arn": topic_arn, "events": ["sns:Publish"]},
        lambda_arn=lambda_arn,
        target_function="zappa.asynchronous.route_task",
        boto_session=self.boto_session,
    )
    return topic_arn
def remove_async_sns_topic(self, lambda_name):
    """
    Remove the async SNS topic.

    Returns the list of topic ARNs that were deleted.
    """
    topic_name = get_topic_name(lambda_name)
    removed_arns = []
    # Fix: `list_subscriptions` returns at most 100 entries per call, so the
    # NextToken pagination must be followed or topics beyond the first page
    # are silently missed.
    kwargs = {}
    while True:
        response = self.sns_client.list_subscriptions(**kwargs)
        for sub in response["Subscriptions"]:
            if topic_name in sub["TopicArn"]:
                self.sns_client.delete_topic(TopicArn=sub["TopicArn"])
                removed_arns.append(sub["TopicArn"])
        next_token = response.get("NextToken")
        if not next_token:
            break
        kwargs = {"NextToken": next_token}
    return removed_arns
###
# Async / DynamoDB
##
def _set_async_dynamodb_table_ttl(self, table_name):
    # Enable TTL on the "ttl" attribute so async task responses expire
    # automatically instead of accumulating forever.
    self.dynamodb_client.update_time_to_live(
        TableName=table_name,
        TimeToLiveSpecification={"Enabled": True, "AttributeName": "ttl"},
    )
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
    """
    Create the DynamoDB table for async task return values.

    Returns a (created, table_description) tuple; `created` is False when the
    table already existed.
    """
    try:
        dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
        return False, dynamodb_table

    # catch this exception (triggered if the table doesn't exist)
    except botocore.exceptions.ClientError:
        dynamodb_table = self.dynamodb_client.create_table(
            AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
            TableName=table_name,
            KeySchema=[
                {"AttributeName": "id", "KeyType": "HASH"},
            ],
            ProvisionedThroughput={
                "ReadCapacityUnits": read_capacity,
                "WriteCapacityUnits": write_capacity,
            },
        )
        if dynamodb_table:
            try:
                self._set_async_dynamodb_table_ttl(table_name)
            except botocore.exceptions.ClientError:
                # this fails because the operation is async, so retry
                time.sleep(10)
                self._set_async_dynamodb_table_ttl(table_name)

    return True, dynamodb_table
def remove_async_dynamodb_table(self, table_name):
    """
    Remove the DynamoDB Table used for async return values.
    """
    self.dynamodb_client.delete_table(TableName=table_name)
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern="", limit=10000, start_time=0):
    """
    Fetch the CloudWatch logs for a given Lambda name.

    `start_time` is a UNIX timestamp in seconds; events are returned sorted
    by their timestamp.
    """
    log_name = "/aws/lambda/" + lambda_name
    streams = self.logs_client.describe_log_streams(
        logGroupName=log_name, descending=True, orderBy="LastEventTime"
    )

    all_streams = streams["logStreams"]
    all_names = [stream["logStreamName"] for stream in all_streams]

    # Amazon uses millisecond epoch for some reason.
    # Thanks, Jeff.
    # Fix: convert ONCE, outside the pagination loop. The old code multiplied
    # start_time by 1000 on every iteration, corrupting the query window
    # whenever results were paginated.
    start_time = start_time * 1000

    events = []
    response = {}
    while not response or "nextToken" in response:
        extra_args = {}
        if "nextToken" in response:
            extra_args["nextToken"] = response["nextToken"]

        end_time = int(time.time()) * 1000

        response = self.logs_client.filter_log_events(
            logGroupName=log_name,
            logStreamNames=all_names,
            startTime=start_time,
            endTime=end_time,
            filterPattern=filter_pattern,
            limit=limit,
            interleaved=True,  # Does this actually improve performance?
            **extra_args,
        )
        if response and "events" in response:
            events += response["events"]

    return sorted(events, key=lambda k: k["timestamp"])
def remove_log_group(self, group_name):
    """
    Delete a single CloudWatch log group.

    Failures are reported but not raised, so cleanup of other groups can
    continue.
    """
    print("Removing log group: {}".format(group_name))
    try:
        self.logs_client.delete_log_group(logGroupName=group_name)
    except botocore.exceptions.ClientError as e:
        print("Couldn't remove '{}' because of: {}".format(group_name, e))
def remove_lambda_function_logs(self, lambda_function_name):
    """
    Remove all logs that are assigned to a given lambda function id.
    """
    # Lambda log groups always live under the "/aws/lambda/" prefix.
    self.remove_log_group("/aws/lambda/{}".format(lambda_function_name))
def remove_api_gateway_logs(self, project_name):
    """
    Removed all logs that are assigned to a given rest api id.
    """
    for rest_api in self.get_rest_apis(project_name):
        # Each deployed stage of the API owns its own execution log group.
        stages = self.apigateway_client.get_stages(restApiId=rest_api["id"])["item"]
        for stage in stages:
            group_name = "API-Gateway-Execution-Logs_{}/{}".format(
                rest_api["id"], stage["stageName"]
            )
            self.remove_log_group(group_name)
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
    """
    Get the Hosted Zone ID for a given domain.
    """
    # Fetch every zone, then pick the one whose name best matches the domain.
    return self.get_best_match_zone(self.get_all_zones(), domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [
zone
for zone in all_zones["HostedZones"]
if not zone["Config"]["PrivateZone"]
]
zones = {
zone["Name"][:-1]: zone["Id"]
for zone in public_zones
if zone["Name"][:-1] in domain
}
if zones:
keys = max(
zones.keys(), key=lambda a: len(a)
) # get longest key -- best match.
return zones[keys]
else:
return None
def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
    """
    Set DNS challenge TXT.
    """
    print("Setting DNS challenge..")
    change_batch = self.get_dns_challenge_change_batch(
        "UPSERT", domain, txt_challenge
    )
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch
    )
def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
    """
    Remove DNS challenge TXT.
    """
    print("Deleting DNS challenge..")
    change_batch = self.get_dns_challenge_change_batch(
        "DELETE", domain, txt_challenge
    )
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch
    )
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challenge, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": "_acme-challenge.{0}".format(domain),
"Type": "TXT",
"TTL": 60,
"ResourceRecords": [{"Value": '"{0}"'.format(txt_challenge)}],
},
}
]
}
##
# Utility
##
def shell(self):
    """
    Spawn a PDB shell.
    """
    # Imported lazily so normal (non-debug) runs never pay for pdb.
    import pdb

    pdb.set_trace()
def load_credentials(self, boto_session=None, profile_name=None):
    """
    Load AWS credentials.

    An optional boto_session can be provided, but that's usually for testing.

    An optional profile_name can be provided for config files that have
    multiple sets of credentials.

    Side effects: sets self.boto_session and self.aws_region, and prints a
    warning if the resolved region is not known to support Lambda or
    API Gateway.
    """
    # Automatically load credentials from config or environment
    if not boto_session:
        # If provided, use the supplied profile name.
        if profile_name:
            self.boto_session = boto3.Session(
                profile_name=profile_name, region_name=self.aws_region
            )
        elif os.environ.get("AWS_ACCESS_KEY_ID") and os.environ.get(
            "AWS_SECRET_ACCESS_KEY"
        ):
            # Build the session explicitly from environment variables.
            region_name = os.environ.get("AWS_DEFAULT_REGION") or self.aws_region
            session_kw = {
                "aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID"),
                "aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
                "region_name": region_name,
            }

            # If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
            if os.environ.get("AWS_SESSION_TOKEN"):
                session_kw["aws_session_token"] = os.environ.get(
                    "AWS_SESSION_TOKEN"
                )

            self.boto_session = boto3.Session(**session_kw)
        else:
            # Fall back to boto3's own credential resolution chain.
            self.boto_session = boto3.Session(region_name=self.aws_region)

        logger.debug("Loaded boto session from config: %s", boto_session)
    else:
        logger.debug("Using provided boto session: %s", boto_session)
        self.boto_session = boto_session

    # use provided session's region in case it differs
    self.aws_region = self.boto_session.region_name

    if self.boto_session.region_name not in LAMBDA_REGIONS:
        print("Warning! AWS Lambda may not be available in this AWS Region!")

    if self.boto_session.region_name not in API_GATEWAY_REGIONS:
        print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(":")[2] | zappa-troposphere | /zappa_troposphere-0.53.1-py3-none-any.whl/zappa/core.py | core.py |
import calendar
import datetime
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from urllib.parse import urlparse
import botocore
import durationpy
from past.builtins import basestring
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.

    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.

    `ignore` follows the shutil.copytree convention: a callable taking
    (directory, names) and returning the subset of names to skip.
    """

    def copy_file(src, dst, item):
        # Copy a single directory entry: symlink, subdirectory, or file.
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if symlinks and os.path.islink(s):  # pragma: no cover
            # Recreate the link itself rather than copying its target.
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                except:
                    pass  # lchmod not available
        elif os.path.isdir(s):
            # Recurse for directories.
            copytree(s, d, metadata, symlinks, ignore)
        else:
            # copy2 preserves metadata; plain copy does not.
            shutil.copy2(s, d) if metadata else shutil.copy(s, d)

    try:
        lst = os.listdir(src)
        if not os.path.exists(dst):
            os.makedirs(dst)
        if metadata:
            shutil.copystat(src, dst)
    except NotADirectoryError:  # egg-link files
        # `src` turned out to be a file, not a directory -- copy it directly.
        copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
        return

    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]
    for item in lst:
        copy_file(src, dst, item)
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path).
    """
    if not url:
        # Empty/None URL -> empty bucket and path.
        return "", ""
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip("/")
def human_size(num, suffix="B"):
    """
    Convert bytes length to a human-readable version
    """
    value = num
    # Scale down by 1024 until the value fits the current unit.
    for unit in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(value) < 1024.0:
            return "{0:3.1f}{1!s}{2!s}".format(value, unit, suffix)
        value /= 1024.0
    return "{0:.1f}{1!s}{2!s}".format(value, "Yi", suffix)
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.

    The string is interpreted as an extended Go-style duration (via
    durationpy) measured back from the current UTC time. Returns 0 when the
    string cannot be parsed.
    """
    # Uses an extended version of Go's duration string.
    # Fix: the old code had an unreachable `if ts: return ts` block after the
    # `return` inside the try; it has been removed.
    try:
        delta = durationpy.from_str(timestring)
        past = datetime.datetime.utcnow() - delta
        return calendar.timegm(past.timetuple())
    except Exception:
        # Unable to parse the timestring; callers treat 0 as "no timestamp".
        return 0
##
# `init` related
##
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.
    """
    discovered = []
    cwd = os.getcwd()
    for root, _dirs, filenames in os.walk(cwd):
        for match in fnmatch.filter(filenames, "*settings.py"):
            full = os.path.join(root, match)
            # Skip anything installed into a virtualenv.
            if "site-packages" in full:
                continue
            relative = full.replace(cwd, "")
            # "/pkg/settings.py" -> "pkg.settings"
            module = relative.replace(os.sep, ".").split(".", 1)[1].replace(".py", "")
            discovered.append(module)
    return discovered
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.
    """
    discovered = []
    cwd = os.getcwd()
    for root, _dirs, filenames in os.walk(cwd):
        for match in fnmatch.filter(filenames, "*.py"):
            full = os.path.join(root, match)
            # Skip anything installed into a virtualenv.
            if "site-packages" in full:
                continue
            with io.open(full, "r", encoding="utf-8") as handle:
                lines = handle.readlines()
            for line in lines:
                app = None

                # Kind of janky..
                if "= Flask(" in line:
                    app = line.split("= Flask(")[0].strip()
                if "=Flask(" in line:
                    app = line.split("=Flask(")[0].strip()

                if not app:
                    continue

                # "/pkg/app.py" + "app" -> "pkg.app.app"
                module = (
                    full.replace(cwd, "")
                    .replace(os.sep, ".")
                    .split(".", 1)[1]
                    .replace(".py", "")
                )
                discovered.append(module + "." + app)
    return discovered
def get_venv_from_python_version():
    """Return a "pythonX.Y" name matching the running interpreter version."""
    return "python{}.{}".format(sys.version_info[0], sys.version_info[1])
def get_runtime_from_python_version():
    """Return the AWS Lambda runtime identifier for the active interpreter."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major < 3:
        raise ValueError("Python 2.x is no longer supported.")
    if minor <= 6:
        return "python3.6"
    if minor <= 7:
        return "python3.7"
    # 3.8 is the newest runtime this version knows about.
    return "python3.8"
##
# Async Tasks
##
def get_topic_name(lambda_name):
    """Topic name generation"""
    return "{}-zappa-async".format(lambda_name)
##
# Event sources / Kappa
##
def get_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary item, a session and a lambda_arn,
    hack into Kappa's Gibson, create out an object we can call
    to schedule this event, and return the event source.

    Returns a (event_source_obj, ctx, funk) triple: the kappa event source,
    the pseudo-context carrying the boto session, and the pseudo-function
    describing the Lambda target.
    """
    # kappa is imported lazily so the rest of this module works without it.
    import kappa.awsclient
    import kappa.event_source.base
    import kappa.event_source.cloudwatch
    import kappa.event_source.dynamodb_stream
    import kappa.event_source.kinesis
    import kappa.event_source.s3
    import kappa.event_source.sns
    import kappa.function
    import kappa.policy
    import kappa.restapi
    import kappa.role

    # Minimal stand-ins for kappa's Context/Function objects; we only set the
    # attributes kappa's event sources actually read.
    class PseudoContext:
        def __init__(self):
            return

    class PseudoFunction:
        def __init__(self):
            return

    # Mostly adapted from kappa - will probably be replaced by kappa support
    class SqsEventSource(kappa.event_source.base.EventSource):
        def __init__(self, context, config):
            super().__init__(context, config)
            self._lambda = kappa.awsclient.create_client("lambda", context.session)

        def _get_uuid(self, function):
            # Look up the existing event source mapping's UUID, if any.
            uuid = None
            response = self._lambda.call(
                "list_event_source_mappings",
                FunctionName=function.name,
                EventSourceArn=self.arn,
            )
            LOG.debug(response)
            if len(response["EventSourceMappings"]) > 0:
                uuid = response["EventSourceMappings"][0]["UUID"]
            return uuid

        def add(self, function):
            try:
                response = self._lambda.call(
                    "create_event_source_mapping",
                    FunctionName=function.name,
                    EventSourceArn=self.arn,
                    BatchSize=self.batch_size,
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to add event source")

        def enable(self, function):
            self._config["enabled"] = True
            try:
                response = self._lambda.call(
                    "update_event_source_mapping",
                    UUID=self._get_uuid(function),
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to enable event source")

        def disable(self, function):
            self._config["enabled"] = False
            try:
                response = self._lambda.call(
                    "update_event_source_mapping",
                    FunctionName=function.name,
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to disable event source")

        def update(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        "update_event_source_mapping",
                        BatchSize=self.batch_size,
                        Enabled=self.enabled,
                        FunctionName=function.arn,
                    )
                    LOG.debug(response)
                except Exception:
                    LOG.exception("Unable to update event source")

        def remove(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                response = self._lambda.call("delete_event_source_mapping", UUID=uuid)
                LOG.debug(response)
            return response

        def status(self, function):
            # Returns the mapping description, or None if it doesn't exist.
            response = None
            LOG.debug("getting status for event source %s", self.arn)
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        "get_event_source_mapping", UUID=self._get_uuid(function)
                    )
                    LOG.debug(response)
                except botocore.exceptions.ClientError:
                    LOG.debug("event source %s does not exist", self.arn)
                    response = None
            else:
                LOG.debug("No UUID for event source %s", self.arn)
            return response

    class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
        # Extends kappa's SNS source with subscription filter-policy support.
        @property
        def filters(self):
            return self._config.get("filters")

        def add_filters(self, function):
            try:
                subscription = self.exists(function)
                if subscription:
                    response = self._sns.call(
                        "set_subscription_attributes",
                        SubscriptionArn=subscription["SubscriptionArn"],
                        AttributeName="FilterPolicy",
                        AttributeValue=json.dumps(self.filters),
                    )
                    kappa.event_source.sns.LOG.debug(response)
            except Exception:
                kappa.event_source.sns.LOG.exception(
                    "Unable to add filters for SNS topic %s", self.arn
                )

        def add(self, function):
            super().add(function)
            if self.filters:
                self.add_filters(function)

    event_source_map = {
        "dynamodb": kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
        "kinesis": kappa.event_source.kinesis.KinesisEventSource,
        "s3": kappa.event_source.s3.S3EventSource,
        "sns": ExtendedSnsEventSource,
        "sqs": SqsEventSource,
        "events": kappa.event_source.cloudwatch.CloudWatchEventSource,
    }

    arn = event_source["arn"]
    # The service name is the third colon-separated ARN field.
    _, _, svc, _ = arn.split(":", 3)

    event_source_func = event_source_map.get(svc, None)
    if not event_source_func:
        raise ValueError("Unknown event source: {0}".format(arn))

    def autoreturn(self, function_name):
        return function_name

    # Monkey-patch kappa's notification id generation to use the name as-is.
    event_source_func._make_notification_id = autoreturn

    ctx = PseudoContext()
    ctx.session = boto_session

    funk = PseudoFunction()
    funk.name = lambda_arn

    # Kappa 0.6.0 requires this nasty hacking,
    # hopefully we can remove at least some of this soon.
    # Kappa 0.7.0 introduces a whole host over other changes we don't
    # really want, so we're stuck here for a little while.
    # Related: https://github.com/Miserlou/Zappa/issues/684
    # https://github.com/Miserlou/Zappa/issues/688
    # https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
    if svc == "s3":
        # S3 event sources need the ARN split apart and the target function
        # name appended -- see the issues linked above.
        split_arn = lambda_arn.split(":")
        arn_front = ":".join(split_arn[:-1])
        arn_back = split_arn[-1]
        ctx.environment = arn_back
        funk.arn = arn_front
        funk.name = ":".join([arn_back, target_function])
    else:
        funk.arn = lambda_arn

    funk._context = ctx

    event_source_obj = event_source_func(ctx, event_source)

    return event_source_obj, ctx, funk
def add_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and add the event source.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    # TODO: Detect changes in config and refine exists algorithm
    if dry:
        return "dryrun"
    if event_source_obj.status(funk):
        return "exists"
    event_source_obj.add(funk)
    return "successful" if event_source_obj.status(funk) else "failed"
def remove_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and remove the event source.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )

    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn
    if dry:
        return event_source_obj
    return event_source_obj.remove(funk)
def get_event_source_status(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    source_obj, _ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    return source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.

    Returns True is updateable, else False.
    """
    # Imported here so module import does not require requests.
    import requests

    pypi_url = "https://pypi.org/pypi/Zappa/json"
    resp = requests.get(pypi_url, timeout=1.5)
    latest_version = resp.json()["info"]["version"]

    return this_version != latest_version
class InvalidAwsLambdaName(Exception):
    """Exception: proposed AWS Lambda name is invalid"""

    pass
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.

    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix.
    maxlen: max allowed length for name without prefix and suffix.

    The value 80 was calculated from prefix with longest known region name
    and assuming that no alias or version would be longer than `$LATEST`.

    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    # Python 2 is no longer supported (see get_runtime_from_python_version),
    # so a plain `str` check replaces the old past.builtins.basestring check.
    if not isinstance(name, str):
        msg = "Name must be of type string"
        raise InvalidAwsLambdaName(msg)
    if len(name) > maxlen:
        msg = "Name is longer than {maxlen} characters."
        raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
    if len(name) == 0:
        msg = "Name must not be empty string."
        raise InvalidAwsLambdaName(msg)
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
        raise InvalidAwsLambdaName(msg)
    return name
def contains_python_files_or_subdirs(folder):
    """
    Checks (recursively) if the directory contains .py or .pyc files
    """
    # os.walk already recurses into every subdirectory, so one walk covers the
    # whole tree. The previous implementation additionally walked each
    # subdirectory *name* (a path relative to the CWD, not to `folder`),
    # which was redundant at best and checked the wrong location at worst.
    for _root, _dirs, files in os.walk(folder):
        if any(f.endswith((".py", ".pyc")) for f in files):
            return True
    return False
def conflicts_with_a_neighbouring_module(directory_path):
    """
    Checks if a directory lies in the same directory as a .py file with the same name.
    """
    parent_dir, dir_name = os.path.split(os.path.normpath(directory_path))
    # e.g. a "pkg/" directory conflicts with a sibling "pkg.py" file.
    return (dir_name + ".py") in os.listdir(parent_dir)
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all keys titlecased.
    """
    return dict((key.title(), value) for key, value in d.items())
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
    """
    Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
    """
    # Bucket names must be at least 3 and no more than 63 characters long.
    if len(name) < 3 or len(name) > 63:
        return False
    # Bucket names must not contain uppercase characters or underscores.
    if any(x.isupper() for x in name):
        return False
    if "_" in name:
        return False
    # Bucket names may contain only lowercase ASCII letters, numbers, hyphens
    # and periods. (Fix: the previous version never checked the character set,
    # so names like "my!bucket" were incorrectly accepted.)
    if not set(name) <= set("abcdefghijklmnopqrstuvwxyz0123456789.-"):
        return False
    # Bucket names must start with a lowercase letter or number.
    if not (name[0].islower() or name[0].isdigit()):
        return False
    # Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.).
    for label in name.split("."):
        # Each label must start and end with a lowercase letter or a number.
        if len(label) < 1:
            return False
        if not (label[0].islower() or label[0].isdigit()):
            return False
        if not (label[-1].islower() or label[-1].isdigit()):
            return False
    # Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
    if all(label.isdigit() for label in name.split(".")):
        return False
    return True
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single = event.get("headers") or {}
    multi = (event.get("multiValueHeaders") or {}).copy()
    # Promote plain headers that have no multi-value counterpart.
    for key, value in single.items():
        multi.setdefault(key, [value])
    # Collapse each value list into one comma-separated string.
    return {key: ", ".join(values) for key, values in multi.items()}
import argparse
import base64
import collections
import importlib
import inspect
import logging
import os
import pkgutil
import random
import re
import string
import sys
import tempfile
import time
import zipfile
from builtins import bytes, input
from datetime import datetime, timedelta
import argcomplete
import botocore
import click
import hjson as json
import pkg_resources
import requests
import slugify
import toml
import yaml
from click import BaseCommand, Context
from click.exceptions import ClickException
from click.globals import push_context
from dateutil import parser
from past.builtins import basestring
from .core import API_GATEWAY_REGIONS, Zappa, logger
from .utilities import (
InvalidAwsLambdaName,
check_new_version_available,
detect_django_settings,
detect_flask_apps,
get_runtime_from_python_version,
get_venv_from_python_version,
human_size,
is_valid_bucket_name,
parse_s3_url,
string_to_timestamp,
validate_name,
)
# NOTE(review): appears to be the set of zappa_settings keys that receive
# special handling elsewhere in this CLI module -- confirm against the rest
# of the file before relying on this list.
CUSTOM_SETTINGS = [
    "apigateway_policy",
    "assume_policy",
    "attach_policy",
    "aws_region",
    "delete_local_zip",
    "delete_s3_zip",
    "exclude",
    "exclude_glob",
    "extra_permissions",
    "include",
    "role_name",
    "touch",
]

# Pointer to boto3's credential-configuration guide, for user-facing messages.
BOTO3_CONFIG_DOCS_URL = (
    "https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration"
)
##
# Main Input Processing
##
class ZappaCLI:
"""
ZappaCLI object is responsible for loading the settings,
handling the input arguments and executing the calls to the core library.
"""
# CLI
vargs = None
command = None
stage_env = None
# Zappa settings
zappa = None
zappa_settings = None
load_credentials = True
disable_progress = False
# Specific settings
api_stage = None
app_function = None
aws_region = None
debug = None
prebuild_script = None
project_name = None
profile_name = None
lambda_arn = None
lambda_name = None
lambda_description = None
lambda_concurrency = None
s3_bucket_name = None
settings_file = None
zip_path = None
handler_path = None
vpc_config = None
memory_size = None
use_apigateway = None
lambda_handler = None
django_settings = None
manage_roles = True
exception_handler = None
environment_variables = None
authorizer = None
xray_tracing = False
aws_kms_key_arn = ""
context_header_mappings = None
tags = []
layers = None
stage_name_env_pattern = re.compile("^[a-zA-Z0-9_]+$")
def __init__(self):
    # Per-stage forced overrides of zappa_settings values, keyed by stage
    # name; populated via override_stage_config_setting(key, val).
    self._stage_config_overrides = (
        {}
    )  # change using self.override_stage_config_setting(key, val)
@property
def stage_config(self):
    """
    A shortcut property for settings of a stage.

    Resolves the stage's 'extends' chain (the extending stage's values win
    over those of the stage it extends), applies the legacy 'delete_zip'
    alias, and finally applies any forced overrides registered through
    `override_stage_config_setting`.
    """

    def get_stage_setting(stage, extended_stages=None):
        # Recursively merge this stage's settings over the stage it extends,
        # guarding against circular 'extends' chains.
        if extended_stages is None:
            extended_stages = []
        if stage in extended_stages:
            raise RuntimeError(
                stage + " has already been extended to these settings. "
                "There is a circular extends within the settings file."
            )
        extended_stages.append(stage)

        try:
            stage_settings = dict(self.zappa_settings[stage].copy())
        except KeyError:
            raise ClickException(
                "Cannot extend settings for undefined stage '" + stage + "'."
            )

        extends_stage = self.zappa_settings[stage].get("extends", None)
        if not extends_stage:
            return stage_settings
        extended_settings = get_stage_setting(
            stage=extends_stage, extended_stages=extended_stages
        )
        # Child settings take precedence over the extended (parent) ones.
        extended_settings.update(stage_settings)
        return extended_settings

    settings = get_stage_setting(stage=self.api_stage)

    # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
    if "delete_zip" in settings:
        settings["delete_local_zip"] = settings.get("delete_zip")

    settings.update(self.stage_config_overrides)

    return settings
@property
def stage_config_overrides(self):
    """
    Returns zappa_settings we forcefully override for the current stage
    set by `self.override_stage_config_setting(key, value)`
    """
    # getattr() guards against instances where __init__ never ran;
    # defaults to "no overrides" for the current stage.
    return getattr(self, "_stage_config_overrides", {}).get(self.api_stage, {})
def override_stage_config_setting(self, key, val):
    """
    Forcefully override a setting set by zappa_settings (for the current stage only)

    :param key: settings key
    :param val: value
    """
    # Tolerate instances where __init__ never populated the attribute.
    overrides = getattr(self, "_stage_config_overrides", {})
    overrides.setdefault(self.api_stage, {})[key] = val
    self._stage_config_overrides = overrides
def handle(self, argv=None):
"""
Main function.
Parses command, load settings and dispatches accordingly.
"""
desc = "Zappa - Deploy Python applications to AWS Lambda" " and API Gateway.\n"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"-v",
"--version",
action="version",
version=pkg_resources.get_distribution("zappa-troposphere").version,
help="Print the zappa version",
)
parser.add_argument(
"--color", default="auto", choices=["auto", "never", "always"]
)
env_parser = argparse.ArgumentParser(add_help=False)
me_group = env_parser.add_mutually_exclusive_group()
all_help = "Execute this command for all of our defined " "Zappa stages."
me_group.add_argument("--all", action="store_true", help=all_help)
me_group.add_argument("stage_env", nargs="?")
group = env_parser.add_argument_group()
group.add_argument(
"-a", "--app_function", help="The WSGI application function."
)
group.add_argument(
"-s", "--settings_file", help="The path to a Zappa settings file."
)
group.add_argument(
"-q", "--quiet", action="store_true", help="Silence all output."
)
# https://github.com/Miserlou/Zappa/issues/407
# Moved when 'template' command added.
# Fuck Terraform.
group.add_argument(
"-j",
"--json",
action="store_true",
help="Make the output of this command be machine readable.",
)
# https://github.com/Miserlou/Zappa/issues/891
group.add_argument(
"--disable_progress", action="store_true", help="Disable progress bars."
)
group.add_argument("--no_venv", action="store_true", help="Skip venv check.")
##
# Certify
##
subparsers = parser.add_subparsers(title="subcommands", dest="command")
cert_parser = subparsers.add_parser(
"certify", parents=[env_parser], help="Create and install SSL certificate"
)
cert_parser.add_argument(
"--manual",
action="store_true",
help=(
"Gets new Let's Encrypt certificates, but prints them to console."
"Does not update API Gateway domains."
),
)
cert_parser.add_argument(
"-y", "--yes", action="store_true", help="Auto confirm yes."
)
##
# Deploy
##
deploy_parser = subparsers.add_parser(
"deploy", parents=[env_parser], help="Deploy application."
)
deploy_parser.add_argument(
"-z",
"--zip",
help="Deploy Lambda with specific local or S3 hosted zip package",
)
deploy_parser.add_argument(
"-d",
"--docker-image-uri",
help="Deploy Lambda with a specific docker image hosted in AWS Elastic Container Registry",
)
##
# Init
##
init_parser = subparsers.add_parser("init", help="Initialize Zappa app.")
##
# Package
##
package_parser = subparsers.add_parser(
"package",
parents=[env_parser],
help="Build the application zip package locally.",
)
package_parser.add_argument(
"-o", "--output", help="Name of file to output the package to."
)
##
# Template
##
template_parser = subparsers.add_parser(
"template",
parents=[env_parser],
help="Create a CloudFormation template for this API Gateway.",
)
template_parser.add_argument(
"-l",
"--lambda-arn",
required=True,
help="ARN of the Lambda function to template to.",
)
template_parser.add_argument(
"-r", "--role-arn", required=True, help="ARN of the Role to template with."
)
template_parser.add_argument(
"-o", "--output", help="Name of file to output the template to."
)
##
# Invocation
##
invoke_parser = subparsers.add_parser(
"invoke", parents=[env_parser], help="Invoke remote function."
)
invoke_parser.add_argument(
"--raw",
action="store_true",
help=(
"When invoking remotely, invoke this python as a string,"
" not as a modular path."
),
)
invoke_parser.add_argument(
"--no-color", action="store_true", help=("Don't color the output")
)
invoke_parser.add_argument("command_rest")
##
# Manage
##
manage_parser = subparsers.add_parser(
"manage", help="Invoke remote Django manage.py commands."
)
rest_help = (
"Command in the form of <env> <command>. <env> is not "
"required if --all is specified"
)
manage_parser.add_argument("--all", action="store_true", help=all_help)
manage_parser.add_argument("command_rest", nargs="+", help=rest_help)
manage_parser.add_argument(
"--no-color", action="store_true", help=("Don't color the output")
)
# This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
# https://github.com/Miserlou/Zappa/issues/1002
manage_parser.add_argument(
"-s", "--settings_file", help="The path to a Zappa settings file."
)
##
# Rollback
##
def positive_int(s):
"""Ensure an arg is positive"""
i = int(s)
if i < 0:
msg = "This argument must be positive (got {})".format(s)
raise argparse.ArgumentTypeError(msg)
return i
rollback_parser = subparsers.add_parser(
"rollback",
parents=[env_parser],
help="Rollback deployed code to a previous version.",
)
rollback_parser.add_argument(
"-n",
"--num-rollback",
type=positive_int,
default=1,
help="The number of versions to rollback.",
)
##
# Scheduling
##
subparsers.add_parser(
"schedule",
parents=[env_parser],
help="Schedule functions to occur at regular intervals.",
)
##
# Status
##
subparsers.add_parser(
"status",
parents=[env_parser],
help="Show deployment status and event schedules.",
)
##
# Log Tailing
##
tail_parser = subparsers.add_parser(
"tail", parents=[env_parser], help="Tail deployment logs."
)
tail_parser.add_argument(
"--no-color", action="store_true", help="Don't color log tail output."
)
tail_parser.add_argument(
"--http",
action="store_true",
help="Only show HTTP requests in tail output.",
)
tail_parser.add_argument(
"--non-http",
action="store_true",
help="Only show non-HTTP requests in tail output.",
)
tail_parser.add_argument(
"--since",
type=str,
default="100000s",
help="Only show lines since a certain timeframe.",
)
tail_parser.add_argument(
"--filter", type=str, default="", help="Apply a filter pattern to the logs."
)
tail_parser.add_argument(
"--force-color",
action="store_true",
help="Force coloring log tail output even if coloring support is not auto-detected. (example: piping)",
)
tail_parser.add_argument(
"--disable-keep-open",
action="store_true",
help="Exit after printing the last available log, rather than keeping the log open.",
)
##
# Undeploy
##
undeploy_parser = subparsers.add_parser(
"undeploy", parents=[env_parser], help="Undeploy application."
)
undeploy_parser.add_argument(
"--remove-logs",
action="store_true",
help=(
"Removes log groups of api gateway and lambda task"
" during the undeployment."
),
)
undeploy_parser.add_argument(
"-y", "--yes", action="store_true", help="Auto confirm yes."
)
##
# Unschedule
##
subparsers.add_parser(
"unschedule", parents=[env_parser], help="Unschedule functions."
)
##
# Updating
##
update_parser = subparsers.add_parser(
"update", parents=[env_parser], help="Update deployed application."
)
update_parser.add_argument(
"-z",
"--zip",
help="Update Lambda with specific local or S3 hosted zip package",
)
update_parser.add_argument(
"-n",
"--no-upload",
help="Update configuration where appropriate, but don't upload new code",
)
update_parser.add_argument(
"-d",
"--docker-image-uri",
help="Update Lambda with a specific docker image hosted in AWS Elastic Container Registry",
)
##
# Debug
##
subparsers.add_parser(
"shell",
parents=[env_parser],
help="A debug shell with a loaded Zappa object.",
)
##
# Python Settings File
##
settings_parser = subparsers.add_parser(
"save-python-settings-file",
parents=[env_parser],
help="Generate & save the Zappa settings Python file for docker deployments",
)
settings_parser.add_argument(
"-o",
"--output_path",
help=(
"The path to save the Zappa settings Python file. "
"File must be named zappa_settings.py and should be saved "
"in the same directory as the Zappa handler.py"
),
)
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
if args.color == "never":
disable_click_colors()
elif args.color == "always":
# TODO: Support aggressive coloring like "--force-color" on all commands
pass
elif args.color == "auto":
pass
# Parse the input
# NOTE(rmoe): Special case for manage command
# The manage command can't have both stage_env and command_rest
# arguments. Since they are both positional arguments argparse can't
# differentiate the two. This causes problems when used with --all.
# (e.g. "manage --all showmigrations admin" argparse thinks --all has
# been specified AND that stage_env='showmigrations')
# By having command_rest collect everything but --all we can split it
# apart here instead of relying on argparse.
if not args.command:
parser.print_help()
return
if args.command == "manage" and not self.vargs.get("all"):
self.stage_env = self.vargs["command_rest"].pop(0)
else:
self.stage_env = self.vargs.get("stage_env")
if args.command == "package":
self.load_credentials = False
self.command = args.command
self.disable_progress = self.vargs.get("disable_progress")
if self.vargs.get("quiet"):
self.silence()
# We don't have any settings yet, so make those first!
# (Settings-based interactions will fail
# before a project has been initialized.)
if self.command == "init":
self.init()
return
# Make sure there isn't a new version available
if not self.vargs.get("json"):
self.check_for_update()
# Load and Validate Settings File
self.load_settings_file(self.vargs.get("settings_file"))
# Should we execute this for all stages, or just one?
all_stages = self.vargs.get("all")
stages = []
if all_stages: # All stages!
stages = self.zappa_settings.keys()
else: # Just one env.
if not self.stage_env:
# If there's only one stage defined in the settings,
# use that as the default.
if len(self.zappa_settings.keys()) == 1:
stages.append(list(self.zappa_settings.keys())[0])
else:
parser.error("Please supply a stage to interact with.")
else:
stages.append(self.stage_env)
for stage in stages:
try:
self.dispatch_command(self.command, stage)
except ClickException as e:
# Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
e.show()
sys.exit(e.exit_code)
def dispatch_command(self, command, stage):
"""
Given a command to execute and stage,
execute that command.
"""
self.api_stage = stage
if command not in ["status", "manage"]:
if not self.vargs.get("json", None):
click.echo(
"Calling "
+ click.style(command, fg="green", bold=True)
+ " for stage "
+ click.style(self.api_stage, bold=True)
+ ".."
)
# Explicitly define the app function.
# Related: https://github.com/Miserlou/Zappa/issues/832
if self.vargs.get("app_function", None):
self.app_function = self.vargs["app_function"]
# Load our settings, based on api_stage.
try:
self.load_settings(self.vargs.get("settings_file"))
except ValueError as e:
if hasattr(e, "message"):
print("Error: {}".format(e.message))
else:
print(str(e))
sys.exit(-1)
self.callback("settings")
# Hand it off
if command == "deploy": # pragma: no cover
self.deploy(self.vargs["zip"], self.vargs["docker_image_uri"])
if command == "package": # pragma: no cover
self.package(self.vargs["output"])
if command == "template": # pragma: no cover
self.template(
self.vargs["lambda_arn"],
self.vargs["role_arn"],
output=self.vargs["output"],
json=self.vargs["json"],
)
elif command == "update": # pragma: no cover
self.update(
self.vargs["zip"],
self.vargs["no_upload"],
self.vargs["docker_image_uri"],
)
elif command == "rollback": # pragma: no cover
self.rollback(self.vargs["num_rollback"])
elif command == "invoke": # pragma: no cover
if not self.vargs.get("command_rest"):
print("Please enter the function to invoke.")
return
self.invoke(
self.vargs["command_rest"],
raw_python=self.vargs["raw"],
no_color=self.vargs["no_color"],
)
elif command == "manage": # pragma: no cover
if not self.vargs.get("command_rest"):
print("Please enter the management command to invoke.")
return
if not self.django_settings:
print("This command is for Django projects only!")
print(
"If this is a Django project, please define django_settings in your zappa_settings."
)
return
command_tail = self.vargs.get("command_rest")
if len(command_tail) > 1:
command = " ".join(
command_tail
) # ex: zappa manage dev "shell --version"
else:
command = command_tail[0] # ex: zappa manage dev showmigrations admin
self.invoke(
command,
command="manage",
no_color=self.vargs["no_color"],
)
elif command == "tail": # pragma: no cover
self.tail(
colorize=(not self.vargs["no_color"]),
http=self.vargs["http"],
non_http=self.vargs["non_http"],
since=self.vargs["since"],
filter_pattern=self.vargs["filter"],
force_colorize=self.vargs["force_color"] or None,
keep_open=not self.vargs["disable_keep_open"],
)
elif command == "undeploy": # pragma: no cover
self.undeploy(
no_confirm=self.vargs["yes"], remove_logs=self.vargs["remove_logs"]
)
elif command == "schedule": # pragma: no cover
self.schedule()
elif command == "unschedule": # pragma: no cover
self.unschedule()
elif command == "status": # pragma: no cover
self.status(return_json=self.vargs["json"])
elif command == "certify": # pragma: no cover
self.certify(no_confirm=self.vargs["yes"], manual=self.vargs["manual"])
elif command == "shell": # pragma: no cover
self.shell()
elif command == "save-python-settings-file": # pragma: no cover
self.save_python_settings_file(self.vargs["output_path"])
##
# The Commands
##
def save_python_settings_file(self, output_path=None):
settings_path = output_path or "zappa_settings.py"
print(
"Generating Zappa settings Python file and saving to {}".format(
settings_path
)
)
if not settings_path.endswith("zappa_settings.py"):
raise ValueError("Settings file must be named zappa_settings.py")
zappa_settings_s = self.get_zappa_settings_string()
with open(settings_path, "w") as f_out:
f_out.write(zappa_settings_s)
def package(self, output=None):
"""
Only build the package
"""
# Make sure we're in a venv.
self.check_venv()
# force not to delete the local zip
self.override_stage_config_setting("delete_local_zip", False)
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Create the Lambda Zip
self.create_package(output)
self.callback("zip")
size = human_size(os.path.getsize(self.zip_path))
click.echo(
click.style("Package created", fg="green", bold=True)
+ ": "
+ click.style(self.zip_path, bold=True)
+ " ("
+ size
+ ")"
)
def template(self, lambda_arn, role_arn, output=None, json=False):
"""
Only build the template file.
"""
if not lambda_arn:
raise ClickException("Lambda ARN is required to template.")
if not role_arn:
raise ClickException("Role ARN is required to template.")
self.zappa.credentials_arn = role_arn
# Create the template!
template = self.zappa.create_stack_template(
lambda_arn=lambda_arn,
lambda_name=self.lambda_name,
api_key_required=self.api_key_required,
iam_authorization=self.iam_authorization,
authorizer=self.authorizer,
cors_options=self.cors,
description=self.apigateway_description,
endpoint_configuration=self.endpoint_configuration,
)
if not output:
template_file = (
self.lambda_name + "-template-" + str(int(time.time())) + ".json"
)
else:
template_file = output
with open(template_file, "wb") as out:
out.write(
bytes(template.to_json(indent=None, separators=(",", ":")), "utf-8")
)
if not json:
click.echo(
click.style("Template created", fg="green", bold=True)
+ ": "
+ click.style(template_file, bold=True)
)
else:
with open(template_file, "r") as out:
print(out.read())
    def deploy(self, source_zip=None, docker_image_uri=None):
        """
        Package your project, upload it to S3, register the Lambda function
        and create the API Gateway routes.

        source_zip: path or s3:// URL of a pre-built package; skips local
            packaging and upload when given.
        docker_image_uri: ECR image URI to deploy instead of a zip package.
        """
        # NOTE(review): this reads as `(not source_zip) or docker_image_uri`,
        # i.e. IAM roles are managed for every deploy except a plain zip
        # deploy without a docker image -- confirm that precedence is intended.
        if not source_zip or docker_image_uri:
            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError as ce:
                    raise ClickException(
                        click.style("Failed", fg="red")
                        + " to "
                        + click.style("manage IAM roles", bold=True)
                        + "!\n"
                        + "You may "
                        + click.style("lack the necessary AWS permissions", bold=True)
                        + " to automatically manage a Zappa execution role.\n"
                        + click.style("Exception reported by AWS:", bold=True)
                        + format(ce)
                        + "\n"
                        + "To fix this, see here: "
                        + click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True,
                        )
                        + "\n"
                    )

        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) > 0:
            raise ClickException(
                "This application is "
                + click.style("already deployed", fg="red")
                + " - did you mean to call "
                + click.style("update", bold=True)
                + "?"
            )

        # Local packaging + S3 upload only happens for a from-source deploy.
        if not source_zip and not docker_image_uri:
            # Make sure we're in a venv.
            self.check_venv()

            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()

            # Create the Lambda Zip
            self.create_package()
            self.callback("zip")

            # Upload it to S3
            success = self.zappa.upload_to_s3(
                self.zip_path,
                self.s3_bucket_name,
                disable_progress=self.disable_progress,
            )
            if not success:  # pragma: no cover
                raise ClickException("Unable to upload to S3. Quitting.")

            # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
            if self.stage_config.get("slim_handler", False):
                # https://github.com/Miserlou/Zappa/issues/510
                success = self.zappa.upload_to_s3(
                    self.handler_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException("Unable to upload handler to S3. Quitting.")

                # Copy the project zip to the current project zip
                current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                    self.api_stage, self.project_name
                )
                success = self.zappa.copy_on_s3(
                    src_file_name=self.zip_path,
                    dst_file_name=current_project_name,
                    bucket_name=self.s3_bucket_name,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to copy the zip to be the current project. Quitting."
                    )

                handler_file = self.handler_path
            else:
                handler_file = self.zip_path

        # Fixes https://github.com/Miserlou/Zappa/issues/613
        try:
            # If the function already exists (partial previous deploy), reuse it.
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name
            )
        except botocore.client.ClientError:
            # Register the Lambda function with that zip as the source
            # You'll also need to define the path to your lambda_handler code.
            kwargs = dict(
                handler=self.lambda_handler,
                description=self.lambda_description,
                vpc_config=self.vpc_config,
                dead_letter_config=self.dead_letter_config,
                timeout=self.timeout_seconds,
                memory_size=self.memory_size,
                runtime=self.runtime,
                aws_environment_variables=self.aws_environment_variables,
                aws_kms_key_arn=self.aws_kms_key_arn,
                use_alb=self.use_alb,
                layers=self.layers,
                concurrency=self.lambda_concurrency,
            )
            kwargs["function_name"] = self.lambda_name
            # Pick the code source: docker image, s3 zip, local zip file,
            # or the package uploaded above.
            if docker_image_uri:
                kwargs["docker_image_uri"] = docker_image_uri
            elif source_zip and source_zip.startswith("s3://"):
                bucket, key_name = parse_s3_url(source_zip)
                kwargs["bucket"] = bucket
                kwargs["s3_key"] = key_name
            elif source_zip and not source_zip.startswith("s3://"):
                with open(source_zip, mode="rb") as fh:
                    byte_stream = fh.read()
                kwargs["local_zip"] = byte_stream
            else:
                kwargs["bucket"] = self.s3_bucket_name
                kwargs["s3_key"] = handler_file
            self.lambda_arn = self.zappa.create_lambda_function(**kwargs)

        # Schedule events for this deployment
        self.schedule()

        endpoint_url = ""
        deployment_string = (
            click.style("Deployment complete", fg="green", bold=True) + "!"
        )

        if self.use_alb:
            kwargs = dict(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                alb_vpc_config=self.alb_vpc_config,
                timeout=self.timeout_seconds,
            )
            self.zappa.deploy_lambda_alb(**kwargs)

        if self.use_apigateway:
            # Create and configure the API Gateway
            # NOTE(review): the returned template is unused here; update_stack
            # appears to pick up the rendered stack itself -- confirm.
            template = self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration,
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                disable_progress=self.disable_progress,
            )

            api_id = self.zappa.get_api_id(self.lambda_name)

            # Add binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)

            # Add payload compression
            if self.stage_config.get("payload_compression", True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get(
                        "payload_minimum_compression_size", 0
                    ),
                )

            # Deploy the API!
            endpoint_url = self.deploy_api_gateway(api_id)
            deployment_string = deployment_string + ": {}".format(endpoint_url)

            # Create/link API key
            if self.api_key_required:
                if self.api_key is None:
                    self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
                else:
                    self.zappa.add_api_stage_to_api_key(
                        api_key=self.api_key, api_id=api_id, stage_name=self.api_stage
                    )

            # Warm the endpoint with an initial request unless disabled.
            if self.stage_config.get("touch", True):
                self.zappa.wait_until_lambda_function_is_ready(
                    function_name=self.lambda_name
                )
                self.touch_endpoint(endpoint_url)

        # Finally, delete the local copy our zip package
        if not source_zip and not docker_image_uri:
            if self.stage_config.get("delete_local_zip", True):
                self.remove_local_zip()

        # Remove the project zip from S3.
        if not source_zip and not docker_image_uri:
            self.remove_uploaded_zip()

        self.callback("post")

        click.echo(deployment_string)
    def update(self, source_zip=None, no_upload=False, docker_image_uri=None):
        """
        Repackage and update the function code.

        source_zip: path or s3:// URL of a pre-built package; skips local
            packaging when given.
        no_upload: update configuration where appropriate, but don't upload
            new code.
        docker_image_uri: ECR image URI to update the function to.
        """
        # Local packaging/upload only happens for a from-source update.
        if not source_zip and not docker_image_uri:
            # Make sure we're in a venv.
            self.check_venv()

            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()

            # Temporary version check
            try:
                # Unix timestamp cutoff; deployments last modified before this
                # are assumed to predate a breaking Zappa change and must be
                # redeployed rather than updated.
                updated_time = 1472581018
                function_response = self.zappa.lambda_client.get_function(
                    FunctionName=self.lambda_name
                )
                conf = function_response["Configuration"]
                # NOTE(review): `parser` here must be dateutil's parser module
                # (file-level import), not an argparse parser -- confirm.
                last_updated = parser.parse(conf["LastModified"])
                last_updated_unix = time.mktime(last_updated.timetuple())
            except botocore.exceptions.BotoCoreError as e:
                click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0])
                sys.exit(-1)
            except Exception as e:
                # Most commonly: the function has never been deployed.
                click.echo(
                    click.style("Warning!", fg="red")
                    + " Couldn't get function "
                    + self.lambda_name
                    + " in "
                    + self.zappa.aws_region
                    + " - have you deployed yet?"
                )
                sys.exit(-1)

            if last_updated_unix <= updated_time:
                click.echo(
                    click.style("Warning!", fg="red")
                    + " You may have upgraded Zappa since deploying this application. You will need to "
                    + click.style("redeploy", bold=True)
                    + " for this deployment to work properly!"
                )

            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError:
                    click.echo(
                        click.style("Failed", fg="red")
                        + " to "
                        + click.style("manage IAM roles", bold=True)
                        + "!"
                    )
                    click.echo(
                        "You may "
                        + click.style("lack the necessary AWS permissions", bold=True)
                        + " to automatically manage a Zappa execution role."
                    )
                    click.echo(
                        "To fix this, see here: "
                        + click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True,
                        )
                    )
                    sys.exit(-1)

            # Create the Lambda Zip,
            if not no_upload:
                self.create_package()
                self.callback("zip")

            # Upload it to S3
            if not no_upload:
                success = self.zappa.upload_to_s3(
                    self.zip_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException("Unable to upload project to S3. Quitting.")

                # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
                if self.stage_config.get("slim_handler", False):
                    # https://github.com/Miserlou/Zappa/issues/510
                    success = self.zappa.upload_to_s3(
                        self.handler_path,
                        self.s3_bucket_name,
                        disable_progress=self.disable_progress,
                    )
                    if not success:  # pragma: no cover
                        raise ClickException(
                            "Unable to upload handler to S3. Quitting."
                        )

                    # Copy the project zip to the current project zip
                    current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                        self.api_stage, self.project_name
                    )
                    success = self.zappa.copy_on_s3(
                        src_file_name=self.zip_path,
                        dst_file_name=current_project_name,
                        bucket_name=self.s3_bucket_name,
                    )
                    if not success:  # pragma: no cover
                        raise ClickException(
                            "Unable to copy the zip to be the current project. Quitting."
                        )

                    handler_file = self.handler_path
                else:
                    handler_file = self.zip_path

        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        kwargs = dict(
            bucket=self.s3_bucket_name,
            function_name=self.lambda_name,
            num_revisions=self.num_retained_versions,
            concurrency=self.lambda_concurrency,
        )
        # Pick the code source: docker image, s3 zip, local zip file, or the
        # package uploaded above (unless --no-upload).
        if docker_image_uri:
            kwargs["docker_image_uri"] = docker_image_uri
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
            self.zappa.wait_until_lambda_function_is_ready(
                function_name=self.lambda_name
            )
        elif source_zip and source_zip.startswith("s3://"):
            bucket, key_name = parse_s3_url(source_zip)
            kwargs.update(dict(bucket=bucket, s3_key=key_name))
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        elif source_zip and not source_zip.startswith("s3://"):
            with open(source_zip, mode="rb") as fh:
                byte_stream = fh.read()
                kwargs["local_zip"] = byte_stream
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        else:
            if not no_upload:
                kwargs["s3_key"] = handler_file
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)

        # Remove the uploaded zip from S3, because it is now registered..
        if not source_zip and not no_upload and not docker_image_uri:
            self.remove_uploaded_zip()

        # Update the configuration, in case there are changes.
        self.lambda_arn = self.zappa.update_lambda_configuration(
            lambda_arn=self.lambda_arn,
            function_name=self.lambda_name,
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            aws_environment_variables=self.aws_environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn,
            layers=self.layers,
        )

        # Finally, delete the local copy our zip package
        if not source_zip and not no_upload and not docker_image_uri:
            if self.stage_config.get("delete_local_zip", True):
                self.remove_local_zip()

        if self.use_apigateway:
            # Re-render and update the CloudFormation stack for the gateway.
            self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration,
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                update_only=True,
                disable_progress=self.disable_progress,
            )

            api_id = self.zappa.get_api_id(self.lambda_name)

            # Update binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
            else:
                self.zappa.remove_binary_support(api_id=api_id, cors=self.cors)

            if self.stage_config.get("payload_compression", True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get(
                        "payload_minimum_compression_size", 0
                    ),
                )
            else:
                self.zappa.remove_api_compression(api_id=api_id)

            # It looks a bit like we might actually be using this just to get the URL,
            # but we're also updating a few of the APIGW settings.
            endpoint_url = self.deploy_api_gateway(api_id)

            if self.stage_config.get("domain", None):
                endpoint_url = self.stage_config.get("domain")
        else:
            endpoint_url = None

        self.schedule()

        # Update any cognito pool with the lambda arn
        # do this after schedule as schedule clears the lambda policy and we need to add one
        self.update_cognito_triggers()

        self.callback("post")

        if endpoint_url and "https://" not in endpoint_url:
            endpoint_url = "https://" + endpoint_url

        # NOTE(review): if use_apigateway is false (endpoint_url is None) and
        # base_path is set, this += raises TypeError -- confirm base_path is
        # only configured together with API Gateway.
        if self.base_path:
            endpoint_url += "/" + self.base_path

        deployed_string = (
            "Your updated Zappa deployment is "
            + click.style("live", fg="green", bold=True)
            + "!"
        )
        if self.use_apigateway:
            deployed_string = (
                deployed_string
                + ": "
                + click.style("{}".format(endpoint_url), bold=True)
            )

            api_url = None
            if endpoint_url and "amazonaws.com" not in endpoint_url:
                # Custom domain in use; also show the raw API Gateway URL.
                api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)

                if endpoint_url != api_url:
                    deployed_string = deployed_string + " (" + api_url + ")"

            # Warm the endpoint with an initial request unless disabled.
            if self.stage_config.get("touch", True):
                self.zappa.wait_until_lambda_function_is_ready(
                    function_name=self.lambda_name
                )
                if api_url:
                    self.touch_endpoint(api_url)
                elif endpoint_url:
                    self.touch_endpoint(endpoint_url)

        click.echo(deployed_string)
    def rollback(self, revision):
        """
        Roll back the currently deployed Lambda code to a previous revision.

        revision: number of versions to step back (a positive int, validated
            by the CLI's `positive_int` argparse type).
        """
        print("Rolling back..")

        self.zappa.rollback_lambda_function_version(
            self.lambda_name, versions_back=revision
        )
        print("Done!")
def tail(
self,
since,
filter_pattern,
limit=10000,
keep_open=True,
colorize=True,
http=False,
non_http=False,
force_colorize=False,
):
"""
Tail this function's logs.
if keep_open, do so repeatedly, printing any new logs
"""
try:
since_stamp = string_to_timestamp(since)
last_since = since_stamp
while True:
new_logs = self.zappa.fetch_logs(
self.lambda_name,
start_time=since_stamp,
limit=limit,
filter_pattern=filter_pattern,
)
new_logs = [e for e in new_logs if e["timestamp"] > last_since]
self.print_logs(new_logs, colorize, http, non_http, force_colorize)
if not keep_open:
break
if new_logs:
last_since = new_logs[-1]["timestamp"]
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
# Die gracefully
try:
sys.exit(0)
except SystemExit:
os._exit(130)
    def undeploy(self, no_confirm=False, remove_logs=False):
        """
        Tear down an existing deployment.

        no_confirm: skip the interactive "are you sure" prompt.
        remove_logs: also delete the API Gateway and Lambda CloudWatch log
            groups.
        """
        if not no_confirm:  # pragma: no cover
            confirm = input("Are you sure you want to undeploy? [y/n] ")
            if confirm != "y":
                return

        if self.use_alb:
            self.zappa.undeploy_lambda_alb(self.lambda_name)

        if self.use_apigateway:
            if remove_logs:
                self.zappa.remove_api_gateway_logs(self.lambda_name)

            domain_name = self.stage_config.get("domain", None)
            base_path = self.stage_config.get("base_path", None)

            # Only remove the api key when not specified
            if self.api_key_required and self.api_key is None:
                api_id = self.zappa.get_api_id(self.lambda_name)
                self.zappa.remove_api_key(api_id, self.api_stage)

            # NOTE(review): the returned gateway id is unused.
            gateway_id = self.zappa.undeploy_api_gateway(
                self.lambda_name, domain_name=domain_name, base_path=base_path
            )

        self.unschedule()  # removes event triggers, including warm up event.

        self.zappa.delete_lambda_function(self.lambda_name)

        if remove_logs:
            self.zappa.remove_lambda_function_logs(self.lambda_name)

        click.echo(click.style("Done", fg="green", bold=True) + "!")
def update_cognito_triggers(self):
"""
Update any cognito triggers
"""
if self.cognito:
user_pool = self.cognito.get("user_pool")
triggers = self.cognito.get("triggers", [])
lambda_configs = set()
for trigger in triggers:
lambda_configs.add(trigger["source"].split("_")[0])
self.zappa.update_cognito(
self.lambda_name, user_pool, lambda_configs, self.lambda_arn
)
    def schedule(self):
        """
        Given a list of functions and a schedule to execute them,
        setup up regular execution.

        Also provisions the async-task SNS topic and DynamoDB response table
        when configured.
        """
        events = self.stage_config.get("events", [])

        if events:
            if not isinstance(events, list):  # pragma: no cover
                print("Events must be supplied as a list.")
                return

            for event in events:
                # Warn about event function names that collide elsewhere.
                self.collision_warning(event.get("function"))

        if self.stage_config.get("keep_warm", True):
            if not events:
                events = []

            keep_warm_rate = self.stage_config.get(
                "keep_warm_expression", "rate(4 minutes)"
            )
            # Synthesized event that pings the function to keep its
            # container warm.
            events.append(
                {
                    "name": "zappa-keep-warm",
                    "function": "handler.keep_warm_callback",
                    "expression": keep_warm_rate,
                    "description": "Zappa Keep Warm - {}".format(self.lambda_name),
                }
            )

        if events:
            try:
                function_response = self.zappa.lambda_client.get_function(
                    FunctionName=self.lambda_name
                )
            except botocore.exceptions.ClientError as e:  # pragma: no cover
                # NOTE(review): this renders as "deployfirst. Ex:zappa..." --
                # spaces are missing in the concatenated message.
                click.echo(
                    click.style("Function does not exist", fg="yellow")
                    + ", please "
                    + click.style("deploy", bold=True)
                    + "first. Ex:"
                    + click.style("zappa deploy {}.".format(self.api_stage), bold=True)
                )
                sys.exit(-1)

            print("Scheduling..")
            self.zappa.schedule_events(
                lambda_arn=function_response["Configuration"]["FunctionArn"],
                lambda_name=self.lambda_name,
                events=events,
            )

        # Add async tasks SNS
        if self.stage_config.get(
            "async_source", None
        ) == "sns" and self.stage_config.get("async_resources", True):
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name
            )
            topic_arn = self.zappa.create_async_sns_topic(
                lambda_name=self.lambda_name, lambda_arn=self.lambda_arn
            )
            click.echo("SNS Topic created: %s" % topic_arn)

        # Add async tasks DynamoDB
        table_name = self.stage_config.get("async_response_table", False)
        read_capacity = self.stage_config.get("async_response_table_read_capacity", 1)
        write_capacity = self.stage_config.get("async_response_table_write_capacity", 1)
        if table_name and self.stage_config.get("async_resources", True):
            created, response_table = self.zappa.create_async_dynamodb_table(
                table_name, read_capacity, write_capacity
            )
            if created:
                click.echo("DynamoDB table created: %s" % table_name)
            else:
                click.echo("DynamoDB table exists: %s" % table_name)
                # Existing table: warn if its capacity differs from config,
                # since we don't resize it automatically.
                provisioned_throughput = response_table["Table"][
                    "ProvisionedThroughput"
                ]
                if (
                    provisioned_throughput["ReadCapacityUnits"] != read_capacity
                    or provisioned_throughput["WriteCapacityUnits"] != write_capacity
                ):
                    click.echo(
                        click.style(
                            "\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(
                                table_name
                            ),
                            fg="red",
                        )
                    )
    def unschedule(self):
        """
        Given a list of scheduled functions,
        tear down their regular execution.
        """
        # Run even if events are not defined to remove previously existing ones (thus default to []).
        events = self.stage_config.get("events", [])

        if not isinstance(events, list):  # pragma: no cover
            print("Events must be supplied as a list.")
            return

        function_arn = None
        try:
            function_response = self.zappa.lambda_client.get_function(
                FunctionName=self.lambda_name
            )
            function_arn = function_response["Configuration"]["FunctionArn"]
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # NOTE(review): the message says "Proceeding to unschedule", but
            # raising here aborts instead of proceeding -- confirm intent.
            raise ClickException(
                "Function does not exist, you should deploy first. Ex: zappa deploy {}. "
                "Proceeding to unschedule CloudWatch based events.".format(
                    self.api_stage
                )
            )

        print("Unscheduling..")
        self.zappa.unschedule_events(
            lambda_name=self.lambda_name,
            lambda_arn=function_arn,
            events=events,
        )

        # Remove async task SNS
        if self.stage_config.get(
            "async_source", None
        ) == "sns" and self.stage_config.get("async_resources", True):
            removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo("SNS Topic removed: %s" % ", ".join(removed_arns))
def invoke(self, function_name, raw_python=False, command=None, no_color=False):
    """
    Invoke a remote function.

    :param function_name: Modular function path to call remotely, or raw
        Python source when ``raw_python`` is True.
    :param raw_python: If True, send ``function_name`` as raw Python code
        to be executed directly.
    :param command: Optional command key (e.g. "manage" for Django
        management commands); defaults to "command".
    :param no_color: If True, print the execution log without colorizing.
    :raises ClickException: If the invocation response reports a FunctionError.
    """
    # There are three likely scenarios for 'command' here:
    #   command, which is a modular function path
    #   raw_command, which is a string of python to execute directly
    #   manage, which is a Django-specific management command invocation
    key = command if command is not None else "command"
    if raw_python:
        command = {"raw_command": function_name}
    else:
        command = {key: function_name}
    # Can't use hjson
    import json as json
    response = self.zappa.invoke_lambda_function(
        self.lambda_name,
        json.dumps(command),
        invocation_type="RequestResponse",
    )
    # The execution log comes back base64-encoded under "LogResult".
    if "LogResult" in response:
        if no_color:
            print(base64.b64decode(response["LogResult"]))
        else:
            decoded = base64.b64decode(response["LogResult"]).decode()
            formatted = self.format_invoke_command(decoded)
            colorized = self.colorize_invoke_command(formatted)
            print(colorized)
    else:
        print(response)
    # For a successful request FunctionError is not in response.
    # https://github.com/Miserlou/Zappa/pull/1254/
    if "FunctionError" in response:
        raise ClickException(
            "{} error occurred while invoking command.".format(
                response["FunctionError"]
            )
        )
def format_invoke_command(self, string):
    """
    Format the raw log output from the invoke() method for display.

    Expands escaped newlines, puts REPORT fields on their own lines,
    flattens tabs in [DEBUG] lines, and collapses doubled newlines.
    """
    expanded = string.replace("\\n", "\n")
    pieces = []
    for raw_line in expanded.splitlines():
        if raw_line.startswith("REPORT"):
            # Each tab-separated REPORT field goes on its own line.
            raw_line = raw_line.replace("\t", "\n")
        if raw_line.startswith("[DEBUG]"):
            raw_line = raw_line.replace("\t", " ")
        pieces.append(raw_line)
    joined = ("\n".join(pieces) + "\n") if pieces else ""
    return joined.replace("\n\n", "\n")
def colorize_invoke_command(self, string):
    """
    Apply various heuristics to return a colorized version of the invoke
    command string. If these fail, simply return the string in plaintext.

    Inspired by colorize_log_entry().
    """
    colorized = string
    try:
        # Log line headers become bold cyan.
        try:
            for header in ("START", "END", "REPORT", "[DEBUG]"):
                if header not in colorized:
                    continue
                if header == "[DEBUG]":
                    template = "{}"
                    pattern = re.escape(header)
                else:
                    template = "[{}]"
                    # match whole words only
                    pattern = r"\b{}\b"
                styled = click.style(
                    template.format(header), bold=True, fg="cyan"
                )
                colorized = re.sub(pattern.format(header), styled, colorized)
        except Exception:  # pragma: no cover
            pass
        # Well-known report labels become bold green.
        try:
            for label in (
                "Zappa Event:",
                "RequestId:",
                "Version:",
                "Duration:",
                "Billed",
                "Memory Size:",
                "Max Memory Used:",
            ):
                if label in colorized:
                    colorized = colorized.replace(
                        label, click.style(label, bold=True, fg="green")
                    )
        except Exception:  # pragma: no cover
            pass
        # Words shaped like UUIDs (4 dashes, alphanumeric) become magenta.
        for word in colorized.replace("\t", " ").split(" "):
            try:
                if word.count("-") == 4 and word.replace("-", "").isalnum():
                    colorized = colorized.replace(
                        word, click.style(word, fg="magenta")
                    )
            except Exception:  # pragma: no cover
                pass
        return colorized
    except Exception:
        return string
def status(self, return_json=False):
    """
    Describe the status of the current deployment.

    :param return_json: If True, print the status as machine-readable JSON
        instead of a human-friendly table.
    :return: True on success.
    :raises ClickException: If no deployed Lambda exists for this stage.
    """
    def tabular_print(title, value):
        """
        Convenience function for printing formatted table items.
        """
        click.echo(
            "%-*s%s" % (32, click.style("\t" + title, fg="green") + ":", str(value))
        )
        return
    # Lambda Env Details
    lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
    if not lambda_versions:
        raise ClickException(
            click.style(
                "No Lambda %s detected in %s - have you deployed yet?"
                % (self.lambda_name, self.zappa.aws_region),
                fg="red",
            )
        )
    status_dict = collections.OrderedDict()
    status_dict["Lambda Versions"] = len(lambda_versions)
    function_response = self.zappa.lambda_client.get_function(
        FunctionName=self.lambda_name
    )
    conf = function_response["Configuration"]
    self.lambda_arn = conf["FunctionArn"]
    status_dict["Lambda Name"] = self.lambda_name
    status_dict["Lambda ARN"] = self.lambda_arn
    status_dict["Lambda Role ARN"] = conf["Role"]
    status_dict["Lambda Code Size"] = conf["CodeSize"]
    status_dict["Lambda Version"] = conf["Version"]
    status_dict["Lambda Last Modified"] = conf["LastModified"]
    status_dict["Lambda Memory Size"] = conf["MemorySize"]
    status_dict["Lambda Timeout"] = conf["Timeout"]
    # Handler & Runtime won't be present for lambda Docker deployments
    # https://github.com/Miserlou/Zappa/issues/2188
    status_dict["Lambda Handler"] = conf.get("Handler", "")
    status_dict["Lambda Runtime"] = conf.get("Runtime", "")
    if "VpcConfig" in conf.keys():
        status_dict["Lambda VPC ID"] = conf.get("VpcConfig", {}).get(
            "VpcId", "Not assigned"
        )
    else:
        status_dict["Lambda VPC ID"] = None
    # Calculated statistics: invocation and error counts over the last 24h.
    # Any CloudWatch failure (or no datapoints) falls back to 0.
    try:
        function_invocations = self.zappa.cloudwatch.get_metric_statistics(
            Namespace="AWS/Lambda",
            MetricName="Invocations",
            StartTime=datetime.utcnow() - timedelta(days=1),
            EndTime=datetime.utcnow(),
            Period=1440,
            Statistics=["Sum"],
            Dimensions=[
                {"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
            ],
        )["Datapoints"][0]["Sum"]
    except Exception as e:
        function_invocations = 0
    try:
        function_errors = self.zappa.cloudwatch.get_metric_statistics(
            Namespace="AWS/Lambda",
            MetricName="Errors",
            StartTime=datetime.utcnow() - timedelta(days=1),
            EndTime=datetime.utcnow(),
            Period=1440,
            Statistics=["Sum"],
            Dimensions=[
                {"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
            ],
        )["Datapoints"][0]["Sum"]
    except Exception as e:
        function_errors = 0
    try:
        error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
    except:
        # Division by zero when there were no invocations in the window.
        error_rate = "Error calculating"
    status_dict["Invocations (24h)"] = int(function_invocations)
    status_dict["Errors (24h)"] = int(function_errors)
    status_dict["Error Rate (24h)"] = error_rate
    # URLs
    if self.use_apigateway:
        api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)
        status_dict["API Gateway URL"] = api_url
        # Api Keys
        api_id = self.zappa.get_api_id(self.lambda_name)
        for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
            status_dict["API Gateway x-api-key"] = api_key
        # There literally isn't a better way to do this.
        # AWS provides no way to tie a APIGW domain name to its Lambda function.
        domain_url = self.stage_config.get("domain", None)
        base_path = self.stage_config.get("base_path", None)
        if domain_url:
            status_dict["Domain URL"] = "https://" + domain_url
            if base_path:
                status_dict["Domain URL"] += "/" + base_path
        else:
            status_dict["Domain URL"] = "None Supplied"
    # Scheduled Events
    event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
    status_dict["Num. Event Rules"] = len(event_rules)
    if len(event_rules) > 0:
        status_dict["Events"] = []
    for rule in event_rules:
        event_dict = {}
        rule_name = rule["Name"]
        event_dict["Event Rule Name"] = rule_name
        event_dict["Event Rule Schedule"] = rule.get("ScheduleExpression", None)
        event_dict["Event Rule State"] = rule.get("State", None).title()
        event_dict["Event Rule ARN"] = rule.get("Arn", None)
        status_dict["Events"].append(event_dict)
    if return_json:
        # Putting the status in machine readable format
        # https://github.com/Miserlou/Zappa/issues/407
        print(json.dumpsJSON(status_dict))
    else:
        click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
        for k, v in status_dict.items():
            if k == "Events":
                # Events are a list of dicts
                for event in v:
                    for item_k, item_v in event.items():
                        tabular_print(item_k, item_v)
            else:
                tabular_print(k, v)
    # TODO: S3/SQS/etc. type events?
    return True
def check_stage_name(self, stage_name):
    """
    Make sure the stage name matches the AWS-allowed pattern.

    (Calls to apigateway_client.create_deployment will otherwise fail with
    "ClientError: An error occurred (BadRequestException) when calling the
    CreateDeployment operation: Stage name only allows a-zA-Z0-9_".)

    :return: True when the name is valid.
    :raises ValueError: When the name contains disallowed characters.
    """
    if not self.stage_name_env_pattern.match(stage_name):
        raise ValueError("AWS requires stage name to match a-zA-Z0-9_")
    return True
def check_environment(self, environment):
    """
    Make sure the environment contains only strings
    (since putenv needs a string).

    :param environment: Mapping of environment variable names to values.
    :return: True when every value is a string.
    :raises ValueError: Listing the keys whose values are not strings.
    """
    # `basestring` is a Python 2 name; under Python 3 it is a NameError
    # unless shimmed, so test against `str` directly.
    non_strings = [k for (k, v) in environment.items() if not isinstance(v, str)]
    if non_strings:
        raise ValueError(
            "The following environment variables are not strings: {}".format(
                ", ".join(non_strings)
            )
        )
    return True
def init(self, settings_file="zappa_settings.json"):
    """
    Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.

    This should probably be broken up into a few separate components once it's stable.
    Testing these inputs requires monkeypatching with mock, which isn't pretty.

    :param settings_file: Path of the settings file to create.
    :raises ClickException: If not running in a virtualenv or a settings
        file already exists in this directory.
    """
    # Make sure we're in a venv.
    self.check_venv()
    # Ensure that we don't already have a zappa_settings file.
    if os.path.isfile(settings_file):
        raise ClickException(
            "This project already has a "
            + click.style("{0!s} file".format(settings_file), fg="red", bold=True)
            + "!"
        )
    # Explain system.
    click.echo(
        click.style(
            """\n███████╗ █████╗ ██████╗ ██████╗ █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
███╔╝ ███████║██████╔╝██████╔╝███████║
███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║ ██║██║ ██║ ██║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""",
            fg="green",
            bold=True,
        )
    )
    click.echo(
        click.style("Welcome to ", bold=True)
        + click.style("Zappa", fg="green", bold=True)
        + click.style("!\n", bold=True)
    )
    click.echo(
        click.style("Zappa", bold=True)
        + " is a system for running server-less Python web applications"
        " on AWS Lambda and AWS API Gateway."
    )
    click.echo(
        "This `init` command will help you create and configure your new Zappa deployment."
    )
    click.echo("Let's get started!\n")
    # Create Env: prompt until a valid stage name is supplied.
    while True:
        click.echo(
            "Your Zappa configuration can support multiple production stages, like '"
            + click.style("dev", bold=True)
            + "', '"
            + click.style("staging", bold=True)
            + "', and '"
            + click.style("production", bold=True)
            + "'."
        )
        env = (
            input("What do you want to call this environment (default 'dev'): ")
            or "dev"
        )
        try:
            self.check_stage_name(env)
            break
        except ValueError:
            click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))
    # Detect AWS profiles and regions
    # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack
    session = botocore.session.Session()
    config = session.full_config
    profiles = config.get("profiles", {})
    profile_names = list(profiles.keys())
    click.echo(
        "\nAWS Lambda and API Gateway are only available in certain regions. "
        "Let's check to make sure you have a profile set up in one that will work."
    )
    if not profile_names:
        # No AWS profiles configured at all; point the user at the boto3 docs.
        profile_name, profile = None, None
        click.echo(
            "We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}".format(
                click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)
            )
        )
    elif len(profile_names) == 1:
        profile_name = profile_names[0]
        profile = profiles[profile_name]
        click.echo(
            "Okay, using profile {}!".format(click.style(profile_name, bold=True))
        )
    else:
        # Multiple profiles: suggest 'default' if present, else the first one.
        if "default" in profile_names:
            default_profile = [p for p in profile_names if p == "default"][0]
        else:
            default_profile = profile_names[0]
        while True:
            profile_name = (
                input(
                    "We found the following profiles: {}, and {}. "
                    "Which would you like us to use? (default '{}'): ".format(
                        ", ".join(profile_names[:-1]),
                        profile_names[-1],
                        default_profile,
                    )
                )
                or default_profile
            )
            if profile_name in profiles:
                profile = profiles[profile_name]
                break
            else:
                click.echo("Please enter a valid name for your AWS profile.")
    profile_region = profile.get("region") if profile else None
    # Create Bucket: prompt until a valid S3 bucket name is supplied.
    click.echo(
        "\nYour Zappa deployments will need to be uploaded to a "
        + click.style("private S3 bucket", bold=True)
        + "."
    )
    click.echo("If you don't have a bucket yet, we'll create one for you too.")
    default_bucket = "zappa-" + "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
    )
    while True:
        bucket = (
            input(
                "What do you want to call your bucket? (default '%s'): "
                % default_bucket
            )
            or default_bucket
        )
        if is_valid_bucket_name(bucket):
            break
        click.echo(click.style("Invalid bucket name!", bold=True))
        click.echo("S3 buckets must be named according to the following rules:")
        click.echo(
            """* Bucket names must be unique across all existing bucket names in Amazon S3.
* Bucket names must comply with DNS naming conventions.
* Bucket names must be at least 3 and no more than 63 characters long.
* Bucket names must not contain uppercase characters or underscores.
* Bucket names must start with a lowercase letter or number.
* Bucket names must be a series of one or more labels. Adjacent labels are separated
by a single period (.). Bucket names can contain lowercase letters, numbers, and
hyphens. Each label must start and end with a lowercase letter or a number.
* Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL
wildcard certificate only matches buckets that don't contain periods. To work around
this, use HTTP or write your own certificate verification logic. We recommend that
you do not use periods (".") in bucket names when using virtual hosted–style buckets.
"""
        )
    # Detect Django/Flask by whether the packages import from this venv.
    try:  # pragma: no cover
        import django
        has_django = True
    except ImportError as e:
        has_django = False
    try:  # pragma: no cover
        import flask
        has_flask = True
    except ImportError as e:
        has_flask = False
    print("")
    # App-specific
    if has_django:  # pragma: no cover
        click.echo(
            "It looks like this is a "
            + click.style("Django", bold=True)
            + " application!"
        )
        click.echo(
            "What is the "
            + click.style("module path", bold=True)
            + " to your projects's Django settings?"
        )
        django_settings = None
        matches = detect_django_settings()
        while django_settings in [None, ""]:
            if matches:
                click.echo(
                    "We discovered: "
                    + click.style(
                        ", ".join("{}".format(i) for v, i in enumerate(matches)),
                        bold=True,
                    )
                )
                django_settings = (
                    input(
                        "Where are your project's settings? (default '%s'): "
                        % matches[0]
                    )
                    or matches[0]
                )
            else:
                click.echo(
                    "(This will likely be something like 'your_project.settings')"
                )
                django_settings = input("Where are your project's settings?: ")
        django_settings = django_settings.replace("'", "")
        django_settings = django_settings.replace('"', "")
    else:
        matches = None
        if has_flask:
            click.echo(
                "It looks like this is a "
                + click.style("Flask", bold=True)
                + " application."
            )
            matches = detect_flask_apps()
        click.echo(
            "What's the "
            + click.style("modular path", bold=True)
            + " to your app's function?"
        )
        click.echo("This will likely be something like 'your_module.app'.")
        app_function = None
        while app_function in [None, ""]:
            if matches:
                click.echo(
                    "We discovered: "
                    + click.style(
                        ", ".join("{}".format(i) for v, i in enumerate(matches)),
                        bold=True,
                    )
                )
                app_function = (
                    input(
                        "Where is your app's function? (default '%s'): "
                        % matches[0]
                    )
                    or matches[0]
                )
            else:
                app_function = input("Where is your app's function?: ")
        app_function = app_function.replace("'", "")
        app_function = app_function.replace('"', "")
    # TODO: Create VPC?
    # Memory size? Time limit?
    # Domain? LE keys? Region?
    # 'Advanced Settings' mode?
    # Globalize
    click.echo(
        "\nYou can optionally deploy to "
        + click.style("all available regions", bold=True)
        + " in order to provide fast global service."
    )
    click.echo(
        "If you are using Zappa for the first time, you probably don't want to do this!"
    )
    global_deployment = False
    while True:
        global_type = input(
            "Would you like to deploy this application "
            + click.style("globally", bold=True)
            + "? (default 'n') [y/n/(p)rimary]: "
        )
        if not global_type:
            break
        if global_type.lower() in ["y", "yes", "p", "primary"]:
            global_deployment = True
            break
        if global_type.lower() in ["n", "no"]:
            global_deployment = False
            break
    # The given environment name
    zappa_settings = {
        env: {
            "profile_name": profile_name,
            "s3_bucket": bucket,
            "runtime": get_venv_from_python_version(),
            "project_name": self.get_project_name(),
        }
    }
    if profile_region:
        zappa_settings[env]["aws_region"] = profile_region
    if has_django:
        zappa_settings[env]["django_settings"] = django_settings
    else:
        zappa_settings[env]["app_function"] = app_function
    # Global Region Deployment
    if global_deployment:
        additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region]
        # Create additional stages: "primary" restricts to the "-1" regions.
        if global_type.lower() in ["p", "primary"]:
            additional_regions = [r for r in additional_regions if "-1" in r]
        for region in additional_regions:
            env_name = env + "_" + region.replace("-", "_")
            g_env = {env_name: {"extends": env, "aws_region": region}}
            zappa_settings.update(g_env)
    import json as json  # hjson is fine for loading, not fine for writing.
    zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)
    click.echo(
        "\nOkay, here's your "
        + click.style("zappa_settings.json", bold=True)
        + ":\n"
    )
    click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))
    confirm = (
        input(
            "\nDoes this look "
            + click.style("okay", bold=True, fg="green")
            + "? (default 'y') [y/n]: "
        )
        or "yes"
    )
    if confirm[0] not in ["y", "Y", "yes", "YES"]:
        click.echo(
            ""
            + click.style("Sorry", bold=True, fg="red")
            + " to hear that! Please init again."
        )
        return
    # Write
    with open("zappa_settings.json", "w") as zappa_settings_file:
        zappa_settings_file.write(zappa_settings_json)
    if global_deployment:
        click.echo(
            "\n"
            + click.style("Done", bold=True)
            + "! You can also "
            + click.style("deploy all", bold=True)
            + " by executing:\n"
        )
        click.echo(click.style("\t$ zappa deploy --all", bold=True))
        click.echo(
            "\nAfter that, you can "
            + click.style("update", bold=True)
            + " your application code with:\n"
        )
        click.echo(click.style("\t$ zappa update --all", bold=True))
    else:
        click.echo(
            "\n"
            + click.style("Done", bold=True)
            + "! Now you can "
            + click.style("deploy", bold=True)
            + " your Zappa application by executing:\n"
        )
        click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))
        click.echo(
            "\nAfter that, you can "
            + click.style("update", bold=True)
            + " your application code with:\n"
        )
        click.echo(click.style("\t$ zappa update %s" % env, bold=True))
    click.echo(
        "\nTo learn more, check out our project page on "
        + click.style("GitHub", bold=True)
        + " here: "
        + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True)
    )
    click.echo(
        "and stop by our "
        + click.style("Slack", bold=True)
        + " channel here: "
        + click.style("https://zappateam.slack.com", fg="cyan", bold=True)
    )
    click.echo("\nEnjoy!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
    return
def certify(self, no_confirm=True, manual=False):
    """
    Register or update a domain certificate for this env.

    Exactly one of three certificate sources is expected in the stage
    config: a Let's Encrypt account key (``lets_encrypt_key``), local
    certificate files (``certificate``/``certificate_key``/
    ``certificate_chain``), or an ACM ARN (``certificate_arn``).

    :param no_confirm: If True, skip the interactive confirmation prompt.
    :param manual: Passed through to the Let's Encrypt flow for manual
        DNS challenges.
    :raises ClickException: If no domain is configured, nothing is deployed
        yet, or the certificate settings are incomplete.
    """
    if not self.domain:
        raise ClickException(
            "Can't certify a domain without "
            + click.style("domain", fg="red", bold=True)
            + " configured!"
        )
    if not no_confirm:  # pragma: no cover
        confirm = input("Are you sure you want to certify? [y/n] ")
        if confirm != "y":
            return
    # Make sure this isn't already deployed.
    deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
    if len(deployed_versions) == 0:
        raise ClickException(
            "This application "
            + click.style("isn't deployed yet", fg="red")
            + " - did you mean to call "
            + click.style("deploy", bold=True)
            + "?"
        )
    account_key_location = self.stage_config.get("lets_encrypt_key", None)
    cert_location = self.stage_config.get("certificate", None)
    cert_key_location = self.stage_config.get("certificate_key", None)
    cert_chain_location = self.stage_config.get("certificate_chain", None)
    cert_arn = self.stage_config.get("certificate_arn", None)
    base_path = self.stage_config.get("base_path", None)
    # These are sensitive
    certificate_body = None
    certificate_private_key = None
    certificate_chain = None
    # Prepare for custom Let's Encrypt
    if not cert_location and not cert_arn:
        if not account_key_location:
            raise ClickException(
                "Can't certify a domain without "
                + click.style("lets_encrypt_key", fg="red", bold=True)
                + " or "
                + click.style("certificate", fg="red", bold=True)
                + " or "
                + click.style("certificate_arn", fg="red", bold=True)
                + " configured!"
            )
        # Get install account_key to /tmp/account_key.pem
        from .letsencrypt import gettempdir
        # The account key may live on S3 or on the local filesystem.
        if account_key_location.startswith("s3://"):
            bucket, key_name = parse_s3_url(account_key_location)
            self.zappa.s3_client.download_file(
                bucket, key_name, os.path.join(gettempdir(), "account.key")
            )
        else:
            from shutil import copyfile
            copyfile(
                account_key_location, os.path.join(gettempdir(), "account.key")
            )
    # Prepare for Custom SSL
    elif not account_key_location and not cert_arn:
        if not cert_location or not cert_key_location or not cert_chain_location:
            raise ClickException(
                "Can't certify a domain without "
                + click.style(
                    "certificate, certificate_key and certificate_chain",
                    fg="red",
                    bold=True,
                )
                + " configured!"
            )
        # Read the supplied certificates.
        with open(cert_location) as f:
            certificate_body = f.read()
        with open(cert_key_location) as f:
            certificate_private_key = f.read()
        with open(cert_chain_location) as f:
            certificate_chain = f.read()
    click.echo(
        "Certifying domain "
        + click.style(self.domain, fg="green", bold=True)
        + ".."
    )
    # Get cert and update domain.
    # Let's Encrypt
    if not cert_location and not cert_arn:
        from .letsencrypt import get_cert_and_update_domain
        cert_success = get_cert_and_update_domain(
            self.zappa, self.lambda_name, self.api_stage, self.domain, manual
        )
    # Custom SSL / ACM
    else:
        route53 = self.stage_config.get("route53_enabled", True)
        # Create the domain name mapping if it doesn't exist yet,
        # otherwise update the existing one in place.
        if not self.zappa.get_domain_name(self.domain, route53=route53):
            dns_name = self.zappa.create_domain_name(
                domain_name=self.domain,
                certificate_name=self.domain + "-Zappa-Cert",
                certificate_body=certificate_body,
                certificate_private_key=certificate_private_key,
                certificate_chain=certificate_chain,
                certificate_arn=cert_arn,
                lambda_name=self.lambda_name,
                stage=self.api_stage,
                base_path=base_path,
            )
            if route53:
                self.zappa.update_route53_records(self.domain, dns_name)
            print(
                "Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be "
                "created and propagated through AWS, but it requires no further work on your part."
            )
        else:
            self.zappa.update_domain_name(
                domain_name=self.domain,
                certificate_name=self.domain + "-Zappa-Cert",
                certificate_body=certificate_body,
                certificate_private_key=certificate_private_key,
                certificate_chain=certificate_chain,
                certificate_arn=cert_arn,
                lambda_name=self.lambda_name,
                stage=self.api_stage,
                route53=route53,
                base_path=base_path,
            )
        cert_success = True
    if cert_success:
        click.echo(
            "Certificate " + click.style("updated", fg="green", bold=True) + "!"
        )
    else:
        click.echo(
            click.style("Failed", fg="red", bold=True)
            + " to generate or install certificate! :("
        )
        click.echo("\n==============\n")
        shamelessly_promote()
##
# Shell
##
def shell(self):
    """
    Spawn a local debug shell inside an initialized Zappa object.
    """
    notice = (
        click.style("NOTICE!", fg="yellow", bold=True)
        + " This is a "
        + click.style("local", fg="green", bold=True)
        + " shell, inside a "
        + click.style("Zappa", bold=True)
        + " object!"
    )
    click.echo(notice)
    self.zappa.shell()
    return
##
# Utility
##
def callback(self, position):
    """
    Allows the execution of custom code between creation of the zip file and deployment to AWS.

    :param position: Name of the callback hook configured under the stage's
        "callbacks" setting (e.g. "zip", "post").
    :return: None
    :raises ClickException: If the configured callback module or function
        cannot be imported/found.
    """
    callbacks = self.stage_config.get("callbacks", {})
    callback = callbacks.get(position)
    if not callback:
        return
    (mod_path, cb_func_name) = callback.rsplit(".", 1)
    try:  # Prefer callback in working directory
        if mod_path.count(".") >= 1:  # Callback function is nested in a folder
            (mod_folder_path, mod_name) = mod_path.rsplit(".", 1)
            mod_folder_path_fragments = mod_folder_path.split(".")
            working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
        else:
            mod_name = mod_path
            working_dir = os.getcwd()
        # find_module()/load_module() were deprecated and removed in
        # Python 3.12; use the find_spec()/exec_module() API instead.
        import importlib.util
        working_dir_importer = pkgutil.get_importer(working_dir)
        spec = working_dir_importer.find_spec(mod_name)
        if spec is None:
            # Not present in the working directory; fall through to the
            # virtualenv import below.
            raise ImportError(mod_name)
        module_ = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module_)
    except (ImportError, AttributeError):
        try:  # Callback func might be in virtualenv
            module_ = importlib.import_module(mod_path)
        except ImportError:  # pragma: no cover
            raise ClickException(
                click.style("Failed ", fg="red")
                + "to "
                + click.style(
                    "import {position} callback ".format(position=position),
                    bold=True,
                )
                + 'module: "{mod_path}"'.format(
                    mod_path=click.style(mod_path, bold=True)
                )
            )
    if not hasattr(module_, cb_func_name):  # pragma: no cover
        raise ClickException(
            click.style("Failed ", fg="red")
            + "to "
            + click.style(
                "find {position} callback ".format(position=position), bold=True
            )
            + 'function: "{cb_func_name}" '.format(
                cb_func_name=click.style(cb_func_name, bold=True)
            )
            + 'in module "{mod_path}"'.format(mod_path=mod_path)
        )
    cb_func = getattr(module_, cb_func_name)
    cb_func(self)  # Call the function passing self
def check_for_update(self):
    """
    Print a warning if there's a new Zappa version available.

    Any failure (no network access, missing package metadata) is printed
    and swallowed, never raised.
    """
    try:
        installed_version = pkg_resources.require("zappa-troposphere")[0].version
        if check_new_version_available(installed_version):
            click.echo(
                click.style("Important!", fg="yellow", bold=True)
                + " A new version of "
                + click.style("Zappa", bold=True)
                + " is available!"
            )
            click.echo(
                "Upgrade with: "
                + click.style("pip install zappa --upgrade", bold=True)
            )
            click.echo(
                "Visit the project page on GitHub to see the latest changes: "
                + click.style("https://github.com/Zappa/Zappa", bold=True)
            )
    except Exception as e:  # pragma: no cover
        print(e)
    return
def load_settings(self, settings_file=None, session=None):
    """
    Load the local zappa_settings file.

    An existing boto session can be supplied, though this is likely for testing purposes.

    Populates the stage-specific attributes on self and constructs the
    Zappa core object.

    :param settings_file: Optional explicit path to the settings file;
        autodetected (JSON/TOML/YAML) when omitted.
    :param session: Optional existing boto session.
    :return: The loaded Zappa object (also stored as self.zappa).
    :raises ClickException: On missing/invalid settings or stage.
    :raises ValueError: When a stage name contains disallowed characters.
    """
    # Ensure we're passed a valid settings file.
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException("Please configure your zappa_settings file.")
    # Load up file
    self.load_settings_file(settings_file)
    # Make sure that the stages are valid names:
    for stage_name in self.zappa_settings.keys():
        try:
            self.check_stage_name(stage_name)
        except ValueError:
            raise ValueError(
                "API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(
                    stage_name
                )
            )
    # Make sure that this stage is our settings
    if self.api_stage not in self.zappa_settings.keys():
        raise ClickException(
            "Please define stage '{0!s}' in your Zappa settings.".format(
                self.api_stage
            )
        )
    # We need a working title for this project. Use one if supplied, else cwd dirname.
    if "project_name" in self.stage_config:  # pragma: no cover
        # If the name is invalid, this will throw an exception with message up stack
        self.project_name = validate_name(self.stage_config["project_name"])
    else:
        self.project_name = self.get_project_name()
    # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
    # Assume that we already have have validated the name beforehand.
    # Related: https://github.com/Miserlou/Zappa/pull/664
    #          https://github.com/Miserlou/Zappa/issues/678
    # And various others from Slack.
    self.lambda_name = slugify.slugify(self.project_name + "-" + self.api_stage)
    # Load stage-specific settings
    self.s3_bucket_name = self.stage_config.get(
        "s3_bucket",
        "zappa-"
        + "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
        ),
    )
    self.vpc_config = self.stage_config.get("vpc_config", {})
    self.memory_size = self.stage_config.get("memory_size", 512)
    self.app_function = self.stage_config.get("app_function", None)
    self.exception_handler = self.stage_config.get("exception_handler", None)
    self.aws_region = self.stage_config.get("aws_region", None)
    self.debug = self.stage_config.get("debug", True)
    self.prebuild_script = self.stage_config.get("prebuild_script", None)
    self.profile_name = self.stage_config.get("profile_name", None)
    self.log_level = self.stage_config.get("log_level", "DEBUG")
    self.domain = self.stage_config.get("domain", None)
    self.base_path = self.stage_config.get("base_path", None)
    self.timeout_seconds = self.stage_config.get("timeout_seconds", 30)
    dead_letter_arn = self.stage_config.get("dead_letter_arn", "")
    self.dead_letter_config = (
        {"TargetArn": dead_letter_arn} if dead_letter_arn else {}
    )
    self.cognito = self.stage_config.get("cognito", None)
    self.num_retained_versions = self.stage_config.get(
        "num_retained_versions", None
    )
    # Check for valid values of num_retained_versions
    if (
        self.num_retained_versions is not None
        and type(self.num_retained_versions) is not int
    ):
        raise ClickException(
            "Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s"
            % type(self.num_retained_versions)
        )
    elif type(self.num_retained_versions) is int and self.num_retained_versions < 1:
        raise ClickException(
            "The value for num_retained_versions in the zappa_settings.json should be greater than 0."
        )
    # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
    # https://github.com/Miserlou/Zappa/issues/490
    # https://github.com/Miserlou/Zappa/issues/493
    self.use_apigateway = self.stage_config.get("use_apigateway", True)
    if self.use_apigateway:
        self.use_apigateway = self.stage_config.get("apigateway_enabled", True)
        self.apigateway_description = self.stage_config.get(
            "apigateway_description", None
        )
    self.lambda_handler = self.stage_config.get(
        "lambda_handler", "handler.lambda_handler"
    )
    # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
    self.remote_env_bucket = self.stage_config.get("remote_env_bucket", None)
    self.remote_env_file = self.stage_config.get("remote_env_file", None)
    self.remote_env = self.stage_config.get("remote_env", None)
    self.settings_file = self.stage_config.get("settings_file", None)
    self.django_settings = self.stage_config.get("django_settings", None)
    self.manage_roles = self.stage_config.get("manage_roles", True)
    self.binary_support = self.stage_config.get("binary_support", True)
    self.api_key_required = self.stage_config.get("api_key_required", False)
    self.api_key = self.stage_config.get("api_key")
    self.endpoint_configuration = self.stage_config.get(
        "endpoint_configuration", None
    )
    self.iam_authorization = self.stage_config.get("iam_authorization", False)
    self.cors = self.stage_config.get("cors", False)
    self.lambda_description = self.stage_config.get(
        "lambda_description", "Zappa Deployment"
    )
    self.lambda_concurrency = self.stage_config.get("lambda_concurrency", None)
    self.environment_variables = self.stage_config.get("environment_variables", {})
    self.aws_environment_variables = self.stage_config.get(
        "aws_environment_variables", {}
    )
    # Values must all be strings; raises ValueError otherwise.
    self.check_environment(self.environment_variables)
    self.authorizer = self.stage_config.get("authorizer", {})
    self.runtime = self.stage_config.get(
        "runtime", get_runtime_from_python_version()
    )
    self.aws_kms_key_arn = self.stage_config.get("aws_kms_key_arn", "")
    self.context_header_mappings = self.stage_config.get(
        "context_header_mappings", {}
    )
    self.xray_tracing = self.stage_config.get("xray_tracing", False)
    self.desired_role_arn = self.stage_config.get("role_arn")
    self.layers = self.stage_config.get("layers", None)
    # Load ALB-related settings
    self.use_alb = self.stage_config.get("alb_enabled", False)
    self.alb_vpc_config = self.stage_config.get("alb_vpc_config", {})
    # Additional tags
    self.tags = self.stage_config.get("tags", {})
    desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole"
    # Construct the core Zappa object with everything gathered above.
    self.zappa = Zappa(
        boto_session=session,
        profile_name=self.profile_name,
        aws_region=self.aws_region,
        load_credentials=self.load_credentials,
        desired_role_name=desired_role_name,
        desired_role_arn=self.desired_role_arn,
        runtime=self.runtime,
        tags=self.tags,
        endpoint_urls=self.stage_config.get("aws_endpoint_urls", {}),
        xray_tracing=self.xray_tracing,
    )
    # Apply any custom settings; *policy settings are file paths whose
    # contents are read in.
    for setting in CUSTOM_SETTINGS:
        if setting in self.stage_config:
            setting_val = self.stage_config[setting]
            # Read the policy file contents.
            if setting.endswith("policy"):
                with open(setting_val, "r") as f:
                    setting_val = f.read()
            setattr(self.zappa, setting, setting_val)
    if self.app_function:
        self.collision_warning(self.app_function)
        if self.app_function[-3:] == ".py":
            click.echo(
                click.style("Warning!", fg="red", bold=True)
                + " Your app_function is pointing to a "
                + click.style("file and not a function", bold=True)
                + "! It should probably be something like 'my_file.app', not 'my_file.py'!"
            )
    return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
    """
    Return zappa_settings path as JSON or YAML (or TOML), as appropriate.

    Preference order when several exist: .json, .toml, .yml, .yaml.

    :raises ClickException: When no settings file is present at all.
    """
    candidates = [
        settings_name + extension
        for extension in (".json", ".toml", ".yml", ".yaml")
    ]
    existing = [path for path in candidates if os.path.isfile(path)]
    # Must have at least one settings file to proceed.
    if not existing:
        raise ClickException(
            "Please configure a zappa_settings file or call `zappa init`."
        )
    return existing[0]
def load_settings_file(self, settings_file=None):
    """
    Load our settings file (JSON, YAML or TOML) into self.zappa_settings.
    """
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException(
            "Please configure your zappa_settings file or call `zappa init`."
        )

    _, extension = os.path.splitext(settings_file)
    if extension in (".yml", ".yaml"):
        with open(settings_file) as handle:
            try:
                self.zappa_settings = yaml.safe_load(handle)
            except ValueError:  # pragma: no cover
                raise ValueError(
                    "Unable to load the Zappa settings YAML. It may be malformed."
                )
    elif extension == ".toml":
        with open(settings_file) as handle:
            try:
                self.zappa_settings = toml.load(handle)
            except ValueError:  # pragma: no cover
                raise ValueError(
                    "Unable to load the Zappa settings TOML. It may be malformed."
                )
    else:
        # Anything else is treated as JSON (the preferred format).
        with open(settings_file) as handle:
            try:
                self.zappa_settings = json.load(handle)
            except ValueError:  # pragma: no cover
                raise ValueError(
                    "Unable to load the Zappa settings JSON. It may be malformed."
                )
def create_package(self, output=None):
    """
    Ensure that the package can be properly configured,
    and then create it.

    Builds either one zip (application + handler) or, for slim_handler,
    a project tarball plus a separate handler-only zip, then embeds the
    generated zappa_settings.py into whichever archive serves requests.
    """
    # Create the Lambda zip package (includes project and virtualenvironment)
    # Also define the path the handler file so it can be copied to the zip
    # root for Lambda.
    current_file = os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe()))
    )
    # NOTE(review): split/join round-trip is a no-op copy of current_file.
    handler_file = (
        os.sep.join(current_file.split(os.sep)[0:]) + os.sep + "handler.py"
    )

    # Create the zip file(s)
    if self.stage_config.get("slim_handler", False):
        # Create two zips. One with the application and the other with just the handler.
        # https://github.com/Miserlou/Zappa/issues/510
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            use_precompiled_packages=self.stage_config.get(
                "use_precompiled_packages", True
            ),
            exclude=self.stage_config.get("exclude", []),
            exclude_glob=self.stage_config.get("exclude_glob", []),
            disable_progress=self.disable_progress,
            archive_format="tarball",
        )

        # Make sure the normal venv is not included in the handler's zip
        exclude = self.stage_config.get("exclude", [])
        cur_venv = self.zappa.get_current_venv()
        exclude.append(cur_venv.split("/")[-1])
        self.handler_path = self.zappa.create_lambda_zip(
            prefix="handler_{0!s}".format(self.lambda_name),
            venv=self.zappa.create_handler_venv(),
            handler_file=handler_file,
            slim_handler=True,
            exclude=exclude,
            exclude_glob=self.stage_config.get("exclude_glob", []),
            output=output,
            disable_progress=self.disable_progress,
        )
    else:
        # This could be python3.6 optimized.
        exclude = self.stage_config.get(
            "exclude", ["boto3", "dateutil", "botocore", "s3transfer", "concurrent"]
        )

        # Create a single zip that has the handler and application
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            handler_file=handler_file,
            use_precompiled_packages=self.stage_config.get(
                "use_precompiled_packages", True
            ),
            exclude=exclude,
            exclude_glob=self.stage_config.get("exclude_glob", []),
            output=output,
            disable_progress=self.disable_progress,
        )

        # Warn if this is too large for Lambda.
        file_stats = os.stat(self.zip_path)
        # 52428800 bytes == 50 MiB (see warning text for the implication).
        if file_stats.st_size > 52428800:  # pragma: no cover
            print(
                "\n\nWarning: Application zip package is likely to be too large for AWS Lambda. "
                'Try setting "slim_handler" to true in your Zappa settings file.\n\n'
            )

    # Throw custom settings into the zip that handles requests
    if self.stage_config.get("slim_handler", False):
        handler_zip = self.handler_path
    else:
        handler_zip = self.zip_path

    with zipfile.ZipFile(handler_zip, "a") as lambda_zip:
        settings_s = self.get_zappa_settings_string()

        # Copy our Django app into root of our package.
        # It doesn't work otherwise.
        if self.django_settings:
            base = __file__.rsplit(os.sep, 1)[0]
            django_py = "".join(os.path.join(base, "ext", "django_zappa.py"))
            lambda_zip.write(django_py, "django_zappa_app.py")

        # Lambda requires a specific chmod
        temp_settings = tempfile.NamedTemporaryFile(delete=False)
        os.chmod(temp_settings.name, 0o644)
        temp_settings.write(bytes(settings_s, "utf-8"))
        temp_settings.close()
        lambda_zip.write(temp_settings.name, "zappa_settings.py")

        os.unlink(temp_settings.name)
def get_zappa_settings_string(self):
    """
    Render the zappa_settings.py module that gets embedded into the Lambda
    package: a sequence of UPPER_CASE assignments read by the handler at
    runtime (app location, debug/log config, env vars, event mappings, ...).
    """
    settings_s = "# Generated by Zappa\n"

    if self.app_function:
        if "." not in self.app_function:  # pragma: no cover
            raise ClickException(
                "Your "
                + click.style("app_function", fg="red", bold=True)
                + " value is not a modular path."
                + " It needs to be in the format `"
                + click.style("your_module.your_app_object", bold=True)
                + "`."
            )
        app_module, app_function = self.app_function.rsplit(".", 1)
        settings_s = (
            settings_s
            + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(
                app_module, app_function
            )
        )

    if self.exception_handler:
        settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
    else:
        settings_s += "EXCEPTION_HANDLER=None\n"

    if self.debug:
        settings_s = settings_s + "DEBUG=True\n"
    else:
        settings_s = settings_s + "DEBUG=False\n"

    settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))

    if self.binary_support:
        settings_s = settings_s + "BINARY_SUPPORT=True\n"
    else:
        settings_s = settings_s + "BINARY_SUPPORT=False\n"

    head_map_dict = {}
    head_map_dict.update(dict(self.context_header_mappings))
    settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(head_map_dict)

    # If we're on a domain, we don't need to define the /<<env>> in
    # the WSGI PATH
    if self.domain:
        settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
    else:
        settings_s = settings_s + "DOMAIN=None\n"

    if self.base_path:
        settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
    else:
        settings_s = settings_s + "BASE_PATH=None\n"

    # Pass through remote config bucket and path
    if self.remote_env:
        settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(self.remote_env)
    # DEPRECATED. use remote_env instead
    elif self.remote_env_bucket and self.remote_env_file:
        settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
            self.remote_env_bucket, self.remote_env_file
        )

    # Local envs
    env_dict = {}
    if self.aws_region:
        env_dict["AWS_REGION"] = self.aws_region
    env_dict.update(dict(self.environment_variables))

    # Environment variable keys must be ascii
    # https://github.com/Miserlou/Zappa/issues/604
    # https://github.com/Miserlou/Zappa/issues/998
    try:
        env_dict = dict(
            (k.encode("ascii").decode("ascii"), v) for (k, v) in env_dict.items()
        )
    except Exception:
        raise ValueError("Environment variable keys must be ascii.")

    settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(env_dict)

    # We can be environment-aware
    settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
    settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))

    if self.settings_file:
        settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format(
            (self.settings_file)
        )
    else:
        settings_s = settings_s + "SETTINGS_FILE=None\n"

    if self.django_settings:
        settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format(
            (self.django_settings)
        )
    else:
        settings_s = settings_s + "DJANGO_SETTINGS=None\n"

    # If slim handler, path to project zip
    if self.stage_config.get("slim_handler", False):
        settings_s += (
            "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                self.s3_bucket_name, self.api_stage, self.project_name
            )
        )

        # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
        # and tell the handler we are the slim_handler
        # https://github.com/Miserlou/Zappa/issues/776
        settings_s += "SLIM_HANDLER=True\n"

        include = self.stage_config.get("include", [])
        if len(include) >= 1:
            settings_s += "INCLUDE=" + str(include) + "\n"

    # AWS Events function mapping (event-source ARN -> function path)
    event_mapping = {}
    events = self.stage_config.get("events", [])
    for event in events:
        arn = event.get("event_source", {}).get("arn")
        function = event.get("function")
        if arn and function:
            event_mapping[arn] = function
    settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)

    # Map Lex bot events ("intent:invocation_source" -> function path)
    bot_events = self.stage_config.get("bot_events", [])
    bot_events_mapping = {}
    for bot_event in bot_events:
        event_source = bot_event.get("event_source", {})
        intent = event_source.get("intent")
        invocation_source = event_source.get("invocation_source")
        function = bot_event.get("function")
        if intent and invocation_source and function:
            bot_events_mapping[
                str(intent) + ":" + str(invocation_source)
            ] = function

    settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(
        bot_events_mapping
    )

    # Map cognito triggers (trigger source -> function path)
    cognito_trigger_mapping = {}
    cognito_config = self.stage_config.get("cognito", {})
    triggers = cognito_config.get("triggers", [])
    for trigger in triggers:
        source = trigger.get("source")
        function = trigger.get("function")
        if source and function:
            cognito_trigger_mapping[source] = function
    settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(
        cognito_trigger_mapping
    )

    # Authorizer config
    authorizer_function = self.authorizer.get("function", None)
    if authorizer_function:
        settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)

    # async response
    async_response_table = self.stage_config.get("async_response_table", "")
    settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)

    return settings_s
def remove_local_zip(self):
    """
    Remove the locally-built package archive(s), unless `delete_local_zip`
    is set to false in the stage settings.
    """
    if self.stage_config.get("delete_local_zip", True):
        try:
            if os.path.isfile(self.zip_path):
                os.remove(self.zip_path)
            if self.handler_path and os.path.isfile(self.handler_path):
                os.remove(self.handler_path)
        except Exception as e:  # pragma: no cover
            # Previously the error was swallowed silently before exiting;
            # surface the reason so failures are diagnosable.
            sys.stderr.write("Error removing local zip: {0}\n".format(e))
            sys.exit(-1)
def remove_uploaded_zip(self):
    """
    Remove the local and S3 zip file after uploading and updating.
    """
    # Remove the uploaded zip from S3, because it is now registered..
    if self.stage_config.get("delete_s3_zip", True):
        self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
        if self.stage_config.get("slim_handler", False):
            # Need to keep the project zip as the slim handler uses it.
            # NOTE(review): only the handler archive is deleted here; the
            # project archive the runtime fetches lives under the
            # *_current_project.tar.gz key (see ARCHIVE_PATH in
            # get_zappa_settings_string) — confirm that is the intent.
            self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
    """
    Cleanup after the command finishes.
    Always called: SystemExit, KeyboardInterrupt and any other Exception
    that occurs.
    """
    if not self.zip_path:
        return
    # Only try to remove the uploaded zip when this command actually
    # loaded credentials (i.e. could have talked to S3).
    if self.load_credentials:
        self.remove_uploaded_zip()
    self.remove_local_zip()
def print_logs(
    self, logs, colorize=True, http=False, non_http=False, force_colorize=None
):
    """
    Parse, filter and print logs to the console.

    :param logs: iterable of dicts with "timestamp" and "message" keys.
    :param colorize: when False (and force_colorize is falsy), print plain text.
    :param http: only print entries that look like HTTP access logs.
    :param non_http: only print entries that do NOT look like HTTP logs.
        If both flags are set, `http` wins (matches historical behavior).
    :param force_colorize: passed through to click.echo(color=...) to force
        colors on/off regardless of terminal detection.
    """
    for log in logs:
        timestamp = log["timestamp"]
        message = log["message"]

        # Skip Lambda bookkeeping records.
        if (
            "START RequestId" in message
            or "REPORT RequestId" in message
            or "END RequestId" in message
        ):
            continue

        entry = message.strip()

        # Decide whether this entry passes the HTTP/non-HTTP filter.
        # `http` takes precedence over `non_http`, as in the original
        # if/elif chain.
        if http:
            should_print = self.is_http_log_entry(entry)
        elif non_http:
            should_print = not self.is_http_log_entry(entry)
        else:
            should_print = True
        if not should_print:
            continue

        if not colorize and not force_colorize:
            print("[" + str(timestamp) + "] " + entry)
        else:
            click.echo(
                click.style("[", fg="cyan")
                + click.style(str(timestamp), bold=True)
                + click.style("]", fg="cyan")
                + self.colorize_log_entry(entry),
                color=force_colorize,
            )
def is_http_log_entry(self, string):
    """
    Determines if a log entry is an HTTP-formatted log string or not.

    Heuristic: debug "Zappa Event" lines are never HTTP; otherwise any
    whitespace-delimited token shaped like a dotted-quad IP marks the
    entry as an HTTP access log.
    """
    # Debug event filter
    if "Zappa Event" in string:
        return False

    # IP address filter: four numeric groups separated by three dots.
    tokens = string.replace("\t", " ").split(" ")
    return any(
        token.count(".") == 3 and token.replace(".", "").isnumeric()
        for token in tokens
    )
def get_project_name(self):
    """Derive a project name (slugified, max 15 chars) from the current directory."""
    current_dir_name = os.getcwd().split(os.sep)[-1]
    return slugify.slugify(current_dir_name)[:15]
def colorize_log_entry(self, string):
    """
    Apply various heuristics to return a colorized version of a string.
    If these fail, simply return the string in plaintext.
    """
    final_string = string
    try:
        # First, do stuff in square brackets: log levels get cyan+bold,
        # everything else bracketed just gets bold.
        inside_squares = re.findall(r"\[([^]]*)\]", string)
        for token in inside_squares:
            if token in ["CRITICAL", "ERROR", "WARNING", "DEBUG", "INFO", "NOTSET"]:
                final_string = final_string.replace(
                    "[" + token + "]",
                    click.style("[", fg="cyan")
                    + click.style(token, fg="cyan", bold=True)
                    + click.style("]", fg="cyan"),
                )
            else:
                final_string = final_string.replace(
                    "[" + token + "]",
                    click.style("[", fg="cyan")
                    + click.style(token, bold=True)
                    + click.style("]", fg="cyan"),
                )

        # Then do quoted strings
        quotes = re.findall(r'"[^"]*"', string)
        for token in quotes:
            final_string = final_string.replace(
                token, click.style(token, fg="yellow")
            )

        # And UUIDs (4 dashes, alphanumeric once dashes are stripped)
        for token in final_string.replace("\t", " ").split(" "):
            try:
                if token.count("-") == 4 and token.replace("-", "").isalnum():
                    final_string = final_string.replace(
                        token, click.style(token, fg="magenta")
                    )
            except Exception:  # pragma: no cover
                pass

            # And IP addresses
            try:
                if token.count(".") == 3 and token.replace(".", "").isnumeric():
                    final_string = final_string.replace(
                        token, click.style(token, fg="red")
                    )
            except Exception:  # pragma: no cover
                pass

            # And status codes: 200 green, common error codes red
            try:
                if token in ["200"]:
                    final_string = final_string.replace(
                        token, click.style(token, fg="green")
                    )
                if token in ["400", "401", "403", "404", "405", "500"]:
                    final_string = final_string.replace(
                        token, click.style(token, fg="red")
                    )
            except Exception:  # pragma: no cover
                pass

        # And Zappa Events
        try:
            if "Zappa Event:" in final_string:
                final_string = final_string.replace(
                    "Zappa Event:",
                    click.style("Zappa Event:", bold=True, fg="green"),
                )
        except Exception:  # pragma: no cover
            pass

        # And dates: any tab-delimited token dateutil can parse turns green.
        for token in final_string.split("\t"):
            try:
                is_date = parser.parse(token)
                final_string = final_string.replace(
                    token, click.style(token, fg="green")
                )
            except Exception:  # pragma: no cover
                pass

        # NOTE(review): the second .replace below maps a space to a space
        # as extracted here, which is a no-op — likely a whitespace-
        # collapsing artifact (presumably it collapsed runs of spaces).
        # Confirm against the upstream source before relying on it.
        final_string = final_string.replace("\t", " ").replace(" ", " ")
        # NOTE(review): this indexing raises IndexError on an empty string,
        # which the outer except turns into returning the original string.
        if final_string[0] != " ":
            final_string = " " + final_string

        return final_string
    except Exception as e:  # pragma: no cover
        return string
def execute_prebuild_script(self):
    """
    Parse and execute the prebuild_script from the zappa_settings.

    `prebuild_script` is a modular path ending in a function name, e.g.
    "scripts.hooks.prebuild". The module is first resolved relative to
    the working directory; failing that, on the normal import path
    (e.g. inside the virtualenv). Raises ClickException when the module
    or the function cannot be found.
    """
    # Local import: importlib.util is not guaranteed to be reachable via
    # the bare `importlib` import this module already has.
    import importlib.util

    (pb_mod_path, pb_func) = self.prebuild_script.rsplit(".", 1)

    try:  # Prefer prebuild script in working directory
        if (
            pb_mod_path.count(".") >= 1
        ):  # Prebuild script func is nested in a folder
            (mod_folder_path, mod_name) = pb_mod_path.rsplit(".", 1)
            mod_folder_path_fragments = mod_folder_path.split(".")
            working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
        else:
            mod_name = pb_mod_path
            working_dir = os.getcwd()

        working_dir_importer = pkgutil.get_importer(working_dir)
        # find_module()/load_module() were removed in Python 3.12;
        # use the find_spec()/exec_module() API instead.
        spec = working_dir_importer.find_spec(mod_name)
        if spec is None:
            raise ImportError(mod_name)
        module_ = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module_)
    except (ImportError, AttributeError):
        try:  # Prebuild func might be in virtualenv
            module_ = importlib.import_module(pb_mod_path)
        except ImportError:  # pragma: no cover
            raise ClickException(
                click.style("Failed ", fg="red")
                + "to "
                + click.style("import prebuild script ", bold=True)
                + 'module: "{pb_mod_path}"'.format(
                    pb_mod_path=click.style(pb_mod_path, bold=True)
                )
            )

    if not hasattr(module_, pb_func):  # pragma: no cover
        raise ClickException(
            click.style("Failed ", fg="red")
            + "to "
            + click.style("find prebuild script ", bold=True)
            + 'function: "{pb_func}" '.format(
                pb_func=click.style(pb_func, bold=True)
            )
            + 'in module "{pb_mod_path}"'.format(pb_mod_path=pb_mod_path)
        )

    prebuild_function = getattr(module_, pb_func)
    prebuild_function()  # Call the function
def collision_warning(self, item):
    """
    Given a string, print a warning if this could
    collide with a Zappa core package module.
    Use for app functions and events.
    """
    reserved_prefixes = (
        "zappa.",
        "wsgi.",
        "middleware.",
        "handler.",
        "util.",
        "letsencrypt.",
        "cli.",
    )
    for prefix in reserved_prefixes:
        if not item.startswith(prefix):
            continue
        click.echo(
            click.style("Warning!", fg="red", bold=True)
            + " You may have a namespace collision between "
            + click.style(item, bold=True)
            + " and "
            + click.style(prefix, bold=True)
            + "! You may want to rename that file."
        )
def deploy_api_gateway(self, api_id):
    """
    Deploy the API Gateway stage for `api_id` using the stage settings,
    and return the resulting endpoint URL.
    """
    stage = self.stage_config
    endpoint_url = self.zappa.deploy_api_gateway(
        api_id=api_id,
        stage_name=self.api_stage,
        cache_cluster_enabled=stage.get("cache_cluster_enabled", False),
        # API Gateway expects the cluster size as a string, e.g. "0.5".
        cache_cluster_size=str(stage.get("cache_cluster_size", 0.5)),
        cloudwatch_log_level=stage.get("cloudwatch_log_level", "OFF"),
        cloudwatch_data_trace=stage.get("cloudwatch_data_trace", False),
        cloudwatch_metrics_enabled=stage.get("cloudwatch_metrics_enabled", False),
        cache_cluster_ttl=stage.get("cache_cluster_ttl", 300),
        cache_cluster_encrypted=stage.get("cache_cluster_encrypted", False),
    )
    return endpoint_url
def check_venv(self):
    """Ensure we're inside a virtualenv; raise ClickException otherwise."""
    if self.vargs and self.vargs.get("no_venv"):
        return
    # During `init` there is no self.zappa instance yet, so fall back to
    # the class-level helper.
    venv = self.zappa.get_current_venv() if self.zappa else Zappa.get_current_venv()
    if venv:
        return
    raise ClickException(
        click.style("Zappa", bold=True)
        + " requires an "
        + click.style("active virtual environment", bold=True, fg="red")
        + "!\n"
        + "Learn more about virtual environments here: "
        + click.style(
            "http://docs.python-guide.org/en/latest/dev/virtualenvs/",
            bold=False,
            fg="cyan",
        )
    )
def silence(self):
    """
    Route all stdout and stderr to the null device.
    """
    sys.stdout, sys.stderr = open(os.devnull, "w"), open(os.devnull, "w")
def touch_endpoint(self, endpoint_url):
    """
    Test the deployed endpoint with a GET request.

    Raises ClickException when the endpoint keeps returning a 5xx status
    after the 504 retry loop below.
    """
    # Private APIGW endpoints most likely can't be reached by a deployer
    # unless they're connected to the VPC by VPN. Instead of trying
    # connect to the service, print a warning and let the user know
    # to check it manually.
    # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565
    if "PRIVATE" in self.stage_config.get("endpoint_configuration", []):
        print(
            click.style("Warning!", fg="yellow", bold=True)
            + " Since you're deploying a private API Gateway endpoint,"
            " Zappa cannot determine if your function is returning "
            " a correct status code. You should check your API's response"
            " manually before considering this deployment complete."
        )
        return

    touch_path = self.stage_config.get("touch_path", "/")
    req = requests.get(endpoint_url + touch_path)

    # Sometimes on really large packages, it can take 60-90 secs to be
    # ready and requests will return 504 status_code until ready.
    # So, if we get a 504 status code, rerun the request up to 4 times or
    # until we don't get a 504 error
    # NOTE(review): `i <= 4` actually allows up to five extra requests,
    # one more than the comment above states — confirm intended count.
    if req.status_code == 504:
        i = 0
        status_code = 504
        while status_code == 504 and i <= 4:
            req = requests.get(endpoint_url + touch_path)
            status_code = req.status_code
            i += 1

    if req.status_code >= 500:
        raise ClickException(
            click.style("Warning!", fg="red", bold=True)
            + " Status check on the deployed lambda failed."
            + " A GET request to '"
            + touch_path
            + "' yielded a "
            + click.style(str(req.status_code), fg="red", bold=True)
            + " response code."
        )
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """
    help_line = (
        "Need "
        + click.style("help", fg="green", bold=True)
        + "? Found a "
        + click.style("bug", fg="green", bold=True)
        + "? Let us "
        + click.style("know", fg="green", bold=True)
        + "! :D"
    )
    click.echo(help_line)
    bug_line = (
        "File bug reports on "
        + click.style("GitHub", bold=True)
        + " here: "
        + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True)
    )
    click.echo(bug_line)
    slack_line = (
        "And join our "
        + click.style("Slack", bold=True)
        + " channel here: "
        + click.style("https://zappateam.slack.com", fg="cyan", bold=True)
    )
    click.echo(slack_line)
    click.echo("Love!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
def disable_click_colors():
    """
    Set a Click context where colors are disabled. Creates a throwaway BaseCommand
    to play nicely with the Context constructor.
    The intended side-effect here is that click.echo() checks this context and will
    suppress colors.
    https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39
    """
    # The command name is irrelevant; Context just requires *a* command.
    ctx = Context(BaseCommand("AllYourBaseAreBelongToUs"))
    ctx.color = False
    # Push onto click's global context stack so subsequent click.echo()
    # calls in this process pick up the color=False setting.
    push_context(ctx)
def handle():  # pragma: no cover
    """
    Main program execution handler.

    Runs the CLI and guarantees cleanup (cli.on_exit) on SystemExit,
    KeyboardInterrupt and any other exception.
    """
    # Initialize to None so the except clauses below don't hit a NameError
    # when ZappaCLI() itself raises before `cli` is bound.
    cli = None
    try:
        cli = ZappaCLI()
        sys.exit(cli.handle())
    except SystemExit as e:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(e.code)

    except KeyboardInterrupt:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(130)
    except Exception:
        if cli:
            cli.on_exit()

        click.echo(
            "Oh no! An " + click.style("error occurred", fg="red", bold=True) + "! :("
        )
        click.echo("\n==============\n")
        import traceback

        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()
        sys.exit(-1)
# Run the CLI when executed directly (extraction junk removed from this line).
if __name__ == "__main__":  # pragma: no cover
    handle()
import atexit
import base64
import binascii
import copy
import hashlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
from urllib.request import urlopen
import requests
# Staging
# Amazon doesn't accept these though.
# DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"

# Production
# ACME v2 endpoint used for all Let's Encrypt requests in this module.
DEFAULT_CA = "https://acme-v02.api.letsencrypt.org"

# Module logger; a StreamHandler is attached so messages are visible even
# when the host application has not configured logging.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
def get_cert_and_update_domain(
    zappa_instance,
    lambda_name,
    api_stage,
    domain=None,
    manual=False,
):
    """
    Main cert installer path.

    Generates a key and CSR, obtains a Let's Encrypt certificate, and
    either installs it on the API Gateway custom domain (creating or
    updating it) or, with manual=True, prints the PEM material for the
    operator to install elsewhere. Returns True on success, False on
    any exception (which is printed, not re-raised).
    """
    try:
        create_domain_key()
        create_domain_csr(domain)
        get_cert(zappa_instance)
        create_chained_certificate()

        # Read back the PEM material written to the temp dir by the steps above.
        with open("{}/signed.crt".format(gettempdir())) as f:
            certificate_body = f.read()

        with open("{}/domain.key".format(gettempdir())) as f:
            certificate_private_key = f.read()

        with open("{}/intermediate.pem".format(gettempdir())) as f:
            certificate_chain = f.read()

        if not manual:
            if domain:
                if not zappa_instance.get_domain_name(domain):
                    zappa_instance.create_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
                    print(
                        "Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part."
                    )
                else:
                    zappa_instance.update_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
        else:
            # Manual mode: just print everything so the operator can
            # install the certificate themselves.
            print("Cerificate body:\n")
            print(certificate_body)

            print("\nCerificate private key:\n")
            print(certificate_private_key)

            print("\nCerificate chain:\n")
            print(certificate_chain)

    except Exception as e:
        print(e)
        return False

    return True
def create_domain_key():
    """
    Generate a fresh 2048-bit RSA private key with openssl and write it
    to <tempdir>/domain.key.
    """
    # subprocess.DEVNULL replaces a manually-opened (and previously leaked)
    # handle to os.devnull.
    out = subprocess.check_output(
        ["openssl", "genrsa", "2048"], stderr=subprocess.DEVNULL
    )
    with open(os.path.join(gettempdir(), "domain.key"), "wb") as f:
        f.write(out)
def create_domain_csr(domain):
    """
    Create a SHA-256 certificate signing request for `domain` using the
    previously generated domain.key, writing it to <tempdir>/domain.csr.
    """
    subj = "/CN=" + domain
    cmd = [
        "openssl",
        "req",
        "-new",
        "-sha256",
        "-key",
        os.path.join(gettempdir(), "domain.key"),
        "-subj",
        subj,
    ]
    # subprocess.DEVNULL replaces a manually-opened (and previously leaked)
    # handle to os.devnull.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    with open(os.path.join(gettempdir(), "domain.csr"), "wb") as f:
        f.write(out)
def create_chained_certificate():
    """
    Download the Let's Encrypt intermediate certificate and write both
    intermediate.pem and chained.pem (signed cert + intermediate) into
    the temp directory.
    """
    # Previously signed.crt was opened without ever being closed.
    with open(os.path.join(gettempdir(), "signed.crt"), "rb") as f:
        signed_crt = f.read()

    cross_cert_url = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
    cert = requests.get(cross_cert_url)
    with open(os.path.join(gettempdir(), "intermediate.pem"), "wb") as intermediate_pem:
        intermediate_pem.write(cert.content)

    with open(os.path.join(gettempdir(), "chained.pem"), "wb") as chained_pem:
        chained_pem.write(signed_crt)
        chained_pem.write(cert.content)
def parse_account_key():
    """
    Parse account key to get public key.

    Returns openssl's textual dump (bytes) of <tempdir>/account.key,
    which get_boulder_header() later scrapes with regexes.
    """
    LOGGER.info("Parsing account key...")
    cmd = [
        "openssl",
        "rsa",
        "-in",
        os.path.join(gettempdir(), "account.key"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL replaces a manually-opened (and previously leaked)
    # handle to os.devnull.
    return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
def parse_csr():
    """
    Parse certificate signing request for domains.

    Returns the set of domain names found in the CSR's Common Name and in
    any DNS: entries of its Subject Alternative Name extension.
    """
    LOGGER.info("Parsing CSR...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL replaces a manually-opened (and previously leaked)
    # handle to os.devnull.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    domains = set([])
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode("utf8"))
    if common_name is not None:
        domains.add(common_name.group(1))

    subject_alt_names = re.search(
        r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
        out.decode("utf8"),
        re.MULTILINE | re.DOTALL,
    )
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])

    return domains
def get_boulder_header(key_bytes):
    """
    Use regular expressions to find crypto values from parsed account key,
    and return a header we can send to our Boulder instance.

    `key_bytes` is openssl's textual key dump from parse_account_key();
    the RSA modulus and public exponent are extracted from it and
    base64url-encoded into a JWS header (alg RS256).
    """
    pub_hex, pub_exp = re.search(
        r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
        key_bytes.decode("utf8"),
        re.MULTILINE | re.DOTALL,
    ).groups()
    # The exponent must be an even-length hex string before unhexlify.
    pub_exp = "{0:x}".format(int(pub_exp))
    pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
    header = {
        "alg": "RS256",
        "jwk": {
            "e": _b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
            "kty": "RSA",
            # Strip whitespace and ':' separators from openssl's hex dump
            # before decoding the modulus.
            "n": _b64(
                binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))
            ),
        },
    }
    return header
def register_account():
    """
    Agree to LE TOS.

    Idempotent: HTTP 201 means a new registration, 409 means the account
    key was already registered; anything else raises ValueError.
    """
    LOGGER.info("Registering account...")
    payload = {
        "resource": "new-reg",
        "agreement": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
    }
    code, result = _send_signed_request(DEFAULT_CA + "/acme/new-reg", payload)
    if code == 201:  # pragma: no cover
        LOGGER.info("Registered!")
    elif code == 409:  # pragma: no cover
        LOGGER.info("Already registered!")
    else:  # pragma: no cover
        raise ValueError("Error registering: {0} {1}".format(code, result))
def get_cert(zappa_instance, log=LOGGER, CA=DEFAULT_CA):
    """
    Call LE to get a new signed CA.

    Verifies every domain listed in the CSR via a DNS-01 challenge, using
    `zappa_instance` to manage the Route 53 TXT records, then signs the
    certificate and writes it out in PEM form. Returns True on success.
    """
    out = parse_account_key()
    header = get_boulder_header(out)
    # Account-key thumbprint used in the challenge key authorization.
    accountkey_json = json.dumps(header["jwk"], sort_keys=True, separators=(",", ":"))
    thumbprint = _b64(hashlib.sha256(accountkey_json.encode("utf8")).digest())

    # find domains
    domains = parse_csr()
    # get the certificate domains and expiration
    register_account()

    # verify each domain
    for domain in domains:
        log.info("Verifying {0}...".format(domain))

        # get new challenge
        code, result = _send_signed_request(
            CA + "/acme/new-authz",
            {
                "resource": "new-authz",
                "identifier": {"type": "dns", "value": domain},
            },
        )
        if code != 201:
            raise ValueError(
                "Error requesting challenges: {0} {1}".format(code, result)
            )

        # Pick the DNS-01 challenge from the offered set.
        challenge = [
            ch
            for ch in json.loads(result.decode("utf8"))["challenges"]
            if ch["type"] == "dns-01"
        ][0]
        # Sanitize the token to the URL-safe base64 alphabet.
        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge["token"])

        keyauthorization = "{0}.{1}".format(token, thumbprint).encode("utf-8")

        # sha256_b64 digest goes into the TXT record.
        digest = _b64(hashlib.sha256(keyauthorization).digest())

        zone_id = zappa_instance.get_hosted_zone_id_for_domain(domain)
        if not zone_id:
            raise ValueError("Could not find Zone ID for: " + domain)
        zappa_instance.set_dns_challenge_txt(zone_id, domain, digest)  # resp is unused

        print("Waiting for DNS to propagate..")

        # Fixed best-effort delay: there is no polling of the TXT record
        # before asking LE to validate. What's optimal here?
        time.sleep(45)

        # notify challenge are met
        code, result = _send_signed_request(
            challenge["uri"],
            {
                "resource": "challenge",
                "keyAuthorization": keyauthorization.decode("utf-8"),
            },
        )
        if code != 202:
            raise ValueError("Error triggering challenge: {0} {1}".format(code, result))

        # wait for challenge to be verified
        verify_challenge(challenge["uri"])

        # Challenge verified, clean up R53
        zappa_instance.remove_dns_challenge_txt(zone_id, domain, digest)

    # Sign
    result = sign_certificate()
    # Encode to PEM format
    encode_certificate(result)

    return True
def verify_challenge(uri):
    """
    Loop until our challenge is verified, else fail.

    Polls `uri` every 2 seconds while the status is "pending"; returns
    when it becomes "valid" and raises ValueError otherwise.
    """
    while True:
        try:
            response = urlopen(uri)
            challenge_status = json.loads(response.read().decode("utf8"))
        except IOError as e:
            raise ValueError(
                "Error checking challenge: {0} {1}".format(
                    e.code, json.loads(e.read().decode("utf8"))
                )
            )
        status = challenge_status["status"]
        if status == "valid":
            LOGGER.info("Domain verified!")
            break
        if status == "pending":
            time.sleep(2)
        else:
            raise ValueError(
                "Domain challenge did not pass: {0}".format(challenge_status)
            )
def sign_certificate():
    """
    Get the new certificate.
    Returns the signed bytes (DER-encoded).
    """
    LOGGER.info("Signing certificate...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-outform",
        "DER",
    ]
    # subprocess.DEVNULL replaces a manually-opened (and previously leaked)
    # handle to os.devnull.
    csr_der = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    code, result = _send_signed_request(
        DEFAULT_CA + "/acme/new-cert",
        {
            "resource": "new-cert",
            "csr": _b64(csr_der),
        },
    )
    if code != 201:
        raise ValueError("Error signing certificate: {0} {1}".format(code, result))
    LOGGER.info("Certificate signed!")
    return result
def encode_certificate(result):
    """
    Encode cert bytes to PEM encoded cert file.

    Wraps the base64 of the DER `result` at 64 columns between the usual
    BEGIN/END CERTIFICATE markers and writes <tempdir>/signed.crt.
    Returns True.
    """
    cert_body = (
        """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
            "\n".join(textwrap.wrap(base64.b64encode(result).decode("utf8"), 64))
        )
    )
    # Previously the file handle was opened and written without a context
    # manager; `with` guarantees it is flushed and closed.
    with open("{}/signed.crt".format(gettempdir()), "w") as signed_crt:
        signed_crt.write(cert_body)
    return True
##
# Request Utility
##
def _b64(b):
"""
Helper function base64 encode for jose spec
"""
return base64.urlsafe_b64encode(b).decode("utf8").replace("=", "")
def _send_signed_request(url, payload):
    """
    Helper function to make signed requests to Boulder.

    Builds a JWS: payload and protected header (with a fresh anti-replay
    nonce) are base64url-encoded and signed with the account key via
    openssl. Returns a (status_code, response_body) tuple.
    """
    payload64 = _b64(json.dumps(payload).encode("utf8"))
    out = parse_account_key()
    header = get_boulder_header(out)

    protected = copy.deepcopy(header)
    # Every request needs a fresh Replay-Nonce from the directory endpoint.
    protected["nonce"] = urlopen(DEFAULT_CA + "/directory").headers["Replay-Nonce"]
    protected64 = _b64(json.dumps(protected).encode("utf8"))

    cmd = [
        "openssl",
        "dgst",
        "-sha256",
        "-sign",
        os.path.join(gettempdir(), "account.key"),
    ]
    proc = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # Sign "<protected>.<payload>" as required by the JWS spec.
    out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode("utf8"))
    if proc.returncode != 0:  # pragma: no cover
        raise IOError("OpenSSL Error: {0}".format(err))

    data = json.dumps(
        {
            "header": header,
            "protected": protected64,
            "payload": payload64,
            "signature": _b64(out),
        }
    )
    try:
        resp = urlopen(url, data.encode("utf8"))
        return resp.getcode(), resp.read()
    except IOError as e:
        # HTTPError carries .code and .read(); plain IOErrors fall back to
        # (None, str(e)).
        return getattr(e, "code", None), getattr(e, "read", e.__str__)()
##
# Temporary Directory Utility
##

# Module-level cache for the lazily-created temp directory.
__tempdir = None


def gettempdir():
    """
    Lazily creates a temporary directory in a secure manner. When Python exits,
    or the cleanup() function is called, the directory is erased.
    """
    global __tempdir
    if __tempdir is None:
        __tempdir = tempfile.mkdtemp()
    return __tempdir


@atexit.register
def cleanup():
    """
    Delete any temporary files.

    (The trailing line of this function previously carried extraction
    junk; it has been restored to `__tempdir = None`.)
    """
    global __tempdir
    if __tempdir is not None:
        shutil.rmtree(__tempdir)
        __tempdir = None
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
    """
    Permute all casings of a given string.

    A pretty algorithm, via @Amber
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if input_string == "":
        yield ""
        return
    head, tail = input_string[0], input_string[1:]
    if head.lower() == head.upper():
        # Non-cased character (digit, punctuation): only one form.
        for rest in all_casings(tail):
            yield head + rest
    else:
        # Cased character: emit lower-case first, then upper-case,
        # preserving the original generator's ordering.
        for rest in all_casings(tail):
            yield head.lower() + rest
            yield head.upper() + rest
class ZappaWSGIMiddleware:
    """
    WSGI middleware required for a Zappa deployment.

    Most historical hacks have been removed; the one that remains is
    rewriting 'Set-Cookie' header names so AWS does not collapse multiple
    cookies into a single header.
    """

    def __init__(self, application):
        # The wrapped WSGI application.
        self.application = application

    def __call__(self, environ, start_response):
        """
        Run the wrapped app with a start_response that lowercases every
        'Set-Cookie' header name (AWS would otherwise keep only one).
        """

        def encode_response(status, headers, exc_info=None):
            # Partition the headers: non-cookie headers pass through
            # untouched, cookie headers get a lowercased name and are
            # appended after the others.
            # Related: https://github.com/Miserlou/Zappa/issues/1965
            plain = []
            cookies = []
            for header in headers:
                name = header[0]
                if type(name) == str and name.lower() == "set-cookie":
                    cookies.append((name.lower(), header[1]))
                else:
                    plain.append(header)
            return start_response(status, plain + cookies, exc_info)

        # Call the application with our modifier and wrap the result so
        # close() is invoked once iteration finishes (WSGI-safe iterator).
        return ClosingIterator(self.application(environ, encode_response))
import base64
import collections
import datetime
import importlib
import inspect
import json
import logging
import os
import sys
import tarfile
import traceback
from builtins import str
import boto3
from werkzeug.wrappers import Response
# This file may be copied into a project's root,
# so handle both scenarios.
try:
from zappa.middleware import ZappaWSGIMiddleware
from zappa.utilities import merge_headers, parse_s3_url
from zappa.wsgi import common_log, create_wsgi_request
except ImportError as e: # pragma: no cover
from .middleware import ZappaWSGIMiddleware
from .utilities import merge_headers, parse_s3_url
from .wsgi import common_log, create_wsgi_request
# Set up logging
# Module-wide root logger; the level defaults to INFO and may later be
# overridden by the LOG_LEVEL setting in LambdaHandler.__init__.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class LambdaHandler:
    """
    Singleton for avoiding duplicate setup.

    Loads Zappa settings, initializes the WSGI application (plain WSGI or
    Django), and routes every kind of incoming Lambda event — HTTP via API
    Gateway/ALB, scheduled events, direct commands, AWS service triggers
    (S3/SNS/SQS/DynamoDB/Kinesis), Lex bot intents, API Gateway authorizers
    and Cognito triggers.

    Pattern provided by @benbangert.
    """

    # Cached singleton instance (see __new__).
    __instance = None
    # Cached settings module and the name it was imported under.
    settings = None
    settings_name = None
    # Optional boto3 session, injectable for tests.
    session = None

    # Application
    app_module = None
    wsgi_app = None
    trailing_slash = False

    def __new__(cls, settings_name="zappa_settings", session=None):
        """Singleton instance to avoid repeat setup"""
        if LambdaHandler.__instance is None:
            print("Instancing..")
            LambdaHandler.__instance = object.__new__(cls)
        return LambdaHandler.__instance

    def __init__(self, settings_name="zappa_settings", session=None):
        """
        Load settings and build the WSGI application, once per process.

        Because __new__ returns a cached instance, this body is effectively
        idempotent: it only runs its setup when self.settings is unset.
        """
        # We haven't cached our settings yet, load the settings and app.
        if not self.settings:
            # Loading settings from a python module
            self.settings = importlib.import_module(settings_name)
            self.settings_name = settings_name
            self.session = session

            # Custom log level
            if self.settings.LOG_LEVEL:
                level = logging.getLevelName(self.settings.LOG_LEVEL)
                logger.setLevel(level)

            remote_env = getattr(self.settings, "REMOTE_ENV", None)
            remote_bucket, remote_file = parse_s3_url(remote_env)

            if remote_bucket and remote_file:
                self.load_remote_settings(remote_bucket, remote_file)

            # Let the system know that this will be a Lambda/Zappa/Stack
            os.environ["SERVERTYPE"] = "AWS Lambda"
            os.environ["FRAMEWORK"] = "Zappa"
            try:
                os.environ["PROJECT"] = self.settings.PROJECT_NAME
                os.environ["STAGE"] = self.settings.API_STAGE
            except Exception:  # pragma: no cover
                pass

            # Set any locally defined env vars
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            for key in self.settings.ENVIRONMENT_VARIABLES.keys():
                os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key]

            # Pulling from S3 if given a zip path
            project_archive_path = getattr(self.settings, "ARCHIVE_PATH", None)
            if project_archive_path:
                self.load_remote_project_archive(project_archive_path)

            # Load compiled library to the PythonPath
            # checks if we are the slim_handler since this is not needed otherwise
            # https://github.com/Miserlou/Zappa/issues/776
            is_slim_handler = getattr(self.settings, "SLIM_HANDLER", False)
            if is_slim_handler:
                included_libraries = getattr(self.settings, "INCLUDE", [])
                try:
                    from ctypes import cdll, util

                    for library in included_libraries:
                        try:
                            cdll.LoadLibrary(os.path.join(os.getcwd(), library))
                        except OSError:
                            print(
                                "Failed to find library: {}...right filename?".format(
                                    library
                                )
                            )
                except ImportError:
                    # NOTE(review): "cytpes" is a typo for "ctypes" in this
                    # runtime message (left unchanged here).
                    print("Failed to import cytpes library")

            # This is a non-WSGI application
            # https://github.com/Miserlou/Zappa/pull/748
            if (
                not hasattr(self.settings, "APP_MODULE")
                and not self.settings.DJANGO_SETTINGS
            ):
                self.app_module = None
                wsgi_app_function = None
            # This is probably a normal WSGI app (Or django with overloaded wsgi application)
            # https://github.com/Miserlou/Zappa/issues/1164
            elif hasattr(self.settings, "APP_MODULE"):
                if self.settings.DJANGO_SETTINGS:
                    sys.path.append("/var/task")
                    from django.conf import (
                        ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE,
                    )

                    # add the Lambda root path into the sys.path
                    self.trailing_slash = True
                    os.environ[
                        SETTINGS_ENVIRONMENT_VARIABLE
                    ] = self.settings.DJANGO_SETTINGS
                else:
                    self.trailing_slash = False

                # The app module
                self.app_module = importlib.import_module(self.settings.APP_MODULE)

                # The application
                wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
            # Django gets special treatment.
            else:
                try:  # Support both for tests
                    from zappa.ext.django_zappa import get_django_wsgi
                except ImportError:  # pragma: no cover
                    from django_zappa_app import get_django_wsgi

                # Get the Django WSGI app from our extension
                wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
                self.trailing_slash = True

            self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function)

    def load_remote_project_archive(self, project_zip_path):
        """
        Puts the project files from S3 in /tmp and adds to path
        """
        project_folder = "/tmp/{0!s}".format(self.settings.PROJECT_NAME)
        if not os.path.isdir(project_folder):
            # The project folder doesn't exist in this cold lambda, get it from S3
            if not self.session:
                boto_session = boto3.Session()
            else:
                boto_session = self.session

            # Download zip file from S3
            remote_bucket, remote_file = parse_s3_url(project_zip_path)
            s3 = boto_session.resource("s3")
            archive_on_s3 = s3.Object(remote_bucket, remote_file).get()

            with tarfile.open(fileobj=archive_on_s3["Body"], mode="r|gz") as t:
                t.extractall(project_folder)

        # Add to project path
        sys.path.insert(0, project_folder)

        # Change working directory to project folder
        # Related: https://github.com/Miserlou/Zappa/issues/702
        os.chdir(project_folder)
        return True

    def load_remote_settings(self, remote_bucket, remote_file):
        """
        Attempt to read a file from s3 containing a flat json object. Adds each
        key->value pair as environment variables. Helpful for keeping
        sensitive or stage-specific configuration variables in s3 instead of
        version control.
        """
        if not self.session:
            boto_session = boto3.Session()
        else:
            boto_session = self.session

        s3 = boto_session.resource("s3")
        try:
            remote_env_object = s3.Object(remote_bucket, remote_file).get()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Could not load remote settings file.", e)
            return

        try:
            content = remote_env_object["Body"].read()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Exception while reading remote settings file.", e)
            return

        try:
            settings_dict = json.loads(content)
        except (ValueError, TypeError):  # pragma: no cover
            print("Failed to parse remote settings!")
            return

        # add each key-value to environment - overwrites existing keys!
        for key, value in settings_dict.items():
            if self.settings.LOG_LEVEL == "DEBUG":
                print("Adding {} -> {} to environment".format(key, value))
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            try:
                os.environ[str(key)] = value
            except Exception:
                if self.settings.LOG_LEVEL == "DEBUG":
                    print("Environment variable keys must be non-unicode!")

    @staticmethod
    def import_module_and_get_function(whole_function):
        """
        Given a modular path to a function, import that module
        and return the function.
        """
        module, function = whole_function.rsplit(".", 1)
        app_module = importlib.import_module(module)
        app_function = getattr(app_module, function)
        return app_function

    @classmethod
    def lambda_handler(cls, event, context):  # pragma: no cover
        # Reuse the import-time instance when INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT
        # was set; otherwise the singleton is (lazily) created here.
        handler = global_handler or cls()
        exception_handler = handler.settings.EXCEPTION_HANDLER
        try:
            return handler.handler(event, context)
        except Exception as ex:
            exception_processed = cls._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=ex,
            )
            if not exception_processed:
                # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry
                # an event execution in case of failure.
                raise

    @classmethod
    def _process_exception(cls, exception_handler, event, context, exception):
        """
        Invoke the user-configured exception handler (dotted path), if any.
        Returns its truthy/falsy result; False means "re-raise upstream".
        """
        exception_processed = False
        if exception_handler:
            try:
                handler_function = cls.import_module_and_get_function(exception_handler)
                exception_processed = handler_function(exception, event, context)
            except Exception as cex:
                logger.error(msg="Failed to process exception via custom handler.")
                print(cex)
        return exception_processed

    @staticmethod
    def run_function(app_function, event, context):
        """
        Given a function and event context,
        detect signature and execute, returning any result.
        """
        # getargspec does not support python 3 method with type hints
        # Related issue: https://github.com/Miserlou/Zappa/issues/1452
        if hasattr(inspect, "getfullargspec"):  # Python 3
            args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(
                app_function
            )
        else:  # Python 2
            args, varargs, keywords, defaults = inspect.getargspec(app_function)
        num_args = len(args)
        if num_args == 0:
            result = app_function(event, context) if varargs else app_function()
        elif num_args == 1:
            result = app_function(event, context) if varargs else app_function(event)
        elif num_args == 2:
            result = app_function(event, context)
        else:
            raise RuntimeError(
                "Function signature is invalid. Expected a function that accepts at most "
                "2 arguments or varargs."
            )
        return result

    def get_function_for_aws_event(self, record):
        """
        Get the associated function to execute for a triggered AWS event

        Support S3, SNS, DynamoDB, kinesis and SQS events
        """
        if "s3" in record:
            if ":" in record["s3"]["configurationId"]:
                return record["s3"]["configurationId"].split(":")[-1]

        arn = None
        if "Sns" in record:
            try:
                message = json.loads(record["Sns"]["Message"])
                if message.get("command"):
                    return message["command"]
            except ValueError:
                pass
            arn = record["Sns"].get("TopicArn")
        elif "dynamodb" in record or "kinesis" in record:
            arn = record.get("eventSourceARN")
        elif "eventSource" in record and record.get("eventSource") == "aws:sqs":
            arn = record.get("eventSourceARN")
        elif "s3" in record:
            arn = record["s3"]["bucket"]["arn"]

        if arn:
            return self.settings.AWS_EVENT_MAPPING.get(arn)

        return None

    def get_function_from_bot_intent_trigger(self, event):
        """
        For the given event build ARN and return the configured function
        """
        intent = event.get("currentIntent")
        if intent:
            intent = intent.get("name")
            if intent:
                return self.settings.AWS_BOT_EVENT_MAPPING.get(
                    "{}:{}".format(intent, event.get("invocationSource"))
                )

    def get_function_for_cognito_trigger(self, trigger):
        """
        Get the associated function to execute for a cognito trigger
        """
        print(
            "get_function_for_cognito_trigger",
            self.settings.COGNITO_TRIGGER_MAPPING,
            trigger,
            self.settings.COGNITO_TRIGGER_MAPPING.get(trigger),
        )
        return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)

    def handler(self, event, context):
        """
        An AWS Lambda function which parses specific API Gateway input into a
        WSGI request, feeds it to our WSGI app, processes the response, and returns
        that back to the API Gateway.
        """
        settings = self.settings

        # If in DEBUG mode, log all raw incoming events.
        if settings.DEBUG:
            logger.debug("Zappa Event: {}".format(event))

        # Set any API Gateway defined Stage Variables
        # as env vars
        if event.get("stageVariables"):
            for key in event["stageVariables"].keys():
                os.environ[str(key)] = event["stageVariables"][key]

        # This is the result of a keep alive, recertify
        # or scheduled event.
        if event.get("detail-type") == "Scheduled Event":

            whole_function = event["resources"][0].split("/")[-1].split("-")[-1]

            # This is a scheduled function.
            if "." in whole_function:
                app_function = self.import_module_and_get_function(whole_function)

                # Execute the function!
                return self.run_function(app_function, event, context)

            # Else, let this execute as it were.

        # This is a direct command invocation.
        elif event.get("command", None):

            whole_function = event["command"]
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            print("Result of %s:" % whole_function)
            print(result)
            return result

        # This is a direct, raw python invocation.
        # It's _extremely_ important we don't allow this event source
        # to be overridden by unsanitized, non-admin user input.
        elif event.get("raw_command", None):
            # SECURITY: executes arbitrary Python from the event payload;
            # must only ever be reachable via trusted, direct invocation.
            raw_command = event["raw_command"]
            exec(raw_command)
            return

        # This is a Django management command invocation.
        elif event.get("manage", None):

            from django.core import management

            try:  # Support both for tests
                from zappa.ext.django_zappa import get_django_wsgi
            except ImportError as e:  # pragma: no cover
                from django_zappa_app import get_django_wsgi

            # Get the Django WSGI app from our extension
            # We don't actually need the function,
            # but we do need to do all of the required setup for it.
            app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)

            # Couldn't figure out how to get the value into stdout with StringIO..
            # Read the log for now. :[]
            management.call_command(*event["manage"].split(" "))
            return {}

        # This is an AWS-event triggered invocation.
        elif event.get("Records", None):

            records = event.get("Records")
            result = None
            whole_function = self.get_function_for_aws_event(records[0])
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # this is an AWS-event triggered from Lex bot's intent
        elif event.get("bot"):
            result = None
            whole_function = self.get_function_from_bot_intent_trigger(event)
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # This is an API Gateway authorizer event
        elif event.get("type") == "TOKEN":
            whole_function = self.settings.AUTHORIZER_FUNCTION
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                policy = self.run_function(app_function, event, context)
                return policy
            else:
                logger.error(
                    "Cannot find a function to process the authorization request."
                )
                raise Exception("Unauthorized")

        # This is an AWS Cognito Trigger Event
        elif event.get("triggerSource", None):
            triggerSource = event.get("triggerSource")
            whole_function = self.get_function_for_cognito_trigger(triggerSource)
            result = event
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error(
                    "Cannot find a function to handle cognito trigger {}".format(
                        triggerSource
                    )
                )
            return result

        # This is a CloudWatch event
        # Related: https://github.com/Miserlou/Zappa/issues/1924
        elif event.get("awslogs", None):
            result = None
            whole_function = "{}.{}".format(settings.APP_MODULE, settings.APP_FUNCTION)
            app_function = self.import_module_and_get_function(whole_function)
            if app_function:
                result = self.run_function(app_function, event, context)
                logger.debug("Result of %s:" % whole_function)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result

        # Normal web app flow
        try:
            # Timing
            time_start = datetime.datetime.now()

            # This is a normal HTTP request
            if event.get("httpMethod", None):
                script_name = ""
                is_elb_context = False
                headers = merge_headers(event)

                if event.get("requestContext", None) and event["requestContext"].get(
                    "elb", None
                ):
                    # Related: https://github.com/Miserlou/Zappa/issues/1715
                    # inputs/outputs for lambda loadbalancer
                    # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
                    is_elb_context = True
                    # host is lower-case when forwarded from ELB
                    host = headers.get("host")
                    # TODO: pathParameters is a first-class citizen in apigateway but not available without
                    # some parsing work for ELB (is this parameter used for anything?)
                    event["pathParameters"] = ""
                else:
                    if headers:
                        host = headers.get("Host")
                    else:
                        host = None
                    logger.debug("host found: [{}]".format(host))

                    if host:
                        if "amazonaws.com" in host:
                            logger.debug("amazonaws found in host")
                            # The path provided in th event doesn't include the
                            # stage, so we must tell Flask to include the API
                            # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
                            script_name = "/" + settings.API_STAGE
                    else:
                        # This is a test request sent from the AWS console
                        if settings.DOMAIN:
                            # Assume the requests received will be on the specified
                            # domain. No special handling is required
                            pass
                        else:
                            # Assume the requests received will be to the
                            # amazonaws.com endpoint, so tell Flask to include the
                            # API stage
                            script_name = "/" + settings.API_STAGE

                base_path = getattr(settings, "BASE_PATH", None)

                # Create the environment for WSGI and handle the request
                environ = create_wsgi_request(
                    event,
                    script_name=script_name,
                    base_path=base_path,
                    trailing_slash=self.trailing_slash,
                    binary_support=settings.BINARY_SUPPORT,
                    context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS,
                )

                # We are always on https on Lambda, so tell our wsgi app that.
                environ["HTTPS"] = "on"
                environ["wsgi.url_scheme"] = "https"
                environ["lambda.context"] = context
                environ["lambda.event"] = event

                # Execute the application
                with Response.from_app(self.wsgi_app, environ) as response:
                    # This is the object we're going to return.
                    # Pack the WSGI response into our special dictionary.
                    zappa_returndict = dict()

                    # Issue #1715: ALB support. ALB responses must always include
                    # base64 encoding and status description
                    if is_elb_context:
                        zappa_returndict.setdefault("isBase64Encoded", False)
                        zappa_returndict.setdefault(
                            "statusDescription", response.status
                        )

                    if response.data:
                        if (
                            settings.BINARY_SUPPORT
                            and not response.mimetype.startswith("text/")
                            and response.mimetype != "application/json"
                        ):
                            zappa_returndict["body"] = base64.b64encode(
                                response.data
                            ).decode("utf-8")
                            zappa_returndict["isBase64Encoded"] = True
                        else:
                            zappa_returndict["body"] = response.get_data(as_text=True)

                    zappa_returndict["statusCode"] = response.status_code
                    if "headers" in event:
                        zappa_returndict["headers"] = {}
                        for key, value in response.headers:
                            zappa_returndict["headers"][key] = value
                    if "multiValueHeaders" in event:
                        zappa_returndict["multiValueHeaders"] = {}
                        for key, value in response.headers:
                            zappa_returndict["multiValueHeaders"][
                                key
                            ] = response.headers.getlist(key)

                    # Calculate the total response time,
                    # and log it in the Common Log format.
                    time_end = datetime.datetime.now()
                    delta = time_end - time_start
                    response_time_ms = delta.total_seconds() * 1000
                    response.content = response.data
                    common_log(environ, response, response_time=response_time_ms)

                    return zappa_returndict
        except Exception as e:  # pragma: no cover
            # Print statements are visible in the logs either way
            print(e)
            exc_info = sys.exc_info()
            message = (
                "An uncaught exception happened while servicing this request. "
                "You can investigate this with the `zappa tail` command."
            )

            # If we didn't even build an app_module, just raise.
            if not settings.DJANGO_SETTINGS:
                try:
                    self.app_module
                except NameError as ne:
                    # NOTE(review): Python 3 exceptions have no .message
                    # attribute — if this branch is ever hit, the line below
                    # raises AttributeError; str(ne) would be correct.
                    message = "Failed to import module: {}".format(ne.message)

            # Call exception handler for unhandled exceptions
            exception_handler = self.settings.EXCEPTION_HANDLER
            self._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=e,
            )

            # Return this unspecified exception as a 500, using template that API Gateway expects.
            content = collections.OrderedDict()
            content["statusCode"] = 500
            body = {"message": message}
            if settings.DEBUG:  # only include traceback if debug is on.
                body["traceback"] = traceback.format_exception(
                    *exc_info
                )  # traceback as a list for readability.
            # NOTE(review): str(body) serializes the dict's repr as one JSON
            # string (double-encoded) rather than as a JSON object.
            content["body"] = json.dumps(str(body), sort_keys=True, indent=4)
            return content
def lambda_handler(event, context):  # pragma: no cover
    """Module-level AWS Lambda entry point; delegates to LambdaHandler."""
    return LambdaHandler.lambda_handler(event, context)
def keep_warm_callback(event, context):
    """
    Triggered by the CloudWatch schedule created when the keep_warm
    setting is set to true.

    The incoming event is replaced with an empty dict so that only the
    web app initialization (the "warming") is exercised.
    """
    lambda_handler(event={}, context=context)
# Optional import-time singleton: setting INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT
# pre-builds the handler (and thus the WSGI app) while the module loads, so
# the first request doesn't pay the setup cost.
global_handler = None
if os.environ.get("INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT"):
    global_handler = LambdaHandler()
import base64
import logging
import sys
from io import BytesIO
from urllib.parse import unquote, urlencode

import six
from requestlogger import ApacheFormatter
from werkzeug import urls

from .utilities import merge_headers, titlecase_keys
# HTTP methods that may carry a request body; only these are eligible for
# base64 decoding when binary support is enabled (see create_wsgi_request).
BINARY_METHODS = ["POST", "PUT", "PATCH", "DELETE", "CONNECT", "OPTIONS"]
def create_wsgi_request(
    event_info,
    server_name="zappa",
    script_name=None,
    trailing_slash=True,
    binary_support=False,
    base_path=None,
    context_header_mappings=None,
):
    """
    Given some event_info via API Gateway,
    create and return a valid WSGI request environ.

    :param event_info: Lambda event dict from API Gateway or an ALB.
    :param server_name: value placed in SERVER_NAME.
    :param script_name: WSGI mount point; may be overridden by base_path.
    :param trailing_slash: accepted for interface compatibility (unused here).
    :param binary_support: when True, base64-decode bodies of BINARY_METHODS.
    :param base_path: optional path prefix stripped from the request path.
    :param context_header_mappings: mapping of header name -> dotted path into
        event_info['requestContext'], surfaced as request headers.
    """
    # Don't use a mutable dict as a default argument.
    context_header_mappings = context_header_mappings or {}

    method = event_info["httpMethod"]
    headers = (
        merge_headers(event_info) or {}
    )  # Allow for the AGW console 'Test' button to work (Pull #735)

    # API Gateway and ALB both started allowing for multi-value querystring
    # params in Nov. 2018. If there aren't multi-value params present, then
    # it acts identically to 'queryStringParameters', so we can use it as a
    # drop-in replacement.
    #
    # The one caveat here is that ALB will only include _one_ of
    # queryStringParameters _or_ multiValueQueryStringParameters, which means
    # we have to check for the existence of one and then fall back to the
    # other.
    if "multiValueQueryStringParameters" in event_info:
        query = event_info["multiValueQueryStringParameters"]
        query_string = urlencode(query, doseq=True) if query else ""
    else:
        query = event_info.get("queryStringParameters", {})
        query_string = urlencode(query) if query else ""

    # Walk each dotted path into requestContext and expose the value as a
    # header; missing intermediate keys silently skip the mapping.
    if context_header_mappings:
        for key, value in context_header_mappings.items():
            parts = value.split(".")
            header_val = event_info["requestContext"]
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val

    # Extract remote user from context if Authorizer is enabled
    remote_user = None
    if event_info["requestContext"].get("authorizer"):
        remote_user = event_info["requestContext"]["authorizer"].get("principalId")
    elif event_info["requestContext"].get("identity"):
        remote_user = event_info["requestContext"]["identity"].get("userArn")

    # Related: https://github.com/Miserlou/Zappa/issues/677
    #   https://github.com/Miserlou/Zappa/issues/683
    #   https://github.com/Miserlou/Zappa/issues/696
    #   https://github.com/Miserlou/Zappa/issues/836
    #   https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get("isBase64Encoded", False):
            body = base64.b64decode(event_info["body"])
        else:
            body = event_info["body"]
            if isinstance(body, str):
                body = body.encode("utf-8")
    else:
        body = event_info["body"]
        if isinstance(body, str):
            body = body.encode("utf-8")

    # Make header names canonical, e.g. content-type => Content-Type
    # https://github.com/Miserlou/Zappa/issues/1188
    headers = titlecase_keys(headers)

    # urllib.parse.unquote replaces werkzeug.urls.url_unquote, which was
    # removed in modern Werkzeug releases; behavior (utf-8, errors='replace')
    # is equivalent.
    path = unquote(event_info["path"])
    if base_path:
        script_name = "/" + base_path

        if path.startswith(script_name):
            path = path[len(script_name) :]

    x_forwarded_for = headers.get("X-Forwarded-For", "")
    if "," in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        remote_addr = x_forwarded_for.split(", ")[-2]
    else:
        remote_addr = x_forwarded_for or "127.0.0.1"

    environ = {
        "PATH_INFO": get_wsgi_string(path),
        "QUERY_STRING": get_wsgi_string(query_string),
        "REMOTE_ADDR": remote_addr,
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": get_wsgi_string(str(script_name)) if script_name else "",
        "SERVER_NAME": str(server_name),
        "SERVER_PORT": headers.get("X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": str("HTTP/1.1"),
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": headers.get("X-Forwarded-Proto", "http"),
        "wsgi.input": body,
        "wsgi.errors": sys.stderr,
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
    }

    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if "Content-Type" in headers:
            environ["CONTENT_TYPE"] = headers["Content-Type"]

        # This must be Bytes or None
        environ["wsgi.input"] = BytesIO(body)
        if body:
            environ["CONTENT_LENGTH"] = str(len(body))
        else:
            environ["CONTENT_LENGTH"] = "0"

    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace("-", "_")
        environ[wsgi_name] = str(headers[header])

    if script_name:
        environ["SCRIPT_NAME"] = script_name
        path_info = environ["PATH_INFO"]

        if script_name in path_info:
            # BUG FIX: str.replace returns a new string; the previous code
            # discarded the result, leaving SCRIPT_NAME duplicated inside
            # PATH_INFO. Strip only the first occurrence.
            environ["PATH_INFO"] = path_info.replace(script_name, "", 1)

    if remote_user:
        environ["REMOTE_USER"] = remote_user

    if event_info["requestContext"].get("authorizer"):
        environ["API_GATEWAY_AUTHORIZER"] = event_info["requestContext"]["authorizer"]

    return environ
def common_log(environ, response, response_time=None):
    """
    Log one request/response pair in Apache Common Log Format.

    When *response_time* is given (in ms) it is appended to the entry;
    otherwise a plain entry is produced. Returns the formatted line.
    """
    logger = logging.getLogger()

    if not response_time:
        formatter = ApacheFormatter(with_response_time=False)
        log_entry = formatter(response.status_code, environ, len(response.content))
    else:
        formatter = ApacheFormatter(with_response_time=True)
        try:
            log_entry = formatter(
                response.status_code,
                environ,
                len(response.content),
                rt_us=response_time,
            )
        except TypeError:
            # Upstream introduced a very annoying breaking change on the
            # rt_ms/rt_us kwarg — fall back to the older spelling.
            log_entry = formatter(
                response.status_code,
                environ,
                len(response.content),
                rt_ms=response_time,
            )

    logger.info(log_entry)
    return log_entry
# Related: https://github.com/Miserlou/Zappa/issues/1199
def get_wsgi_string(string, encoding="utf-8"):
    """
    Return *string* re-decoded as latin-1, the "bytes-as-unicode"
    representation PEP 3333 requires for WSGI environ values.
    """
    raw_bytes = string.encode(encoding)
    return raw_bytes.decode("iso-8859-1")
import importlib
import inspect
import json
import os
import time
import uuid
from functools import update_wrapper, wraps
import boto3
import botocore
from .utilities import get_topic_name
# The DynamoDB table used to store captured async responses; optional —
# absent zappa_settings (e.g. local tooling) simply disables the feature.
try:
    from zappa_settings import ASYNC_RESPONSE_TABLE
except ImportError:
    ASYNC_RESPONSE_TABLE = None

# Declare these here so they're kept warm.
try:
    aws_session = boto3.Session()
    LAMBDA_CLIENT = aws_session.client("lambda")
    SNS_CLIENT = aws_session.client("sns")
    STS_CLIENT = aws_session.client("sts")
    DYNAMODB_CLIENT = aws_session.client("dynamodb")
except botocore.exceptions.NoRegionError as e:  # pragma: no cover
    # This can happen while testing on Travis, but it's taken care of
    # during class initialization.
    pass

##
# Response and Exception classes
##

# Maximum serialized message size (bytes) accepted by the async transports;
# matches the AWS 256 KB limit for async Lambda invokes and SNS messages.
LAMBDA_ASYNC_PAYLOAD_LIMIT = 256000
SNS_ASYNC_PAYLOAD_LIMIT = 256000
class AsyncException(Exception):  # pragma: no cover
    """Raised when an async task message cannot be dispatched (e.g. the
    serialized payload exceeds the transport's size limit)."""
class LambdaAsyncResponse:
    """
    Base response dispatcher: sends the task message by asynchronously
    invoking another Lambda function.

    Can be used directly, or subclassed with _send() overridden to change
    the transport (see SnsAsyncResponse).
    """

    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """Resolve the Lambda client and, if requested, a response id."""
        boto_session = kwargs.get("boto_session")
        if boto_session:
            self.client = boto_session.client("lambda")
        else:  # pragma: no cover
            self.client = LAMBDA_CLIENT

        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region

        if not capture_response:
            self.response_id = None
        elif ASYNC_RESPONSE_TABLE is None:
            # Response capture requires the DynamoDB table to be configured.
            print(
                "Warning! Attempted to capture a response without "
                "async_response_table configured in settings (you won't "
                "capture async responses)."
            )
            capture_response = False
            self.response_id = "MISCONFIGURED"
        else:
            self.response_id = str(uuid.uuid4())

        self.capture_response = capture_response

    def send(self, task_path, args, kwargs):
        """
        Build the task message and hand it to _send(); returns self so the
        caller can inspect .sent / .response / .response_id.
        """
        self._send(
            {
                "task_path": task_path,
                "capture_response": self.capture_response,
                "response_id": self.response_id,
                "args": args,
                "kwargs": kwargs,
            }
        )
        return self

    def _send(self, message):
        """Asynchronously invoke the target Lambda with *message* as payload."""
        message["command"] = "zappa.asynchronous.route_lambda_task"
        payload = json.dumps(message)
        if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for async Lambda call")
        self.response = self.client.invoke(
            FunctionName=self.lambda_function_name,
            InvocationType="Event",  # makes the call async
            Payload=payload,
        )
        self.sent = self.response.get("StatusCode", 0) == 202
class SnsAsyncResponse(LambdaAsyncResponse):
    """
    Dispatcher that publishes the serialized task (function path and
    arguments) to an SNS topic instead of invoking Lambda directly.
    """

    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region

        boto_session = kwargs.get("boto_session")
        if boto_session:
            self.client = boto_session.client("sns")
        else:  # pragma: no cover
            self.client = SNS_CLIENT

        # Use the explicitly provided topic ARN, or derive it from the
        # caller's account id and the function's topic name.
        self.arn = kwargs.get("arn")
        if not self.arn:
            if boto_session:
                sts_client = boto_session.client("sts")
            else:
                sts_client = STS_CLIENT
            account_id = sts_client.get_caller_identity()["Account"]
            self.arn = "arn:aws:sns:{region}:{account}:{topic_name}".format(
                region=self.aws_region,
                account=account_id,
                topic_name=get_topic_name(self.lambda_function_name),
            )

        # Issue: https://github.com/Miserlou/Zappa/issues/1209
        # TODO: Refactor
        self.capture_response = capture_response
        if not capture_response:
            self.response_id = None
        elif ASYNC_RESPONSE_TABLE is None:
            print(
                "Warning! Attempted to capture a response without "
                "async_response_table configured in settings (you won't "
                "capture async responses)."
            )
            capture_response = False
            self.response_id = "MISCONFIGURED"
        else:
            self.response_id = str(uuid.uuid4())

        self.capture_response = capture_response

    def _send(self, message):
        """Publish *message* to the configured SNS topic."""
        message["command"] = "zappa.asynchronous.route_sns_task"
        payload = json.dumps(message)
        if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for SNS")
        self.response = self.client.publish(TargetArn=self.arn, Message=payload)
        self.sent = self.response.get("MessageId")
##
# Async Routers
##

# Maps the `service` argument of run()/@task to its dispatcher class.
ASYNC_CLASSES = {
    "lambda": LambdaAsyncResponse,
    "sns": SnsAsyncResponse,
}
def route_lambda_task(event, context):
    """
    Lambda-side entry point for directly-invoked async tasks.

    The invocation payload *is* the task message (task_path/args/kwargs),
    so it is handed to run_message() unchanged.
    """
    return run_message(event)
def route_sns_task(event, context):
    """
    Lambda-side entry point for SNS-delivered async tasks.

    Extracts the JSON task message from the first SNS record, deserializes
    it, and runs it.
    """
    sns_payload = event["Records"][0]["Sns"]["Message"]
    return run_message(json.loads(sns_payload))
def run_message(message):
    """
    Execute the task described by *message*.

    Expected keys: 'task_path', 'args', 'kwargs' (set by the async routers)
    plus the 'command' key consumed by handler.py. When 'capture_response'
    is set, progress and the JSON-serialized result are recorded in the
    DynamoDB response table.
    """
    capture = message.get("capture_response", False)

    if capture:
        # Mark the task as started; the entry expires after ten minutes.
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                "id": {"S": str(message["response_id"])},
                "ttl": {"N": str(int(time.time() + 600))},
                "async_status": {"S": "in progress"},
                "async_response": {"S": str(json.dumps("N/A"))},
            },
        )

    func = import_and_get_task(message["task_path"])
    # @task-decorated callables expose the undecorated function as .sync.
    target = getattr(func, "sync", func)
    response = target(*message["args"], **message["kwargs"])

    if capture:
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={"id": {"S": str(message["response_id"])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ":r": {"S": str(json.dumps(response))},
                ":s": {"S": "complete"},
            },
        )

    return response
##
# Execution interfaces and classes
##
def run(
    func,
    args=None,
    kwargs=None,
    service="lambda",
    capture_response=False,
    remote_aws_lambda_function_name=None,
    remote_aws_region=None,
    **task_kwargs
):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:

    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)

    If you want to use SNS, then do:

    zappa.asynchronous.run(func, args, kwargs, service='sns')

    and other arguments are similar to @task
    """
    # Bug fix: the previous mutable defaults (args=[], kwargs={}) created a
    # single shared list/dict reused across every call to run(); use None
    # sentinels instead. Passing no args/kwargs behaves exactly as before.
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}

    # Fall back to this Lambda's own identity when no remote target is given.
    lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
        "AWS_LAMBDA_FUNCTION_NAME"
    )
    aws_region = remote_aws_region or os.environ.get("AWS_REGION")

    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](
        lambda_function_name=lambda_function_name,
        aws_region=aws_region,
        capture_response=capture_response,
        **task_kwargs
    ).send(task_path, args, kwargs)
# Handy:
# http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function
# However, this needs to pass inspect.getargspec() in handler.py which does not take classes
# Wrapper written to take optional arguments
# http://chase-seibert.github.io/blog/2013/12/17/python-decorator-optional-parameter.html
def task(*args, **kwargs):
    """Async task decorator so that running

    Args:
        func (function): the function to be wrapped
            Further requirements:
            func must be an independent top-level function.
            i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against

    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question
    """
    # Support both bare usage (@task) and parameterized usage (@task(...)):
    # in the bare case, the decorated function arrives as the sole
    # positional argument and there are no kwargs.
    func = None
    if len(args) == 1 and callable(args[0]):
        func = args[0]

    if not kwargs:  # Default Values
        service = "lambda"
        lambda_function_name_arg = None
        aws_region_arg = None

    else:  # Arguments were passed
        service = kwargs.get("service", "lambda")
        lambda_function_name_arg = kwargs.get("remote_aws_lambda_function_name")
        aws_region_arg = kwargs.get("remote_aws_region")

    capture_response = kwargs.get("capture_response", False)

    def func_wrapper(func):

        task_path = get_func_task_path(func)

        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func

            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.

            Returns:
                In async mode, the object returned includes state of the dispatch.
                For instance

                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get(
                "AWS_LAMBDA_FUNCTION_NAME"
            )
            aws_region = aws_region_arg or os.environ.get("AWS_REGION")

            # Dispatch asynchronously only when a known service was requested
            # AND we appear to be running inside Lambda; otherwise fall back
            # to a plain synchronous call.
            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](
                    lambda_function_name=lambda_function_name,
                    aws_region=aws_region,
                    capture_response=capture_response,
                ).send(task_path, args, kwargs)
                return send_result
            else:
                return func(*args, **kwargs)

        update_wrapper(_run_async, func)

        _run_async.service = service
        # Expose the original callable so run_message() (and callers) can
        # invoke it synchronously via `.sync`.
        _run_async.sync = func

        return _run_async

    # Bare decorator: wrap immediately. Parameterized: hand back the wrapper.
    return func_wrapper(func) if func else func_wrapper
def task_sns(func):
    """
    Convenience decorator: identical to @task, but dispatches via SNS.
    """
    return task(func, service="sns")
##
# Utility Functions
##
def import_and_get_task(task_path):
    """
    Resolve a dotted path such as 'package.module.func' to the named
    function object, importing the containing module on the way.
    """
    module_name, _, function_name = task_path.rpartition(".")
    containing_module = importlib.import_module(module_name)
    return getattr(containing_module, function_name)
def get_func_task_path(func):
    """
    Build the dotted modular path ('pkg.module.func_name') for a function
    by inspecting the module it was defined in.
    """
    owning_module = inspect.getmodule(func).__name__
    return f"{owning_module}.{func.__name__}"
def get_async_response(response_id):
    """
    Fetch a captured async task response from the DynamoDB response table.

    Returns None when no record exists for the given id, otherwise a dict
    with the task 'status' and the JSON-decoded 'response'.
    """
    record = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE, Key={"id": {"S": str(response_id)}}
    )
    item = record.get("Item")
    if item is None:
        return None
    return {
        "status": item["async_status"]["S"],
        "response": json.loads(item["async_response"]["S"]),
    }
import getpass
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
import boto3
import botocore
import requests
import troposphere
import troposphere.apigateway
from botocore.exceptions import ClientError
from setuptools import find_packages
from tqdm import tqdm
from .utilities import (
add_event_source,
conflicts_with_a_neighbouring_module,
contains_python_files_or_subdirs,
copytree,
get_topic_name,
get_venv_from_python_version,
human_size,
remove_event_source,
)
##
# Logging Config
##

# Module-level logger for all of zappa.core; INFO and above is emitted
# via the root handler installed by basicConfig.
logging.basicConfig(format="%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##
# Policies And Template Mappings
##
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
# Regions in which API Gateway is available. (Fixed: "eu-north-1" was
# listed twice.)
API_GATEWAY_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
# Regions in which AWS Lambda is available. (Fixed: "eu-north-1" was
# listed twice.)
LAMBDA_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
ZIP_EXCLUDES = [
    "*.exe",
    "*.DS_Store",
    "*.Python",
    "*.git",
    ".git/*",
    "*.zip",
    "*.tar.gz",
    "*.hg",
    "pip",
    "docutils*",
    # NOTE(review): "setuputils*" looks like a typo for "setuptools*" —
    # confirm before changing, since fixing it would alter what gets
    # stripped from deployment packages.
    "setuputils*",
    "__pycache__/*",
]

# When using ALB as an event source for Lambdas, we need to create an alias
# to ensure that, on zappa update, the ALB doesn't lose permissions to access
# the Lambda.
# See: https://github.com/Miserlou/Zappa/pull/1730
ALB_LAMBDA_ALIAS = "current-alb-version"
##
# Classes
##


class Zappa:
    """
    Zappa!

    Makes it easy to run Python web applications on AWS Lambda/API Gateway.
    """

    ##
    # Configurables
    ##

    # HTTP methods exposed on the API Gateway proxy resource.
    http_methods = ["ANY"]
    # Default IAM execution role name for the deployed Lambda.
    role_name = "ZappaLambdaExecution"
    # Optional extra IAM statements merged into the attach policy.
    extra_permissions = None
    # IAM trust / permission policy documents (module-level JSON strings).
    assume_policy = ASSUME_POLICY
    attach_policy = ATTACH_POLICY
    # Optional resource policy applied to the API Gateway itself.
    apigateway_policy = None
    # Valid API Gateway CloudWatch logging levels.
    cloudwatch_log_levels = ["OFF", "ERROR", "INFO"]
    # Whether AWS X-Ray tracing is enabled for the function.
    xray_tracing = False

    ##
    # Credentials
    ##

    # Populated by load_credentials() / __init__.
    boto_session = None
    credentials_arn = None
    def __init__(
        self,
        boto_session=None,
        profile_name=None,
        aws_region=None,
        load_credentials=True,
        desired_role_name=None,
        desired_role_arn=None,
        runtime="python3.6",  # Detected at runtime in CLI
        tags=(),
        endpoint_urls={},
        xray_tracing=False,
    ):
        """
        Instantiate this new Zappa instance, loading any custom credentials if necessary.

        Args:
            boto_session: Existing boto3 Session to reuse; otherwise one is
                created by load_credentials().
            profile_name: AWS credentials profile to load.
            aws_region: Target region; None means use boto's default region.
            load_credentials: When True, resolve credentials and construct
                all AWS service clients immediately.
            desired_role_name: Override for the IAM execution role name.
            desired_role_arn: Override for the IAM execution role ARN.
            runtime: Lambda Python runtime identifier, e.g. "python3.8".
            tags: Tags applied to created resources.
            endpoint_urls: Per-service endpoint overrides for non-AWS
                (testing/bootleg cloud) deployments.
            xray_tracing: Enable AWS X-Ray tracing.
        """
        # Set aws_region to None to use the system's region instead
        if aws_region is None:
            # https://github.com/Miserlou/Zappa/issues/413
            self.aws_region = boto3.Session().region_name
            logger.debug("Set region from boto: %s", self.aws_region)
        else:
            self.aws_region = aws_region

        if desired_role_name:
            self.role_name = desired_role_name

        if desired_role_arn:
            self.credentials_arn = desired_role_arn

        self.runtime = runtime

        # Pick the CPython ABI tag used to match manylinux wheel filenames
        # for this runtime.
        if self.runtime == "python3.6":
            self.manylinux_suffix_start = "cp36m"
        elif self.runtime == "python3.7":
            self.manylinux_suffix_start = "cp37m"
        elif self.runtime == "python3.8":
            # The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
            # See https://github.com/pypa/manylinux for a more detailed explanation
            self.manylinux_suffix_start = "cp38"
        else:
            self.manylinux_suffix_start = "cp39"

        # AWS Lambda supports manylinux1/2010, manylinux2014, and manylinux_2_24
        manylinux_suffixes = ("_2_24", "2014", "2010", "1")
        self.manylinux_wheel_file_match = re.compile(
            f'^.*{self.manylinux_suffix_start}-(manylinux_\d+_\d+_x86_64[.])?manylinux({"|".join(manylinux_suffixes)})_x86_64[.]whl$'
        )
        self.manylinux_wheel_abi3_file_match = re.compile(
            f'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
        )

        self.endpoint_urls = endpoint_urls
        self.xray_tracing = xray_tracing

        # Some common invocations, such as DB migrations,
        # can take longer than the default.

        # Config used for direct invocations of Lambda functions from the Zappa CLI.
        # Note that the maximum configurable Lambda function execution time (15 minutes)
        # is longer than the maximum timeout configurable in API Gateway (30 seconds).
        # Related: https://github.com/Miserlou/Zappa/issues/205
        long_config_dict = {
            "region_name": aws_region,
            "connect_timeout": 5,
            "read_timeout": 900,
        }
        long_config = botocore.client.Config(**long_config_dict)

        if load_credentials:
            self.load_credentials(boto_session, profile_name)

            # Initialize clients
            self.s3_client = self.boto_client("s3")
            self.lambda_client = self.boto_client("lambda", config=long_config)
            self.elbv2_client = self.boto_client("elbv2")
            self.events_client = self.boto_client("events")
            self.apigateway_client = self.boto_client("apigateway")
            # AWS ACM certificates need to be created from us-east-1 to be used by API gateway
            east_config = botocore.client.Config(region_name="us-east-1")
            self.acm_client = self.boto_client("acm", config=east_config)
            self.logs_client = self.boto_client("logs")
            self.iam_client = self.boto_client("iam")
            self.iam = self.boto_resource("iam")
            self.cloudwatch = self.boto_client("cloudwatch")
            self.route53 = self.boto_client("route53")
            self.sns_client = self.boto_client("sns")
            self.cf_client = self.boto_client("cloudformation")
            self.dynamodb_client = self.boto_client("dynamodb")
            self.cognito_client = self.boto_client("cognito-idp")
            self.sts_client = self.boto_client("sts")

        self.tags = tags
        # CloudFormation template state built up across deploy operations.
        self.cf_template = troposphere.Template()
        self.cf_api_resources = []
        self.cf_parameters = {}
def configure_boto_session_method_kwargs(self, service, kw):
"""Allow for custom endpoint urls for non-AWS (testing and bootleg cloud) deployments"""
if service in self.endpoint_urls and not "endpoint_url" in kw:
kw["endpoint_url"] = self.endpoint_urls[service]
return kw
def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
def cache_param(self, value):
"""Returns a troposphere Ref to a value cached as a parameter."""
if value not in self.cf_parameters:
keyname = chr(ord("A") + len(self.cf_parameters))
param = self.cf_template.add_parameter(
troposphere.Parameter(
keyname, Type="String", Default=value, tags=self.tags
)
)
self.cf_parameters[value] = param
return troposphere.Ref(self.cf_parameters[value])
    ##
    # Packaging
    ##

    def copy_editable_packages(self, egg_links, temp_package_path):
        """
        Copy editable (pip install -e) packages into the temp package dir.

        Each *.egg-link file points at a source checkout; the top-level
        packages found there are copied into `temp_package_path`, and any
        egg-link files remaining in the destination are removed afterwards
        (they would break the packaged app).
        """
        for egg_link in egg_links:
            with open(egg_link, "rb") as df:
                # First line of an egg-link file is the source checkout path.
                egg_path = df.read().decode("utf-8").splitlines()[0].strip()
                pkgs = set(
                    [
                        x.split(".")[0]
                        for x in find_packages(egg_path, exclude=["test", "tests"])
                    ]
                )
                for pkg in pkgs:
                    copytree(
                        os.path.join(egg_path, pkg),
                        os.path.join(temp_package_path, pkg),
                        metadata=False,
                        symlinks=False,
                    )

        if temp_package_path:
            # now remove any egg-links as they will cause issues if they still exist
            for link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
                os.remove(link)
def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(
pkg_name=req.project_name, installed_distros=installed_distros
)
return list(set(deps)) # de-dupe before returning
    def create_handler_venv(self):
        """
        Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

        Returns the path of the new handler venv folder.
        """
        import subprocess

        # We will need the current venv to pull Zappa from
        current_venv = self.get_current_venv()

        # Make a new folder for the handler packages
        ve_path = os.path.join(os.getcwd(), "handler_venv")

        if os.sys.platform == "win32":
            current_site_packages_dir = os.path.join(
                current_venv, "Lib", "site-packages"
            )
            venv_site_packages_dir = os.path.join(ve_path, "Lib", "site-packages")
        else:
            current_site_packages_dir = os.path.join(
                current_venv, "lib", get_venv_from_python_version(), "site-packages"
            )
            venv_site_packages_dir = os.path.join(
                ve_path, "lib", get_venv_from_python_version(), "site-packages"
            )

        if not os.path.isdir(venv_site_packages_dir):
            os.makedirs(venv_site_packages_dir)

        # Copy zappa* to the new virtualenv
        zappa_things = [
            z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == "zappa"
        ]
        for z in zappa_things:
            copytree(
                os.path.join(current_site_packages_dir, z),
                os.path.join(venv_site_packages_dir, z),
            )

        # Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
        zappa_deps = self.get_deps_list("zappa")
        pkg_list = ["{0!s}=={1!s}".format(dep, version) for dep, version in zappa_deps]

        # Need to manually add setuptools
        pkg_list.append("setuptools")
        command = [
            "pip",
            "install",
            "--quiet",
            "--target",
            venv_site_packages_dir,
        ] + pkg_list

        # This is the recommended method for installing packages if you don't
        # want to depend on `setuptools`
        # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
        pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # Using communicate() to avoid deadlocks
        pip_process.communicate()
        pip_return_code = pip_process.returncode

        if pip_return_code:
            raise EnvironmentError("Pypi lookup failed")

        return ve_path
    # staticmethod as per https://github.com/Miserlou/Zappa/issues/780
    @staticmethod
    def get_current_venv():
        """
        Returns the path to the current virtualenv, or None when neither a
        VIRTUAL_ENV variable nor a pyenv .python-version file is present.
        """
        if "VIRTUAL_ENV" in os.environ:
            venv = os.environ["VIRTUAL_ENV"]
        elif os.path.exists(".python-version"):  # pragma: no cover
            # Probe for a working pyenv executable before trusting the file.
            try:
                subprocess.check_output(["pyenv", "help"], stderr=subprocess.STDOUT)
            except OSError:
                print(
                    "This directory seems to have pyenv's local venv, "
                    "but pyenv executable was not found."
                )
            with open(".python-version", "r") as f:
                # minor fix in how .python-version is read
                # Related: https://github.com/Miserlou/Zappa/issues/921
                env_name = f.readline().strip()
            bin_path = subprocess.check_output(["pyenv", "which", "python"]).decode(
                "utf-8"
            )
            # Strip the trailing bin/python path to recover the venv root.
            venv = bin_path[: bin_path.rfind(env_name)] + env_name
        else:  # pragma: no cover
            return None
        return venv
    def create_lambda_zip(
        self,
        prefix="lambda_package",
        handler_file=None,
        slim_handler=False,
        minify=True,
        exclude=None,
        exclude_glob=None,
        use_precompiled_packages=True,
        include=None,
        venv=None,
        output=None,
        disable_progress=False,
        archive_format="zip",
    ):
        """
        Create a Lambda-ready zip file of the current virtualenvironment and working directory.

        Returns path to that file.

        Args:
            prefix: Base name for the generated archive.
            handler_file: Optional handler file copied to the archive root
                (AWS Lambda requires the handler at the top level).
            slim_handler: When True, skip packaging the project files.
            minify: Apply ZIP_EXCLUDES and `exclude` patterns while copying.
            exclude: Extra filename patterns to exclude.
            exclude_glob: Extra glob patterns removed after copying.
            use_precompiled_packages: Replace packages with manylinux wheels
                from PyPI where available.
            include: Not referenced in this implementation; accepted for
                interface compatibility.
            venv: Virtualenv to package; autodetected when omitted.
            output: Explicit output filename, overriding `prefix`.
            disable_progress: Suppress download progress bars.
            archive_format: "zip" or "tarball".
        """
        # Validate archive_format
        if archive_format not in ["zip", "tarball"]:
            raise KeyError(
                "The archive format to create a lambda package must be zip or tarball"
            )

        # Pip is a weird package.
        # Calling this function in some environments without this can cause.. funkiness.
        import pip

        if not venv:
            venv = self.get_current_venv()

        build_time = str(int(time.time()))
        cwd = os.getcwd()
        if not output:
            if archive_format == "zip":
                archive_fname = prefix + "-" + build_time + ".zip"
            elif archive_format == "tarball":
                archive_fname = prefix + "-" + build_time + ".tar.gz"
        else:
            archive_fname = output
        archive_path = os.path.join(cwd, archive_fname)

        # Files that should be excluded from the zip
        if exclude is None:
            exclude = list()

        if exclude_glob is None:
            exclude_glob = list()

        # Exclude the zip itself
        exclude.append(archive_path)

        # Make sure that 'concurrent' is always forbidden.
        # https://github.com/Miserlou/Zappa/issues/827
        if not "concurrent" in exclude:
            exclude.append("concurrent")

        def splitpath(path):
            # Split a path into all of its components, root-first.
            parts = []
            (path, tail) = os.path.split(path)
            while path and tail:
                parts.append(tail)
                (path, tail) = os.path.split(path)
            parts.append(os.path.join(path, tail))
            return list(map(os.path.normpath, parts))[::-1]

        split_venv = splitpath(venv)
        split_cwd = splitpath(cwd)

        # Ideally this should be avoided automatically,
        # but this serves as an okay stop-gap measure.
        if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
            print(
                "Warning! Your project and virtualenv have the same name! You may want "
                "to re-create your venv with a new name, or explicitly define a "
                "'project_name', as this may cause errors."
            )

        # First, do the project..
        temp_project_path = tempfile.mkdtemp(prefix="zappa-project")

        if not slim_handler:
            # Slim handler does not take the project files.
            if minify:
                # Related: https://github.com/Miserlou/Zappa/issues/744
                excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
                copytree(
                    cwd,
                    temp_project_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(cwd, temp_project_path, metadata=False, symlinks=False)
            for glob_path in exclude_glob:
                for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                    try:
                        os.remove(path)
                    except OSError:  # is a directory
                        shutil.rmtree(path)

        # If a handler_file is supplied, copy that to the root of the package,
        # because that's where AWS Lambda looks for it. It can't be inside a package.
        if handler_file:
            filename = handler_file.split(os.sep)[-1]
            shutil.copy(handler_file, os.path.join(temp_project_path, filename))

        # Create and populate package ID file and write to temp project path
        package_info = {}
        package_info["uuid"] = str(uuid.uuid4())
        package_info["build_time"] = build_time
        package_info["build_platform"] = os.sys.platform
        package_info["build_user"] = getpass.getuser()
        # TODO: Add git head and info?

        # Ex, from @scoates:
        # def _get_git_branch():
        #     chdir(DIR)
        #     out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
        #     lambci_branch = environ.get('LAMBCI_BRANCH', None)
        #     if out == "HEAD" and lambci_branch:
        #         out += " lambci:{}".format(lambci_branch)
        #     return out

        # def _get_git_hash():
        #     chdir(DIR)
        #     return check_output(['git', 'rev-parse', 'HEAD']).strip()

        # def _get_uname():
        #     return check_output(['uname', '-a']).strip()

        # def _get_user():
        #     return check_output(['whoami']).strip()

        # def set_id_info(zappa_cli):
        #     build_info = {
        #         'branch': _get_git_branch(),
        #         'hash': _get_git_hash(),
        #         'build_uname': _get_uname(),
        #         'build_user': _get_user(),
        #         'build_time': datetime.datetime.utcnow().isoformat(),
        #     }
        #     with open(path.join(DIR, 'id_info.json'), 'w') as f:
        #         json.dump(build_info, f)
        #     return True

        package_id_file = open(
            os.path.join(temp_project_path, "package_info.json"), "w"
        )
        dumped = json.dumps(package_info, indent=4)
        try:
            package_id_file.write(dumped)
        except TypeError:  # This is a Python 2/3 issue. TODO: Make pretty!
            package_id_file.write(str(dumped))
        package_id_file.close()

        # Then, do site site-packages..
        egg_links = []
        temp_package_path = tempfile.mkdtemp(prefix="zappa-packages")
        if os.sys.platform == "win32":
            site_packages = os.path.join(venv, "Lib", "site-packages")
        else:
            site_packages = os.path.join(
                venv, "lib", get_venv_from_python_version(), "site-packages"
            )
        egg_links.extend(glob.glob(os.path.join(site_packages, "*.egg-link")))

        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(
                site_packages,
                temp_package_path,
                metadata=False,
                symlinks=False,
                ignore=shutil.ignore_patterns(*excludes),
            )
        else:
            copytree(site_packages, temp_package_path, metadata=False, symlinks=False)

        # We may have 64-bin specific packages too.
        site_packages_64 = os.path.join(
            venv, "lib64", get_venv_from_python_version(), "site-packages"
        )
        if os.path.exists(site_packages_64):
            egg_links.extend(glob.glob(os.path.join(site_packages_64, "*.egg-link")))
            if minify:
                excludes = ZIP_EXCLUDES + exclude
                copytree(
                    site_packages_64,
                    temp_package_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(
                    site_packages_64, temp_package_path, metadata=False, symlinks=False
                )

        if egg_links:
            self.copy_editable_packages(egg_links, temp_package_path)

        # Merge the site-packages copy over the project copy.
        copy_tree(temp_package_path, temp_project_path, update=True)

        # Then the pre-compiled packages..
        if use_precompiled_packages:
            print("Downloading and installing dependencies..")
            installed_packages = self.get_installed_packages(
                site_packages, site_packages_64
            )

            try:
                for (
                    installed_package_name,
                    installed_package_version,
                ) in installed_packages.items():
                    cached_wheel_path = self.get_cached_manylinux_wheel(
                        installed_package_name,
                        installed_package_version,
                        disable_progress,
                    )
                    if cached_wheel_path:
                        # Otherwise try to use manylinux packages from PyPi..
                        # Related: https://github.com/Miserlou/Zappa/issues/398
                        shutil.rmtree(
                            os.path.join(temp_project_path, installed_package_name),
                            ignore_errors=True,
                        )
                        with zipfile.ZipFile(cached_wheel_path) as zfile:
                            zfile.extractall(temp_project_path)

            except Exception as e:
                print(e)
                # XXX - What should we do here?

        # Cleanup
        for glob_path in exclude_glob:
            for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                try:
                    os.remove(path)
                except OSError:  # is a directory
                    shutil.rmtree(path)

        # Then archive it all up..
        if archive_format == "zip":
            print("Packaging project as zip.")

            try:
                compression_method = zipfile.ZIP_DEFLATED
            except ImportError:  # pragma: no cover
                compression_method = zipfile.ZIP_STORED
            archivef = zipfile.ZipFile(archive_path, "w", compression_method)

        elif archive_format == "tarball":
            print("Packaging project as gzipped tarball.")
            archivef = tarfile.open(archive_path, "w|gz")

        for root, dirs, files in os.walk(temp_project_path):

            for filename in files:

                # Skip .pyc files for Django migrations
                # https://github.com/Miserlou/Zappa/issues/436
                # https://github.com/Miserlou/Zappa/issues/464
                if filename[-4:] == ".pyc" and root[-10:] == "migrations":
                    continue

                # If there is a .pyc file in this package,
                # we can skip the python source code as we'll just
                # use the compiled bytecode anyway..
                if filename[-3:] == ".py" and root[-10:] != "migrations":
                    abs_filname = os.path.join(root, filename)
                    abs_pyc_filename = abs_filname + "c"
                    if os.path.isfile(abs_pyc_filename):

                        # but only if the pyc is older than the py,
                        # otherwise we'll deploy outdated code!
                        py_time = os.stat(abs_filname).st_mtime
                        pyc_time = os.stat(abs_pyc_filename).st_mtime

                        if pyc_time > py_time:
                            continue

                # Make sure that the files are all correctly chmodded
                # Related: https://github.com/Miserlou/Zappa/issues/484
                # Related: https://github.com/Miserlou/Zappa/issues/682
                os.chmod(os.path.join(root, filename), 0o755)

                if archive_format == "zip":
                    # Actually put the file into the proper place in the zip
                    # Related: https://github.com/Miserlou/Zappa/pull/716
                    zipi = zipfile.ZipInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    zipi.create_system = 3
                    zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.writestr(zipi, f.read(), compression_method)

                elif archive_format == "tarball":
                    tarinfo = tarfile.TarInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    tarinfo.mode = 0o755

                    stat = os.stat(os.path.join(root, filename))
                    tarinfo.mtime = stat.st_mtime
                    tarinfo.size = stat.st_size
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.addfile(tarinfo, f)

            # Create python init file if it does not exist
            # Only do that if there are sub folders or python files and does not conflict with a neighbouring module
            # Related: https://github.com/Miserlou/Zappa/issues/766
            if not contains_python_files_or_subdirs(root):
                # if the directory does not contain any .py file at any level, we can skip the rest
                dirs[:] = [d for d in dirs if d != root]
            else:
                if (
                    "__init__.py" not in files
                    and not conflicts_with_a_neighbouring_module(root)
                ):
                    tmp_init = os.path.join(temp_project_path, "__init__.py")
                    open(tmp_init, "a").close()
                    os.chmod(tmp_init, 0o755)

                    arcname = os.path.join(
                        root.replace(temp_project_path, ""),
                        os.path.join(
                            root.replace(temp_project_path, ""), "__init__.py"
                        ),
                    )
                    if archive_format == "zip":
                        archivef.write(tmp_init, arcname)
                    elif archive_format == "tarball":
                        archivef.add(tmp_init, arcname)

        # And, we're done!
        archivef.close()

        # Trash the temp directory
        shutil.rmtree(temp_project_path)
        shutil.rmtree(temp_package_path)
        if os.path.isdir(venv) and slim_handler:
            # Remove the temporary handler venv folder
            shutil.rmtree(venv)

        return archive_fname
@staticmethod
def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages)
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64)
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {
package.project_name.lower(): package.version
for package in pkg_resources.WorkingSet()
if package.project_name.lower() in package_to_keep
or package.location.lower()
in [site_packages.lower(), site_packages_64.lower()]
}
return installed_packages
@staticmethod
def download_url_with_progress(url, stream, disable_progress):
"""
Downloads a given url in chunks and writes to the provided stream (can be any io stream).
Displays the progress bar for the download.
"""
resp = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 2)), stream=True
)
resp.raw.decode_content = True
progress = tqdm(
unit="B",
unit_scale=True,
total=int(resp.headers.get("Content-Length", 0)),
disable=disable_progress,
)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
progress.update(len(chunk))
stream.write(chunk)
progress.close()
def get_cached_manylinux_wheel(
self, package_name, package_version, disable_progress=False
):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), "cached_wheels")
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
else:
# Check if we already have a cached copy
wheel_name = re.sub("[^\w\d.]+", "_", package_name, re.UNICODE)
wheel_file = f"{wheel_name}-{package_version}-*_x86_64.whl"
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
for pathname in glob.iglob(wheel_path):
if re.match(self.manylinux_wheel_file_match, pathname) or re.match(
self.manylinux_wheel_abi3_file_match, pathname
):
print(
f" - {package_name}=={package_version}: Using locally cached manylinux wheel"
)
return pathname
# The file is not cached, download it.
wheel_url, filename = self.get_manylinux_wheel_url(
package_name, package_version
)
if not wheel_url:
return None
wheel_path = os.path.join(cached_wheels_dir, filename)
print(f" - {package_name}=={package_version}: Downloading")
with open(wheel_path, "wb") as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
return wheel_path
    def get_manylinux_wheel_url(self, package_name, package_version):
        """
        For a given package name, returns a link to the download URL,
        else returns None.

        Related: https://github.com/Miserlou/Zappa/issues/398
        Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae

        This function downloads metadata JSON of `package_name` from Pypi
        and examines if the package has a manylinux wheel. This function
        also caches the JSON file so that we don't have to poll Pypi
        every time.

        Returns a (url, filename) tuple, or (None, None) on any failure.
        """
        cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), "cached_pypi_info")
        if not os.path.isdir(cached_pypi_info_dir):
            os.makedirs(cached_pypi_info_dir)

        # Even though the metadata is for the package, we save it in a
        # filename that includes the package's version. This helps in
        # invalidating the cached file if the user moves to a different
        # version of the package.
        # Related: https://github.com/Miserlou/Zappa/issues/899
        json_file = "{0!s}-{1!s}.json".format(package_name, package_version)
        json_file_path = os.path.join(cached_pypi_info_dir, json_file)
        if os.path.exists(json_file_path):
            with open(json_file_path, "rb") as metafile:
                data = json.load(metafile)
        else:
            url = "https://pypi.python.org/pypi/{}/json".format(package_name)
            try:
                res = requests.get(
                    url, timeout=float(os.environ.get("PIP_TIMEOUT", 1.5))
                )
                data = res.json()
            except Exception as e:  # pragma: no cover
                # Network failure / bad JSON: treat as "no wheel available".
                return None, None
            with open(json_file_path, "wb") as metafile:
                jsondata = json.dumps(data)
                metafile.write(bytes(jsondata, "utf-8"))

        if package_version not in data["releases"]:
            return None, None

        # Return the first release file whose name matches one of our
        # compiled manylinux wheel patterns (version-specific or abi3).
        for f in data["releases"][package_version]:
            if re.match(self.manylinux_wheel_file_match, f["filename"]):
                return f["url"], f["filename"]
            elif re.match(self.manylinux_wheel_abi3_file_match, f["filename"]):
                return f["url"], f["filename"]
        return None, None
##
# S3
##
    def upload_to_s3(self, source_path, bucket_name, disable_progress=False):
        r"""
        Given a file, upload it to S3.
        Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).
        Returns True on success, false on failure.
        """
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError:
            # HEAD failed, so the bucket is missing (or inaccessible):
            # create it. Tags are applied only to buckets created here,
            # never to pre-existing ones.
            # This is really stupid S3 quirk. Technically, us-east-1 one has no S3,
            # it's actually "US Standard", or something.
            # More here: https://github.com/boto/boto3/issues/125
            if self.aws_region == "us-east-1":
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                )
            else:
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={"LocationConstraint": self.aws_region},
                )
            if self.tags:
                tags = {
                    "TagSet": [
                        {"Key": key, "Value": self.tags[key]}
                        for key in self.tags.keys()
                    ]
                }
                self.s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
        # Refuse to upload a missing or empty package file.
        if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
            print("Problem with source file {}".format(source_path))
            return False
        # Upload under the bare file name, not the full local path.
        dest_path = os.path.split(source_path)[1]
        try:
            source_size = os.stat(source_path).st_size
            print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
            progress = tqdm(
                total=float(os.path.getsize(source_path)),
                unit_scale=True,
                unit="B",
                disable=disable_progress,
            )
            # Attempt to upload to S3 using the S3 meta client with the progress bar.
            # If we're unable to do that, try one more time using a session client,
            # which cannot use the progress bar.
            # Related: https://github.com/boto/boto3/issues/611
            try:
                self.s3_client.upload_file(
                    source_path, bucket_name, dest_path, Callback=progress.update
                )
            except Exception as e:  # pragma: no cover
                self.s3_client.upload_file(source_path, bucket_name, dest_path)
            progress.close()
        except (KeyboardInterrupt, SystemExit):  # pragma: no cover
            raise
        except Exception as e:  # pragma: no cover
            print(e)
            return False
        return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
copy_src = {"Bucket": bucket_name, "Key": src_file_name}
try:
self.s3_client.copy(
CopySource=copy_src, Bucket=bucket_name, Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False
def remove_from_s3(self, file_name, bucket_name):
"""
Given a file name and a bucket, remove it from S3.
There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3.
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
try:
self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
except (
botocore.exceptions.ParamValidationError,
botocore.exceptions.ClientError,
): # pragma: no cover
return False
##
# Lambda
##
    def create_lambda_function(
        self,
        bucket=None,
        function_name=None,
        handler=None,
        s3_key=None,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        dead_letter_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        xray_tracing=False,
        local_zip=None,
        use_alb=False,
        layers=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, register that Lambda function.

        The code source is chosen in priority order: docker_image_uri,
        then local_zip (raw zip bytes), then the S3 bucket/s3_key pair.
        Returns the new function's ARN.

        NOTE(review): the `xray_tracing` parameter is never referenced in
        this body; the tracing mode is read from `self.xray_tracing`
        instead -- confirm whether the parameter should be wired through.
        """
        # Normalize optional arguments (avoiding shared mutable defaults).
        if not vpc_config:
            vpc_config = {}
        if not dead_letter_config:
            dead_letter_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not layers:
            layers = []
        kwargs = dict(
            FunctionName=function_name,
            Role=self.credentials_arn,
            Description=description,
            Timeout=timeout,
            MemorySize=memory_size,
            Publish=publish,
            VpcConfig=vpc_config,
            DeadLetterConfig=dead_letter_config,
            Environment={"Variables": aws_environment_variables},
            KMSKeyArn=aws_kms_key_arn,
            TracingConfig={"Mode": "Active" if self.xray_tracing else "PassThrough"},
            Layers=layers,
        )
        # Zip-packaged functions need a runtime and handler; image-based
        # functions must not set them.
        if not docker_image_uri:
            kwargs["Runtime"] = runtime
            kwargs["Handler"] = handler
            kwargs["PackageType"] = "Zip"
        if docker_image_uri:
            kwargs["Code"] = {"ImageUri": docker_image_uri}
            # default is ZIP. override to Image for container support
            kwargs["PackageType"] = "Image"
            # The create function operation times out when this is '' (the default)
            # So just remove it from the kwargs if it is not specified
            if aws_kms_key_arn == "":
                kwargs.pop("KMSKeyArn")
        elif local_zip:
            kwargs["Code"] = {"ZipFile": local_zip}
        else:
            kwargs["Code"] = {"S3Bucket": bucket, "S3Key": s3_key}
        response = self.lambda_client.create_function(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If we're using an ALB, let's create an alias mapped to the newly
        # created function. This allows clean, no downtime association when
        # using application load balancers as an event source.
        # See: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        if use_alb:
            self.lambda_client.create_alias(
                FunctionName=resource_arn,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=resource_arn,
                ReservedConcurrentExecutions=concurrency,
            )
        # Wait for lambda to become active, otherwise many operations will fail
        self.wait_until_lambda_function_is_active(function_name)
        return resource_arn
    def update_lambda_function(
        self,
        bucket,
        function_name,
        s3_key=None,
        publish=True,
        local_zip=None,
        num_revisions=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, update that Lambda function's code.
        Optionally, delete previous versions if they exceed the optional limit.

        Code source priority: docker_image_uri, then local_zip, then the
        S3 bucket/s3_key pair. Returns the updated function's ARN.
        """
        print("Updating Lambda function code..")
        kwargs = dict(FunctionName=function_name, Publish=publish)
        if docker_image_uri:
            kwargs["ImageUri"] = docker_image_uri
        elif local_zip:
            kwargs["ZipFile"] = local_zip
        else:
            kwargs["S3Bucket"] = bucket
            kwargs["S3Key"] = s3_key
        response = self.lambda_client.update_function_code(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If the lambda has an ALB alias, let's update the alias
        # to point to the newest version of the function. We have to use a GET
        # here, as there's no HEAD-esque call to retrieve metadata about a
        # function alias.
        # Related: https://github.com/Miserlou/Zappa/pull/1730
        # https://github.com/Miserlou/Zappa/issues/1823
        try:
            response = self.lambda_client.get_alias(
                FunctionName=function_name,
                Name=ALB_LAMBDA_ALIAS,
            )
            alias_exists = True
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # Anything other than "alias not found" is a real error.
            if "ResourceNotFoundException" not in e.response["Error"]["Code"]:
                raise e
            alias_exists = False
        if alias_exists:
            self.lambda_client.update_alias(
                FunctionName=function_name,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        # Reserve concurrency when requested, otherwise release any
        # existing reservation.
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=function_name,
                ReservedConcurrentExecutions=concurrency,
            )
        else:
            self.lambda_client.delete_function_concurrency(FunctionName=function_name)
        if num_revisions:
            # Find the existing revision IDs for the given function
            # Related: https://github.com/Miserlou/Zappa/issues/1402
            versions_in_lambda = []
            versions = self.lambda_client.list_versions_by_function(
                FunctionName=function_name
            )
            for version in versions["Versions"]:
                versions_in_lambda.append(version["Version"])
            # Follow NextMarker pages to collect every published version.
            while "NextMarker" in versions:
                versions = self.lambda_client.list_versions_by_function(
                    FunctionName=function_name, Marker=versions["NextMarker"]
                )
                for version in versions["Versions"]:
                    versions_in_lambda.append(version["Version"])
            versions_in_lambda.remove("$LATEST")
            # Delete older revisions if their number exceeds the specified limit
            for version in versions_in_lambda[::-1][num_revisions:]:
                self.lambda_client.delete_function(
                    FunctionName=function_name, Qualifier=version
                )
        self.wait_until_lambda_function_is_updated(function_name)
        return resource_arn
    def update_lambda_configuration(
        self,
        lambda_arn,
        function_name,
        handler,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        layers=None,
        wait=True,
    ):
        """
        Given an existing function ARN, update the configuration variables.

        Remote environment variables not present in the local settings are
        preserved (merged in). Returns the function ARN.

        NOTE(review): `lambda_arn` and `publish` are not referenced in this
        body -- every call here is keyed on `function_name`.
        """
        print("Updating Lambda function configuration..")
        # Normalize optional arguments (avoiding shared mutable defaults).
        if not vpc_config:
            vpc_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not layers:
            layers = []
        if wait:
            # Wait until function is ready, otherwise expected keys will be missing from 'lambda_aws_config'.
            self.wait_until_lambda_function_is_updated(function_name)
        # Check if there are any remote aws lambda env vars so they don't get trashed.
        # https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
        lambda_aws_config = self.lambda_client.get_function_configuration(
            FunctionName=function_name
        )
        if "Environment" in lambda_aws_config:
            lambda_aws_environment_variables = lambda_aws_config["Environment"].get(
                "Variables", {}
            )
            # Append keys that are remote but not in settings file
            for key, value in lambda_aws_environment_variables.items():
                if key not in aws_environment_variables:
                    aws_environment_variables[key] = value
        kwargs = {
            "FunctionName": function_name,
            "Role": self.credentials_arn,
            "Description": description,
            "Timeout": timeout,
            "MemorySize": memory_size,
            "VpcConfig": vpc_config,
            "Environment": {"Variables": aws_environment_variables},
            "KMSKeyArn": aws_kms_key_arn,
            "TracingConfig": {"Mode": "Active" if self.xray_tracing else "PassThrough"},
        }
        # Handler/runtime/layers only apply to zip-packaged functions, not
        # container images.
        if lambda_aws_config["PackageType"] != "Image":
            kwargs.update(
                {
                    "Handler": handler,
                    "Runtime": runtime,
                    "Layers": layers,
                }
            )
        response = self.lambda_client.update_function_configuration(**kwargs)
        resource_arn = response["FunctionArn"]
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        return resource_arn
def invoke_lambda_function(
self,
function_name,
payload,
invocation_type="Event",
log_type="Tail",
client_context=None,
qualifier=None,
):
"""
Directly invoke a named Lambda function with a payload.
Returns the response.
"""
return self.lambda_client.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
LogType=log_type,
Payload=payload,
)
    def rollback_lambda_function_version(
        self, function_name, versions_back=1, publish=True
    ):
        """
        Rollback the lambda function code 'versions_back' number of revisions.

        Downloads the old deployment package from Lambda and re-uploads it
        as the current code. Returns the Function ARN on success, or False
        if the requested revision is unavailable.

        :raises NotImplementedError: for Docker (image-packaged) deployments.
        """
        response = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
        # Image-based functions have no downloadable zip to roll back to.
        # https://github.com/Miserlou/Zappa/pull/2192
        if (
            len(response.get("Versions", [])) > 1
            and response["Versions"][-1]["PackageType"] == "Image"
        ):
            raise NotImplementedError(
                "Zappa's rollback functionality is not available for Docker based deployments"
            )
        # Take into account $LATEST
        if len(response["Versions"]) < versions_back + 1:
            print("We do not have {} revisions. Aborting".format(str(versions_back)))
            return False
        # Collect the numeric revision numbers, newest first.
        revisions = [
            int(revision["Version"])
            for revision in response["Versions"]
            if revision["Version"] != "$LATEST"
        ]
        revisions.sort(reverse=True)
        # Fetch metadata (including a presigned code URL) for the target
        # revision...
        response = self.lambda_client.get_function(
            FunctionName="function:{}:{}".format(
                function_name, revisions[versions_back]
            )
        )
        # ...then download the actual zip bytes from that URL.
        response = requests.get(response["Code"]["Location"])
        if response.status_code != 200:
            print(
                "Failed to get version {} of {} code".format(
                    versions_back, function_name
                )
            )
            return False
        response = self.lambda_client.update_function_code(
            FunctionName=function_name, ZipFile=response.content, Publish=publish
        )  # pragma: no cover
        return response["FunctionArn"]
def wait_until_lambda_function_is_active(self, function_name):
"""
Wait until lambda State=Active
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#waiters
waiter = self.lambda_client.get_waiter("function_active")
print(f"Waiting for lambda function [{function_name}] to become active...")
waiter.wait(FunctionName=function_name)
def wait_until_lambda_function_is_updated(self, function_name):
"""
Wait until lambda LastUpdateStatus=Successful
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#waiters
waiter = self.lambda_client.get_waiter("function_updated")
print(f"Waiting for lambda function [{function_name}] to be updated...")
waiter.wait(FunctionName=function_name)
def get_lambda_function(self, function_name):
"""
Returns the lambda function ARN, given a name
This requires the "lambda:GetFunction" role.
"""
response = self.lambda_client.get_function(FunctionName=function_name)
return response["Configuration"]["FunctionArn"]
def get_lambda_function_versions(self, function_name):
"""
Simply returns the versions available for a Lambda function, given a function name.
"""
try:
response = self.lambda_client.list_versions_by_function(
FunctionName=function_name
)
return response.get("Versions", [])
except Exception:
return []
def delete_lambda_function(self, function_name):
"""
Given a function name, delete it from AWS Lambda.
Returns the response.
"""
print("Deleting Lambda function..")
return self.lambda_client.delete_function(
FunctionName=function_name,
)
##
# Application load balancer
##
    def deploy_lambda_alb(self, lambda_arn, lambda_name, alb_vpc_config, timeout):
        """
        The `zappa deploy` functionality for ALB infrastructure.

        Creates, in order: the application load balancer, a lambda target
        group (with multi-value headers enabled), the ELB->lambda invoke
        permission, the target registration (against the ALB_LAMBDA_ALIAS
        alias), and an HTTPS listener on port 443.

        :param lambda_arn: ARN of the deployed lambda.
        :param lambda_name: Used to name the ALB and the target group.
        :param alb_vpc_config: Dict with SubnetIds, SecurityGroupIds,
            CertificateArn and optionally Scheme.
        :param timeout: Lambda timeout, mirrored onto the ALB idle timeout.
        :raises EnvironmentError: on missing config or unexpected responses.
        """
        # Validate the minimum viable ALB configuration up front.
        if not alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, alb_vpc_config must be filled out in zappa_settings."
            )
        if "SubnetIds" not in alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, you must supply two subnets in different availability zones."
            )
        if "SecurityGroupIds" not in alb_vpc_config:
            alb_vpc_config["SecurityGroupIds"] = []
        if not alb_vpc_config.get("CertificateArn"):
            raise EnvironmentError(
                "When creating an ALB, you must supply a CertificateArn for the HTTPS listener."
            )
        # Default to a public ALB unless the settings say otherwise.
        # Related: https://github.com/Miserlou/Zappa/issues/1856
        if "Scheme" not in alb_vpc_config:
            alb_vpc_config["Scheme"] = "internet-facing"
        print("Deploying ALB infrastructure...")
        # Create load balancer
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
        kwargs = dict(
            Name=lambda_name,
            Subnets=alb_vpc_config["SubnetIds"],
            SecurityGroups=alb_vpc_config["SecurityGroupIds"],
            Scheme=alb_vpc_config["Scheme"],
            # TODO: Tags might be a useful means of stock-keeping zappa-generated assets.
            # Tags=[],
            Type="application",
            # TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.
            IpAddressType="ipv4",
        )
        response = self.elbv2_client.create_load_balancer(**kwargs)
        if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        if response["LoadBalancers"][0]["State"]["Code"] == "failed":
            raise EnvironmentError(
                "Failure to create application load balancer. Response reported a failed state: {}".format(
                    response["LoadBalancers"][0]["State"]["Reason"]
                )
            )
        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
        load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]  # NOTE(review): unused below
        waiter = self.elbv2_client.get_waiter("load_balancer_available")
        print(
            "Waiting for load balancer [{}] to become active..".format(
                load_balancer_arn
            )
        )
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        # Match the lambda timeout on the load balancer.
        self.elbv2_client.modify_load_balancer_attributes(
            LoadBalancerArn=load_balancer_arn,
            Attributes=[{"Key": "idle_timeout.timeout_seconds", "Value": str(timeout)}],
        )
        # Create/associate target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
        kwargs = dict(
            Name=lambda_name,
            TargetType="lambda",
            # TODO: Add options for health checks
        )
        response = self.elbv2_client.create_target_group(**kwargs)
        if not (response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
        # Enable multi-value headers by default.
        response = self.elbv2_client.modify_target_group_attributes(
            TargetGroupArn=target_group_arn,
            Attributes=[
                {"Key": "lambda.multi_value_headers.enabled", "Value": "true"},
            ],
        )
        # Allow execute permissions from target group to lambda.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
        kwargs = dict(
            Action="lambda:InvokeFunction",
            FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
            Principal="elasticloadbalancing.amazonaws.com",
            SourceArn=target_group_arn,
            StatementId=lambda_name,
        )
        response = self.lambda_client.add_permission(**kwargs)
        # Register target group to lambda association.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
        kwargs = dict(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}],
        )
        response = self.elbv2_client.register_targets(**kwargs)
        # Bind listener to load balancer with default rule to target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
        kwargs = dict(
            # TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
            Certificates=[{"CertificateArn": alb_vpc_config["CertificateArn"]}],
            DefaultActions=[
                {
                    "Type": "forward",
                    "TargetGroupArn": target_group_arn,
                }
            ],
            LoadBalancerArn=load_balancer_arn,
            Protocol="HTTPS",
            # TODO: Add option for custom ports
            Port=443,
            # TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
        )
        response = self.elbv2_client.create_listener(**kwargs)
        print("ALB created with DNS: {}".format(load_balancer_dns))
        print("Note it may take several minutes for load balancer to become available.")
    def undeploy_lambda_alb(self, lambda_name):
        """
        The `zappa undeploy` functionality for ALB infrastructure.

        Tears down, in order: the ELB->lambda invoke permission, the
        listener and load balancer, then the target group. "Not found"
        errors at each stage are tolerated so the cleanup is idempotent.
        """
        print("Undeploying ALB infrastructure...")
        # Locate and delete alb/lambda permissions
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
            self.lambda_client.remove_permission(
                FunctionName=lambda_name, StatementId=lambda_name
            )
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            if "ResourceNotFoundException" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete load balancer
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
            response = self.elbv2_client.describe_load_balancers(Names=[lambda_name])
            if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
            response = self.elbv2_client.describe_listeners(
                LoadBalancerArn=load_balancer_arn
            )
            if not (response["Listeners"]):
                print("No listeners found.")
            elif len(response["Listeners"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            else:
                listener_arn = response["Listeners"][0]["ListenerArn"]
                # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
                # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
                response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
            # Remove the load balancer and wait for completion
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
            response = self.elbv2_client.delete_load_balancer(
                LoadBalancerArn=load_balancer_arn
            )
            waiter = self.elbv2_client.get_waiter("load_balancers_deleted")
            print("Waiting for load balancer [{}] to be deleted..".format(lambda_name))
            waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete target group
        try:
            # Locate the lambda ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
            response = self.lambda_client.get_function(FunctionName=lambda_name)
            lambda_arn = response["Configuration"]["FunctionArn"]
            # Locate the target group ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
            response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
            if not (response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB target group named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
            # Deregister targets and wait for completion
            self.elbv2_client.deregister_targets(
                TargetGroupArn=target_group_arn, Targets=[{"Id": lambda_arn}]
            )
            waiter = self.elbv2_client.get_waiter("target_deregistered")
            print("Waiting for target [{}] to be deregistered...".format(lambda_name))
            waiter.wait(
                TargetGroupArn=target_group_arn,
                Targets=[{"Id": lambda_arn}],
                WaiterConfig={"Delay": 3},
            )
            # Remove the target group
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
            self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            if "TargetGroupNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
##
# API Gateway
##
    def create_api_gateway_routes(
        self,
        lambda_arn,
        api_name=None,
        api_key_required=False,
        authorization_type="NONE",
        authorizer=None,
        cors_options=None,
        description=None,
        endpoint_configuration=None,
    ):
        """
        Create the API Gateway for this Zappa deployment.
        Returns the new RestAPI CF resource.

        Builds two proxy levels on the CF template: methods on the API root
        ("/") at depth 0, and a greedy "{proxy+}" resource at depth 1, both
        integrated with the given lambda via AWS_PROXY. Optionally attaches
        an authorizer and CORS OPTIONS mocks.
        """
        restapi = troposphere.apigateway.RestApi("Api")
        restapi.Name = api_name or lambda_arn.split(":")[-1]
        if not description:
            description = "Created automatically by Zappa."
        restapi.Description = description
        endpoint_configuration = (
            [] if endpoint_configuration is None else endpoint_configuration
        )
        # GovCloud requires regional endpoints.
        if self.boto_session.region_name == "us-gov-west-1":
            endpoint_configuration.append("REGIONAL")
        if endpoint_configuration:
            endpoint = troposphere.apigateway.EndpointConfiguration()
            endpoint.Types = list(set(endpoint_configuration))
            restapi.EndpointConfiguration = endpoint
        if self.apigateway_policy:
            restapi.Policy = json.loads(self.apigateway_policy)
        self.cf_template.add_resource(restapi)
        root_id = troposphere.GetAtt(restapi, "RootResourceId")
        # GovCloud lives in the "aws-us-gov" ARN partition.
        invocation_prefix = (
            "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
        )
        invocations_uri = (
            "arn:"
            + invocation_prefix
            + ":apigateway:"
            + self.boto_session.region_name
            + ":lambda:path/2015-03-31/functions/"
            + lambda_arn
            + "/invocations"
        )
        ##
        # The Resources
        ##
        authorizer_resource = None
        if authorizer:
            # The authorizer may use its own lambda; defaults to ours.
            authorizer_lambda_arn = authorizer.get("arn", lambda_arn)
            lambda_uri = "arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations".format(
                invocation_prefix=invocation_prefix,
                region_name=self.boto_session.region_name,
                lambda_arn=authorizer_lambda_arn,
            )
            authorizer_resource = self.create_authorizer(
                restapi, lambda_uri, authorizer
            )
        # Depth 0: methods on the API root ("/").
        self.create_and_setup_methods(
            restapi,
            root_id,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            0,
        )
        if cors_options:
            self.create_and_setup_cors(
                restapi, root_id, invocations_uri, 0, cors_options
            )
        # Depth 1: a greedy proxy resource matching every sub-path.
        resource = troposphere.apigateway.Resource("ResourceAnyPathSlashed")
        self.cf_api_resources.append(resource.title)
        resource.RestApiId = troposphere.Ref(restapi)
        resource.ParentId = root_id
        resource.PathPart = "{proxy+}"
        self.cf_template.add_resource(resource)
        self.create_and_setup_methods(
            restapi,
            resource,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            1,
        )  # pragma: no cover
        if cors_options:
            self.create_and_setup_cors(
                restapi, resource, invocations_uri, 1, cors_options
            )  # pragma: no cover
        return restapi
def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get("validation_expression", None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = (
"method.request.header.%s" % authorizer.get("token_header", "Authorization")
)
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = (
identity_validation_expression
)
if authorizer_type == "TOKEN":
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get(
"result_ttl", 300
)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == "COGNITO_USER_POOLS":
authorizer_resource.ProviderARNs = authorizer.get("provider_arns")
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource
    def create_and_setup_methods(
        self,
        restapi,
        resource,
        api_key_required,
        uri,
        authorization_type,
        authorizer_resource,
        depth,
    ):
        """
        Set up the methods, integration responses and method responses for a given API Gateway resource.

        :param restapi: The troposphere RestApi resource.
        :param resource: Either a troposphere Resource, or the root
            resource id (a GetAtt).
        :param uri: The lambda invocations URI to proxy requests to.
        :param depth: Suffix used to uniquify the CF logical names.
        """
        for method_name in self.http_methods:
            # One CF Method resource per HTTP verb, named e.g. "GET0".
            method = troposphere.apigateway.Method(method_name + str(depth))
            method.RestApiId = troposphere.Ref(restapi)
            # The root resource id is a GetAtt, not a Resource; only wrap
            # real Resource objects in a Ref.
            if type(resource) is troposphere.apigateway.Resource:
                method.ResourceId = troposphere.Ref(resource)
            else:
                method.ResourceId = resource
            method.HttpMethod = method_name.upper()
            method.AuthorizationType = authorization_type
            if authorizer_resource:
                method.AuthorizerId = troposphere.Ref(authorizer_resource)
            method.ApiKeyRequired = api_key_required
            method.MethodResponses = []
            self.cf_template.add_resource(method)
            self.cf_api_resources.append(method.title)
            if not self.credentials_arn:
                self.get_credentials_arn()
            credentials = self.credentials_arn  # This must be a Role ARN
            # Proxy every verb straight through to the Lambda invocation
            # URI with an AWS_PROXY integration.
            integration = troposphere.apigateway.Integration()
            integration.CacheKeyParameters = []
            integration.CacheNamespace = "none"
            integration.Credentials = credentials
            integration.IntegrationHttpMethod = "POST"
            integration.IntegrationResponses = []
            integration.PassthroughBehavior = "NEVER"
            integration.Type = "AWS_PROXY"
            integration.Uri = uri
            method.Integration = integration
    def create_and_setup_cors(self, restapi, resource, uri, depth, config):
        """
        Set up an OPTIONS method with a MOCK integration that answers CORS
        preflight requests for a given API Gateway resource.

        :param config: True for all defaults, or a dict possibly containing
            "allowed_headers", "allowed_methods" and "allowed_origin".
        """
        # `cors: true` in settings means "use all defaults".
        if config is True:
            config = {}
        method_name = "OPTIONS"
        method = troposphere.apigateway.Method(method_name + str(depth))
        method.RestApiId = troposphere.Ref(restapi)
        # The root resource id is a GetAtt, not a Resource; only wrap
        # real Resource objects in a Ref.
        if type(resource) is troposphere.apigateway.Resource:
            method.ResourceId = troposphere.Ref(resource)
        else:
            method.ResourceId = resource
        method.HttpMethod = method_name.upper()
        method.AuthorizationType = "NONE"
        method_response = troposphere.apigateway.MethodResponse()
        method_response.ResponseModels = {"application/json": "Empty"}
        # Static header values must be wrapped in single quotes for
        # API Gateway response parameter mappings.
        response_headers = {
            "Access-Control-Allow-Headers": "'%s'"
            % ",".join(
                config.get(
                    "allowed_headers",
                    [
                        "Content-Type",
                        "X-Amz-Date",
                        "Authorization",
                        "X-Api-Key",
                        "X-Amz-Security-Token",
                    ],
                )
            ),
            "Access-Control-Allow-Methods": "'%s'"
            % ",".join(
                config.get(
                    "allowed_methods",
                    ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"],
                )
            ),
            "Access-Control-Allow-Origin": "'%s'" % config.get("allowed_origin", "*"),
        }
        method_response.ResponseParameters = {
            "method.response.header.%s" % key: True for key in response_headers
        }
        method_response.StatusCode = "200"
        method.MethodResponses = [method_response]
        self.cf_template.add_resource(method)
        self.cf_api_resources.append(method.title)
        # MOCK integration: API Gateway answers the preflight itself,
        # without invoking the lambda.
        integration = troposphere.apigateway.Integration()
        integration.Type = "MOCK"
        integration.PassthroughBehavior = "NEVER"
        integration.RequestTemplates = {"application/json": '{"statusCode": 200}'}
        integration_response = troposphere.apigateway.IntegrationResponse()
        integration_response.ResponseParameters = {
            "method.response.header.%s" % key: value
            for key, value in response_headers.items()
        }
        integration_response.ResponseTemplates = {"application/json": ""}
        integration_response.StatusCode = "200"
        integration.IntegrationResponses = [integration_response]
        integration.Uri = uri
        method.Integration = integration
def deploy_api_gateway(
self,
api_id,
stage_name,
stage_description="",
description="",
cache_cluster_enabled=False,
cache_cluster_size="0.5",
variables=None,
cloudwatch_log_level="OFF",
cloudwatch_data_trace=False,
cloudwatch_metrics_enabled=False,
cache_cluster_ttl=300,
cache_cluster_encrypted=False,
):
"""
Deploy the API Gateway!
Return the deployed API URL.
"""
print("Deploying API Gateway..")
self.apigateway_client.create_deployment(
restApiId=api_id,
stageName=stage_name,
stageDescription=stage_description,
description=description,
cacheClusterEnabled=cache_cluster_enabled,
cacheClusterSize=cache_cluster_size,
variables=variables or {},
)
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = "OFF"
self.apigateway_client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
self.get_patch_op("logging/loglevel", cloudwatch_log_level),
self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
self.get_patch_op("caching/ttlInSeconds", str(cache_cluster_ttl)),
self.get_patch_op("caching/dataEncrypted", cache_cluster_encrypted),
],
)
return "https://{}.execute-api.{}.amazonaws.com/{}".format(
api_id, self.boto_session.region_name, stage_name
)
def add_binary_support(self, api_id, cors=False):
    """
    Enable "*/*" binary media type support on a REST API.

    When CORS is enabled, also set CONVERT_TO_TEXT content handling on
    every OPTIONS method integration, because binary support and CORS
    do not work together otherwise (issues 699 / 1035).
    """
    response = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "*/*" not in response.get("binaryMediaTypes", []):
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[{"op": "add", "path": "/binaryMediaTypes/*~1*"}],
        )
    if not cors:
        return
    # Update contentHandling on each resource with an OPTIONS (preflight) method.
    response = self.apigateway_client.get_resources(restApiId=api_id)
    for item in response["items"]:
        if "OPTIONS" not in item.get("resourceMethods", {}):
            continue
        self.apigateway_client.update_integration(
            restApiId=api_id,
            resourceId=item["id"],
            httpMethod="OPTIONS",
            patchOperations=[
                {
                    "op": "replace",
                    "path": "/contentHandling",
                    "value": "CONVERT_TO_TEXT",
                }
            ],
        )
def remove_binary_support(self, api_id, cors=False):
    """
    Remove "*/*" binary media type support from a REST API.

    When CORS is enabled, also reset the contentHandling type on every
    OPTIONS method integration.
    """
    response = self.apigateway_client.get_rest_api(restApiId=api_id)
    if "*/*" in response.get("binaryMediaTypes", []):
        self.apigateway_client.update_rest_api(
            restApiId=api_id,
            patchOperations=[{"op": "remove", "path": "/binaryMediaTypes/*~1*"}],
        )
    if not cors:
        return
    # Reset contentHandling on each resource exposing an OPTIONS method.
    response = self.apigateway_client.get_resources(restApiId=api_id)
    for item in response["items"]:
        if "OPTIONS" not in item.get("resourceMethods", {}):
            continue
        self.apigateway_client.update_integration(
            restApiId=api_id,
            resourceId=item["id"],
            httpMethod="OPTIONS",
            patchOperations=[
                {"op": "replace", "path": "/contentHandling", "value": ""}
            ],
        )
def add_api_compression(self, api_id, min_compression_size):
    """
    Enable payload compression on a REST API.

    `min_compression_size` is the response-size threshold, in bytes,
    above which compression kicks in.
    """
    patch_op = {
        "op": "replace",
        "path": "/minimumCompressionSize",
        "value": str(min_compression_size),
    }
    self.apigateway_client.update_rest_api(
        restApiId=api_id, patchOperations=[patch_op]
    )
def remove_api_compression(self, api_id):
    """
    Disable payload compression on a REST API.

    Replacing /minimumCompressionSize without a value clears the setting.
    """
    patch_op = {"op": "replace", "path": "/minimumCompressionSize"}
    self.apigateway_client.update_rest_api(
        restApiId=api_id, patchOperations=[patch_op]
    )
def get_api_keys(self, api_id, stage_name):
    """
    Generator that allows to iterate per API keys associated to an api_id and a stage_name.

    Follows the `position` pagination token, so keys beyond the 500-item
    first page are no longer silently skipped.
    """
    stage_key = "{}/{}".format(api_id, stage_name)
    position = None
    while True:
        kwargs = {"limit": 500}
        if position:
            kwargs["position"] = position
        response = self.apigateway_client.get_api_keys(**kwargs)
        for api_key in response.get("items"):
            if stage_key in api_key.get("stageKeys"):
                yield api_key.get("id")
        position = response.get("position")
        if not position:
            break
def create_api_key(self, api_id, stage_name):
    """
    Create a new enabled API key and link it to the given
    api_id / stage_name pair.
    """
    stage_keys = [
        {
            "restApiId": "{}".format(api_id),
            "stageName": "{}".format(stage_name),
        },
    ]
    response = self.apigateway_client.create_api_key(
        name="{}_{}".format(stage_name, api_id),
        description="Api Key for {}".format(api_id),
        enabled=True,
        stageKeys=stage_keys,
    )
    print("Created a new x-api-key: {}".format(response["id"]))
def remove_api_key(self, api_id, stage_name):
    """
    Delete the API key(s) that were generated for this
    api_id / stage_name pair.
    """
    name_query = "{}_{}".format(stage_name, api_id)
    response = self.apigateway_client.get_api_keys(limit=1, nameQuery=name_query)
    for api_key in response.get("items"):
        self.apigateway_client.delete_api_key(apiKey="{}".format(api_key["id"]))
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
    """
    Associate an API stage (api_id/stage_name) with an existing API key.
    """
    patch_op = {
        "op": "add",
        "path": "/stages",
        "value": "{}/{}".format(api_id, stage_name),
    }
    self.apigateway_client.update_api_key(
        apiKey=api_key, patchOperations=[patch_op]
    )
def get_patch_op(self, keypath, value, op="replace"):
    """
    Build a patch operation applied to every HTTP method of every
    resource ("/*/*") for the given stage settings key.

    Booleans are serialized as the lowercase strings the API expects.
    """
    serialized = str(value).lower() if isinstance(value, bool) else value
    return {"op": op, "path": "/*/*/{}".format(keypath), "value": serialized}
def get_rest_apis(self, project_name):
    """
    Yield every available REST API whose name matches project_name.
    """
    all_apis = self.apigateway_client.get_rest_apis(limit=500)
    for api in all_apis["items"]:
        if api["name"] == project_name:
            yield api
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
    """
    Delete a deployed REST API Gateway.

    If a custom domain is supplied, its base path mapping is removed
    first (best effort). The CloudFormation stack is then deleted,
    falling back to deleting matching REST APIs directly for projects
    deployed with pre-CloudFormation versions of Zappa.
    """
    print("Deleting API Gateway..")
    # NOTE: the original also fetched the API id here, but never used it;
    # that dead lookup has been removed.
    if domain_name:
        # XXX - Remove Route53 smartly here?
        # XXX - This doesn't raise, but doesn't work either.
        try:
            self.apigateway_client.delete_base_path_mapping(
                domainName=domain_name,
                basePath="(none)" if base_path is None else base_path,
            )
        except Exception:
            # Best effort: we may not have actually set up the domain.
            pass
    was_deleted = self.delete_stack(lambda_name, wait=True)
    if not was_deleted:
        # try erasing it with the older, non-CloudFormation method
        for api in self.get_rest_apis(lambda_name):
            self.apigateway_client.delete_rest_api(restApiId=api["id"])
def update_stage_config(
    self,
    project_name,
    stage_name,
    cloudwatch_log_level,
    cloudwatch_data_trace,
    cloudwatch_metrics_enabled,
):
    """
    Update the CloudWatch logging/metrics configuration on the given
    stage of every REST API belonging to this project.
    """
    # Unknown log levels fall back to disabled logging.
    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = "OFF"
    patch_operations = [
        self.get_patch_op("logging/loglevel", cloudwatch_log_level),
        self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
        self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
    ]
    for api in self.get_rest_apis(project_name):
        self.apigateway_client.update_stage(
            restApiId=api["id"],
            stageName=stage_name,
            patchOperations=patch_operations,
        )
def update_cognito(self, lambda_name, user_pool, lambda_configs, lambda_arn):
    """
    Wire a Cognito user pool's Lambda triggers to this function.

    `lambda_configs` is an iterable of trigger names (e.g. "PreSignUp");
    each is pointed at `lambda_arn`. The rest of the pool configuration
    is preserved, and the function is granted invoke permission for the
    pool. Fixes: removed a leftover debug print of the raw LambdaConfig.
    """
    LambdaConfig = {}
    for config in lambda_configs:
        LambdaConfig[config] = lambda_arn
    description = self.cognito_client.describe_user_pool(UserPoolId=user_pool)
    description_kwargs = {}
    for key, value in description["UserPool"].items():
        # Only forward the keys update_user_pool accepts.
        if key in (
            "UserPoolId",
            "Policies",
            "AutoVerifiedAttributes",
            "SmsVerificationMessage",
            "EmailVerificationMessage",
            "EmailVerificationSubject",
            "VerificationMessageTemplate",
            "SmsAuthenticationMessage",
            "MfaConfiguration",
            "DeviceConfiguration",
            "EmailConfiguration",
            "SmsConfiguration",
            "UserPoolTags",
            "AdminCreateUserConfig",
        ):
            description_kwargs[key] = value
        elif key == "LambdaConfig":
            # Overwrite only the triggers we manage; keep any others.
            for lckey, lcvalue in value.items():
                if lckey in LambdaConfig:
                    value[lckey] = LambdaConfig[lckey]
            description_kwargs[key] = value
    if "LambdaConfig" not in description_kwargs:
        description_kwargs["LambdaConfig"] = LambdaConfig
    # UnusedAccountValidityDays was superseded by
    # Policies.PasswordPolicy.TemporaryPasswordValidityDays; sending both
    # is rejected, so keep only the new-style setting.
    if (
        "TemporaryPasswordValidityDays"
        in description_kwargs["Policies"]["PasswordPolicy"]
    ):
        description_kwargs["AdminCreateUserConfig"].pop(
            "UnusedAccountValidityDays", None
        )
    if "UnusedAccountValidityDays" in description_kwargs["AdminCreateUserConfig"]:
        description_kwargs["Policies"]["PasswordPolicy"][
            "TemporaryPasswordValidityDays"
        ] = description_kwargs["AdminCreateUserConfig"].pop(
            "UnusedAccountValidityDays", None
        )
    result = self.cognito_client.update_user_pool(
        UserPoolId=user_pool, **description_kwargs
    )
    if result["ResponseMetadata"]["HTTPStatusCode"] != 200:
        print("Cognito: Failed to update user pool", result)
    # Now we need to add a policy to the IAM that allows cognito access
    result = self.create_event_permission(
        lambda_name,
        "cognito-idp.amazonaws.com",
        "arn:aws:cognito-idp:{}:{}:userpool/{}".format(
            self.aws_region,
            self.sts_client.get_caller_identity().get("Account"),
            user_pool,
        ),
    )
    if result["ResponseMetadata"]["HTTPStatusCode"] != 201:
        print("Cognito: Failed to update lambda permission", result)
def delete_stack(self, name, wait=False):
    """
    Delete the CF stack managed by Zappa.

    Only stacks carrying a matching ZappaProject tag are deleted.
    Returns True when a deletion was started, False otherwise.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)["Stacks"][0]
    except:  # pragma: no cover
        print("No Zappa stack named {0}".format(name))
        return False
    tags = {tag["Key"]: tag["Value"] for tag in stack["Tags"]}
    if tags.get("ZappaProject") != name:
        print("ZappaProject tag not found on {0}, doing nothing".format(name))
        return False
    self.cf_client.delete_stack(StackName=name)
    if wait:
        # Block until CloudFormation reports the stack gone.
        waiter = self.cf_client.get_waiter("stack_delete_complete")
        print("Waiting for stack {0} to be deleted..".format(name))
        waiter.wait(StackName=name)
    return True
def create_stack_template(
    self,
    lambda_arn,
    lambda_name,
    api_key_required,
    iam_authorization,
    authorizer,
    cors_options=None,
    description=None,
    endpoint_configuration=None,
):
    """
    Build the entire CF stack.
    Just used for the API Gateway, but could be expanded in the future.

    Returns the troposphere Template (also stored on self.cf_template).
    Fixes: deprecated `logger.warn` -> `logger.warning`; dropped the
    unused `restapi` local.
    """
    auth_type = "NONE"
    if iam_authorization and authorizer:
        # IAM auth wins when both are requested; the authorizer is dropped.
        logger.warning(
            "Both IAM Authorization and Authorizer are specified, this is not possible. "
            "Setting Auth method to IAM Authorization"
        )
        authorizer = None
        auth_type = "AWS_IAM"
    elif iam_authorization:
        auth_type = "AWS_IAM"
    elif authorizer:
        auth_type = authorizer.get("type", "CUSTOM")
    # build a fresh template
    self.cf_template = troposphere.Template()
    self.cf_template.set_description("Automatically generated with Zappa")
    self.cf_api_resources = []
    self.cf_parameters = {}
    self.create_api_gateway_routes(
        lambda_arn,
        api_name=lambda_name,
        api_key_required=api_key_required,
        authorization_type=auth_type,
        authorizer=authorizer,
        cors_options=cors_options,
        description=description,
        endpoint_configuration=endpoint_configuration,
    )
    return self.cf_template
def update_stack(
    self,
    name,
    working_bucket,
    wait=False,
    update_only=False,
    disable_progress=False,
):
    """
    Update or create the CF stack managed by Zappa.

    The rendered template is written locally, uploaded to
    `working_bucket`, and handed to CloudFormation by URL. With `wait`,
    polls the stack every 3 seconds, driving a tqdm progress bar from
    the count of completed resources, and raises EnvironmentError if
    the stack lands in a failed/rollback state. The local and S3 copies
    of the template are removed at the end.
    """
    capabilities = []
    # Timestamped template name avoids collisions between deploys.
    template = name + "-template-" + str(int(time.time())) + ".json"
    with open(template, "wb") as out:
        out.write(
            bytes(
                self.cf_template.to_json(indent=None, separators=(",", ":")),
                "utf-8",
            )
        )
    self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
    # GovCloud uses a region-specific S3 endpoint.
    if self.boto_session.region_name == "us-gov-west-1":
        url = "https://s3-us-gov-west-1.amazonaws.com/{0}/{1}".format(
            working_bucket, template
        )
    else:
        url = "https://s3.amazonaws.com/{0}/{1}".format(working_bucket, template)
    # Preserve user tags but force ZappaProject to this stack's name.
    tags = [
        {"Key": key, "Value": self.tags[key]}
        for key in self.tags.keys()
        if key != "ZappaProject"
    ]
    tags.append({"Key": "ZappaProject", "Value": name})
    update = True
    try:
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        # Stack does not exist yet, so this will be a create.
        update = False
    if update_only and not update:
        print("CloudFormation stack missing, re-deploy to enable updates")
        return
    if not update:
        self.cf_client.create_stack(
            StackName=name, Capabilities=capabilities, TemplateURL=url, Tags=tags
        )
        print(
            "Waiting for stack {0} to create (this can take a bit)..".format(name)
        )
    else:
        try:
            self.cf_client.update_stack(
                StackName=name,
                Capabilities=capabilities,
                TemplateURL=url,
                Tags=tags,
            )
            print("Waiting for stack {0} to update..".format(name))
        except botocore.client.ClientError as e:
            # A no-op update is not an error; just skip the wait below.
            if e.response["Error"]["Message"] == "No updates are to be performed.":
                wait = False
            else:
                raise
    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator("list_stack_resources")
        progress = tqdm(total=total_resources, unit="res", disable=disable_progress)
        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result["Stacks"]:
                continue  # might need to wait a bit
            if result["Stacks"][0]["StackStatus"] in [
                "CREATE_COMPLETE",
                "UPDATE_COMPLETE",
            ]:
                break
            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result["Stacks"][0]["StackStatus"] in [
                "DELETE_COMPLETE",
                "DELETE_IN_PROGRESS",
                "ROLLBACK_IN_PROGRESS",
                "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
                "UPDATE_ROLLBACK_COMPLETE",
            ]:
                raise EnvironmentError(
                    "Stack creation failed. "
                    "Please check your CloudFormation console. "
                    "You may also need to `undeploy`."
                )
            # Count resources in a COMPLETE state to advance the bar.
            count = 0
            for result in sr.paginate(StackName=name):
                done = (
                    1
                    for x in result["StackResourceSummaries"]
                    if "COMPLETE" in x["ResourceStatus"]
                )
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
            current_resources = count
        progress.close()
    try:
        os.remove(template)
    except OSError:
        pass
    # The uploaded template is only needed for the duration of the call.
    self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
    """
    Describe the named CloudFormation stack and return its Outputs as a
    dict; returns an empty dict when the stack cannot be described.
    """
    try:
        stack = self.cf_client.describe_stacks(StackName=name)["Stacks"][0]
        outputs = stack["Outputs"]
        return {entry["OutputKey"]: entry["OutputValue"] for entry in outputs}
    except botocore.client.ClientError:
        return {}
def get_api_url(self, lambda_name, stage_name):
    """
    Return the invoke URL for this Lambda's deployed API, or None when
    no API id can be found.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        return None
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def get_api_id(self, lambda_name):
    """
    Look up the API Gateway id for this Lambda.

    Prefers the CloudFormation stack resource; falls back to scanning
    REST APIs by name for projects deployed before the CF template era.
    """
    try:
        detail = self.cf_client.describe_stack_resource(
            StackName=lambda_name, LogicalResourceId="Api"
        )["StackResourceDetail"]
        return detail.get("PhysicalResourceId", None)
    except:  # pragma: no cover
        pass
    try:
        # Try the old method (project was probably made on an older, non CF version)
        apis = self.apigateway_client.get_rest_apis(limit=500)
        for item in apis["items"]:
            if item["name"] == lambda_name:
                return item["id"]
        logger.exception("Could not get API ID.")
        return None
    except:  # pragma: no cover
        # We don't even have an API deployed. That's okay!
        return None
def create_domain_name(
    self,
    domain_name,
    certificate_name,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    base_path=None,
):
    """
    Creates the API GW domain and returns the resulting DNS name.

    With certificate_arn set, an AWS ACM-hosted certificate is used;
    otherwise the supplied certificate material (Let's Encrypt or
    custom) is uploaded directly. Raises LookupError when no API has
    been deployed for lambda_name.
    """
    if certificate_arn:
        # AWS ACM-hosted certificate.
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateArn=certificate_arn,
        )
    else:
        # Let's Encrypt or custom certificate material.
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateBody=certificate_body,
            certificatePrivateKey=certificate_private_key,
            certificateChain=certificate_chain,
        )
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        raise LookupError("No API URL to certify found - did you deploy?")
    self.apigateway_client.create_base_path_mapping(
        domainName=domain_name,
        basePath="" if base_path is None else base_path,
        restApiId=api_id,
        stage=stage,
    )
    return agw_response["distributionDomainName"]
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)
    zone_name = self.route53.get_hosted_zone(Id=zone_id)["HostedZone"]["Name"]
    # The hosted zone name carries a trailing dot; strip it to compare.
    if zone_name[:-1] == domain_name:
        # Zone apex: CNAMEs are not allowed there, so use an alias A record.
        record_set = {
            "Name": domain_name,
            "Type": "A",
            "AliasTarget": {
                "HostedZoneId": "Z2FDTNDATAQYW2",  # This is a magic value that means "CloudFront"
                "DNSName": dns_name,
                "EvaluateTargetHealth": False,
            },
        }
    else:
        record_set = {
            "Name": domain_name,
            "Type": "CNAME",
            "ResourceRecords": [{"Value": dns_name}],
            "TTL": 60,
        }
    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]
    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [{"Action": "UPSERT", "ResourceRecordSet": record_set}]
        },
    )
def update_domain_name(
    self,
    domain_name,
    certificate_name=None,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    route53=True,
    base_path=None,
):
    """
    This updates your certificate information for an existing domain,
    with similar arguments to boto's update_domain_name API Gateway api.
    It returns the resulting new domain information including the new certificate's ARN
    if created during this process.
    Previously, this method involved downtime that could take up to 40 minutes
    because the API Gateway api only allowed this by deleting, and then creating it.
    Related issues: https://github.com/Miserlou/Zappa/issues/590
    https://github.com/Miserlou/Zappa/issues/588
    https://github.com/Miserlou/Zappa/pull/458
    https://github.com/Miserlou/Zappa/issues/882
    https://github.com/Miserlou/Zappa/pull/883
    """
    print("Updating domain name!")
    # Timestamp suffix keeps certificate names unique across updates.
    certificate_name = certificate_name + str(time.time())
    # NOTE(review): fetched but never used below; presumably serves as an
    # existence check (raises if the domain is missing) — confirm.
    api_gateway_domain = self.apigateway_client.get_domain_name(
        domainName=domain_name
    )
    # If raw certificate material was supplied (and no ACM ARN), import it
    # into ACM first and use the resulting ARN in the patch below.
    if (
        not certificate_arn
        and certificate_body
        and certificate_private_key
        and certificate_chain
    ):
        acm_certificate = self.acm_client.import_certificate(
            Certificate=certificate_body,
            PrivateKey=certificate_private_key,
            CertificateChain=certificate_chain,
        )
        certificate_arn = acm_certificate["CertificateArn"]
    self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)
    return self.apigateway_client.update_domain_name(
        domainName=domain_name,
        patchOperations=[
            {
                "op": "replace",
                "path": "/certificateName",
                "value": certificate_name,
            },
            {"op": "replace", "path": "/certificateArn", "value": certificate_arn},
        ],
    )
def update_domain_base_path_mapping(
    self, domain_name, lambda_name, stage, base_path
):
    """
    Update domain base path mapping on API Gateway if it was changed
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        print("Warning! Can't update base path mapping!")
        return
    mappings = self.apigateway_client.get_base_path_mappings(
        domainName=domain_name
    )
    found = False
    for mapping in mappings.get("items", []):
        if mapping["restApiId"] != api_id or mapping["stage"] != stage:
            continue
        found = True
        if mapping["basePath"] != base_path:
            # Patch the existing mapping in place.
            self.apigateway_client.update_base_path_mapping(
                domainName=domain_name,
                basePath=mapping["basePath"],
                patchOperations=[
                    {
                        "op": "replace",
                        "path": "/basePath",
                        "value": "" if base_path is None else base_path,
                    }
                ],
            )
    if not found:
        # No mapping exists yet for this API/stage; create one.
        self.apigateway_client.create_base_path_mapping(
            domainName=domain_name,
            basePath="" if base_path is None else base_path,
            restApiId=api_id,
            stage=stage,
        )
def get_all_zones(self):
    """Same behaviour of list_host_zones, but transparently handling pagination."""
    hosted_zones = []
    page = self.route53.list_hosted_zones(MaxItems="100")
    while page["IsTruncated"]:
        hosted_zones.extend(page["HostedZones"])
        page = self.route53.list_hosted_zones(
            Marker=page["NextMarker"], MaxItems="100"
        )
    hosted_zones.extend(page["HostedZones"])
    return {"HostedZones": hosted_zones}
def get_domain_name(self, domain_name, route53=True):
    """
    Scan our hosted zones for the record of a given name.
    Returns the record entry, else None.
    """
    # Make sure the API Gateway domain exists at all first.
    try:
        self.apigateway_client.get_domain_name(domainName=domain_name)
    except Exception:
        return None
    if not route53:
        return True
    try:
        for zone in self.get_all_zones()["HostedZones"]:
            record_sets = self.route53.list_resource_record_sets(
                HostedZoneId=zone["Id"]
            )
            for record in record_sets["ResourceRecordSets"]:
                # Record names carry a trailing dot; strip it to compare.
                is_match = (
                    record["Type"] in ("CNAME", "A")
                    and record["Name"][:-1] == domain_name
                )
                if is_match:
                    return record
    except Exception:
        return None
    # Historical note: older versions deleted a dangling API Gateway
    # domain here so the Route53 entry could be recreated.
    # Related ticket: https://github.com/Miserlou/Zappa/pull/458
    return None
##
# IAM
##
def get_credentials_arn(self):
    """
    Resolve our IAM role and cache its ARN on self.credentials_arn.

    Returns a (role, arn) tuple.
    """
    role = self.iam.Role(self.role_name)
    arn = role.arn
    self.credentials_arn = arn
    return role, arn
def create_iam_roles(self):
    """
    Create and defines the IAM roles and policies necessary for Zappa.
    If the IAM role already exists, it will be updated if necessary.

    Returns (credentials_arn, updated) where `updated` is True when any
    role or policy was created or changed.
    """
    attach_policy_obj = json.loads(self.attach_policy)
    assume_policy_obj = json.loads(self.assume_policy)
    # Merge user-configured extra permissions into the attach policy.
    if self.extra_permissions:
        for permission in self.extra_permissions:
            attach_policy_obj["Statement"].append(dict(permission))
        self.attach_policy = json.dumps(attach_policy_obj)
    updated = False
    # Create the role if needed
    try:
        role, credentials_arn = self.get_credentials_arn()
    except botocore.client.ClientError:
        print("Creating " + self.role_name + " IAM Role..")
        role = self.iam.create_role(
            RoleName=self.role_name, AssumeRolePolicyDocument=self.assume_policy
        )
        self.credentials_arn = role.arn
        updated = True
    # create or update the role's policies if needed
    policy = self.iam.RolePolicy(self.role_name, "zappa-permissions")
    try:
        if policy.policy_document != attach_policy_obj:
            print(
                "Updating zappa-permissions policy on "
                + self.role_name
                + " IAM Role."
            )
            policy.put(PolicyDocument=self.attach_policy)
            updated = True
    except botocore.client.ClientError:
        # Reading the policy failed, so it does not exist yet: create it.
        print(
            "Creating zappa-permissions policy on " + self.role_name + " IAM Role."
        )
        policy.put(PolicyDocument=self.attach_policy)
        updated = True
    # NOTE(review): this requires BOTH the full document and the service
    # list to differ before updating; an `or` may have been intended —
    # confirm before changing.
    if role.assume_role_policy_document != assume_policy_obj and set(
        role.assume_role_policy_document["Statement"][0]["Principal"]["Service"]
    ) != set(assume_policy_obj["Statement"][0]["Principal"]["Service"]):
        print("Updating assume role policy on " + self.role_name + " IAM Role.")
        self.iam_client.update_assume_role_policy(
            RoleName=self.role_name, PolicyDocument=self.assume_policy
        )
        updated = True
    return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
    """
    Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.
    """
    try:
        policy_response = self.lambda_client.get_policy(FunctionName=lambda_name)
        if policy_response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            logger.debug(
                "Failed to load Lambda function policy: {}".format(policy_response)
            )
            return
        for statement in json.loads(policy_response["Policy"])["Statement"]:
            delete_response = self.lambda_client.remove_permission(
                FunctionName=lambda_name, StatementId=statement["Sid"]
            )
            if delete_response["ResponseMetadata"]["HTTPStatusCode"] != 204:
                logger.error(
                    "Failed to delete an obsolete policy statement: {}".format(
                        policy_response
                    )
                )
    except ClientError as e:
        if e.args[0].find("ResourceNotFoundException") > -1:
            logger.debug("No policy found, must be first run.")
        else:
            logger.error("Unexpected client error {}".format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
    """
    Create permissions to link to an event.
    Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html
    """
    logger.debug(
        "Adding new permission to invoke Lambda function: {}".format(lambda_name)
    )
    # Random statement id: AWS requires uniqueness within the policy.
    statement_id = "".join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(8)
    )
    account_id: str = self.sts_client.get_caller_identity().get("Account")
    permission_response = self.lambda_client.add_permission(
        FunctionName=lambda_name,
        StatementId=statement_id,
        Action="lambda:InvokeFunction",
        Principal=principal,
        SourceArn=source_arn,
        # The SourceAccount argument ensures that only the specified AWS account can invoke the lambda function.
        # This prevents a security issue where if a lambda is triggered off of s3 bucket events and the bucket is
        # deleted, another AWS account can create a bucket with the same name and potentially trigger the original
        # lambda function, since bucket names are global.
        # https://github.com/zappa/Zappa/issues/1039
        SourceAccount=account_id,
    )
    if permission_response["ResponseMetadata"]["HTTPStatusCode"] != 201:
        print("Problem creating permission to invoke Lambda function")
        return None  # XXX: Raise?
    return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.
    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
    Expressions can be in rate or cron format:
    http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    """
    # The stream sources - DynamoDB, Kinesis and SQS - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ["dynamodb", "kinesis", "sqs"]
    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"
    # Start from a clean slate: drop existing rules and permissions first.
    self.unschedule_events(
        lambda_name=lambda_name,
        lambda_arn=lambda_arn,
        events=events,
        excluded_source_services=pull_services,
    )
    for event in events:
        function = event["function"]
        expression = event.get("expression", None)  # single expression
        expressions = event.get("expressions", None)  # multiple expression
        kwargs = event.get(
            "kwargs", {}
        )  # optional dict of keyword arguments for the event
        event_source = event.get("event_source", None)
        description = event.get("description", function)
        # - If 'cron' or 'rate' in expression, use ScheduleExpression
        # - Else, use EventPattern
        #   - ex https://github.com/awslabs/aws-lambda-ddns-function
        if not self.credentials_arn:
            self.get_credentials_arn()
        if expression:
            expressions = [
                expression
            ]  # same code for single and multiple expression
        if expressions:
            for index, expression in enumerate(expressions):
                rule_name = self.get_scheduled_event_name(
                    event,
                    function,
                    lambda_name,
                    index,
                )
                rule_response = self.events_client.put_rule(
                    Name=rule_name,
                    ScheduleExpression=expression,
                    State="ENABLED",
                    Description=description,
                    RoleArn=self.credentials_arn,
                )
                if "RuleArn" in rule_response:
                    logger.debug(
                        "Rule created. ARN {}".format(rule_response["RuleArn"])
                    )
                # Specific permissions are necessary for any trigger to work.
                self.create_event_permission(
                    lambda_name, "events.amazonaws.com", rule_response["RuleArn"]
                )
                # Overwriting the input, supply the original values and add kwargs
                input_template = (
                    '{"time": <time>, '
                    '"detail-type": <detail-type>, '
                    '"source": <source>,'
                    '"account": <account>, '
                    '"region": <region>,'
                    '"detail": <detail>, '
                    '"version": <version>,'
                    '"resources": <resources>,'
                    '"id": <id>,'
                    '"kwargs": %s'
                    "}" % json.dumps(kwargs)
                )
                # Create the CloudWatch event ARN for this function.
                # https://github.com/Miserlou/Zappa/issues/359
                target_response = self.events_client.put_targets(
                    Rule=rule_name,
                    Targets=[
                        {
                            # Random 12-digit suffix keeps target ids unique.
                            "Id": "Id"
                            + "".join(
                                random.choice(string.digits) for _ in range(12)
                            ),
                            "Arn": lambda_arn,
                            "InputTransformer": {
                                "InputPathsMap": {
                                    "time": "$.time",
                                    "detail-type": "$.detail-type",
                                    "source": "$.source",
                                    "account": "$.account",
                                    "region": "$.region",
                                    "detail": "$.detail",
                                    "version": "$.version",
                                    "resources": "$.resources",
                                    "id": "$.id",
                                },
                                "InputTemplate": input_template,
                            },
                        }
                    ],
                )
                if target_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
                    print(
                        "Scheduled {} with expression {}!".format(
                            rule_name, expression
                        )
                    )
                else:
                    print(
                        "Problem scheduling {} with expression {}.".format(
                            rule_name, expression
                        )
                    )
        elif event_source:
            service = self.service_from_arn(event_source["arn"])
            if service not in pull_services:
                # Push-based services need explicit permission to invoke
                # the Lambda; pull-based ones (dynamodb/kinesis/sqs) don't.
                svc = ",".join(event["event_source"]["events"])
                self.create_event_permission(
                    lambda_name,
                    service + ".amazonaws.com",
                    event["event_source"]["arn"],
                )
            else:
                svc = service
            rule_response = add_event_source(
                event_source, lambda_arn, function, self.boto_session
            )
            if rule_response == "successful":
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == "failed":
                print(
                    "Problem creating {} event schedule for {}!".format(
                        svc, function
                    )
                )
            elif rule_response == "exists":
                print(
                    "{} event schedule for {} already exists - Nothing to do here.".format(
                        svc, function
                    )
                )
            elif rule_response == "dryrun":
                print(
                    "Dryrun for creating {} event schedule for {}!!".format(
                        svc, function
                    )
                )
        else:
            print(
                "Could not create event {} - Please define either an expression or an event source".format(
                    rule_name,
                )
            )
def get_scheduled_event_name(self, event, function, lambda_name, index=0):
    """
    Returns an AWS-valid CloudWatch rule name using a digest of the event name, lambda name, and function.
    This allows support for rule names that may be longer than the 64 char limit.
    """
    name = event.get("name", function)
    if name != function:
        # a custom event name has been provided, make sure function name is included as postfix,
        # otherwise zappa's handler won't be able to locate the function.
        name = "-".join([name, function])
    if index:
        # to ensure unique cloudwatch rule names in the case of multiple expressions
        # prefix all entries bar the first with the index
        # Related: https://github.com/Miserlou/Zappa/pull/1051
        name = "-".join([str(index), name])
    # prefix scheduled event names with lambda name. So we can look them up later via the prefix.
    event_name = self.get_event_name(lambda_name, name)
    # if it's possible that we truncated name, generate a unique, shortened name
    # https://github.com/Miserlou/Zappa/issues/970
    if len(event_name) >= 64:
        event_name = self.get_event_name(
            self.get_hashed_lambda_name(lambda_name), name
        )
    return event_name
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return "{prefix:.{width}}-{postfix}".format(
prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name
)[:64]
@staticmethod
def get_hashed_lambda_name(lambda_name):
return hashlib.sha1(lambda_name.encode()).hexdigest()
def delete_rule(self, rule_name):
    """
    Delete a CloudWatch Events rule, removing its targets first.

    Deleted rules may still show up in the AWS console for a while.
    """
    logger.debug("Deleting existing rule {}".format(rule_name))
    # A rule cannot be deleted while it still has targets attached.
    try:
        targets = self.events_client.list_targets_by_rule(Rule=rule_name)
    except botocore.exceptions.ClientError as e:
        # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
        if e.response["Error"]["Code"] == "AccessDeniedException":
            raise
        logger.debug(
            "No target found for this rule: {} {}".format(rule_name, e.args[0])
        )
        return
    target_list = targets.get("Targets") or []
    if target_list:
        self.events_client.remove_targets(
            Rule=rule_name, Ids=[target["Id"] for target in target_list]
        )
    else:  # pragma: no cover
        logger.debug("No target to delete")
    # Delete our rule.
    self.events_client.delete_rule(Name=rule_name)
def get_event_rule_names_for_lambda(self, lambda_arn):
    """
    Get all of the rule names associated with a lambda function.

    Transparently follows NextToken pagination.
    """
    rule_names = []
    next_token = None
    while True:
        kwargs = {"TargetArn": lambda_arn}
        if next_token is not None:
            kwargs["NextToken"] = next_token
        response = self.events_client.list_rule_names_by_target(**kwargs)
        rule_names.extend(response["RuleNames"])
        next_token = response.get("NextToken")
        if next_token is None:
            break
    return rule_names
def get_event_rules_for_lambda(self, lambda_arn):
    """
    Get all of the rule details associated with this function.
    """
    names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    rules = []
    for name in names:
        rules.append(self.events_client.describe_rule(Name=name))
    return rules
def unschedule_events(
self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None
):
excluded_source_services = excluded_source_services or []
"""
Given a list of events, unschedule these CloudWatch Events.
'events' is a list of dictionaries, where the dict must contains the string
of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
"""
self._clear_policy(lambda_name)
rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
for rule_name in rule_names:
self.delete_rule(rule_name)
print("Unscheduled " + rule_name + ".")
non_cwe = [e for e in events if "event_source" in e]
for event in non_cwe:
# TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
# them no matter what.
# These are non CWE event sources.
function = event["function"]
name = event.get("name", function)
event_source = event.get("event_source", function)
service = self.service_from_arn(event_source["arn"])
# DynamoDB and Kinesis streams take quite a while to setup after they are created and do not need to be
# re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
# update or zappa schedule.
if service not in excluded_source_services:
remove_event_source(
event_source, lambda_arn, function, self.boto_session
)
print(
"Removed event {}{}.".format(
name,
" ({})".format(str(event_source["events"]))
if "events" in event_source
else "",
)
)
###
# Async / SNS
##
def create_async_sns_topic(self, lambda_name, lambda_arn):
"""
Create the SNS-based async topic.
"""
topic_name = get_topic_name(lambda_name)
# Create SNS topic
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
# Create subscription
self.sns_client.subscribe(
TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn
)
# Add Lambda permission for SNS to invoke function
self.create_event_permission(
lambda_name=lambda_name, principal="sns.amazonaws.com", source_arn=topic_arn
)
# Add rule for SNS topic as a event source
add_event_source(
event_source={"arn": topic_arn, "events": ["sns:Publish"]},
lambda_arn=lambda_arn,
target_function="zappa.asynchronous.route_task",
boto_session=self.boto_session,
)
return topic_arn
def remove_async_sns_topic(self, lambda_name):
"""
Remove the async SNS topic.
"""
topic_name = get_topic_name(lambda_name)
removed_arns = []
for sub in self.sns_client.list_subscriptions()["Subscriptions"]:
if topic_name in sub["TopicArn"]:
self.sns_client.delete_topic(TopicArn=sub["TopicArn"])
removed_arns.append(sub["TopicArn"])
return removed_arns
###
# Async / DynamoDB
##
def _set_async_dynamodb_table_ttl(self, table_name):
self.dynamodb_client.update_time_to_live(
TableName=table_name,
TimeToLiveSpecification={"Enabled": True, "AttributeName": "ttl"},
)
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
"""
Create the DynamoDB table for async task return values
"""
try:
dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
return False, dynamodb_table
# catch this exception (triggered if the table doesn't exist)
except botocore.exceptions.ClientError:
dynamodb_table = self.dynamodb_client.create_table(
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
TableName=table_name,
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
ProvisionedThroughput={
"ReadCapacityUnits": read_capacity,
"WriteCapacityUnits": write_capacity,
},
)
if dynamodb_table:
try:
self._set_async_dynamodb_table_ttl(table_name)
except botocore.exceptions.ClientError:
# this fails because the operation is async, so retry
time.sleep(10)
self._set_async_dynamodb_table_ttl(table_name)
return True, dynamodb_table
def remove_async_dynamodb_table(self, table_name):
"""
Remove the DynamoDB Table used for async return values
"""
self.dynamodb_client.delete_table(TableName=table_name)
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern="", limit=10000, start_time=0):
"""
Fetch the CloudWatch logs for a given Lambda name.
"""
log_name = "/aws/lambda/" + lambda_name
streams = self.logs_client.describe_log_streams(
logGroupName=log_name, descending=True, orderBy="LastEventTime"
)
all_streams = streams["logStreams"]
all_names = [stream["logStreamName"] for stream in all_streams]
events = []
response = {}
while not response or "nextToken" in response:
extra_args = {}
if "nextToken" in response:
extra_args["nextToken"] = response["nextToken"]
# Amazon uses millisecond epoch for some reason.
# Thanks, Jeff.
start_time = start_time * 1000
end_time = int(time.time()) * 1000
response = self.logs_client.filter_log_events(
logGroupName=log_name,
logStreamNames=all_names,
startTime=start_time,
endTime=end_time,
filterPattern=filter_pattern,
limit=limit,
interleaved=True, # Does this actually improve performance?
**extra_args,
)
if response and "events" in response:
events += response["events"]
return sorted(events, key=lambda k: k["timestamp"])
def remove_log_group(self, group_name):
"""
Filter all log groups that match the name given in log_filter.
"""
print("Removing log group: {}".format(group_name))
try:
self.logs_client.delete_log_group(logGroupName=group_name)
except botocore.exceptions.ClientError as e:
print("Couldn't remove '{}' because of: {}".format(group_name, e))
def remove_lambda_function_logs(self, lambda_function_name):
"""
Remove all logs that are assigned to a given lambda function id.
"""
self.remove_log_group("/aws/lambda/{}".format(lambda_function_name))
def remove_api_gateway_logs(self, project_name):
"""
Removed all logs that are assigned to a given rest api id.
"""
for rest_api in self.get_rest_apis(project_name):
for stage in self.apigateway_client.get_stages(restApiId=rest_api["id"])[
"item"
]:
self.remove_log_group(
"API-Gateway-Execution-Logs_{}/{}".format(
rest_api["id"], stage["stageName"]
)
)
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
"""
Get the Hosted Zone ID for a given domain.
"""
all_zones = self.get_all_zones()
return self.get_best_match_zone(all_zones, domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [
zone
for zone in all_zones["HostedZones"]
if not zone["Config"]["PrivateZone"]
]
zones = {
zone["Name"][:-1]: zone["Id"]
for zone in public_zones
if zone["Name"][:-1] in domain
}
if zones:
keys = max(
zones.keys(), key=lambda a: len(a)
) # get longest key -- best match.
return zones[keys]
else:
return None
def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
"""
Set DNS challenge TXT.
"""
print("Setting DNS challenge..")
resp = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=self.get_dns_challenge_change_batch(
"UPSERT", domain, txt_challenge
),
)
return resp
def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
"""
Remove DNS challenge TXT.
"""
print("Deleting DNS challenge..")
resp = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=self.get_dns_challenge_change_batch(
"DELETE", domain, txt_challenge
),
)
return resp
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challenge, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": "_acme-challenge.{0}".format(domain),
"Type": "TXT",
"TTL": 60,
"ResourceRecords": [{"Value": '"{0}"'.format(txt_challenge)}],
},
}
]
}
##
# Utility
##
    def shell(self):
        """
        Spawn a PDB shell.

        Drops the caller into an interactive debugger at this point; there
        is no return value and execution blocks until the session ends.
        """
        import pdb

        pdb.set_trace()
    def load_credentials(self, boto_session=None, profile_name=None):
        """
        Load AWS credentials.

        An optional boto_session can be provided, but that's usually for testing.

        An optional profile_name can be provided for config files that have multiple sets
        of credentials.

        Side effects: sets self.boto_session, may overwrite self.aws_region
        with the provided session's region, and prints warnings when the
        region is not in the known Lambda / API Gateway region lists.
        """
        # Automatically load credentials from config or environment
        if not boto_session:
            # If provided, use the supplied profile name.
            if profile_name:
                self.boto_session = boto3.Session(
                    profile_name=profile_name, region_name=self.aws_region
                )
            elif os.environ.get("AWS_ACCESS_KEY_ID") and os.environ.get(
                "AWS_SECRET_ACCESS_KEY"
            ):
                # Explicit keys in the environment win over on-disk config.
                region_name = os.environ.get("AWS_DEFAULT_REGION") or self.aws_region
                session_kw = {
                    "aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID"),
                    "aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
                    "region_name": region_name,
                }

                # If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
                if os.environ.get("AWS_SESSION_TOKEN"):
                    session_kw["aws_session_token"] = os.environ.get(
                        "AWS_SESSION_TOKEN"
                    )

                self.boto_session = boto3.Session(**session_kw)
            else:
                # Fall back to boto3's own credential resolution chain.
                self.boto_session = boto3.Session(region_name=self.aws_region)

            logger.debug("Loaded boto session from config: %s", boto_session)
        else:
            logger.debug("Using provided boto session: %s", boto_session)
            self.boto_session = boto_session
            # use provided session's region in case it differs
            self.aws_region = self.boto_session.region_name

        if self.boto_session.region_name not in LAMBDA_REGIONS:
            print("Warning! AWS Lambda may not be available in this AWS Region!")

        if self.boto_session.region_name not in API_GATEWAY_REGIONS:
            print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(":")[2] | zappa-warm | /zappa_warm-0.54.4-py3-none-any.whl/zappa/core.py | core.py |
import calendar
import datetime
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from urllib.parse import urlparse
import botocore
import durationpy
from past.builtins import basestring
# Module-level logger shared by the helpers below.
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.

    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.

    Args:
        src: source directory (or an egg-link file; see below).
        dst: destination directory, created if missing.
        metadata: also copy permissions/timestamps when True.
        symlinks: re-create symlinks instead of following them.
        ignore: callable compatible with shutil.ignore_patterns.
    """

    def copy_file(src, dst, item):
        # Copy one directory entry (link, subdir, or regular file).
        s = os.path.join(src, item)
        d = os.path.join(dst, item)

        if symlinks and os.path.islink(s):  # pragma: no cover
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                except (AttributeError, NotImplementedError, OSError):
                    # os.lchmod only exists on some platforms (macOS/BSD).
                    # The old bare `except:` also swallowed KeyboardInterrupt
                    # and SystemExit; only catch what a missing/failing
                    # lchmod can actually raise.
                    pass
        elif os.path.isdir(s):
            copytree(s, d, metadata, symlinks, ignore)
        else:
            shutil.copy2(s, d) if metadata else shutil.copy(s, d)

    try:
        lst = os.listdir(src)
        if not os.path.exists(dst):
            os.makedirs(dst)
        if metadata:
            shutil.copystat(src, dst)
    except NotADirectoryError:  # egg-link files
        # 'src' was actually a single file: copy it next to its parent.
        copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
        return

    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]

    for item in lst:
        copy_file(src, dst, item)
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path).
    """
    # Empty or falsy input yields empty bucket and path.
    if not url:
        return "", ""
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip("/")
def human_size(num, suffix="B"):
    """
    Convert bytes length to a human-readable version
    """
    units = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi")
    value = num
    for unit in units:
        if abs(value) < 1024.0:
            return "{0:3.1f}{1!s}{2!s}".format(value, unit, suffix)
        value /= 1024.0
    # Anything past zebibytes falls through to yobibytes.
    return "{0:.1f}{1!s}{2!s}".format(value, "Yi", suffix)
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.

    The string uses an extended version of Go's duration syntax (e.g. "3h")
    and is interpreted as "that long ago from now". Returns 0 when the
    string cannot be parsed.
    """
    # The previous version had an unreachable `if ts: return ts` after the
    # try-block (ts was always None there); the success path already
    # returned inside the try.
    try:
        delta = durationpy.from_str(timestring)
        past = datetime.datetime.utcnow() - delta
        return calendar.timegm(past.timetuple())
    except Exception:
        # Unable to parse the timestring.
        return 0
##
# `init` related
##
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.

    Walks the current working directory for files matching '*settings.py',
    skipping anything under site-packages.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, "*settings.py"):
            # (The old code computed this join twice; once is enough.)
            full = os.path.join(root, filename)
            if "site-packages" in full:
                continue
            # Convert the absolute path into a dotted module path
            # relative to the current working directory.
            package_path = full.replace(os.getcwd(), "")
            package_module = (
                package_path.replace(os.sep, ".").split(".", 1)[1].replace(".py", "")
            )

            matches.append(package_module)
    return matches
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.

    Scans every .py file under the current working directory (excluding
    site-packages) for an `<name> = Flask(` assignment and reports
    '<module>.<name>' entries.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, "*.py"):
            # (The old code computed this join twice; once is enough.)
            full = os.path.join(root, filename)
            if "site-packages" in full:
                continue

            with io.open(full, "r", encoding="utf-8") as f:
                lines = f.readlines()

            for line in lines:
                app = None

                # Kind of janky..
                if "= Flask(" in line:
                    app = line.split("= Flask(")[0].strip()
                if "=Flask(" in line:
                    app = line.split("=Flask(")[0].strip()

                if not app:
                    continue

                # Convert the absolute path into a dotted module path
                # relative to the current working directory.
                package_path = full.replace(os.getcwd(), "")
                package_module = (
                    package_path.replace(os.sep, ".")
                    .split(".", 1)[1]
                    .replace(".py", "")
                )
                app_module = package_module + "." + app

                matches.append(app_module)

    return matches
def get_venv_from_python_version():
    """Return the 'pythonX.Y' directory name for the running interpreter."""
    major, minor = sys.version_info[:2]
    return "python{}.{}".format(major, minor)
def get_runtime_from_python_version():
    """
    Map the running interpreter to the matching AWS Lambda Python runtime
    identifier, capped at 'python3.9'.
    """
    if sys.version_info[0] < 3:
        raise ValueError("Python 2.x is no longer supported.")
    minor = sys.version_info[1]
    if minor <= 6:
        return "python3.6"
    if minor <= 7:
        return "python3.7"
    if minor <= 8:
        return "python3.8"
    return "python3.9"
##
# Async Tasks
##
def get_topic_name(lambda_name):
    """Derive the SNS async-task topic name for a Lambda function."""
    return "{}-zappa-async".format(lambda_name)
##
# Event sources / Kappa
##
def get_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary item, a session and a lambda_arn,
    hack into Kappa's Gibson, create out an object we can call
    to schedule this event, and return the event source.

    Returns a (event_source_obj, ctx, funk) triple: the kappa event-source
    wrapper, the pseudo-context, and the pseudo-function handed to it.
    NOTE: the `dry` parameter is accepted but never used in this body.
    """
    import kappa.awsclient
    import kappa.event_source.base
    import kappa.event_source.cloudwatch
    import kappa.event_source.dynamodb_stream
    import kappa.event_source.kinesis
    import kappa.event_source.s3
    import kappa.event_source.sns
    import kappa.function
    import kappa.policy
    import kappa.restapi
    import kappa.role

    # Minimal stand-ins for kappa's context/function objects; attributes
    # are attached ad hoc below.
    class PseudoContext:
        def __init__(self):
            return

    class PseudoFunction:
        def __init__(self):
            return

    # Mostly adapted from kappa - will probably be replaced by kappa support
    class SqsEventSource(kappa.event_source.base.EventSource):
        def __init__(self, context, config):
            super().__init__(context, config)
            self._lambda = kappa.awsclient.create_client("lambda", context.session)

        def _get_uuid(self, function):
            # Look up the existing event source mapping UUID, if any.
            uuid = None
            response = self._lambda.call(
                "list_event_source_mappings",
                FunctionName=function.name,
                EventSourceArn=self.arn,
            )
            LOG.debug(response)
            if len(response["EventSourceMappings"]) > 0:
                uuid = response["EventSourceMappings"][0]["UUID"]
            return uuid

        def add(self, function):
            try:
                response = self._lambda.call(
                    "create_event_source_mapping",
                    FunctionName=function.name,
                    EventSourceArn=self.arn,
                    BatchSize=self.batch_size,
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to add event source")

        def enable(self, function):
            self._config["enabled"] = True
            try:
                response = self._lambda.call(
                    "update_event_source_mapping",
                    UUID=self._get_uuid(function),
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to enable event source")

        def disable(self, function):
            self._config["enabled"] = False
            try:
                response = self._lambda.call(
                    "update_event_source_mapping",
                    FunctionName=function.name,
                    Enabled=self.enabled,
                )
                LOG.debug(response)
            except Exception:
                LOG.exception("Unable to disable event source")

        def update(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        "update_event_source_mapping",
                        BatchSize=self.batch_size,
                        Enabled=self.enabled,
                        FunctionName=function.arn,
                    )
                    LOG.debug(response)
                except Exception:
                    LOG.exception("Unable to update event source")

        def remove(self, function):
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                response = self._lambda.call("delete_event_source_mapping", UUID=uuid)
                LOG.debug(response)
            return response

        def status(self, function):
            # Returns the mapping description, or None when no mapping exists.
            response = None
            LOG.debug("getting status for event source %s", self.arn)
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        "get_event_source_mapping", UUID=self._get_uuid(function)
                    )
                    LOG.debug(response)
                except botocore.exceptions.ClientError:
                    LOG.debug("event source %s does not exist", self.arn)
                    response = None
            else:
                LOG.debug("No UUID for event source %s", self.arn)
            return response

    # SNS source extended with optional FilterPolicy support from the
    # event_source config's 'filters' key.
    class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
        @property
        def filters(self):
            return self._config.get("filters")

        def add_filters(self, function):
            try:
                subscription = self.exists(function)
                if subscription:
                    response = self._sns.call(
                        "set_subscription_attributes",
                        SubscriptionArn=subscription["SubscriptionArn"],
                        AttributeName="FilterPolicy",
                        AttributeValue=json.dumps(self.filters),
                    )
                    kappa.event_source.sns.LOG.debug(response)
            except Exception:
                kappa.event_source.sns.LOG.exception(
                    "Unable to add filters for SNS topic %s", self.arn
                )

        def add(self, function):
            super().add(function)
            if self.filters:
                self.add_filters(function)

    event_source_map = {
        "dynamodb": kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
        "kinesis": kappa.event_source.kinesis.KinesisEventSource,
        "s3": kappa.event_source.s3.S3EventSource,
        "sns": ExtendedSnsEventSource,
        "sqs": SqsEventSource,
        "events": kappa.event_source.cloudwatch.CloudWatchEventSource,
    }

    arn = event_source["arn"]
    # Service name is the third colon-delimited ARN field.
    _, _, svc, _ = arn.split(":", 3)

    event_source_func = event_source_map.get(svc, None)
    if not event_source_func:
        raise ValueError("Unknown event source: {0}".format(arn))

    def autoreturn(self, function_name):
        return function_name

    # Monkey-patch kappa's notification-id generator to pass names through
    # unchanged.
    event_source_func._make_notification_id = autoreturn

    ctx = PseudoContext()
    ctx.session = boto_session

    funk = PseudoFunction()
    funk.name = lambda_arn

    # Kappa 0.6.0 requires this nasty hacking,
    # hopefully we can remove at least some of this soon.
    # Kappa 0.7.0 introduces a whole host over other changes we don't
    # really want, so we're stuck here for a little while.
    # Related: https://github.com/Miserlou/Zappa/issues/684
    # https://github.com/Miserlou/Zappa/issues/688
    # https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
    if svc == "s3":
        # S3 needs the ARN split so the alias/stage goes into the context
        # and the function name carries the target function path.
        split_arn = lambda_arn.split(":")
        arn_front = ":".join(split_arn[:-1])
        arn_back = split_arn[-1]
        ctx.environment = arn_back
        funk.arn = arn_front
        funk.name = ":".join([arn_back, target_function])
    else:
        funk.arn = lambda_arn

    funk._context = ctx

    event_source_obj = event_source_func(ctx, event_source)

    return event_source_obj, ctx, funk
def add_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and add the event source.

    Returns "dryrun" when dry is True, "exists" when the source is already
    registered, otherwise "successful" or "failed".
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    if dry:
        return "dryrun"

    # TODO: Detect changes in config and refine exists algorithm
    if event_source_obj.status(funk):
        return "exists"

    event_source_obj.add(funk)
    return "successful" if event_source_obj.status(funk) else "failed"
def remove_event_source(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and remove the event source.

    Returns the removal response for a real run, or the constructed event
    source object when dry is True.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )

    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn

    if dry:
        return event_source_obj
    return event_source_obj.remove(funk)
def get_event_source_status(
    event_source, lambda_arn, target_function, boto_session, dry=False
):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    event_source_obj, _, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False
    )
    return event_source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.

    Returns True is updateable, else False.
    """
    import requests

    # Ask PyPI for the latest published version and compare.
    pypi_url = "https://pypi.org/pypi/Zappa/json"
    response = requests.get(pypi_url, timeout=1.5)
    latest_version = response.json()["info"]["version"]
    return this_version != latest_version
class InvalidAwsLambdaName(Exception):
    """Exception: proposed AWS Lambda name is invalid"""
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.

    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix.
    maxlen: max allowed length for name without prefix and suffix.
        The value 80 was calculated from prefix with longest known region name
        and assuming that no alias or version would be longer than `$LATEST`.

    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    # Previously this checked past.builtins.basestring (a py2-compat shim
    # that also admitted bytes, which then crashed in re.match below).
    # Plain `str` is the only valid type on Python 3.
    if not isinstance(name, str):
        msg = "Name must be of type string"
        raise InvalidAwsLambdaName(msg)
    if len(name) > maxlen:
        msg = "Name is longer than {maxlen} characters."
        raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
    if len(name) == 0:
        msg = "Name must not be empty string."
        raise InvalidAwsLambdaName(msg)
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
        raise InvalidAwsLambdaName(msg)
    return name
def contains_python_files_or_subdirs(folder):
    """
    Checks (recursively) if the directory contains .py or .pyc files
    """
    # os.walk is already fully recursive, so a single pass covers every
    # subdirectory. The old second loop re-walked each bare directory NAME
    # (relative to the process CWD, not to `root`), inspecting the wrong
    # directories entirely; it was both incorrect and redundant.
    for root, dirs, files in os.walk(folder):
        if any(filename.endswith((".py", ".pyc")) for filename in files):
            return True
    return False
def conflicts_with_a_neighbouring_module(directory_path):
    """
    Checks if a directory lies in the same directory as a .py file with the same name.
    """
    parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path))
    # For a bare relative name like "mydir", os.path.split yields an empty
    # parent, and os.listdir("") raises FileNotFoundError -- fall back to
    # the current directory instead.
    neighbours = os.listdir(parent_dir_path or ".")
    conflicting_neighbour_filename = current_dir_name + ".py"
    return conflicting_neighbour_filename in neighbours
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all keys titlecased.
    """
    titlecased = {}
    for key, value in d.items():
        titlecased[key.title()] = value
    return titlecased
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
    """
    Checks if an S3 bucket name is valid according to
    https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
    """
    # Bucket names must be at least 3 and no more than 63 characters long.
    if not (3 <= len(name) <= 63):
        return False
    # Bucket names must not contain uppercase characters or underscores.
    if any(character.isupper() for character in name):
        return False
    if "_" in name:
        return False
    # Bucket names must start with a lowercase letter or number.
    if not (name[0].islower() or name[0].isdigit()):
        return False
    # Bucket names are a series of labels separated by single periods.
    # Each label must be non-empty and must start and end with a lowercase
    # letter or a number.
    labels = name.split(".")
    for label in labels:
        if not label:
            return False
        if not (label[0].islower() or label[0].isdigit()):
            return False
        if not (label[-1].islower() or label[-1].isdigit()):
            return False
    # Bucket names must not be formatted as an IP address (e.g. 192.168.5.4).
    if all(label.isdigit() for label in labels):
        return False
    return True
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single = event.get("headers") or {}
    multi = (event.get("multiValueHeaders") or {}).copy()
    # Fold plain headers into the multi-value map (multi-value wins)...
    for header_name, header_value in single.items():
        if header_name not in multi:
            multi[header_name] = [header_value]
    # ...then flatten every value list into one comma-joined string.
    return {name: ", ".join(values) for name, values in multi.items()}
import argparse
import base64
import collections
import importlib
import inspect
import logging
import os
import pkgutil
import random
import re
import string
import sys
import tempfile
import time
import zipfile
from builtins import bytes, input
from datetime import datetime, timedelta
import argcomplete
import botocore
import click
import hjson as json
import pkg_resources
import requests
import slugify
import toml
import yaml
from click import BaseCommand, Context
from click.exceptions import ClickException
from click.globals import push_context
from dateutil import parser
from past.builtins import basestring
from .core import API_GATEWAY_REGIONS, Zappa, logger
from .utilities import (
InvalidAwsLambdaName,
check_new_version_available,
detect_django_settings,
detect_flask_apps,
get_runtime_from_python_version,
get_venv_from_python_version,
human_size,
is_valid_bucket_name,
parse_s3_url,
string_to_timestamp,
validate_name,
)
# Settings keys that configure Zappa's own behavior.
# NOTE(review): presumed to be the set consumed by the settings-merging
# machinery elsewhere in this module -- confirm against usage.
CUSTOM_SETTINGS = [
    "apigateway_policy",
    "assume_policy",
    "attach_policy",
    "aws_region",
    "delete_local_zip",
    "delete_s3_zip",
    "exclude",
    "exclude_glob",
    "extra_permissions",
    "include",
    "role_name",
    "touch",
]

# URL of the boto3 configuration quickstart guide.
BOTO3_CONFIG_DOCS_URL = (
    "https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration"
)
##
# Main Input Processing
##
class ZappaCLI:
    """
    ZappaCLI object is responsible for loading the settings,
    handling the input arguments and executing the calls to the core library.

    The class attributes below are defaults; they are overwritten
    per-instance once the command line and zappa_settings are parsed.
    """

    # CLI
    vargs = None
    command = None
    stage_env = None

    # Zappa settings
    zappa = None
    zappa_settings = None
    load_credentials = True
    disable_progress = False

    # Specific settings
    api_stage = None
    app_function = None
    aws_region = None
    debug = None
    prebuild_script = None
    project_name = None
    profile_name = None
    lambda_arn = None
    lambda_name = None
    lambda_description = None
    lambda_concurrency = None
    s3_bucket_name = None
    settings_file = None
    zip_path = None
    handler_path = None
    vpc_config = None
    memory_size = None
    use_apigateway = None
    lambda_handler = None
    django_settings = None
    manage_roles = True
    exception_handler = None
    environment_variables = None
    authorizer = None
    xray_tracing = False
    aws_kms_key_arn = ""
    context_header_mappings = None
    # NOTE: class-level list literal is shared across instances until an
    # instance reassigns it.
    tags = []
    layers = None

    # Valid stage names: alphanumerics and underscores only.
    stage_name_env_pattern = re.compile("^[a-zA-Z0-9_]+$")
    def __init__(self):
        # Per-instance override store, keyed by stage name.
        self._stage_config_overrides = (
            {}
        )  # change using self.override_stage_config_setting(key, val)
    @property
    def stage_config(self):
        """
        A shortcut property for settings of a stage.

        Resolves the 'extends' chain (a stage's own keys win over the
        stages it extends), applies the legacy delete_zip ->
        delete_local_zip alias, then applies any forced overrides
        registered for the current stage.
        """

        def get_stage_setting(stage, extended_stages=None):
            # Recursively merge a stage with everything it extends,
            # guarding against cycles in the 'extends' graph.
            if extended_stages is None:
                extended_stages = []

            if stage in extended_stages:
                raise RuntimeError(
                    stage + " has already been extended to these settings. "
                    "There is a circular extends within the settings file."
                )
            extended_stages.append(stage)

            try:
                stage_settings = dict(self.zappa_settings[stage].copy())
            except KeyError:
                raise ClickException(
                    "Cannot extend settings for undefined stage '" + stage + "'."
                )

            extends_stage = self.zappa_settings[stage].get("extends", None)
            if not extends_stage:
                return stage_settings
            extended_settings = get_stage_setting(
                stage=extends_stage, extended_stages=extended_stages
            )
            # The extending stage's own keys take precedence.
            extended_settings.update(stage_settings)
            return extended_settings

        settings = get_stage_setting(stage=self.api_stage)

        # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
        if "delete_zip" in settings:
            settings["delete_local_zip"] = settings.get("delete_zip")

        settings.update(self.stage_config_overrides)

        return settings
    @property
    def stage_config_overrides(self):
        """
        Returns zappa_settings we forcefully override for the current stage
        set by `self.override_stage_config_setting(key, value)`
        """
        # getattr guard: the attribute may be missing if __init__ was bypassed.
        return getattr(self, "_stage_config_overrides", {}).get(self.api_stage, {})
    def override_stage_config_setting(self, key, val):
        """
        Forcefully override a setting set by zappa_settings (for the current stage only)

        :param key: settings key
        :param val: value
        """
        # Lazily create the override store in case __init__ didn't run.
        self._stage_config_overrides = getattr(self, "_stage_config_overrides", {})
        self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val
def handle(self, argv=None):
"""
Main function.
Parses command, load settings and dispatches accordingly.
"""
desc = "Zappa - Deploy Python applications to AWS Lambda" " and API Gateway.\n"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"-v",
"--version",
action="version",
version=pkg_resources.get_distribution("zappa-warm").version,
help="Print the zappa version",
)
parser.add_argument(
"--color", default="auto", choices=["auto", "never", "always"]
)
env_parser = argparse.ArgumentParser(add_help=False)
me_group = env_parser.add_mutually_exclusive_group()
all_help = "Execute this command for all of our defined " "Zappa stages."
me_group.add_argument("--all", action="store_true", help=all_help)
me_group.add_argument("stage_env", nargs="?")
group = env_parser.add_argument_group()
group.add_argument(
"-a", "--app_function", help="The WSGI application function."
)
group.add_argument(
"-s", "--settings_file", help="The path to a Zappa settings file."
)
group.add_argument(
"-q", "--quiet", action="store_true", help="Silence all output."
)
# https://github.com/Miserlou/Zappa/issues/407
# Moved when 'template' command added.
# Fuck Terraform.
group.add_argument(
"-j",
"--json",
action="store_true",
help="Make the output of this command be machine readable.",
)
# https://github.com/Miserlou/Zappa/issues/891
group.add_argument(
"--disable_progress", action="store_true", help="Disable progress bars."
)
group.add_argument("--no_venv", action="store_true", help="Skip venv check.")
##
# Certify
##
subparsers = parser.add_subparsers(title="subcommands", dest="command")
cert_parser = subparsers.add_parser(
"certify", parents=[env_parser], help="Create and install SSL certificate"
)
cert_parser.add_argument(
"--manual",
action="store_true",
help=(
"Gets new Let's Encrypt certificates, but prints them to console."
"Does not update API Gateway domains."
),
)
cert_parser.add_argument(
"-y", "--yes", action="store_true", help="Auto confirm yes."
)
##
# Deploy
##
deploy_parser = subparsers.add_parser(
"deploy", parents=[env_parser], help="Deploy application."
)
deploy_parser.add_argument(
"-z",
"--zip",
help="Deploy Lambda with specific local or S3 hosted zip package",
)
deploy_parser.add_argument(
"-d",
"--docker-image-uri",
help="Deploy Lambda with a specific docker image hosted in AWS Elastic Container Registry",
)
##
# Init
##
init_parser = subparsers.add_parser("init", help="Initialize Zappa app.")
##
# Package
##
package_parser = subparsers.add_parser(
"package",
parents=[env_parser],
help="Build the application zip package locally.",
)
package_parser.add_argument(
"-o", "--output", help="Name of file to output the package to."
)
##
# Template
##
template_parser = subparsers.add_parser(
"template",
parents=[env_parser],
help="Create a CloudFormation template for this API Gateway.",
)
template_parser.add_argument(
"-l",
"--lambda-arn",
required=True,
help="ARN of the Lambda function to template to.",
)
template_parser.add_argument(
"-r", "--role-arn", required=True, help="ARN of the Role to template with."
)
template_parser.add_argument(
"-o", "--output", help="Name of file to output the template to."
)
##
# Invocation
##
invoke_parser = subparsers.add_parser(
"invoke", parents=[env_parser], help="Invoke remote function."
)
invoke_parser.add_argument(
"--raw",
action="store_true",
help=(
"When invoking remotely, invoke this python as a string,"
" not as a modular path."
),
)
invoke_parser.add_argument(
"--no-color", action="store_true", help=("Don't color the output")
)
invoke_parser.add_argument("command_rest")
##
# Manage
##
manage_parser = subparsers.add_parser(
"manage", help="Invoke remote Django manage.py commands."
)
rest_help = (
"Command in the form of <env> <command>. <env> is not "
"required if --all is specified"
)
manage_parser.add_argument("--all", action="store_true", help=all_help)
manage_parser.add_argument("command_rest", nargs="+", help=rest_help)
manage_parser.add_argument(
"--no-color", action="store_true", help=("Don't color the output")
)
# This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
# https://github.com/Miserlou/Zappa/issues/1002
manage_parser.add_argument(
"-s", "--settings_file", help="The path to a Zappa settings file."
)
##
# Rollback
##
def positive_int(s):
"""Ensure an arg is positive"""
i = int(s)
if i < 0:
msg = "This argument must be positive (got {})".format(s)
raise argparse.ArgumentTypeError(msg)
return i
rollback_parser = subparsers.add_parser(
"rollback",
parents=[env_parser],
help="Rollback deployed code to a previous version.",
)
rollback_parser.add_argument(
"-n",
"--num-rollback",
type=positive_int,
default=1,
help="The number of versions to rollback.",
)
##
# Scheduling
##
subparsers.add_parser(
"schedule",
parents=[env_parser],
help="Schedule functions to occur at regular intervals.",
)
##
# Status
##
subparsers.add_parser(
"status",
parents=[env_parser],
help="Show deployment status and event schedules.",
)
##
# Log Tailing
##
tail_parser = subparsers.add_parser(
"tail", parents=[env_parser], help="Tail deployment logs."
)
tail_parser.add_argument(
"--no-color", action="store_true", help="Don't color log tail output."
)
tail_parser.add_argument(
"--http",
action="store_true",
help="Only show HTTP requests in tail output.",
)
tail_parser.add_argument(
"--non-http",
action="store_true",
help="Only show non-HTTP requests in tail output.",
)
tail_parser.add_argument(
"--since",
type=str,
default="100000s",
help="Only show lines since a certain timeframe.",
)
tail_parser.add_argument(
"--filter", type=str, default="", help="Apply a filter pattern to the logs."
)
tail_parser.add_argument(
"--force-color",
action="store_true",
help="Force coloring log tail output even if coloring support is not auto-detected. (example: piping)",
)
tail_parser.add_argument(
"--disable-keep-open",
action="store_true",
help="Exit after printing the last available log, rather than keeping the log open.",
)
##
# Undeploy
##
undeploy_parser = subparsers.add_parser(
"undeploy", parents=[env_parser], help="Undeploy application."
)
undeploy_parser.add_argument(
"--remove-logs",
action="store_true",
help=(
"Removes log groups of api gateway and lambda task"
" during the undeployment."
),
)
undeploy_parser.add_argument(
"-y", "--yes", action="store_true", help="Auto confirm yes."
)
##
# Unschedule
##
subparsers.add_parser(
"unschedule", parents=[env_parser], help="Unschedule functions."
)
##
# Updating
##
update_parser = subparsers.add_parser(
"update", parents=[env_parser], help="Update deployed application."
)
update_parser.add_argument(
"-z",
"--zip",
help="Update Lambda with specific local or S3 hosted zip package",
)
update_parser.add_argument(
"-n",
"--no-upload",
help="Update configuration where appropriate, but don't upload new code",
)
update_parser.add_argument(
"-d",
"--docker-image-uri",
help="Update Lambda with a specific docker image hosted in AWS Elastic Container Registry",
)
##
# Debug
##
subparsers.add_parser(
"shell",
parents=[env_parser],
help="A debug shell with a loaded Zappa object.",
)
##
# Python Settings File
##
settings_parser = subparsers.add_parser(
"save-python-settings-file",
parents=[env_parser],
help="Generate & save the Zappa settings Python file for docker deployments",
)
settings_parser.add_argument(
"-o",
"--output_path",
help=(
"The path to save the Zappa settings Python file. "
"File must be named zappa_settings.py and should be saved "
"in the same directory as the Zappa handler.py"
),
)
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
if args.color == "never":
disable_click_colors()
elif args.color == "always":
# TODO: Support aggressive coloring like "--force-color" on all commands
pass
elif args.color == "auto":
pass
# Parse the input
# NOTE(rmoe): Special case for manage command
# The manage command can't have both stage_env and command_rest
# arguments. Since they are both positional arguments argparse can't
# differentiate the two. This causes problems when used with --all.
# (e.g. "manage --all showmigrations admin" argparse thinks --all has
# been specified AND that stage_env='showmigrations')
# By having command_rest collect everything but --all we can split it
# apart here instead of relying on argparse.
if not args.command:
parser.print_help()
return
if args.command == "manage" and not self.vargs.get("all"):
self.stage_env = self.vargs["command_rest"].pop(0)
else:
self.stage_env = self.vargs.get("stage_env")
if args.command == "package":
self.load_credentials = False
self.command = args.command
self.disable_progress = self.vargs.get("disable_progress")
if self.vargs.get("quiet"):
self.silence()
# We don't have any settings yet, so make those first!
# (Settings-based interactions will fail
# before a project has been initialized.)
if self.command == "init":
self.init()
return
# Make sure there isn't a new version available
if not self.vargs.get("json"):
self.check_for_update()
# Load and Validate Settings File
self.load_settings_file(self.vargs.get("settings_file"))
# Should we execute this for all stages, or just one?
all_stages = self.vargs.get("all")
stages = []
if all_stages: # All stages!
stages = self.zappa_settings.keys()
else: # Just one env.
if not self.stage_env:
# If there's only one stage defined in the settings,
# use that as the default.
if len(self.zappa_settings.keys()) == 1:
stages.append(list(self.zappa_settings.keys())[0])
else:
parser.error("Please supply a stage to interact with.")
else:
stages.append(self.stage_env)
for stage in stages:
try:
self.dispatch_command(self.command, stage)
except ClickException as e:
# Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
e.show()
sys.exit(e.exit_code)
def dispatch_command(self, command, stage):
    """
    Given a command to execute and stage,
    execute that command.

    Args:
        command: Subcommand name parsed from the CLI (e.g. "deploy").
        stage: Stage (environment) name to operate on.
    """
    # Validate the stage name before doing anything else.
    self.check_stage_name(stage)
    self.api_stage = stage
    # "status" and "manage" produce their own output, so skip the banner for
    # them; also skip it when machine-readable JSON output was requested.
    if command not in ["status", "manage"]:
        if not self.vargs.get("json", None):
            click.echo(
                "Calling "
                + click.style(command, fg="green", bold=True)
                + " for stage "
                + click.style(self.api_stage, bold=True)
                + ".."
            )

    # Explicitly define the app function.
    # Related: https://github.com/Miserlou/Zappa/issues/832
    if self.vargs.get("app_function", None):
        self.app_function = self.vargs["app_function"]

    # Load our settings, based on api_stage.
    try:
        self.load_settings(self.vargs.get("settings_file"))
    except ValueError as e:
        if hasattr(e, "message"):
            print("Error: {}".format(e.message))
        else:
            print(str(e))
        sys.exit(-1)
    self.callback("settings")

    # Hand it off
    # NOTE(review): "deploy" and "package" are plain `if`s while the rest form
    # an `elif` chain hanging off "template"; each branch matches a distinct
    # command string, so the net effect is still one-of dispatch.
    if command == "deploy":  # pragma: no cover
        self.deploy(self.vargs["zip"], self.vargs["docker_image_uri"])
    if command == "package":  # pragma: no cover
        self.package(self.vargs["output"])
    if command == "template":  # pragma: no cover
        self.template(
            self.vargs["lambda_arn"],
            self.vargs["role_arn"],
            output=self.vargs["output"],
            json=self.vargs["json"],
        )
    elif command == "update":  # pragma: no cover
        self.update(
            self.vargs["zip"],
            self.vargs["no_upload"],
            self.vargs["docker_image_uri"],
        )
    elif command == "rollback":  # pragma: no cover
        self.rollback(self.vargs["num_rollback"])
    elif command == "invoke":  # pragma: no cover
        if not self.vargs.get("command_rest"):
            print("Please enter the function to invoke.")
            return
        self.invoke(
            self.vargs["command_rest"],
            raw_python=self.vargs["raw"],
            no_color=self.vargs["no_color"],
        )
    elif command == "manage":  # pragma: no cover
        if not self.vargs.get("command_rest"):
            print("Please enter the management command to invoke.")
            return
        if not self.django_settings:
            print("This command is for Django projects only!")
            print(
                "If this is a Django project, please define django_settings in your zappa_settings."
            )
            return
        command_tail = self.vargs.get("command_rest")
        if len(command_tail) > 1:
            command = " ".join(
                command_tail
            )  # ex: zappa manage dev "shell --version"
        else:
            command = command_tail[0]  # ex: zappa manage dev showmigrations admin
        self.invoke(
            command,
            command="manage",
            no_color=self.vargs["no_color"],
        )
    elif command == "tail":  # pragma: no cover
        self.tail(
            colorize=(not self.vargs["no_color"]),
            http=self.vargs["http"],
            non_http=self.vargs["non_http"],
            since=self.vargs["since"],
            filter_pattern=self.vargs["filter"],
            force_colorize=self.vargs["force_color"] or None,
            keep_open=not self.vargs["disable_keep_open"],
        )
    elif command == "undeploy":  # pragma: no cover
        self.undeploy(
            no_confirm=self.vargs["yes"], remove_logs=self.vargs["remove_logs"]
        )
    elif command == "schedule":  # pragma: no cover
        self.schedule()
    elif command == "unschedule":  # pragma: no cover
        self.unschedule()
    elif command == "status":  # pragma: no cover
        self.status(return_json=self.vargs["json"])
    elif command == "certify":  # pragma: no cover
        self.certify(no_confirm=self.vargs["yes"], manual=self.vargs["manual"])
    elif command == "shell":  # pragma: no cover
        self.shell()
    elif command == "save-python-settings-file":  # pragma: no cover
        self.save_python_settings_file(self.vargs["output_path"])
##
# The Commands
##
def save_python_settings_file(self, output_path=None):
    """
    Generate the Zappa settings Python file (used by docker deployments)
    and write it to disk.

    Args:
        output_path: Destination path; defaults to "zappa_settings.py" in
            the current directory. The path must end with
            "zappa_settings.py" since the handler imports the module by
            that exact name.

    Raises:
        ValueError: If the destination is not named zappa_settings.py.
    """
    settings_path = output_path or "zappa_settings.py"
    # Validate the filename BEFORE announcing or writing anything, so the
    # user isn't told a file is being saved that then errors out.
    if not settings_path.endswith("zappa_settings.py"):
        raise ValueError("Settings file must be named zappa_settings.py")
    print(
        "Generating Zappa settings Python file and saving to {}".format(
            settings_path
        )
    )
    zappa_settings_s = self.get_zappa_settings_string()
    with open(settings_path, "w") as f_out:
        f_out.write(zappa_settings_s)
def package(self, output=None):
    """
    Build the deployment package locally, without uploading it anywhere.
    """
    # Packaging requires an active virtualenv.
    self.check_venv()
    # Keep the zip around afterwards; the package is the whole point here.
    self.override_stage_config_setting("delete_local_zip", False)
    # Run the user's prebuild hook, if one is configured.
    if self.prebuild_script:
        self.execute_prebuild_script()
    # Build the archive, then fire the post-zip callback.
    self.create_package(output)
    self.callback("zip")
    package_size = human_size(os.path.getsize(self.zip_path))
    banner = (
        click.style("Package created", fg="green", bold=True)
        + ": "
        + click.style(self.zip_path, bold=True)
        + " ("
        + package_size
        + ")"
    )
    click.echo(banner)
def template(self, lambda_arn, role_arn, output=None, json=False):
    """
    Only build the CloudFormation template file for this API Gateway;
    nothing is deployed.
    """
    # Both ARNs are mandatory for templating.
    if not lambda_arn:
        raise ClickException("Lambda ARN is required to template.")
    if not role_arn:
        raise ClickException("Role ARN is required to template.")

    self.zappa.credentials_arn = role_arn

    # Build the stack template from the stage's API Gateway settings.
    stack_template = self.zappa.create_stack_template(
        lambda_arn=lambda_arn,
        lambda_name=self.lambda_name,
        api_key_required=self.api_key_required,
        iam_authorization=self.iam_authorization,
        authorizer=self.authorizer,
        cors_options=self.cors,
        description=self.apigateway_description,
        endpoint_configuration=self.endpoint_configuration,
    )

    # Default to a timestamped filename when no output path was given.
    template_file = output or (
        self.lambda_name + "-template-" + str(int(time.time())) + ".json"
    )
    with open(template_file, "wb") as out:
        out.write(
            bytes(stack_template.to_json(indent=None, separators=(",", ":")), "utf-8")
        )

    if json:
        # Machine-readable mode: dump the file contents to stdout.
        with open(template_file, "r") as out:
            print(out.read())
    else:
        click.echo(
            click.style("Template created", fg="green", bold=True)
            + ": "
            + click.style(template_file, bold=True)
        )
def deploy(self, source_zip=None, docker_image_uri=None):
    """
    Package your project, upload it to S3, register the Lambda function
    and create the API Gateway routes.

    Args:
        source_zip: Path or s3:// URL of a pre-built package; when given,
            local packaging and the S3 upload are skipped.
        docker_image_uri: ECR image URI to deploy instead of a zip.
    """
    # This guard reads as (not source_zip) or docker_image_uri: roles are
    # managed for every deploy EXCEPT a zip-only deploy (pre-built zip with
    # no docker image).
    if not source_zip or docker_image_uri:
        # Make sure the necessary IAM execution roles are available
        if self.manage_roles:
            try:
                self.zappa.create_iam_roles()
            except botocore.client.ClientError as ce:
                raise ClickException(
                    click.style("Failed", fg="red")
                    + " to "
                    + click.style("manage IAM roles", bold=True)
                    + "!\n"
                    + "You may "
                    + click.style("lack the necessary AWS permissions", bold=True)
                    + " to automatically manage a Zappa execution role.\n"
                    + click.style("Exception reported by AWS:", bold=True)
                    + format(ce)
                    + "\n"
                    + "To fix this, see here: "
                    + click.style(
                        "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                        bold=True,
                    )
                    + "\n"
                )

        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) > 0:
            raise ClickException(
                "This application is "
                + click.style("already deployed", fg="red")
                + " - did you mean to call "
                + click.style("update", bold=True)
                + "?"
            )

    if not source_zip and not docker_image_uri:
        # Make sure we're in a venv.
        self.check_venv()

        # Execute the prebuild script
        if self.prebuild_script:
            self.execute_prebuild_script()

        # Create the Lambda Zip
        self.create_package()
        self.callback("zip")

        # Upload it to S3
        success = self.zappa.upload_to_s3(
            self.zip_path,
            self.s3_bucket_name,
            disable_progress=self.disable_progress,
        )
        if not success:  # pragma: no cover
            raise ClickException("Unable to upload to S3. Quitting.")

        # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
        if self.stage_config.get("slim_handler", False):
            # https://github.com/Miserlou/Zappa/issues/510
            success = self.zappa.upload_to_s3(
                self.handler_path,
                self.s3_bucket_name,
                disable_progress=self.disable_progress,
            )
            if not success:  # pragma: no cover
                raise ClickException("Unable to upload handler to S3. Quitting.")

            # Copy the project zip to the current project zip
            current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                self.api_stage, self.project_name
            )
            success = self.zappa.copy_on_s3(
                src_file_name=self.zip_path,
                dst_file_name=current_project_name,
                bucket_name=self.s3_bucket_name,
            )
            if not success:  # pragma: no cover
                raise ClickException(
                    "Unable to copy the zip to be the current project. Quitting."
                )

            handler_file = self.handler_path
        else:
            handler_file = self.zip_path

    # Fixes https://github.com/Miserlou/Zappa/issues/613
    try:
        self.lambda_arn = self.zappa.get_lambda_function(
            function_name=self.lambda_name
        )
    except botocore.client.ClientError:
        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        kwargs = dict(
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            dead_letter_config=self.dead_letter_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            aws_environment_variables=self.aws_environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn,
            use_alb=self.use_alb,
            layers=self.layers,
            concurrency=self.lambda_concurrency,
        )
        kwargs["function_name"] = self.lambda_name
        # Code source priority: docker image, s3 zip URL, local zip file,
        # else the package uploaded to S3 above.
        if docker_image_uri:
            kwargs["docker_image_uri"] = docker_image_uri
        elif source_zip and source_zip.startswith("s3://"):
            bucket, key_name = parse_s3_url(source_zip)
            kwargs["bucket"] = bucket
            kwargs["s3_key"] = key_name
        elif source_zip and not source_zip.startswith("s3://"):
            with open(source_zip, mode="rb") as fh:
                byte_stream = fh.read()
            kwargs["local_zip"] = byte_stream
        else:
            kwargs["bucket"] = self.s3_bucket_name
            kwargs["s3_key"] = handler_file
        self.lambda_arn = self.zappa.create_lambda_function(**kwargs)

    # Schedule events for this deployment
    self.schedule()

    endpoint_url = ""
    deployment_string = (
        click.style("Deployment complete", fg="green", bold=True) + "!"
    )

    if self.use_alb:
        kwargs = dict(
            lambda_arn=self.lambda_arn,
            lambda_name=self.lambda_name,
            alb_vpc_config=self.alb_vpc_config,
            timeout=self.timeout_seconds,
        )
        self.zappa.deploy_lambda_alb(**kwargs)

    if self.use_apigateway:
        # Create and configure the API Gateway
        template = self.zappa.create_stack_template(
            lambda_arn=self.lambda_arn,
            lambda_name=self.lambda_name,
            api_key_required=self.api_key_required,
            iam_authorization=self.iam_authorization,
            authorizer=self.authorizer,
            cors_options=self.cors,
            description=self.apigateway_description,
            endpoint_configuration=self.endpoint_configuration,
        )
        self.zappa.update_stack(
            self.lambda_name,
            self.s3_bucket_name,
            wait=True,
            disable_progress=self.disable_progress,
        )

        api_id = self.zappa.get_api_id(self.lambda_name)

        # Add binary support
        if self.binary_support:
            self.zappa.add_binary_support(api_id=api_id, cors=self.cors)

        # Add payload compression
        if self.stage_config.get("payload_compression", True):
            self.zappa.add_api_compression(
                api_id=api_id,
                min_compression_size=self.stage_config.get(
                    "payload_minimum_compression_size", 0
                ),
            )

        # Deploy the API!
        endpoint_url = self.deploy_api_gateway(api_id)
        deployment_string = deployment_string + ": {}".format(endpoint_url)

        # Create/link API key
        if self.api_key_required:
            if self.api_key is None:
                self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
            else:
                self.zappa.add_api_stage_to_api_key(
                    api_key=self.api_key, api_id=api_id, stage_name=self.api_stage
                )

        # Warm the endpoint with one request unless "touch" is disabled.
        if self.stage_config.get("touch", True):
            self.zappa.wait_until_lambda_function_is_updated(
                function_name=self.lambda_name
            )
            self.touch_endpoint(endpoint_url)

    # Finally, delete the local copy our zip package
    if not source_zip and not docker_image_uri:
        if self.stage_config.get("delete_local_zip", True):
            self.remove_local_zip()

    # Remove the project zip from S3.
    if not source_zip and not docker_image_uri:
        self.remove_uploaded_zip()

    self.callback("post")

    click.echo(deployment_string)
def update(self, source_zip=None, no_upload=False, docker_image_uri=None):
    """
    Repackage and update the function code.

    Args:
        source_zip: Path or s3:// URL of a pre-built package; when given,
            local packaging and the S3 upload are skipped.
        no_upload: Update configuration where appropriate but do not
            upload new code.
        docker_image_uri: ECR image URI to update the function with.
    """
    if not source_zip and not docker_image_uri:
        # Make sure we're in a venv.
        self.check_venv()

        # Execute the prebuild script
        if self.prebuild_script:
            self.execute_prebuild_script()

        # Temporary version check
        # NOTE(review): 1472581018 is a fixed epoch timestamp (Aug 2016) —
        # presumably the cutoff of an old Zappa release; verify upstream.
        try:
            updated_time = 1472581018
            function_response = self.zappa.lambda_client.get_function(
                FunctionName=self.lambda_name
            )
            conf = function_response["Configuration"]
            last_updated = parser.parse(conf["LastModified"])
            last_updated_unix = time.mktime(last_updated.timetuple())
        except botocore.exceptions.BotoCoreError as e:
            click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0])
            sys.exit(-1)
        except Exception as e:
            click.echo(
                click.style("Warning!", fg="red")
                + " Couldn't get function "
                + self.lambda_name
                + " in "
                + self.zappa.aws_region
                + " - have you deployed yet?"
            )
            sys.exit(-1)

        if last_updated_unix <= updated_time:
            click.echo(
                click.style("Warning!", fg="red")
                + " You may have upgraded Zappa since deploying this application. You will need to "
                + click.style("redeploy", bold=True)
                + " for this deployment to work properly!"
            )

        # Make sure the necessary IAM execution roles are available
        if self.manage_roles:
            try:
                self.zappa.create_iam_roles()
            except botocore.client.ClientError:
                click.echo(
                    click.style("Failed", fg="red")
                    + " to "
                    + click.style("manage IAM roles", bold=True)
                    + "!"
                )
                click.echo(
                    "You may "
                    + click.style("lack the necessary AWS permissions", bold=True)
                    + " to automatically manage a Zappa execution role."
                )
                click.echo(
                    "To fix this, see here: "
                    + click.style(
                        "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                        bold=True,
                    )
                )
                sys.exit(-1)

        # Create the Lambda Zip,
        if not no_upload:
            self.create_package()
            self.callback("zip")

        # Upload it to S3
        if not no_upload:
            success = self.zappa.upload_to_s3(
                self.zip_path,
                self.s3_bucket_name,
                disable_progress=self.disable_progress,
            )
            if not success:  # pragma: no cover
                raise ClickException("Unable to upload project to S3. Quitting.")

            # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
            if self.stage_config.get("slim_handler", False):
                # https://github.com/Miserlou/Zappa/issues/510
                success = self.zappa.upload_to_s3(
                    self.handler_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to upload handler to S3. Quitting."
                    )

                # Copy the project zip to the current project zip
                current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                    self.api_stage, self.project_name
                )
                success = self.zappa.copy_on_s3(
                    src_file_name=self.zip_path,
                    dst_file_name=current_project_name,
                    bucket_name=self.s3_bucket_name,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to copy the zip to be the current project. Quitting."
                    )

                handler_file = self.handler_path
            else:
                handler_file = self.zip_path

    # Register the Lambda function with that zip as the source
    # You'll also need to define the path to your lambda_handler code.
    kwargs = dict(
        bucket=self.s3_bucket_name,
        function_name=self.lambda_name,
        num_revisions=self.num_retained_versions,
        concurrency=self.lambda_concurrency,
    )
    # Code source priority mirrors deploy(): docker image, s3 zip URL,
    # local zip file, else the package uploaded to S3 above.
    if docker_image_uri:
        kwargs["docker_image_uri"] = docker_image_uri
        self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
    elif source_zip and source_zip.startswith("s3://"):
        bucket, key_name = parse_s3_url(source_zip)
        kwargs.update(dict(bucket=bucket, s3_key=key_name))
        self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
    elif source_zip and not source_zip.startswith("s3://"):
        with open(source_zip, mode="rb") as fh:
            byte_stream = fh.read()
            kwargs["local_zip"] = byte_stream
        self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
    else:
        if not no_upload:
            kwargs["s3_key"] = handler_file
        self.lambda_arn = self.zappa.update_lambda_function(**kwargs)

    # Remove the uploaded zip from S3, because it is now registered..
    if not source_zip and not no_upload and not docker_image_uri:
        self.remove_uploaded_zip()

    # Update the configuration, in case there are changes.
    self.lambda_arn = self.zappa.update_lambda_configuration(
        lambda_arn=self.lambda_arn,
        function_name=self.lambda_name,
        handler=self.lambda_handler,
        description=self.lambda_description,
        vpc_config=self.vpc_config,
        timeout=self.timeout_seconds,
        memory_size=self.memory_size,
        runtime=self.runtime,
        aws_environment_variables=self.aws_environment_variables,
        aws_kms_key_arn=self.aws_kms_key_arn,
        layers=self.layers,
        wait=False,
    )

    # Finally, delete the local copy our zip package
    if not source_zip and not no_upload and not docker_image_uri:
        if self.stage_config.get("delete_local_zip", True):
            self.remove_local_zip()

    if self.use_apigateway:
        self.zappa.create_stack_template(
            lambda_arn=self.lambda_arn,
            lambda_name=self.lambda_name,
            api_key_required=self.api_key_required,
            iam_authorization=self.iam_authorization,
            authorizer=self.authorizer,
            cors_options=self.cors,
            description=self.apigateway_description,
            endpoint_configuration=self.endpoint_configuration,
        )
        self.zappa.update_stack(
            self.lambda_name,
            self.s3_bucket_name,
            wait=True,
            update_only=True,
            disable_progress=self.disable_progress,
        )

        api_id = self.zappa.get_api_id(self.lambda_name)

        # Update binary support
        if self.binary_support:
            self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
        else:
            self.zappa.remove_binary_support(api_id=api_id, cors=self.cors)

        if self.stage_config.get("payload_compression", True):
            self.zappa.add_api_compression(
                api_id=api_id,
                min_compression_size=self.stage_config.get(
                    "payload_minimum_compression_size", 0
                ),
            )
        else:
            self.zappa.remove_api_compression(api_id=api_id)

        # It looks a bit like we might actually be using this just to get the URL,
        # but we're also updating a few of the APIGW settings.
        endpoint_url = self.deploy_api_gateway(api_id)

        if self.stage_config.get("domain", None):
            endpoint_url = self.stage_config.get("domain")
    else:
        endpoint_url = None

    self.schedule()

    # Update any cognito pool with the lambda arn
    # do this after schedule as schedule clears the lambda policy and we need to add one
    self.update_cognito_triggers()
    self.callback("post")

    if endpoint_url and "https://" not in endpoint_url:
        endpoint_url = "https://" + endpoint_url

    if self.base_path:
        endpoint_url += "/" + self.base_path

    deployed_string = (
        "Your updated Zappa deployment is "
        + click.style("live", fg="green", bold=True)
        + "!"
    )
    if self.use_apigateway:
        deployed_string = (
            deployed_string
            + ": "
            + click.style("{}".format(endpoint_url), bold=True)
        )

        api_url = None
        if endpoint_url and "amazonaws.com" not in endpoint_url:
            api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)

            if endpoint_url != api_url:
                deployed_string = deployed_string + " (" + api_url + ")"

        # Warm the endpoint with one request unless "touch" is disabled.
        if self.stage_config.get("touch", True):
            self.zappa.wait_until_lambda_function_is_updated(
                function_name=self.lambda_name
            )
            if api_url:
                self.touch_endpoint(api_url)
            elif endpoint_url:
                self.touch_endpoint(endpoint_url)

    click.echo(deployed_string)
def rollback(self, revision):
    """
    Roll the currently deployed Lambda code back to an earlier revision.

    Args:
        revision: How many versions to step back.
    """
    print("Rolling back..")
    self.zappa.rollback_lambda_function_version(self.lambda_name, versions_back=revision)
    print("Done!")
def tail(
    self,
    since,
    filter_pattern,
    limit=10000,
    keep_open=True,
    colorize=True,
    http=False,
    non_http=False,
    force_colorize=False,
):
    """
    Tail this function's logs.

    With keep_open, poll forever and print each batch of entries newer
    than the last one printed; otherwise print a single batch and return.
    """
    try:
        since_stamp = string_to_timestamp(since)
        last_since = since_stamp
        while True:
            fetched = self.zappa.fetch_logs(
                self.lambda_name,
                start_time=since_stamp,
                limit=limit,
                filter_pattern=filter_pattern,
            )
            # Only surface entries we haven't already printed.
            fresh = [entry for entry in fetched if entry["timestamp"] > last_since]
            self.print_logs(fresh, colorize, http, non_http, force_colorize)
            if not keep_open:
                break
            if fresh:
                last_since = fresh[-1]["timestamp"]
            time.sleep(1)
    except KeyboardInterrupt:  # pragma: no cover
        # Die gracefully
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(130)
def undeploy(self, no_confirm=False, remove_logs=False):
    """
    Tear down an existing deployment: ALB, API Gateway, event schedules
    and the Lambda function itself (optionally its logs as well).
    """
    if not no_confirm:  # pragma: no cover
        answer = input("Are you sure you want to undeploy? [y/n] ")
        if answer != "y":
            return

    if self.use_alb:
        self.zappa.undeploy_lambda_alb(self.lambda_name)

    if self.use_apigateway:
        if remove_logs:
            self.zappa.remove_api_gateway_logs(self.lambda_name)

        domain_name = self.stage_config.get("domain", None)
        base_path = self.stage_config.get("base_path", None)

        # Only remove the api key when not specified
        if self.api_key_required and self.api_key is None:
            api_id = self.zappa.get_api_id(self.lambda_name)
            self.zappa.remove_api_key(api_id, self.api_stage)

        self.zappa.undeploy_api_gateway(
            self.lambda_name, domain_name=domain_name, base_path=base_path
        )

    self.unschedule()  # removes event triggers, including warm up event.
    self.zappa.delete_lambda_function(self.lambda_name)

    if remove_logs:
        self.zappa.remove_lambda_function_logs(self.lambda_name)

    click.echo(click.style("Done", fg="green", bold=True) + "!")
def update_cognito_triggers(self):
    """
    Sync any configured Cognito user-pool triggers with this deployment's
    Lambda, deriving the trigger config names from each "source" entry.
    """
    if not self.cognito:
        return
    user_pool = self.cognito.get("user_pool")
    triggers = self.cognito.get("triggers", [])
    # e.g. "PreSignUp_SignUp" -> "PreSignUp"
    lambda_configs = {trigger["source"].split("_")[0] for trigger in triggers}
    self.zappa.update_cognito(
        self.lambda_name, user_pool, lambda_configs, self.lambda_arn
    )
def schedule(self):
    """
    Given a list of functions and a schedule to execute them,
    set up regular execution.

    Also appends the keep-warm event (unless disabled), creates the async
    SNS topic when configured, and ensures the async DynamoDB response
    table exists when configured.
    """
    events = self.stage_config.get("events", [])

    if events:
        if not isinstance(events, list):  # pragma: no cover
            print("Events must be supplied as a list.")
            return

        for event in events:
            self.collision_warning(event.get("function"))

    if self.stage_config.get("keep_warm", True):
        if not events:
            events = []

        keep_warm_rate = self.stage_config.get(
            "keep_warm_expression", "rate(4 minutes)"
        )
        events.append(
            {
                "name": "zappa-keep-warm",
                "function": "handler.keep_warm_callback",
                "expression": keep_warm_rate,
                "description": "Zappa Keep Warm - {}".format(self.lambda_name),
            }
        )

    if events:
        try:
            function_response = self.zappa.lambda_client.get_function(
                FunctionName=self.lambda_name
            )
        except botocore.exceptions.ClientError:  # pragma: no cover
            # FIX: the message previously rendered as
            # "please deployfirst. Ex:zappa deploy dev." - spaces were missing
            # around "first. Ex:".
            click.echo(
                click.style("Function does not exist", fg="yellow")
                + ", please "
                + click.style("deploy", bold=True)
                + " first. Ex: "
                + click.style("zappa deploy {}.".format(self.api_stage), bold=True)
            )
            sys.exit(-1)

        print("Scheduling..")
        self.zappa.schedule_events(
            lambda_arn=function_response["Configuration"]["FunctionArn"],
            lambda_name=self.lambda_name,
            events=events,
        )

    # Add async tasks SNS
    if self.stage_config.get(
        "async_source", None
    ) == "sns" and self.stage_config.get("async_resources", True):
        self.lambda_arn = self.zappa.get_lambda_function(
            function_name=self.lambda_name
        )
        topic_arn = self.zappa.create_async_sns_topic(
            lambda_name=self.lambda_name, lambda_arn=self.lambda_arn
        )
        click.echo("SNS Topic created: %s" % topic_arn)

    # Add async tasks DynamoDB
    table_name = self.stage_config.get("async_response_table", False)
    read_capacity = self.stage_config.get("async_response_table_read_capacity", 1)
    write_capacity = self.stage_config.get("async_response_table_write_capacity", 1)
    if table_name and self.stage_config.get("async_resources", True):
        created, response_table = self.zappa.create_async_dynamodb_table(
            table_name, read_capacity, write_capacity
        )
        if created:
            click.echo("DynamoDB table created: %s" % table_name)
        else:
            click.echo("DynamoDB table exists: %s" % table_name)
            provisioned_throughput = response_table["Table"][
                "ProvisionedThroughput"
            ]
            if (
                provisioned_throughput["ReadCapacityUnits"] != read_capacity
                or provisioned_throughput["WriteCapacityUnits"] != write_capacity
            ):
                click.echo(
                    click.style(
                        "\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(
                            table_name
                        ),
                        fg="red",
                    )
                )
def unschedule(self):
    """
    Tear down the regular execution schedule for this stage's functions.

    Runs even when no events are configured so that any previously
    created CloudWatch rules are removed.
    """
    # Run even if events are not defined to remove previously existing ones (thus default to []).
    configured_events = self.stage_config.get("events", [])
    if not isinstance(configured_events, list):  # pragma: no cover
        print("Events must be supplied as a list.")
        return

    function_arn = None
    try:
        response = self.zappa.lambda_client.get_function(
            FunctionName=self.lambda_name
        )
        function_arn = response["Configuration"]["FunctionArn"]
    except botocore.exceptions.ClientError:  # pragma: no cover
        raise ClickException(
            "Function does not exist, you should deploy first. Ex: zappa deploy {}. "
            "Proceeding to unschedule CloudWatch based events.".format(
                self.api_stage
            )
        )

    print("Unscheduling..")
    self.zappa.unschedule_events(
        lambda_name=self.lambda_name,
        lambda_arn=function_arn,
        events=configured_events,
    )

    # Remove async task SNS
    async_is_sns = self.stage_config.get("async_source", None) == "sns"
    if async_is_sns and self.stage_config.get("async_resources", True):
        removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
        click.echo("SNS Topic removed: %s" % ", ".join(removed_arns))
def invoke(self, function_name, raw_python=False, command=None, no_color=False):
    """
    Invoke a remote function.

    Three payload flavours are supported:
      * "command"     - a modular function path (the default),
      * "raw_command" - a string of python to execute directly,
      * "manage"      - a Django management command invocation.
    """
    if raw_python:
        payload = {"raw_command": function_name}
    else:
        payload = {command if command is not None else "command": function_name}

    # Can't use hjson
    import json as json

    response = self.zappa.invoke_lambda_function(
        self.lambda_name,
        json.dumps(payload),
        invocation_type="RequestResponse",
    )
    print(self.format_lambda_response(response, not no_color))

    # For a successful request FunctionError is not in response.
    # https://github.com/Miserlou/Zappa/pull/1254/
    if "FunctionError" in response:
        raise ClickException(
            "{} error occurred while invoking command.".format(
                response["FunctionError"]
            )
        )
def format_lambda_response(self, response, colorize=True):
    """
    Extract and decode the base64 LogResult from a Lambda invocation
    response.

    Returns the decoded log text (optionally colorized when writing to a
    TTY), the raw bytes if they are not valid UTF-8, or the response
    unchanged when it carries no LogResult.
    """
    if "LogResult" not in response:
        return response

    raw_log = base64.b64decode(response["LogResult"])
    try:
        decoded_log = raw_log.decode()
    except UnicodeDecodeError:
        # Not text we can decode; hand back the raw bytes untouched.
        return raw_log

    if colorize and sys.stdout.isatty():
        return self.colorize_invoke_command(self.format_invoke_command(decoded_log))
    return decoded_log
def format_invoke_command(self, string):
    """
    Formats correctly the string output from the invoke() method,
    replacing line breaks and tabs when necessary.
    """
    # Literal backslash-n sequences become real newlines first.
    text = string.replace("\\n", "\n")
    cleaned_lines = []
    for line in text.splitlines():
        if line.startswith("REPORT"):
            # Each tab-separated REPORT field goes on its own line.
            line = line.replace("\t", "\n")
        if line.startswith("[DEBUG]"):
            line = line.replace("\t", " ")
        cleaned_lines.append(line)
    joined = "".join(line + "\n" for line in cleaned_lines)
    # Collapse blank lines produced by the substitutions above.
    return joined.replace("\n\n", "\n")
def colorize_invoke_command(self, string):
"""
Apply various heuristics to return a colorized version the invoke
command string. If these fail, simply return the string in plaintext.
Inspired by colorize_log_entry().
"""
final_string = string
try:
# Line headers
try:
for token in ["START", "END", "REPORT", "[DEBUG]"]:
if token in final_string:
format_string = "[{}]"
# match whole words only
pattern = r"\b{}\b"
if token == "[DEBUG]":
format_string = "{}"
pattern = re.escape(token)
repl = click.style(
format_string.format(token), bold=True, fg="cyan"
)
final_string = re.sub(pattern.format(token), repl, final_string)
except Exception: # pragma: no cover
pass
# Green bold Tokens
try:
for token in [
"Zappa Event:",
"RequestId:",
"Version:",
"Duration:",
"Billed",
"Memory Size:",
"Max Memory Used:",
]:
if token in final_string:
final_string = final_string.replace(
token, click.style(token, bold=True, fg="green")
)
except Exception: # pragma: no cover
pass
# UUIDs
for token in final_string.replace("\t", " ").split(" "):
try:
if token.count("-") == 4 and token.replace("-", "").isalnum():
final_string = final_string.replace(
token, click.style(token, fg="magenta")
)
except Exception: # pragma: no cover
pass
return final_string
except Exception:
return string
def status(self, return_json=False):
    """
    Describe the status of the current deployment.

    Gathers Lambda configuration, 24h CloudWatch invocation/error metrics,
    API Gateway/domain URLs and scheduled event rules, then either prints
    them as a table or (return_json=True) dumps them as JSON.
    """

    def tabular_print(title, value):
        """
        Convenience function for printing formatted table items.
        """
        click.echo(
            "%-*s%s" % (32, click.style("\t" + title, fg="green") + ":", str(value))
        )
        return

    # Lambda Env Details
    lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)

    if not lambda_versions:
        raise ClickException(
            click.style(
                "No Lambda %s detected in %s - have you deployed yet?"
                % (self.lambda_name, self.zappa.aws_region),
                fg="red",
            )
        )

    status_dict = collections.OrderedDict()
    status_dict["Lambda Versions"] = len(lambda_versions)
    function_response = self.zappa.lambda_client.get_function(
        FunctionName=self.lambda_name
    )
    conf = function_response["Configuration"]
    self.lambda_arn = conf["FunctionArn"]
    status_dict["Lambda Name"] = self.lambda_name
    status_dict["Lambda ARN"] = self.lambda_arn
    status_dict["Lambda Role ARN"] = conf["Role"]
    status_dict["Lambda Code Size"] = conf["CodeSize"]
    status_dict["Lambda Version"] = conf["Version"]
    status_dict["Lambda Last Modified"] = conf["LastModified"]
    status_dict["Lambda Memory Size"] = conf["MemorySize"]
    status_dict["Lambda Timeout"] = conf["Timeout"]

    # Handler & Runtime won't be present for lambda Docker deployments
    # https://github.com/Miserlou/Zappa/issues/2188
    status_dict["Lambda Handler"] = conf.get("Handler", "")
    status_dict["Lambda Runtime"] = conf.get("Runtime", "")

    if "VpcConfig" in conf.keys():
        status_dict["Lambda VPC ID"] = conf.get("VpcConfig", {}).get(
            "VpcId", "Not assigned"
        )
    else:
        status_dict["Lambda VPC ID"] = None

    # Calculated statistics
    # Period=1440 minutes = one datapoint covering the whole 24h window.
    try:
        function_invocations = self.zappa.cloudwatch.get_metric_statistics(
            Namespace="AWS/Lambda",
            MetricName="Invocations",
            StartTime=datetime.utcnow() - timedelta(days=1),
            EndTime=datetime.utcnow(),
            Period=1440,
            Statistics=["Sum"],
            Dimensions=[
                {"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
            ],
        )["Datapoints"][0]["Sum"]
    except Exception as e:
        # No datapoints (or API error) -> treat as zero invocations.
        function_invocations = 0
    try:
        function_errors = self.zappa.cloudwatch.get_metric_statistics(
            Namespace="AWS/Lambda",
            MetricName="Errors",
            StartTime=datetime.utcnow() - timedelta(days=1),
            EndTime=datetime.utcnow(),
            Period=1440,
            Statistics=["Sum"],
            Dimensions=[
                {"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
            ],
        )["Datapoints"][0]["Sum"]
    except Exception as e:
        function_errors = 0

    try:
        error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
    except:
        # Division by zero when there were no invocations.
        error_rate = "Error calculating"

    status_dict["Invocations (24h)"] = int(function_invocations)
    status_dict["Errors (24h)"] = int(function_errors)
    status_dict["Error Rate (24h)"] = error_rate

    # URLs
    if self.use_apigateway:
        api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)

        status_dict["API Gateway URL"] = api_url

        # Api Keys
        api_id = self.zappa.get_api_id(self.lambda_name)
        for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
            status_dict["API Gateway x-api-key"] = api_key

        # There literally isn't a better way to do this.
        # AWS provides no way to tie a APIGW domain name to its Lambda function.
        domain_url = self.stage_config.get("domain", None)
        base_path = self.stage_config.get("base_path", None)
        if domain_url:
            status_dict["Domain URL"] = "https://" + domain_url
            if base_path:
                status_dict["Domain URL"] += "/" + base_path
        else:
            status_dict["Domain URL"] = "None Supplied"

    # Scheduled Events
    event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
    status_dict["Num. Event Rules"] = len(event_rules)

    if len(event_rules) > 0:
        status_dict["Events"] = []

    for rule in event_rules:
        event_dict = {}
        rule_name = rule["Name"]
        event_dict["Event Rule Name"] = rule_name
        event_dict["Event Rule Schedule"] = rule.get("ScheduleExpression", None)
        event_dict["Event Rule State"] = rule.get("State", None).title()
        event_dict["Event Rule ARN"] = rule.get("Arn", None)
        status_dict["Events"].append(event_dict)

    if return_json:
        # Putting the status in machine readable format
        # https://github.com/Miserlou/Zappa/issues/407
        # NOTE(review): `dumpsJSON` suggests the module-level `json` is an
        # hjson alias — confirm against the file's imports.
        print(json.dumpsJSON(status_dict))
    else:
        click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
        for k, v in status_dict.items():
            if k == "Events":
                # Events are a list of dicts
                for event in v:
                    for item_k, item_v in event.items():
                        tabular_print(item_k, item_v)
            else:
                tabular_print(k, v)

    # TODO: S3/SQS/etc. type events?

    return True
def check_stage_name(self, stage_name):
    """
    Make sure the stage name matches the AWS-allowed pattern

    (calls to apigateway_client.create_deployment, will fail with error
    message "ClientError: An error occurred (BadRequestException) when
    calling the CreateDeployment operation: Stage name only allows
    a-zA-Z0-9_" if the pattern does not match)
    """
    # Only API Gateway deployments are constrained by the stage pattern.
    if self.use_apigateway and not self.stage_name_env_pattern.match(stage_name):
        raise ValueError(
            "API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(
                stage_name
            )
        )
    return True
def check_environment(self, environment):
    """
    Make sure the environment contains only strings

    (since putenv needs a string)

    Raises ValueError naming the offending keys; returns True otherwise.
    """
    # `basestring` is a Python 2 builtin that does not exist in Python 3
    # (this file is Python 3: it uses `input()` etc.) — use `str`, which
    # is exactly what putenv requires.
    non_strings = [k for (k, v) in environment.items() if not isinstance(v, str)]
    if non_strings:
        raise ValueError(
            "The following environment variables are not strings: {}".format(
                ", ".join(non_strings)
            )
        )
    else:
        return True
def init(self, settings_file="zappa_settings.json"):
    """
    Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.

    This should probably be broken up into few separate componants once it's stable.
    Testing these inputs requires monkeypatching with mock, which isn't pretty.
    """

    # Make sure we're in a venv.
    self.check_venv()

    # Ensure that we don't already have a zappa_settings file.
    if os.path.isfile(settings_file):
        raise ClickException(
            "This project already has a "
            + click.style("{0!s} file".format(settings_file), fg="red", bold=True)
            + "!"
        )

    # Explain system.
    click.echo(
        click.style(
            """\n███████╗ █████╗ ██████╗ ██████╗ █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
███╔╝ ███████║██████╔╝██████╔╝███████║
███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║ ██║██║ ██║ ██║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""",
            fg="green",
            bold=True,
        )
    )

    click.echo(
        click.style("Welcome to ", bold=True)
        + click.style("Zappa", fg="green", bold=True)
        + click.style("!\n", bold=True)
    )
    click.echo(
        click.style("Zappa", bold=True)
        + " is a system for running server-less Python web applications"
        " on AWS Lambda and AWS API Gateway."
    )
    click.echo(
        "This `init` command will help you create and configure your new Zappa deployment."
    )
    click.echo("Let's get started!\n")

    # Create Env: loop until the stage name passes check_stage_name().
    while True:
        click.echo(
            "Your Zappa configuration can support multiple production stages, like '"
            + click.style("dev", bold=True)
            + "', '"
            + click.style("staging", bold=True)
            + "', and '"
            + click.style("production", bold=True)
            + "'."
        )
        env = (
            input("What do you want to call this environment (default 'dev'): ")
            or "dev"
        )
        try:
            self.check_stage_name(env)
            break
        except ValueError:
            click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))

    # Detect AWS profiles and regions
    # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack
    session = botocore.session.Session()
    config = session.full_config
    profiles = config.get("profiles", {})
    profile_names = list(profiles.keys())
    click.echo(
        "\nAWS Lambda and API Gateway are only available in certain regions. "
        "Let's check to make sure you have a profile set up in one that will work."
    )

    if not profile_names:
        # No profile at all: warn and continue with None.
        profile_name, profile = None, None
        click.echo(
            "We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}".format(
                click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)
            )
        )
    elif len(profile_names) == 1:
        # Exactly one profile: use it without prompting.
        profile_name = profile_names[0]
        profile = profiles[profile_name]
        click.echo(
            "Okay, using profile {}!".format(click.style(profile_name, bold=True))
        )
    else:
        # Several profiles: prompt, preferring 'default' as the default.
        if "default" in profile_names:
            default_profile = [p for p in profile_names if p == "default"][0]
        else:
            default_profile = profile_names[0]

        while True:
            profile_name = (
                input(
                    "We found the following profiles: {}, and {}. "
                    "Which would you like us to use? (default '{}'): ".format(
                        ", ".join(profile_names[:-1]),
                        profile_names[-1],
                        default_profile,
                    )
                )
                or default_profile
            )
            if profile_name in profiles:
                profile = profiles[profile_name]
                break
            else:
                click.echo("Please enter a valid name for your AWS profile.")

    profile_region = profile.get("region") if profile else None

    # Create Bucket
    click.echo(
        "\nYour Zappa deployments will need to be uploaded to a "
        + click.style("private S3 bucket", bold=True)
        + "."
    )
    click.echo("If you don't have a bucket yet, we'll create one for you too.")
    default_bucket = "zappa-" + "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
    )
    while True:
        bucket = (
            input(
                "What do you want to call your bucket? (default '%s'): "
                % default_bucket
            )
            or default_bucket
        )

        if is_valid_bucket_name(bucket):
            break

        click.echo(click.style("Invalid bucket name!", bold=True))
        click.echo("S3 buckets must be named according to the following rules:")
        click.echo(
            """* Bucket names must be unique across all existing bucket names in Amazon S3.
* Bucket names must comply with DNS naming conventions.
* Bucket names must be at least 3 and no more than 63 characters long.
* Bucket names must not contain uppercase characters or underscores.
* Bucket names must start with a lowercase letter or number.
* Bucket names must be a series of one or more labels. Adjacent labels are separated
by a single period (.). Bucket names can contain lowercase letters, numbers, and
hyphens. Each label must start and end with a lowercase letter or a number.
* Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL
wildcard certificate only matches buckets that don't contain periods. To work around
this, use HTTP or write your own certificate verification logic. We recommend that
you do not use periods (".") in bucket names when using virtual hosted–style buckets.
"""
        )

    # Detect Django/Flask by whether the packages import in this venv.
    try:  # pragma: no cover
        import django

        has_django = True
    except ImportError as e:
        has_django = False

    try:  # pragma: no cover
        import flask

        has_flask = True
    except ImportError as e:
        has_flask = False

    print("")

    # App-specific
    if has_django:  # pragma: no cover
        click.echo(
            "It looks like this is a "
            + click.style("Django", bold=True)
            + " application!"
        )
        click.echo(
            "What is the "
            + click.style("module path", bold=True)
            + " to your projects's Django settings?"
        )
        django_settings = None

        matches = detect_django_settings()
        while django_settings in [None, ""]:
            if matches:
                click.echo(
                    "We discovered: "
                    + click.style(
                        ", ".join("{}".format(i) for v, i in enumerate(matches)),
                        bold=True,
                    )
                )
                django_settings = (
                    input(
                        "Where are your project's settings? (default '%s'): "
                        % matches[0]
                    )
                    or matches[0]
                )
            else:
                click.echo(
                    "(This will likely be something like 'your_project.settings')"
                )
                django_settings = input("Where are your project's settings?: ")
        # Strip stray quote characters users tend to paste in.
        django_settings = django_settings.replace("'", "")
        django_settings = django_settings.replace('"', "")
    else:
        matches = None
        if has_flask:
            click.echo(
                "It looks like this is a "
                + click.style("Flask", bold=True)
                + " application."
            )
            matches = detect_flask_apps()
        click.echo(
            "What's the "
            + click.style("modular path", bold=True)
            + " to your app's function?"
        )
        click.echo("This will likely be something like 'your_module.app'.")
        app_function = None
        while app_function in [None, ""]:
            if matches:
                click.echo(
                    "We discovered: "
                    + click.style(
                        ", ".join("{}".format(i) for v, i in enumerate(matches)),
                        bold=True,
                    )
                )
                app_function = (
                    input(
                        "Where is your app's function? (default '%s'): "
                        % matches[0]
                    )
                    or matches[0]
                )
            else:
                app_function = input("Where is your app's function?: ")
        # Strip stray quote characters users tend to paste in.
        app_function = app_function.replace("'", "")
        app_function = app_function.replace('"', "")

    # TODO: Create VPC?
    # Memory size? Time limit?
    # Domain? LE keys? Region?
    # 'Advanced Settings' mode?

    # Globalize
    click.echo(
        "\nYou can optionally deploy to "
        + click.style("all available regions", bold=True)
        + " in order to provide fast global service."
    )
    click.echo(
        "If you are using Zappa for the first time, you probably don't want to do this!"
    )
    global_deployment = False
    while True:
        global_type = input(
            "Would you like to deploy this application "
            + click.style("globally", bold=True)
            + "? (default 'n') [y/n/(p)rimary]: "
        )
        if not global_type:
            break
        if global_type.lower() in ["y", "yes", "p", "primary"]:
            global_deployment = True
            break
        if global_type.lower() in ["n", "no"]:
            global_deployment = False
            break

    # The given environment name
    zappa_settings = {
        env: {
            "profile_name": profile_name,
            "s3_bucket": bucket,
            "runtime": get_venv_from_python_version(),
            "project_name": self.get_project_name(),
        }
    }

    if profile_region:
        zappa_settings[env]["aws_region"] = profile_region

    if has_django:
        zappa_settings[env]["django_settings"] = django_settings
    else:
        zappa_settings[env]["app_function"] = app_function

    # Global Region Deployment
    if global_deployment:
        additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region]
        # Create additional stages; "primary" keeps only the *-1 regions.
        if global_type.lower() in ["p", "primary"]:
            additional_regions = [r for r in additional_regions if "-1" in r]

        for region in additional_regions:
            env_name = env + "_" + region.replace("-", "_")
            g_env = {env_name: {"extends": env, "aws_region": region}}
            zappa_settings.update(g_env)

    import json as json  # hjson is fine for loading, not fine for writing.

    zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)

    click.echo(
        "\nOkay, here's your "
        + click.style("zappa_settings.json", bold=True)
        + ":\n"
    )
    click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))

    confirm = (
        input(
            "\nDoes this look "
            + click.style("okay", bold=True, fg="green")
            + "? (default 'y') [y/n]: "
        )
        or "yes"
    )
    # NOTE(review): only the first character is inspected, so the "yes"/"YES"
    # entries in this list are unreachable — harmless, but worth confirming.
    if confirm[0] not in ["y", "Y", "yes", "YES"]:
        click.echo(
            ""
            + click.style("Sorry", bold=True, fg="red")
            + " to hear that! Please init again."
        )
        return

    # Write
    with open("zappa_settings.json", "w") as zappa_settings_file:
        zappa_settings_file.write(zappa_settings_json)

    if global_deployment:
        click.echo(
            "\n"
            + click.style("Done", bold=True)
            + "! You can also "
            + click.style("deploy all", bold=True)
            + " by executing:\n"
        )
        click.echo(click.style("\t$ zappa deploy --all", bold=True))

        click.echo(
            "\nAfter that, you can "
            + click.style("update", bold=True)
            + " your application code with:\n"
        )
        click.echo(click.style("\t$ zappa update --all", bold=True))
    else:
        click.echo(
            "\n"
            + click.style("Done", bold=True)
            + "! Now you can "
            + click.style("deploy", bold=True)
            + " your Zappa application by executing:\n"
        )
        click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))

        click.echo(
            "\nAfter that, you can "
            + click.style("update", bold=True)
            + " your application code with:\n"
        )
        click.echo(click.style("\t$ zappa update %s" % env, bold=True))

    click.echo(
        "\nTo learn more, check out our project page on "
        + click.style("GitHub", bold=True)
        + " here: "
        + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True)
    )
    click.echo(
        "and stop by our "
        + click.style("Slack", bold=True)
        + " channel here: "
        + click.style("https://zappateam.slack.com", fg="cyan", bold=True)
    )
    click.echo("\nEnjoy!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
    return
def certify(self, no_confirm=True, manual=False):
    """
    Register or update a domain certificate for this env.

    Chooses one of three strategies based on stage config:
    Let's Encrypt (`lets_encrypt_key`), user-supplied certificate files
    (`certificate`/`certificate_key`/`certificate_chain`), or an ACM
    certificate (`certificate_arn`).
    """

    if not self.domain:
        raise ClickException(
            "Can't certify a domain without "
            + click.style("domain", fg="red", bold=True)
            + " configured!"
        )

    if not no_confirm:  # pragma: no cover
        confirm = input("Are you sure you want to certify? [y/n] ")
        if confirm != "y":
            return

    # Make sure this isn't already deployed.
    deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
    if len(deployed_versions) == 0:
        raise ClickException(
            "This application "
            + click.style("isn't deployed yet", fg="red")
            + " - did you mean to call "
            + click.style("deploy", bold=True)
            + "?"
        )

    # Certificate-related stage configuration.
    account_key_location = self.stage_config.get("lets_encrypt_key", None)
    cert_location = self.stage_config.get("certificate", None)
    cert_key_location = self.stage_config.get("certificate_key", None)
    cert_chain_location = self.stage_config.get("certificate_chain", None)
    cert_arn = self.stage_config.get("certificate_arn", None)
    base_path = self.stage_config.get("base_path", None)

    # These are sensitive
    certificate_body = None
    certificate_private_key = None
    certificate_chain = None

    # Prepare for custom Let's Encrypt
    if not cert_location and not cert_arn:
        if not account_key_location:
            raise ClickException(
                "Can't certify a domain without "
                + click.style("lets_encrypt_key", fg="red", bold=True)
                + " or "
                + click.style("certificate", fg="red", bold=True)
                + " or "
                + click.style("certificate_arn", fg="red", bold=True)
                + " configured!"
            )

        # Get install account_key to /tmp/account_key.pem
        from .letsencrypt import gettempdir

        if account_key_location.startswith("s3://"):
            bucket, key_name = parse_s3_url(account_key_location)
            self.zappa.s3_client.download_file(
                bucket, key_name, os.path.join(gettempdir(), "account.key")
            )
        else:
            from shutil import copyfile

            copyfile(
                account_key_location, os.path.join(gettempdir(), "account.key")
            )

    # Prepare for Custom SSL
    elif not account_key_location and not cert_arn:
        if not cert_location or not cert_key_location or not cert_chain_location:
            raise ClickException(
                "Can't certify a domain without "
                + click.style(
                    "certificate, certificate_key and certificate_chain",
                    fg="red",
                    bold=True,
                )
                + " configured!"
            )

        # Read the supplied certificates.
        with open(cert_location) as f:
            certificate_body = f.read()

        with open(cert_key_location) as f:
            certificate_private_key = f.read()

        with open(cert_chain_location) as f:
            certificate_chain = f.read()

    click.echo(
        "Certifying domain "
        + click.style(self.domain, fg="green", bold=True)
        + ".."
    )

    # Get cert and update domain.

    # Let's Encrypt
    if not cert_location and not cert_arn:
        from .letsencrypt import get_cert_and_update_domain

        cert_success = get_cert_and_update_domain(
            self.zappa, self.lambda_name, self.api_stage, self.domain, manual
        )

    # Custom SSL / ACM
    else:
        route53 = self.stage_config.get("route53_enabled", True)
        if not self.zappa.get_domain_name(self.domain, route53=route53):
            # First-time setup: create the domain name (and optionally DNS).
            dns_name = self.zappa.create_domain_name(
                domain_name=self.domain,
                certificate_name=self.domain + "-Zappa-Cert",
                certificate_body=certificate_body,
                certificate_private_key=certificate_private_key,
                certificate_chain=certificate_chain,
                certificate_arn=cert_arn,
                lambda_name=self.lambda_name,
                stage=self.api_stage,
                base_path=base_path,
            )
            if route53:
                self.zappa.update_route53_records(self.domain, dns_name)
            print(
                "Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be "
                "created and propagated through AWS, but it requires no further work on your part."
            )
        else:
            # Domain already exists: just rotate the certificate.
            self.zappa.update_domain_name(
                domain_name=self.domain,
                certificate_name=self.domain + "-Zappa-Cert",
                certificate_body=certificate_body,
                certificate_private_key=certificate_private_key,
                certificate_chain=certificate_chain,
                certificate_arn=cert_arn,
                lambda_name=self.lambda_name,
                stage=self.api_stage,
                route53=route53,
                base_path=base_path,
            )

        cert_success = True

    if cert_success:
        click.echo(
            "Certificate " + click.style("updated", fg="green", bold=True) + "!"
        )
    else:
        click.echo(
            click.style("Failed", fg="red", bold=True)
            + " to generate or install certificate! :("
        )
        click.echo("\n==============\n")
        shamelessly_promote()
##
# Shell
##
def shell(self):
    """
    Spawn a debug shell.
    """
    banner = "".join(
        [
            click.style("NOTICE!", fg="yellow", bold=True),
            " This is a ",
            click.style("local", fg="green", bold=True),
            " shell, inside a ",
            click.style("Zappa", bold=True),
            " object!",
        ]
    )
    click.echo(banner)
    self.zappa.shell()
    return
##
# Utility
##
def callback(self, position):
    """
    Allows the execution of custom code between creation of the zip file and deployment to AWS.

    :param position: key into the stage's "callbacks" config (e.g. "zip", "post").
    :return: None
    """
    callbacks = self.stage_config.get("callbacks", {})
    callback = callbacks.get(position)

    if callback:
        (mod_path, cb_func_name) = callback.rsplit(".", 1)

        try:  # Prefer callback in working directory
            if mod_path.count(".") >= 1:  # Callback function is nested in a folder
                (mod_folder_path, mod_name) = mod_path.rsplit(".", 1)
                mod_folder_path_fragments = mod_folder_path.split(".")
                working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
            else:
                mod_name = mod_path
                working_dir = os.getcwd()

            # `finder.find_module(...).load_module(...)` was deprecated since
            # Python 3.4 and removed in 3.12 — use the spec-based protocol.
            import importlib.util

            working_dir_importer = pkgutil.get_importer(working_dir)
            spec = working_dir_importer.find_spec(mod_name)
            if spec is None:
                raise ImportError(mod_name)
            module_ = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module_)

        except (ImportError, AttributeError):

            try:  # Callback func might be in virtualenv
                module_ = importlib.import_module(mod_path)
            except ImportError:  # pragma: no cover
                raise ClickException(
                    click.style("Failed ", fg="red")
                    + "to "
                    + click.style(
                        "import {position} callback ".format(position=position),
                        bold=True,
                    )
                    + 'module: "{mod_path}"'.format(
                        mod_path=click.style(mod_path, bold=True)
                    )
                )

        if not hasattr(module_, cb_func_name):  # pragma: no cover
            raise ClickException(
                click.style("Failed ", fg="red")
                + "to "
                + click.style(
                    "find {position} callback ".format(position=position), bold=True
                )
                + 'function: "{cb_func_name}" '.format(
                    cb_func_name=click.style(cb_func_name, bold=True)
                )
                + 'in module "{mod_path}"'.format(mod_path=mod_path)
            )

        cb_func = getattr(module_, cb_func_name)
        cb_func(self)  # Call the function passing self
def check_for_update(self):
    """
    Print a warning if there's a new Zappa version available.
    """
    try:
        installed_version = pkg_resources.require("zappa")[0].version
        if not check_new_version_available(installed_version):
            return
        click.echo(
            click.style("Important!", fg="yellow", bold=True)
            + " A new version of "
            + click.style("Zappa", bold=True)
            + " is available!"
        )
        click.echo(
            "Upgrade with: "
            + click.style("pip install zappa --upgrade", bold=True)
        )
        click.echo(
            "Visit the project page on GitHub to see the latest changes: "
            + click.style("https://github.com/Zappa/Zappa", bold=True)
        )
    except Exception as e:  # pragma: no cover
        # Best-effort check: never let a version lookup break the CLI.
        print(e)
    return
def load_settings(self, settings_file=None, session=None):
    """
    Load the local zappa_settings file.

    An existing boto session can be supplied, though this is likely for testing purposes.

    Returns the loaded Zappa object.
    """

    # Ensure we're passed a valid settings file.
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException("Please configure your zappa_settings file.")

    # Load up file
    self.load_settings_file(settings_file)

    # Make sure that this stage is our settings
    if self.api_stage not in self.zappa_settings.keys():
        raise ClickException(
            "Please define stage '{0!s}' in your Zappa settings.".format(
                self.api_stage
            )
        )

    # We need a working title for this project. Use one if supplied, else cwd dirname.
    if "project_name" in self.stage_config:  # pragma: no cover
        # If the name is invalid, this will throw an exception with message up stack
        self.project_name = validate_name(self.stage_config["project_name"])
    else:
        self.project_name = self.get_project_name()

    # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
    # Assume that we already have validated the name beforehand.
    # Related: https://github.com/Miserlou/Zappa/pull/664
    #          https://github.com/Miserlou/Zappa/issues/678
    # And various others from Slack.
    self.lambda_name = slugify.slugify(self.project_name + "-" + self.api_stage)

    # Load stage-specific settings
    self.s3_bucket_name = self.stage_config.get(
        "s3_bucket",
        "zappa-"
        + "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
        ),
    )
    self.vpc_config = self.stage_config.get("vpc_config", {})
    self.memory_size = self.stage_config.get("memory_size", 512)
    self.app_function = self.stage_config.get("app_function", None)
    self.exception_handler = self.stage_config.get("exception_handler", None)
    self.aws_region = self.stage_config.get("aws_region", None)
    self.debug = self.stage_config.get("debug", True)
    self.prebuild_script = self.stage_config.get("prebuild_script", None)
    self.profile_name = self.stage_config.get("profile_name", None)
    self.log_level = self.stage_config.get("log_level", "DEBUG")
    self.domain = self.stage_config.get("domain", None)
    self.base_path = self.stage_config.get("base_path", None)
    self.timeout_seconds = self.stage_config.get("timeout_seconds", 30)
    dead_letter_arn = self.stage_config.get("dead_letter_arn", "")
    self.dead_letter_config = (
        {"TargetArn": dead_letter_arn} if dead_letter_arn else {}
    )
    self.cognito = self.stage_config.get("cognito", None)
    self.num_retained_versions = self.stage_config.get(
        "num_retained_versions", None
    )

    # Check for valid values of num_retained_versions
    if (
        self.num_retained_versions is not None
        and type(self.num_retained_versions) is not int
    ):
        raise ClickException(
            "Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s"
            % type(self.num_retained_versions)
        )
    elif type(self.num_retained_versions) is int and self.num_retained_versions < 1:
        raise ClickException(
            "The value for num_retained_versions in the zappa_settings.json should be greater than 0."
        )

    # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
    # https://github.com/Miserlou/Zappa/issues/490
    # https://github.com/Miserlou/Zappa/issues/493
    self.use_apigateway = self.stage_config.get("use_apigateway", True)
    if self.use_apigateway:
        self.use_apigateway = self.stage_config.get("apigateway_enabled", True)
        self.apigateway_description = self.stage_config.get(
            "apigateway_description", None
        )

    self.lambda_handler = self.stage_config.get(
        "lambda_handler", "handler.lambda_handler"
    )
    # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
    self.remote_env_bucket = self.stage_config.get("remote_env_bucket", None)
    self.remote_env_file = self.stage_config.get("remote_env_file", None)
    self.remote_env = self.stage_config.get("remote_env", None)
    self.settings_file = self.stage_config.get("settings_file", None)
    self.django_settings = self.stage_config.get("django_settings", None)
    self.manage_roles = self.stage_config.get("manage_roles", True)
    self.binary_support = self.stage_config.get("binary_support", True)
    self.api_key_required = self.stage_config.get("api_key_required", False)
    self.api_key = self.stage_config.get("api_key")
    self.endpoint_configuration = self.stage_config.get(
        "endpoint_configuration", None
    )
    self.iam_authorization = self.stage_config.get("iam_authorization", False)
    self.cors = self.stage_config.get("cors", False)
    self.lambda_description = self.stage_config.get(
        "lambda_description", "Zappa Deployment"
    )
    self.lambda_concurrency = self.stage_config.get("lambda_concurrency", None)
    self.environment_variables = self.stage_config.get("environment_variables", {})
    self.aws_environment_variables = self.stage_config.get(
        "aws_environment_variables", {}
    )
    # Environment variables must all be strings (putenv requirement).
    self.check_environment(self.environment_variables)
    self.authorizer = self.stage_config.get("authorizer", {})
    self.runtime = self.stage_config.get(
        "runtime", get_runtime_from_python_version()
    )
    self.aws_kms_key_arn = self.stage_config.get("aws_kms_key_arn", "")
    self.context_header_mappings = self.stage_config.get(
        "context_header_mappings", {}
    )
    self.xray_tracing = self.stage_config.get("xray_tracing", False)
    self.desired_role_arn = self.stage_config.get("role_arn")
    self.layers = self.stage_config.get("layers", None)

    # Load ALB-related settings
    self.use_alb = self.stage_config.get("alb_enabled", False)
    self.alb_vpc_config = self.stage_config.get("alb_vpc_config", {})

    # Additional tags
    self.tags = self.stage_config.get("tags", {})

    # Construct the core Zappa object from the resolved settings.
    desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole"
    self.zappa = Zappa(
        boto_session=session,
        profile_name=self.profile_name,
        aws_region=self.aws_region,
        load_credentials=self.load_credentials,
        desired_role_name=desired_role_name,
        desired_role_arn=self.desired_role_arn,
        runtime=self.runtime,
        tags=self.tags,
        endpoint_urls=self.stage_config.get("aws_endpoint_urls", {}),
        xray_tracing=self.xray_tracing,
    )

    for setting in CUSTOM_SETTINGS:
        if setting in self.stage_config:
            setting_val = self.stage_config[setting]
            # Read the policy file contents.
            if setting.endswith("policy"):
                with open(setting_val, "r") as f:
                    setting_val = f.read()
            setattr(self.zappa, setting, setting_val)

    if self.app_function:
        self.collision_warning(self.app_function)
        if self.app_function[-3:] == ".py":
            click.echo(
                click.style("Warning!", fg="red", bold=True)
                + " Your app_function is pointing to a "
                + click.style("file and not a function", bold=True)
                + "! It should probably be something like 'my_file.app', not 'my_file.py'!"
            )

    return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
    """
    Return zappa_settings path as JSON or YAML (or TOML), as appropriate.

    Preference order when several exist: .json, .toml, .yml, .yaml.
    Raises ClickException when none are present.
    """
    # Extensions in priority order (JSON preferred).
    preference = (".json", ".toml", ".yml", ".yaml")
    existing = [
        settings_name + ext
        for ext in preference
        if os.path.isfile(settings_name + ext)
    ]
    # Must have at least one
    if not existing:
        raise ClickException(
            "Please configure a zappa_settings file or call `zappa init`."
        )
    return existing[0]
def load_settings_file(self, settings_file=None):
"""
Load our settings file.
"""
if not settings_file:
settings_file = self.get_json_or_yaml_settings()
if not os.path.isfile(settings_file):
raise ClickException(
"Please configure your zappa_settings file or call `zappa init`."
)
path, ext = os.path.splitext(settings_file)
if ext == ".yml" or ext == ".yaml":
with open(settings_file) as yaml_file:
try:
self.zappa_settings = yaml.safe_load(yaml_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings YAML. It may be malformed."
)
elif ext == ".toml":
with open(settings_file) as toml_file:
try:
self.zappa_settings = toml.load(toml_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings TOML. It may be malformed."
)
else:
with open(settings_file) as json_file:
try:
self.zappa_settings = json.load(json_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings JSON. It may be malformed."
)
    def create_package(self, output=None):
        """
        Ensure that the package can be properly configured,
        and then create it.

        output: optional explicit path for the generated archive.
        Sets self.zip_path (and self.handler_path when slim_handler is on).
        """
        # Create the Lambda zip package (includes project and virtualenvironment)
        # Also define the path the handler file so it can be copied to the zip
        # root for Lambda.
        current_file = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe()))
        )
        # NOTE(review): the split/join on os.sep below is a no-op on current_file;
        # net effect is simply "handler.py next to this module" — verify before simplifying.
        handler_file = (
            os.sep.join(current_file.split(os.sep)[0:]) + os.sep + "handler.py"
        )
        # Create the zip file(s)
        if self.stage_config.get("slim_handler", False):
            # Create two zips. One with the application and the other with just the handler.
            # https://github.com/Miserlou/Zappa/issues/510
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                use_precompiled_packages=self.stage_config.get(
                    "use_precompiled_packages", True
                ),
                exclude=self.stage_config.get("exclude", []),
                exclude_glob=self.stage_config.get("exclude_glob", []),
                disable_progress=self.disable_progress,
                archive_format="tarball",
            )
            # Make sure the normal venv is not included in the handler's zip
            # NOTE(review): when stage_config has an "exclude" key this appends to the
            # list object stored in the config — the mutation is visible to later
            # readers of stage_config; confirm that is intended.
            exclude = self.stage_config.get("exclude", [])
            cur_venv = self.zappa.get_current_venv()
            exclude.append(cur_venv.split("/")[-1])
            self.handler_path = self.zappa.create_lambda_zip(
                prefix="handler_{0!s}".format(self.lambda_name),
                venv=self.zappa.create_handler_venv(),
                handler_file=handler_file,
                slim_handler=True,
                exclude=exclude,
                exclude_glob=self.stage_config.get("exclude_glob", []),
                output=output,
                disable_progress=self.disable_progress,
            )
        else:
            # This could be python3.6 optimized.
            exclude = self.stage_config.get(
                "exclude", ["boto3", "dateutil", "botocore", "s3transfer", "concurrent"]
            )
            # Create a single zip that has the handler and application
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                handler_file=handler_file,
                use_precompiled_packages=self.stage_config.get(
                    "use_precompiled_packages", True
                ),
                exclude=exclude,
                exclude_glob=self.stage_config.get("exclude_glob", []),
                output=output,
                disable_progress=self.disable_progress,
            )
            # Warn if this is too large for Lambda.
            # 52428800 bytes == 50 MB, Lambda's direct-upload package limit.
            file_stats = os.stat(self.zip_path)
            if file_stats.st_size > 52428800:  # pragma: no cover
                print(
                    "\n\nWarning: Application zip package is likely to be too large for AWS Lambda. "
                    'Try setting "slim_handler" to true in your Zappa settings file.\n\n'
                )
        # Throw custom settings into the zip that handles requests
        if self.stage_config.get("slim_handler", False):
            handler_zip = self.handler_path
        else:
            handler_zip = self.zip_path
        with zipfile.ZipFile(handler_zip, "a") as lambda_zip:
            settings_s = self.get_zappa_settings_string()
            # Copy our Django app into root of our package.
            # It doesn't work otherwise.
            if self.django_settings:
                base = __file__.rsplit(os.sep, 1)[0]
                django_py = "".join(os.path.join(base, "ext", "django_zappa.py"))
                lambda_zip.write(django_py, "django_zappa_app.py")
            # Lambda requires a specific chmod
            temp_settings = tempfile.NamedTemporaryFile(delete=False)
            os.chmod(temp_settings.name, 0o644)
            temp_settings.write(bytes(settings_s, "utf-8"))
            temp_settings.close()
            lambda_zip.write(temp_settings.name, "zappa_settings.py")
            os.unlink(temp_settings.name)
    def get_zappa_settings_string(self):
        """
        Build the source of the generated ``zappa_settings.py`` module that is
        injected into the Lambda package and read by the handler at runtime.

        Returns the settings module source as a single string.
        Raises ClickException for a non-modular app_function and ValueError
        for non-ascii environment variable keys.
        """
        settings_s = "# Generated by Zappa\n"
        if self.app_function:
            # The handler imports the WSGI app, so it needs a modular path.
            if "." not in self.app_function:  # pragma: no cover
                raise ClickException(
                    "Your "
                    + click.style("app_function", fg="red", bold=True)
                    + " value is not a modular path."
                    + " It needs to be in the format `"
                    + click.style("your_module.your_app_object", bold=True)
                    + "`."
                )
            app_module, app_function = self.app_function.rsplit(".", 1)
            settings_s = (
                settings_s
                + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(
                    app_module, app_function
                )
            )
        if self.exception_handler:
            settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
        else:
            settings_s += "EXCEPTION_HANDLER=None\n"
        if self.debug:
            settings_s = settings_s + "DEBUG=True\n"
        else:
            settings_s = settings_s + "DEBUG=False\n"
        settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))
        if self.binary_support:
            settings_s = settings_s + "BINARY_SUPPORT=True\n"
        else:
            settings_s = settings_s + "BINARY_SUPPORT=False\n"
        # API Gateway context properties to forward as HTTP headers.
        head_map_dict = {}
        head_map_dict.update(dict(self.context_header_mappings))
        settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(head_map_dict)
        # If we're on a domain, we don't need to define the /<<env>> in
        # the WSGI PATH
        if self.domain:
            settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
        else:
            settings_s = settings_s + "DOMAIN=None\n"
        if self.base_path:
            settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
        else:
            settings_s = settings_s + "BASE_PATH=None\n"
        # Pass through remote config bucket and path
        if self.remote_env:
            settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(self.remote_env)
        # DEPRECATED: use remote_env instead
        elif self.remote_env_bucket and self.remote_env_file:
            settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                self.remote_env_bucket, self.remote_env_file
            )
        # Local envs
        env_dict = {}
        if self.aws_region:
            env_dict["AWS_REGION"] = self.aws_region
        env_dict.update(dict(self.environment_variables))
        # Environment variable keys must be ascii
        # https://github.com/Miserlou/Zappa/issues/604
        # https://github.com/Miserlou/Zappa/issues/998
        try:
            env_dict = dict(
                (k.encode("ascii").decode("ascii"), v) for (k, v) in env_dict.items()
            )
        except Exception:
            raise ValueError("Environment variable keys must be ascii.")
        settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(env_dict)
        # We can be environment-aware
        settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
        settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
        if self.settings_file:
            settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format(
                (self.settings_file)
            )
        else:
            settings_s = settings_s + "SETTINGS_FILE=None\n"
        if self.django_settings:
            settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format(
                (self.django_settings)
            )
        else:
            settings_s = settings_s + "DJANGO_SETTINGS=None\n"
        # If slim handler, path to project zip
        if self.stage_config.get("slim_handler", False):
            settings_s += (
                "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                    self.s3_bucket_name, self.api_stage, self.project_name
                )
            )
            # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
            # and tell the handler we are the slim_handler
            # https://github.com/Miserlou/Zappa/issues/776
            settings_s += "SLIM_HANDLER=True\n"
            include = self.stage_config.get("include", [])
            if len(include) >= 1:
                settings_s += "INCLUDE=" + str(include) + "\n"
        # AWS Events function mapping: event-source ARN -> function to invoke.
        event_mapping = {}
        events = self.stage_config.get("events", [])
        for event in events:
            arn = event.get("event_source", {}).get("arn")
            function = event.get("function")
            if arn and function:
                event_mapping[arn] = function
        settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
        # Map Lex bot events
        bot_events = self.stage_config.get("bot_events", [])
        bot_events_mapping = {}
        for bot_event in bot_events:
            event_source = bot_event.get("event_source", {})
            intent = event_source.get("intent")
            invocation_source = event_source.get("invocation_source")
            function = bot_event.get("function")
            if intent and invocation_source and function:
                # Keyed as "<intent>:<invocation_source>" for lookup by the handler.
                bot_events_mapping[
                    str(intent) + ":" + str(invocation_source)
                ] = function
        settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(
            bot_events_mapping
        )
        # Map cognito triggers: trigger source -> function.
        cognito_trigger_mapping = {}
        cognito_config = self.stage_config.get("cognito", {})
        triggers = cognito_config.get("triggers", [])
        for trigger in triggers:
            source = trigger.get("source")
            function = trigger.get("function")
            if source and function:
                cognito_trigger_mapping[source] = function
        settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(
            cognito_trigger_mapping
        )
        # Authorizer config
        authorizer_function = self.authorizer.get("function", None)
        if authorizer_function:
            settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
        # async response
        async_response_table = self.stage_config.get("async_response_table", "")
        settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)
        return settings_s
def remove_local_zip(self):
"""
Remove our local zip file.
"""
if self.stage_config.get("delete_local_zip", True):
try:
if os.path.isfile(self.zip_path):
os.remove(self.zip_path)
if self.handler_path and os.path.isfile(self.handler_path):
os.remove(self.handler_path)
except Exception as e: # pragma: no cover
sys.exit(-1)
def remove_uploaded_zip(self):
"""
Remove the local and S3 zip file after uploading and updating.
"""
# Remove the uploaded zip from S3, because it is now registered..
if self.stage_config.get("delete_s3_zip", True):
self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
if self.stage_config.get("slim_handler", False):
# Need to keep the project zip as the slim handler uses it.
self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
# Only try to remove uploaded zip if we're running a command that has loaded credentials
if self.load_credentials:
self.remove_uploaded_zip()
self.remove_local_zip()
def print_logs(
self, logs, colorize=True, http=False, non_http=False, force_colorize=None
):
"""
Parse, filter and print logs to the console.
"""
for log in logs:
timestamp = log["timestamp"]
message = log["message"]
if "START RequestId" in message:
continue
if "REPORT RequestId" in message:
continue
if "END RequestId" in message:
continue
if not colorize and not force_colorize:
if http:
if self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
elif non_http:
if not self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
else:
print("[" + str(timestamp) + "] " + message.strip())
else:
if http:
if self.is_http_log_entry(message.strip()):
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
elif non_http:
if not self.is_http_log_entry(message.strip()):
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
else:
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
def is_http_log_entry(self, string):
"""
Determines if a log entry is an HTTP-formatted log string or not.
"""
# Debug event filter
if "Zappa Event" in string:
return False
# IP address filter
for token in string.replace("\t", " ").split(" "):
try:
if token.count(".") == 3 and token.replace(".", "").isnumeric():
return True
except Exception: # pragma: no cover
pass
return False
def get_project_name(self):
return slugify.slugify(os.getcwd().split(os.sep)[-1])[:15]
    def colorize_log_entry(self, string):
        """
        Apply various heuristics to return a colorized version of a string.
        If these fail, simply return the string in plaintext.
        """
        final_string = string
        try:
            # First, do stuff in square brackets
            inside_squares = re.findall(r"\[([^]]*)\]", string)
            for token in inside_squares:
                if token in ["CRITICAL", "ERROR", "WARNING", "DEBUG", "INFO", "NOTSET"]:
                    # Log level names get a bold cyan token inside cyan brackets.
                    final_string = final_string.replace(
                        "[" + token + "]",
                        click.style("[", fg="cyan")
                        + click.style(token, fg="cyan", bold=True)
                        + click.style("]", fg="cyan"),
                    )
                else:
                    final_string = final_string.replace(
                        "[" + token + "]",
                        click.style("[", fg="cyan")
                        + click.style(token, bold=True)
                        + click.style("]", fg="cyan"),
                    )
            # Then do quoted strings
            quotes = re.findall(r'"[^"]*"', string)
            for token in quotes:
                final_string = final_string.replace(
                    token, click.style(token, fg="yellow")
                )
            # And UUIDs
            for token in final_string.replace("\t", " ").split(" "):
                try:
                    # Four dashes with an alphanumeric remainder ~ a UUID.
                    if token.count("-") == 4 and token.replace("-", "").isalnum():
                        final_string = final_string.replace(
                            token, click.style(token, fg="magenta")
                        )
                except Exception:  # pragma: no cover
                    pass
                # And IP addresses
                try:
                    # Dotted quad of digits ~ an IP address.
                    if token.count(".") == 3 and token.replace(".", "").isnumeric():
                        final_string = final_string.replace(
                            token, click.style(token, fg="red")
                        )
                except Exception:  # pragma: no cover
                    pass
                # And status codes
                try:
                    if token in ["200"]:
                        final_string = final_string.replace(
                            token, click.style(token, fg="green")
                        )
                    if token in ["400", "401", "403", "404", "405", "500"]:
                        final_string = final_string.replace(
                            token, click.style(token, fg="red")
                        )
                except Exception:  # pragma: no cover
                    pass
            # And Zappa Events
            try:
                if "Zappa Event:" in final_string:
                    final_string = final_string.replace(
                        "Zappa Event:",
                        click.style("Zappa Event:", bold=True, fg="green"),
                    )
            except Exception:  # pragma: no cover
                pass
            # And dates
            # NOTE(review): dateutil's parser is very permissive, so this is a
            # best-effort highlight — many non-date tokens will simply raise
            # and be skipped via the except below.
            for token in final_string.split("\t"):
                try:
                    is_date = parser.parse(token)
                    final_string = final_string.replace(
                        token, click.style(token, fg="green")
                    )
                except Exception:  # pragma: no cover
                    pass
            # NOTE(review): replace(" ", " ") below is a no-op as written —
            # possibly intended to collapse double spaces; confirm before changing.
            final_string = final_string.replace("\t", " ").replace(" ", " ")
            # Ensure a leading space so entries line up after the timestamp.
            if final_string[0] != " ":
                final_string = " " + final_string
            return final_string
        except Exception as e:  # pragma: no cover
            # Any unexpected failure falls back to the uncolored input.
            return string
def execute_prebuild_script(self):
"""
Parse and execute the prebuild_script from the zappa_settings.
"""
(pb_mod_path, pb_func) = self.prebuild_script.rsplit(".", 1)
try: # Prefer prebuild script in working directory
if (
pb_mod_path.count(".") >= 1
): # Prebuild script func is nested in a folder
(mod_folder_path, mod_name) = pb_mod_path.rsplit(".", 1)
mod_folder_path_fragments = mod_folder_path.split(".")
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = pb_mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
except (ImportError, AttributeError):
try: # Prebuild func might be in virtualenv
module_ = importlib.import_module(pb_mod_path)
except ImportError: # pragma: no cover
raise ClickException(
click.style("Failed ", fg="red")
+ "to "
+ click.style("import prebuild script ", bold=True)
+ 'module: "{pb_mod_path}"'.format(
pb_mod_path=click.style(pb_mod_path, bold=True)
)
)
if not hasattr(module_, pb_func): # pragma: no cover
raise ClickException(
click.style("Failed ", fg="red")
+ "to "
+ click.style("find prebuild script ", bold=True)
+ 'function: "{pb_func}" '.format(
pb_func=click.style(pb_func, bold=True)
)
+ 'in module "{pb_mod_path}"'.format(pb_mod_path=pb_mod_path)
)
prebuild_function = getattr(module_, pb_func)
prebuild_function() # Call the function
def collision_warning(self, item):
"""
Given a string, print a warning if this could
collide with a Zappa core package module.
Use for app functions and events.
"""
namespace_collisions = [
"zappa.",
"wsgi.",
"middleware.",
"handler.",
"util.",
"letsencrypt.",
"cli.",
]
for namespace_collision in namespace_collisions:
if item.startswith(namespace_collision):
click.echo(
click.style("Warning!", fg="red", bold=True)
+ " You may have a namespace collision between "
+ click.style(item, bold=True)
+ " and "
+ click.style(namespace_collision, bold=True)
+ "! You may want to rename that file."
)
def deploy_api_gateway(self, api_id):
cache_cluster_enabled = self.stage_config.get("cache_cluster_enabled", False)
cache_cluster_size = str(self.stage_config.get("cache_cluster_size", 0.5))
endpoint_url = self.zappa.deploy_api_gateway(
api_id=api_id,
stage_name=self.api_stage,
cache_cluster_enabled=cache_cluster_enabled,
cache_cluster_size=cache_cluster_size,
cloudwatch_log_level=self.stage_config.get("cloudwatch_log_level", "OFF"),
cloudwatch_data_trace=self.stage_config.get("cloudwatch_data_trace", False),
cloudwatch_metrics_enabled=self.stage_config.get(
"cloudwatch_metrics_enabled", False
),
cache_cluster_ttl=self.stage_config.get("cache_cluster_ttl", 300),
cache_cluster_encrypted=self.stage_config.get(
"cache_cluster_encrypted", False
),
)
return endpoint_url
def check_venv(self):
"""Ensure we're inside a virtualenv."""
if self.vargs and self.vargs.get("no_venv"):
return
if self.zappa:
venv = self.zappa.get_current_venv()
else:
# Just for `init`, when we don't have settings yet.
venv = Zappa.get_current_venv()
if not venv:
raise ClickException(
click.style("Zappa", bold=True)
+ " requires an "
+ click.style("active virtual environment", bold=True, fg="red")
+ "!\n"
+ "Learn more about virtual environments here: "
+ click.style(
"http://docs.python-guide.org/en/latest/dev/virtualenvs/",
bold=False,
fg="cyan",
)
)
def silence(self):
"""
Route all stdout to null.
"""
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
    def touch_endpoint(self, endpoint_url):
        """
        Test the deployed endpoint with a GET request.

        endpoint_url: base URL of the deployed API; the stage's "touch_path"
        setting (default "/") is appended.
        Raises ClickException if the endpoint ultimately responds with >= 500.
        """
        # Private APIGW endpoints most likely can't be reached by a deployer
        # unless they're connected to the VPC by VPN. Instead of trying
        # connect to the service, print a warning and let the user know
        # to check it manually.
        # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565
        if "PRIVATE" in self.stage_config.get("endpoint_configuration", []):
            print(
                click.style("Warning!", fg="yellow", bold=True)
                + " Since you're deploying a private API Gateway endpoint,"
                " Zappa cannot determine if your function is returning "
                " a correct status code. You should check your API's response"
                " manually before considering this deployment complete."
            )
            return
        touch_path = self.stage_config.get("touch_path", "/")
        req = requests.get(endpoint_url + touch_path)
        # Sometimes on really large packages, it can take 60-90 secs to be
        # ready and requests will return 504 status_code until ready.
        # So, if we get a 504 status code, re-request up to 5 more times
        # (i runs 0..4 inclusive) or until we stop getting a 504 error.
        if req.status_code == 504:
            i = 0
            status_code = 504
            while status_code == 504 and i <= 4:
                req = requests.get(endpoint_url + touch_path)
                status_code = req.status_code
                i += 1
        if req.status_code >= 500:
            raise ClickException(
                click.style("Warning!", fg="red", bold=True)
                + " Status check on the deployed lambda failed."
                + " A GET request to '"
                + touch_path
                + "' yielded a "
                + click.style(str(req.status_code), fg="red", bold=True)
                + " response code."
            )
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """
    # Printed after unhandled errors to point users at the support channels.
    help_line = (
        "Need "
        + click.style("help", fg="green", bold=True)
        + "? Found a "
        + click.style("bug", fg="green", bold=True)
        + "? Let us "
        + click.style("know", fg="green", bold=True)
        + "! :D"
    )
    github_line = (
        "File bug reports on "
        + click.style("GitHub", bold=True)
        + " here: "
        + click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True)
    )
    slack_line = (
        "And join our "
        + click.style("Slack", bold=True)
        + " channel here: "
        + click.style("https://zappateam.slack.com", fg="cyan", bold=True)
    )
    for line in (help_line, github_line, slack_line):
        click.echo(line)
    click.echo("Love!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
def disable_click_colors():
    """
    Set a Click context where colors are disabled. Creates a throwaway BaseCommand
    to play nicely with the Context constructor.
    The intended side-effect here is that click.echo() checks this context and will
    suppress colors.
    https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39
    """
    # The command name is irrelevant; Context just requires *some* command.
    throwaway_command = BaseCommand("AllYourBaseAreBelongToUs")
    colorless_ctx = Context(throwaway_command)
    colorless_ctx.color = False
    push_context(colorless_ctx)
def handle():  # pragma: no cover
    """
    Main program execution handler.

    Runs the CLI, guarantees on_exit() cleanup, and maps exceptions to
    process exit codes (130 for Ctrl-C, -1 for unexpected errors).
    """
    # BUGFIX: `cli` must exist before the try block — if ZappaCLI() itself
    # raised, every except clause below died with UnboundLocalError instead
    # of reporting the real error.
    cli = None
    try:
        cli = ZappaCLI()
        sys.exit(cli.handle())
    except SystemExit as e:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(e.code)
    except KeyboardInterrupt:  # pragma: no cover
        if cli:
            cli.on_exit()
        sys.exit(130)
    except Exception as e:
        if cli:
            cli.on_exit()
        click.echo(
            "Oh no! An " + click.style("error occurred", fg="red", bold=True) + "! :("
        )
        click.echo("\n==============\n")
        import traceback
        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()
        sys.exit(-1)
if __name__ == "__main__": # pragma: no cover
    handle()
<p align="center">
<img src="http://i.imgur.com/oePnHJn.jpg" alt="Zappa Rocks!"/>
</p>
## Zappa - Serverless Python
[](https://github.com/zappa/Zappa/actions/workflows/ci.yaml)
[](https://coveralls.io/github/zappa/Zappa)
[](https://pypi.python.org/pypi/zappa)
[](https://zappateam.slack.com/)
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [About](#about)
- [Installation and Configuration](#installation-and-configuration)
- [Running the Initial Setup / Settings](#running-the-initial-setup--settings)
- [Basic Usage](#basic-usage)
- [Initial Deployments](#initial-deployments)
- [Updates](#updates)
- [Docker Workflows](#docker-workflows)
- [Rollback](#rollback)
- [Scheduling](#scheduling)
- [Advanced Scheduling](#advanced-scheduling)
- [Multiple Expressions](#multiple-expressions)
- [Disabled Event](#disabled-event)
- [Undeploy](#undeploy)
- [Package](#package)
- [How Zappa Makes Packages](#how-zappa-makes-packages)
- [Template](#template)
- [Status](#status)
- [Tailing Logs](#tailing-logs)
- [Remote Function Invocation](#remote-function-invocation)
- [Django Management Commands](#django-management-commands)
- [SSL Certification](#ssl-certification)
- [Deploying to a Domain With AWS Certificate Manager](#deploying-to-a-domain-with-aws-certificate-manager)
- [Deploying to a Domain With a Let's Encrypt Certificate (DNS Auth)](#deploying-to-a-domain-with-a-lets-encrypt-certificate-dns-auth)
- [Deploying to a Domain With a Let's Encrypt Certificate (HTTP Auth)](#deploying-to-a-domain-with-a-lets-encrypt-certificate-http-auth)
- [Deploying to a Domain With Your Own SSL Certs](#deploying-to-a-domain-with-your-own-ssl-certs)
- [Executing in Response to AWS Events](#executing-in-response-to-aws-events)
- [Asynchronous Task Execution](#asynchronous-task-execution)
- [Catching Exceptions](#catching-exceptions)
- [Task Sources](#task-sources)
- [Direct Invocation](#direct-invocation)
- [Remote Invocations](#remote-invocations)
- [Restrictions](#restrictions)
- [Running Tasks in a VPC](#running-tasks-in-a-vpc)
- [Responses](#responses)
- [Advanced Settings](#advanced-settings)
- [YAML Settings](#yaml-settings)
- [Advanced Usage](#advanced-usage)
- [Keeping The Server Warm](#keeping-the-server-warm)
- [Serving Static Files / Binary Uploads](#serving-static-files--binary-uploads)
- [Enabling CORS](#enabling-cors)
- [Large Projects](#large-projects)
- [Enabling Bash Completion](#enabling-bash-completion)
- [Enabling Secure Endpoints on API Gateway](#enabling-secure-endpoints-on-api-gateway)
- [API Key](#api-key)
- [IAM Policy](#iam-policy)
- [API Gateway Lambda Authorizers](#api-gateway-lambda-authorizers)
- [Cognito User Pool Authorizer](#cognito-user-pool-authorizer)
- [API Gateway Resource Policy](#api-gateway-resource-policy)
- [Setting Environment Variables](#setting-environment-variables)
- [Local Environment Variables](#local-environment-variables)
- [Remote AWS Environment Variables](#remote-aws-environment-variables)
- [Remote Environment Variables](#remote-environment-variables)
- [Remote Environment Variables (via an S3 file)](#remote-environment-variables-via-an-s3-file)
- [API Gateway Context Variables](#api-gateway-context-variables)
- [Catching Unhandled Exceptions](#catching-unhandled-exceptions)
- [Using Custom AWS IAM Roles and Policies](#using-custom-aws-iam-roles-and-policies)
- [Custom AWS IAM Roles and Policies for Deployment](#custom-aws-iam-roles-and-policies-for-deployment)
- [Custom AWS IAM Roles and Policies for Execution](#custom-aws-iam-roles-and-policies-for-execution)
- [AWS X-Ray](#aws-x-ray)
- [Globally Available Server-less Architectures](#globally-available-server-less-architectures)
- [Raising AWS Service Limits](#raising-aws-service-limits)
- [Dead Letter Queues](#dead-letter-queues)
- [Unique Package ID](#unique-package-id)
- [Application Load Balancer Event Source](#application-load-balancer-event-source)
- [Endpoint Configuration](#endpoint-configuration)
- [Example Private API Gateway configuration](#example-private-api-gateway-configuration)
- [Cold Starts (Experimental)](#cold-starts-experimental)
- [Zappa Guides](#zappa-guides)
- [Zappa in the Press](#zappa-in-the-press)
- [Sites Using Zappa](#sites-using-zappa)
- [Related Projects](#related-projects)
- [Hacks](#hacks)
- [Contributing](#contributing)
- [Using a Local Repo](#using-a-local-repo)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## About
<p align="center">
<a href="https://htmlpreview.github.io/?https://raw.githubusercontent.com/Miserlou/Talks/master/serverless-sf/big.quickstart.html"><img src="http://i.imgur.com/c23kDNT.png?1" alt="Zappa Slides"/></a>
</p>
<p align="center">
<i>In a hurry? Click to see <a href="https://htmlpreview.github.io/?https://raw.githubusercontent.com/Miserlou/Talks/master/serverless-sf/big.quickstart.html">(now slightly out-dated) slides from Serverless SF</a>!</i>
</p>
**Zappa** makes it super easy to build and deploy server-less, event-driven Python applications (including, but not limited to, WSGI web apps) on AWS Lambda + API Gateway. Think of it as "serverless" web hosting for your Python apps. That means **infinite scaling**, **zero downtime**, **zero maintenance** - and at a fraction of the cost of your current deployments!
If you've got a Python web app (including Django and Flask apps), it's as easy as:
```
$ pip install zappa
$ zappa init
$ zappa deploy
```
and now you're server-less! _Wow!_
> What do you mean "serverless"?
Okay, so there still is a server - but it only has a _40 millisecond_ life cycle! Serverless in this case means **"without any permanent infrastructure."**
With a traditional HTTP server, the server is online 24/7, processing requests one by one as they come in. If the queue of incoming requests grows too large, some requests will time out. With Zappa, **each request is given its own virtual HTTP "server"** by Amazon API Gateway. AWS handles the horizontal scaling automatically, so no requests ever time out. Each request then calls your application from a memory cache in AWS Lambda and returns the response via Python's WSGI interface. After your app returns, the "server" dies.
Better still, with Zappa you only pay for the milliseconds of server time that you use, so it's many **orders of magnitude cheaper** than VPS/PaaS hosts like Linode or Heroku - and in most cases, it's completely free. Plus, there's no need to worry about load balancing or keeping servers online ever again.
It's great for deploying serverless microservices with frameworks like Flask and Bottle, and for hosting larger web apps and CMSes with Django. Or, you can use any WSGI-compatible app you like! You **probably don't need to change your existing applications** to use it, and you're not locked into using it.
Zappa also lets you build hybrid event-driven applications that can scale to **trillions of events** a year with **no additional effort** on your part! You also get **free SSL certificates**, **global app deployment**, **API access management**, **automatic security policy generation**, **precompiled C-extensions**, **auto keep-warms**, **oversized Lambda packages**, and **many other exclusive features**!
And finally, Zappa is **super easy to use**. You can deploy your application with a single command out of the box!
__Awesome!__
<p align="center">
<img src="http://i.imgur.com/f1PJxCQ.gif" alt="Zappa Demo Gif"/>
</p>
## Installation and Configuration
_Before you begin, make sure you are running Python 3.7/3.8/3.9/3.10 and you have a valid AWS account and your [AWS credentials file](https://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs) is properly installed._
**Zappa** can easily be installed through pip, like so:
$ pip install zappa
Please note that Zappa _**must**_ be installed into your project's [virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs/). The virtual environment name should not be the same as the Zappa project name, as this may cause errors.
_(If you use [pyenv](https://github.com/yyuu/pyenv) and love to manage virtualenvs with **pyenv-virtualenv**, you just have to call `pyenv local [your_venv_name]` and it's ready. [Conda](http://conda.pydata.org/docs/) users should comment [here](https://github.com/Miserlou/Zappa/pull/108).)_
Next, you'll need to define your local and server-side settings.
### Running the Initial Setup / Settings
**Zappa** can automatically set up your deployment settings for you with the `init` command:
$ zappa init
This will automatically detect your application type (Flask/Django - Pyramid users [see here](https://github.com/Miserlou/Zappa/issues/278#issuecomment-241917956)) and help you define your deployment configuration settings. Once you finish initialization, you'll have a file named *zappa_settings.json* in your project directory defining your basic deployment settings. It will probably look something like this for most WSGI apps:
```javascript
{
// The name of your stage
"dev": {
// The name of your S3 bucket
"s3_bucket": "lambda",
// The modular python path to your WSGI application function.
// In Flask and Bottle, this is your 'app' object.
// Flask (your_module.py):
// app = Flask()
// Bottle (your_module.py):
// app = bottle.default_app()
"app_function": "your_module.app"
}
}
```
or for Django:
```javascript
{
"dev": { // The name of your stage
"s3_bucket": "lambda", // The name of your S3 bucket
"django_settings": "your_project.settings" // The python path to your Django settings.
}
}
```
_Psst: If you're deploying a Django application with Zappa for the first time, you might want to read Edgar Roman's [Django Zappa Guide](https://edgarroman.github.io/zappa-django-guide/)._
You can define as many stages as you like - we recommend having _dev_, _staging_, and _production_.
Now, you're ready to deploy!
## Basic Usage
### Initial Deployments
Once your settings are configured, you can package and deploy your application to a stage called "production" with a single command:
$ zappa deploy production
Deploying..
Your application is now live at: https://7k6anj0k99.execute-api.us-east-1.amazonaws.com/production
And now your app is **live!** How cool is that?!
To explain what's going on, when you call `deploy`, Zappa will automatically package up your application and local virtual environment into a Lambda-compatible archive, replace any dependencies with versions with wheels compatible with lambda, set up the function handler and necessary WSGI Middleware, upload the archive to S3, create and manage the necessary Amazon IAM policies and roles, register it as a new Lambda function, create a new API Gateway resource, create WSGI-compatible routes for it, link it to the new Lambda function, and finally delete the archive from your S3 bucket. Handy!
Be aware that the default IAM role and policy created for executing Lambda applies a liberal set of permissions.
These are most likely not appropriate for production deployment of important applications. See the section
[Custom AWS IAM Roles and Policies for Execution](#custom-aws-iam-roles-and-policies-for-execution) for more detail.
### Updates
If your application has already been deployed and you only need to upload new Python code, but not touch the underlying routes, you can simply:
$ zappa update production
Updating..
Your application is now live at: https://7k6anj0k99.execute-api.us-east-1.amazonaws.com/production
This creates a new archive, uploads it to S3 and updates the Lambda function to use the new code, but doesn't touch the API Gateway routes.
#### Docker Workflows
In [version 0.53.0](https://github.com/zappa/Zappa/blob/master/CHANGELOG.md), support was added to deploy & update Lambda functions using Docker.
You can specify an ECR image using the `--docker-image-uri` option to the zappa command on `deploy` and `update`.
Zappa expects that the image is built and pushed to an Amazon ECR repository.
Deploy Example:
$ zappa deploy --docker-image-uri {AWS ACCOUNT ID}.dkr.ecr.{REGION}.amazonaws.com/{REPOSITORY NAME}:latest
Update Example:
$ zappa update --docker-image-uri {AWS ACCOUNT ID}.dkr.ecr.{REGION}.amazonaws.com/{REPOSITORY NAME}:latest
Refer to [the blog post](https://ianwhitestone.work/zappa-serverless-docker/) for more details about how to leverage this functionality, and when you may want to.
If you are using a custom Docker image for your Lambda runtime (e.g. if you want to use a newer version of Python that is not yet supported by Lambda out of the box) and you would like to bypass the Python version check, you can set an environment variable to do so:
$ export ZAPPA_RUNNING_IN_DOCKER=True
You can also add this to your Dockerfile like this:
```
ENV ZAPPA_RUNNING_IN_DOCKER=True
```
### Rollback
You can also `rollback` the deployed code to a previous version by supplying the number of revisions to return to. For instance, to rollback to the version deployed 3 versions ago:
$ zappa rollback production -n 3
### Scheduling
Zappa can be used to easily schedule functions to occur on regular intervals. This provides a much nicer, maintenance-free alternative to Celery!
These functions will be packaged and deployed along with your `app_function` and called from the handler automatically.
Just list your functions and the expression to schedule them using [cron or rate syntax](http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html) in your *zappa_settings.json* file:
```javascript
{
"production": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
}],
...
}
}
```
And then:
$ zappa schedule production
And now your function will execute every minute!
If you want to cancel these, you can simply use the `unschedule` command:
$ zappa unschedule production
And now your scheduled event rules are deleted.
See the [example](example/) for more details.
#### Advanced Scheduling
##### Multiple Expressions
Sometimes a function needs multiple expressions to describe its schedule. To set multiple expressions, simply list your functions, and the list of expressions to schedule them using [cron or rate syntax](http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html) in your *zappa_settings.json* file:
```javascript
{
"production": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expressions": ["cron(0 20-23 ? * SUN-THU *)", "cron(0 0-8 ? * MON-FRI *)"] // When to execute it (in cron or rate format)
}],
...
}
}
```
This can be used to deal with issues arising from the UTC timezone crossing midnight during business hours in your local timezone.
It should be noted that overlapping expressions will not throw a warning, and should be checked for, to prevent duplicate triggering of functions.
##### Disabled Event
Sometimes an event should be scheduled, yet disabled.
For example, perhaps an event should only run in your production environment, but not sandbox.
You may still want to deploy it to sandbox to ensure there is no issue with your expression(s) before deploying to production.
In this case, you can disable it from running by setting `enabled` to `false` in the event definition:
```javascript
{
"sandbox": {
...
"events": [{
"function": "your_module.your_function", // The function to execute
"expression": "rate(1 minute)", // When to execute it (in cron or rate format)
"enabled": false
}],
...
}
}
```
### Undeploy
If you need to remove the API Gateway and Lambda function that you have previously published, you can simply:
$ zappa undeploy production
You will be asked for confirmation before it executes.
If you enabled CloudWatch Logs for your API Gateway service and you don't
want to keep those logs, you can specify the `--remove-logs` argument to purge the logs for your API Gateway and your Lambda function:
$ zappa undeploy production --remove-logs
### Package
If you want to build your application package without actually uploading and registering it as a Lambda function, you can use the `package` command:
$ zappa package production
If you have a `zip` callback in your `callbacks` setting, this will also be invoked.
```javascript
{
"production": { // The name of your stage
"callbacks": {
        "zip": "my_app.zip_callback" // After creating the package
}
}
}
```
You can also specify the output filename of the package with `-o`:
$ zappa package production -o my_awesome_package.zip
#### How Zappa Makes Packages
Zappa will automatically package your active virtual environment into a package which runs smoothly on AWS Lambda.
During this process, it will replace any local dependencies with AWS Lambda compatible versions. Dependencies are included in this order:
* Lambda-compatible `manylinux` wheels from a local cache
* Lambda-compatible `manylinux` wheels from PyPI
* Packages from the active virtual environment
* Packages from the local project directory
It also skips certain unnecessary files, and ignores any .py files if .pyc files are available.
In addition, Zappa will also automatically set the correct execution permissions, configure package settings, and create a unique, auditable package manifest file.
To further reduce the final package file size, you can:
* Set `slim_handler` to `True` to upload a small handler to Lambda and the rest of the package to S3. For more details, see the [merged pull request](https://github.com/Miserlou/Zappa/pull/548) and the [discussion in the original issue](https://github.com/Miserlou/Zappa/issues/510). See also: [Large Projects](#large-projects).
* Use the `exclude` or `exclude_glob` setting and provide a list of patterns to exclude from the archive. By default, Zappa will exclude Boto, because [it's already available in the Lambda execution environment](http://docs.aws.amazon.com/lambda/latest/dg/current-supported-versions.html).
### Template
Similarly to `package`, if you only want the API Gateway CloudFormation template, use the `template` command:
    $ zappa template production -l your-lambda-arn -r your-role-arn
Note that you must supply your own Lambda ARN and Role ARNs in this case, as they may not have been created for you.
You can get the JSON output directly with `--json`, and specify the output file with `--output`.
### Status
If you need to see the status of your deployment and event schedules, simply use the `status` command.
$ zappa status production
### Tailing Logs
You can watch the logs of a deployment by calling the `tail` management command.
$ zappa tail production
By default, this will show all log items. In addition to HTTP and other events, anything `print`ed to `stdout` or `stderr` will be shown in the logs.
You can use the argument `--http` to filter for HTTP requests, which will be in the Apache Common Log Format.
$ zappa tail production --http
Similarly, you can do the inverse and only show non-HTTP events and log messages:
$ zappa tail production --non-http
If you don't like the default log colors, you can turn them off with `--no-color`.
You can also limit the length of the tail with `--since`, which accepts a simple duration string:
$ zappa tail production --since 4h # 4 hours
$ zappa tail production --since 1m # 1 minute
$ zappa tail production --since 1mm # 1 month
You can filter out the contents of the logs with `--filter`, like so:
$ zappa tail production --http --filter "POST" # Only show POST HTTP requests
Note that this uses the [CloudWatch Logs filter syntax](http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
To tail logs without following (to exit immediately after displaying the end of the requested logs), pass `--disable-keep-open`:
$ zappa tail production --since 1h --disable-keep-open
### Remote Function Invocation
You can execute any function in your application directly at any time by using the `invoke` command.
For instance, suppose you have a basic application in a file called "my_app.py", and you want to invoke a function in it called "my_function". Once your application is deployed, you can invoke that function at any time by calling:
$ zappa invoke production my_app.my_function
Any remote print statements made and the value the function returned will then be printed to your local console. **Nifty!**
You can also invoke interpretable Python 3.7/3.8/3.9/3.10 strings directly by using `--raw`, like so:
$ zappa invoke production "print(1 + 2 + 3)" --raw
For instance, it can come in handy if you want to create your first `superuser` on a RDS database running in a VPC (like Serverless Aurora):
$ zappa invoke staging "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser('username', 'email', 'password')" --raw
### Django Management Commands
As a convenience, Zappa can also invoke remote Django 'manage.py' commands with the `manage` command. For instance, to perform the basic Django status check:
$ zappa manage production showmigrations admin
Obviously, this only works for Django projects which have their settings properly defined.
For commands which have their own arguments, you can also pass the command in as a string, like so:
$ zappa manage production "shell --version"
Commands which require direct user input, such as `createsuperuser`, should be [replaced by commands](http://stackoverflow.com/a/26091252) which use `zappa invoke <env> --raw`.
For more Django integration, take a look at the [zappa-django-utils](https://github.com/Miserlou/zappa-django-utils) project.
### SSL Certification
Zappa can be deployed to custom domain names and subdomains with custom SSL certificates, Let's Encrypt certificates, and [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) (ACM) certificates.
Currently, the easiest of these to use are the AWS Certificate Manager certificates, as they are free, self-renewing, and require the least amount of work.
Once configured as described below, all of these methods use the same command:
$ zappa certify
When deploying from a CI/CD system, you can use:
$ zappa certify --yes
to skip the confirmation prompt.
#### Deploying to a Domain With AWS Certificate Manager
Amazon provides their own free alternative to Let's Encrypt called [AWS Certificate Manager](https://aws.amazon.com/certificate-manager/) (ACM). To use this service with Zappa:
1. Verify your domain in the AWS Certificate Manager console.
2. In the console, select the N. Virginia (us-east-1) region and request a certificate for your domain or subdomain (`sub.yourdomain.tld`), or request a wildcard domain (`*.yourdomain.tld`).
3. Copy the entire ARN of that certificate and place it in the Zappa setting `certificate_arn`.
4. Set your desired domain in the `domain` setting.
5. Call `$ zappa certify` to create and associate the API Gateway distribution using that certificate.
#### Deploying to a Domain With a Let's Encrypt Certificate (DNS Auth)
If you want to use Zappa on a domain with a free Let's Encrypt certificate using automatic Route 53 based DNS Authentication, you can follow [this handy guide](https://github.com/zappa/Zappa/blob/master/docs/domain_with_free_ssl_dns.md).
#### Deploying to a Domain With a Let's Encrypt Certificate (HTTP Auth)
If you want to use Zappa on a domain with a free Let's Encrypt certificate using HTTP Authentication, you can follow [this guide](https://github.com/zappa/Zappa/blob/master/docs/domain_with_free_ssl_http.md).
However, it's now far easier to use Route 53-based DNS authentication, which will allow you to use a Let's Encrypt certificate with a single `$ zappa certify` command.
#### Deploying to a Domain With Your Own SSL Certs
1. The first step is to create a custom domain and obtain your SSL cert / key / bundle.
2. Ensure you have set the `domain` setting within your Zappa settings JSON - this will avoid problems with the Base Path mapping between the Custom Domain and the API invoke URL, which gets the Stage Name appended in the URI
3. Add the paths to your SSL cert / key / bundle to the `certificate`, `certificate_key`, and `certificate_chain` settings, respectively, in your Zappa settings JSON
4. Set `route53_enabled` to `false` if you plan on using your own DNS provider, and not an AWS Route53 Hosted zone.
5. Deploy or update your app using Zappa
6. Run `$ zappa certify` to upload your certificates and register the custom domain name with your API gateway.
## Executing in Response to AWS Events
Similarly, you can have your functions execute in response to events that happen in the AWS ecosystem, such as S3 uploads, DynamoDB entries, Kinesis streams, SNS messages, and SQS queues.
In your *zappa_settings.json* file, define your [event sources](http://docs.aws.amazon.com/lambda/latest/dg/invoking-lambda-function.html) and the function you wish to execute. For instance, this will execute `your_module.process_upload_function` in response to new objects in your `my-bucket` S3 bucket. Note that `process_upload_function` must accept `event` and `context` parameters.
```javascript
{
"production": {
...
"events": [{
"function": "your_module.process_upload_function",
"event_source": {
"arn": "arn:aws:s3:::my-bucket",
"events": [
"s3:ObjectCreated:*" // Supported event types: http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#supported-notification-event-types
],
"key_filters": [{ // optional
"type": "suffix",
"value": "yourfile.json"
},
{
"type": "prefix",
"value": "prefix/for/your/object"
}]
}
}],
...
}
}
```
And then:
$ zappa schedule production
And now your function will execute every time a new upload appears in your bucket!
To access the key's information in your application context, you'll want `process_upload_function` to look something like this:
```python
import boto3
s3_client = boto3.client('s3')
def process_upload_function(event, context):
"""
Process a file upload.
"""
# Get the uploaded file's information
bucket = event['Records'][0]['s3']['bucket']['name'] # Will be `my-bucket`
key = event['Records'][0]['s3']['object']['key'] # Will be the file path of whatever file was uploaded.
# Get the bytes from S3
s3_client.download_file(bucket, key, '/tmp/' + key) # Download this file to writable tmp space.
file_bytes = open('/tmp/' + key).read()
```
Similarly, for a [Simple Notification Service](https://aws.amazon.com/sns/) event:
```javascript
"events": [
{
"function": "your_module.your_function",
"event_source": {
"arn": "arn:aws:sns:::your-event-topic-arn",
"events": [
"sns:Publish"
]
}
}
]
```
Optionally you can add [SNS message filters](http://docs.aws.amazon.com/sns/latest/dg/message-filtering.html):
```javascript
"events": [
{
"function": "your_module.your_function",
"event_source": {
"arn": "arn:aws:sns:::your-event-topic-arn",
"filters": {
"interests": ["python", "aws", "zappa"],
"version": ["1.0"]
},
...
}
}
]
```
[DynamoDB](http://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) and [Kinesis](http://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) are slightly different as it is not event based but pulling from a stream:
```javascript
"events": [
{
"function": "replication.replicate_records",
"event_source": {
"arn": "arn:aws:dynamodb:us-east-1:1234554:table/YourTable/stream/2016-05-11T00:00:00.000",
"starting_position": "TRIM_HORIZON", // Supported values: TRIM_HORIZON, LATEST
"batch_size": 50, // Max: 1000
"enabled": true // Default is false
}
}
]
```
[SQS](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) is also pulling messages from a stream. At this time, [only "Standard" queues can trigger lambda events, not "FIFO" queues](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html). Read the AWS Documentation carefully since Lambda calls the SQS DeleteMessage API on your behalf once your function completes successfully.
```javascript
"events": [
{
"function": "your_module.process_messages",
"event_source": {
"arn": "arn:aws:sqs:us-east-1:12341234:your-queue-name-arn",
"batch_size": 10, // Max: 10. Use 1 to trigger immediate processing
"enabled": true // Default is false
}
}
]
```
For configuring Lex Bot's intent triggered events:
```javascript
"bot_events": [
{
"function": "lexbot.handlers.book_appointment.handler",
"event_source": {
"arn": "arn:aws:lex:us-east-1:01234123123:intent:TestLexEventNames:$LATEST", // optional. In future it will be used to configure the intent
"intent":"intentName", // name of the bot event configured
            "invocation_source":"DialogCodeHook" // either FulfillmentCodeHook or DialogCodeHook
}
}
]
```
Events can also take keyword arguments:
```javascript
"events": [
{
"function": "your_module.your_recurring_function", // The function to execute
"kwargs": {"key": "val", "key2": "val2"}, // Keyword arguments to pass. These are available in the event
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
}
]
```
To get the keyword arguments you will need to look inside the event dictionary:
```python
def your_recurring_function(event, context):
my_kwargs = event.get("kwargs") # dict of kwargs given in zappa_settings file
```
You can find more [example event sources here](http://docs.aws.amazon.com/lambda/latest/dg/eventsources.html).
## Asynchronous Task Execution
Zappa also now offers the ability to seamlessly execute functions asynchronously in a completely separate AWS Lambda instance!
For example, if you have a Flask API for ordering a pie, you can call your `bake` function seamlessly in a completely separate Lambda instance by using the `zappa.asynchronous.task` decorator like so:
```python
from flask import Flask
from zappa.asynchronous import task
app = Flask(__name__)
@task
def make_pie():
""" This takes a long time! """
ingredients = get_ingredients()
pie = bake(ingredients)
deliver(pie)
@app.route('/api/order/pie')
def order_pie():
""" This returns immediately! """
make_pie()
return "Your pie is being made!"
```
And that's it! Your API response will return immediately, while the `make_pie` function executes in a completely different Lambda instance.
When calls to @task decorated functions or the zappa.asynchronous.run command occur outside of Lambda, such as your local dev environment,
the functions will execute immediately and locally. The zappa asynchronous functionality only works
when in the Lambda environment or when specifying [Remote Invocations](https://github.com/zappa/zappa#remote-invocations).
### Catching Exceptions
Putting a try..except block on an asynchronous task like this:
```python
@task
def make_pie():
try:
ingredients = get_ingredients()
pie = bake(ingredients)
deliver(pie)
except Fault as error:
"""send an email"""
...
return Response('Web services down', status=503)
```
will cause an email to be sent twice for the same error. See [asynchronous retries at AWS](https://docs.aws.amazon.com/lambda/latest/dg/retries-on-errors.html). To work around this side-effect, and have the fault handler execute only once, change the return value to:
```python
@task
def make_pie():
try:
"""code block"""
except Fault as error:
"""send an email"""
...
return {} #or return True
```
### Task Sources
By default, this feature uses direct AWS Lambda invocation. You can instead use AWS Simple Notification Service as the task event source by using the `task_sns` decorator, like so:
```python
from zappa.asynchronous import task_sns
@task_sns
```
Using SNS also requires setting the following settings in your `zappa_settings`:
```javascript
{
"dev": {
..
"async_source": "sns", // Source of async tasks. Defaults to "lambda"
"async_resources": true, // Create the SNS topic to use. Defaults to true.
..
}
}
```
This will automatically create and subscribe to the SNS topic the code will use when you call the `zappa schedule` command.
Using SNS will also return a message ID in case you need to track your invocations.
### Direct Invocation
You can also use this functionality without a decorator by passing your function to `zappa.asynchronous.run`, like so:
```python
from zappa.asynchronous import run
run(your_function, args, kwargs) # Using Lambda
run(your_function, args, kwargs, service='sns') # Using SNS
```
### Remote Invocations
By default, Zappa will use lambda's current function name and current AWS region. If you wish to invoke a lambda with
a different function name/region or invoke your lambda from outside of lambda, you must specify the
`remote_aws_lambda_function_name` and `remote_aws_region` arguments so that the application knows which function and
region to use. For example, if some part of our pizza making application had to live on an EC2 instance, but we
wished to call the make_pie() function on its own Lambda instance, we would do it as follows:
```python
@task(remote_aws_lambda_function_name='pizza-pie-prod', remote_aws_region='us-east-1')
def make_pie():
""" This takes a long time! """
ingredients = get_ingredients()
pie = bake(ingredients)
deliver(pie)
```
If those task() parameters were not used, then EC2 would execute the function locally. These same
`remote_aws_lambda_function_name` and `remote_aws_region` arguments can be used on the zappa.asynchronous.run() function as well.
### Restrictions
The following restrictions to this feature apply:
* Functions must have a clean import path -- i.e. no closures, lambdas, or methods.
* `args` and `kwargs` must be JSON-serializable.
* The JSON-serialized arguments must be within the size limits for Lambda (256K) or SNS (256K) events.
All of this code is still backwards-compatible with non-Lambda environments - it simply executes in a blocking fashion and returns the result.
### Running Tasks in a VPC
If you're running Zappa in a Virtual Private Cloud (VPC), you'll need to configure your subnets to allow your lambda to communicate with services inside your VPC as well as the public Internet. A minimal setup requires two subnets.
In __subnet-a__:
* Create a NAT
* Create an Internet gateway
* In the route table, create a route pointing the Internet gateway to 0.0.0.0/0.
In __subnet-b__:
* Place your lambda function
* In the route table, create a route pointing the NAT that belongs to __subnet-a__ to 0.0.0.0/0.
You can place your lambda in multiple subnets that are configured the same way as __subnet-b__ for high availability.
Some helpful resources are [this tutorial](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Tutorials.WebServerDB.CreateVPC.html), [this other tutorial](https://gist.github.com/reggi/dc5f2620b7b4f515e68e46255ac042a7) and [this AWS doc page](http://docs.aws.amazon.com/lambda/latest/dg/vpc.html#vpc-internet).
### Responses
It is possible to capture the responses of Asynchronous tasks.
Zappa uses DynamoDB as the backend for these.
To capture responses, you must configure a `async_response_table` in `zappa_settings`. This is the DynamoDB table name. Then, when decorating with `@task`, pass `capture_response=True`.
Async responses are assigned a `response_id`. This is returned as a property of the `LambdaAsyncResponse` (or `SnsAsyncResponse`) object that is returned by the `@task` decorator.
Example:
```python
from zappa.asynchronous import task, get_async_response
from flask import Flask, make_response, abort, url_for, redirect, request, jsonify
from time import sleep
app = Flask(__name__)
@app.route('/payload')
def payload():
delay = request.args.get('delay', 60)
x = longrunner(delay)
return redirect(url_for('response', response_id=x.response_id))
@app.route('/async-response/<response_id>')
def response(response_id):
response = get_async_response(response_id)
if response is None:
abort(404)
if response['status'] == 'complete':
return jsonify(response['response'])
sleep(5)
return "Not yet ready. Redirecting.", 302, {
'Content-Type': 'text/plain; charset=utf-8',
'Location': url_for('response', response_id=response_id, backoff=5),
'X-redirect-reason': "Not yet ready.",
}
@task(capture_response=True)
def longrunner(delay):
sleep(float(delay))
return {'MESSAGE': "It took {} seconds to generate this.".format(delay)}
```
## Advanced Settings
There are other settings that you can define in your local settings
to change Zappa's behavior. Use these at your own risk!
```javascript
{
"dev": {
"additional_text_mimetypes": [], // allows you to provide additional mimetypes to be handled as text when binary_support is true.
"alb_enabled": false, // enable provisioning of application load balancing resources. If set to true, you _must_ fill out the alb_vpc_config option as well.
"alb_vpc_config": {
"CertificateArn": "your_acm_certificate_arn", // ACM certificate ARN for ALB
"SubnetIds": [], // list of subnets for ALB
"SecurityGroupIds": [] // list of security groups for ALB
},
"api_key_required": false, // enable securing API Gateway endpoints with x-api-key header (default False)
"api_key": "your_api_key_id", // optional, use an existing API key. The option "api_key_required" must be true to apply
"apigateway_enabled": true, // Set to false if you don't want to create an API Gateway resource. Default true.
"apigateway_description": "My funky application!", // Define a custom description for the API Gateway console. Default None.
"assume_policy": "my_assume_policy.json", // optional, IAM assume policy JSON file
"attach_policy": "my_attach_policy.json", // optional, IAM attach policy JSON file
"apigateway_policy": "my_apigateway_policy.json", // optional, API Gateway resource policy JSON file
"async_source": "sns", // Source of async tasks. Defaults to "lambda"
"async_resources": true, // Create the SNS topic and DynamoDB table to use. Defaults to true.
"async_response_table": "your_dynamodb_table_name", // the DynamoDB table name to use for captured async responses; defaults to None (can't capture)
"async_response_table_read_capacity": 1, // DynamoDB table read capacity; defaults to 1
"async_response_table_write_capacity": 1, // DynamoDB table write capacity; defaults to 1
"aws_endpoint_urls": { "aws_service_name": "endpoint_url" }, // a dictionary of endpoint_urls that emulate the appropriate service. Usually used for testing, for instance with `localstack`.
"aws_environment_variables" : {"your_key": "your_value"}, // A dictionary of environment variables that will be available to your deployed app via AWS Lambdas native environment variables. See also "environment_variables" and "remote_env" . Default {}.
"aws_kms_key_arn": "your_aws_kms_key_arn", // Your AWS KMS Key ARN
"aws_region": "aws-region-name", // optional, uses region set in profile or environment variables if not set here,
"binary_support": true, // Enable automatic MIME-type based response encoding through API Gateway. Default true.
"callbacks": { // Call custom functions during the local Zappa deployment/update process
"settings": "my_app.settings_callback", // After loading the settings
"zip": "my_app.zip_callback", // After creating the package
"post": "my_app.post_callback", // After command has executed
},
"cache_cluster_enabled": false, // Use APIGW cache cluster (default False)
"cache_cluster_size": 0.5, // APIGW Cache Cluster size (default 0.5)
"cache_cluster_ttl": 300, // APIGW Cache Cluster time-to-live (default 300)
"cache_cluster_encrypted": false, // Whether or not APIGW Cache Cluster encrypts data (default False)
"certificate": "my_cert.crt", // SSL certificate file location. Used to manually certify a custom domain
"certificate_key": "my_key.key", // SSL key file location. Used to manually certify a custom domain
"certificate_chain": "my_cert_chain.pem", // SSL certificate chain file location. Used to manually certify a custom domain
"certificate_arn": "arn:aws:acm:us-east-1:1234512345:certificate/aaaa-bbb-cccc-dddd", // ACM certificate ARN (needs to be in us-east-1 region).
"cloudwatch_log_level": "OFF", // Enables/configures a level of logging for the given staging. Available options: "OFF", "INFO", "ERROR", default "OFF".
"cloudwatch_data_trace": false, // Logs all data about received events. Default false.
"cloudwatch_metrics_enabled": false, // Additional metrics for the API Gateway. Default false.
"cognito": { // for Cognito event triggers
"user_pool": "user-pool-id", // User pool ID from AWS Cognito
"triggers": [{
"source": "PreSignUp_SignUp", // triggerSource from http://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html#cognito-user-pools-lambda-trigger-syntax-pre-signup
"function": "my_app.pre_signup_function"
}]
},
"context_header_mappings": { "HTTP_header_name": "API_Gateway_context_variable" }, // A dictionary mapping HTTP header names to API Gateway context variables
"cors": false, // Enable Cross-Origin Resource Sharing. Default false. If true, simulates the "Enable CORS" button on the API Gateway console. Can also be a dictionary specifying lists of "allowed_headers", "allowed_methods", and string of "allowed_origin"
"dead_letter_arn": "arn:aws:<sns/sqs>:::my-topic/queue", // Optional Dead Letter configuration for when Lambda async invoke fails thrice
"debug": true, // Print Zappa configuration errors tracebacks in the 500. Default true.
"delete_local_zip": true, // Delete the local zip archive after code updates. Default true.
"delete_s3_zip": true, // Delete the s3 zip archive. Default true.
"django_settings": "your_project.production_settings", // The modular path to your Django project's settings. For Django projects only.
"domain": "yourapp.yourdomain.com", // Required if you're using a domain
"base_path": "your-base-path", // Optional base path for API gateway custom domain base path mapping. Default None. Not supported for use with Application Load Balancer event sources.
"environment_variables": {"your_key": "your_value"}, // A dictionary of environment variables that will be available to your deployed app. See also "remote_env" and "aws_environment_variables". Default {}.
"events": [
{ // Recurring events
"function": "your_module.your_recurring_function", // The function to execute
"expression": "rate(1 minute)" // When to execute it (in cron or rate format)
},
{ // AWS Reactive events
"function": "your_module.your_reactive_function", // The function to execute
"event_source": {
"arn": "arn:aws:s3:::my-bucket", // The ARN of this event source
"events": [
"s3:ObjectCreated:*" // The specific event to execute in response to.
]
}
}
],
        "endpoint_configuration": ["EDGE", "REGIONAL", "PRIVATE"], // Specify APIGateway endpoint None (default) or list `EDGE`, `REGIONAL`, `PRIVATE`
"exception_handler": "your_module.report_exception", // function that will be invoked in case Zappa sees an unhandled exception raised from your code
"exclude": ["file.gz", "tests"], // A list of filename patterns to exclude from the archive (see `fnmatch` module for patterns).
"exclude_glob": ["*.gz", "*.rar", "tests/**/*"], // A list of glob patterns to exclude from the archive. To exclude boto3 and botocore (available in an older version on Lambda), add "boto3*" and "botocore*".
"extends": "stage_name", // Duplicate and extend another stage's settings. For example, `dev-asia` could extend from `dev-common` with a different `s3_bucket` value.
"extra_permissions": [{ // Attach any extra permissions to this policy. Default None
"Effect": "Allow",
"Action": ["rekognition:*"], // AWS Service ARN
"Resource": "*"
}],
"iam_authorization": false, // optional, use IAM to require request signing. Default false. Note that enabling this will override the authorizer configuration.
"include": ["your_special_library_to_load_at_handler_init"], // load special libraries into PYTHONPATH at handler init that certain modules cannot find on path
"authorizer": {
"function": "your_module.your_auth_function", // Local function to run for token validation. For more information about the function see below.
"arn": "arn:aws:lambda:<region>:<account_id>:function:<function_name>", // Existing Lambda function to run for token validation.
"result_ttl": 300, // Optional. Default 300. The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches authorizer results. Currently, the maximum TTL value is 3600 seconds.
"token_header": "Authorization", // Optional. Default 'Authorization'. The name of a custom authorization header containing the token that clients submit as part of their requests.
"validation_expression": "^Bearer \\w+$", // Optional. A validation expression for the incoming token, specify a regular expression.
},
"keep_warm": true, // Create CloudWatch events to keep the server warm. Default true. To remove, set to false and then `unschedule`.
"keep_warm_expression": "rate(4 minutes)", // How often to execute the keep-warm, in cron and rate format. Default 4 minutes.
"lambda_description": "Your Description", // However you want to describe your project for the AWS console. Default "Zappa Deployment".
"lambda_handler": "your_custom_handler", // The name of Lambda handler. Default: handler.lambda_handler
"layers": ["arn:aws:lambda:<region>:<account_id>:layer:<layer_name>:<layer_version>"], // optional lambda layers
"lambda_concurrency": 10, // Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level. Default is None.
"lets_encrypt_key": "s3://your-bucket/account.key", // Let's Encrypt account key path. Can either be an S3 path or a local file path.
"log_level": "DEBUG", // Set the Zappa log level. Can be one of CRITICAL, ERROR, WARNING, INFO and DEBUG. Default: DEBUG
"manage_roles": true, // Have Zappa automatically create and define IAM execution roles and policies. Default true. If false, you must define your own IAM Role and role_name setting.
"memory_size": 512, // Lambda function memory in MB. Default 512.
"num_retained_versions":null, // Indicates the number of old versions to retain for the lambda. If absent, keeps all the versions of the function.
"payload_compression": true, // Whether or not to enable API gateway payload compression (default: true)
"payload_minimum_compression_size": 0, // The threshold size (in bytes) below which payload compression will not be applied (default: 0)
"prebuild_script": "your_module.your_function", // Function to execute before uploading code
"profile_name": "your-profile-name", // AWS profile credentials to use. Default 'default'. Removing this setting will use the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables instead.
"project_name": "MyProject", // The name of the project as it appears on AWS. Defaults to a slugified `pwd`.
"remote_env": "s3://my-project-config-files/filename.json", // optional file in s3 bucket containing a flat json object which will be used to set custom environment variables.
"role_name": "MyLambdaRole", // Name of Zappa execution role. Default <project_name>-<env>-ZappaExecutionRole. To use a different, pre-existing policy, you must also set manage_roles to false.
"role_arn": "arn:aws:iam::12345:role/app-ZappaLambdaExecutionRole", // ARN of Zappa execution role. Default to None. To use a different, pre-existing policy, you must also set manage_roles to false. This overrides role_name. Use with temporary credentials via GetFederationToken.
"route53_enabled": true, // Have Zappa update your Route53 Hosted Zones when certifying with a custom domain. Default true.
"runtime": "python3.10", // Python runtime to use on Lambda. Can be one of "python3.7", "python3.8", "python3.9", or "python3.10". Defaults to whatever the current Python being used is.
"s3_bucket": "dev-bucket", // Zappa zip bucket,
"slim_handler": false, // Useful if project >50M. Set true to just upload a small handler to Lambda and load actual project from S3 at runtime. Default false.
"settings_file": "~/Projects/MyApp/settings/dev_settings.py", // Server side settings file location,
"tags": { // Attach additional tags to AWS Resources
"Key": "Value", // Example Key and value
"Key2": "Value2",
},
"timeout_seconds": 30, // Maximum lifespan for the Lambda function (default 30, max 900.)
"touch": true, // GET the production URL upon initial deployment (default True)
"touch_path": "/", // The endpoint path to GET when checking the initial deployment (default "/")
"use_precompiled_packages": true, // If possible, use C-extension packages which have been pre-compiled for AWS Lambda. Default true.
"vpc_config": { // Optional Virtual Private Cloud (VPC) configuration for Lambda function
"SubnetIds": [ "subnet-12345678" ], // Note: not all availability zones support Lambda!
"SecurityGroupIds": [ "sg-12345678" ]
},
"xray_tracing": false // Optional, enable AWS X-Ray tracing on your lambda function.
}
}
```
#### YAML Settings
If you prefer YAML over JSON, you can also use a `zappa_settings.yml`, like so:
```yaml
---
dev:
app_function: your_module.your_app
s3_bucket: your-code-bucket
events:
- function: your_module.your_function
event_source:
arn: arn:aws:s3:::your-event-bucket
events:
- s3:ObjectCreated:*
```
You can also supply a custom settings file at any time with the `-s` argument, ex:
```
$ zappa deploy dev -s my-custom-settings.yml
```
Similarly, you can supply a `zappa_settings.toml` file:
```toml
[dev]
app_function = "your_module.your_app"
s3_bucket = "your-code-bucket"
```
## Advanced Usage
### Keeping The Server Warm
Zappa will automatically set up a regularly occurring execution of your application in order to keep the Lambda function warm. This can be disabled via the `keep_warm` setting.
#### Serving Static Files / Binary Uploads
Zappa is now able to serve and receive binary files, as detected by their MIME-type.
However, generally Zappa is designed for running your application code, not for serving static web assets. If you plan on serving custom static assets in your web application (CSS/JavaScript/images/etc.,), you'll likely want to use a combination of AWS S3 and AWS CloudFront.
Your web application framework will likely be able to handle this for you automatically. For Flask, there is [Flask-S3](https://github.com/e-dard/flask-s3), and for Django, there is [Django-Storages](https://django-storages.readthedocs.io/en/latest/).
Similarly, you may want to design your application so that static binary uploads go [directly to S3](http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/browser-examples.html#Uploading_a_local_file_using_the_File_API), which then triggers an event response defined in your `events` setting! That's thinking serverlessly!
### Enabling CORS
The simplest way to enable CORS (Cross-Origin Resource Sharing) for your Zappa application is to set `cors` to `true` in your Zappa settings file and update, which is the equivalent of pushing the "Enable CORS" button in the AWS API Gateway console. This is disabled by default, but you may wish to enable it for APIs which are accessed from other domains, etc.
You can also simply handle CORS directly in your application. Your web framework will probably have an extension to do this, such as [django-cors-headers](https://github.com/ottoyiu/django-cors-headers) or [Flask-CORS](https://github.com/corydolphin/flask-cors). Using these will make your code more portable.
### Large Projects
AWS currently limits Lambda zip sizes to 50 megabytes. If your project is larger than that, set `slim_handler: true` in your `zappa_settings.json`. In this case, your fat application package will be replaced with a small handler-only package. The handler file then pulls the rest of the large project down from S3 at run time! The initial load of the large project may add to startup overhead, but the difference should be minimal on a warm lambda function. Note that this will also eat into the storage space of your application function. Note that AWS currently [limits](https://docs.aws.amazon.com/lambda/latest/dg/limits.html) the `/tmp` directory storage to 512 MB, so your project must still be smaller than that.
### Enabling Bash Completion
Bash completion can be enabled by adding the following to your .bashrc:
```bash
eval "$(register-python-argcomplete zappa)"
```
`register-python-argcomplete` is provided by the argcomplete Python package. If this package was installed in a virtualenv
then the command must be run there. Alternatively you can execute:
activate-global-python-argcomplete --dest=- > file
The file's contents should then be sourced in e.g. ~/.bashrc.
### Enabling Secure Endpoints on API Gateway
#### API Key
You can use the `api_key_required` setting to generate an API key to all the routes of your API Gateway. The process is as follows:
1. Deploy/redeploy (update won't work) and write down the *id* for the key that has been created
2. Go to AWS console > Amazon API Gateway and
* select "API Keys" and find the key *value* (for example `key_value`)
* select "Usage Plans", create a new usage plan and link the API Key and the API that Zappa has created for you
3. Send a request where you pass the key value as a header called `x-api-key` to access the restricted endpoints (for example with curl: `curl --header "x-api-key: key_value"`). Note that without the x-api-key header, you will receive a 403.
#### IAM Policy
You can enable IAM-based (v4 signing) authorization on an API by setting the `iam_authorization` setting to `true`. Your API will then require signed requests and access can be controlled via [IAM policy](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-iam-policy-examples.html). Unsigned requests will receive a 403 response, as will requesters who are not authorized to access the API. Enabling this will override the Authorizer configuration (see below).
#### API Gateway Lambda Authorizers
If you deploy an API endpoint with Zappa, you can take advantage of [API Gateway Lambda Authorizers](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html) to implement a token-based authentication - all you need to do is to provide a function to create the required output, Zappa takes care of the rest. A good start for the function is the [AWS Labs blueprint example](https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints/blob/master/blueprints/python/api-gateway-authorizer-python.py).
If you are wondering what you would use an Authorizer for, here are some potential use cases:
1. Call out to OAuth provider
2. Decode a JWT token inline
3. Lookup in a self-managed DB (for example DynamoDB)
Zappa can be configured to call a function inside your code to do the authorization, or to call some other existing lambda function (which lets you share the authorizer between multiple lambdas). You control the behavior by specifying either the `arn` or `function_name` values in the `authorizer` settings block.
For example, to get the Cognito identity, add this to a `zappa_settings.yaml`:
```yaml
context_header_mappings:
user_id: authorizer.user_id
```
Which can now be accessed in Flask like this:
```python
from flask import request
@route('/hello')
def hello_world():
print(request.headers.get('user_id'))
```
#### Cognito User Pool Authorizer
You can also use AWS Cognito User Pool Authorizer by adding:
```javascript
{
"authorizer": {
"type": "COGNITO_USER_POOLS",
"provider_arns": [
"arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}"
]
}
}
```
#### API Gateway Resource Policy
You can also use API Gateway Resource Policies. Example of IP Whitelisting:
```javascript
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"1.2.3.4/32"
]
}
}
}
]
}
```
### Setting Environment Variables
#### Local Environment Variables
If you want to set local environment variables for a deployment stage, you can simply set them in your `zappa_settings.json`:
```javascript
{
"dev": {
...
"environment_variables": {
"your_key": "your_value"
}
},
...
}
```
You can then access these inside your application with:
```python
import os
your_value = os.environ.get('your_key')
```
If your project needs to be aware of the type of environment you're deployed to, you'll also be able to get `SERVERTYPE` (AWS Lambda), `FRAMEWORK` (Zappa), `PROJECT` (your project name) and `STAGE` (_dev_, _production_, etc.) variables at any time.
#### Remote AWS Environment Variables
If you want to use native AWS Lambda environment variables you can use the `aws_environment_variables` configuration setting. These are useful as you can easily change them via the AWS Lambda console or cli at runtime. They are also useful for storing sensitive credentials and to take advantage of KMS encryption of environment variables.
During development, you can add your Zappa defined variables to your locally running app by, for example, using the below (for Django, to manage.py).
```python
if 'SERVERTYPE' in os.environ and os.environ['SERVERTYPE'] == 'AWS Lambda':
import json
import os
json_data = open('zappa_settings.json')
env_vars = json.load(json_data)['dev']['environment_variables']
for key, val in env_vars.items():
os.environ[key] = val
```
#### Remote Environment Variables
Any environment variables that you have set outside of Zappa (via AWS Lambda console or cli) will remain as they are when running `update`, unless they are also in `aws_environment_variables`, in which case the remote value will be overwritten by the one in the settings file. If you are using KMS-encrypted AWS environment variables, you can set your KMS Key ARN in the `aws_kms_key_arn` setting. Make sure that the values you set are encrypted in such case.
_Note: if you rely on these as well as `environment_variables`, and you have the same key names, then those in `environment_variables` will take precedence as they are injected in the lambda handler._
#### Remote Environment Variables (via an S3 file)
_S3 remote environment variables were added to Zappa before AWS introduced native environment variables for Lambda (via the console and cli). Before going down this route, check whether the above makes more sense for your use case._
If you want to use remote environment variables to configure your application (which is especially useful for things like sensitive credentials), you can create a file and place it in an S3 bucket to which your Zappa application has access. To do this, add the `remote_env` key to zappa_settings pointing to a file containing a flat JSON object, so that each key-value pair on the object will be set as an environment variable and value whenever a new lambda instance spins up.
For example, to ensure your application has access to the database credentials without storing them in your version control, you can add a file to S3 with the connection string and load it into the lambda environment using the `remote_env` configuration setting.
super-secret-config.json (uploaded to my-config-bucket):
```javascript
{
"DB_CONNECTION_STRING": "super-secret:database"
}
```
zappa_settings.json:
```javascript
{
"dev": {
...
"remote_env": "s3://my-config-bucket/super-secret-config.json",
},
...
}
```
Now in your application you can use:
```python
import os
db_string = os.environ.get('DB_CONNECTION_STRING')
```
### API Gateway Context Variables
If you want to map an API Gateway context variable (http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html) to an HTTP header you can set up the mapping in `zappa_settings.json`:
```javascript
{
"dev": {
...
"context_header_mappings": {
"HTTP_header_name": "API_Gateway_context_variable"
}
},
...
}
```
For example, if you want to expose the $context.identity.cognitoIdentityId variable as the HTTP header CognitoIdentityId, and $context.stage as APIStage, you would have:
```javascript
{
"dev": {
...
"context_header_mappings": {
"CognitoIdentityId": "identity.cognitoIdentityId",
"APIStage": "stage"
}
},
...
}
```
### Catching Unhandled Exceptions
By default, if an _unhandled_ exception happens in your code, Zappa will just print the stacktrace into a CloudWatch log. If you wish to use an external reporting tool to take note of those exceptions, you can use the `exception_handler` configuration option.
zappa_settings.json:
```javascript
{
"dev": {
...
"exception_handler": "your_module.unhandled_exceptions",
},
...
}
```
The function has to accept three arguments: exception, event, and context:
your_module.py
```python
def unhandled_exceptions(e, event, context):
send_to_raygun(e, event) # gather data you need and send
return True # Prevent invocation retry
```
You may still need a similar exception handler inside your application; this is just a way to catch exceptions which happen at the Zappa/WSGI layer (typically event-based invocations, misconfigured settings, bad Lambda packages, and permissions issues).
By default, AWS Lambda will attempt to retry an event-based (non-API Gateway, e.g. CloudWatch) invocation if an exception has been thrown. However, you can prevent this by returning True, as in the example above, so that Zappa will not re-raise the uncaught exception, thus preventing AWS Lambda from retrying the current invocation.
### Using Custom AWS IAM Roles and Policies
#### Custom AWS IAM Roles and Policies for Deployment
You can specify which _local_ profile to use for deploying your Zappa application by defining
the `profile_name` setting, which will correspond to a profile in your AWS credentials file.
#### Custom AWS IAM Roles and Policies for Execution
The default IAM policy created by Zappa for executing the Lambda is very permissive.
It grants access to all actions for
all resources for types CloudWatch, S3, Kinesis, SNS, SQS, DynamoDB, and Route53; lambda:InvokeFunction
for all Lambda resources; Put to all X-Ray resources; and all Network Interface operations to all EC2
resources. While this allows most Lambdas to work correctly with no extra permissions, it is
generally not an acceptable set of permissions for most continuous integration pipelines or
production deployments. Instead, you will probably want to manually manage your IAM policies.
To manually define the policy of your Lambda execution role, you must set *manage_roles* to false and define
either the *role_name* or *role_arn* in your Zappa settings file.
```javascript
{
"dev": {
...
"manage_roles": false, // Disable Zappa client managing roles.
"role_name": "MyLambdaRole", // Name of your Zappa execution role. Optional, default: <project_name>-<env>-ZappaExecutionRole.
"role_arn": "arn:aws:iam::12345:role/app-ZappaLambdaExecutionRole", // ARN of your Zappa execution role. Optional.
...
},
...
}
```
Ongoing discussion about the minimum policy requirements necessary for a Zappa deployment [can be found here](https://github.com/Miserlou/Zappa/issues/244).
A more robust solution to managing these entitlements will likely be implemented soon.
To add permissions to the default Zappa execution policy, use the `extra_permissions` setting:
```javascript
{
"dev": {
...
"extra_permissions": [{ // Attach any extra permissions to this policy.
"Effect": "Allow",
"Action": ["rekognition:*"], // AWS Service ARN
"Resource": "*"
}]
},
...
}
```
### AWS X-Ray
Zappa can enable [AWS X-Ray](https://aws.amazon.com/xray/) support on your function with a configuration setting:
```javascript
{
"dev": {
...
"xray_tracing": true
},
...
}
```
This will enable it on the Lambda function and allow you to instrument your code with X-Ray.
For example, with Flask:
```python
from aws_xray_sdk.core import xray_recorder
app = Flask(__name__)
xray_recorder.configure(service='my_app_name')
@route('/hello')
@xray_recorder.capture('hello')
def hello_world():
return 'Hello'
```
You may use the capture decorator to create subsegments around functions, or `xray_recorder.begin_subsegment('subsegment_name')` and `xray_recorder.end_subsegment()` within a function. The official [X-Ray documentation for Python](http://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/) has more information on how to use this with your code.
Note that you may create subsegments in your code but an exception will be raised if you try to create a segment, as it is [created by the lambda worker](https://github.com/aws/aws-xray-sdk-python/issues/2). This also means that if you use Flask you must not use the [XRayMiddleware the documentation suggests](https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-python-middleware.html).
### Globally Available Server-less Architectures
<p align="center">
<a href="https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0"><img src="http://i.imgur.com/oR61Qau.png" alt="Global Zappa Slides"/></a>
</p>
<p align="center">
<i>Click to see <a href="https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0">slides from ServerlessConf London</a>!</i>
</p>
During the `init` process, you will be given the option to deploy your application "globally." This will allow you to deploy your application to all available AWS regions simultaneously in order to provide a consistent global speed, increased redundancy, data isolation, and legal compliance. You can also choose to deploy only to "primary" locations, the AWS regions with `-1` in their names.
To learn more about these capabilities, see [these slides](https://htmlpreview.github.io/?https://github.com/Miserlou/Talks/blob/master/serverless-london/global.html#0) from ServerlessConf London.
### Raising AWS Service Limits
Out of the box, AWS sets a limit of [1000 concurrent executions](http://docs.aws.amazon.com/lambda/latest/dg/limits.html) for your functions. If you start to breach these limits, you may start to see errors like `ClientError: An error occurred (LimitExceededException) when calling the PutTargets...` or something similar.
To avoid this, you can file a [service ticket](https://console.aws.amazon.com/support/home#/) with Amazon to raise your limits up to the many tens of thousands of concurrent executions which you may need. This is a fairly common practice with Amazon, designed to prevent you from accidentally creating extremely expensive bug reports. So, before raising your service limits, make sure that you don't have any rogue scripts which could accidentally create tens of thousands of parallel executions that you don't want to pay for.
### Dead Letter Queues
If you want to utilise [AWS Lambda's Dead Letter Queue feature](http://docs.aws.amazon.com/lambda/latest/dg/dlq.html) simply add the key `dead_letter_arn`, with the value being the complete ARN to the corresponding SNS topic or SQS queue in your `zappa_settings.json`.
You must have already created the corresponding SNS/SQS topic/queue, and the Lambda function execution role must have been provisioned with read/publish/sendMessage access to the DLQ resource.
### Unique Package ID
For monitoring of different deployments, a unique UUID for each package is available in `package_info.json` in the root directory of your application's package. You can use this information or a hash of this file for such things as tracking errors across different deployments, monitoring status of deployments and other such things on services such as Sentry and New Relic. The package will contain:
```json
{
"build_platform": "darwin",
"build_user": "frank",
"build_time": "1509732511",
"uuid": "9c2df9e6-30f4-4c0a-ac4d-4ecb51831a74"
}
```
### Application Load Balancer Event Source
Zappa can be used to handle events triggered by Application Load Balancers (ALB). This can be useful in a few circumstances:
- Since API Gateway has a hard limit of 30 seconds before timing out, you can use an ALB for longer running requests.
- API Gateway is billed per-request; therefore, costs can become excessive with high throughput services. ALBs pricing model makes much more sense financially if you're expecting a lot of traffic to your Lambda.
- ALBs can be placed within a VPC, which may make more sense for private endpoints than using API Gateway's private model (using AWS PrivateLink).
Like API Gateway, Zappa can automatically provision ALB resources for you. You'll need to add the following to your `zappa_settings`:
```
"alb_enabled": true,
"alb_vpc_config": {
"CertificateArn": "arn:aws:acm:us-east-1:[your-account-id]:certificate/[certificate-id]",
"SubnetIds": [
// Here, you'll want to provide a list of subnets for your ALB, eg. 'subnet-02a58266'
],
"SecurityGroupIds": [
// And here, a list of security group IDs, eg. 'sg-fbacb791'
]
}
```
More information on using ALB as an event source for Lambda can be found [here](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html).
*An important note*: right now, Zappa will provision ONE lambda to ONE load balancer, which means using `base_path` along with ALB configuration is currently unsupported.
### Endpoint Configuration
API Gateway can be configured to be only accessible in a VPC. To enable this, [configure your VPC to support it](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-private-apis.html), then set the `endpoint_configuration` to `PRIVATE` and set up a Resource Policy on the API Gateway. A note about this: if you're using a private endpoint, Zappa won't be able to tell if the API is returning a successful status code upon deploy or update, so you'll have to check it manually to ensure your setup is working properly.
For full list of options for endpoint configuration refer to [API Gateway EndpointConfiguration documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigateway-restapi-endpointconfiguration.html)
#### Example Private API Gateway configuration
zappa_settings.json:
```json
{
"dev": {
...
"endpoint_configuration": ["PRIVATE"],
"apigateway_policy": "apigateway_resource_policy.json",
...
},
...
}
```
apigateway_resource_policy.json:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Deny",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*",
"Condition": {
"StringNotEquals": {
"aws:sourceVpc": "{{vpcID}}" // UPDATE ME
}
}
},
{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "execute-api:/*"
}
]
}
```
### Cold Starts (Experimental)
Lambda may provide additional resources than provisioned during cold start initialization. Set `INSTANTIATE_LAMBDA_HANDLER_ON_IMPORT=True` to instantiate the lambda handler on import. This is an experimental feature - if startup time is critical, look into using Provisioned Concurrency.
## Zappa Guides
* [Django-Zappa tutorial (screencast)](https://www.youtube.com/watch?v=plUrbPN0xc8&feature=youtu.be).
* [Using Django-Zappa, Part 1](https://serverlesscode.com/post/zappa-wsgi-for-python/).
* [Using Django-Zappa, Part 2: VPCs](https://serverlesscode.com/post/zappa-wsgi-for-python-pt-2/).
* [Building Serverless Microservices with Zappa and Flask](https://gun.io/blog/serverless-microservices-with-zappa-and-flask/)
* [Zappa で Hello World するまで (Japanese)](http://qiita.com/satoshi_iwashita/items/505492193317819772c7)
* [How to Deploy Zappa with CloudFront, RDS and VPC](https://jinwright.net/how-deploy-serverless-wsgi-app-using-zappa/)
* [Secure 'Serverless' File Uploads with AWS Lambda, S3, and Zappa](http://blog.stratospark.com/secure-serverless-file-uploads-with-aws-lambda-s3-zappa.html)
* [Deploy a Serverless WSGI App using Zappa, CloudFront, RDS, and VPC](https://docs.google.com/presentation/d/1aYeOMgQl4V_fFgT5VNoycdXtob1v6xVUWlyxoTEiTw0/edit#slide=id.p)
* [AWS: Deploy Alexa Ask Skills with Flask-Ask and Zappa](https://developer.amazon.com/blogs/post/8e8ad73a-99e9-4c0f-a7b3-60f92287b0bf/New-Alexa-Tutorial-Deploy-Flask-Ask-Skills-to-AWS-Lambda-with-Zappa)
* [Guide to using Django with Zappa](https://edgarroman.github.io/zappa-django-guide/)
* [Zappa and LambCI](https://seancoates.com/blogs/zappa-and-lambci/)
* [Building A Serverless Image Processing SaaS using Zappa](https://medium.com/99serverless/building-a-serverless-image-processing-saas-9ef68b594076)
* [Serverless Slack Slash Commands with Python and Zappa](https://renzo.lucioni.xyz/serverless-slash-commands-with-python/)
* [Bringing Tokusatsu to AWS using Python, Flask, Zappa and Contentful](https://www.contentful.com/blog/2018/03/07/bringing-tokusatsu-to-aws-using-python-flask-zappa-and-contentful/)
* [AWS Summit 2018 Seoul - Zappa와 함께하는 Serverless Microservice](https://www.slideshare.net/YunSeopSong/zappa-serverless-microservice-94410308/)
* [Book - Building Serverless Python Web Services with Zappa](https://github.com/PacktPublishing/Building-Serverless-Python-Web-Services-with-Zappa)
* [Vider sa flask dans une lambda](http://free_zed.gitlab.io/articles/2019/11/vider-sa-flask-dans-une-lambda/)[French]
* _Your guide here?_
## Zappa in the Press
* _[Zappa Serves Python, Minus the Servers](http://www.infoworld.com/article/3031665/application-development/zappa-serves-python-web-apps-minus-the-servers.html)_
* _[Zappa lyfter serverlösa applikationer med Python](http://computersweden.idg.se/2.2683/1.649895/zappa-lyfter-python)_
* _[Interview: Rich Jones on Zappa](https://serverlesscode.com/post/rich-jones-interview-django-zappa/)_
* [Top 10 Python Libraries of 2016](https://tryolabs.com/blog/2016/12/20/top-10-python-libraries-of-2016/)
## Sites Using Zappa
* [Mailchimp Signup Utility](https://github.com/sasha42/Mailchimp-utility) - A microservice for adding people to a mailing list via API.
* [Zappa Slack Inviter](https://github.com/Miserlou/zappa-slack-inviter) - A tiny, server-less service for inviting new users to your Slack channel.
* [Serverless Image Host](https://github.com/Miserlou/serverless-imagehost) - A thumbnailing service with Flask, Zappa and Pillow.
* [Zappa BitTorrent Tracker](https://github.com/Miserlou/zappa-bittorrent-tracker) - An experimental server-less BitTorrent tracker. Work in progress.
* [JankyGlance](https://github.com/Miserlou/JankyGlance) - A server-less Yahoo! Pipes replacement.
* [LambdaMailer](https://github.com/tryolabs/lambda-mailer) - A server-less endpoint for processing a contact form.
* [Voter Registration Microservice](https://topics.arlingtonva.us/2016/11/voter-registration-search-microservice/) - Official backup to the Virginia Department of Elections portal.
* [FreePoll Online](https://www.freepoll.online) - A simple and awesome way for groups to make decisions.
* [PasteOfCode](https://paste.ofcode.org/) - A Zappa-powered paste bin.
* And many more, including banks, governments, startups, enterprises and schools!
Are you using Zappa? Let us know and we'll list your site here!
## Related Projects
* [Mackenzie](http://github.com/Miserlou/Mackenzie) - AWS Lambda Infection Toolkit
* [NoDB](https://github.com/Miserlou/NoDB) - A simple, server-less, Pythonic object store based on S3.
* [zappa-cms](http://github.com/Miserlou/zappa-cms) - A tiny server-less CMS for busy hackers. Work in progress.
* [zappa-django-utils](https://github.com/Miserlou/zappa-django-utils) - Utility commands to help Django deployments.
* [flask-ask](https://github.com/johnwheeler/flask-ask) - A framework for building Amazon Alexa applications. Uses Zappa for deployments.
* [zappa-file-widget](https://github.com/anush0247/zappa-file-widget) - A Django plugin for supporting binary file uploads in Django on Zappa.
* [zops](https://github.com/bjinwright/zops) - Utilities for teams and continuous integrations using Zappa.
* [cookiecutter-mobile-backend](https://github.com/narfman0/cookiecutter-mobile-backend/) - A `cookiecutter` Django project with Zappa and S3 uploads support.
* [zappa-examples](https://github.com/narfman0/zappa-examples/) - Flask, Django, image uploads, and more!
* [zappa-hug-example](https://github.com/mcrowson/zappa-hug-example) - Example of a Hug application using Zappa.
* [Zappa Docker Image](https://github.com/danielwhatmuff/zappa) - A Docker image for running Zappa locally, based on Lambda Docker.
* [zappa-dashing](https://github.com/nikos/zappa-dashing) - Monitor your AWS environment (health/metrics) with Zappa and CloudWatch.
* [s3env](https://github.com/cameronmaske/s3env) - Manipulate a remote Zappa environment variable key/value JSON object file in an S3 bucket through the CLI.
* [zappa_resize_image_on_fly](https://github.com/wobeng/zappa_resize_image_on_fly) - Resize images on the fly using Flask, Zappa, Pillow, and OpenCV-python.
* [zappa-ffmpeg](https://github.com/ubergarm/zappa-ffmpeg) - Run ffmpeg inside a lambda for serverless transformations.
* [gdrive-lambda](https://github.com/richiverse/gdrive-lambda) - pass json data to a csv file for end users who use Gdrive across the organization.
* [travis-build-repeat](https://github.com/bcongdon/travis-build-repeat) - Repeat TravisCI builds to avoid stale test results.
* [wunderskill-alexa-skill](https://github.com/mcrowson/wunderlist-alexa-skill) - An Alexa skill for adding to a Wunderlist.
* [xrayvision](https://github.com/mathom/xrayvision) - Utilities and wrappers for using AWS X-Ray with Zappa.
* [terraform-aws-zappa](https://github.com/dpetzold/terraform-aws-zappa) - Terraform modules for creating a VPC, RDS instance, ElastiCache Redis and CloudFront Distribution for use with Zappa.
* [zappa-sentry](https://github.com/jneves/zappa-sentry) - Integration with Zappa and Sentry
* [IOpipe](https://github.com/iopipe/iopipe-python#zappa) - Monitor, profile and analyze your Zappa apps.
## Hacks
Zappa goes quite far beyond what Lambda and API Gateway were ever intended to handle. As a result, there are quite a few hacks in here that allow it to work. Some of those include, but aren't limited to..
* Using VTL to map body, headers, method, params and query strings into JSON, and then turning that into valid WSGI.
* Attaching response codes to response bodies, Base64 encoding the whole thing, using that as a regex to route the response code, decoding the body in VTL, and mapping the response body to that.
* Packing and _Base58_ encoding multiple cookies into a single cookie because we can only map one kind.
* Forcing the case permutations of "Set-Cookie" in order to return multiple headers at the same time.
* Turning cookie-setting 301/302 responses into 200 responses with HTML redirects, because we have no way to set headers on redirects.
## Contributing
Contributions are very welcome!
Please file tickets for discussion before submitting patches. Pull requests should target `master` and should leave Zappa in a "shippable" state if merged.
If you are adding a non-trivial amount of new code, please include a functioning test in your PR. For AWS calls, we use the `placebo` library, which you can learn to use [in their README](https://github.com/garnaat/placebo#usage-as-a-decorator). The test suite will be run by [Travis CI](https://travis-ci.org/zappa/Zappa) once you open a pull request.
Please include the GitHub issue or pull request URL that has discussion related to your changes as a comment in the code ([example](https://github.com/zappa/Zappa/blob/fae2925431b820eaedf088a632022e4120a29f89/zappa/zappa.py#L241-L243)). This greatly helps for project maintainability, as it allows us to trace back use cases and explain decision making. Similarly, please make sure that you meet all of the requirements listed in the [pull request template](https://raw.githubusercontent.com/zappa/Zappa/master/.github/PULL_REQUEST_TEMPLATE.md).
Please feel free to work on any open ticket, especially any ticket marked with the "help-wanted" label. If you get stuck or want to discuss an issue further, please join [our Slack channel](https://zappateam.slack.com/), where you'll find a community of smart and interesting people working diligently on hard problems.
[Zappa Slack Auto Invite](https://slackautoinviter.herokuapp.com)
Zappa does not intend to conform to PEP8; isolate your commits so that changes to functionality are not mixed with changes made by your linter.
#### Using a Local Repo
To use the git HEAD, you *probably can't* use `pip install -e `. Instead, you should clone the repo to your machine and then `pip install /path/to/zappa/repo` or `ln -s /path/to/zappa/repo/zappa zappa` in your local project.
| zappa | /zappa-0.57.0.tar.gz/zappa-0.57.0/README.md | README.md |
import atexit
import base64
import binascii
import copy
import hashlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
from urllib.request import urlopen
import requests
# Staging
# Amazon doesn't accept these though.
# DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
# Production
# Base URL for all Let's Encrypt (Boulder) ACME calls made in this module.
DEFAULT_CA = "https://acme-v02.api.letsencrypt.org"
# Module logger with an explicit StreamHandler so progress messages are
# visible even if the host application never configures logging.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
def get_cert_and_update_domain(
    zappa_instance,
    lambda_name,
    api_stage,
    domain=None,
    manual=False,
):
    """
    Main cert installer path.

    Generates a domain key and CSR, obtains a signed certificate from
    Let's Encrypt, then either installs it on the API Gateway custom
    domain (creating or updating the mapping) or, when ``manual`` is
    True, prints the certificate material for the operator to install.

    Returns True on success, False if any step raised.
    """
    try:
        create_domain_key()
        create_domain_csr(domain)
        get_cert(zappa_instance)
        create_chained_certificate()

        with open("{}/signed.crt".format(gettempdir())) as f:
            certificate_body = f.read()

        with open("{}/domain.key".format(gettempdir())) as f:
            certificate_private_key = f.read()

        with open("{}/intermediate.pem".format(gettempdir())) as f:
            certificate_chain = f.read()

        if not manual:
            if domain:
                if not zappa_instance.get_domain_name(domain):
                    zappa_instance.create_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
                    print(
                        "Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part."
                    )
                else:
                    zappa_instance.update_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
        else:
            # Typo fix: output previously read "Cerificate".
            print("Certificate body:\n")
            print(certificate_body)

            print("\nCertificate private key:\n")
            print(certificate_private_key)

            print("\nCertificate chain:\n")
            print(certificate_chain)

    except Exception as e:
        print(e)
        return False

    return True
def create_domain_key():
    """
    Generate a 2048-bit RSA private key for the domain and write it to
    ``<tempdir>/domain.key``.
    """
    # subprocess.DEVNULL avoids leaking a file handle; the previous
    # ``open(os.devnull, "wb")`` was never closed.
    out = subprocess.check_output(
        ["openssl", "genrsa", "2048"], stderr=subprocess.DEVNULL
    )
    with open(os.path.join(gettempdir(), "domain.key"), "wb") as f:
        f.write(out)
def create_domain_csr(domain):
    """
    Create a certificate signing request for *domain* (CN only), signed
    with the previously generated ``domain.key``, and write it to
    ``<tempdir>/domain.csr``.
    """
    subj = "/CN=" + domain
    cmd = [
        "openssl",
        "req",
        "-new",
        "-sha256",
        "-key",
        os.path.join(gettempdir(), "domain.key"),
        "-subj",
        subj,
    ]
    # subprocess.DEVNULL avoids leaking an unclosed os.devnull handle.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    with open(os.path.join(gettempdir(), "domain.csr"), "wb") as f:
        f.write(out)
def create_chained_certificate():
    """
    Download the Let's Encrypt intermediate certificate and write both
    ``intermediate.pem`` and ``chained.pem`` (signed cert followed by the
    intermediate) into the temp directory.
    """
    # Use a context manager so the signed.crt handle is closed (the
    # previous ``open(...).read()`` leaked it).
    with open(os.path.join(gettempdir(), "signed.crt"), "rb") as f:
        signed_crt = f.read()

    # NOTE(review): the X3 cross-signed intermediate has been retired by
    # Let's Encrypt -- confirm this URL still serves a usable chain.
    cross_cert_url = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
    cert = requests.get(cross_cert_url)

    with open(os.path.join(gettempdir(), "intermediate.pem"), "wb") as intermediate_pem:
        intermediate_pem.write(cert.content)

    with open(os.path.join(gettempdir(), "chained.pem"), "wb") as chained_pem:
        chained_pem.write(signed_crt)
        chained_pem.write(cert.content)
def parse_account_key():
    """
    Parse account key to get public key.

    Returns the raw bytes of ``openssl rsa -noout -text`` for
    ``<tempdir>/account.key``.
    """
    LOGGER.info("Parsing account key...")
    cmd = [
        "openssl",
        "rsa",
        "-in",
        os.path.join(gettempdir(), "account.key"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL replaces an unclosed open(os.devnull) handle.
    return subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
def parse_csr():
    """
    Parse certificate signing request for domains.

    Returns the set of domains covered by the CSR: the subject CN plus
    any DNS entries in the Subject Alternative Name extension.
    """
    LOGGER.info("Parsing CSR...")
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-noout",
        "-text",
    ]
    # subprocess.DEVNULL replaces an unclosed open(os.devnull) handle.
    out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    # Decode once instead of once per regex search.
    text = out.decode("utf8")

    domains = set([])
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", text)
    if common_name is not None:
        domains.add(common_name.group(1))

    subject_alt_names = re.search(
        r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
        text,
        re.MULTILINE | re.DOTALL,
    )
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])

    return domains
def get_boulder_header(key_bytes):
    """
    Extract the RSA modulus and public exponent from ``openssl rsa -text``
    output and build the JWS header expected by Boulder.
    """
    match = re.search(
        r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
        key_bytes.decode("utf8"),
        re.MULTILINE | re.DOTALL,
    )
    modulus_hex, exponent_dec = match.groups()

    # Hex-encode the exponent, zero-padding to an even digit count so it
    # can be converted back to bytes.
    exponent_hex = "{0:x}".format(int(exponent_dec))
    if len(exponent_hex) % 2:
        exponent_hex = "0" + exponent_hex

    # Strip the whitespace and colon separators openssl inserts.
    modulus_clean = re.sub(r"(\s|:)", "", modulus_hex)

    return {
        "alg": "RS256",
        "jwk": {
            "e": _b64(binascii.unhexlify(exponent_hex.encode("utf-8"))),
            "kty": "RSA",
            "n": _b64(binascii.unhexlify(modulus_clean.encode("utf-8"))),
        },
    }
def register_account():
    """
    Register the account key with Let's Encrypt, agreeing to the TOS.

    A 201 means newly registered; a 409 means the key was already
    registered -- both are treated as success.
    """
    LOGGER.info("Registering account...")
    payload = {
        "resource": "new-reg",
        "agreement": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
    }
    code, result = _send_signed_request(DEFAULT_CA + "/acme/new-reg", payload)
    if code == 201:  # pragma: no cover
        LOGGER.info("Registered!")
        return
    if code == 409:  # pragma: no cover
        LOGGER.info("Already registered!")
        return
    # pragma: no cover
    raise ValueError("Error registering: {0} {1}".format(code, result))
def get_cert(zappa_instance, log=LOGGER, CA=DEFAULT_CA):
    """
    Call LE to get a new signed CA.

    Walks the ACME v1-style flow: parse the account key, register, then
    for each domain in the CSR complete a dns-01 challenge (TXT record
    written via the zappa_instance's Route 53 helpers) before requesting
    and PEM-encoding the signed certificate. Raises ValueError on any
    failed step; returns True otherwise.
    """
    out = parse_account_key()
    header = get_boulder_header(out)
    # The JWK thumbprint ties each challenge response to our account key.
    accountkey_json = json.dumps(header["jwk"], sort_keys=True, separators=(",", ":"))
    thumbprint = _b64(hashlib.sha256(accountkey_json.encode("utf8")).digest())
    # find domains
    domains = parse_csr()
    # get the certificate domains and expiration
    register_account()
    # verify each domain
    for domain in domains:
        log.info("Verifying {0}...".format(domain))
        # get new challenge
        code, result = _send_signed_request(
            CA + "/acme/new-authz",
            {
                "resource": "new-authz",
                "identifier": {"type": "dns", "value": domain},
            },
        )
        if code != 201:
            raise ValueError(
                "Error requesting challenges: {0} {1}".format(code, result)
            )
        # Only the dns-01 challenge is used, since we answer via Route 53.
        challenge = [
            ch
            for ch in json.loads(result.decode("utf8"))["challenges"]
            if ch["type"] == "dns-01"
        ][0]
        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge["token"])
        keyauthorization = "{0}.{1}".format(token, thumbprint).encode("utf-8")
        # sha256_b64
        digest = _b64(hashlib.sha256(keyauthorization).digest())
        zone_id = zappa_instance.get_hosted_zone_id_for_domain(domain)
        if not zone_id:
            raise ValueError("Could not find Zone ID for: " + domain)
        zappa_instance.set_dns_challenge_txt(zone_id, domain, digest)  # resp is unused
        print("Waiting for DNS to propagate..")
        # Fixed wait for the TXT record to propagate before LE checks it.
        # What's optimal here?
        time.sleep(45)
        # notify challenge are met
        code, result = _send_signed_request(
            challenge["uri"],
            {
                "resource": "challenge",
                "keyAuthorization": keyauthorization.decode("utf-8"),
            },
        )
        if code != 202:
            raise ValueError("Error triggering challenge: {0} {1}".format(code, result))
        # wait for challenge to be verified
        verify_challenge(challenge["uri"])
        # Challenge verified, clean up R53
        zappa_instance.remove_dns_challenge_txt(zone_id, domain, digest)
    # Sign
    result = sign_certificate()
    # Encode to PEM format
    encode_certificate(result)
    return True
def verify_challenge(uri):
    """
    Poll the challenge URI until Let's Encrypt reports it valid.

    Sleeps two seconds between polls while the status is "pending";
    raises ValueError on any other non-valid status or on an unreadable
    response.
    """
    while True:
        try:
            response = urlopen(uri)
            status = json.loads(response.read().decode("utf8"))
        except IOError as e:
            raise ValueError(
                "Error checking challenge: {0} {1}".format(
                    e.code, json.loads(e.read().decode("utf8"))
                )
            )

        state = status["status"]
        if state == "valid":
            LOGGER.info("Domain verified!")
            return
        if state == "pending":
            time.sleep(2)
            continue
        raise ValueError("Domain challenge did not pass: {0}".format(status))
def sign_certificate():
    """
    Get the new certificate.
    Returns the signed bytes.
    """
    LOGGER.info("Signing certificate...")
    # Convert the CSR to DER, the format the ACME new-cert call expects.
    cmd = [
        "openssl",
        "req",
        "-in",
        os.path.join(gettempdir(), "domain.csr"),
        "-outform",
        "DER",
    ]
    # subprocess.DEVNULL replaces an unclosed open(os.devnull) handle.
    csr_der = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    code, result = _send_signed_request(
        DEFAULT_CA + "/acme/new-cert",
        {
            "resource": "new-cert",
            "csr": _b64(csr_der),
        },
    )
    if code != 201:
        raise ValueError("Error signing certificate: {0} {1}".format(code, result))
    LOGGER.info("Certificate signed!")
    return result
def encode_certificate(result):
    """
    Encode cert bytes to PEM encoded cert file.

    Writes ``<tempdir>/signed.crt`` and returns True.
    """
    pem_payload = "\n".join(textwrap.wrap(base64.b64encode(result).decode("utf8"), 64))
    cert_body = "-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n".format(
        pem_payload
    )
    # Context manager guarantees the handle is closed even if the write
    # raises (the previous open/write/close leaked it on error).
    with open("{}/signed.crt".format(gettempdir()), "w") as signed_crt:
        signed_crt.write(cert_body)
    return True
##
# Request Utility
##
def _b64(b):
"""
Helper function base64 encode for jose spec
"""
return base64.urlsafe_b64encode(b).decode("utf8").replace("=", "")
def _send_signed_request(url, payload):
    """
    Helper function to make signed requests to Boulder.

    Builds a JWS: the payload and protected header are base64url-encoded,
    signed with the account key via ``openssl dgst``, and POSTed to *url*.
    Returns ``(status_code, response_body)``; HTTP errors are returned
    rather than raised.
    """
    payload64 = _b64(json.dumps(payload).encode("utf8"))
    out = parse_account_key()
    header = get_boulder_header(out)
    protected = copy.deepcopy(header)
    # Every signed ACME request must carry a fresh anti-replay nonce.
    protected["nonce"] = urlopen(DEFAULT_CA + "/directory").headers["Replay-Nonce"]
    protected64 = _b64(json.dumps(protected).encode("utf8"))
    cmd = [
        "openssl",
        "dgst",
        "-sha256",
        "-sign",
        os.path.join(gettempdir(), "account.key"),
    ]
    proc = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # Sign "<protected>.<payload>" exactly as JWS requires.
    out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode("utf8"))
    if proc.returncode != 0:  # pragma: no cover
        raise IOError("OpenSSL Error: {0}".format(err))
    data = json.dumps(
        {
            "header": header,
            "protected": protected64,
            "payload": payload64,
            "signature": _b64(out),
        }
    )
    try:
        resp = urlopen(url, data.encode("utf8"))
        return resp.getcode(), resp.read()
    except IOError as e:
        # HTTPError exposes code/read(); plain IOErrors fall back to their
        # string form so the caller still gets a diagnostic body.
        return getattr(e, "code", None), getattr(e, "read", e.__str__)()
##
# Temporary Directory Utility
##
# Cached path of the lazily-created temporary directory (None until first use).
__tempdir = None


def gettempdir():
    """
    Lazily creates a temporary directory in a secure manner. When Python exits,
    or the cleanup() function is called, the directory is erased.
    """
    global __tempdir
    if __tempdir is None:
        __tempdir = tempfile.mkdtemp()
    return __tempdir
@atexit.register
def cleanup():
    """
    Delete any temporary files.

    Registered with atexit so the directory created by gettempdir() is
    removed on interpreter shutdown; safe to call early or repeatedly
    because the cached path is reset to None after removal.
    """
    global __tempdir
    if __tempdir is not None:
        shutil.rmtree(__tempdir)
        __tempdir = None
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
    """
    Yield every upper/lower-case permutation of *input_string*.

    Characters without a distinct case (digits, punctuation) contribute a
    single variant. Via @Amber:
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if input_string == "":
        yield ""
        return
    head, tail = input_string[:1], input_string[1:]
    if head.lower() == head.upper():
        variants = [head]
    else:
        variants = [head.lower(), head.upper()]
    for rest in all_casings(tail):
        for variant in variants:
            yield variant + rest
class ZappaWSGIMiddleware:
    """
    Middleware functions necessary for a Zappa deployment.

    Most hacks have now been removed except for Set-Cookie permutation.
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        """
        We must case-mangle the Set-Cookie header name or AWS will use only a
        single one of these headers.
        """

        def encode_response(status, headers, exc_info=None):
            """
            Lower-case every 'Set-Cookie' header name and move those headers
            to the end of the list; all other headers pass through unharmed.

            Related: https://github.com/Miserlou/Zappa/issues/1965
            """

            def is_set_cookie(header):
                name = header[0]
                return (type(name) == str) and name.lower() == "set-cookie"

            other_headers = []
            cookie_headers = []
            for header in headers:
                if is_set_cookie(header):
                    cookie_headers.append(("set-cookie", header[1]))
                else:
                    other_headers.append(header)
            return start_response(status, other_headers + cookie_headers, exc_info)

        # Call the application with our modifier
        response = self.application(environ, encode_response)

        # Return the response as a WSGI-safe iterator
        return ClosingIterator(response)
import base64
import collections
import datetime
import importlib
import inspect
import json
import logging
import os
import sys
import tarfile
import traceback
from builtins import str
import boto3
from werkzeug.wrappers import Response
# This file may be copied into a project's root,
# so handle both scenarios.
try:
from zappa.middleware import ZappaWSGIMiddleware
from zappa.utilities import merge_headers, parse_s3_url
from zappa.wsgi import common_log, create_wsgi_request
except ImportError as e: # pragma: no cover
from .middleware import ZappaWSGIMiddleware
from .utilities import merge_headers, parse_s3_url
from .wsgi import common_log, create_wsgi_request
# Set up logging
# The root logger defaults to INFO; LambdaHandler.__init__ adjusts the level
# later when the settings module defines LOG_LEVEL.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class LambdaHandler:
    """
    Singleton for avoiding duplicate setup.

    Pattern provided by @benbangert.
    """

    __instance = None
    settings = None  # cached settings module; also the "already set up" flag
    settings_name = None  # dotted name of the settings module
    session = None  # optional boto3 session supplied by the caller

    # Application
    app_module = None  # module that holds the target WSGI application
    wsgi_app = None  # ZappaWSGIMiddleware-wrapped WSGI callable
    trailing_slash = False  # set True for Django-backed applications
def __new__(cls, settings_name="zappa_settings", session=None):
"""Singleton instance to avoid repeat setup"""
if LambdaHandler.__instance is None:
print("Instancing..")
LambdaHandler.__instance = object.__new__(cls)
return LambdaHandler.__instance
def __init__(self, settings_name="zappa_settings", session=None):
# We haven't cached our settings yet, load the settings and app.
if not self.settings:
# Loading settings from a python module
self.settings = importlib.import_module(settings_name)
self.settings_name = settings_name
self.session = session
# Custom log level
if self.settings.LOG_LEVEL:
level = logging.getLevelName(self.settings.LOG_LEVEL)
logger.setLevel(level)
remote_env = getattr(self.settings, "REMOTE_ENV", None)
remote_bucket, remote_file = parse_s3_url(remote_env)
if remote_bucket and remote_file:
self.load_remote_settings(remote_bucket, remote_file)
# Let the system know that this will be a Lambda/Zappa/Stack
os.environ["SERVERTYPE"] = "AWS Lambda"
os.environ["FRAMEWORK"] = "Zappa"
try:
os.environ["PROJECT"] = self.settings.PROJECT_NAME
os.environ["STAGE"] = self.settings.API_STAGE
except Exception: # pragma: no cover
pass
# Set any locally defined env vars
# Environment variable keys can't be Unicode
# https://github.com/Miserlou/Zappa/issues/604
for key in self.settings.ENVIRONMENT_VARIABLES.keys():
os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key]
# Pulling from S3 if given a zip path
project_archive_path = getattr(self.settings, "ARCHIVE_PATH", None)
if project_archive_path:
self.load_remote_project_archive(project_archive_path)
# Load compiled library to the PythonPath
# checks if we are the slim_handler since this is not needed otherwise
# https://github.com/Miserlou/Zappa/issues/776
is_slim_handler = getattr(self.settings, "SLIM_HANDLER", False)
if is_slim_handler:
included_libraries = getattr(
self.settings, "INCLUDE", ["libmysqlclient.so.18"]
)
try:
from ctypes import cdll, util
for library in included_libraries:
try:
cdll.LoadLibrary(os.path.join(os.getcwd(), library))
except OSError:
print(
"Failed to find library: {}...right filename?".format(
library
)
)
except ImportError:
print("Failed to import cytpes library")
# This is a non-WSGI application
# https://github.com/Miserlou/Zappa/pull/748
if (
not hasattr(self.settings, "APP_MODULE")
and not self.settings.DJANGO_SETTINGS
):
self.app_module = None
wsgi_app_function = None
# This is probably a normal WSGI app (Or django with overloaded wsgi application)
# https://github.com/Miserlou/Zappa/issues/1164
elif hasattr(self.settings, "APP_MODULE"):
if self.settings.DJANGO_SETTINGS:
sys.path.append("/var/task")
from django.conf import (
ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE,
)
# add the Lambda root path into the sys.path
self.trailing_slash = True
os.environ[
SETTINGS_ENVIRONMENT_VARIABLE
] = self.settings.DJANGO_SETTINGS
else:
self.trailing_slash = False
# The app module
self.app_module = importlib.import_module(self.settings.APP_MODULE)
# The application
wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION)
# Django gets special treatment.
else:
try: # Support both for tests
from zappa.ext.django_zappa import get_django_wsgi
except ImportError: # pragma: no cover
from django_zappa_app import get_django_wsgi
# Get the Django WSGI app from our extension
wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
self.trailing_slash = True
self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function)
    def load_remote_project_archive(self, project_zip_path):
        """
        Puts the project files from S3 in /tmp and adds to path.

        Skips the download entirely when the project folder already exists
        in this (warm) Lambda. Always returns True.
        """
        project_folder = "/tmp/{0!s}".format(self.settings.PROJECT_NAME)
        if not os.path.isdir(project_folder):
            # The project folder doesn't exist in this cold lambda, get it from S3
            if not self.session:
                boto_session = boto3.Session()
            else:
                boto_session = self.session

            # Download zip file from S3
            remote_bucket, remote_file = parse_s3_url(project_zip_path)
            s3 = boto_session.resource("s3")
            archive_on_s3 = s3.Object(remote_bucket, remote_file).get()

            # NOTE(review): extractall() trusts member paths; acceptable only
            # because the archive is one Zappa built itself -- do not point
            # this at untrusted S3 objects.
            with tarfile.open(fileobj=archive_on_s3["Body"], mode="r|gz") as t:
                t.extractall(project_folder)

            # Add to project path
            sys.path.insert(0, project_folder)

            # Change working directory to project folder
            # Related: https://github.com/Miserlou/Zappa/issues/702
            os.chdir(project_folder)
        return True
    def load_remote_settings(self, remote_bucket, remote_file):
        """
        Attempt to read a file from s3 containing a flat json object. Adds each
        key->value pair as environment variables. Helpful for keeping
        sensitive or stage-specific configuration variables in s3 instead of
        version control.

        All failures are reported to stdout and swallowed so a missing or
        malformed remote file never prevents startup.
        """
        if not self.session:
            boto_session = boto3.Session()
        else:
            boto_session = self.session

        s3 = boto_session.resource("s3")
        try:
            remote_env_object = s3.Object(remote_bucket, remote_file).get()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Could not load remote settings file.", e)
            return

        try:
            content = remote_env_object["Body"].read()
        except Exception as e:  # pragma: no cover
            # catch everything aws might decide to raise
            print("Exception while reading remote settings file.", e)
            return

        try:
            settings_dict = json.loads(content)
        except (ValueError, TypeError):  # pragma: no cover
            print("Failed to parse remote settings!")
            return

        # add each key-value to environment - overwrites existing keys!
        for key, value in settings_dict.items():
            if self.settings.LOG_LEVEL == "DEBUG":
                print("Adding {} -> {} to environment".format(key, value))
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            try:
                os.environ[str(key)] = value
            except Exception:
                if self.settings.LOG_LEVEL == "DEBUG":
                    print("Environment variable keys must be non-unicode!")
@staticmethod
def import_module_and_get_function(whole_function):
"""
Given a modular path to a function, import that module
and return the function.
"""
module, function = whole_function.rsplit(".", 1)
app_module = importlib.import_module(module)
app_function = getattr(app_module, function)
return app_function
    @classmethod
    def lambda_handler(cls, event, context):  # pragma: no cover
        """
        AWS Lambda entry point: obtain the singleton handler, dispatch the
        event, and route any exception through the configured
        EXCEPTION_HANDLER before deciding whether to re-raise.
        """
        handler = cls()
        exception_handler = handler.settings.EXCEPTION_HANDLER
        try:
            return handler.handler(event, context)
        except Exception as ex:
            exception_processed = cls._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=ex,
            )
            if not exception_processed:
                # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry
                # an event execution in case of failure.
                raise
@classmethod
def _process_exception(cls, exception_handler, event, context, exception):
exception_processed = False
if exception_handler:
try:
handler_function = cls.import_module_and_get_function(exception_handler)
exception_processed = handler_function(exception, event, context)
except Exception as cex:
logger.error(msg="Failed to process exception via custom handler.")
print(cex)
return exception_processed
@staticmethod
def run_function(app_function, event, context):
"""
Given a function and event context,
detect signature and execute, returning any result.
"""
# getargspec does not support python 3 method with type hints
# Related issue: https://github.com/Miserlou/Zappa/issues/1452
if hasattr(inspect, "getfullargspec"): # Python 3
args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(
app_function
)
else: # Python 2
args, varargs, keywords, defaults = inspect.getargspec(app_function)
num_args = len(args)
if num_args == 0:
result = app_function(event, context) if varargs else app_function()
elif num_args == 1:
result = app_function(event, context) if varargs else app_function(event)
elif num_args == 2:
result = app_function(event, context)
else:
raise RuntimeError(
"Function signature is invalid. Expected a function that accepts at most "
"2 arguments or varargs."
)
return result
def get_function_for_aws_event(self, record):
"""
Get the associated function to execute for a triggered AWS event
Support S3, SNS, DynamoDB, kinesis and SQS events
"""
if "s3" in record:
if ":" in record["s3"]["configurationId"]:
return record["s3"]["configurationId"].split(":")[-1]
arn = None
if "Sns" in record:
try:
message = json.loads(record["Sns"]["Message"])
if message.get("command"):
return message["command"]
except ValueError:
pass
arn = record["Sns"].get("TopicArn")
elif "dynamodb" in record or "kinesis" in record:
arn = record.get("eventSourceARN")
elif "eventSource" in record and record.get("eventSource") == "aws:sqs":
arn = record.get("eventSourceARN")
elif "s3" in record:
arn = record["s3"]["bucket"]["arn"]
if arn:
return self.settings.AWS_EVENT_MAPPING.get(arn)
return None
def get_function_from_bot_intent_trigger(self, event):
"""
For the given event build ARN and return the configured function
"""
intent = event.get("currentIntent")
if intent:
intent = intent.get("name")
if intent:
return self.settings.AWS_BOT_EVENT_MAPPING.get(
"{}:{}".format(intent, event.get("invocationSource"))
)
def get_function_for_cognito_trigger(self, trigger):
"""
Get the associated function to execute for a cognito trigger
"""
print(
"get_function_for_cognito_trigger",
self.settings.COGNITO_TRIGGER_MAPPING,
trigger,
self.settings.COGNITO_TRIGGER_MAPPING.get(trigger),
)
return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)
    def handler(self, event, context):
        """
        An AWS Lambda function which parses specific API Gateway input into a
        WSGI request, feeds it to our WSGI app, processes the response, and returns
        that back to the API Gateway.

        This is the central dispatcher: before falling through to the normal
        web-app flow it checks, in order, for scheduled events, direct command
        / raw-command invocations, Django management commands, AWS event-source
        records, Lex bot intents, API Gateway authorizer events, Cognito
        triggers and CloudWatch log events, delegating each to the configured
        function.
        """
        settings = self.settings
        # If in DEBUG mode, log all raw incoming events.
        if settings.DEBUG:
            logger.debug("Zappa Event: {}".format(event))
        # Set any API Gateway defined Stage Variables
        # as env vars
        if event.get("stageVariables"):
            for key in event["stageVariables"].keys():
                os.environ[str(key)] = event["stageVariables"][key]
        # This is the result of a keep alive, recertify
        # or scheduled event.
        if event.get("detail-type") == "Scheduled Event":
            # Function name is the last "-"-separated token of the rule ARN.
            whole_function = event["resources"][0].split("/")[-1].split("-")[-1]
            # This is a scheduled function.
            if "." in whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                # Execute the function!
                return self.run_function(app_function, event, context)
            # Else, let this execute as it were.
        # This is a direct command invocation.
        elif event.get("command", None):
            whole_function = event["command"]
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            print("Result of %s:" % whole_function)
            print(result)
            return result
        # This is a direct, raw python invocation.
        # It's _extremely_ important we don't allow this event source
        # to be overridden by unsanitized, non-admin user input.
        # NOTE(review): exec() of event-supplied code is intentional here (the
        # admin "invoke --raw" path); this event source must never be reachable
        # by untrusted callers.
        elif event.get("raw_command", None):
            raw_command = event["raw_command"]
            exec(raw_command)
            return
        # This is a Django management command invocation.
        elif event.get("manage", None):
            from django.core import management
            try:  # Support both for tests
                from zappa.ext.django_zappa import get_django_wsgi
            except ImportError as e:  # pragma: no cover
                from django_zappa_app import get_django_wsgi
            # Get the Django WSGI app from our extension
            # We don't actually need the function,
            # but we do need to do all of the required setup for it.
            app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
            # Couldn't figure out how to get the value into stdout with StringIO..
            # Read the log for now. :[]
            management.call_command(*event["manage"].split(" "))
            return {}
        # This is an AWS-event triggered invocation.
        elif event.get("Records", None):
            records = event.get("Records")
            result = None
            # Only the first record decides which function handles the event.
            whole_function = self.get_function_for_aws_event(records[0])
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result
        # this is an AWS-event triggered from Lex bot's intent
        elif event.get("bot"):
            result = None
            whole_function = self.get_function_from_bot_intent_trigger(event)
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result
        # This is an API Gateway authorizer event
        elif event.get("type") == "TOKEN":
            whole_function = self.settings.AUTHORIZER_FUNCTION
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                policy = self.run_function(app_function, event, context)
                return policy
            else:
                logger.error(
                    "Cannot find a function to process the authorization request."
                )
                raise Exception("Unauthorized")
        # This is an AWS Cognito Trigger Event
        elif event.get("triggerSource", None):
            triggerSource = event.get("triggerSource")
            whole_function = self.get_function_for_cognito_trigger(triggerSource)
            # Cognito expects the (possibly mutated) event back when no
            # handler is configured.
            result = event
            if whole_function:
                app_function = self.import_module_and_get_function(whole_function)
                result = self.run_function(app_function, event, context)
                logger.debug(result)
            else:
                logger.error(
                    "Cannot find a function to handle cognito trigger {}".format(
                        triggerSource
                    )
                )
            return result
        # This is a CloudWatch event
        # Related: https://github.com/Miserlou/Zappa/issues/1924
        elif event.get("awslogs", None):
            result = None
            whole_function = "{}.{}".format(settings.APP_MODULE, settings.APP_FUNCTION)
            app_function = self.import_module_and_get_function(whole_function)
            if app_function:
                result = self.run_function(app_function, event, context)
                logger.debug("Result of %s:" % whole_function)
                logger.debug(result)
            else:
                logger.error("Cannot find a function to process the triggered event.")
            return result
        # Normal web app flow
        try:
            # Timing
            time_start = datetime.datetime.now()
            # This is a normal HTTP request
            if event.get("httpMethod", None):
                script_name = ""
                is_elb_context = False
                headers = merge_headers(event)
                if event.get("requestContext", None) and event["requestContext"].get(
                    "elb", None
                ):
                    # Related: https://github.com/Miserlou/Zappa/issues/1715
                    # inputs/outputs for lambda loadbalancer
                    # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
                    is_elb_context = True
                    # host is lower-case when forwarded from ELB
                    host = headers.get("host")
                    # TODO: pathParameters is a first-class citizen in apigateway but not available without
                    # some parsing work for ELB (is this parameter used for anything?)
                    event["pathParameters"] = ""
                else:
                    if headers:
                        host = headers.get("Host")
                    else:
                        host = None
                    logger.debug("host found: [{}]".format(host))
                    if host:
                        if "amazonaws.com" in host:
                            logger.debug("amazonaws found in host")
                            # The path provided in th event doesn't include the
                            # stage, so we must tell Flask to include the API
                            # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
                            script_name = "/" + settings.API_STAGE
                    else:
                        # This is a test request sent from the AWS console
                        if settings.DOMAIN:
                            # Assume the requests received will be on the specified
                            # domain. No special handling is required
                            pass
                        else:
                            # Assume the requests received will be to the
                            # amazonaws.com endpoint, so tell Flask to include the
                            # API stage
                            script_name = "/" + settings.API_STAGE
                base_path = getattr(settings, "BASE_PATH", None)
                # Create the environment for WSGI and handle the request
                environ = create_wsgi_request(
                    event,
                    script_name=script_name,
                    base_path=base_path,
                    trailing_slash=self.trailing_slash,
                    binary_support=settings.BINARY_SUPPORT,
                    context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS,
                )
                # We are always on https on Lambda, so tell our wsgi app that.
                environ["HTTPS"] = "on"
                environ["wsgi.url_scheme"] = "https"
                environ["lambda.context"] = context
                environ["lambda.event"] = event
                # Execute the application
                with Response.from_app(self.wsgi_app, environ) as response:
                    # This is the object we're going to return.
                    # Pack the WSGI response into our special dictionary.
                    zappa_returndict = dict()
                    # Issue #1715: ALB support. ALB responses must always include
                    # base64 encoding and status description
                    if is_elb_context:
                        zappa_returndict.setdefault("isBase64Encoded", False)
                        zappa_returndict.setdefault(
                            "statusDescription", response.status
                        )
                    if response.data:
                        if (
                            settings.BINARY_SUPPORT
                            and not response.mimetype.startswith("text/")
                            and response.mimetype != "application/json"
                        ):
                            zappa_returndict["body"] = base64.b64encode(
                                response.data
                            ).decode("utf-8")
                            zappa_returndict["isBase64Encoded"] = True
                        else:
                            zappa_returndict["body"] = response.get_data(as_text=True)
                    zappa_returndict["statusCode"] = response.status_code
                    # Mirror whichever header shape the incoming event used.
                    if "headers" in event:
                        zappa_returndict["headers"] = {}
                        for key, value in response.headers:
                            zappa_returndict["headers"][key] = value
                    if "multiValueHeaders" in event:
                        zappa_returndict["multiValueHeaders"] = {}
                        for key, value in response.headers:
                            zappa_returndict["multiValueHeaders"][
                                key
                            ] = response.headers.getlist(key)
                    # Calculate the total response time,
                    # and log it in the Common Log format.
                    time_end = datetime.datetime.now()
                    delta = time_end - time_start
                    response_time_ms = delta.total_seconds() * 1000
                    response.content = response.data
                    common_log(environ, response, response_time=response_time_ms)
                    return zappa_returndict
        except Exception as e:  # pragma: no cover
            # Print statements are visible in the logs either way
            print(e)
            exc_info = sys.exc_info()
            message = (
                "An uncaught exception happened while servicing this request. "
                "You can investigate this with the `zappa tail` command."
            )
            # If we didn't even build an app_module, just raise.
            if not settings.DJANGO_SETTINGS:
                try:
                    self.app_module
                except NameError as ne:
                    # NOTE(review): `ne.message` is a Python 2 attribute; on
                    # Python 3 this line would itself raise AttributeError. In
                    # practice a missing attribute above raises AttributeError
                    # (not NameError), so this branch looks effectively dead —
                    # confirm before relying on it.
                    message = "Failed to import module: {}".format(ne.message)
            # Call exception handler for unhandled exceptions
            exception_handler = self.settings.EXCEPTION_HANDLER
            self._process_exception(
                exception_handler=exception_handler,
                event=event,
                context=context,
                exception=e,
            )
            # Return this unspecified exception as a 500, using template that API Gateway expects.
            content = collections.OrderedDict()
            content["statusCode"] = 500
            body = {"message": message}
            if settings.DEBUG:  # only include traceback if debug is on.
                body["traceback"] = traceback.format_exception(
                    *exc_info
                )  # traceback as a list for readability.
            # NOTE(review): json.dumps(str(body)) emits the dict's repr as a
            # single JSON *string*, not a JSON object; clients may depend on
            # this shape, so it is documented rather than changed here.
            content["body"] = json.dumps(str(body), sort_keys=True, indent=4)
            return content
def lambda_handler(event, context):  # pragma: no cover
    # Module-level entry point configured in AWS Lambda; delegates to the
    # classmethod, which builds a LambdaHandler instance per invocation.
    return LambdaHandler.lambda_handler(event, context)
def keep_warm_callback(event, context):
    """
    Triggered by the CloudWatch schedule created when the ``keep_warm``
    setting is enabled; pings the handler so the Lambda container stays warm.
    """
    # The scheduled event payload is discarded on purpose: an empty event
    # still drives web-app initialization, which is all the ping needs.
    lambda_handler(
        event={}, context=context
    )  # overriding event with an empty one so that web app initialization will
    # be triggered.
import base64
import logging
import sys
from urllib.parse import urlencode
import six
from requestlogger import ApacheFormatter
from werkzeug import urls
from .utilities import merge_headers, titlecase_keys
# HTTP methods that may carry a request body; only these are considered for
# base64-decoded binary payload handling in create_wsgi_request().
BINARY_METHODS = ["POST", "PUT", "PATCH", "DELETE", "CONNECT", "OPTIONS"]
def create_wsgi_request(
    event_info,
    server_name="zappa",
    script_name=None,
    trailing_slash=True,
    binary_support=False,
    base_path=None,
    context_header_mappings=None,
):
    """
    Given some event_info via API Gateway,
    create and return a valid WSGI request environ.

    :param event_info: raw API Gateway / ALB Lambda event dict.
    :param server_name: value used for the WSGI ``SERVER_NAME`` key.
    :param script_name: mount point of the app (e.g. "/<stage>"); derived
        from ``base_path`` when that is supplied.
    :param trailing_slash: accepted for interface compatibility; not used
        inside this function.
    :param binary_support: when True, bodies of methods in BINARY_METHODS may
        be base64-decoded.
    :param base_path: optional path prefix to strip from the request path.
    :param context_header_mappings: optional mapping of header name -> dotted
        path into ``event_info["requestContext"]`` whose value is injected as
        that header.
    """
    # Use None (not {}) as the default so a shared mutable default can't leak
    # state between calls; normalize here so the logic below is unchanged.
    if context_header_mappings is None:
        context_header_mappings = {}
    method = event_info["httpMethod"]
    headers = (
        merge_headers(event_info) or {}
    )  # Allow for the AGW console 'Test' button to work (Pull #735)
    """
    API Gateway and ALB both started allowing for multi-value querystring
    params in Nov. 2018. If there aren't multi-value params present, then
    it acts identically to 'queryStringParameters', so we can use it as a
    drop-in replacement.
    The one caveat here is that ALB will only include _one_ of
    queryStringParameters _or_ multiValueQueryStringParameters, which means
    we have to check for the existence of one and then fall back to the
    other.
    """
    if "multiValueQueryStringParameters" in event_info:
        query = event_info["multiValueQueryStringParameters"]
        query_string = urlencode(query, doseq=True) if query else ""
    else:
        query = event_info.get("queryStringParameters", {})
        query_string = urlencode(query) if query else ""
    # Walk each dotted path into requestContext; abandon the mapping when any
    # segment is missing.
    if context_header_mappings:
        for key, value in context_header_mappings.items():
            parts = value.split(".")
            header_val = event_info["requestContext"]
            for part in parts:
                if part not in header_val:
                    header_val = None
                    break
                else:
                    header_val = header_val[part]
            if header_val is not None:
                headers[key] = header_val
    # Extract remote user from context if Authorizer is enabled
    remote_user = None
    if event_info["requestContext"].get("authorizer"):
        remote_user = event_info["requestContext"]["authorizer"].get("principalId")
    elif event_info["requestContext"].get("identity"):
        remote_user = event_info["requestContext"]["identity"].get("userArn")
    # Related: https://github.com/Miserlou/Zappa/issues/677
    # https://github.com/Miserlou/Zappa/issues/683
    # https://github.com/Miserlou/Zappa/issues/696
    # https://github.com/Miserlou/Zappa/issues/836
    # https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Summary_table
    if binary_support and (method in BINARY_METHODS):
        if event_info.get("isBase64Encoded", False):
            encoded_body = event_info["body"]
            body = base64.b64decode(encoded_body)
        else:
            body = event_info["body"]
            if isinstance(body, six.string_types):
                body = body.encode("utf-8")
    else:
        body = event_info["body"]
        if isinstance(body, six.string_types):
            body = body.encode("utf-8")
    # Make header names canonical, e.g. content-type => Content-Type
    # https://github.com/Miserlou/Zappa/issues/1188
    headers = titlecase_keys(headers)
    path = urls.url_unquote(event_info["path"])
    if base_path:
        script_name = "/" + base_path
        if path.startswith(script_name):
            path = path[len(script_name) :]
    x_forwarded_for = headers.get("X-Forwarded-For", "")
    if "," in x_forwarded_for:
        # The last one is the cloudfront proxy ip. The second to last is the real client ip.
        # Everything else is user supplied and untrustworthy.
        remote_addr = x_forwarded_for.split(", ")[-2]
    else:
        remote_addr = x_forwarded_for or "127.0.0.1"
    environ = {
        "PATH_INFO": get_wsgi_string(path),
        "QUERY_STRING": get_wsgi_string(query_string),
        "REMOTE_ADDR": remote_addr,
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": get_wsgi_string(str(script_name)) if script_name else "",
        "SERVER_NAME": str(server_name),
        "SERVER_PORT": headers.get("X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": str("HTTP/1.1"),
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": headers.get("X-Forwarded-Proto", "http"),
        "wsgi.input": body,
        "wsgi.errors": sys.stderr,
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
    }
    # Input processing
    if method in ["POST", "PUT", "PATCH", "DELETE"]:
        if "Content-Type" in headers:
            environ["CONTENT_TYPE"] = headers["Content-Type"]
        # This must be Bytes or None
        environ["wsgi.input"] = six.BytesIO(body)
        if body:
            environ["CONTENT_LENGTH"] = str(len(body))
        else:
            environ["CONTENT_LENGTH"] = "0"
    for header in headers:
        wsgi_name = "HTTP_" + header.upper().replace("-", "_")
        environ[wsgi_name] = str(headers[header])
    if script_name:
        environ["SCRIPT_NAME"] = script_name
        path_info = environ["PATH_INFO"]
        if script_name in path_info:
            # Bug fix: the previous code called str.replace() and discarded
            # the result, leaving the mount prefix in PATH_INFO. Per WSGI,
            # PATH_INFO must be relative to SCRIPT_NAME, so strip the first
            # occurrence of the prefix.
            environ["PATH_INFO"] = path_info.replace(script_name, "", 1)
    if remote_user:
        environ["REMOTE_USER"] = remote_user
    if event_info["requestContext"].get("authorizer"):
        environ["API_GATEWAY_AUTHORIZER"] = event_info["requestContext"]["authorizer"]
    return environ
def common_log(environ, response, response_time=None):
    """
    Given the WSGI environ and the response,
    log this event in Common Log Format.

    Returns the formatted log entry after emitting it at INFO level.
    """
    logger = logging.getLogger()
    content_length = len(response.content)
    if response_time:
        formatter = ApacheFormatter(with_response_time=True)
        try:
            log_entry = formatter(
                response.status_code, environ, content_length, rt_us=response_time
            )
        except TypeError:
            # Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
            log_entry = formatter(
                response.status_code, environ, content_length, rt_ms=response_time
            )
    else:
        formatter = ApacheFormatter(with_response_time=False)
        log_entry = formatter(response.status_code, environ, content_length)
    logger.info(log_entry)
    return log_entry
# Related: https://github.com/Miserlou/Zappa/issues/1199
def get_wsgi_string(string, encoding="utf-8"):
    """
    Returns wsgi-compatible string
    """
    # Per the WSGI spec, environ strings are "bytes-as-latin-1": encode with
    # the real encoding, then reinterpret the bytes as ISO-8859-1.
    encoded = string.encode(encoding)
    return encoded.decode("iso-8859-1")
import importlib
import inspect
import json
import os
import time
import uuid
from functools import update_wrapper, wraps
import boto3
import botocore
from .utilities import get_topic_name
# The async response table name comes from the generated zappa_settings
# module when deployed; fall back to None (response capture disabled, with a
# warning at dispatch time) when it is absent.
try:
    from zappa_settings import ASYNC_RESPONSE_TABLE
except ImportError:
    ASYNC_RESPONSE_TABLE = None
# Declare these here so they're kept warm.
try:
    aws_session = boto3.Session()
    LAMBDA_CLIENT = aws_session.client("lambda")
    SNS_CLIENT = aws_session.client("sns")
    STS_CLIENT = aws_session.client("sts")
    DYNAMODB_CLIENT = aws_session.client("dynamodb")
except botocore.exceptions.NoRegionError as e:  # pragma: no cover
    # This can happen while testing on Travis, but it's taken care of
    # during class initialization.
    pass
##
# Response and Exception classes
##
# Upper bounds (in bytes) for serialized async task messages, checked in the
# dispatchers' _send() implementations before calling AWS.
LAMBDA_ASYNC_PAYLOAD_LIMIT = 256000
SNS_ASYNC_PAYLOAD_LIMIT = 256000
# Raised by the dispatchers when a serialized task message exceeds the
# payload size limits defined above.
class AsyncException(Exception):  # pragma: no cover
    """Simple exception class for async tasks."""
    pass
class LambdaAsyncResponse:
    """
    Base Response Dispatcher class
    Can be used directly or subclassed if the method to send the message is changed.
    """
    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """ """
        boto_session = kwargs.get("boto_session")
        if boto_session:
            self.client = boto_session.client("lambda")
        else:  # pragma: no cover
            self.client = LAMBDA_CLIENT
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region
        # Response capture needs the DynamoDB table to be configured; warn
        # and disable it otherwise.
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        self.capture_response = capture_response
    def send(self, task_path, args, kwargs):
        """
        Create the message object and pass it to the actual sender.
        """
        self._send(
            {
                "task_path": task_path,
                "capture_response": self.capture_response,
                "response_id": self.response_id,
                "args": args,
                "kwargs": kwargs,
            }
        )
        return self
    def _send(self, message):
        """
        Given a message, directly invoke the lamdba function for this task.
        """
        message["command"] = "zappa.asynchronous.route_lambda_task"
        payload = json.dumps(message).encode("utf-8")
        if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for async Lambda call")
        self.response = self.client.invoke(
            FunctionName=self.lambda_function_name,
            InvocationType="Event",  # makes the call async
            Payload=payload,
        )
        # 202 Accepted is how Lambda acknowledges an Event invocation.
        self.sent = self.response.get("StatusCode", 0) == 202
class SnsAsyncResponse(LambdaAsyncResponse):
    """
    Send a SNS message to a specified SNS topic
    Serialise the func path and arguments
    """
    def __init__(
        self,
        lambda_function_name=None,
        aws_region=None,
        capture_response=False,
        **kwargs
    ):
        """
        :param lambda_function_name: name of the function whose topic (see
            get_topic_name) receives the message.
        :param aws_region: region used when building the topic ARN.
        :param capture_response: when True, allocate a response_id so the
            task result can be stored in the async response table.
        :param kwargs: may contain 'boto_session' (preferred client source)
            and/or 'arn' (explicit topic ARN, skipping the STS account lookup).
        """
        self.lambda_function_name = lambda_function_name
        self.aws_region = aws_region
        if kwargs.get("boto_session"):
            self.client = kwargs.get("boto_session").client("sns")
        else:  # pragma: no cover
            self.client = SNS_CLIENT
        if kwargs.get("arn"):
            self.arn = kwargs.get("arn")
        else:
            # Derive the topic ARN from the current account and the
            # function's conventional topic name.
            if kwargs.get("boto_session"):
                sts_client = kwargs.get("boto_session").client("sts")
            else:
                sts_client = STS_CLIENT
            AWS_ACCOUNT_ID = sts_client.get_caller_identity()["Account"]
            self.arn = "arn:aws:sns:{region}:{account}:{topic_name}".format(
                region=self.aws_region,
                account=AWS_ACCOUNT_ID,
                topic_name=get_topic_name(self.lambda_function_name),
            )
        # Issue: https://github.com/Miserlou/Zappa/issues/1209
        if capture_response:
            if ASYNC_RESPONSE_TABLE is None:
                print(
                    "Warning! Attempted to capture a response without "
                    "async_response_table configured in settings (you won't "
                    "capture async responses)."
                )
                capture_response = False
                self.response_id = "MISCONFIGURED"
            else:
                self.response_id = str(uuid.uuid4())
        else:
            self.response_id = None
        # Single authoritative assignment; the previous version redundantly
        # assigned this both before and after the validation above.
        self.capture_response = capture_response
    def _send(self, message):
        """
        Given a message, publish to this topic.

        Raises AsyncException when the serialized message exceeds the SNS
        message size limit.
        """
        message["command"] = "zappa.asynchronous.route_sns_task"
        # SNS expects the Message parameter as a string (not bytes), so keep
        # the JSON text and measure its UTF-8 size against the SNS limit
        # (previously this incorrectly compared against
        # LAMBDA_ASYNC_PAYLOAD_LIMIT and published raw bytes).
        payload = json.dumps(message)
        if len(payload.encode("utf-8")) > SNS_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
            raise AsyncException("Payload too large for SNS")
        self.response = self.client.publish(TargetArn=self.arn, Message=payload)
        self.sent = self.response.get("MessageId")
##
# Async Routers
##
# Maps the `service` argument of task()/run() to the dispatcher class that
# performs the actual fan-out.
ASYNC_CLASSES = {
    "lambda": LambdaAsyncResponse,
    "sns": SnsAsyncResponse,
}
def route_lambda_task(event, context):
    """
    Deserialises the message from event passed to zappa.handler.run_function
    imports the function, calls the function with args
    """
    # For direct Lambda invocations the event *is* the task message.
    return run_message(event)
def route_sns_task(event, context):
    """
    Gets SNS Message, deserialises the message,
    imports the function, calls the function with args
    """
    # The task message is JSON-encoded inside the first SNS record.
    sns_payload = event["Records"][0]["Sns"]["Message"]
    return run_message(json.loads(sns_payload))
def run_message(message):
    """
    Runs a function defined by a message object with keys:
    'task_path', 'args', and 'kwargs' used by lambda routing
    and a 'command' in handler.py
    """
    capture = message.get("capture_response", False)
    if capture:
        # Mark the task as in-progress (TTL ~10 minutes) before running it.
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                "id": {"S": str(message["response_id"])},
                "ttl": {"N": str(int(time.time() + 600))},
                "async_status": {"S": "in progress"},
                "async_response": {"S": str(json.dumps("N/A"))},
            },
        )
    func = import_and_get_task(message["task_path"])
    # @task-wrapped callables expose the original function as .sync; call
    # that so we don't re-dispatch asynchronously.
    target = func.sync if hasattr(func, "sync") else func
    response = target(*message["args"], **message["kwargs"])
    if capture:
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={"id": {"S": str(message["response_id"])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ":r": {"S": str(json.dumps(response))},
                ":s": {"S": "complete"},
            },
        )
    return response
##
# Execution interfaces and classes
##
def run(
    func,
    args=None,
    kwargs=None,
    service="lambda",
    capture_response=False,
    remote_aws_lambda_function_name=None,
    remote_aws_region=None,
    **task_kwargs
):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:

    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)

    If you want to use SNS, then do:

    zappa.asynchronous.run(func, args, kwargs, service='sns')

    and other arguments are similar to @task

    :param func: the top-level function to dispatch.
    :param args: positional arguments for func (defaults to none).
    :param kwargs: keyword arguments for func (defaults to none).
    :param service: 'lambda' or 'sns' — selects the dispatcher class.
    :param capture_response: store the task result in the async table.
    :param remote_aws_lambda_function_name: override the target function name
        (defaults to the current Lambda's name from the environment).
    :param remote_aws_region: override the target region.
    :param task_kwargs: passed through to the dispatcher (e.g. boto_session).
    """
    # None (not [] / {}) as defaults: mutable default arguments are shared
    # across calls and a classic source of leaked state.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
        "AWS_LAMBDA_FUNCTION_NAME"
    )
    aws_region = remote_aws_region or os.environ.get("AWS_REGION")
    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](
        lambda_function_name=lambda_function_name,
        aws_region=aws_region,
        capture_response=capture_response,
        **task_kwargs
    ).send(task_path, args, kwargs)
# Handy:
# http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function
# However, this needs to pass inspect.getargspec() in handler.py which does not take classes
# Wrapper written to take optional arguments
# http://chase-seibert.github.io/blog/2013/12/17/python-decorator-optional-parameter.html
def task(*args, **kwargs):
    """Async task decorator so that running

    Args:
        func (function): the function to be wrapped
        Further requirements:
        func must be an independent top-level function.
        i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against

    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question

    Supports both bare usage (``@task``) and parameterized usage
    (``@task(service='sns', ...)``).
    """
    # Bare usage: @task passes the function itself as the only argument.
    func = None
    if len(args) == 1 and callable(args[0]):
        func = args[0]
    if not kwargs:  # Default Values
        service = "lambda"
        lambda_function_name_arg = None
        aws_region_arg = None
    else:  # Arguments were passed
        service = kwargs.get("service", "lambda")
        lambda_function_name_arg = kwargs.get("remote_aws_lambda_function_name")
        aws_region_arg = kwargs.get("remote_aws_region")
    capture_response = kwargs.get("capture_response", False)
    def func_wrapper(func):
        # Resolved once at decoration time; baked into every dispatch.
        task_path = get_func_task_path(func)
        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func
            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.
            Returns:
                In async mode, the object returned includes state of the dispatch.
                For instance
                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get(
                "AWS_LAMBDA_FUNCTION_NAME"
            )
            aws_region = aws_region_arg or os.environ.get("AWS_REGION")
            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](
                    lambda_function_name=lambda_function_name,
                    aws_region=aws_region,
                    capture_response=capture_response,
                ).send(task_path, args, kwargs)
                return send_result
            else:
                # Not running inside Lambda (or unknown service): run inline.
                return func(*args, **kwargs)
        update_wrapper(_run_async, func)
        # Expose dispatch metadata and the original (synchronous) callable.
        _run_async.service = service
        _run_async.sync = func
        return _run_async
    return func_wrapper(func) if func else func_wrapper
def task_sns(func):
    """
    SNS-based task dispatcher. Functions the same way as task()
    """
    # Equivalent to decorating with @task(service="sns").
    return task(func, service="sns")
##
# Utility Functions
##
def import_and_get_task(task_path):
    """
    Given a modular path to a function, import that module
    and return the function.
    """
    module_name, function_name = task_path.rsplit(".", 1)
    target_module = importlib.import_module(module_name)
    return getattr(target_module, function_name)
def get_func_task_path(func):
    """
    Format the modular task path for a function via inspection.
    """
    containing_module = inspect.getmodule(func)
    return "{}.{}".format(containing_module.__name__, func.__name__)
def get_async_response(response_id):
    """
    Get the response from the async table
    """
    item = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE, Key={"id": {"S": str(response_id)}}
    ).get("Item")
    if item is None:
        # Unknown id (or entry already expired via its TTL).
        return None
    return {
        "status": item["async_status"]["S"],
        "response": json.loads(item["async_response"]["S"]),
    }
import getpass
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
import boto3
import botocore
import requests
import troposphere
import troposphere.apigateway
from botocore.exceptions import ClientError
from setuptools import find_packages
from tqdm import tqdm
from .utilities import (
add_event_source,
conflicts_with_a_neighbouring_module,
contains_python_files_or_subdirs,
copytree,
get_topic_name,
get_venv_from_python_version,
human_size,
remove_event_source,
)
##
# Logging Config
##
# Module-level logger, INFO by default so deployment progress is visible.
logging.basicConfig(format="%(levelname)s:%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##
# Policies And Template Mappings
##
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
# Regions where API Gateway is available. Deduplicated: "eu-north-1" was
# previously listed twice in each of these lists.
API_GATEWAY_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
# Regions where AWS Lambda is available.
LAMBDA_REGIONS = [
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
    "eu-central-1",
    "eu-north-1",
    "eu-west-1",
    "eu-west-2",
    "eu-west-3",
    "ap-northeast-1",
    "ap-northeast-2",
    "ap-northeast-3",
    "ap-southeast-1",
    "ap-southeast-2",
    "ap-east-1",
    "ap-south-1",
    "ca-central-1",
    "cn-north-1",
    "cn-northwest-1",
    "sa-east-1",
    "us-gov-east-1",
    "us-gov-west-1",
]
# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
# Glob patterns stripped from the deployment archive to keep package size down.
ZIP_EXCLUDES = [
    "*.exe",
    "*.DS_Store",
    "*.Python",
    "*.git",
    ".git/*",
    "*.zip",
    "*.tar.gz",
    "*.hg",
    "pip",
    "docutils*",
    "setuputils*",
    "__pycache__/*",
]
# When using ALB as an event source for Lambdas, we need to create an alias
# to ensure that, on zappa update, the ALB doesn't lose permissions to access
# the Lambda.
# See: https://github.com/Miserlou/Zappa/pull/1730
ALB_LAMBDA_ALIAS = "current-alb-version"
##
# Classes
##
class Zappa:
"""
Zappa!
Makes it easy to run Python web applications on AWS Lambda/API Gateway.
"""
##
# Configurables
##
http_methods = ["ANY"]
role_name = "ZappaLambdaExecution"
extra_permissions = None
assume_policy = ASSUME_POLICY
attach_policy = ATTACH_POLICY
apigateway_policy = None
cloudwatch_log_levels = ["OFF", "ERROR", "INFO"]
xray_tracing = False
##
# Credentials
##
boto_session = None
credentials_arn = None
    def __init__(
        self,
        boto_session=None,
        profile_name=None,
        aws_region=None,
        load_credentials=True,
        desired_role_name=None,
        desired_role_arn=None,
        runtime="python3.6", # Detected at runtime in CLI
        tags=(),
        endpoint_urls={},
        xray_tracing=False,
    ):
        """
        Instantiate this new Zappa instance, loading any custom credentials if necessary.

        :param boto_session: Existing boto3 session to reuse (passed to load_credentials).
        :param profile_name: AWS credentials profile name (passed to load_credentials).
        :param aws_region: Region to operate in; None means use boto's configured default.
        :param load_credentials: When True, resolve credentials and construct all service
            clients immediately; when False, no clients are created here.
        :param desired_role_name: Overrides the default IAM role name.
        :param desired_role_arn: Overrides the IAM role ARN.
        :param runtime: Lambda Python runtime identifier, e.g. "python3.6"/"python3.7"/"python3.8".
        :param tags: Tags applied to created AWS resources.
        :param endpoint_urls: Mapping of service name -> custom endpoint URL for
            non-AWS deployments. NOTE(review): mutable default argument -- it is only
            read here, but callers should not mutate the shared default.
        :param xray_tracing: Enable AWS X-Ray tracing on the Lambda.
        """
        # Set aws_region to None to use the system's region instead
        if aws_region is None:
            # https://github.com/Miserlou/Zappa/issues/413
            self.aws_region = boto3.Session().region_name
            logger.debug("Set region from boto: %s", self.aws_region)
        else:
            self.aws_region = aws_region
        if desired_role_name:
            self.role_name = desired_role_name
        if desired_role_arn:
            self.credentials_arn = desired_role_arn
        self.runtime = runtime
        # Derive the CPython ABI tag used to select manylinux wheels.
        if self.runtime == "python3.6":
            self.manylinux_suffix_start = "cp36m"
        elif self.runtime == "python3.7":
            self.manylinux_suffix_start = "cp37m"
        else:
            # The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
            # See https://github.com/pypa/manylinux for a more detailed explanation
            self.manylinux_suffix_start = "cp38"
        # AWS Lambda supports manylinux1/2010 and manylinux2014
        manylinux_suffixes = ("2014", "2010", "1")
        self.manylinux_wheel_file_match = re.compile(
            f'^.*{self.manylinux_suffix_start}-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
        )
        self.manylinux_wheel_abi3_file_match = re.compile(
            f'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$'
        )
        self.endpoint_urls = endpoint_urls
        self.xray_tracing = xray_tracing
        # Some common invocations, such as DB migrations,
        # can take longer than the default.
        # Note that this is set to 300s, but if connected to
        # APIGW, Lambda will max out at 30s.
        # Related: https://github.com/Miserlou/Zappa/issues/205
        long_config_dict = {
            "region_name": aws_region,
            "connect_timeout": 5,
            "read_timeout": 300,
        }
        long_config = botocore.client.Config(**long_config_dict)
        # Credentials must be loaded before any client can be created.
        if load_credentials:
            self.load_credentials(boto_session, profile_name)
            # Initialize clients
            self.s3_client = self.boto_client("s3")
            self.lambda_client = self.boto_client("lambda", config=long_config)
            self.elbv2_client = self.boto_client("elbv2")
            self.events_client = self.boto_client("events")
            self.apigateway_client = self.boto_client("apigateway")
            # AWS ACM certificates need to be created from us-east-1 to be used by API gateway
            east_config = botocore.client.Config(region_name="us-east-1")
            self.acm_client = self.boto_client("acm", config=east_config)
            self.logs_client = self.boto_client("logs")
            self.iam_client = self.boto_client("iam")
            self.iam = self.boto_resource("iam")
            self.cloudwatch = self.boto_client("cloudwatch")
            self.route53 = self.boto_client("route53")
            self.sns_client = self.boto_client("sns")
            self.cf_client = self.boto_client("cloudformation")
            self.dynamodb_client = self.boto_client("dynamodb")
            self.cognito_client = self.boto_client("cognito-idp")
            self.sts_client = self.boto_client("sts")
        self.tags = tags
        # CloudFormation template state used when building API resources.
        self.cf_template = troposphere.Template()
        self.cf_api_resources = []
        self.cf_parameters = {}
def configure_boto_session_method_kwargs(self, service, kw):
"""Allow for custom endpoint urls for non-AWS (testing and bootleg cloud) deployments"""
if service in self.endpoint_urls and not "endpoint_url" in kw:
kw["endpoint_url"] = self.endpoint_urls[service]
return kw
def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(
service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)
)
def cache_param(self, value):
"""Returns a troposphere Ref to a value cached as a parameter."""
if value not in self.cf_parameters:
keyname = chr(ord("A") + len(self.cf_parameters))
param = self.cf_template.add_parameter(
troposphere.Parameter(
keyname, Type="String", Default=value, tags=self.tags
)
)
self.cf_parameters[value] = param
return troposphere.Ref(self.cf_parameters[value])
##
# Packaging
##
    def copy_editable_packages(self, egg_links, temp_package_path):
        """
        Copy the real package directories behind pip ``*.egg-link`` files into
        the temporary package path, then remove any egg-links that were copied
        along, since they would break the deployed package.

        :param egg_links: Paths to *.egg-link files found in site-packages.
        :param temp_package_path: Destination directory for the copied packages.
        """
        for egg_link in egg_links:
            # The first line of an egg-link file is the source checkout path.
            with open(egg_link, "rb") as df:
                egg_path = df.read().decode("utf-8").splitlines()[0].strip()
                pkgs = set(
                    [
                        x.split(".")[0]
                        for x in find_packages(egg_path, exclude=["test", "tests"])
                    ]
                )
                for pkg in pkgs:
                    copytree(
                        os.path.join(egg_path, pkg),
                        os.path.join(temp_package_path, pkg),
                        metadata=False,
                        symlinks=False,
                    )
        if temp_package_path:
            # now remove any egg-links as they will cause issues if they still exist
            for link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
                os.remove(link)
def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(
pkg_name=req.project_name, installed_distros=installed_distros
)
return list(set(deps)) # de-dupe before returning
    def create_handler_venv(self):
        """
        Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.

        Returns the path of the new handler venv folder ("handler_venv" in the
        current working directory). Used by the slim_handler deployment mode.
        """
        import subprocess
        # We will need the currenv venv to pull Zappa from
        current_venv = self.get_current_venv()
        # Make a new folder for the handler packages
        ve_path = os.path.join(os.getcwd(), "handler_venv")
        # site-packages lives in a different place on Windows.
        if os.sys.platform == "win32":
            current_site_packages_dir = os.path.join(
                current_venv, "Lib", "site-packages"
            )
            venv_site_packages_dir = os.path.join(ve_path, "Lib", "site-packages")
        else:
            current_site_packages_dir = os.path.join(
                current_venv, "lib", get_venv_from_python_version(), "site-packages"
            )
            venv_site_packages_dir = os.path.join(
                ve_path, "lib", get_venv_from_python_version(), "site-packages"
            )
        if not os.path.isdir(venv_site_packages_dir):
            os.makedirs(venv_site_packages_dir)
        # Copy zappa* to the new virtualenv
        zappa_things = [
            z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == "zappa"
        ]
        for z in zappa_things:
            copytree(
                os.path.join(current_site_packages_dir, z),
                os.path.join(venv_site_packages_dir, z),
            )
        # Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
        zappa_deps = self.get_deps_list("zappa")
        pkg_list = ["{0!s}=={1!s}".format(dep, version) for dep, version in zappa_deps]
        # Need to manually add setuptools
        pkg_list.append("setuptools")
        command = [
            "pip",
            "install",
            "--quiet",
            "--target",
            venv_site_packages_dir,
        ] + pkg_list
        # This is the recommended method for installing packages if you don't
        # to depend on `setuptools`
        # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
        pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # Using communicate() to avoid deadlocks
        pip_process.communicate()
        pip_return_code = pip_process.returncode
        if pip_return_code:
            raise EnvironmentError("Pypi lookup failed")
        return ve_path
# staticmethod as per https://github.com/Miserlou/Zappa/issues/780
@staticmethod
def get_current_venv():
"""
Returns the path to the current virtualenv
"""
if "VIRTUAL_ENV" in os.environ:
venv = os.environ["VIRTUAL_ENV"]
elif os.path.exists(".python-version"): # pragma: no cover
try:
subprocess.check_output(["pyenv", "help"], stderr=subprocess.STDOUT)
except OSError:
print(
"This directory seems to have pyenv's local venv, "
"but pyenv executable was not found."
)
with open(".python-version", "r") as f:
# minor fix in how .python-version is read
# Related: https://github.com/Miserlou/Zappa/issues/921
env_name = f.readline().strip()
bin_path = subprocess.check_output(["pyenv", "which", "python"]).decode(
"utf-8"
)
venv = bin_path[: bin_path.rfind(env_name)] + env_name
else: # pragma: no cover
return None
return venv
    def create_lambda_zip(
        self,
        prefix="lambda_package",
        handler_file=None,
        slim_handler=False,
        minify=True,
        exclude=None,
        exclude_glob=None,
        use_precompiled_packages=True,
        include=None,
        venv=None,
        output=None,
        disable_progress=False,
        archive_format="zip",
    ):
        """
        Create a Lambda-ready zip file of the current virtualenvironment and working directory.
        Returns path to that file.

        :param prefix: Base name of the produced archive file.
        :param handler_file: Optional handler file copied to the archive root
            (AWS Lambda requires the handler at the top level).
        :param slim_handler: When True, skip the project files (handler-only package).
        :param minify: When True, apply ZIP_EXCLUDES and ``exclude`` patterns while copying.
        :param exclude: Extra shutil-style ignore patterns; the archive itself is always excluded.
        :param exclude_glob: Glob patterns removed from the staged tree after copying.
        :param use_precompiled_packages: Replace copied packages with manylinux wheels from PyPI.
        :param include: Unused here; retained for interface compatibility.
        :param venv: Virtualenv to package; defaults to the active one.
        :param output: Explicit output filename; overrides ``prefix``/timestamp naming.
        :param disable_progress: Suppress download progress bars.
        :param archive_format: "zip" or "tarball".
        :raises KeyError: If ``archive_format`` is not "zip" or "tarball".
        """
        # Validate archive_format
        if archive_format not in ["zip", "tarball"]:
            raise KeyError(
                "The archive format to create a lambda package must be zip or tarball"
            )
        # Pip is a weird package.
        # Calling this function in some environments without this can cause.. funkiness.
        import pip
        if not venv:
            venv = self.get_current_venv()
        build_time = str(int(time.time()))
        cwd = os.getcwd()
        if not output:
            if archive_format == "zip":
                archive_fname = prefix + "-" + build_time + ".zip"
            elif archive_format == "tarball":
                archive_fname = prefix + "-" + build_time + ".tar.gz"
        else:
            archive_fname = output
        archive_path = os.path.join(cwd, archive_fname)
        # Files that should be excluded from the zip
        if exclude is None:
            exclude = list()
        if exclude_glob is None:
            exclude_glob = list()
        # Exclude the zip itself
        exclude.append(archive_path)
        # Make sure that 'concurrent' is always forbidden.
        # https://github.com/Miserlou/Zappa/issues/827
        if not "concurrent" in exclude:
            exclude.append("concurrent")
        def splitpath(path):
            # Split a path into all of its components, root first.
            parts = []
            (path, tail) = os.path.split(path)
            while path and tail:
                parts.append(tail)
                (path, tail) = os.path.split(path)
            parts.append(os.path.join(path, tail))
            return list(map(os.path.normpath, parts))[::-1]
        split_venv = splitpath(venv)
        split_cwd = splitpath(cwd)
        # Ideally this should be avoided automatically,
        # but this serves as an okay stop-gap measure.
        if split_venv[-1] == split_cwd[-1]:  # pragma: no cover
            print(
                "Warning! Your project and virtualenv have the same name! You may want "
                "to re-create your venv with a new name, or explicitly define a "
                "'project_name', as this may cause errors."
            )
        # First, do the project..
        temp_project_path = tempfile.mkdtemp(prefix="zappa-project")
        if not slim_handler:
            # Slim handler does not take the project files.
            if minify:
                # Related: https://github.com/Miserlou/Zappa/issues/744
                excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
                copytree(
                    cwd,
                    temp_project_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(cwd, temp_project_path, metadata=False, symlinks=False)
            for glob_path in exclude_glob:
                for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                    try:
                        os.remove(path)
                    except OSError:  # is a directory
                        shutil.rmtree(path)
        # If a handler_file is supplied, copy that to the root of the package,
        # because that's where AWS Lambda looks for it. It can't be inside a package.
        if handler_file:
            filename = handler_file.split(os.sep)[-1]
            shutil.copy(handler_file, os.path.join(temp_project_path, filename))
        # Create and populate package ID file and write to temp project path
        package_info = {}
        package_info["uuid"] = str(uuid.uuid4())
        package_info["build_time"] = build_time
        package_info["build_platform"] = os.sys.platform
        package_info["build_user"] = getpass.getuser()
        # TODO: Add git head and info?
        # Ex, from @scoates:
        # def _get_git_branch():
        #     chdir(DIR)
        #     out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
        #     lambci_branch = environ.get('LAMBCI_BRANCH', None)
        #     if out == "HEAD" and lambci_branch:
        #         out += " lambci:{}".format(lambci_branch)
        #     return out
        # def _get_git_hash():
        #     chdir(DIR)
        #     return check_output(['git', 'rev-parse', 'HEAD']).strip()
        # def _get_uname():
        #     return check_output(['uname', '-a']).strip()
        # def _get_user():
        #     return check_output(['whoami']).strip()
        # def set_id_info(zappa_cli):
        #     build_info = {
        #         'branch': _get_git_branch(),
        #         'hash': _get_git_hash(),
        #         'build_uname': _get_uname(),
        #         'build_user': _get_user(),
        #         'build_time': datetime.datetime.utcnow().isoformat(),
        #     }
        #     with open(path.join(DIR, 'id_info.json'), 'w') as f:
        #         json.dump(build_info, f)
        #     return True
        package_id_file = open(
            os.path.join(temp_project_path, "package_info.json"), "w"
        )
        dumped = json.dumps(package_info, indent=4)
        try:
            package_id_file.write(dumped)
        except TypeError:  # This is a Python 2/3 issue. TODO: Make pretty!
            package_id_file.write(str(dumped))
        package_id_file.close()
        # Then, do site site-packages..
        egg_links = []
        temp_package_path = tempfile.mkdtemp(prefix="zappa-packages")
        if os.sys.platform == "win32":
            site_packages = os.path.join(venv, "Lib", "site-packages")
        else:
            site_packages = os.path.join(
                venv, "lib", get_venv_from_python_version(), "site-packages"
            )
        egg_links.extend(glob.glob(os.path.join(site_packages, "*.egg-link")))
        if minify:
            excludes = ZIP_EXCLUDES + exclude
            copytree(
                site_packages,
                temp_package_path,
                metadata=False,
                symlinks=False,
                ignore=shutil.ignore_patterns(*excludes),
            )
        else:
            copytree(site_packages, temp_package_path, metadata=False, symlinks=False)
        # We may have 64-bin specific packages too.
        site_packages_64 = os.path.join(
            venv, "lib64", get_venv_from_python_version(), "site-packages"
        )
        if os.path.exists(site_packages_64):
            egg_links.extend(glob.glob(os.path.join(site_packages_64, "*.egg-link")))
            if minify:
                excludes = ZIP_EXCLUDES + exclude
                copytree(
                    site_packages_64,
                    temp_package_path,
                    metadata=False,
                    symlinks=False,
                    ignore=shutil.ignore_patterns(*excludes),
                )
            else:
                copytree(
                    site_packages_64, temp_package_path, metadata=False, symlinks=False
                )
        if egg_links:
            self.copy_editable_packages(egg_links, temp_package_path)
        # Merge the staged site-packages into the project tree.
        copy_tree(temp_package_path, temp_project_path, update=True)
        # Then the pre-compiled packages..
        if use_precompiled_packages:
            print("Downloading and installing dependencies..")
            installed_packages = self.get_installed_packages(
                site_packages, site_packages_64
            )
            try:
                for (
                    installed_package_name,
                    installed_package_version,
                ) in installed_packages.items():
                    cached_wheel_path = self.get_cached_manylinux_wheel(
                        installed_package_name,
                        installed_package_version,
                        disable_progress,
                    )
                    if cached_wheel_path:
                        # Otherwise try to use manylinux packages from PyPi..
                        # Related: https://github.com/Miserlou/Zappa/issues/398
                        shutil.rmtree(
                            os.path.join(temp_project_path, installed_package_name),
                            ignore_errors=True,
                        )
                        with zipfile.ZipFile(cached_wheel_path) as zfile:
                            zfile.extractall(temp_project_path)
            except Exception as e:
                print(e)
                # XXX - What should we do here?
        # Cleanup
        for glob_path in exclude_glob:
            for path in glob.glob(os.path.join(temp_project_path, glob_path)):
                try:
                    os.remove(path)
                except OSError:  # is a directory
                    shutil.rmtree(path)
        # Then archive it all up..
        if archive_format == "zip":
            print("Packaging project as zip.")
            try:
                compression_method = zipfile.ZIP_DEFLATED
            except ImportError:  # pragma: no cover
                compression_method = zipfile.ZIP_STORED
            archivef = zipfile.ZipFile(archive_path, "w", compression_method)
        elif archive_format == "tarball":
            print("Packaging project as gzipped tarball.")
            archivef = tarfile.open(archive_path, "w|gz")
        for root, dirs, files in os.walk(temp_project_path):
            for filename in files:
                # Skip .pyc files for Django migrations
                # https://github.com/Miserlou/Zappa/issues/436
                # https://github.com/Miserlou/Zappa/issues/464
                if filename[-4:] == ".pyc" and root[-10:] == "migrations":
                    continue
                # If there is a .pyc file in this package,
                # we can skip the python source code as we'll just
                # use the compiled bytecode anyway..
                if filename[-3:] == ".py" and root[-10:] != "migrations":
                    abs_filname = os.path.join(root, filename)
                    abs_pyc_filename = abs_filname + "c"
                    if os.path.isfile(abs_pyc_filename):
                        # but only if the pyc is older than the py,
                        # otherwise we'll deploy outdated code!
                        py_time = os.stat(abs_filname).st_mtime
                        pyc_time = os.stat(abs_pyc_filename).st_mtime
                        if pyc_time > py_time:
                            continue
                # Make sure that the files are all correctly chmodded
                # Related: https://github.com/Miserlou/Zappa/issues/484
                # Related: https://github.com/Miserlou/Zappa/issues/682
                os.chmod(os.path.join(root, filename), 0o755)
                if archive_format == "zip":
                    # Actually put the file into the proper place in the zip
                    # Related: https://github.com/Miserlou/Zappa/pull/716
                    zipi = zipfile.ZipInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    zipi.create_system = 3
                    zipi.external_attr = 0o755 << int(16)  # Is this P2/P3 functional?
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.writestr(zipi, f.read(), compression_method)
                elif archive_format == "tarball":
                    tarinfo = tarfile.TarInfo(
                        os.path.join(
                            root.replace(temp_project_path, "").lstrip(os.sep), filename
                        )
                    )
                    tarinfo.mode = 0o755
                    stat = os.stat(os.path.join(root, filename))
                    tarinfo.mtime = stat.st_mtime
                    tarinfo.size = stat.st_size
                    with open(os.path.join(root, filename), "rb") as f:
                        archivef.addfile(tarinfo, f)
            # Create python init file if it does not exist
            # Only do that if there are sub folders or python files and does not conflict with a neighbouring module
            # Related: https://github.com/Miserlou/Zappa/issues/766
            if not contains_python_files_or_subdirs(root):
                # if the directory does not contain any .py file at any level, we can skip the rest
                dirs[:] = [d for d in dirs if d != root]
            else:
                if (
                    "__init__.py" not in files
                    and not conflicts_with_a_neighbouring_module(root)
                ):
                    tmp_init = os.path.join(temp_project_path, "__init__.py")
                    open(tmp_init, "a").close()
                    os.chmod(tmp_init, 0o755)
                    arcname = os.path.join(
                        root.replace(temp_project_path, ""),
                        os.path.join(
                            root.replace(temp_project_path, ""), "__init__.py"
                        ),
                    )
                    if archive_format == "zip":
                        archivef.write(tmp_init, arcname)
                    elif archive_format == "tarball":
                        archivef.add(tmp_init, arcname)
        # And, we're done!
        archivef.close()
        # Trash the temp directory
        shutil.rmtree(temp_project_path)
        shutil.rmtree(temp_package_path)
        if os.path.isdir(venv) and slim_handler:
            # Remove the temporary handler venv folder
            shutil.rmtree(venv)
        return archive_fname
@staticmethod
def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages)
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64)
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {
package.project_name.lower(): package.version
for package in pkg_resources.WorkingSet()
if package.project_name.lower() in package_to_keep
or package.location.lower()
in [site_packages.lower(), site_packages_64.lower()]
}
return installed_packages
@staticmethod
def download_url_with_progress(url, stream, disable_progress):
"""
Downloads a given url in chunks and writes to the provided stream (can be any io stream).
Displays the progress bar for the download.
"""
resp = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 2)), stream=True
)
resp.raw.decode_content = True
progress = tqdm(
unit="B",
unit_scale=True,
total=int(resp.headers.get("Content-Length", 0)),
disable=disable_progress,
)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
progress.update(len(chunk))
stream.write(chunk)
progress.close()
def get_cached_manylinux_wheel(
self, package_name, package_version, disable_progress=False
):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), "cached_wheels")
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
else:
# Check if we already have a cached copy
wheel_name = re.sub("[^\w\d.]+", "_", package_name, re.UNICODE)
wheel_file = f"{wheel_name}-{package_version}-*_x86_64.whl"
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
for pathname in glob.iglob(wheel_path):
if re.match(self.manylinux_wheel_file_match, pathname) or re.match(
self.manylinux_wheel_abi3_file_match, pathname
):
print(
f" - {package_name}=={package_version}: Using locally cached manylinux wheel"
)
return pathname
# The file is not cached, download it.
wheel_url, filename = self.get_manylinux_wheel_url(
package_name, package_version
)
if not wheel_url:
return None
wheel_path = os.path.join(cached_wheels_dir, filename)
print(f" - {package_name}=={package_version}: Downloading")
with open(wheel_path, "wb") as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
return wheel_path
def get_manylinux_wheel_url(self, package_name, package_version):
"""
For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
"""
cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), "cached_pypi_info")
if not os.path.isdir(cached_pypi_info_dir):
os.makedirs(cached_pypi_info_dir)
# Even though the metadata is for the package, we save it in a
# filename that includes the package's version. This helps in
# invalidating the cached file if the user moves to a different
# version of the package.
# Related: https://github.com/Miserlou/Zappa/issues/899
json_file = "{0!s}-{1!s}.json".format(package_name, package_version)
json_file_path = os.path.join(cached_pypi_info_dir, json_file)
if os.path.exists(json_file_path):
with open(json_file_path, "rb") as metafile:
data = json.load(metafile)
else:
url = "https://pypi.python.org/pypi/{}/json".format(package_name)
try:
res = requests.get(
url, timeout=float(os.environ.get("PIP_TIMEOUT", 1.5))
)
data = res.json()
except Exception as e: # pragma: no cover
return None, None
with open(json_file_path, "wb") as metafile:
jsondata = json.dumps(data)
metafile.write(bytes(jsondata, "utf-8"))
if package_version not in data["releases"]:
return None, None
for f in data["releases"][package_version]:
if re.match(self.manylinux_wheel_file_match, f["filename"]):
return f["url"], f["filename"]
elif re.match(self.manylinux_wheel_abi3_file_match, f["filename"]):
return f["url"], f["filename"]
return None, None
##
# S3
##
    def upload_to_s3(self, source_path, bucket_name, disable_progress=False):
        r"""
        Given a file, upload it to S3.
        Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).
        Returns True on success, false on failure.

        Creates (and tags) the bucket if it does not exist yet; the destination
        key is the basename of ``source_path``.
        """
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError:
            # head_bucket failed: assume the bucket is missing and create it.
            # This is really stupid S3 quirk. Technically, us-east-1 one has no S3,
            # it's actually "US Standard", or something.
            # More here: https://github.com/boto/boto3/issues/125
            if self.aws_region == "us-east-1":
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                )
            else:
                self.s3_client.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={"LocationConstraint": self.aws_region},
                )
            if self.tags:
                tags = {
                    "TagSet": [
                        {"Key": key, "Value": self.tags[key]}
                        for key in self.tags.keys()
                    ]
                }
                self.s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
        # Refuse to upload missing or empty files.
        if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
            print("Problem with source file {}".format(source_path))
            return False
        dest_path = os.path.split(source_path)[1]
        try:
            source_size = os.stat(source_path).st_size
            print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
            progress = tqdm(
                total=float(os.path.getsize(source_path)),
                unit_scale=True,
                unit="B",
                disable=disable_progress,
            )
            # Attempt to upload to S3 using the S3 meta client with the progress bar.
            # If we're unable to do that, try one more time using a session client,
            # which cannot use the progress bar.
            # Related: https://github.com/boto/boto3/issues/611
            try:
                self.s3_client.upload_file(
                    source_path, bucket_name, dest_path, Callback=progress.update
                )
            except Exception as e:  # pragma: no cover
                self.s3_client.upload_file(source_path, bucket_name, dest_path)
            progress.close()
        except (KeyboardInterrupt, SystemExit):  # pragma: no cover
            raise
        except Exception as e:  # pragma: no cover
            print(e)
            return False
        return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
copy_src = {"Bucket": bucket_name, "Key": src_file_name}
try:
self.s3_client.copy(
CopySource=copy_src, Bucket=bucket_name, Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False
def remove_from_s3(self, file_name, bucket_name):
"""
Given a file name and a bucket, remove it from S3.
There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3.
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response["Error"]["Code"])
if error_code == 404:
return False
try:
self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
except (
botocore.exceptions.ParamValidationError,
botocore.exceptions.ClientError,
): # pragma: no cover
return False
##
# Lambda
##
    def create_lambda_function(
        self,
        bucket=None,
        function_name=None,
        handler=None,
        s3_key=None,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        dead_letter_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        xray_tracing=False,
        local_zip=None,
        use_alb=False,
        layers=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, register that Lambda function.

        Exactly one code source is used, in this precedence order:
        ``docker_image_uri`` (container image), then ``local_zip`` (inline zip
        bytes), then ``bucket``/``s3_key``. Returns the new function's ARN.
        """
        if not vpc_config:
            vpc_config = {}
        if not dead_letter_config:
            dead_letter_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not layers:
            layers = []
        kwargs = dict(
            FunctionName=function_name,
            Role=self.credentials_arn,
            Description=description,
            Timeout=timeout,
            MemorySize=memory_size,
            Publish=publish,
            VpcConfig=vpc_config,
            DeadLetterConfig=dead_letter_config,
            Environment={"Variables": aws_environment_variables},
            KMSKeyArn=aws_kms_key_arn,
            TracingConfig={"Mode": "Active" if self.xray_tracing else "PassThrough"},
            Layers=layers,
        )
        # Runtime/Handler only apply to zip-style packages, not container images.
        if not docker_image_uri:
            kwargs["Runtime"] = runtime
            kwargs["Handler"] = handler
            kwargs["PackageType"] = "Zip"
        if docker_image_uri:
            kwargs["Code"] = {"ImageUri": docker_image_uri}
            # default is ZIP. override to Image for container support
            kwargs["PackageType"] = "Image"
            # The create function operation times out when this is '' (the default)
            # So just remove it from the kwargs if it is not specified
            # NOTE(review): this empty-KMSKeyArn cleanup only runs on the docker
            # branch -- confirm whether zip deployments need the same treatment.
            if aws_kms_key_arn == "":
                kwargs.pop("KMSKeyArn")
        elif local_zip:
            kwargs["Code"] = {"ZipFile": local_zip}
        else:
            kwargs["Code"] = {"S3Bucket": bucket, "S3Key": s3_key}
        response = self.lambda_client.create_function(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If we're using an ALB, let's create an alias mapped to the newly
        # created function. This allows clean, no downtime association when
        # using application load balancers as an event source.
        # See: https://github.com/Miserlou/Zappa/pull/1730
        #      https://github.com/Miserlou/Zappa/issues/1823
        if use_alb:
            self.lambda_client.create_alias(
                FunctionName=resource_arn,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=resource_arn,
                ReservedConcurrentExecutions=concurrency,
            )
        return resource_arn
    def update_lambda_function(
        self,
        bucket,
        function_name,
        s3_key=None,
        publish=True,
        local_zip=None,
        num_revisions=None,
        concurrency=None,
        docker_image_uri=None,
    ):
        """
        Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, update that Lambda function's code.
        Optionally, delete previous versions if they exceed the optional limit.

        Code source precedence: ``docker_image_uri``, then ``local_zip``, then
        ``bucket``/``s3_key``. Returns the updated function's ARN.
        """
        print("Updating Lambda function code..")
        kwargs = dict(FunctionName=function_name, Publish=publish)
        if docker_image_uri:
            kwargs["ImageUri"] = docker_image_uri
        elif local_zip:
            kwargs["ZipFile"] = local_zip
        else:
            kwargs["S3Bucket"] = bucket
            kwargs["S3Key"] = s3_key
        response = self.lambda_client.update_function_code(**kwargs)
        resource_arn = response["FunctionArn"]
        version = response["Version"]
        # If the lambda has an ALB alias, let's update the alias
        # to point to the newest version of the function. We have to use a GET
        # here, as there's no HEAD-esque call to retrieve metadata about a
        # function alias.
        # Related: https://github.com/Miserlou/Zappa/pull/1730
        #          https://github.com/Miserlou/Zappa/issues/1823
        try:
            response = self.lambda_client.get_alias(
                FunctionName=function_name,
                Name=ALB_LAMBDA_ALIAS,
            )
            alias_exists = True
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            if "ResourceNotFoundException" not in e.response["Error"]["Code"]:
                raise e
            alias_exists = False
        if alias_exists:
            self.lambda_client.update_alias(
                FunctionName=function_name,
                FunctionVersion=version,
                Name=ALB_LAMBDA_ALIAS,
            )
        # Apply (or clear) the reserved-concurrency setting.
        if concurrency is not None:
            self.lambda_client.put_function_concurrency(
                FunctionName=function_name,
                ReservedConcurrentExecutions=concurrency,
            )
        else:
            self.lambda_client.delete_function_concurrency(FunctionName=function_name)
        if num_revisions:
            # Find the existing revision IDs for the given function
            # Related: https://github.com/Miserlou/Zappa/issues/1402
            versions_in_lambda = []
            versions = self.lambda_client.list_versions_by_function(
                FunctionName=function_name
            )
            for version in versions["Versions"]:
                versions_in_lambda.append(version["Version"])
            while "NextMarker" in versions:
                versions = self.lambda_client.list_versions_by_function(
                    FunctionName=function_name, Marker=versions["NextMarker"]
                )
                for version in versions["Versions"]:
                    versions_in_lambda.append(version["Version"])
            versions_in_lambda.remove("$LATEST")
            # Delete older revisions if their number exceeds the specified limit
            # NOTE(review): assumes list_versions_by_function returns versions
            # oldest-first so [::-1][num_revisions:] keeps the newest -- confirm.
            for version in versions_in_lambda[::-1][num_revisions:]:
                self.lambda_client.delete_function(
                    FunctionName=function_name, Qualifier=version
                )
        return resource_arn
    def update_lambda_configuration(
        self,
        lambda_arn,
        function_name,
        handler,
        description="Zappa Deployment",
        timeout=30,
        memory_size=512,
        publish=True,
        vpc_config=None,
        runtime="python3.6",
        aws_environment_variables=None,
        aws_kms_key_arn=None,
        layers=None,
    ):
        """
        Given an existing function ARN, update the configuration variables.

        Remote environment variables that are not present in the local settings
        are preserved. Returns the function's ARN. Note: ``lambda_arn`` and
        ``publish`` are accepted but not used by this call.
        """
        print("Updating Lambda function configuration..")
        if not vpc_config:
            vpc_config = {}
        if not self.credentials_arn:
            self.get_credentials_arn()
        if not aws_kms_key_arn:
            aws_kms_key_arn = ""
        if not aws_environment_variables:
            aws_environment_variables = {}
        if not layers:
            layers = []
        # Check if there are any remote aws lambda env vars so they don't get trashed.
        # https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
        lambda_aws_config = self.lambda_client.get_function_configuration(
            FunctionName=function_name
        )
        if "Environment" in lambda_aws_config:
            lambda_aws_environment_variables = lambda_aws_config["Environment"].get(
                "Variables", {}
            )
            # Append keys that are remote but not in settings file
            for key, value in lambda_aws_environment_variables.items():
                if key not in aws_environment_variables:
                    aws_environment_variables[key] = value
        kwargs = {
            "FunctionName": function_name,
            "Role": self.credentials_arn,
            "Description": description,
            "Timeout": timeout,
            "MemorySize": memory_size,
            "VpcConfig": vpc_config,
            "Environment": {"Variables": aws_environment_variables},
            "KMSKeyArn": aws_kms_key_arn,
            "TracingConfig": {"Mode": "Active" if self.xray_tracing else "PassThrough"},
        }
        # Handler/Runtime/Layers are only valid for zip packages, not images.
        if lambda_aws_config["PackageType"] != "Image":
            kwargs.update(
                {
                    "Handler": handler,
                    "Runtime": runtime,
                    "Layers": layers,
                }
            )
        response = self.lambda_client.update_function_configuration(**kwargs)
        resource_arn = response["FunctionArn"]
        if self.tags:
            self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
        return resource_arn
def invoke_lambda_function(
self,
function_name,
payload,
invocation_type="Event",
log_type="Tail",
client_context=None,
qualifier=None,
):
"""
Directly invoke a named Lambda function with a payload.
Returns the response.
"""
return self.lambda_client.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
LogType=log_type,
Payload=payload,
)
    def rollback_lambda_function_version(
        self, function_name, versions_back=1, publish=True
    ):
        """
        Rollback the lambda function code 'versions_back' number of revisions.
        Returns the Function ARN on success, False when the rollback cannot
        be performed (not enough revisions, or the code download failed).
        """
        response = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
        # Docker-based deployments have no downloadable Zip bundle to re-upload.
        # https://github.com/Miserlou/Zappa/pull/2192
        if (
            len(response.get("Versions", [])) > 1
            and response["Versions"][-1]["PackageType"] == "Image"
        ):
            raise NotImplementedError(
                "Zappa's rollback functionality is not available for Docker based deployments"
            )
        # Take into account $LATEST
        if len(response["Versions"]) < versions_back + 1:
            print("We do not have {} revisions. Aborting".format(str(versions_back)))
            return False
        revisions = [
            int(revision["Version"])
            for revision in response["Versions"]
            if revision["Version"] != "$LATEST"
        ]
        revisions.sort(reverse=True)
        # Fetch the target revision's metadata, which includes a pre-signed
        # URL for its code bundle.
        response = self.lambda_client.get_function(
            FunctionName="function:{}:{}".format(
                function_name, revisions[versions_back]
            )
        )
        response = requests.get(response["Code"]["Location"])
        if response.status_code != 200:
            print(
                "Failed to get version {} of {} code".format(
                    versions_back, function_name
                )
            )
            return False
        # Re-upload the old bundle as the current function code.
        response = self.lambda_client.update_function_code(
            FunctionName=function_name, ZipFile=response.content, Publish=publish
        ) # pragma: no cover
        return response["FunctionArn"]
def is_lambda_function_ready(self, function_name):
"""
Checks if a lambda function is active and no updates are in progress.
"""
response = self.lambda_client.get_function(FunctionName=function_name)
return (
response["Configuration"]["State"] == "Active"
and response["Configuration"]["LastUpdateStatus"] != "InProgress"
)
def wait_until_lambda_function_is_ready(self, function_name):
"""
Continuously check if a lambda function is active.
For functions deployed with a docker image instead of a
ZIP package, the function can take a few seconds longer
to be created or update, so we must wait before running any status
checks against the function.
"""
show_waiting_message = True
while True:
if self.is_lambda_function_ready(function_name):
break
if show_waiting_message:
print("Waiting until lambda function is ready.")
show_waiting_message = False
time.sleep(1)
def get_lambda_function(self, function_name):
"""
Returns the lambda function ARN, given a name
This requires the "lambda:GetFunction" role.
"""
response = self.lambda_client.get_function(FunctionName=function_name)
return response["Configuration"]["FunctionArn"]
def get_lambda_function_versions(self, function_name):
"""
Simply returns the versions available for a Lambda function, given a function name.
"""
try:
response = self.lambda_client.list_versions_by_function(
FunctionName=function_name
)
return response.get("Versions", [])
except Exception:
return []
def delete_lambda_function(self, function_name):
"""
Given a function name, delete it from AWS Lambda.
Returns the response.
"""
print("Deleting Lambda function..")
return self.lambda_client.delete_function(
FunctionName=function_name,
)
##
# Application load balancer
##
    def deploy_lambda_alb(self, lambda_arn, lambda_name, alb_vpc_config, timeout):
        """
        The `zappa deploy` functionality for ALB infrastructure.

        Creates, in order: the load balancer, a lambda-type target group
        (with multi-value headers enabled), an invoke permission for ELB,
        the target registration, and finally an HTTPS:443 listener that
        forwards to the target group.

        Raises EnvironmentError when required `alb_vpc_config` keys are
        missing or when any AWS response is malformed.
        """
        if not alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, alb_vpc_config must be filled out in zappa_settings."
            )
        if "SubnetIds" not in alb_vpc_config:
            raise EnvironmentError(
                "When creating an ALB, you must supply two subnets in different availability zones."
            )
        if "SecurityGroupIds" not in alb_vpc_config:
            alb_vpc_config["SecurityGroupIds"] = []
        if not alb_vpc_config.get("CertificateArn"):
            raise EnvironmentError(
                "When creating an ALB, you must supply a CertificateArn for the HTTPS listener."
            )
        # Related: https://github.com/Miserlou/Zappa/issues/1856
        if "Scheme" not in alb_vpc_config:
            alb_vpc_config["Scheme"] = "internet-facing"
        print("Deploying ALB infrastructure...")
        # Create load balancer
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
        kwargs = dict(
            Name=lambda_name,
            Subnets=alb_vpc_config["SubnetIds"],
            SecurityGroups=alb_vpc_config["SecurityGroupIds"],
            Scheme=alb_vpc_config["Scheme"],
            # TODO: Tags might be a useful means of stock-keeping zappa-generated assets.
            # Tags=[],
            Type="application",
            # TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.
            IpAddressType="ipv4",
        )
        response = self.elbv2_client.create_load_balancer(**kwargs)
        if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        if response["LoadBalancers"][0]["State"]["Code"] == "failed":
            raise EnvironmentError(
                "Failure to create application load balancer. Response reported a failed state: {}".format(
                    response["LoadBalancers"][0]["State"]["Reason"]
                )
            )
        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
        load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]
        waiter = self.elbv2_client.get_waiter("load_balancer_available")
        print(
            "Waiting for load balancer [{}] to become active..".format(
                load_balancer_arn
            )
        )
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        # Match the lambda timeout on the load balancer.
        self.elbv2_client.modify_load_balancer_attributes(
            LoadBalancerArn=load_balancer_arn,
            Attributes=[{"Key": "idle_timeout.timeout_seconds", "Value": str(timeout)}],
        )
        # Create/associate target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
        kwargs = dict(
            Name=lambda_name,
            TargetType="lambda",
            # TODO: Add options for health checks
        )
        response = self.elbv2_client.create_target_group(**kwargs)
        if not (response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
            raise EnvironmentError(
                "Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(
                    repr(response)
                )
            )
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
        # Enable multi-value headers by default.
        response = self.elbv2_client.modify_target_group_attributes(
            TargetGroupArn=target_group_arn,
            Attributes=[
                {"Key": "lambda.multi_value_headers.enabled", "Value": "true"},
            ],
        )
        # Allow execute permissions from target group to lambda.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
        kwargs = dict(
            Action="lambda:InvokeFunction",
            FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
            Principal="elasticloadbalancing.amazonaws.com",
            SourceArn=target_group_arn,
            StatementId=lambda_name,
        )
        response = self.lambda_client.add_permission(**kwargs)
        # Register target group to lambda association.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
        kwargs = dict(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}],
        )
        response = self.elbv2_client.register_targets(**kwargs)
        # Bind listener to load balancer with default rule to target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
        kwargs = dict(
            # TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
            Certificates=[{"CertificateArn": alb_vpc_config["CertificateArn"]}],
            DefaultActions=[
                {
                    "Type": "forward",
                    "TargetGroupArn": target_group_arn,
                }
            ],
            LoadBalancerArn=load_balancer_arn,
            Protocol="HTTPS",
            # TODO: Add option for custom ports
            Port=443,
            # TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
        )
        response = self.elbv2_client.create_listener(**kwargs)
        print("ALB created with DNS: {}".format(load_balancer_dns))
        print("Note it may take several minutes for load balancer to become available.")
    def undeploy_lambda_alb(self, lambda_name):
        """
        The `zappa undeploy` functionality for ALB infrastructure.

        Tears down, in order: the lambda invoke permission, the listener
        and load balancer (waiting for full deletion), then the target
        group after deregistering its lambda target. Each step tolerates
        "not found" errors so a partial deployment can still be undeployed.
        """
        print("Undeploying ALB infrastructure...")
        # Locate and delete alb/lambda permissions
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
            self.lambda_client.remove_permission(
                FunctionName=lambda_name, StatementId=lambda_name
            )
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            if "ResourceNotFoundException" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete load balancer
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
            response = self.elbv2_client.describe_load_balancers(Names=[lambda_name])
            if not (response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
            response = self.elbv2_client.describe_listeners(
                LoadBalancerArn=load_balancer_arn
            )
            if not (response["Listeners"]):
                print("No listeners found.")
            elif len(response["Listeners"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            else:
                listener_arn = response["Listeners"][0]["ListenerArn"]
                # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
                # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
                response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
            # Remove the load balancer and wait for completion
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
            response = self.elbv2_client.delete_load_balancer(
                LoadBalancerArn=load_balancer_arn
            )
            waiter = self.elbv2_client.get_waiter("load_balancers_deleted")
            print("Waiting for load balancer [{}] to be deleted..".format(lambda_name))
            waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
        # Locate and delete target group
        try:
            # Locate the lambda ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
            response = self.lambda_client.get_function(FunctionName=lambda_name)
            lambda_arn = response["Configuration"]["FunctionArn"]
            # Locate the target group ARN
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
            response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
            if not (response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
                raise EnvironmentError(
                    "Failure to locate/delete ALB target group named [{}]. Response was: {}".format(
                        lambda_name, repr(response)
                    )
                )
            target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
            # Deregister targets and wait for completion
            self.elbv2_client.deregister_targets(
                TargetGroupArn=target_group_arn, Targets=[{"Id": lambda_arn}]
            )
            waiter = self.elbv2_client.get_waiter("target_deregistered")
            print("Waiting for target [{}] to be deregistered...".format(lambda_name))
            waiter.wait(
                TargetGroupArn=target_group_arn,
                Targets=[{"Id": lambda_arn}],
                WaiterConfig={"Delay": 3},
            )
            # Remove the target group
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
            self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            print(e.response["Error"]["Code"])
            if "TargetGroupNotFound" in e.response["Error"]["Code"]:
                pass
            else:
                raise e
##
# API Gateway
##
    def create_api_gateway_routes(
        self,
        lambda_arn,
        api_name=None,
        api_key_required=False,
        authorization_type="NONE",
        authorizer=None,
        cors_options=None,
        description=None,
        endpoint_configuration=None,
    ):
        """
        Create the API Gateway for this Zappa deployment.
        Returns the new RestAPI CF resource.

        Builds two troposphere resources into self.cf_template: methods on
        the root path and methods on a "{proxy+}" greedy path, optionally
        with CORS OPTIONS methods and a custom authorizer.
        """
        restapi = troposphere.apigateway.RestApi("Api")
        restapi.Name = api_name or lambda_arn.split(":")[-1]
        if not description:
            description = "Created automatically by Zappa."
        restapi.Description = description
        endpoint_configuration = (
            [] if endpoint_configuration is None else endpoint_configuration
        )
        # GovCloud requires a REGIONAL endpoint.
        if self.boto_session.region_name == "us-gov-west-1":
            endpoint_configuration.append("REGIONAL")
        if endpoint_configuration:
            endpoint = troposphere.apigateway.EndpointConfiguration()
            endpoint.Types = list(set(endpoint_configuration))
            restapi.EndpointConfiguration = endpoint
        if self.apigateway_policy:
            restapi.Policy = json.loads(self.apigateway_policy)
        self.cf_template.add_resource(restapi)
        root_id = troposphere.GetAtt(restapi, "RootResourceId")
        # GovCloud uses a different ARN partition prefix.
        invocation_prefix = (
            "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
        )
        invocations_uri = (
            "arn:"
            + invocation_prefix
            + ":apigateway:"
            + self.boto_session.region_name
            + ":lambda:path/2015-03-31/functions/"
            + lambda_arn
            + "/invocations"
        )
        ##
        # The Resources
        ##
        authorizer_resource = None
        if authorizer:
            # The authorizer may point at a different lambda than the app itself.
            authorizer_lambda_arn = authorizer.get("arn", lambda_arn)
            lambda_uri = "arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations".format(
                invocation_prefix=invocation_prefix,
                region_name=self.boto_session.region_name,
                lambda_arn=authorizer_lambda_arn,
            )
            authorizer_resource = self.create_authorizer(
                restapi, lambda_uri, authorizer
            )
        # Depth 0: methods on the API root path.
        self.create_and_setup_methods(
            restapi,
            root_id,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            0,
        )
        if cors_options:
            self.create_and_setup_cors(
                restapi, root_id, invocations_uri, 0, cors_options
            )
        # Depth 1: a greedy "{proxy+}" resource catches every sub-path.
        resource = troposphere.apigateway.Resource("ResourceAnyPathSlashed")
        self.cf_api_resources.append(resource.title)
        resource.RestApiId = troposphere.Ref(restapi)
        resource.ParentId = root_id
        resource.PathPart = "{proxy+}"
        self.cf_template.add_resource(resource)
        self.create_and_setup_methods(
            restapi,
            resource,
            api_key_required,
            invocations_uri,
            authorization_type,
            authorizer_resource,
            1,
        )  # pragma: no cover
        if cors_options:
            self.create_and_setup_cors(
                restapi, resource, invocations_uri, 1, cors_options
            )  # pragma: no cover
        return restapi
def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get("validation_expression", None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = (
"method.request.header.%s" % authorizer.get("token_header", "Authorization")
)
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = (
identity_validation_expression
)
if authorizer_type == "TOKEN":
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get(
"result_ttl", 300
)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == "COGNITO_USER_POOLS":
authorizer_resource.ProviderARNs = authorizer.get("provider_arns")
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource
    def create_and_setup_methods(
        self,
        restapi,
        resource,
        api_key_required,
        uri,
        authorization_type,
        authorizer_resource,
        depth,
    ):
        """
        Set up the methods, integration responses and method responses for a given API Gateway resource.

        `depth` is folded into each CF resource title so root (0) and proxy
        (1) methods get unique logical names. Every HTTP method in
        self.http_methods gets an AWS_PROXY integration pointed at `uri`.
        """
        for method_name in self.http_methods:
            method = troposphere.apigateway.Method(method_name + str(depth))
            method.RestApiId = troposphere.Ref(restapi)
            # The root resource arrives as a GetAtt, not a Resource object.
            if type(resource) is troposphere.apigateway.Resource:
                method.ResourceId = troposphere.Ref(resource)
            else:
                method.ResourceId = resource
            method.HttpMethod = method_name.upper()
            method.AuthorizationType = authorization_type
            if authorizer_resource:
                method.AuthorizerId = troposphere.Ref(authorizer_resource)
            method.ApiKeyRequired = api_key_required
            method.MethodResponses = []
            self.cf_template.add_resource(method)
            self.cf_api_resources.append(method.title)
            if not self.credentials_arn:
                self.get_credentials_arn()
            credentials = self.credentials_arn  # This must be a Role ARN
            integration = troposphere.apigateway.Integration()
            integration.CacheKeyParameters = []
            integration.CacheNamespace = "none"
            integration.Credentials = credentials
            # Lambda proxy integrations are always invoked via POST.
            integration.IntegrationHttpMethod = "POST"
            integration.IntegrationResponses = []
            integration.PassthroughBehavior = "NEVER"
            integration.Type = "AWS_PROXY"
            integration.Uri = uri
            method.Integration = integration
    def create_and_setup_cors(self, restapi, resource, uri, depth, config):
        """
        Set up the methods, integration responses and method responses for a given API Gateway resource.

        Adds an OPTIONS method backed by a MOCK integration that returns a
        static 200 with the CORS headers derived from `config`
        (allowed_headers / allowed_methods / allowed_origin, with defaults).
        """
        # `cors: true` in the settings arrives as a bare boolean.
        if config is True:
            config = {}
        method_name = "OPTIONS"
        method = troposphere.apigateway.Method(method_name + str(depth))
        method.RestApiId = troposphere.Ref(restapi)
        # The root resource arrives as a GetAtt, not a Resource object.
        if type(resource) is troposphere.apigateway.Resource:
            method.ResourceId = troposphere.Ref(resource)
        else:
            method.ResourceId = resource
        method.HttpMethod = method_name.upper()
        method.AuthorizationType = "NONE"
        method_response = troposphere.apigateway.MethodResponse()
        method_response.ResponseModels = {"application/json": "Empty"}
        # Header values must be single-quoted static strings in API Gateway.
        response_headers = {
            "Access-Control-Allow-Headers": "'%s'"
            % ",".join(
                config.get(
                    "allowed_headers",
                    [
                        "Content-Type",
                        "X-Amz-Date",
                        "Authorization",
                        "X-Api-Key",
                        "X-Amz-Security-Token",
                    ],
                )
            ),
            "Access-Control-Allow-Methods": "'%s'"
            % ",".join(
                config.get(
                    "allowed_methods",
                    ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"],
                )
            ),
            "Access-Control-Allow-Origin": "'%s'" % config.get("allowed_origin", "*"),
        }
        method_response.ResponseParameters = {
            "method.response.header.%s" % key: True for key in response_headers
        }
        method_response.StatusCode = "200"
        method.MethodResponses = [method_response]
        self.cf_template.add_resource(method)
        self.cf_api_resources.append(method.title)
        # MOCK integration: API Gateway answers directly, no lambda call.
        integration = troposphere.apigateway.Integration()
        integration.Type = "MOCK"
        integration.PassthroughBehavior = "NEVER"
        integration.RequestTemplates = {"application/json": '{"statusCode": 200}'}
        integration_response = troposphere.apigateway.IntegrationResponse()
        integration_response.ResponseParameters = {
            "method.response.header.%s" % key: value
            for key, value in response_headers.items()
        }
        integration_response.ResponseTemplates = {"application/json": ""}
        integration_response.StatusCode = "200"
        integration.IntegrationResponses = [integration_response]
        integration.Uri = uri
        method.Integration = integration
def deploy_api_gateway(
self,
api_id,
stage_name,
stage_description="",
description="",
cache_cluster_enabled=False,
cache_cluster_size="0.5",
variables=None,
cloudwatch_log_level="OFF",
cloudwatch_data_trace=False,
cloudwatch_metrics_enabled=False,
cache_cluster_ttl=300,
cache_cluster_encrypted=False,
):
"""
Deploy the API Gateway!
Return the deployed API URL.
"""
print("Deploying API Gateway..")
self.apigateway_client.create_deployment(
restApiId=api_id,
stageName=stage_name,
stageDescription=stage_description,
description=description,
cacheClusterEnabled=cache_cluster_enabled,
cacheClusterSize=cache_cluster_size,
variables=variables or {},
)
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = "OFF"
self.apigateway_client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
self.get_patch_op("logging/loglevel", cloudwatch_log_level),
self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
self.get_patch_op("caching/ttlInSeconds", str(cache_cluster_ttl)),
self.get_patch_op("caching/dataEncrypted", cache_cluster_encrypted),
],
)
return "https://{}.execute-api.{}.amazonaws.com/{}".format(
api_id, self.boto_session.region_name, stage_name
)
def add_binary_support(self, api_id, cors=False):
"""
Add binary support
"""
response = self.apigateway_client.get_rest_api(restApiId=api_id)
if (
"binaryMediaTypes" not in response
or "*/*" not in response["binaryMediaTypes"]
):
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[{"op": "add", "path": "/binaryMediaTypes/*~1*"}],
)
if cors:
# fix for issue 699 and 1035, cors+binary support don't work together
# go through each resource and update the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item["id"]
for item in response["items"]
if "OPTIONS" in item.get("resourceMethods", {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod="OPTIONS",
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": "CONVERT_TO_TEXT",
}
],
)
def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(restApiId=api_id)
if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[{"op": "remove", "path": "/binaryMediaTypes/*~1*"}],
)
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item["id"]
for item in response["items"]
if "OPTIONS" in item.get("resourceMethods", {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod="OPTIONS",
patchOperations=[
{"op": "replace", "path": "/contentHandling", "value": ""}
],
)
def add_api_compression(self, api_id, min_compression_size):
"""
Add Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
"op": "replace",
"path": "/minimumCompressionSize",
"value": str(min_compression_size),
}
],
)
def remove_api_compression(self, api_id):
"""
Remove Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
"op": "replace",
"path": "/minimumCompressionSize",
}
],
)
def get_api_keys(self, api_id, stage_name):
"""
Generator that allows to iterate per API keys associated to an api_id and a stage_name.
"""
response = self.apigateway_client.get_api_keys(limit=500)
stage_key = "{}/{}".format(api_id, stage_name)
for api_key in response.get("items"):
if stage_key in api_key.get("stageKeys"):
yield api_key.get("id")
def create_api_key(self, api_id, stage_name):
"""
Create new API key and link it with an api_id and a stage_name
"""
response = self.apigateway_client.create_api_key(
name="{}_{}".format(stage_name, api_id),
description="Api Key for {}".format(api_id),
enabled=True,
stageKeys=[
{
"restApiId": "{}".format(api_id),
"stageName": "{}".format(stage_name),
},
],
)
print("Created a new x-api-key: {}".format(response["id"]))
def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(
limit=1, nameQuery="{}_{}".format(stage_name, api_id)
)
for api_key in response.get("items"):
self.apigateway_client.delete_api_key(apiKey="{}".format(api_key["id"]))
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
"""
Add api stage to Api key
"""
self.apigateway_client.update_api_key(
apiKey=api_key,
patchOperations=[
{
"op": "add",
"path": "/stages",
"value": "{}/{}".format(api_id, stage_name),
}
],
)
def get_patch_op(self, keypath, value, op="replace"):
"""
Return an object that describes a change of configuration on the given staging.
Setting will be applied on all available HTTP methods.
"""
if isinstance(value, bool):
value = str(value).lower()
return {"op": op, "path": "/*/*/{}".format(keypath), "value": value}
def get_rest_apis(self, project_name):
"""
Generator that allows to iterate per every available apis.
"""
all_apis = self.apigateway_client.get_rest_apis(limit=500)
for api in all_apis["items"]:
if api["name"] != project_name:
continue
yield api
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
"""
Delete a deployed REST API Gateway.
"""
print("Deleting API Gateway..")
api_id = self.get_api_id(lambda_name)
if domain_name:
# XXX - Remove Route53 smartly here?
# XXX - This doesn't raise, but doesn't work either.
try:
self.apigateway_client.delete_base_path_mapping(
domainName=domain_name,
basePath="(none)" if base_path is None else base_path,
)
except Exception as e:
# We may not have actually set up the domain.
pass
was_deleted = self.delete_stack(lambda_name, wait=True)
if not was_deleted:
# try erasing it with the older method
for api in self.get_rest_apis(lambda_name):
self.apigateway_client.delete_rest_api(restApiId=api["id"])
def update_stage_config(
self,
project_name,
stage_name,
cloudwatch_log_level,
cloudwatch_data_trace,
cloudwatch_metrics_enabled,
):
"""
Update CloudWatch metrics configuration.
"""
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = "OFF"
for api in self.get_rest_apis(project_name):
self.apigateway_client.update_stage(
restApiId=api["id"],
stageName=stage_name,
patchOperations=[
self.get_patch_op("logging/loglevel", cloudwatch_log_level),
self.get_patch_op("logging/dataTrace", cloudwatch_data_trace),
self.get_patch_op("metrics/enabled", cloudwatch_metrics_enabled),
],
)
def update_cognito(self, lambda_name, user_pool, lambda_configs, lambda_arn):
LambdaConfig = {}
for config in lambda_configs:
LambdaConfig[config] = lambda_arn
description = self.cognito_client.describe_user_pool(UserPoolId=user_pool)
description_kwargs = {}
for key, value in description["UserPool"].items():
if key in (
"UserPoolId",
"Policies",
"AutoVerifiedAttributes",
"SmsVerificationMessage",
"EmailVerificationMessage",
"EmailVerificationSubject",
"VerificationMessageTemplate",
"SmsAuthenticationMessage",
"MfaConfiguration",
"DeviceConfiguration",
"EmailConfiguration",
"SmsConfiguration",
"UserPoolTags",
"AdminCreateUserConfig",
):
description_kwargs[key] = value
elif key == "LambdaConfig":
for lckey, lcvalue in value.items():
if lckey in LambdaConfig:
value[lckey] = LambdaConfig[lckey]
print("value", value)
description_kwargs[key] = value
if "LambdaConfig" not in description_kwargs:
description_kwargs["LambdaConfig"] = LambdaConfig
if (
"TemporaryPasswordValidityDays"
in description_kwargs["Policies"]["PasswordPolicy"]
):
description_kwargs["AdminCreateUserConfig"].pop(
"UnusedAccountValidityDays", None
)
if "UnusedAccountValidityDays" in description_kwargs["AdminCreateUserConfig"]:
description_kwargs["Policies"]["PasswordPolicy"][
"TemporaryPasswordValidityDays"
] = description_kwargs["AdminCreateUserConfig"].pop(
"UnusedAccountValidityDays", None
)
result = self.cognito_client.update_user_pool(
UserPoolId=user_pool, **description_kwargs
)
if result["ResponseMetadata"]["HTTPStatusCode"] != 200:
print("Cognito: Failed to update user pool", result)
# Now we need to add a policy to the IAM that allows cognito access
result = self.create_event_permission(
lambda_name,
"cognito-idp.amazonaws.com",
"arn:aws:cognito-idp:{}:{}:userpool/{}".format(
self.aws_region,
self.sts_client.get_caller_identity().get("Account"),
user_pool,
),
)
if result["ResponseMetadata"]["HTTPStatusCode"] != 201:
print("Cognito: Failed to update lambda permission", result)
    def delete_stack(self, name, wait=False):
        """
        Delete the CF stack managed by Zappa.

        Only deletes stacks whose "ZappaProject" tag equals `name`.
        Returns True when a deletion was started, False otherwise.
        """
        try:
            stack = self.cf_client.describe_stacks(StackName=name)["Stacks"][0]
        except: # pragma: no cover
            # NOTE(review): bare except — treats any describe failure as
            # "stack does not exist"; confirm whether narrowing to
            # ClientError would be safe here.
            print("No Zappa stack named {0}".format(name))
            return False
        tags = {x["Key"]: x["Value"] for x in stack["Tags"]}
        if tags.get("ZappaProject") == name:
            self.cf_client.delete_stack(StackName=name)
            if wait:
                waiter = self.cf_client.get_waiter("stack_delete_complete")
                print("Waiting for stack {0} to be deleted..".format(name))
                waiter.wait(StackName=name)
            return True
        else:
            print("ZappaProject tag not found on {0}, doing nothing".format(name))
            return False
def create_stack_template(
self,
lambda_arn,
lambda_name,
api_key_required,
iam_authorization,
authorizer,
cors_options=None,
description=None,
endpoint_configuration=None,
):
"""
Build the entire CF stack.
Just used for the API Gateway, but could be expanded in the future.
"""
auth_type = "NONE"
if iam_authorization and authorizer:
logger.warn(
"Both IAM Authorization and Authorizer are specified, this is not possible. "
"Setting Auth method to IAM Authorization"
)
authorizer = None
auth_type = "AWS_IAM"
elif iam_authorization:
auth_type = "AWS_IAM"
elif authorizer:
auth_type = authorizer.get("type", "CUSTOM")
# build a fresh template
self.cf_template = troposphere.Template()
self.cf_template.set_description("Automatically generated with Zappa")
self.cf_api_resources = []
self.cf_parameters = {}
restapi = self.create_api_gateway_routes(
lambda_arn,
api_name=lambda_name,
api_key_required=api_key_required,
authorization_type=auth_type,
authorizer=authorizer,
cors_options=cors_options,
description=description,
endpoint_configuration=endpoint_configuration,
)
return self.cf_template
    def update_stack(
        self,
        name,
        working_bucket,
        wait=False,
        update_only=False,
        disable_progress=False,
    ):
        """
        Update or create the CF stack managed by Zappa.

        Serializes self.cf_template to a timestamped JSON file, uploads it
        to `working_bucket`, then creates or updates the stack from that
        URL. When `wait` is True, polls the stack status every 3 seconds
        with a progress bar until it completes (raising on rollback or
        deletion states). The template file is removed locally and from S3
        afterwards.
        """
        capabilities = []
        template = name + "-template-" + str(int(time.time())) + ".json"
        with open(template, "wb") as out:
            out.write(
                bytes(
                    self.cf_template.to_json(indent=None, separators=(",", ":")),
                    "utf-8",
                )
            )
        self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
        # GovCloud uses a region-specific S3 endpoint.
        if self.boto_session.region_name == "us-gov-west-1":
            url = "https://s3-us-gov-west-1.amazonaws.com/{0}/{1}".format(
                working_bucket, template
            )
        else:
            url = "https://s3.amazonaws.com/{0}/{1}".format(working_bucket, template)
        tags = [
            {"Key": key, "Value": self.tags[key]}
            for key in self.tags.keys()
            if key != "ZappaProject"
        ]
        tags.append({"Key": "ZappaProject", "Value": name})
        update = True
        try:
            self.cf_client.describe_stacks(StackName=name)
        except botocore.client.ClientError:
            # Stack does not exist yet: create instead of update.
            update = False
        if update_only and not update:
            print("CloudFormation stack missing, re-deploy to enable updates")
            return
        if not update:
            self.cf_client.create_stack(
                StackName=name, Capabilities=capabilities, TemplateURL=url, Tags=tags
            )
            print(
                "Waiting for stack {0} to create (this can take a bit)..".format(name)
            )
        else:
            try:
                self.cf_client.update_stack(
                    StackName=name,
                    Capabilities=capabilities,
                    TemplateURL=url,
                    Tags=tags,
                )
                print("Waiting for stack {0} to update..".format(name))
            except botocore.client.ClientError as e:
                # A no-op update is not an error; just skip the wait loop.
                if e.response["Error"]["Message"] == "No updates are to be performed.":
                    wait = False
                else:
                    raise
        if wait:
            total_resources = len(self.cf_template.resources)
            current_resources = 0
            sr = self.cf_client.get_paginator("list_stack_resources")
            progress = tqdm(total=total_resources, unit="res", disable=disable_progress)
            while True:
                time.sleep(3)
                result = self.cf_client.describe_stacks(StackName=name)
                if not result["Stacks"]:
                    continue  # might need to wait a bit
                if result["Stacks"][0]["StackStatus"] in [
                    "CREATE_COMPLETE",
                    "UPDATE_COMPLETE",
                ]:
                    break
                # Something has gone wrong.
                # Is raising enough? Should we also remove the Lambda function?
                if result["Stacks"][0]["StackStatus"] in [
                    "DELETE_COMPLETE",
                    "DELETE_IN_PROGRESS",
                    "ROLLBACK_IN_PROGRESS",
                    "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
                    "UPDATE_ROLLBACK_COMPLETE",
                ]:
                    raise EnvironmentError(
                        "Stack creation failed. "
                        "Please check your CloudFormation console. "
                        "You may also need to `undeploy`."
                    )
                # Count completed resources to advance the progress bar.
                count = 0
                for result in sr.paginate(StackName=name):
                    done = (
                        1
                        for x in result["StackResourceSummaries"]
                        if "COMPLETE" in x["ResourceStatus"]
                    )
                    count += sum(done)
                if count:
                    # We can end up in a situation where we have more resources being created
                    # than anticipated.
                    if (count - current_resources) > 0:
                        progress.update(count - current_resources)
                current_resources = count
            progress.close()
        try:
            os.remove(template)
        except OSError:
            pass
        self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
    """
    Given a stack name, describe the CloudFormation stack and return a
    dict of its Outputs ({OutputKey: OutputValue}); returns an empty
    dict when the stack cannot be described.
    """
    try:
        description = self.cf_client.describe_stacks(StackName=name)
        outputs = description["Stacks"][0]["Outputs"]
    except botocore.client.ClientError:
        return {}
    return {entry["OutputKey"]: entry["OutputValue"] for entry in outputs}
def get_api_url(self, lambda_name, stage_name):
    """
    Given a lambda_name and stage_name, return a valid API URL, or
    None when no API Gateway REST API exists for that function.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        return None
    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name
    )
def get_api_id(self, lambda_name):
    """
    Given a lambda_name, return the API Gateway REST API id.

    First asks CloudFormation for the stack's "Api" logical resource;
    if that fails (e.g. the project predates CloudFormation-based
    deploys), falls back to scanning API Gateway for an API whose name
    matches `lambda_name`. Returns None when no API can be found.
    """
    try:
        response = self.cf_client.describe_stack_resource(
            StackName=lambda_name, LogicalResourceId="Api"
        )
        return response["StackResourceDetail"].get("PhysicalResourceId", None)
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:  # pragma: no cover
        try:
            # Try the old method (project was probably made on an older, non CF version)
            response = self.apigateway_client.get_rest_apis(limit=500)
            for item in response["items"]:
                if item["name"] == lambda_name:
                    return item["id"]
            logger.exception("Could not get API ID.")
            return None
        except Exception:  # pragma: no cover
            # We don't even have an API deployed. That's okay!
            return None
def create_domain_name(
    self,
    domain_name,
    certificate_name,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    base_path=None,
):
    """
    Creates the API GW domain and returns the resulting DNS name.

    One certificate source must be supplied: either the
    body/private-key/chain triple (Let's Encrypt or a custom cert) or
    an ACM ``certificate_arn``. Also maps ``base_path`` (empty string
    when None) of the domain to the deployed API's ``stage``.

    Raises LookupError when ``lambda_name`` has no deployed API.
    """
    # This is a Let's Encrypt or custom certificate
    if not certificate_arn:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateBody=certificate_body,
            certificatePrivateKey=certificate_private_key,
            certificateChain=certificate_chain,
        )
    # This is an AWS ACM-hosted Certificate
    else:
        agw_response = self.apigateway_client.create_domain_name(
            domainName=domain_name,
            certificateName=certificate_name,
            certificateArn=certificate_arn,
        )
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        raise LookupError("No API URL to certify found - did you deploy?")
    # Map the (possibly empty) base path of the new domain to the API stage.
    self.apigateway_client.create_base_path_mapping(
        domainName=domain_name,
        basePath="" if base_path is None else base_path,
        restApiId=api_id,
        stage=stage,
    )
    return agw_response["distributionDomainName"]
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation.

    If ``domain_name`` is the apex of its hosted zone, an A-record
    ALIAS to the CloudFront distribution is upserted; otherwise a
    60-second-TTL CNAME pointing at ``dns_name`` is upserted.
    Returns the raw change_resource_record_sets response.
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)
    # Apex check: the zone name (minus its trailing dot) equals the domain.
    is_apex = (
        self.route53.get_hosted_zone(Id=zone_id)["HostedZone"]["Name"][:-1]
        == domain_name
    )
    if is_apex:
        record_set = {
            "Name": domain_name,
            "Type": "A",
            "AliasTarget": {
                "HostedZoneId": "Z2FDTNDATAQYW2",  # This is a magic value that means "CloudFront"
                "DNSName": dns_name,
                "EvaluateTargetHealth": False,
            },
        }
    else:
        record_set = {
            "Name": domain_name,
            "Type": "CNAME",
            "ResourceRecords": [{"Value": dns_name}],
            "TTL": 60,
        }
    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]
    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    response = self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [{"Action": "UPSERT", "ResourceRecordSet": record_set}]
        },
    )
    return response
def update_domain_name(
    self,
    domain_name,
    certificate_name=None,
    certificate_body=None,
    certificate_private_key=None,
    certificate_chain=None,
    certificate_arn=None,
    lambda_name=None,
    stage=None,
    route53=True,
    base_path=None,
):
    """
    This updates your certificate information for an existing domain,
    with similar arguments to boto's update_domain_name API Gateway api.

    It returns the resulting new domain information including the new certificate's ARN
    if created during this process.

    Previously, this method involved downtime that could take up to 40 minutes
    because the API Gateway api only allowed this by deleting, and then creating it.

    Related issues:     https://github.com/Miserlou/Zappa/issues/590
                        https://github.com/Miserlou/Zappa/issues/588
                        https://github.com/Miserlou/Zappa/pull/458
                        https://github.com/Miserlou/Zappa/issues/882
                        https://github.com/Miserlou/Zappa/pull/883
    """
    print("Updating domain name!")

    # NOTE(review): certificate_name defaults to None, in which case this
    # concatenation raises TypeError — callers appear expected to always
    # supply a name; TODO confirm.
    # A timestamp suffix keeps each imported certificate name unique.
    certificate_name = certificate_name + str(time.time())

    api_gateway_domain = self.apigateway_client.get_domain_name(
        domainName=domain_name
    )
    # If no ACM ARN was given but a full certificate was, import it into
    # ACM first and use the resulting ARN.
    if (
        not certificate_arn
        and certificate_body
        and certificate_private_key
        and certificate_chain
    ):
        acm_certificate = self.acm_client.import_certificate(
            Certificate=certificate_body,
            PrivateKey=certificate_private_key,
            CertificateChain=certificate_chain,
        )
        certificate_arn = acm_certificate["CertificateArn"]

    self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)

    # Patch (rather than delete/recreate) the domain to avoid downtime.
    return self.apigateway_client.update_domain_name(
        domainName=domain_name,
        patchOperations=[
            {
                "op": "replace",
                "path": "/certificateName",
                "value": certificate_name,
            },
            {"op": "replace", "path": "/certificateArn", "value": certificate_arn},
        ],
    )
def update_domain_base_path_mapping(
    self, domain_name, lambda_name, stage, base_path
):
    """
    Update domain base path mapping on API Gateway if it was changed.

    Looks for an existing mapping for this API/stage pair: if one is
    found with a different base path it is patched in place; if none is
    found a new mapping is created. Prints a warning and returns early
    when the function has no deployed API.
    """
    api_id = self.get_api_id(lambda_name)
    if not api_id:
        print("Warning! Can't update base path mapping!")
        return
    base_path_mappings = self.apigateway_client.get_base_path_mappings(
        domainName=domain_name
    )
    found = False
    for base_path_mapping in base_path_mappings.get("items", []):
        if (
            base_path_mapping["restApiId"] == api_id
            and base_path_mapping["stage"] == stage
        ):
            found = True
            if base_path_mapping["basePath"] != base_path:
                # Patch the existing mapping to the new (possibly empty) path.
                self.apigateway_client.update_base_path_mapping(
                    domainName=domain_name,
                    basePath=base_path_mapping["basePath"],
                    patchOperations=[
                        {
                            "op": "replace",
                            "path": "/basePath",
                            "value": "" if base_path is None else base_path,
                        }
                    ],
                )
    if not found:
        # No mapping exists yet for this API/stage — create one.
        self.apigateway_client.create_base_path_mapping(
            domainName=domain_name,
            basePath="" if base_path is None else base_path,
            restApiId=api_id,
            stage=stage,
        )
def get_all_zones(self):
    """Same behaviour of list_host_zones, but transparently handling pagination."""
    collected = []
    page = self.route53.list_hosted_zones(MaxItems="100")
    collected.extend(page["HostedZones"])
    while page["IsTruncated"]:
        page = self.route53.list_hosted_zones(
            Marker=page["NextMarker"], MaxItems="100"
        )
        collected.extend(page["HostedZones"])
    return {"HostedZones": collected}
def get_domain_name(self, domain_name, route53=True):
    """
    Scan our hosted zones for the record of a given name.

    Returns the matching Route 53 record entry (a dict) when found.
    When ``route53`` is False, only verifies that the API Gateway
    domain exists and returns True in that case. Returns None when the
    API Gateway domain is missing or no matching record exists.
    """
    # Make sure api gateway domain is present
    try:
        self.apigateway_client.get_domain_name(domainName=domain_name)
    except Exception:
        return None
    if not route53:
        return True
    try:
        zones = self.get_all_zones()
        for zone in zones["HostedZones"]:
            records = self.route53.list_resource_record_sets(
                HostedZoneId=zone["Id"]
            )
            for record in records["ResourceRecordSets"]:
                if (
                    record["Type"] in ("CNAME", "A")
                    and record["Name"][:-1] == domain_name
                ):
                    return record
    except Exception:
        # Best-effort lookup: any Route 53 failure is treated as "not found".
        return None
    # Old automatic deletion of an orphaned API Gateway domain used to live
    # here; see https://github.com/Miserlou/Zappa/pull/458 if re-introducing.
    return None
##
# IAM
##
def get_credentials_arn(self):
    """
    Look up the IAM role named `self.role_name`, cache its ARN on
    `self.credentials_arn`, and return (role, arn).
    """
    iam_role = self.iam.Role(self.role_name)
    self.credentials_arn = iam_role.arn
    return iam_role, self.credentials_arn
def create_iam_roles(self):
    """
    Create and defines the IAM roles and policies necessary for Zappa.

    If the IAM role already exists, it will be updated if necessary.
    Returns (credentials_arn, updated), where ``updated`` is True when
    any role or policy was created or changed.
    """
    attach_policy_obj = json.loads(self.attach_policy)
    assume_policy_obj = json.loads(self.assume_policy)

    # Merge any user-supplied extra permission statements into the
    # attach policy before comparing/applying it.
    if self.extra_permissions:
        for permission in self.extra_permissions:
            attach_policy_obj["Statement"].append(dict(permission))
        self.attach_policy = json.dumps(attach_policy_obj)

    updated = False

    # Create the role if needed
    try:
        role, credentials_arn = self.get_credentials_arn()
    except botocore.client.ClientError:
        print("Creating " + self.role_name + " IAM Role..")
        role = self.iam.create_role(
            RoleName=self.role_name, AssumeRolePolicyDocument=self.assume_policy
        )
        self.credentials_arn = role.arn
        updated = True

    # create or update the role's policies if needed
    policy = self.iam.RolePolicy(self.role_name, "zappa-permissions")
    try:
        # Only rewrite the policy when the deployed document differs.
        if policy.policy_document != attach_policy_obj:
            print(
                "Updating zappa-permissions policy on "
                + self.role_name
                + " IAM Role."
            )
            policy.put(PolicyDocument=self.attach_policy)
            updated = True
    except botocore.client.ClientError:
        # Reading the policy failed — assume it doesn't exist yet.
        print(
            "Creating zappa-permissions policy on " + self.role_name + " IAM Role."
        )
        policy.put(PolicyDocument=self.attach_policy)
        updated = True

    # NOTE(review): this requires BOTH the full document and the first
    # statement's Principal.Service set to differ before updating the
    # assume-role policy — presumably to avoid churn on equivalent
    # documents; TODO confirm the `and` (vs `or`) is intentional.
    if role.assume_role_policy_document != assume_policy_obj and set(
        role.assume_role_policy_document["Statement"][0]["Principal"]["Service"]
    ) != set(assume_policy_obj["Statement"][0]["Principal"]["Service"]):
        print("Updating assume role policy on " + self.role_name + " IAM Role.")
        self.iam_client.update_assume_role_policy(
            RoleName=self.role_name, PolicyDocument=self.assume_policy
        )
        updated = True

    return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
    """
    Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.

    Fetches the function's resource policy and removes every statement
    in it; a missing function/policy is tolerated (first run). Errors
    are logged, never raised.
    """
    try:
        policy_response = self.lambda_client.get_policy(FunctionName=lambda_name)
        if policy_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
            statement = json.loads(policy_response["Policy"])["Statement"]
            # Remove every existing statement by its Sid; fresh ones are
            # re-added by the caller when events are (re)scheduled.
            for s in statement:
                delete_response = self.lambda_client.remove_permission(
                    FunctionName=lambda_name, StatementId=s["Sid"]
                )
                if delete_response["ResponseMetadata"]["HTTPStatusCode"] != 204:
                    logger.error(
                        "Failed to delete an obsolete policy statement: {}".format(
                            policy_response
                        )
                    )
        else:
            logger.debug(
                "Failed to load Lambda function policy: {}".format(policy_response)
            )
    except ClientError as e:
        # ResourceNotFoundException simply means no policy exists yet.
        if e.args[0].find("ResourceNotFoundException") > -1:
            logger.debug("No policy found, must be first run.")
        else:
            logger.error("Unexpected client error {}".format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
    """
    Create permissions to link to an event.

    Adds a Lambda resource-policy statement (with a random 8-character
    statement id) allowing ``principal`` to invoke ``lambda_name`` from
    ``source_arn``. Returns the add_permission response, or None when
    the call did not return HTTP 201.

    Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html
    """
    logger.debug(
        "Adding new permission to invoke Lambda function: {}".format(lambda_name)
    )
    account_id: str = self.sts_client.get_caller_identity().get('Account')
    permission_response = self.lambda_client.add_permission(
        FunctionName=lambda_name,
        StatementId="".join(
            random.choice(string.ascii_uppercase + string.digits) for _ in range(8)
        ),
        Action="lambda:InvokeFunction",
        Principal=principal,
        SourceArn=source_arn,
        # The SourceAccount argument ensures that only the specified AWS account can invoke the lambda function.
        # This prevents a security issue where if a lambda is triggered off of s3 bucket events and the bucket is
        # deleted, another AWS account can create a bucket with the same name and potentially trigger the original
        # lambda function, since bucket names are global.
        # https://github.com/zappa/Zappa/issues/1039
        SourceAccount=account_id
    )
    if permission_response["ResponseMetadata"]["HTTPStatusCode"] != 201:
        print("Problem creating permission to invoke Lambda function")
        return None  # XXX: Raise?
    return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.

    Expressions can be in rate or cron format:
        http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html

    Any previously-scheduled events for this function are unscheduled
    first (pull-based sources excluded).
    """
    # The stream sources - DynamoDB, Kinesis and SQS - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ["dynamodb", "kinesis", "sqs"]

    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"

    # Start from a clean slate so renamed/removed events don't linger.
    self.unschedule_events(
        lambda_name=lambda_name,
        lambda_arn=lambda_arn,
        events=events,
        excluded_source_services=pull_services,
    )
    for event in events:
        function = event["function"]
        expression = event.get("expression", None)  # single expression
        expressions = event.get("expressions", None)  # multiple expression
        kwargs = event.get(
            "kwargs", {}
        )  # optional dict of keyword arguments for the event
        event_source = event.get("event_source", None)
        description = event.get("description", function)

        # - If 'cron' or 'rate' in expression, use ScheduleExpression
        # - Else, use EventPattern
        #   - ex https://github.com/awslabs/aws-lambda-ddns-function
        if not self.credentials_arn:
            self.get_credentials_arn()

        if expression:
            expressions = [
                expression
            ]  # same code for single and multiple expression

        if expressions:
            for index, expression in enumerate(expressions):
                name = self.get_scheduled_event_name(
                    event, function, lambda_name, index
                )
                # if it's possible that we truncated name, generate a unique, shortened name
                # https://github.com/Miserlou/Zappa/issues/970
                if len(name) >= 64:
                    rule_name = self.get_hashed_rule_name(
                        event, function, lambda_name
                    )
                else:
                    rule_name = name

                rule_response = self.events_client.put_rule(
                    Name=rule_name,
                    ScheduleExpression=expression,
                    State="ENABLED",
                    Description=description,
                    RoleArn=self.credentials_arn,
                )
                if "RuleArn" in rule_response:
                    logger.debug(
                        "Rule created. ARN {}".format(rule_response["RuleArn"])
                    )

                # Specific permissions are necessary for any trigger to work.
                self.create_event_permission(
                    lambda_name, "events.amazonaws.com", rule_response["RuleArn"]
                )

                # Overwriting the input, supply the original values and add kwargs
                input_template = (
                    '{"time": <time>, '
                    '"detail-type": <detail-type>, '
                    '"source": <source>,'
                    '"account": <account>, '
                    '"region": <region>,'
                    '"detail": <detail>, '
                    '"version": <version>,'
                    '"resources": <resources>,'
                    '"id": <id>,'
                    '"kwargs": %s'
                    "}" % json.dumps(kwargs)
                )

                # Create the CloudWatch event ARN for this function.
                # https://github.com/Miserlou/Zappa/issues/359
                target_response = self.events_client.put_targets(
                    Rule=rule_name,
                    Targets=[
                        {
                            "Id": "Id"
                            + "".join(
                                random.choice(string.digits) for _ in range(12)
                            ),
                            "Arn": lambda_arn,
                            "InputTransformer": {
                                "InputPathsMap": {
                                    "time": "$.time",
                                    "detail-type": "$.detail-type",
                                    "source": "$.source",
                                    "account": "$.account",
                                    "region": "$.region",
                                    "detail": "$.detail",
                                    "version": "$.version",
                                    "resources": "$.resources",
                                    "id": "$.id",
                                },
                                "InputTemplate": input_template,
                            },
                        }
                    ],
                )
                if target_response["ResponseMetadata"]["HTTPStatusCode"] == 200:
                    print(
                        "Scheduled {} with expression {}!".format(
                            rule_name, expression
                        )
                    )
                else:
                    print(
                        "Problem scheduling {} with expression {}.".format(
                            rule_name, expression
                        )
                    )
        elif event_source:
            service = self.service_from_arn(event_source["arn"])
            if service not in pull_services:
                # Push-based sources need an invoke permission on the Lambda.
                svc = ",".join(event["event_source"]["events"])
                self.create_event_permission(
                    lambda_name,
                    service + ".amazonaws.com",
                    event["event_source"]["arn"],
                )
            else:
                svc = service
            rule_response = add_event_source(
                event_source, lambda_arn, function, self.boto_session
            )
            if rule_response == "successful":
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == "failed":
                print(
                    "Problem creating {} event schedule for {}!".format(
                        svc, function
                    )
                )
            elif rule_response == "exists":
                print(
                    "{} event schedule for {} already exists - Nothing to do here.".format(
                        svc, function
                    )
                )
            elif rule_response == "dryrun":
                print(
                    "Dryrun for creating {} event schedule for {}!!".format(
                        svc, function
                    )
                )
        else:
            # NOTE(review): `name` is only bound if a previous event took the
            # expressions branch — this message can raise NameError for an
            # event with neither expression nor event_source; TODO confirm.
            print(
                "Could not create event {} - Please define either an expression or an event source".format(
                    name
                )
            )
@staticmethod
def get_scheduled_event_name(event, function, lambda_name, index=0):
    """
    Derive the CloudWatch rule name for a scheduled event.

    Custom event names get the function appended as a postfix so the
    handler can still resolve the function; entries after the first of
    a multi-expression event get the index prepended for uniqueness.
    The lambda name is always used as a prefix for later lookup.
    """
    event_name = event.get("name", function)
    if event_name != function:
        # A custom event name was supplied: keep the function as a postfix
        # so zappa's handler can locate it.
        event_name = "{}-{}".format(event_name, function)
    if index:
        # Guarantee unique rule names across multiple expressions.
        # Related: https://github.com/Miserlou/Zappa/pull/1051
        event_name = "{}-{}".format(index, event_name)
    # Prefix with the lambda name so rules can be found by prefix later.
    return Zappa.get_event_name(lambda_name, event_name)
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return "{prefix:.{width}}-{postfix}".format(
prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name
)[:64]
@staticmethod
def get_hashed_rule_name(event, function, lambda_name):
    """
    Returns an AWS-valid CloudWatch rule name using a digest of the event
    name, lambda name, and function.

    This allows support for rule names that may be longer than the
    64-char limit, since the SHA1 prefix has a fixed length.
    """
    event_name = event.get("name", function)
    digest_source = "{}-{}".format(lambda_name, event_name)
    name_hash = hashlib.sha1(digest_source.encode("UTF-8")).hexdigest()
    return Zappa.get_event_name(name_hash, function)
def delete_rule(self, rule_name):
    """
    Delete a CWE rule.

    This deletes them, but they will still show up in the AWS console.
    Annoying.
    """
    logger.debug("Deleting existing rule {}".format(rule_name))

    # All targets must be removed before
    # we can actually delete the rule.
    try:
        targets = self.events_client.list_targets_by_rule(Rule=rule_name)
    except botocore.exceptions.ClientError as e:
        # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
        error_code = e.response["Error"]["Code"]
        if error_code == "AccessDeniedException":
            raise
        else:
            # Any other error (e.g. rule not found) is treated as "nothing
            # to delete".
            logger.debug(
                "No target found for this rule: {} {}".format(rule_name, e.args[0])
            )
            return

    if "Targets" in targets and targets["Targets"]:
        self.events_client.remove_targets(
            Rule=rule_name, Ids=[x["Id"] for x in targets["Targets"]]
        )
    else:  # pragma: no cover
        logger.debug("No target to delete")

    # Delete our rule.
    self.events_client.delete_rule(Name=rule_name)
def get_event_rule_names_for_lambda(self, lambda_arn):
    """
    Get all of the CloudWatch rule names targeting this lambda,
    transparently following NextToken pagination.
    """
    page = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn)
    names = list(page["RuleNames"])
    while "NextToken" in page:
        page = self.events_client.list_rule_names_by_target(
            TargetArn=lambda_arn, NextToken=page["NextToken"]
        )
        names.extend(page["RuleNames"])
    return names
def get_event_rules_for_lambda(self, lambda_arn):
    """
    Get the full rule details for every CloudWatch rule targeting
    this function.
    """
    names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    details = []
    for rule_name in names:
        details.append(self.events_client.describe_rule(Name=rule_name))
    return details
def unschedule_events(
    self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None
):
    """
    Given a list of events, unschedule these CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contains the string
    of a 'function' and the string of the event 'expression', and an optional
    'name' and 'description'. Event sources whose service is listed in
    `excluded_source_services` are left in place.
    """
    # NOTE: this docstring used to sit *after* the assignment below, which
    # made it a dead expression-statement string rather than the docstring.
    excluded_source_services = excluded_source_services or []
    self._clear_policy(lambda_name)

    # Remove every CloudWatch rule currently targeting this function.
    rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
    for rule_name in rule_names:
        self.delete_rule(rule_name)
        print("Unscheduled " + rule_name + ".")

    non_cwe = [e for e in events if "event_source" in e]
    for event in non_cwe:
        # TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
        # them no matter what.
        # These are non CWE event sources.
        function = event["function"]
        name = event.get("name", function)
        event_source = event.get("event_source", function)
        service = self.service_from_arn(event_source["arn"])
        # DynamoDB and Kinesis streams take quite a while to setup after they are created and do not need to be
        # re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
        # update or zappa schedule.
        if service not in excluded_source_services:
            remove_event_source(
                event_source, lambda_arn, function, self.boto_session
            )
            print(
                "Removed event {}{}.".format(
                    name,
                    " ({})".format(str(event_source["events"]))
                    if "events" in event_source
                    else "",
                )
            )
###
# Async / SNS
##
def create_async_sns_topic(self, lambda_name, lambda_arn):
    """
    Create the SNS-based async topic.

    Creates (or reuses) the topic, subscribes the Lambda to it, grants
    SNS permission to invoke the function, and registers the topic as
    an event source routed to zappa's async task handler.
    Returns the topic ARN.
    """
    topic_name = get_topic_name(lambda_name)
    # Create SNS topic
    topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
    # Create subscription
    self.sns_client.subscribe(
        TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn
    )
    # Add Lambda permission for SNS to invoke function
    self.create_event_permission(
        lambda_name=lambda_name, principal="sns.amazonaws.com", source_arn=topic_arn
    )
    # Add rule for SNS topic as a event source
    add_event_source(
        event_source={"arn": topic_arn, "events": ["sns:Publish"]},
        lambda_arn=lambda_arn,
        target_function="zappa.asynchronous.route_task",
        boto_session=self.boto_session,
    )
    return topic_arn
def remove_async_sns_topic(self, lambda_name):
    """
    Remove the async SNS topic.

    Deletes the topic behind every subscription whose TopicArn contains
    this lambda's topic name; returns the list of deleted topic ARNs.
    """
    topic_name = get_topic_name(lambda_name)
    removed_arns = []
    subscriptions = self.sns_client.list_subscriptions()["Subscriptions"]
    for subscription in subscriptions:
        topic_arn = subscription["TopicArn"]
        if topic_name in topic_arn:
            self.sns_client.delete_topic(TopicArn=topic_arn)
            removed_arns.append(topic_arn)
    return removed_arns
###
# Async / DynamoDB
##
def _set_async_dynamodb_table_ttl(self, table_name):
self.dynamodb_client.update_time_to_live(
TableName=table_name,
TimeToLiveSpecification={"Enabled": True, "AttributeName": "ttl"},
)
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
    """
    Create the DynamoDB table for async task return values.

    Returns (created, table_description): created is False when the
    table already existed, True when a new table was provisioned (with
    a string "id" hash key and TTL enabled on the "ttl" attribute).
    """
    try:
        dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
        return False, dynamodb_table

    # catch this exception (triggered if the table doesn't exist)
    except botocore.exceptions.ClientError:
        dynamodb_table = self.dynamodb_client.create_table(
            AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
            TableName=table_name,
            KeySchema=[
                {"AttributeName": "id", "KeyType": "HASH"},
            ],
            ProvisionedThroughput={
                "ReadCapacityUnits": read_capacity,
                "WriteCapacityUnits": write_capacity,
            },
        )
        if dynamodb_table:
            try:
                self._set_async_dynamodb_table_ttl(table_name)
            except botocore.exceptions.ClientError:
                # this fails because the operation is async, so retry
                time.sleep(10)
                self._set_async_dynamodb_table_ttl(table_name)

    return True, dynamodb_table
def remove_async_dynamodb_table(self, table_name):
    """
    Delete the DynamoDB table that holds async task return values.
    """
    self.dynamodb_client.delete_table(TableName=table_name)
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern="", limit=10000, start_time=0):
    """
    Fetch the CloudWatch logs for a given Lambda name.

    `start_time` is a second-resolution epoch; results are returned as
    a list of event dicts sorted by timestamp, following nextToken
    pagination until exhausted.
    """
    log_name = "/aws/lambda/" + lambda_name
    streams = self.logs_client.describe_log_streams(
        logGroupName=log_name, descending=True, orderBy="LastEventTime"
    )
    all_names = [stream["logStreamName"] for stream in streams["logStreams"]]

    # Amazon uses millisecond epoch for some reason.
    # Thanks, Jeff.
    # Computed ONCE, outside the loop: the original multiplied start_time
    # by 1000 on every pagination pass, inflating it 1000x per page.
    start_ms = start_time * 1000
    end_ms = int(time.time()) * 1000

    events = []
    response = {}
    while not response or "nextToken" in response:
        extra_args = {}
        if "nextToken" in response:
            extra_args["nextToken"] = response["nextToken"]

        response = self.logs_client.filter_log_events(
            logGroupName=log_name,
            logStreamNames=all_names,
            startTime=start_ms,
            endTime=end_ms,
            filterPattern=filter_pattern,
            limit=limit,
            interleaved=True,  # Does this actually improve performance?
            **extra_args,
        )
        if response and "events" in response:
            events += response["events"]

    return sorted(events, key=lambda k: k["timestamp"])
def remove_log_group(self, group_name):
    """
    Delete the named CloudWatch log group, reporting (not raising)
    client errors.
    """
    print("Removing log group: {}".format(group_name))
    try:
        self.logs_client.delete_log_group(logGroupName=group_name)
    except botocore.exceptions.ClientError as e:
        # Best-effort: report the failure and carry on.
        print("Couldn't remove '{}' because of: {}".format(group_name, e))
def remove_lambda_function_logs(self, lambda_function_name):
    """
    Remove the CloudWatch log group belonging to the given Lambda
    function.
    """
    group_name = "/aws/lambda/{}".format(lambda_function_name)
    self.remove_log_group(group_name)
def remove_api_gateway_logs(self, project_name):
    """
    Remove the execution log groups for every stage of every REST API
    belonging to this project.
    """
    for rest_api in self.get_rest_apis(project_name):
        api_id = rest_api["id"]
        stages = self.apigateway_client.get_stages(restApiId=api_id)["item"]
        for stage in stages:
            group_name = "API-Gateway-Execution-Logs_{}/{}".format(
                api_id, stage["stageName"]
            )
            self.remove_log_group(group_name)
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
    """
    Get the Hosted Zone ID whose name best matches the given domain.
    """
    zones = self.get_all_zones()
    return self.get_best_match_zone(zones, domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [
zone
for zone in all_zones["HostedZones"]
if not zone["Config"]["PrivateZone"]
]
zones = {
zone["Name"][:-1]: zone["Id"]
for zone in public_zones
if zone["Name"][:-1] in domain
}
if zones:
keys = max(
zones.keys(), key=lambda a: len(a)
) # get longest key -- best match.
return zones[keys]
else:
return None
def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
    """
    Set DNS challenge TXT.
    """
    print("Setting DNS challenge..")
    change_batch = self.get_dns_challenge_change_batch(
        "UPSERT", domain, txt_challenge
    )
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch
    )
def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
    """
    Remove DNS challenge TXT.
    """
    print("Deleting DNS challenge..")
    change_batch = self.get_dns_challenge_change_batch(
        "DELETE", domain, txt_challenge
    )
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch
    )
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challenge, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": "_acme-challenge.{0}".format(domain),
"Type": "TXT",
"TTL": 60,
"ResourceRecords": [{"Value": '"{0}"'.format(txt_challenge)}],
},
}
]
}
##
# Utility
##
def shell(self):
    """
    Spawn a PDB shell.
    """
    # Local import: pdb is only needed when a developer drops into this shell.
    import pdb
    pdb.set_trace()
def load_credentials(self, boto_session=None, profile_name=None):
    """
    Load AWS credentials.

    An optional boto_session can be provided, but that's usually for testing.

    An optional profile_name can be provided for config files that have multiple sets
    of credentials.

    Resolution order when no session is given: named profile, then
    AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (+ optional
    AWS_SESSION_TOKEN and AWS_DEFAULT_REGION) environment variables,
    then boto3's default config-file chain. Sets `self.boto_session`
    and `self.aws_region`, and warns when the region is not known to
    support Lambda or API Gateway.
    """
    # Automatically load credentials from config or environment
    if not boto_session:

        # If provided, use the supplied profile name.
        if profile_name:
            self.boto_session = boto3.Session(
                profile_name=profile_name, region_name=self.aws_region
            )
        elif os.environ.get("AWS_ACCESS_KEY_ID") and os.environ.get(
            "AWS_SECRET_ACCESS_KEY"
        ):
            region_name = os.environ.get("AWS_DEFAULT_REGION") or self.aws_region
            session_kw = {
                "aws_access_key_id": os.environ.get("AWS_ACCESS_KEY_ID"),
                "aws_secret_access_key": os.environ.get("AWS_SECRET_ACCESS_KEY"),
                "region_name": region_name,
            }

            # If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
            if os.environ.get("AWS_SESSION_TOKEN"):
                session_kw["aws_session_token"] = os.environ.get(
                    "AWS_SESSION_TOKEN"
                )

            self.boto_session = boto3.Session(**session_kw)
        else:
            # Fall back to boto3's default credential resolution.
            self.boto_session = boto3.Session(region_name=self.aws_region)

        logger.debug("Loaded boto session from config: %s", boto_session)
    else:
        logger.debug("Using provided boto session: %s", boto_session)
        self.boto_session = boto_session

    # use provided session's region in case it differs
    self.aws_region = self.boto_session.region_name

    if self.boto_session.region_name not in LAMBDA_REGIONS:
        print("Warning! AWS Lambda may not be available in this AWS Region!")

    if self.boto_session.region_name not in API_GATEWAY_REGIONS:
        print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(":")[2] | zappa2 | /zappa2-0.53.4-py3-none-any.whl/zappa/core.py | core.py |
import calendar
import datetime
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from urllib.parse import urlparse
import botocore
import durationpy
from past.builtins import basestring
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.

    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.

    `symlinks=True` recreates symlinks instead of copying their targets;
    `ignore` follows the shutil.copytree(ignore=...) callable convention.
    """

    def copy_file(src, dst, item):
        # Copy a single directory entry, dispatching on its type.
        s = os.path.join(src, item)
        d = os.path.join(dst, item)

        if symlinks and os.path.islink(s):  # pragma: no cover
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                except:
                    pass  # lchmod not available
        elif os.path.isdir(s):
            # Recurse into subdirectories with the same options.
            copytree(s, d, metadata, symlinks, ignore)
        else:
            shutil.copy2(s, d) if metadata else shutil.copy(s, d)

    try:
        lst = os.listdir(src)
        if not os.path.exists(dst):
            os.makedirs(dst)
        if metadata:
            shutil.copystat(src, dst)
    except NotADirectoryError:  # egg-link files
        # `src` is actually a file (e.g. an egg-link): copy it directly.
        copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src))
        return

    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]

    for item in lst:
        copy_file(src, dst, item)
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path). Both are empty
    strings when `url` is falsy.
    """
    if not url:
        return "", ""
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip("/")
def human_size(num, suffix="B"):
    """
    Convert bytes length to a human-readable version (e.g. '1.5KiB').
    """
    value = num
    for unit in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(value) < 1024.0:
            return "{0:3.1f}{1!s}{2!s}".format(value, unit, suffix)
        value /= 1024.0
    return "{0:.1f}{1!s}{2!s}".format(value, "Yi", suffix)
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.

    `timestring` is an extended Go-style duration string (e.g. "3h",
    "2d") and is interpreted as "that long ago" relative to now (UTC).
    Returns 0 when the string cannot be parsed.
    """
    # Uses an extended version of Go's duration string.
    # (The original also had an unreachable `if ts:` branch after the
    # in-try `return`, and captured the exception without using it.)
    try:
        delta = durationpy.from_str(timestring)
    except Exception:
        # Unparseable duration: signal failure with the 0 sentinel.
        return 0
    past = datetime.datetime.utcnow() - delta
    return calendar.timegm(past.timetuple())
##
# `init` related
##
def detect_django_settings():
"""
Automatically try to discover Django settings files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, "*settings.py"):
full = os.path.join(root, filename)
if "site-packages" in full:
continue
full = os.path.join(root, filename)
package_path = full.replace(os.getcwd(), "")
package_module = (
package_path.replace(os.sep, ".").split(".", 1)[1].replace(".py", "")
)
matches.append(package_module)
return matches
def detect_flask_apps():
"""
Automatically try to discover Flask apps files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, "*.py"):
full = os.path.join(root, filename)
if "site-packages" in full:
continue
full = os.path.join(root, filename)
with io.open(full, "r", encoding="utf-8") as f:
lines = f.readlines()
for line in lines:
app = None
# Kind of janky..
if "= Flask(" in line:
app = line.split("= Flask(")[0].strip()
if "=Flask(" in line:
app = line.split("=Flask(")[0].strip()
if not app:
continue
package_path = full.replace(os.getcwd(), "")
package_module = (
package_path.replace(os.sep, ".")
.split(".", 1)[1]
.replace(".py", "")
)
app_module = package_module + "." + app
matches.append(app_module)
return matches
def get_venv_from_python_version():
return "python{}.{}".format(*sys.version_info)
def get_runtime_from_python_version():
""" """
if sys.version_info[0] < 3:
raise ValueError("Python 2.x is no longer supported.")
else:
if sys.version_info[1] <= 6:
return "python3.6"
elif sys.version_info[1] <= 7:
return "python3.7"
else:
return "python3.8"
##
# Async Tasks
##
def get_topic_name(lambda_name):
"""Topic name generation"""
return "%s-zappa-async" % lambda_name
##
# Event sources / Kappa
##
def get_event_source(
event_source, lambda_arn, target_function, boto_session, dry=False
):
"""
Given an event_source dictionary item, a session and a lambda_arn,
hack into Kappa's Gibson, create out an object we can call
to schedule this event, and return the event source.
"""
import kappa.awsclient
import kappa.event_source.base
import kappa.event_source.cloudwatch
import kappa.event_source.dynamodb_stream
import kappa.event_source.kinesis
import kappa.event_source.s3
import kappa.event_source.sns
import kappa.function
import kappa.policy
import kappa.restapi
import kappa.role
class PseudoContext:
def __init__(self):
return
class PseudoFunction:
def __init__(self):
return
# Mostly adapted from kappa - will probably be replaced by kappa support
class SqsEventSource(kappa.event_source.base.EventSource):
def __init__(self, context, config):
super().__init__(context, config)
self._lambda = kappa.awsclient.create_client("lambda", context.session)
def _get_uuid(self, function):
uuid = None
response = self._lambda.call(
"list_event_source_mappings",
FunctionName=function.name,
EventSourceArn=self.arn,
)
LOG.debug(response)
if len(response["EventSourceMappings"]) > 0:
uuid = response["EventSourceMappings"][0]["UUID"]
return uuid
def add(self, function):
try:
response = self._lambda.call(
"create_event_source_mapping",
FunctionName=function.name,
EventSourceArn=self.arn,
BatchSize=self.batch_size,
Enabled=self.enabled,
)
LOG.debug(response)
except Exception:
LOG.exception("Unable to add event source")
def enable(self, function):
self._config["enabled"] = True
try:
response = self._lambda.call(
"update_event_source_mapping",
UUID=self._get_uuid(function),
Enabled=self.enabled,
)
LOG.debug(response)
except Exception:
LOG.exception("Unable to enable event source")
def disable(self, function):
self._config["enabled"] = False
try:
response = self._lambda.call(
"update_event_source_mapping",
FunctionName=function.name,
Enabled=self.enabled,
)
LOG.debug(response)
except Exception:
LOG.exception("Unable to disable event source")
def update(self, function):
response = None
uuid = self._get_uuid(function)
if uuid:
try:
response = self._lambda.call(
"update_event_source_mapping",
BatchSize=self.batch_size,
Enabled=self.enabled,
FunctionName=function.arn,
)
LOG.debug(response)
except Exception:
LOG.exception("Unable to update event source")
def remove(self, function):
response = None
uuid = self._get_uuid(function)
if uuid:
response = self._lambda.call("delete_event_source_mapping", UUID=uuid)
LOG.debug(response)
return response
def status(self, function):
response = None
LOG.debug("getting status for event source %s", self.arn)
uuid = self._get_uuid(function)
if uuid:
try:
response = self._lambda.call(
"get_event_source_mapping", UUID=self._get_uuid(function)
)
LOG.debug(response)
except botocore.exceptions.ClientError:
LOG.debug("event source %s does not exist", self.arn)
response = None
else:
LOG.debug("No UUID for event source %s", self.arn)
return response
class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
@property
def filters(self):
return self._config.get("filters")
def add_filters(self, function):
try:
subscription = self.exists(function)
if subscription:
response = self._sns.call(
"set_subscription_attributes",
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="FilterPolicy",
AttributeValue=json.dumps(self.filters),
)
kappa.event_source.sns.LOG.debug(response)
except Exception:
kappa.event_source.sns.LOG.exception(
"Unable to add filters for SNS topic %s", self.arn
)
def add(self, function):
super().add(function)
if self.filters:
self.add_filters(function)
event_source_map = {
"dynamodb": kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
"kinesis": kappa.event_source.kinesis.KinesisEventSource,
"s3": kappa.event_source.s3.S3EventSource,
"sns": ExtendedSnsEventSource,
"sqs": SqsEventSource,
"events": kappa.event_source.cloudwatch.CloudWatchEventSource,
}
arn = event_source["arn"]
_, _, svc, _ = arn.split(":", 3)
event_source_func = event_source_map.get(svc, None)
if not event_source_func:
raise ValueError("Unknown event source: {0}".format(arn))
def autoreturn(self, function_name):
return function_name
event_source_func._make_notification_id = autoreturn
ctx = PseudoContext()
ctx.session = boto_session
funk = PseudoFunction()
funk.name = lambda_arn
# Kappa 0.6.0 requires this nasty hacking,
# hopefully we can remove at least some of this soon.
# Kappa 0.7.0 introduces a whole host over other changes we don't
# really want, so we're stuck here for a little while.
# Related: https://github.com/Miserlou/Zappa/issues/684
# https://github.com/Miserlou/Zappa/issues/688
# https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
if svc == "s3":
split_arn = lambda_arn.split(":")
arn_front = ":".join(split_arn[:-1])
arn_back = split_arn[-1]
ctx.environment = arn_back
funk.arn = arn_front
funk.name = ":".join([arn_back, target_function])
else:
funk.arn = lambda_arn
funk._context = ctx
event_source_obj = event_source_func(ctx, event_source)
return event_source_obj, ctx, funk
def add_event_source(
event_source, lambda_arn, target_function, boto_session, dry=False
):
"""
Given an event_source dictionary, create the object and add the event source.
"""
event_source_obj, ctx, funk = get_event_source(
event_source, lambda_arn, target_function, boto_session, dry=False
)
# TODO: Detect changes in config and refine exists algorithm
if not dry:
if not event_source_obj.status(funk):
event_source_obj.add(funk)
return "successful" if event_source_obj.status(funk) else "failed"
else:
return "exists"
return "dryrun"
def remove_event_source(
event_source, lambda_arn, target_function, boto_session, dry=False
):
"""
Given an event_source dictionary, create the object and remove the event source.
"""
event_source_obj, ctx, funk = get_event_source(
event_source, lambda_arn, target_function, boto_session, dry=False
)
# This is slightly dirty, but necessary for using Kappa this way.
funk.arn = lambda_arn
if not dry:
rule_response = event_source_obj.remove(funk)
return rule_response
else:
return event_source_obj
def get_event_source_status(
event_source, lambda_arn, target_function, boto_session, dry=False
):
"""
Given an event_source dictionary, create the object and get the event source status.
"""
event_source_obj, ctx, funk = get_event_source(
event_source, lambda_arn, target_function, boto_session, dry=False
)
return event_source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
"""
Checks if a newer version of Zappa is available.
Returns True is updateable, else False.
"""
import requests
pypi_url = "https://pypi.org/pypi/Zappa/json"
resp = requests.get(pypi_url, timeout=1.5)
top_version = resp.json()["info"]["version"]
return this_version != top_version
class InvalidAwsLambdaName(Exception):
"""Exception: proposed AWS Lambda name is invalid"""
pass
def validate_name(name, maxlen=80):
"""Validate name for AWS Lambda function.
name: actual name (without `arn:aws:lambda:...:` prefix and without
`:$LATEST`, alias or version suffix.
maxlen: max allowed length for name without prefix and suffix.
The value 80 was calculated from prefix with longest known region name
and assuming that no alias or version would be longer than `$LATEST`.
Based on AWS Lambda spec
http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html
Return: the name
Raise: InvalidAwsLambdaName, if the name is invalid.
"""
if not isinstance(name, basestring):
msg = "Name must be of type string"
raise InvalidAwsLambdaName(msg)
if len(name) > maxlen:
msg = "Name is longer than {maxlen} characters."
raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
if len(name) == 0:
msg = "Name must not be empty string."
raise InvalidAwsLambdaName(msg)
if not re.match("^[a-zA-Z0-9-_]+$", name):
msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
raise InvalidAwsLambdaName(msg)
return name
def contains_python_files_or_subdirs(folder):
"""
Checks (recursively) if the directory contains .py or .pyc files
"""
for root, dirs, files in os.walk(folder):
if [
filename
for filename in files
if filename.endswith(".py") or filename.endswith(".pyc")
]:
return True
for d in dirs:
for _, subdirs, subfiles in os.walk(d):
if [
filename
for filename in subfiles
if filename.endswith(".py") or filename.endswith(".pyc")
]:
return True
return False
def conflicts_with_a_neighbouring_module(directory_path):
"""
Checks if a directory lies in the same directory as a .py file with the same name.
"""
parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path))
neighbours = os.listdir(parent_dir_path)
conflicting_neighbour_filename = current_dir_name + ".py"
return conflicting_neighbour_filename in neighbours
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
"""
Takes a dict with keys of type str and returns a new dict with all keys titlecased.
"""
return {k.title(): v for k, v in d.items()}
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
"""
Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
"""
# Bucket names must be at least 3 and no more than 63 characters long.
if len(name) < 3 or len(name) > 63:
return False
# Bucket names must not contain uppercase characters or underscores.
if any(x.isupper() for x in name):
return False
if "_" in name:
return False
# Bucket names must start with a lowercase letter or number.
if not (name[0].islower() or name[0].isdigit()):
return False
# Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.).
for label in name.split("."):
# Each label must start and end with a lowercase letter or a number.
if len(label) < 1:
return False
if not (label[0].islower() or label[0].isdigit()):
return False
if not (label[-1].islower() or label[-1].isdigit()):
return False
# Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
looks_like_IP = True
for label in name.split("."):
if not label.isdigit():
looks_like_IP = False
break
if looks_like_IP:
return False
return True
def merge_headers(event):
"""
Merge the values of headers and multiValueHeaders into a single dict.
Opens up support for multivalue headers via API Gateway and ALB.
See: https://github.com/Miserlou/Zappa/pull/1756
"""
headers = event.get("headers") or {}
multi_headers = (event.get("multiValueHeaders") or {}).copy()
for h in set(headers.keys()):
if h not in multi_headers:
multi_headers[h] = [headers[h]]
for h in multi_headers.keys():
multi_headers[h] = ", ".join(multi_headers[h])
return multi_headers | zappa2 | /zappa2-0.53.4-py3-none-any.whl/zappa/utilities.py | utilities.py |
import argparse
import base64
import collections
import importlib
import inspect
import logging
import os
import pkgutil
import random
import re
import string
import sys
import tempfile
import time
import zipfile
from builtins import bytes, input
from datetime import datetime, timedelta
import argcomplete
import botocore
import click
import hjson as json
import pkg_resources
import requests
import slugify
import toml
import yaml
from click import BaseCommand, Context
from click.exceptions import ClickException
from click.globals import push_context
from dateutil import parser
from past.builtins import basestring
from .core import API_GATEWAY_REGIONS, Zappa, logger
from .utilities import (
InvalidAwsLambdaName,
check_new_version_available,
detect_django_settings,
detect_flask_apps,
get_runtime_from_python_version,
get_venv_from_python_version,
human_size,
is_valid_bucket_name,
parse_s3_url,
string_to_timestamp,
validate_name,
)
# Settings keys that are consumed by Zappa itself (packaging, IAM management,
# deployment behavior) rather than being forwarded to AWS as-is.
CUSTOM_SETTINGS = [
    "apigateway_policy",
    "assume_policy",
    "attach_policy",
    "aws_region",
    "delete_local_zip",
    "delete_s3_zip",
    "exclude",
    "exclude_glob",
    "extra_permissions",
    "include",
    "role_name",
    "touch",
]
# Pointer shown to users who need to configure boto3/AWS credentials.
BOTO3_CONFIG_DOCS_URL = (
    "https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration"
)
##
# Main Input Processing
##
class ZappaCLI:
    """
    ZappaCLI object is responsible for loading the settings,
    handling the input arguments and executing the calls to the core library.
    """
    # CLI
    # Parsed command-line arguments as a dict (populated by handle()).
    vargs = None
    # The subcommand being executed, e.g. "deploy" or "update".
    command = None
    # Stage name supplied on the command line, if any.
    stage_env = None
    # Zappa settings
    # Core Zappa object used for AWS interactions.
    zappa = None
    # Contents of the loaded zappa_settings file.
    zappa_settings = None
    load_credentials = True
    disable_progress = False
    # Specific settings
    # Per-stage configuration values, filled in from stage_config by load_settings().
    api_stage = None
    app_function = None
    aws_region = None
    debug = None
    prebuild_script = None
    project_name = None
    profile_name = None
    lambda_arn = None
    lambda_name = None
    lambda_description = None
    lambda_concurrency = None
    s3_bucket_name = None
    settings_file = None
    zip_path = None
    handler_path = None
    vpc_config = None
    memory_size = None
    use_apigateway = None
    lambda_handler = None
    django_settings = None
    manage_roles = True
    exception_handler = None
    environment_variables = None
    authorizer = None
    xray_tracing = False
    aws_kms_key_arn = ""
    context_header_mappings = None
    tags = []
    layers = None
    # Valid stage names: alphanumerics and underscores only.
    stage_name_env_pattern = re.compile("^[a-zA-Z0-9_]+$")
def __init__(self):
self._stage_config_overrides = (
{}
) # change using self.override_stage_config_setting(key, val)
@property
def stage_config(self):
"""
A shortcut property for settings of a stage.
"""
def get_stage_setting(stage, extended_stages=None):
if extended_stages is None:
extended_stages = []
if stage in extended_stages:
raise RuntimeError(
stage + " has already been extended to these settings. "
"There is a circular extends within the settings file."
)
extended_stages.append(stage)
try:
stage_settings = dict(self.zappa_settings[stage].copy())
except KeyError:
raise ClickException(
"Cannot extend settings for undefined stage '" + stage + "'."
)
extends_stage = self.zappa_settings[stage].get("extends", None)
if not extends_stage:
return stage_settings
extended_settings = get_stage_setting(
stage=extends_stage, extended_stages=extended_stages
)
extended_settings.update(stage_settings)
return extended_settings
settings = get_stage_setting(stage=self.api_stage)
# Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
if "delete_zip" in settings:
settings["delete_local_zip"] = settings.get("delete_zip")
settings.update(self.stage_config_overrides)
return settings
@property
def stage_config_overrides(self):
"""
Returns zappa_settings we forcefully override for the current stage
set by `self.override_stage_config_setting(key, value)`
"""
return getattr(self, "_stage_config_overrides", {}).get(self.api_stage, {})
def override_stage_config_setting(self, key, val):
"""
Forcefully override a setting set by zappa_settings (for the current stage only)
:param key: settings key
:param val: value
"""
self._stage_config_overrides = getattr(self, "_stage_config_overrides", {})
self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val
    def handle(self, argv=None):
        """
        Main function.
        Parses command, load settings and dispatches accordingly.
        """
        # Top-level parser; every subcommand below registers its own options.
        desc = "Zappa - Deploy Python applications to AWS Lambda" " and API Gateway.\n"
        parser = argparse.ArgumentParser(description=desc)
        parser.add_argument(
            "-v",
            "--version",
            action="version",
            version=pkg_resources.get_distribution("zappa2").version,
            help="Print the zappa version",
        )
        parser.add_argument(
            "--color", default="auto", choices=["auto", "never", "always"]
        )
        # Shared parent parser: stage selection plus common flags,
        # inherited by most subcommands via parents=[env_parser].
        env_parser = argparse.ArgumentParser(add_help=False)
        me_group = env_parser.add_mutually_exclusive_group()
        all_help = "Execute this command for all of our defined " "Zappa stages."
        me_group.add_argument("--all", action="store_true", help=all_help)
        me_group.add_argument("stage_env", nargs="?")
        group = env_parser.add_argument_group()
        group.add_argument(
            "-a", "--app_function", help="The WSGI application function."
        )
        group.add_argument(
            "-s", "--settings_file", help="The path to a Zappa settings file."
        )
        group.add_argument(
            "-q", "--quiet", action="store_true", help="Silence all output."
        )
        # https://github.com/Miserlou/Zappa/issues/407
        # Moved when 'template' command added.
        # Fuck Terraform.
        group.add_argument(
            "-j",
            "--json",
            action="store_true",
            help="Make the output of this command be machine readable.",
        )
        # https://github.com/Miserlou/Zappa/issues/891
        group.add_argument(
            "--disable_progress", action="store_true", help="Disable progress bars."
        )
        group.add_argument("--no_venv", action="store_true", help="Skip venv check.")
        ##
        # Certify
        ##
        subparsers = parser.add_subparsers(title="subcommands", dest="command")
        cert_parser = subparsers.add_parser(
            "certify", parents=[env_parser], help="Create and install SSL certificate"
        )
        cert_parser.add_argument(
            "--manual",
            action="store_true",
            help=(
                "Gets new Let's Encrypt certificates, but prints them to console."
                "Does not update API Gateway domains."
            ),
        )
        cert_parser.add_argument(
            "-y", "--yes", action="store_true", help="Auto confirm yes."
        )
        ##
        # Deploy
        ##
        deploy_parser = subparsers.add_parser(
            "deploy", parents=[env_parser], help="Deploy application."
        )
        deploy_parser.add_argument(
            "-z",
            "--zip",
            help="Deploy Lambda with specific local or S3 hosted zip package",
        )
        deploy_parser.add_argument(
            "-d",
            "--docker-image-uri",
            help="Deploy Lambda with a specific docker image hosted in AWS Elastic Container Registry",
        )
        ##
        # Init
        ##
        # NOTE(review): init_parser is never referenced after registration;
        # the assignment only documents intent.
        init_parser = subparsers.add_parser("init", help="Initialize Zappa app.")
        ##
        # Package
        ##
        package_parser = subparsers.add_parser(
            "package",
            parents=[env_parser],
            help="Build the application zip package locally.",
        )
        package_parser.add_argument(
            "-o", "--output", help="Name of file to output the package to."
        )
        ##
        # Template
        ##
        template_parser = subparsers.add_parser(
            "template",
            parents=[env_parser],
            help="Create a CloudFormation template for this API Gateway.",
        )
        template_parser.add_argument(
            "-l",
            "--lambda-arn",
            required=True,
            help="ARN of the Lambda function to template to.",
        )
        template_parser.add_argument(
            "-r", "--role-arn", required=True, help="ARN of the Role to template with."
        )
        template_parser.add_argument(
            "-o", "--output", help="Name of file to output the template to."
        )
        ##
        # Invocation
        ##
        invoke_parser = subparsers.add_parser(
            "invoke", parents=[env_parser], help="Invoke remote function."
        )
        invoke_parser.add_argument(
            "--raw",
            action="store_true",
            help=(
                "When invoking remotely, invoke this python as a string,"
                " not as a modular path."
            ),
        )
        invoke_parser.add_argument(
            "--no-color", action="store_true", help=("Don't color the output")
        )
        invoke_parser.add_argument("command_rest")
        ##
        # Manage
        ##
        manage_parser = subparsers.add_parser(
            "manage", help="Invoke remote Django manage.py commands."
        )
        rest_help = (
            "Command in the form of <env> <command>. <env> is not "
            "required if --all is specified"
        )
        manage_parser.add_argument("--all", action="store_true", help=all_help)
        manage_parser.add_argument("command_rest", nargs="+", help=rest_help)
        manage_parser.add_argument(
            "--no-color", action="store_true", help=("Don't color the output")
        )
        # This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser
        # https://github.com/Miserlou/Zappa/issues/1002
        manage_parser.add_argument(
            "-s", "--settings_file", help="The path to a Zappa settings file."
        )
        ##
        # Rollback
        ##
        def positive_int(s):
            """Ensure an arg is positive"""
            i = int(s)
            if i < 0:
                msg = "This argument must be positive (got {})".format(s)
                raise argparse.ArgumentTypeError(msg)
            return i
        rollback_parser = subparsers.add_parser(
            "rollback",
            parents=[env_parser],
            help="Rollback deployed code to a previous version.",
        )
        rollback_parser.add_argument(
            "-n",
            "--num-rollback",
            type=positive_int,
            default=1,
            help="The number of versions to rollback.",
        )
        ##
        # Scheduling
        ##
        subparsers.add_parser(
            "schedule",
            parents=[env_parser],
            help="Schedule functions to occur at regular intervals.",
        )
        ##
        # Status
        ##
        subparsers.add_parser(
            "status",
            parents=[env_parser],
            help="Show deployment status and event schedules.",
        )
        ##
        # Log Tailing
        ##
        tail_parser = subparsers.add_parser(
            "tail", parents=[env_parser], help="Tail deployment logs."
        )
        tail_parser.add_argument(
            "--no-color", action="store_true", help="Don't color log tail output."
        )
        tail_parser.add_argument(
            "--http",
            action="store_true",
            help="Only show HTTP requests in tail output.",
        )
        tail_parser.add_argument(
            "--non-http",
            action="store_true",
            help="Only show non-HTTP requests in tail output.",
        )
        tail_parser.add_argument(
            "--since",
            type=str,
            default="100000s",
            help="Only show lines since a certain timeframe.",
        )
        tail_parser.add_argument(
            "--filter", type=str, default="", help="Apply a filter pattern to the logs."
        )
        tail_parser.add_argument(
            "--force-color",
            action="store_true",
            help="Force coloring log tail output even if coloring support is not auto-detected. (example: piping)",
        )
        tail_parser.add_argument(
            "--disable-keep-open",
            action="store_true",
            help="Exit after printing the last available log, rather than keeping the log open.",
        )
        ##
        # Undeploy
        ##
        undeploy_parser = subparsers.add_parser(
            "undeploy", parents=[env_parser], help="Undeploy application."
        )
        undeploy_parser.add_argument(
            "--remove-logs",
            action="store_true",
            help=(
                "Removes log groups of api gateway and lambda task"
                " during the undeployment."
            ),
        )
        undeploy_parser.add_argument(
            "-y", "--yes", action="store_true", help="Auto confirm yes."
        )
        ##
        # Unschedule
        ##
        subparsers.add_parser(
            "unschedule", parents=[env_parser], help="Unschedule functions."
        )
        ##
        # Updating
        ##
        update_parser = subparsers.add_parser(
            "update", parents=[env_parser], help="Update deployed application."
        )
        update_parser.add_argument(
            "-z",
            "--zip",
            help="Update Lambda with specific local or S3 hosted zip package",
        )
        update_parser.add_argument(
            "-n",
            "--no-upload",
            help="Update configuration where appropriate, but don't upload new code",
        )
        update_parser.add_argument(
            "-d",
            "--docker-image-uri",
            help="Update Lambda with a specific docker image hosted in AWS Elastic Container Registry",
        )
        ##
        # Debug
        ##
        subparsers.add_parser(
            "shell",
            parents=[env_parser],
            help="A debug shell with a loaded Zappa object.",
        )
        ##
        # Python Settings File
        ##
        settings_parser = subparsers.add_parser(
            "save-python-settings-file",
            parents=[env_parser],
            help="Generate & save the Zappa settings Python file for docker deployments",
        )
        settings_parser.add_argument(
            "-o",
            "--output_path",
            help=(
                "The path to save the Zappa settings Python file. "
                "File must be named zappa_settings.py and should be saved "
                "in the same directory as the Zappa handler.py"
            ),
        )
        # Enable shell tab-completion, then actually parse the arguments.
        argcomplete.autocomplete(parser)
        args = parser.parse_args(argv)
        self.vargs = vars(args)
        if args.color == "never":
            disable_click_colors()
        elif args.color == "always":
            # TODO: Support aggressive coloring like "--force-color" on all commands
            pass
        elif args.color == "auto":
            pass
        # Parse the input
        # NOTE(rmoe): Special case for manage command
        # The manage command can't have both stage_env and command_rest
        # arguments. Since they are both positional arguments argparse can't
        # differentiate the two. This causes problems when used with --all.
        # (e.g. "manage --all showmigrations admin" argparse thinks --all has
        # been specified AND that stage_env='showmigrations')
        # By having command_rest collect everything but --all we can split it
        # apart here instead of relying on argparse.
        if not args.command:
            parser.print_help()
            return
        if args.command == "manage" and not self.vargs.get("all"):
            self.stage_env = self.vargs["command_rest"].pop(0)
        else:
            self.stage_env = self.vargs.get("stage_env")
        if args.command == "package":
            self.load_credentials = False
        self.command = args.command
        self.disable_progress = self.vargs.get("disable_progress")
        if self.vargs.get("quiet"):
            self.silence()
        # We don't have any settings yet, so make those first!
        # (Settings-based interactions will fail
        # before a project has been initialized.)
        if self.command == "init":
            self.init()
            return
        # Make sure there isn't a new version available
        if not self.vargs.get("json"):
            self.check_for_update()
        # Load and Validate Settings File
        self.load_settings_file(self.vargs.get("settings_file"))
        # Should we execute this for all stages, or just one?
        all_stages = self.vargs.get("all")
        stages = []
        if all_stages: # All stages!
            stages = self.zappa_settings.keys()
        else: # Just one env.
            if not self.stage_env:
                # If there's only one stage defined in the settings,
                # use that as the default.
                if len(self.zappa_settings.keys()) == 1:
                    stages.append(list(self.zappa_settings.keys())[0])
                else:
                    parser.error("Please supply a stage to interact with.")
            else:
                stages.append(self.stage_env)
        # Run the chosen command against each selected stage, turning
        # ClickExceptions into process exit codes.
        for stage in stages:
            try:
                self.dispatch_command(self.command, stage)
            except ClickException as e:
                # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
                e.show()
                sys.exit(e.exit_code)
    def dispatch_command(self, command, stage):
        """
        Given a command to execute and stage,
        execute that command.
        """
        self.api_stage = stage
        # Announce the call unless the command is read-only-ish or JSON output was requested.
        if command not in ["status", "manage"]:
            if not self.vargs.get("json", None):
                click.echo(
                    "Calling "
                    + click.style(command, fg="green", bold=True)
                    + " for stage "
                    + click.style(self.api_stage, bold=True)
                    + ".."
                )
        # Explicitly define the app function.
        # Related: https://github.com/Miserlou/Zappa/issues/832
        if self.vargs.get("app_function", None):
            self.app_function = self.vargs["app_function"]
        # Load our settings, based on api_stage.
        try:
            self.load_settings(self.vargs.get("settings_file"))
        except ValueError as e:
            if hasattr(e, "message"):
                print("Error: {}".format(e.message))
            else:
                print(str(e))
            sys.exit(-1)
        self.callback("settings")
        # Hand it off
        # NOTE(review): the first three branches use bare `if` rather than
        # `elif`; harmless since `command` can match at most one, but
        # inconsistent with the elif chain that follows.
        if command == "deploy": # pragma: no cover
            self.deploy(self.vargs["zip"], self.vargs["docker_image_uri"])
        if command == "package": # pragma: no cover
            self.package(self.vargs["output"])
        if command == "template": # pragma: no cover
            self.template(
                self.vargs["lambda_arn"],
                self.vargs["role_arn"],
                output=self.vargs["output"],
                json=self.vargs["json"],
            )
        elif command == "update": # pragma: no cover
            self.update(
                self.vargs["zip"],
                self.vargs["no_upload"],
                self.vargs["docker_image_uri"],
            )
        elif command == "rollback": # pragma: no cover
            self.rollback(self.vargs["num_rollback"])
        elif command == "invoke": # pragma: no cover
            if not self.vargs.get("command_rest"):
                print("Please enter the function to invoke.")
                return
            self.invoke(
                self.vargs["command_rest"],
                raw_python=self.vargs["raw"],
                no_color=self.vargs["no_color"],
            )
        elif command == "manage": # pragma: no cover
            if not self.vargs.get("command_rest"):
                print("Please enter the management command to invoke.")
                return
            if not self.django_settings:
                print("This command is for Django projects only!")
                print(
                    "If this is a Django project, please define django_settings in your zappa_settings."
                )
                return
            command_tail = self.vargs.get("command_rest")
            if len(command_tail) > 1:
                command = " ".join(
                    command_tail
                ) # ex: zappa manage dev "shell --version"
            else:
                command = command_tail[0] # ex: zappa manage dev showmigrations admin
            self.invoke(
                command,
                command="manage",
                no_color=self.vargs["no_color"],
            )
        elif command == "tail": # pragma: no cover
            self.tail(
                colorize=(not self.vargs["no_color"]),
                http=self.vargs["http"],
                non_http=self.vargs["non_http"],
                since=self.vargs["since"],
                filter_pattern=self.vargs["filter"],
                force_colorize=self.vargs["force_color"] or None,
                keep_open=not self.vargs["disable_keep_open"],
            )
        elif command == "undeploy": # pragma: no cover
            self.undeploy(
                no_confirm=self.vargs["yes"], remove_logs=self.vargs["remove_logs"]
            )
        elif command == "schedule": # pragma: no cover
            self.schedule()
        elif command == "unschedule": # pragma: no cover
            self.unschedule()
        elif command == "status": # pragma: no cover
            self.status(return_json=self.vargs["json"])
        elif command == "certify": # pragma: no cover
            self.certify(no_confirm=self.vargs["yes"], manual=self.vargs["manual"])
        elif command == "shell": # pragma: no cover
            self.shell()
        elif command == "save-python-settings-file": # pragma: no cover
            self.save_python_settings_file(self.vargs["output_path"])
##
# The Commands
##
def save_python_settings_file(self, output_path=None):
settings_path = output_path or "zappa_settings.py"
print(
"Generating Zappa settings Python file and saving to {}".format(
settings_path
)
)
if not settings_path.endswith("zappa_settings.py"):
raise ValueError("Settings file must be named zappa_settings.py")
zappa_settings_s = self.get_zappa_settings_string()
with open(settings_path, "w") as f_out:
f_out.write(zappa_settings_s)
def package(self, output=None):
"""
Only build the package
"""
# Make sure we're in a venv.
self.check_venv()
# force not to delete the local zip
self.override_stage_config_setting("delete_local_zip", False)
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Create the Lambda Zip
self.create_package(output)
self.callback("zip")
size = human_size(os.path.getsize(self.zip_path))
click.echo(
click.style("Package created", fg="green", bold=True)
+ ": "
+ click.style(self.zip_path, bold=True)
+ " ("
+ size
+ ")"
)
def template(self, lambda_arn, role_arn, output=None, json=False):
"""
Only build the template file.
"""
if not lambda_arn:
raise ClickException("Lambda ARN is required to template.")
if not role_arn:
raise ClickException("Role ARN is required to template.")
self.zappa.credentials_arn = role_arn
# Create the template!
template = self.zappa.create_stack_template(
lambda_arn=lambda_arn,
lambda_name=self.lambda_name,
api_key_required=self.api_key_required,
iam_authorization=self.iam_authorization,
authorizer=self.authorizer,
cors_options=self.cors,
description=self.apigateway_description,
endpoint_configuration=self.endpoint_configuration,
)
if not output:
template_file = (
self.lambda_name + "-template-" + str(int(time.time())) + ".json"
)
else:
template_file = output
with open(template_file, "wb") as out:
out.write(
bytes(template.to_json(indent=None, separators=(",", ":")), "utf-8")
)
if not json:
click.echo(
click.style("Template created", fg="green", bold=True)
+ ": "
+ click.style(template_file, bold=True)
)
else:
with open(template_file, "r") as out:
print(out.read())
    def deploy(self, source_zip=None, docker_image_uri=None):
        """
        Package your project, upload it to S3, register the Lambda function
        and create the API Gateway routes.

        Args:
            source_zip: Optional pre-built package; either an ``s3://`` URL
                or a local file path. When given, the build/upload steps are
                skipped.
            docker_image_uri: Optional container image URI for an image-based
                deployment; also skips the zip build.

        Raises:
            ClickException: if IAM role management fails, the app is already
                deployed, or an S3 upload/copy step fails.
        """
        # Roles are managed when we build the package ourselves or when a
        # Docker image is being deployed; a user-supplied zip alone skips
        # this step (note: condition is `(not source_zip) or docker_image_uri`).
        if not source_zip or docker_image_uri:
            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError as ce:
                    raise ClickException(
                        click.style("Failed", fg="red")
                        + " to "
                        + click.style("manage IAM roles", bold=True)
                        + "!\n"
                        + "You may "
                        + click.style("lack the necessary AWS permissions", bold=True)
                        + " to automatically manage a Zappa execution role.\n"
                        + click.style("Exception reported by AWS:", bold=True)
                        + format(ce)
                        + "\n"
                        + "To fix this, see here: "
                        + click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True,
                        )
                        + "\n"
                    )
        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) > 0:
            raise ClickException(
                "This application is "
                + click.style("already deployed", fg="red")
                + " - did you mean to call "
                + click.style("update", bold=True)
                + "?"
            )
        # Build and upload the package only when neither a prebuilt zip nor
        # a Docker image was supplied.
        if not source_zip and not docker_image_uri:
            # Make sure we're in a venv.
            self.check_venv()
            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()
            # Create the Lambda Zip
            self.create_package()
            self.callback("zip")
            # Upload it to S3
            success = self.zappa.upload_to_s3(
                self.zip_path,
                self.s3_bucket_name,
                disable_progress=self.disable_progress,
            )
            if not success:  # pragma: no cover
                raise ClickException("Unable to upload to S3. Quitting.")
            # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
            if self.stage_config.get("slim_handler", False):
                # https://github.com/Miserlou/Zappa/issues/510
                success = self.zappa.upload_to_s3(
                    self.handler_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException("Unable to upload handler to S3. Quitting.")
                # Copy the project zip to the current project zip
                current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                    self.api_stage, self.project_name
                )
                success = self.zappa.copy_on_s3(
                    src_file_name=self.zip_path,
                    dst_file_name=current_project_name,
                    bucket_name=self.s3_bucket_name,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to copy the zip to be the current project. Quitting."
                    )
                # With a slim handler, Lambda runs the small handler zip and
                # pulls the project archive from S3 at cold start.
                handler_file = self.handler_path
            else:
                handler_file = self.zip_path
        # Fixes https://github.com/Miserlou/Zappa/issues/613
        # If the function already exists, reuse its ARN; only create it when
        # the lookup raises (i.e. it does not exist yet).
        try:
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name
            )
        except botocore.client.ClientError:
            # Register the Lambda function with that zip as the source
            # You'll also need to define the path to your lambda_handler code.
            kwargs = dict(
                handler=self.lambda_handler,
                description=self.lambda_description,
                vpc_config=self.vpc_config,
                dead_letter_config=self.dead_letter_config,
                timeout=self.timeout_seconds,
                memory_size=self.memory_size,
                runtime=self.runtime,
                aws_environment_variables=self.aws_environment_variables,
                aws_kms_key_arn=self.aws_kms_key_arn,
                use_alb=self.use_alb,
                layers=self.layers,
                concurrency=self.lambda_concurrency,
            )
            kwargs["function_name"] = self.lambda_name
            # Pick the code source: docker image > s3 zip > local zip >
            # package we just uploaded.
            if docker_image_uri:
                kwargs["docker_image_uri"] = docker_image_uri
            elif source_zip and source_zip.startswith("s3://"):
                bucket, key_name = parse_s3_url(source_zip)
                kwargs["bucket"] = bucket
                kwargs["s3_key"] = key_name
            elif source_zip and not source_zip.startswith("s3://"):
                with open(source_zip, mode="rb") as fh:
                    byte_stream = fh.read()
                    kwargs["local_zip"] = byte_stream
            else:
                kwargs["bucket"] = self.s3_bucket_name
                kwargs["s3_key"] = handler_file
            self.lambda_arn = self.zappa.create_lambda_function(**kwargs)
        # Schedule events for this deployment
        self.schedule()
        endpoint_url = ""
        deployment_string = (
            click.style("Deployment complete", fg="green", bold=True) + "!"
        )
        if self.use_alb:
            kwargs = dict(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                alb_vpc_config=self.alb_vpc_config,
                timeout=self.timeout_seconds,
            )
            self.zappa.deploy_lambda_alb(**kwargs)
        if self.use_apigateway:
            # Create and configure the API Gateway
            # NOTE(review): the returned template object is not used here
            # directly; update_stack presumably reads the generated stack
            # state -- confirm against zappa.core.
            template = self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration,
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                disable_progress=self.disable_progress,
            )
            api_id = self.zappa.get_api_id(self.lambda_name)
            # Add binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
            # Add payload compression
            if self.stage_config.get("payload_compression", True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get(
                        "payload_minimum_compression_size", 0
                    ),
                )
            # Deploy the API!
            endpoint_url = self.deploy_api_gateway(api_id)
            deployment_string = deployment_string + ": {}".format(endpoint_url)
            # Create/link API key
            if self.api_key_required:
                if self.api_key is None:
                    self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
                else:
                    self.zappa.add_api_stage_to_api_key(
                        api_key=self.api_key, api_id=api_id, stage_name=self.api_stage
                    )
            # Optionally hit the new endpoint once to warm it / verify it.
            if self.stage_config.get("touch", True):
                self.zappa.wait_until_lambda_function_is_ready(
                    function_name=self.lambda_name
                )
                self.touch_endpoint(endpoint_url)
        # Finally, delete the local copy our zip package
        if not source_zip and not docker_image_uri:
            if self.stage_config.get("delete_local_zip", True):
                self.remove_local_zip()
        # Remove the project zip from S3.
        if not source_zip and not docker_image_uri:
            self.remove_uploaded_zip()
        self.callback("post")
        click.echo(deployment_string)
    def update(self, source_zip=None, no_upload=False, docker_image_uri=None):
        """
        Repackage and update the function code.

        Args:
            source_zip: Optional pre-built package; either an ``s3://`` URL
                or a local file path. Skips the local build/upload steps.
            no_upload: Skip building/uploading the package; only update the
                function's configuration and API Gateway state.
            docker_image_uri: Optional container image URI; updates the
                function to run that image.

        Raises:
            ClickException: if an S3 upload/copy step fails.
        """
        if not source_zip and not docker_image_uri:
            # Make sure we're in a venv.
            self.check_venv()
            # Execute the prebuild script
            if self.prebuild_script:
                self.execute_prebuild_script()
            # Temporary version check
            # Warn if the function was last deployed before this cutoff
            # (2016-08-30), i.e. by a much older Zappa.
            try:
                updated_time = 1472581018
                function_response = self.zappa.lambda_client.get_function(
                    FunctionName=self.lambda_name
                )
                conf = function_response["Configuration"]
                last_updated = parser.parse(conf["LastModified"])
                last_updated_unix = time.mktime(last_updated.timetuple())
            except botocore.exceptions.BotoCoreError as e:
                click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0])
                sys.exit(-1)
            except Exception as e:
                # Most likely the function does not exist yet.
                click.echo(
                    click.style("Warning!", fg="red")
                    + " Couldn't get function "
                    + self.lambda_name
                    + " in "
                    + self.zappa.aws_region
                    + " - have you deployed yet?"
                )
                sys.exit(-1)
            if last_updated_unix <= updated_time:
                click.echo(
                    click.style("Warning!", fg="red")
                    + " You may have upgraded Zappa since deploying this application. You will need to "
                    + click.style("redeploy", bold=True)
                    + " for this deployment to work properly!"
                )
            # Make sure the necessary IAM execution roles are available
            if self.manage_roles:
                try:
                    self.zappa.create_iam_roles()
                except botocore.client.ClientError:
                    click.echo(
                        click.style("Failed", fg="red")
                        + " to "
                        + click.style("manage IAM roles", bold=True)
                        + "!"
                    )
                    click.echo(
                        "You may "
                        + click.style("lack the necessary AWS permissions", bold=True)
                        + " to automatically manage a Zappa execution role."
                    )
                    click.echo(
                        "To fix this, see here: "
                        + click.style(
                            "https://github.com/Zappa/Zappa#custom-aws-iam-roles-and-policies-for-deployment",
                            bold=True,
                        )
                    )
                    sys.exit(-1)
            # Create the Lambda Zip,
            if not no_upload:
                self.create_package()
                self.callback("zip")
            # Upload it to S3
            if not no_upload:
                success = self.zappa.upload_to_s3(
                    self.zip_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException("Unable to upload project to S3. Quitting.")
            # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
            if self.stage_config.get("slim_handler", False):
                # https://github.com/Miserlou/Zappa/issues/510
                success = self.zappa.upload_to_s3(
                    self.handler_path,
                    self.s3_bucket_name,
                    disable_progress=self.disable_progress,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to upload handler to S3. Quitting."
                    )
                # Copy the project zip to the current project zip
                current_project_name = "{0!s}_{1!s}_current_project.tar.gz".format(
                    self.api_stage, self.project_name
                )
                success = self.zappa.copy_on_s3(
                    src_file_name=self.zip_path,
                    dst_file_name=current_project_name,
                    bucket_name=self.s3_bucket_name,
                )
                if not success:  # pragma: no cover
                    raise ClickException(
                        "Unable to copy the zip to be the current project. Quitting."
                    )
                handler_file = self.handler_path
            else:
                handler_file = self.zip_path
        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        kwargs = dict(
            bucket=self.s3_bucket_name,
            function_name=self.lambda_name,
            num_revisions=self.num_retained_versions,
            concurrency=self.lambda_concurrency,
        )
        # Pick the code source: docker image > s3 zip > local zip > package
        # we just uploaded (or config-only when --no-upload).
        if docker_image_uri:
            kwargs["docker_image_uri"] = docker_image_uri
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
            self.zappa.wait_until_lambda_function_is_ready(
                function_name=self.lambda_name
            )
        elif source_zip and source_zip.startswith("s3://"):
            bucket, key_name = parse_s3_url(source_zip)
            kwargs.update(dict(bucket=bucket, s3_key=key_name))
            self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        elif source_zip and not source_zip.startswith("s3://"):
            with open(source_zip, mode="rb") as fh:
                byte_stream = fh.read()
                kwargs["local_zip"] = byte_stream
                self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        else:
            if not no_upload:
                kwargs["s3_key"] = handler_file
                self.lambda_arn = self.zappa.update_lambda_function(**kwargs)
        # Remove the uploaded zip from S3, because it is now registered..
        if not source_zip and not no_upload and not docker_image_uri:
            self.remove_uploaded_zip()
        # Update the configuration, in case there are changes.
        self.lambda_arn = self.zappa.update_lambda_configuration(
            lambda_arn=self.lambda_arn,
            function_name=self.lambda_name,
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            aws_environment_variables=self.aws_environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn,
            layers=self.layers,
        )
        # Finally, delete the local copy our zip package
        if not source_zip and not no_upload and not docker_image_uri:
            if self.stage_config.get("delete_local_zip", True):
                self.remove_local_zip()
        if self.use_apigateway:
            self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description,
                endpoint_configuration=self.endpoint_configuration,
            )
            self.zappa.update_stack(
                self.lambda_name,
                self.s3_bucket_name,
                wait=True,
                update_only=True,
                disable_progress=self.disable_progress,
            )
            api_id = self.zappa.get_api_id(self.lambda_name)
            # Update binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id, cors=self.cors)
            else:
                self.zappa.remove_binary_support(api_id=api_id, cors=self.cors)
            if self.stage_config.get("payload_compression", True):
                self.zappa.add_api_compression(
                    api_id=api_id,
                    min_compression_size=self.stage_config.get(
                        "payload_minimum_compression_size", 0
                    ),
                )
            else:
                self.zappa.remove_api_compression(api_id=api_id)
            # It looks a bit like we might actually be using this just to get the URL,
            # but we're also updating a few of the APIGW settings.
            endpoint_url = self.deploy_api_gateway(api_id)
            # A configured custom domain takes precedence for display.
            if self.stage_config.get("domain", None):
                endpoint_url = self.stage_config.get("domain")
        else:
            endpoint_url = None
        self.schedule()
        # Update any cognito pool with the lambda arn
        # do this after schedule as schedule clears the lambda policy and we need to add one
        self.update_cognito_triggers()
        self.callback("post")
        if endpoint_url and "https://" not in endpoint_url:
            endpoint_url = "https://" + endpoint_url
        if self.base_path:
            endpoint_url += "/" + self.base_path
        deployed_string = (
            "Your updated Zappa deployment is "
            + click.style("live", fg="green", bold=True)
            + "!"
        )
        if self.use_apigateway:
            deployed_string = (
                deployed_string
                + ": "
                + click.style("{}".format(endpoint_url), bold=True)
            )
            api_url = None
            # When a custom domain is shown, also display the raw APIGW URL.
            if endpoint_url and "amazonaws.com" not in endpoint_url:
                api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)
                if endpoint_url != api_url:
                    deployed_string = deployed_string + " (" + api_url + ")"
            if self.stage_config.get("touch", True):
                self.zappa.wait_until_lambda_function_is_ready(
                    function_name=self.lambda_name
                )
                if api_url:
                    self.touch_endpoint(api_url)
                elif endpoint_url:
                    self.touch_endpoint(endpoint_url)
        click.echo(deployed_string)
def rollback(self, revision):
"""
Rollsback the currently deploy lambda code to a previous revision.
"""
print("Rolling back..")
self.zappa.rollback_lambda_function_version(
self.lambda_name, versions_back=revision
)
print("Done!")
    def tail(
        self,
        since,
        filter_pattern,
        limit=10000,
        keep_open=True,
        colorize=True,
        http=False,
        non_http=False,
        force_colorize=False,
    ):
        """
        Tail this function's logs.
        if keep_open, do so repeatedly, printing any new logs

        Args:
            since: Start time string, converted via string_to_timestamp().
            filter_pattern: CloudWatch Logs filter pattern.
            limit: Maximum events fetched per poll.
            keep_open: Poll once per second forever; otherwise fetch once.
            colorize, http, non_http, force_colorize: Passed through to
                print_logs() to control filtering and coloring.
        """
        try:
            since_stamp = string_to_timestamp(since)
            last_since = since_stamp
            while True:
                # NOTE(review): each poll refetches from the original
                # since_stamp and de-duplicates client-side via last_since.
                new_logs = self.zappa.fetch_logs(
                    self.lambda_name,
                    start_time=since_stamp,
                    limit=limit,
                    filter_pattern=filter_pattern,
                )
                # Only show events newer than the last one already printed.
                new_logs = [e for e in new_logs if e["timestamp"] > last_since]
                self.print_logs(new_logs, colorize, http, non_http, force_colorize)
                if not keep_open:
                    break
                if new_logs:
                    last_since = new_logs[-1]["timestamp"]
                time.sleep(1)
        except KeyboardInterrupt:  # pragma: no cover
            # Die gracefully
            try:
                sys.exit(0)
            except SystemExit:
                # sys.exit() may be intercepted up-stack; hard-exit with the
                # conventional 130 (terminated by Ctrl-C) status instead.
                os._exit(130)
def undeploy(self, no_confirm=False, remove_logs=False):
"""
Tear down an existing deployment.
"""
if not no_confirm: # pragma: no cover
confirm = input("Are you sure you want to undeploy? [y/n] ")
if confirm != "y":
return
if self.use_alb:
self.zappa.undeploy_lambda_alb(self.lambda_name)
if self.use_apigateway:
if remove_logs:
self.zappa.remove_api_gateway_logs(self.lambda_name)
domain_name = self.stage_config.get("domain", None)
base_path = self.stage_config.get("base_path", None)
# Only remove the api key when not specified
if self.api_key_required and self.api_key is None:
api_id = self.zappa.get_api_id(self.lambda_name)
self.zappa.remove_api_key(api_id, self.api_stage)
gateway_id = self.zappa.undeploy_api_gateway(
self.lambda_name, domain_name=domain_name, base_path=base_path
)
self.unschedule() # removes event triggers, including warm up event.
self.zappa.delete_lambda_function(self.lambda_name)
if remove_logs:
self.zappa.remove_lambda_function_logs(self.lambda_name)
click.echo(click.style("Done", fg="green", bold=True) + "!")
def update_cognito_triggers(self):
"""
Update any cognito triggers
"""
if self.cognito:
user_pool = self.cognito.get("user_pool")
triggers = self.cognito.get("triggers", [])
lambda_configs = set()
for trigger in triggers:
lambda_configs.add(trigger["source"].split("_")[0])
self.zappa.update_cognito(
self.lambda_name, user_pool, lambda_configs, self.lambda_arn
)
def schedule(self):
"""
Given a a list of functions and a schedule to execute them,
setup up regular execution.
"""
events = self.stage_config.get("events", [])
if events:
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
for event in events:
self.collision_warning(event.get("function"))
if self.stage_config.get("keep_warm", True):
if not events:
events = []
keep_warm_rate = self.stage_config.get(
"keep_warm_expression", "rate(4 minutes)"
)
events.append(
{
"name": "zappa-keep-warm",
"function": "handler.keep_warm_callback",
"expression": keep_warm_rate,
"description": "Zappa Keep Warm - {}".format(self.lambda_name),
}
)
if events:
try:
function_response = self.zappa.lambda_client.get_function(
FunctionName=self.lambda_name
)
except botocore.exceptions.ClientError as e: # pragma: no cover
click.echo(
click.style("Function does not exist", fg="yellow")
+ ", please "
+ click.style("deploy", bold=True)
+ "first. Ex:"
+ click.style("zappa deploy {}.".format(self.api_stage), bold=True)
)
sys.exit(-1)
print("Scheduling..")
self.zappa.schedule_events(
lambda_arn=function_response["Configuration"]["FunctionArn"],
lambda_name=self.lambda_name,
events=events,
)
# Add async tasks SNS
if self.stage_config.get(
"async_source", None
) == "sns" and self.stage_config.get("async_resources", True):
self.lambda_arn = self.zappa.get_lambda_function(
function_name=self.lambda_name
)
topic_arn = self.zappa.create_async_sns_topic(
lambda_name=self.lambda_name, lambda_arn=self.lambda_arn
)
click.echo("SNS Topic created: %s" % topic_arn)
# Add async tasks DynamoDB
table_name = self.stage_config.get("async_response_table", False)
read_capacity = self.stage_config.get("async_response_table_read_capacity", 1)
write_capacity = self.stage_config.get("async_response_table_write_capacity", 1)
if table_name and self.stage_config.get("async_resources", True):
created, response_table = self.zappa.create_async_dynamodb_table(
table_name, read_capacity, write_capacity
)
if created:
click.echo("DynamoDB table created: %s" % table_name)
else:
click.echo("DynamoDB table exists: %s" % table_name)
provisioned_throughput = response_table["Table"][
"ProvisionedThroughput"
]
if (
provisioned_throughput["ReadCapacityUnits"] != read_capacity
or provisioned_throughput["WriteCapacityUnits"] != write_capacity
):
click.echo(
click.style(
"\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(
table_name
),
fg="red",
)
)
    def unschedule(self):
        """
        Given a list of scheduled functions,
        tear down their regular execution.
        """
        # Run even if events are not defined to remove previously existing ones (thus default to []).
        events = self.stage_config.get("events", [])
        if not isinstance(events, list):  # pragma: no cover
            print("Events must be supplied as a list.")
            return
        function_arn = None
        try:
            function_response = self.zappa.lambda_client.get_function(
                FunctionName=self.lambda_name
            )
            function_arn = function_response["Configuration"]["FunctionArn"]
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # NOTE(review): the message says "Proceeding to unschedule" but
            # raising here aborts instead -- confirm intended behavior.
            raise ClickException(
                "Function does not exist, you should deploy first. Ex: zappa deploy {}. "
                "Proceeding to unschedule CloudWatch based events.".format(
                    self.api_stage
                )
            )
        print("Unscheduling..")
        self.zappa.unschedule_events(
            lambda_name=self.lambda_name,
            lambda_arn=function_arn,
            events=events,
        )
        # Remove async task SNS
        if self.stage_config.get(
            "async_source", None
        ) == "sns" and self.stage_config.get("async_resources", True):
            removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo("SNS Topic removed: %s" % ", ".join(removed_arns))
    def invoke(self, function_name, raw_python=False, command=None, no_color=False):
        """
        Invoke a remote function.

        Args:
            function_name: Modular function path, raw Python string, or
                management-command string, depending on the mode.
            raw_python: Send the payload as {"raw_command": ...} to execute
                the string directly.
            command: Payload key to use (e.g. "manage" for Django management
                commands); defaults to "command".
            no_color: Print the raw (bytes) log result without formatting.

        Raises:
            ClickException: if the invocation reports a FunctionError.
        """
        # There are three likely scenarios for 'command' here:
        # command, which is a modular function path
        # raw_command, which is a string of python to execute directly
        # manage, which is a Django-specific management command invocation
        key = command if command is not None else "command"
        if raw_python:
            command = {"raw_command": function_name}
        else:
            command = {key: function_name}
        # Can't use hjson
        import json as json
        response = self.zappa.invoke_lambda_function(
            self.lambda_name,
            json.dumps(command),
            invocation_type="RequestResponse",
        )
        if "LogResult" in response:
            if no_color:
                # NOTE(review): prints the bytes object's repr (b'...').
                print(base64.b64decode(response["LogResult"]))
            else:
                decoded = base64.b64decode(response["LogResult"]).decode()
                formatted = self.format_invoke_command(decoded)
                colorized = self.colorize_invoke_command(formatted)
                print(colorized)
        else:
            print(response)
        # For a successful request FunctionError is not in response.
        # https://github.com/Miserlou/Zappa/pull/1254/
        if "FunctionError" in response:
            raise ClickException(
                "{} error occurred while invoking command.".format(
                    response["FunctionError"]
                )
            )
def format_invoke_command(self, string):
"""
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
"""
string = string.replace("\\n", "\n")
formated_response = ""
for line in string.splitlines():
if line.startswith("REPORT"):
line = line.replace("\t", "\n")
if line.startswith("[DEBUG]"):
line = line.replace("\t", " ")
formated_response += line + "\n"
formated_response = formated_response.replace("\n\n", "\n")
return formated_response
    def colorize_invoke_command(self, string):
        """
        Apply various heuristics to return a colorized version the invoke
        command string. If these fail, simply return the string in plaintext.
        Inspired by colorize_log_entry().
        """
        final_string = string
        try:
            # Line headers
            try:
                for token in ["START", "END", "REPORT", "[DEBUG]"]:
                    if token in final_string:
                        # Plain tokens are wrapped in brackets and matched as
                        # whole words; "[DEBUG]" already has brackets, so it
                        # is styled as-is and regex-escaped for matching.
                        format_string = "[{}]"
                        # match whole words only
                        pattern = r"\b{}\b"
                        if token == "[DEBUG]":
                            format_string = "{}"
                            pattern = re.escape(token)
                        repl = click.style(
                            format_string.format(token), bold=True, fg="cyan"
                        )
                        # For the escaped "[DEBUG]" case pattern.format() is a
                        # no-op (the escaped pattern contains no "{}").
                        final_string = re.sub(pattern.format(token), repl, final_string)
            except Exception:  # pragma: no cover
                pass
            # Green bold Tokens
            try:
                for token in [
                    "Zappa Event:",
                    "RequestId:",
                    "Version:",
                    "Duration:",
                    "Billed",
                    "Memory Size:",
                    "Max Memory Used:",
                ]:
                    if token in final_string:
                        final_string = final_string.replace(
                            token, click.style(token, bold=True, fg="green")
                        )
            except Exception:  # pragma: no cover
                pass
            # UUIDs
            # Heuristic: a whitespace-separated token with exactly four dashes
            # whose remaining characters are alphanumeric is treated as a UUID.
            for token in final_string.replace("\t", " ").split(" "):
                try:
                    if token.count("-") == 4 and token.replace("-", "").isalnum():
                        final_string = final_string.replace(
                            token, click.style(token, fg="magenta")
                        )
                except Exception:  # pragma: no cover
                    pass
            return final_string
        except Exception:
            return string
    def status(self, return_json=False):
        """
        Describe the status of the current deployment.

        Collects Lambda configuration, 24h CloudWatch invocation/error
        statistics, API Gateway URLs/keys and scheduled event rules, then
        either prints a table or (with return_json) machine-readable JSON.
        Always returns True.
        """
        def tabular_print(title, value):
            """
            Convenience function for printing formatted table items.
            """
            click.echo(
                "%-*s%s" % (32, click.style("\t" + title, fg="green") + ":", str(value))
            )
            return
        # Lambda Env Details
        lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if not lambda_versions:
            raise ClickException(
                click.style(
                    "No Lambda %s detected in %s - have you deployed yet?"
                    % (self.lambda_name, self.zappa.aws_region),
                    fg="red",
                )
            )
        # OrderedDict preserves display order of the table rows.
        status_dict = collections.OrderedDict()
        status_dict["Lambda Versions"] = len(lambda_versions)
        function_response = self.zappa.lambda_client.get_function(
            FunctionName=self.lambda_name
        )
        conf = function_response["Configuration"]
        self.lambda_arn = conf["FunctionArn"]
        status_dict["Lambda Name"] = self.lambda_name
        status_dict["Lambda ARN"] = self.lambda_arn
        status_dict["Lambda Role ARN"] = conf["Role"]
        status_dict["Lambda Code Size"] = conf["CodeSize"]
        status_dict["Lambda Version"] = conf["Version"]
        status_dict["Lambda Last Modified"] = conf["LastModified"]
        status_dict["Lambda Memory Size"] = conf["MemorySize"]
        status_dict["Lambda Timeout"] = conf["Timeout"]
        # Handler & Runtime won't be present for lambda Docker deployments
        # https://github.com/Miserlou/Zappa/issues/2188
        status_dict["Lambda Handler"] = conf.get("Handler", "")
        status_dict["Lambda Runtime"] = conf.get("Runtime", "")
        if "VpcConfig" in conf.keys():
            status_dict["Lambda VPC ID"] = conf.get("VpcConfig", {}).get(
                "VpcId", "Not assigned"
            )
        else:
            status_dict["Lambda VPC ID"] = None
        # Calculated statistics
        # Sum of invocations over the last 24h; best-effort, defaults to 0.
        try:
            function_invocations = self.zappa.cloudwatch.get_metric_statistics(
                Namespace="AWS/Lambda",
                MetricName="Invocations",
                StartTime=datetime.utcnow() - timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1440,
                Statistics=["Sum"],
                Dimensions=[
                    {"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
                ],
            )["Datapoints"][0]["Sum"]
        except Exception as e:
            function_invocations = 0
        try:
            function_errors = self.zappa.cloudwatch.get_metric_statistics(
                Namespace="AWS/Lambda",
                MetricName="Errors",
                StartTime=datetime.utcnow() - timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1440,
                Statistics=["Sum"],
                Dimensions=[
                    {"Name": "FunctionName", "Value": "{}".format(self.lambda_name)}
                ],
            )["Datapoints"][0]["Sum"]
        except Exception as e:
            function_errors = 0
        # Bare except guards against division by zero when there were no
        # invocations.
        try:
            error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
        except:
            error_rate = "Error calculating"
        status_dict["Invocations (24h)"] = int(function_invocations)
        status_dict["Errors (24h)"] = int(function_errors)
        status_dict["Error Rate (24h)"] = error_rate
        # URLs
        if self.use_apigateway:
            api_url = self.zappa.get_api_url(self.lambda_name, self.api_stage)
            status_dict["API Gateway URL"] = api_url
            # Api Keys
            api_id = self.zappa.get_api_id(self.lambda_name)
            for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
                status_dict["API Gateway x-api-key"] = api_key
            # There literally isn't a better way to do this.
            # AWS provides no way to tie a APIGW domain name to its Lambda function.
            domain_url = self.stage_config.get("domain", None)
            base_path = self.stage_config.get("base_path", None)
            if domain_url:
                status_dict["Domain URL"] = "https://" + domain_url
                if base_path:
                    status_dict["Domain URL"] += "/" + base_path
            else:
                status_dict["Domain URL"] = "None Supplied"
        # Scheduled Events
        event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
        status_dict["Num. Event Rules"] = len(event_rules)
        if len(event_rules) > 0:
            status_dict["Events"] = []
            for rule in event_rules:
                event_dict = {}
                rule_name = rule["Name"]
                event_dict["Event Rule Name"] = rule_name
                event_dict["Event Rule Schedule"] = rule.get("ScheduleExpression", None)
                event_dict["Event Rule State"] = rule.get("State", None).title()
                event_dict["Event Rule ARN"] = rule.get("Arn", None)
                status_dict["Events"].append(event_dict)
        if return_json:
            # Putting the status in machine readable format
            # https://github.com/Miserlou/Zappa/issues/407
            print(json.dumpsJSON(status_dict))
        else:
            click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
            for k, v in status_dict.items():
                if k == "Events":
                    # Events are a list of dicts
                    for event in v:
                        for item_k, item_v in event.items():
                            tabular_print(item_k, item_v)
                else:
                    tabular_print(k, v)
        # TODO: S3/SQS/etc. type events?
        return True
def check_stage_name(self, stage_name):
"""
Make sure the stage name matches the AWS-allowed pattern
(calls to apigateway_client.create_deployment, will fail with error
message "ClientError: An error occurred (BadRequestException) when
calling the CreateDeployment operation: Stage name only allows
a-zA-Z0-9_" if the pattern does not match)
"""
if self.stage_name_env_pattern.match(stage_name):
return True
raise ValueError("AWS requires stage name to match a-zA-Z0-9_")
def check_environment(self, environment):
"""
Make sure the environment contains only strings
(since putenv needs a string)
"""
non_strings = []
for (k, v) in environment.items():
if not isinstance(v, basestring):
non_strings.append(k)
if non_strings:
raise ValueError(
"The following environment variables are not strings: {}".format(
", ".join(non_strings)
)
)
else:
return True
def init(self, settings_file="zappa_settings.json"):
"""
Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.
This should probably be broken up into few separate componants once it's stable.
Testing these inputs requires monkeypatching with mock, which isn't pretty.
"""
# Make sure we're in a venv.
self.check_venv()
# Ensure that we don't already have a zappa_settings file.
if os.path.isfile(settings_file):
raise ClickException(
"This project already has a "
+ click.style("{0!s} file".format(settings_file), fg="red", bold=True)
+ "!"
)
# Explain system.
click.echo(
click.style(
"""\n███████╗ █████╗ ██████╗ ██████╗ █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
███╔╝ ███████║██████╔╝██████╔╝███████║
███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║ ██║██║ ██║ ██║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""",
fg="green",
bold=True,
)
)
click.echo(
click.style("Welcome to ", bold=True)
+ click.style("Zappa", fg="green", bold=True)
+ click.style("!\n", bold=True)
)
click.echo(
click.style("Zappa", bold=True)
+ " is a system for running server-less Python web applications"
" on AWS Lambda and AWS API Gateway."
)
click.echo(
"This `init` command will help you create and configure your new Zappa deployment."
)
click.echo("Let's get started!\n")
# Create Env
while True:
click.echo(
"Your Zappa configuration can support multiple production stages, like '"
+ click.style("dev", bold=True)
+ "', '"
+ click.style("staging", bold=True)
+ "', and '"
+ click.style("production", bold=True)
+ "'."
)
env = (
input("What do you want to call this environment (default 'dev'): ")
or "dev"
)
try:
self.check_stage_name(env)
break
except ValueError:
click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))
# Detect AWS profiles and regions
# If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack
session = botocore.session.Session()
config = session.full_config
profiles = config.get("profiles", {})
profile_names = list(profiles.keys())
click.echo(
"\nAWS Lambda and API Gateway are only available in certain regions. "
"Let's check to make sure you have a profile set up in one that will work."
)
if not profile_names:
profile_name, profile = None, None
click.echo(
"We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}".format(
click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)
)
)
elif len(profile_names) == 1:
profile_name = profile_names[0]
profile = profiles[profile_name]
click.echo(
"Okay, using profile {}!".format(click.style(profile_name, bold=True))
)
else:
if "default" in profile_names:
default_profile = [p for p in profile_names if p == "default"][0]
else:
default_profile = profile_names[0]
while True:
profile_name = (
input(
"We found the following profiles: {}, and {}. "
"Which would you like us to use? (default '{}'): ".format(
", ".join(profile_names[:-1]),
profile_names[-1],
default_profile,
)
)
or default_profile
)
if profile_name in profiles:
profile = profiles[profile_name]
break
else:
click.echo("Please enter a valid name for your AWS profile.")
profile_region = profile.get("region") if profile else None
# Create Bucket
click.echo(
"\nYour Zappa deployments will need to be uploaded to a "
+ click.style("private S3 bucket", bold=True)
+ "."
)
click.echo("If you don't have a bucket yet, we'll create one for you too.")
default_bucket = "zappa-" + "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
)
while True:
bucket = (
input(
"What do you want to call your bucket? (default '%s'): "
% default_bucket
)
or default_bucket
)
if is_valid_bucket_name(bucket):
break
click.echo(click.style("Invalid bucket name!", bold=True))
click.echo("S3 buckets must be named according to the following rules:")
click.echo(
"""* Bucket names must be unique across all existing bucket names in Amazon S3.
* Bucket names must comply with DNS naming conventions.
* Bucket names must be at least 3 and no more than 63 characters long.
* Bucket names must not contain uppercase characters or underscores.
* Bucket names must start with a lowercase letter or number.
* Bucket names must be a series of one or more labels. Adjacent labels are separated
by a single period (.). Bucket names can contain lowercase letters, numbers, and
hyphens. Each label must start and end with a lowercase letter or a number.
* Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL
wildcard certificate only matches buckets that don't contain periods. To work around
this, use HTTP or write your own certificate verification logic. We recommend that
you do not use periods (".") in bucket names when using virtual hosted–style buckets.
"""
)
# Detect Django/Flask
try: # pragma: no cover
import django
has_django = True
except ImportError as e:
has_django = False
try: # pragma: no cover
import flask
has_flask = True
except ImportError as e:
has_flask = False
print("")
# App-specific
if has_django: # pragma: no cover
click.echo(
"It looks like this is a "
+ click.style("Django", bold=True)
+ " application!"
)
click.echo(
"What is the "
+ click.style("module path", bold=True)
+ " to your projects's Django settings?"
)
django_settings = None
matches = detect_django_settings()
while django_settings in [None, ""]:
if matches:
click.echo(
"We discovered: "
+ click.style(
", ".join("{}".format(i) for v, i in enumerate(matches)),
bold=True,
)
)
django_settings = (
input(
"Where are your project's settings? (default '%s'): "
% matches[0]
)
or matches[0]
)
else:
click.echo(
"(This will likely be something like 'your_project.settings')"
)
django_settings = input("Where are your project's settings?: ")
django_settings = django_settings.replace("'", "")
django_settings = django_settings.replace('"', "")
else:
matches = None
if has_flask:
click.echo(
"It looks like this is a "
+ click.style("Flask", bold=True)
+ " application."
)
matches = detect_flask_apps()
click.echo(
"What's the "
+ click.style("modular path", bold=True)
+ " to your app's function?"
)
click.echo("This will likely be something like 'your_module.app'.")
app_function = None
while app_function in [None, ""]:
if matches:
click.echo(
"We discovered: "
+ click.style(
", ".join("{}".format(i) for v, i in enumerate(matches)),
bold=True,
)
)
app_function = (
input(
"Where is your app's function? (default '%s'): "
% matches[0]
)
or matches[0]
)
else:
app_function = input("Where is your app's function?: ")
app_function = app_function.replace("'", "")
app_function = app_function.replace('"', "")
# TODO: Create VPC?
# Memory size? Time limit?
# Domain? LE keys? Region?
# 'Advanced Settings' mode?
# Globalize
click.echo(
"\nYou can optionally deploy to "
+ click.style("all available regions", bold=True)
+ " in order to provide fast global service."
)
click.echo(
"If you are using Zappa for the first time, you probably don't want to do this!"
)
global_deployment = False
while True:
global_type = input(
"Would you like to deploy this application "
+ click.style("globally", bold=True)
+ "? (default 'n') [y/n/(p)rimary]: "
)
if not global_type:
break
if global_type.lower() in ["y", "yes", "p", "primary"]:
global_deployment = True
break
if global_type.lower() in ["n", "no"]:
global_deployment = False
break
# The given environment name
zappa_settings = {
env: {
"profile_name": profile_name,
"s3_bucket": bucket,
"runtime": get_venv_from_python_version(),
"project_name": self.get_project_name(),
}
}
if profile_region:
zappa_settings[env]["aws_region"] = profile_region
if has_django:
zappa_settings[env]["django_settings"] = django_settings
else:
zappa_settings[env]["app_function"] = app_function
# Global Region Deployment
if global_deployment:
additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region]
# Create additional stages
if global_type.lower() in ["p", "primary"]:
additional_regions = [r for r in additional_regions if "-1" in r]
for region in additional_regions:
env_name = env + "_" + region.replace("-", "_")
g_env = {env_name: {"extends": env, "aws_region": region}}
zappa_settings.update(g_env)
import json as json # hjson is fine for loading, not fine for writing.
zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)
click.echo(
"\nOkay, here's your "
+ click.style("zappa_settings.json", bold=True)
+ ":\n"
)
click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))
confirm = (
input(
"\nDoes this look "
+ click.style("okay", bold=True, fg="green")
+ "? (default 'y') [y/n]: "
)
or "yes"
)
if confirm[0] not in ["y", "Y", "yes", "YES"]:
click.echo(
""
+ click.style("Sorry", bold=True, fg="red")
+ " to hear that! Please init again."
)
return
# Write
with open("zappa_settings.json", "w") as zappa_settings_file:
zappa_settings_file.write(zappa_settings_json)
if global_deployment:
click.echo(
"\n"
+ click.style("Done", bold=True)
+ "! You can also "
+ click.style("deploy all", bold=True)
+ " by executing:\n"
)
click.echo(click.style("\t$ zappa deploy --all", bold=True))
click.echo(
"\nAfter that, you can "
+ click.style("update", bold=True)
+ " your application code with:\n"
)
click.echo(click.style("\t$ zappa update --all", bold=True))
else:
click.echo(
"\n"
+ click.style("Done", bold=True)
+ "! Now you can "
+ click.style("deploy", bold=True)
+ " your Zappa application by executing:\n"
)
click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))
click.echo(
"\nAfter that, you can "
+ click.style("update", bold=True)
+ " your application code with:\n"
)
click.echo(click.style("\t$ zappa update %s" % env, bold=True))
click.echo(
"\nTo learn more, check out our project page on "
+ click.style("GitHub", bold=True)
+ " here: "
+ click.style("https://github.com/Zappa/Zappa", fg="cyan", bold=True)
)
click.echo(
"and stop by our "
+ click.style("Slack", bold=True)
+ " channel here: "
+ click.style("https://zappateam.slack.com", fg="cyan", bold=True)
)
click.echo("\nEnjoy!,")
click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
return
    def certify(self, no_confirm=True, manual=False):
        """
        Register or update a domain certificate for this env.

        Exactly one certificate source must be configured in the stage
        settings: 'lets_encrypt_key' (Let's Encrypt), 'certificate' plus
        'certificate_key' and 'certificate_chain' (custom SSL files), or
        'certificate_arn' (ACM).

        :param no_confirm: When True (default), skip the interactive
            confirmation prompt.
        :param manual: Forwarded to the Let's Encrypt flow.
        :return: None
        :raises ClickException: If no domain is configured, required
            certificate settings are missing, or the app isn't deployed.
        """
        if not self.domain:
            raise ClickException(
                "Can't certify a domain without "
                + click.style("domain", fg="red", bold=True)
                + " configured!"
            )
        if not no_confirm:  # pragma: no cover
            confirm = input("Are you sure you want to certify? [y/n] ")
            if confirm != "y":
                return
        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) == 0:
            raise ClickException(
                "This application "
                + click.style("isn't deployed yet", fg="red")
                + " - did you mean to call "
                + click.style("deploy", bold=True)
                + "?"
            )
        # Pull every certificate-related knob from the stage config up front.
        account_key_location = self.stage_config.get("lets_encrypt_key", None)
        cert_location = self.stage_config.get("certificate", None)
        cert_key_location = self.stage_config.get("certificate_key", None)
        cert_chain_location = self.stage_config.get("certificate_chain", None)
        cert_arn = self.stage_config.get("certificate_arn", None)
        base_path = self.stage_config.get("base_path", None)
        # These are sensitive (PEM contents are read into memory below).
        certificate_body = None
        certificate_private_key = None
        certificate_chain = None
        # Prepare for custom Let's Encrypt
        if not cert_location and not cert_arn:
            if not account_key_location:
                raise ClickException(
                    "Can't certify a domain without "
                    + click.style("lets_encrypt_key", fg="red", bold=True)
                    + " or "
                    + click.style("certificate", fg="red", bold=True)
                    + " or "
                    + click.style("certificate_arn", fg="red", bold=True)
                    + " configured!"
                )
            # Install the account key to <tempdir>/account.key, fetching from
            # S3 when the setting is an s3:// URL, otherwise copying locally.
            from .letsencrypt import gettempdir
            if account_key_location.startswith("s3://"):
                bucket, key_name = parse_s3_url(account_key_location)
                self.zappa.s3_client.download_file(
                    bucket, key_name, os.path.join(gettempdir(), "account.key")
                )
            else:
                from shutil import copyfile
                copyfile(
                    account_key_location, os.path.join(gettempdir(), "account.key")
                )
        # Prepare for Custom SSL
        elif not account_key_location and not cert_arn:
            if not cert_location or not cert_key_location or not cert_chain_location:
                raise ClickException(
                    "Can't certify a domain without "
                    + click.style(
                        "certificate, certificate_key and certificate_chain",
                        fg="red",
                        bold=True,
                    )
                    + " configured!"
                )
            # Read the supplied certificates.
            with open(cert_location) as f:
                certificate_body = f.read()
            with open(cert_key_location) as f:
                certificate_private_key = f.read()
            with open(cert_chain_location) as f:
                certificate_chain = f.read()
        click.echo(
            "Certifying domain "
            + click.style(self.domain, fg="green", bold=True)
            + ".."
        )
        # Get cert and update domain.
        # Let's Encrypt
        if not cert_location and not cert_arn:
            from .letsencrypt import get_cert_and_update_domain
            cert_success = get_cert_and_update_domain(
                self.zappa, self.lambda_name, self.api_stage, self.domain, manual
            )
        # Custom SSL / ACM
        else:
            route53 = self.stage_config.get("route53_enabled", True)
            # Create the custom domain if it doesn't exist yet, else update it.
            if not self.zappa.get_domain_name(self.domain, route53=route53):
                dns_name = self.zappa.create_domain_name(
                    domain_name=self.domain,
                    certificate_name=self.domain + "-Zappa-Cert",
                    certificate_body=certificate_body,
                    certificate_private_key=certificate_private_key,
                    certificate_chain=certificate_chain,
                    certificate_arn=cert_arn,
                    lambda_name=self.lambda_name,
                    stage=self.api_stage,
                    base_path=base_path,
                )
                if route53:
                    self.zappa.update_route53_records(self.domain, dns_name)
                print(
                    "Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be "
                    "created and propagated through AWS, but it requires no further work on your part."
                )
            else:
                self.zappa.update_domain_name(
                    domain_name=self.domain,
                    certificate_name=self.domain + "-Zappa-Cert",
                    certificate_body=certificate_body,
                    certificate_private_key=certificate_private_key,
                    certificate_chain=certificate_chain,
                    certificate_arn=cert_arn,
                    lambda_name=self.lambda_name,
                    stage=self.api_stage,
                    route53=route53,
                    base_path=base_path,
                )
            cert_success = True
        if cert_success:
            click.echo(
                "Certificate " + click.style("updated", fg="green", bold=True) + "!"
            )
        else:
            click.echo(
                click.style("Failed", fg="red", bold=True)
                + " to generate or install certificate! :("
            )
            click.echo("\n==============\n")
            shamelessly_promote()
##
# Shell
##
def shell(self):
"""
Spawn a debug shell.
"""
click.echo(
click.style("NOTICE!", fg="yellow", bold=True)
+ " This is a "
+ click.style("local", fg="green", bold=True)
+ " shell, inside a "
+ click.style("Zappa", bold=True)
+ " object!"
)
self.zappa.shell()
return
##
# Utility
##
    def callback(self, position):
        """
        Allows the execution of custom code between creation of the zip file and deployment to AWS.

        The hook is looked up in the stage config under 'callbacks' using
        *position* as the key; its value must be a modular path like
        'my_module.my_function'. The resolved function is called with this
        CLI instance as its only argument.

        :param position: Name of the callback hook to run.
        :return: None
        :raises ClickException: If the callback module or function can't be
            resolved.
        """
        callbacks = self.stage_config.get("callbacks", {})
        callback = callbacks.get(position)
        if callback:
            # Split "pkg.mod.func" into module path and function name.
            (mod_path, cb_func_name) = callback.rsplit(".", 1)
            try:  # Prefer callback in working directory
                if mod_path.count(".") >= 1:  # Callback function is nested in a folder
                    (mod_folder_path, mod_name) = mod_path.rsplit(".", 1)
                    mod_folder_path_fragments = mod_folder_path.split(".")
                    working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
                else:
                    mod_name = mod_path
                    working_dir = os.getcwd()
                working_dir_importer = pkgutil.get_importer(working_dir)
                # NOTE(review): importer.find_module()/load_module() are
                # deprecated (removed in Python 3.12) — the AttributeError in
                # the fallback below also catches importers lacking them.
                # TODO confirm behavior on modern Pythons.
                module_ = working_dir_importer.find_module(mod_name).load_module(
                    mod_name
                )
            except (ImportError, AttributeError):
                try:  # Callback func might be in virtualenv
                    module_ = importlib.import_module(mod_path)
                except ImportError:  # pragma: no cover
                    raise ClickException(
                        click.style("Failed ", fg="red")
                        + "to "
                        + click.style(
                            "import {position} callback ".format(position=position),
                            bold=True,
                        )
                        + 'module: "{mod_path}"'.format(
                            mod_path=click.style(mod_path, bold=True)
                        )
                    )
            if not hasattr(module_, cb_func_name):  # pragma: no cover
                raise ClickException(
                    click.style("Failed ", fg="red")
                    + "to "
                    + click.style(
                        "find {position} callback ".format(position=position), bold=True
                    )
                    + 'function: "{cb_func_name}" '.format(
                        cb_func_name=click.style(cb_func_name, bold=True)
                    )
                    + 'in module "{mod_path}"'.format(mod_path=mod_path)
                )
            cb_func = getattr(module_, cb_func_name)
            cb_func(self)  # Call the function passing self
def check_for_update(self):
"""
Print a warning if there's a new Zappa version available.
"""
try:
version = pkg_resources.require("zappa2")[0].version
updateable = check_new_version_available(version)
if updateable:
click.echo(
click.style("Important!", fg="yellow", bold=True)
+ " A new version of "
+ click.style("Zappa", bold=True)
+ " is available!"
)
click.echo(
"Upgrade with: "
+ click.style("pip install zappa --upgrade", bold=True)
)
click.echo(
"Visit the project page on GitHub to see the latest changes: "
+ click.style("https://github.com/Zappa/Zappa", bold=True)
)
except Exception as e: # pragma: no cover
print(e)
return
    def load_settings(self, settings_file=None, session=None):
        """
        Load the local zappa_settings file.
        An existing boto session can be supplied, though this is likely for testing purposes.
        Returns the loaded Zappa object.

        :param settings_file: Optional explicit path; when omitted, the
            JSON/YAML/TOML file in the cwd is discovered automatically.
        :param session: Optional pre-built boto session, mainly for tests.
        :raises ClickException: If the settings file or the current stage is
            missing, or num_retained_versions has the wrong type/value.
        :raises ValueError: If a stage name contains invalid characters.
        """
        # Ensure we're passed a valid settings file.
        if not settings_file:
            settings_file = self.get_json_or_yaml_settings()
        if not os.path.isfile(settings_file):
            raise ClickException("Please configure your zappa_settings file.")
        # Load up file
        self.load_settings_file(settings_file)
        # Make sure that the stages are valid names:
        for stage_name in self.zappa_settings.keys():
            try:
                self.check_stage_name(stage_name)
            except ValueError:
                raise ValueError(
                    "API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(
                        stage_name
                    )
                )
        # Make sure that this stage is in our settings
        if self.api_stage not in self.zappa_settings.keys():
            raise ClickException(
                "Please define stage '{0!s}' in your Zappa settings.".format(
                    self.api_stage
                )
            )
        # We need a working title for this project. Use one if supplied, else cwd dirname.
        if "project_name" in self.stage_config:  # pragma: no cover
            # If the name is invalid, this will throw an exception with message up stack
            self.project_name = validate_name(self.stage_config["project_name"])
        else:
            self.project_name = self.get_project_name()
        # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
        # Assume that we already have validated the name beforehand.
        # Related: https://github.com/Miserlou/Zappa/pull/664
        #          https://github.com/Miserlou/Zappa/issues/678
        # And various others from Slack.
        self.lambda_name = slugify.slugify(self.project_name + "-" + self.api_stage)
        # Load stage-specific settings
        # A random bucket name is generated when 's3_bucket' isn't configured.
        self.s3_bucket_name = self.stage_config.get(
            "s3_bucket",
            "zappa-"
            + "".join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(9)
            ),
        )
        self.vpc_config = self.stage_config.get("vpc_config", {})
        self.memory_size = self.stage_config.get("memory_size", 512)
        self.app_function = self.stage_config.get("app_function", None)
        self.exception_handler = self.stage_config.get("exception_handler", None)
        self.aws_region = self.stage_config.get("aws_region", None)
        self.debug = self.stage_config.get("debug", True)
        self.prebuild_script = self.stage_config.get("prebuild_script", None)
        self.profile_name = self.stage_config.get("profile_name", None)
        self.log_level = self.stage_config.get("log_level", "DEBUG")
        self.domain = self.stage_config.get("domain", None)
        self.base_path = self.stage_config.get("base_path", None)
        self.timeout_seconds = self.stage_config.get("timeout_seconds", 30)
        dead_letter_arn = self.stage_config.get("dead_letter_arn", "")
        self.dead_letter_config = (
            {"TargetArn": dead_letter_arn} if dead_letter_arn else {}
        )
        self.cognito = self.stage_config.get("cognito", None)
        self.num_retained_versions = self.stage_config.get(
            "num_retained_versions", None
        )
        # Check for valid values of num_retained_versions: must be a positive
        # int, or None (meaning keep everything).
        if (
            self.num_retained_versions is not None
            and type(self.num_retained_versions) is not int
        ):
            raise ClickException(
                "Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s"
                % type(self.num_retained_versions)
            )
        elif type(self.num_retained_versions) is int and self.num_retained_versions < 1:
            raise ClickException(
                "The value for num_retained_versions in the zappa_settings.json should be greater than 0."
            )
        # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
        # https://github.com/Miserlou/Zappa/issues/490
        # https://github.com/Miserlou/Zappa/issues/493
        self.use_apigateway = self.stage_config.get("use_apigateway", True)
        if self.use_apigateway:
            self.use_apigateway = self.stage_config.get("apigateway_enabled", True)
            self.apigateway_description = self.stage_config.get(
                "apigateway_description", None
            )
        self.lambda_handler = self.stage_config.get(
            "lambda_handler", "handler.lambda_handler"
        )
        # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
        self.remote_env_bucket = self.stage_config.get("remote_env_bucket", None)
        self.remote_env_file = self.stage_config.get("remote_env_file", None)
        self.remote_env = self.stage_config.get("remote_env", None)
        self.settings_file = self.stage_config.get("settings_file", None)
        self.django_settings = self.stage_config.get("django_settings", None)
        self.manage_roles = self.stage_config.get("manage_roles", True)
        self.binary_support = self.stage_config.get("binary_support", True)
        self.api_key_required = self.stage_config.get("api_key_required", False)
        self.api_key = self.stage_config.get("api_key")
        self.endpoint_configuration = self.stage_config.get(
            "endpoint_configuration", None
        )
        self.iam_authorization = self.stage_config.get("iam_authorization", False)
        self.cors = self.stage_config.get("cors", False)
        self.lambda_description = self.stage_config.get(
            "lambda_description", "Zappa Deployment"
        )
        self.lambda_concurrency = self.stage_config.get("lambda_concurrency", None)
        self.environment_variables = self.stage_config.get("environment_variables", {})
        self.aws_environment_variables = self.stage_config.get(
            "aws_environment_variables", {}
        )
        # Raises on invalid environment-variable values before we get further.
        self.check_environment(self.environment_variables)
        self.authorizer = self.stage_config.get("authorizer", {})
        self.runtime = self.stage_config.get(
            "runtime", get_runtime_from_python_version()
        )
        self.aws_kms_key_arn = self.stage_config.get("aws_kms_key_arn", "")
        self.context_header_mappings = self.stage_config.get(
            "context_header_mappings", {}
        )
        self.xray_tracing = self.stage_config.get("xray_tracing", False)
        self.desired_role_arn = self.stage_config.get("role_arn")
        self.layers = self.stage_config.get("layers", None)
        # Load ALB-related settings
        self.use_alb = self.stage_config.get("alb_enabled", False)
        self.alb_vpc_config = self.stage_config.get("alb_vpc_config", {})
        # Additional tags
        self.tags = self.stage_config.get("tags", {})
        desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole"
        # Build the core Zappa object with everything derived above.
        self.zappa = Zappa(
            boto_session=session,
            profile_name=self.profile_name,
            aws_region=self.aws_region,
            load_credentials=self.load_credentials,
            desired_role_name=desired_role_name,
            desired_role_arn=self.desired_role_arn,
            runtime=self.runtime,
            tags=self.tags,
            endpoint_urls=self.stage_config.get("aws_endpoint_urls", {}),
            xray_tracing=self.xray_tracing,
        )
        # Mirror any recognized custom settings straight onto the Zappa object.
        for setting in CUSTOM_SETTINGS:
            if setting in self.stage_config:
                setting_val = self.stage_config[setting]
                # Read the policy file contents.
                if setting.endswith("policy"):
                    with open(setting_val, "r") as f:
                        setting_val = f.read()
                setattr(self.zappa, setting, setting_val)
        if self.app_function:
            # Warn if app_function shadows a stdlib/package name, or points at
            # a file instead of a module attribute.
            self.collision_warning(self.app_function)
            if self.app_function[-3:] == ".py":
                click.echo(
                    click.style("Warning!", fg="red", bold=True)
                    + " Your app_function is pointing to a "
                    + click.style("file and not a function", bold=True)
                    + "! It should probably be something like 'my_file.app', not 'my_file.py'!"
                )
        return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
"""
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
"""
zs_json = settings_name + ".json"
zs_yml = settings_name + ".yml"
zs_yaml = settings_name + ".yaml"
zs_toml = settings_name + ".toml"
# Must have at least one
if (
not os.path.isfile(zs_json)
and not os.path.isfile(zs_yml)
and not os.path.isfile(zs_yaml)
and not os.path.isfile(zs_toml)
):
raise ClickException(
"Please configure a zappa_settings file or call `zappa init`."
)
# Prefer JSON
if os.path.isfile(zs_json):
settings_file = zs_json
elif os.path.isfile(zs_toml):
settings_file = zs_toml
elif os.path.isfile(zs_yml):
settings_file = zs_yml
else:
settings_file = zs_yaml
return settings_file
def load_settings_file(self, settings_file=None):
"""
Load our settings file.
"""
if not settings_file:
settings_file = self.get_json_or_yaml_settings()
if not os.path.isfile(settings_file):
raise ClickException(
"Please configure your zappa_settings file or call `zappa init`."
)
path, ext = os.path.splitext(settings_file)
if ext == ".yml" or ext == ".yaml":
with open(settings_file) as yaml_file:
try:
self.zappa_settings = yaml.safe_load(yaml_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings YAML. It may be malformed."
)
elif ext == ".toml":
with open(settings_file) as toml_file:
try:
self.zappa_settings = toml.load(toml_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings TOML. It may be malformed."
)
else:
with open(settings_file) as json_file:
try:
self.zappa_settings = json.load(json_file)
except ValueError: # pragma: no cover
raise ValueError(
"Unable to load the Zappa settings JSON. It may be malformed."
)
    def create_package(self, output=None):
        """
        Ensure that the package can be properly configured,
        and then create it.

        Builds the Lambda deployment archive(s) via self.zappa, stores the
        resulting paths in self.zip_path (and self.handler_path when
        'slim_handler' is enabled), and appends the generated
        zappa_settings.py (plus the Django shim, if configured) to the
        archive that Lambda will actually execute.

        :param output: Optional explicit output path for the archive.
        """
        # Create the Lambda zip package (includes project and virtualenvironment)
        # Also define the path the handler file so it can be copied to the zip
        # root for Lambda.
        current_file = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe()))
        )
        handler_file = (
            os.sep.join(current_file.split(os.sep)[0:]) + os.sep + "handler.py"
        )
        # Create the zip file(s)
        if self.stage_config.get("slim_handler", False):
            # Create two zips. One with the application and the other with just the handler.
            # https://github.com/Miserlou/Zappa/issues/510
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                use_precompiled_packages=self.stage_config.get(
                    "use_precompiled_packages", True
                ),
                exclude=self.stage_config.get("exclude", []),
                exclude_glob=self.stage_config.get("exclude_glob", []),
                disable_progress=self.disable_progress,
                archive_format="tarball",
            )
            # Make sure the normal venv is not included in the handler's zip
            exclude = self.stage_config.get("exclude", [])
            cur_venv = self.zappa.get_current_venv()
            exclude.append(cur_venv.split("/")[-1])
            self.handler_path = self.zappa.create_lambda_zip(
                prefix="handler_{0!s}".format(self.lambda_name),
                venv=self.zappa.create_handler_venv(),
                handler_file=handler_file,
                slim_handler=True,
                exclude=exclude,
                exclude_glob=self.stage_config.get("exclude_glob", []),
                output=output,
                disable_progress=self.disable_progress,
            )
        else:
            # This could be python3.6 optimized.
            exclude = self.stage_config.get(
                "exclude", ["boto3", "dateutil", "botocore", "s3transfer", "concurrent"]
            )
            # Create a single zip that has the handler and application
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                handler_file=handler_file,
                use_precompiled_packages=self.stage_config.get(
                    "use_precompiled_packages", True
                ),
                exclude=exclude,
                exclude_glob=self.stage_config.get("exclude_glob", []),
                output=output,
                disable_progress=self.disable_progress,
            )
        # Warn if this is too large for Lambda. (52428800 bytes == 50 MB.)
        file_stats = os.stat(self.zip_path)
        if file_stats.st_size > 52428800:  # pragma: no cover
            print(
                "\n\nWarning: Application zip package is likely to be too large for AWS Lambda. "
                'Try setting "slim_handler" to true in your Zappa settings file.\n\n'
            )
        # Throw custom settings into the zip that handles requests
        if self.stage_config.get("slim_handler", False):
            handler_zip = self.handler_path
        else:
            handler_zip = self.zip_path
        with zipfile.ZipFile(handler_zip, "a") as lambda_zip:
            settings_s = self.get_zappa_settings_string()
            # Copy our Django app into root of our package.
            # It doesn't work otherwise.
            if self.django_settings:
                base = __file__.rsplit(os.sep, 1)[0]
                # NOTE(review): "".join() of a single string is a no-op here.
                django_py = "".join(os.path.join(base, "ext", "django_zappa.py"))
                lambda_zip.write(django_py, "django_zappa_app.py")
            # Lambda requires a specific chmod
            temp_settings = tempfile.NamedTemporaryFile(delete=False)
            os.chmod(temp_settings.name, 0o644)
            temp_settings.write(bytes(settings_s, "utf-8"))
            temp_settings.close()
            lambda_zip.write(temp_settings.name, "zappa_settings.py")
            os.unlink(temp_settings.name)
    def get_zappa_settings_string(self):
        """
        Render the stage configuration as the Python source of the
        zappa_settings.py module that gets packaged into the Lambda archive.

        :return: A string of Python assignments, one setting per line.
        :raises ClickException: If app_function is not a modular path.
        :raises ValueError: If any environment-variable key is not ASCII.
        """
        settings_s = "# Generated by Zappa\n"
        if self.app_function:
            if "." not in self.app_function:  # pragma: no cover
                raise ClickException(
                    "Your "
                    + click.style("app_function", fg="red", bold=True)
                    + " value is not a modular path."
                    + " It needs to be in the format `"
                    + click.style("your_module.your_app_object", bold=True)
                    + "`."
                )
            app_module, app_function = self.app_function.rsplit(".", 1)
            settings_s = (
                settings_s
                + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(
                    app_module, app_function
                )
            )
        if self.exception_handler:
            settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
        else:
            settings_s += "EXCEPTION_HANDLER=None\n"
        if self.debug:
            settings_s = settings_s + "DEBUG=True\n"
        else:
            settings_s = settings_s + "DEBUG=False\n"
        settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))
        if self.binary_support:
            settings_s = settings_s + "BINARY_SUPPORT=True\n"
        else:
            settings_s = settings_s + "BINARY_SUPPORT=False\n"
        head_map_dict = {}
        head_map_dict.update(dict(self.context_header_mappings))
        settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(head_map_dict)
        # If we're on a domain, we don't need to define the /<<env>> in
        # the WSGI PATH
        if self.domain:
            settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
        else:
            settings_s = settings_s + "DOMAIN=None\n"
        if self.base_path:
            settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
        else:
            settings_s = settings_s + "BASE_PATH=None\n"
        # Pass through remote config bucket and path
        if self.remote_env:
            settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(self.remote_env)
        # DEPRECATED. Use remote_env instead.
        elif self.remote_env_bucket and self.remote_env_file:
            settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                self.remote_env_bucket, self.remote_env_file
            )
        # Local envs
        env_dict = {}
        if self.aws_region:
            env_dict["AWS_REGION"] = self.aws_region
        env_dict.update(dict(self.environment_variables))
        # Environment variable keys must be ascii
        # https://github.com/Miserlou/Zappa/issues/604
        # https://github.com/Miserlou/Zappa/issues/998
        try:
            env_dict = dict(
                (k.encode("ascii").decode("ascii"), v) for (k, v) in env_dict.items()
            )
        except Exception:
            raise ValueError("Environment variable keys must be ascii.")
        settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(env_dict)
        # We can be environment-aware
        settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
        settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
        if self.settings_file:
            settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format(
                (self.settings_file)
            )
        else:
            settings_s = settings_s + "SETTINGS_FILE=None\n"
        if self.django_settings:
            settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format(
                (self.django_settings)
            )
        else:
            settings_s = settings_s + "DJANGO_SETTINGS=None\n"
        # If slim handler, path to project zip
        if self.stage_config.get("slim_handler", False):
            settings_s += (
                "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                    self.s3_bucket_name, self.api_stage, self.project_name
                )
            )
            # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
            # and tell the handler we are the slim_handler
            # https://github.com/Miserlou/Zappa/issues/776
            settings_s += "SLIM_HANDLER=True\n"
            include = self.stage_config.get("include", [])
            if len(include) >= 1:
                settings_s += "INCLUDE=" + str(include) + "\n"
        # AWS Events function mapping: event-source ARN -> handler function.
        event_mapping = {}
        events = self.stage_config.get("events", [])
        for event in events:
            arn = event.get("event_source", {}).get("arn")
            function = event.get("function")
            if arn and function:
                event_mapping[arn] = function
        settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
        # Map Lex bot events, keyed by "<intent>:<invocation_source>".
        bot_events = self.stage_config.get("bot_events", [])
        bot_events_mapping = {}
        for bot_event in bot_events:
            event_source = bot_event.get("event_source", {})
            intent = event_source.get("intent")
            invocation_source = event_source.get("invocation_source")
            function = bot_event.get("function")
            if intent and invocation_source and function:
                bot_events_mapping[
                    str(intent) + ":" + str(invocation_source)
                ] = function
        settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(
            bot_events_mapping
        )
        # Map cognito triggers: trigger source -> handler function.
        cognito_trigger_mapping = {}
        cognito_config = self.stage_config.get("cognito", {})
        triggers = cognito_config.get("triggers", [])
        for trigger in triggers:
            source = trigger.get("source")
            function = trigger.get("function")
            if source and function:
                cognito_trigger_mapping[source] = function
        settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(
            cognito_trigger_mapping
        )
        # Authorizer config
        authorizer_function = self.authorizer.get("function", None)
        if authorizer_function:
            settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
        # async response
        async_response_table = self.stage_config.get("async_response_table", "")
        settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)
        return settings_s
def remove_local_zip(self):
"""
Remove our local zip file.
"""
if self.stage_config.get("delete_local_zip", True):
try:
if os.path.isfile(self.zip_path):
os.remove(self.zip_path)
if self.handler_path and os.path.isfile(self.handler_path):
os.remove(self.handler_path)
except Exception as e: # pragma: no cover
sys.exit(-1)
def remove_uploaded_zip(self):
"""
Remove the local and S3 zip file after uploading and updating.
"""
# Remove the uploaded zip from S3, because it is now registered..
if self.stage_config.get("delete_s3_zip", True):
self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
if self.stage_config.get("slim_handler", False):
# Need to keep the project zip as the slim handler uses it.
self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
# Only try to remove uploaded zip if we're running a command that has loaded credentials
if self.load_credentials:
self.remove_uploaded_zip()
self.remove_local_zip()
def print_logs(
self, logs, colorize=True, http=False, non_http=False, force_colorize=None
):
"""
Parse, filter and print logs to the console.
"""
for log in logs:
timestamp = log["timestamp"]
message = log["message"]
if "START RequestId" in message:
continue
if "REPORT RequestId" in message:
continue
if "END RequestId" in message:
continue
if not colorize and not force_colorize:
if http:
if self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
elif non_http:
if not self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
else:
print("[" + str(timestamp) + "] " + message.strip())
else:
if http:
if self.is_http_log_entry(message.strip()):
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
elif non_http:
if not self.is_http_log_entry(message.strip()):
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
else:
click.echo(
click.style("[", fg="cyan")
+ click.style(str(timestamp), bold=True)
+ click.style("]", fg="cyan")
+ self.colorize_log_entry(message.strip()),
color=force_colorize,
)
def is_http_log_entry(self, string):
"""
Determines if a log entry is an HTTP-formatted log string or not.
"""
# Debug event filter
if "Zappa Event" in string:
return False
# IP address filter
for token in string.replace("\t", " ").split(" "):
try:
if token.count(".") == 3 and token.replace(".", "").isnumeric():
return True
except Exception: # pragma: no cover
pass
return False
def get_project_name(self):
return slugify.slugify(os.getcwd().split(os.sep)[-1])[:15]
def colorize_log_entry(self, string):
    """
    Apply various heuristics to return a colorized version of a string.
    If these fail, simply return the string in plaintext.
    """
    final_string = string
    try:
        # First, do stuff in square brackets
        inside_squares = re.findall(r"\[([^]]*)\]", string)
        for token in inside_squares:
            if token in ["CRITICAL", "ERROR", "WARNING", "DEBUG", "INFO", "NOTSET"]:
                # Log-level tokens get a bold cyan body.
                final_string = final_string.replace(
                    "[" + token + "]",
                    click.style("[", fg="cyan")
                    + click.style(token, fg="cyan", bold=True)
                    + click.style("]", fg="cyan"),
                )
            else:
                final_string = final_string.replace(
                    "[" + token + "]",
                    click.style("[", fg="cyan")
                    + click.style(token, bold=True)
                    + click.style("]", fg="cyan"),
                )
        # Then do quoted strings
        quotes = re.findall(r'"[^"]*"', string)
        for token in quotes:
            final_string = final_string.replace(
                token, click.style(token, fg="yellow")
            )
        # And UUIDs
        for token in final_string.replace("\t", " ").split(" "):
            try:
                # Four dashes plus alphanumerics approximates a UUID.
                if token.count("-") == 4 and token.replace("-", "").isalnum():
                    final_string = final_string.replace(
                        token, click.style(token, fg="magenta")
                    )
            except Exception:  # pragma: no cover
                pass
            # And IP addresses
            try:
                if token.count(".") == 3 and token.replace(".", "").isnumeric():
                    final_string = final_string.replace(
                        token, click.style(token, fg="red")
                    )
            except Exception:  # pragma: no cover
                pass
            # And status codes
            try:
                if token in ["200"]:
                    final_string = final_string.replace(
                        token, click.style(token, fg="green")
                    )
                if token in ["400", "401", "403", "404", "405", "500"]:
                    final_string = final_string.replace(
                        token, click.style(token, fg="red")
                    )
            except Exception:  # pragma: no cover
                pass
        # And Zappa Events
        try:
            if "Zappa Event:" in final_string:
                final_string = final_string.replace(
                    "Zappa Event:",
                    click.style("Zappa Event:", bold=True, fg="green"),
                )
        except Exception:  # pragma: no cover
            pass
        # And dates
        for token in final_string.split("\t"):
            try:
                # parser.parse() raising is the filter here; the parsed
                # value itself is unused.
                is_date = parser.parse(token)
                final_string = final_string.replace(
                    token, click.style(token, fg="green")
                )
            except Exception:  # pragma: no cover
                pass
        # NOTE(review): the second replace below is a no-op as written;
        # upstream collapses double spaces ("  " -> " ") — confirm the
        # literal was not mangled in transit.
        final_string = final_string.replace("\t", " ").replace(" ", " ")
        if final_string[0] != " ":
            final_string = " " + final_string
        return final_string
    except Exception as e:  # pragma: no cover
        # Any unexpected failure falls back to the uncolorized input.
        return string
def execute_prebuild_script(self):
    """
    Parse and execute the prebuild_script from the zappa_settings.

    The setting is a dotted path "package.module.function"; the module is
    first looked up relative to the working directory, then as an installed
    (virtualenv) module. Raises ClickException if the module or the
    function cannot be found.
    """
    import importlib.util

    (pb_mod_path, pb_func) = self.prebuild_script.rsplit(".", 1)
    try:  # Prefer prebuild script in working directory
        if pb_mod_path.count(".") >= 1:
            # Prebuild script func is nested in a folder
            (mod_folder_path, mod_name) = pb_mod_path.rsplit(".", 1)
            mod_folder_path_fragments = mod_folder_path.split(".")
            working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
        else:
            mod_name = pb_mod_path
            working_dir = os.getcwd()
        working_dir_importer = pkgutil.get_importer(working_dir)
        # BUG FIX: find_module()/load_module() were deprecated and removed in
        # Python 3.12; use the find_spec()/exec_module() protocol instead.
        # A missing module yields spec=None, and the resulting AttributeError
        # is caught below exactly like the old code path.
        spec = working_dir_importer.find_spec(mod_name)
        module_ = importlib.util.module_from_spec(spec)
        # Mirror load_module()'s side effect of registering the module.
        sys.modules[mod_name] = module_
        spec.loader.exec_module(module_)
    except (ImportError, AttributeError):
        try:  # Prebuild func might be in virtualenv
            module_ = importlib.import_module(pb_mod_path)
        except ImportError:  # pragma: no cover
            raise ClickException(
                click.style("Failed ", fg="red")
                + "to "
                + click.style("import prebuild script ", bold=True)
                + 'module: "{pb_mod_path}"'.format(
                    pb_mod_path=click.style(pb_mod_path, bold=True)
                )
            )
    if not hasattr(module_, pb_func):  # pragma: no cover
        raise ClickException(
            click.style("Failed ", fg="red")
            + "to "
            + click.style("find prebuild script ", bold=True)
            + 'function: "{pb_func}" '.format(
                pb_func=click.style(pb_func, bold=True)
            )
            + 'in module "{pb_mod_path}"'.format(pb_mod_path=pb_mod_path)
        )
    prebuild_function = getattr(module_, pb_func)
    prebuild_function()  # Call the function
def collision_warning(self, item):
    """
    Given a string, print a warning if this could
    collide with a Zappa core package module.
    Use for app functions and events.
    """
    reserved_prefixes = (
        "zappa.",
        "wsgi.",
        "middleware.",
        "handler.",
        "util.",
        "letsencrypt.",
        "cli.",
    )
    # At most one prefix can match, since they all differ from the start.
    colliding = next(
        (prefix for prefix in reserved_prefixes if item.startswith(prefix)), None
    )
    if colliding:
        click.echo(
            click.style("Warning!", fg="red", bold=True)
            + " You may have a namespace collision between "
            + click.style(item, bold=True)
            + " and "
            + click.style(colliding, bold=True)
            + "! You may want to rename that file."
        )
def deploy_api_gateway(self, api_id):
    """Deploy the API Gateway stage described by the current stage config
    and return the resulting endpoint URL."""
    get_option = self.stage_config.get
    return self.zappa.deploy_api_gateway(
        api_id=api_id,
        stage_name=self.api_stage,
        cache_cluster_enabled=get_option("cache_cluster_enabled", False),
        cache_cluster_size=str(get_option("cache_cluster_size", 0.5)),
        cloudwatch_log_level=get_option("cloudwatch_log_level", "OFF"),
        cloudwatch_data_trace=get_option("cloudwatch_data_trace", False),
        cloudwatch_metrics_enabled=get_option("cloudwatch_metrics_enabled", False),
        cache_cluster_ttl=get_option("cache_cluster_ttl", 300),
        cache_cluster_encrypted=get_option("cache_cluster_encrypted", False),
    )
def check_venv(self):
    """Ensure we're inside a virtualenv."""
    # Explicitly opted out via --no-venv.
    if self.vargs and self.vargs.get("no_venv"):
        return
    # Fall back to the class for `init`, when no settings are loaded yet.
    venv_source = self.zappa if self.zappa else Zappa
    venv = venv_source.get_current_venv()
    if venv:
        return
    raise ClickException(
        click.style("Zappa", bold=True)
        + " requires an "
        + click.style("active virtual environment", bold=True, fg="red")
        + "!\n"
        + "Learn more about virtual environments here: "
        + click.style(
            "http://docs.python-guide.org/en/latest/dev/virtualenvs/",
            bold=False,
            fg="cyan",
        )
    )
def silence(self):
    """
    Route all stdout and stderr to null.

    Uses a single devnull handle for both streams; the previous version
    opened os.devnull twice, wasting a file descriptor.
    """
    devnull = open(os.devnull, "w")
    sys.stdout = devnull
    sys.stderr = devnull
def touch_endpoint(self, endpoint_url):
    """
    Test the deployed endpoint with a GET request.
    """
    # Private APIGW endpoints most likely can't be reached by a deployer
    # unless they're connected to the VPC by VPN. Instead of trying
    # connect to the service, print a warning and let the user know
    # to check it manually.
    # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565
    if "PRIVATE" in self.stage_config.get("endpoint_configuration", []):
        print(
            click.style("Warning!", fg="yellow", bold=True)
            + " Since you're deploying a private API Gateway endpoint,"
            " Zappa cannot determine if your function is returning "
            " a correct status code. You should check your API's response"
            " manually before considering this deployment complete."
        )
        return

    touch_path = self.stage_config.get("touch_path", "/")
    target_url = endpoint_url + touch_path
    response = requests.get(target_url)
    # Really large packages can take 60-90s to become ready; API Gateway
    # returns 504 until then. Keep retrying while we see 504 — five more
    # attempts, matching the original `i <= 4` loop.
    retries_left = 5
    while response.status_code == 504 and retries_left > 0:
        response = requests.get(target_url)
        retries_left -= 1
    if response.status_code >= 500:
        raise ClickException(
            click.style("Warning!", fg="red", bold=True)
            + " Status check on the deployed lambda failed."
            + " A GET request to '"
            + touch_path
            + "' yielded a "
            + click.style(str(response.status_code), fg="red", bold=True)
            + " response code."
        )
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """

    def green(text):
        # Shorthand for the bold-green styling used for keywords below.
        return click.style(text, fg="green", bold=True)

    def cyan_link(url):
        # Shorthand for the bold-cyan styling used for URLs below.
        return click.style(url, fg="cyan", bold=True)

    click.echo(
        "Need " + green("help") + "? Found a " + green("bug")
        + "? Let us " + green("know") + "! :D"
    )
    click.echo(
        "File bug reports on "
        + click.style("GitHub", bold=True)
        + " here: "
        + cyan_link("https://github.com/Zappa/Zappa")
    )
    click.echo(
        "And join our "
        + click.style("Slack", bold=True)
        + " channel here: "
        + cyan_link("https://zappateam.slack.com")
    )
    click.echo("Love!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
def disable_click_colors():
    """
    Set a Click context where colors are disabled. Creates a throwaway BaseCommand
    to play nicely with the Context constructor.
    The intended side-effect here is that click.echo() checks this context and will
    suppress colors.
    https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39
    """
    # The command name is irrelevant; Context just requires *some* BaseCommand.
    ctx = Context(BaseCommand("AllYourBaseAreBelongToUs"))
    ctx.color = False
    # Push onto click's global context stack so subsequent click.echo() calls see it.
    push_context(ctx)
def handle():  # pragma: no cover
    """
    Main program execution handler.

    Runs the Zappa CLI, making sure on_exit() cleanup fires on normal exit,
    Ctrl-C, and unexpected errors.
    """
    # BUG FIX: initialize `cli` before the try block — if ZappaCLI() itself
    # raised, every handler below referenced an unbound name (NameError).
    cli = None
    try:
        cli = ZappaCLI()
        sys.exit(cli.handle())
    except SystemExit as e:  # pragma: no cover
        if cli is not None:
            cli.on_exit()
        sys.exit(e.code)
    except KeyboardInterrupt:  # pragma: no cover
        if cli is not None:
            cli.on_exit()
        # 130 = 128 + SIGINT, the conventional exit code for Ctrl-C.
        sys.exit(130)
    except Exception:
        if cli is not None:
            cli.on_exit()
        click.echo(
            "Oh no! An " + click.style("error occurred", fg="red", bold=True) + "! :("
        )
        click.echo("\n==============\n")
        import traceback

        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()
        sys.exit(-1)
# Script entry point: delegate to the exception-handling wrapper.
if __name__ == "__main__":  # pragma: no cover
    handle()
# ZappaDock
[](https://pepy.tech/project/zappadock)
### Install
It's dead simple :
```
$ pip install zappadock
```
## Usage
1. Check that the Docker daemon is running by typing `docker info` command from a terminal.
2. Set your AWS credentials in environmental variables or in the `~/.aws` folder. See the [Amazon Docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#environment-variables) for more information.
3. Run `zappadock` in the directory you wish to run your Zappa commands.
Your directory will be loaded in a docker container, and a virtual environment will be created and activated.
4. To run the built-in servers:
* Django:
`python manage.py runserver 0.0.0.0:8000`
* Flask:
`flask run --host=0.0.0.0 --port=8000`
If you have any problems, open an issue and we'll figure it out.
## About
This package makes dealing with [Zappa](https://github.com/zappa/Zappa) a walk in the park.
#### How ?
Zappa runs Flask/Django web apps on AWS Lambda.
We LOVE Zappa. However it's not the MOST user-friendly application ever.
#### Why ?
You see, Zappa builds all your requirements before uploading your app to AWS Lambda. However, pip downloads the packages that are compatible with the computer it is running on, so when Zappa uploads it to AWS Lambda, many packages don't work (notably `psycopg2` among others).
#### What's the solution ?
The solution recommended by [The Django Guide for Zappa](https://romandc.com/zappa-django-guide/) is to run your Zappa commands in a docker container similar to the one that AWS Lambda uses.
This will ensure that all the dependencies will be AWS Lambda compatible.
This ZappaDock streamlines the workflow.
#### What Does ZappaDock Do ?
ZappaDock does 3 things.
1. Run a docker container with your code mounted.
2. Load your AWS Credentials from the `~/.aws` folder and environmental variables into the container.
3. Create and activate a virtual environment inside the container.
So now you can test and deploy your code confident that it will work once deployed.
## Contributing
I mostly made this for myself. If you want to help make this a masterpiece, be a sport and contribute.
Thanks!
| zappadock | /zappadock-0.1.10.tar.gz/zappadock-0.1.10/README.md | README.md |
<div align="center">
<img src="https://user-images.githubusercontent.com/98690785/222817990-3fc05ad4-989c-437e-877d-cffdac794fef.png">
[𝗙𝗘𝗔𝗧𝗨𝗥𝗘𝗦](#features) • [𝗜𝗡𝗦𝗧𝗔𝗟𝗟](#install)
# 🇿🇦🇵🇵🇪🇷
*Is a python library for text styling*
</div>
<div align="left">
## Features
- • ⌛ Choose the waiting time you want to add to your text
- • ✏️ Animated text with effects
- • ⚡ Effects with gradients (in development)
## Install
<a href="https://linux.org/"> On Linux </a>
```bash
pip install zapper
```
<a href="https://www.microsoft.com/ch-ch/windows"> On Windows </a>
```bash
pip3 install zapper
```
</div> | zapper-cli | /zapper-cli-1.0.2.tar.gz/zapper-cli-1.0.2/README.md | README.md |
# zappix - a package for Zabbix sender and get.
zappix aims to be a drop-in replacement for zabbix_get as well as zabbix_sender.
Its components are available not only as a module but from the command line as well.
zappix requires Python3 and is guaranteed to work with 3.6.
## Instalation
zappix is not dependant on any third party modules.
The easiest way to install it with pip:
```sh
pip install zappix
```
# Usage
As mentioned earlier - zappix can be used both as a module inside of an application, as well as from the Command Line Interface.
## As a module
At the moment zappix has two classes: Sender and Get. Both of which can be imported in the following manner:
```python
>>> from zappix.sender import Sender
>>> from zappix.get import Get
```
Then you can send or get some data. Assuming both Zabbix Agent and Server run on localhost and default ports:
```python
>>> getter = Get("127.0.0.1")
>>> getter.get_value("agent.ping")
1
>>> sender = Sender("127.0.0.1")
>>> sender.send_value('testhost', 'test', 1)
{"processed": 1, "failed": 0, "total": 1, "seconds spent": 0.005}
```
## CLI
To use this utility from the command line, you need to invoke python with the -m flag, followed by the module name and required parameters:
```sh
python -m zappix.sender -z 127.0.0.1 -s testhost -k testkey -o 1
```
## Testing
If you wish to contribute it's good to know how to conduct tests.
You can go with mocked tests only or add integration tests as well.
To enable integration tests, set the envvar `ZAPPIX_TEST_INTEGRATION=yes`
After if you wish to proceed with services in docker containers run the following:
```shell
docker run -d -e "ZBX_CACHEUPDATEFREQUENCY=1" -p 10051:10051 -p 80:80 zabbix/zabbix-appliance
docker run -d -e "ZBX_SERVER_HOST=0.0.0.0/0" -p 10050:10050 zabbix/zabbix-agent
```
Note that the zabbix-appliance might take a while to start. Once both containers are up and running, just run `tox`.
If you have your custom Zabbix services, it is possible to configure tests via envvars to connect to those:
| envvar | usage |
| --------------- | ------------------------------------------------------------------------------------------------------------------ |
| ZAPPIX_AGENT | IP address or DNS name of running Zabbix agent |
| ZAPPIX_SERVER | IP address or DNS name of running Zabbix Server |
| ZAPPIX_API | URL of Zabbix fronted. Schema is required |
| ZAPPIX_API_USER | User for creating entities via API. Should have RW permissions to a Host group with ID=2 - Usually 'Linux Servers' |
| ZAPPIX_API_PASS | Password for that user |
| zappix | /zappix-1.0.1.tar.gz/zappix-1.0.1/README.md | README.md |
# Zappy - distributed processing with NumPy and Zarr
Zappy is for distributed processing of chunked NumPy arrays on engines like [Pywren], Apache Spark, and Apache Beam.
[](https://travis-ci.org/lasersonlab/zappy)
[](https://opensource.org/licenses/Apache-2.0)
[](https://coveralls.io/github/lasersonlab/zappy)
[](https://pypi.python.org/pypi/zappy/)
The `zappy.base` module defines a `ZappyArray` class that exposes the same interface as `numpy.ndarray`, and which
is backed by distributed storage and processing. The array is broken into chunks, and is typically loaded from [Zarr],
and each chunk is processed independently.
There are a few engines provided:
* **direct** - for eager in-memory processing
* **spark** - for processing using Spark
* **beam** - for processing using Beam or Google Dataflow
* **executor** - for processing using Python's [concurrent.futures.Executor], of which [Pywren] is a notable implementation
Beam currently only runs on Python 2.
Full coverage of the `numpy.ndarray` interface is _not_ provided. Only enough has been implemented to support running
parts of [Scanpy], as demonstrated in the [Single Cell Experiments] repo.
## Installation
```
pip install zappy
```
Alternatively, zappy can be installed using [Conda](https://conda.io/docs/) (most easily obtained via the [Miniconda Python distribution](https://conda.io/miniconda.html)):
```
conda install -c conda-forge zappy
```
## Demo
Take a look at the rendered [demo Jupyter notebook](demo.ipynb), or try it out yourself as follows.
Create and activate a Python 3 virtualenv, and install the requirements:
```
python3 -m venv venv
. venv/bin/activate
pip install -r requirements.txt
pip install -e .
pip install s3fs jupyter
```
Then run the notebook with:
```
jupyter notebook demo.ipynb
```
## Testing
There is a test suite for all the engines, covering both Python 2 and 3.
Run everything in one go with tox:
```
pip install tox
tox
```
Formatting:
```
pip install black
black zappy tests/* *.py
```
Coverage:
```
pip install pytest-cov
pytest --cov-report html --cov=zappy
open htmlcov/index.html
```
## Publishing
```
pip install twine
python setup.py sdist
twine upload -r pypi dist/zappy-0.1.0.tar.gz
```
If successful, the package will be available on [PyPI].
[Scanpy]: https://scanpy.readthedocs.io/
[Single Cell Experiments]: https://github.com/lasersonlab/single-cell-experiments
[concurrent.futures.Executor]: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor
[PyPI]: https://pypi.org/project/zappy/
[Pywren]: http://pywren.io/
[Zarr]: https://zarr.readthedocs.io/
| zappy | /zappy-0.2.0.tar.gz/zappy-0.2.0/README.md | README.md |
ZAPR AWS Athena Client
=======================================
ZAPR AWS athena client is a python library to run the presto query on the AWS Athena.
At Zapr we have the largest repository of offline media consumption, and we try to answer some of the hardest questions of brands, broadcasters and marketers on top of this data. To make all this happen we churn through TBs of data in a somewhat interactive manner. AWS Athena comes to the rescue to help the team achieve this. We use this client as a middleware to submit queries to Athena, as Athena has a few shortcomings that we have tried to solve through this client.
Athena lacks in :
1. Submitting multiple queries at a time.
2. Insert overwrite is not supported in Athena.
3. Dropping of table doesn't delete the data, only schema is dropped.
Another benefit that we achieve using this client is that we can integrate Athena easily to all our existing data pipelines built on oozie, airflow.
# Supported Features
* submit the multiple queries from single file.
* insert overwrite.
* drop table (drop the table and delete the data as well).
* submitting the query by using aws athena workgroup. so we can track the cost of the query.
# Quick Start
## Prerequisite
* boto3
* configparser
## Usage
### Syntax
```
python athena_client.py config_file_location workgroup_name query_file_location input_macro1 input_macro2 ...
```
### Install dependencies
```
pip install -r requirements.txt
```
### Example - 1
```
python athena_client.py config.ini workgroup_testing_team sample-query-file.sql start_date=2020-09-25 end_date=2020-09-25
```
### Example - 2
```
python athena_client.py s3://sampe-bucket/sample-prefix/project-1/config.ini workgroup_testing_team s3://sampe-bucket/sample-prefix/project-1/sample-query-file.sql start_date=2020-09-25 end_date=2020-09-25
```
### Via PIP
```
pip install zapr-athena-client
zapr-athena-client config.ini workgroup_testing_team sample-query-file.sql start_date=2020-09-25 end_date=2020-09-25
```
### Sample Query
```
create table sample_db.${table_prefix}_username
WITH (external_location = 's3://sample_db/${table_prefix}_username/',format = 'ORC') as
select username
from raw_db.users
where date between '${start_date}' and '${end_date}';
```
### Disable Insert Overwrite and drop data
By default, this Athena client supports insert overwrite for tables and deletes the underlying data when a drop table query is executed. The following configurations can be added to disable these features.
```
ENABLE_INSERT_OVERWRITE = False
ENABLE_EXTERNAL_TABLE_DROP = False
```
Contact
-------
For any feature requests or bugs, please raise an issue in the issues section
If anything else, get in touch with us at [email protected] | zapr-athena-client | /zapr-athena-client-0.1.tar.gz/zapr-athena-client-0.1/README.md | README.md |
import logging
import os
import sys
from zapr.utils.Utils import Utils
from zapr.utils.reader import ConfigReader, FileReader
from zapr.aws.zaprs3client import zaprS3Client
from zapr.aws.Athena import Athena
# Root logger captures everything; the console handler only shows INFO+.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Default local filenames used when the query/config are downloaded from S3.
QUERY_FILE = "query.sql"
CONFIG_FILE = "config.ini"
def main():
    """CLI entry point.

    Expected arguments: config_location work_group query_location [macro=value ...]
    Loads the config and query (from local disk or S3), substitutes macros,
    then executes each ';'-separated statement on Athena.
    """
    total_args = len(sys.argv)
    # BUG FIX: sys.argv[0] is the program name, so three user arguments mean
    # len(sys.argv) must be at least 4; the old `< 3` check let an invocation
    # with two arguments crash with IndexError at sys.argv[3].
    if total_args < 4:
        logger.info("Client expects at least 3 arguments -> configfile_location workgroup sql_file")
        sys.exit(os.EX_IOERR)
    config_location = sys.argv[1]
    work_group = sys.argv[2]
    query_location = sys.argv[3]
    logger.info("Given config location {0}".format(config_location))
    logger.info("Given QUERY location {0}".format(query_location))

    # ConfigReader: parses the INI config; FileReader: downloads the config
    # and query files from S3 when given s3:// locations.
    zapr_s3_client = zaprS3Client(logger)
    utils = Utils(logger)
    file_reader = FileReader(logger, zapr_s3_client.resource)
    config_location = file_reader.get_file("CONFIG", config_location, CONFIG_FILE)
    config_parser = ConfigReader(logger, config_location)
    region, athena_output_location, staging_db, enable_insert_overwrite, enable_external_table_drop = config_parser.read_config()

    # Create the Athena wrapper bound to the requested workgroup.
    athena = Athena(logger,
                    region,
                    work_group,
                    athena_output_location,
                    staging_db)
    query_location = file_reader.get_file("QUERY", query_location, QUERY_FILE)
    athena_output_location = athena_output_location + work_group + "/"
    logger.debug("STAGING location {0}".format(athena_output_location))
    logger.info("Reading the query...........")
    query_string = utils.get_query_string(query_location)
    logger.info("Replacing the macros with values....")
    query_string = utils.replacing_macro(query_string, total_args)
    # Abort if any ${...} placeholder is still unresolved.
    utils.validate_all_macros(query_string)
    logger.info("Final query after replacing all the given macro ...\n{0}".format(query_string))
    queries = utils.split_queries(query_string)
    for query in queries:
        logger.info("Submitting the query ... {0}".format(query))
        # Dispatch on statement type: DROP/CREATE/INSERT get special handling
        # (data deletion, staging-table swaps); everything else runs directly.
        if utils.is_drop_table(query):
            athena.drop_table_query(query, enable_external_table_drop)
        elif utils.is_create_table(query):
            athena.create_table_query(query)
        elif utils.is_insert_into_table(query):
            athena.insert_into_table_query(query, enable_insert_overwrite)
        else:
            query_execution_id = athena.submit_athena_query(query_string=query)
            athena.sync_get_status(query_execution_id)
        logger.info("query success....")


if __name__ == "__main__":
    main()
import sys
import os
import re
# Athena query execution states, as reported by the GetQueryExecution API.
RUNNING = "RUNNING"
SUCCEEDED = "SUCCEEDED"
QUEUED = "QUEUED"
FAILED = "FAILED"
CANCELLED = "CANCELLED"
# Recognized S3 URI scheme prefixes.
S3 = "s3://"
S3A = "s3a://"


class Utils:
    """Helper routines for parsing Athena SQL scripts, table names and S3 paths."""

    def __init__(self, logger):
        self.logger = logger

    def get_query_string(self, query_location):
        """Return the full text of the SQL file at *query_location*.

        BUG FIX: the first parameter was previously misspelled ``selt``.
        """
        with open(query_location, "r") as query_file:
            data = query_file.read()
        return data

    def replacing_macro(self, query_string, total_args):
        """Replace ``${name}`` placeholders in *query_string* using the
        ``name=value`` CLI arguments found at sys.argv[4:total_args]."""
        for arg_index in range(4, total_args):
            macro_input = sys.argv[arg_index]
            # Split on the first '=' only, so values may themselves contain '='.
            macro, macro_value = macro_input.split('=', 1)
            macro = "${" + macro + "}"
            self.logger.info("Replacing {0} with {1} ".format(macro, macro_value))
            query_string = query_string.replace(macro, macro_value)
        return query_string

    def validate_all_macros(self, query_string):
        """Exit with an error if any unreplaced ``${...}`` macro remains."""
        matched_string = re.search(r".*\${.*}", query_string)
        if matched_string is not None:
            self.logger.error("Unable to replace the some of the macros value in query {}".format(query_string))
            sys.exit(os.EX_IOERR)

    def split_queries(self, query_string):
        """Split a script on ';' and drop empty statements; returns an iterator."""
        queries = query_string.rstrip().split(";")
        return filter(None, queries)

    def is_drop_table(self, query):
        """Return a truthy match object if *query* is a DROP TABLE statement."""
        # The trailing ([.,^.]*) group is kept from the original pattern; it can
        # match the empty string, so it does not constrain the result.
        return re.search(r"^([\s]*)DROP([\s]+)TABLE([\s]+)([.,^.]*)", query, flags=re.IGNORECASE)

    def is_create_table(self, query):
        """Return a truthy match object if *query* is a CREATE TABLE statement."""
        return re.search(r"^([\s]*)CREATE([\s]+)TABLE([\s]+)([.,^.]*)", query, flags=re.IGNORECASE)

    def is_insert_into_table(self, query):
        """Return a truthy match object if *query* is an INSERT INTO statement."""
        return re.search(r"^([\s]*)INSERT([\s]+)INTO([\s]+)([.,^.]*)", query, flags=re.IGNORECASE)

    def get_table_name_drop_table(self, drop_table_query):
        """Extract the '<db>.<table>' token from a DROP TABLE statement, or None."""
        self.logger.info("drop table query ....." + drop_table_query)
        # Strip the leading DROP keyword, then the first TABLE keyword.
        result = re.sub("^DROP", "", drop_table_query.lstrip(), flags=re.IGNORECASE)
        result = re.sub("TABLE", "", result, count=1, flags=re.IGNORECASE)
        if result is not None and len(result.strip()) != 0:
            table_name = result.split()[0]
            return table_name
        return None

    def get_database_table(self, table_name):
        """Split 'db.table' into (db, table); malformed names are fatal."""
        try:
            db, table = table_name.split('.')
            return db, table
        except ValueError:
            self.logger.error("Unable to read table name and database from the given string {0}".format(table_name))
            sys.exit(os.EX_IOERR)

    def split_s3_path(self, s3_path):
        """Split an s3:// or s3a:// URI into (bucket, key-prefix)."""
        path_parts = s3_path.replace(S3, "").replace(S3A, "").split("/")
        s3_bucket = path_parts.pop(0)
        prefix = "/".join(path_parts)
        return s3_bucket, prefix

    def get_table_name_from_insert_query(self, insert_into_query):
        """Extract the target table name from an INSERT INTO statement, or None."""
        self.logger.info("insert into query....." + insert_into_query)
        result = re.sub("^INSERT", "", insert_into_query.lstrip(), flags=re.IGNORECASE)
        result = re.sub("INTO", "", result, count=1, flags=re.IGNORECASE)
        if result is not None and len(result.strip()) != 0:
            table_name = result.split()[0]
            return table_name
        return None

    def set_staging_table_property(self,
                                   staging_table_name,
                                   athena_result_location,
                                   table_storage_descriptor,
                                   table_partition_keys):
        """Build Glue table properties for a staging table rooted under the
        Athena result location.

        Returns (table_properties_dict, staging_s3_location). Note that
        *table_storage_descriptor* is mutated in place (its 'Location' key
        is overwritten).
        """
        staging_s3_location = athena_result_location + staging_table_name
        table_storage_descriptor['Location'] = staging_s3_location
        staging_table_properties = {'Name': staging_table_name,
                                    'StorageDescriptor': table_storage_descriptor,
                                    'TableType': 'EXTERNAL_TABLE',
                                    'PartitionKeys': table_partition_keys}
        return staging_table_properties, staging_s3_location
from configparser import ConfigParser
from s3transfer import RetriesExceededError
from s3transfer.exceptions import TransferNotDoneError
import os
import sys
# Recognized S3 URI scheme prefixes, stripped when parsing bucket/key pairs.
S3 = "s3://"
S3A = "s3a://"
class ConfigReader:
    """Reads the client INI configuration (AWS region plus Athena settings)."""

    def __init__(self, logger, config_location):
        self.config_location = config_location
        self.logger = logger

    def read_config(self):
        """Parse the config file and return a 5-tuple:
        (region, athena_output_location, staging_db,
         enable_insert_overwrite, enable_external_table_drop).

        Missing mandatory keys are fatal: they are logged and the process
        exits with os.EX_CONFIG. The two ENABLE_* flags are returned as
        strings and default to 'True' when absent.
        """
        config = ConfigParser()
        config.read(self.config_location)
        enable_insert_overwrite = 'True'
        enable_external_table_drop = 'True'
        if 'aws' in config and 'region' in config['aws']:
            aws_region = config['aws']['region']
        else:
            self.logger.error("Not able to read the region from the config ")
            sys.exit(os.EX_CONFIG)
        if 'athena' not in config:
            self.logger.error("Not able to read the athena config")
            sys.exit(os.EX_CONFIG)
        athena_section = config['athena']
        if 'ATHENA_OUTPUT_LOCATION' in athena_section:
            athena_output_location = athena_section['ATHENA_OUTPUT_LOCATION']
        else:
            self.logger.error("Not able to read the ATHENA_OUTPUT_LOCATION from the config ")
            sys.exit(os.EX_CONFIG)
        if 'STAGING_DB' in athena_section:
            staging_db = athena_section['STAGING_DB']
        else:
            self.logger.error("Not able to read the STAGING_DB from the config ")
            sys.exit(os.EX_CONFIG)
        if 'ENABLE_INSERT_OVERWRITE' in athena_section:
            enable_insert_overwrite = athena_section['ENABLE_INSERT_OVERWRITE']
        if 'ENABLE_EXTERNAL_TABLE_DROP' in athena_section:
            # BUG FIX: this previously read ENABLE_INSERT_OVERWRITE, so the
            # external-table-drop flag could never be configured independently.
            enable_external_table_drop = athena_section['ENABLE_EXTERNAL_TABLE_DROP']
        return aws_region, athena_output_location, staging_db, enable_insert_overwrite, enable_external_table_drop
class FileReader:
    """Fetches config/query files, downloading them from S3 when needed."""

    def __init__(self, logger, s3_resource):
        self.logger = logger
        self.s3_resource = s3_resource

    def split_s3_path(self, s3_location):
        """Split an s3:// or s3a:// URI into (bucket, key-prefix)."""
        stripped = s3_location.replace("s3://", "").replace("s3a://", "")
        parts = stripped.split("/")
        s3_bucket = parts[0]
        prefix = "/".join(parts[1:])
        return s3_bucket, prefix

    def download_input_from_s3(self, s3_bucket, prefix, destination_location):
        """Download s3://bucket/prefix to a local path; failures are fatal."""
        try:
            self.s3_resource.meta.client.download_file(s3_bucket, prefix, destination_location)
        except RetriesExceededError as e:
            self.logger.fatal("Unable to download the file {0}".format(e))
            self.logger.fatal("Unable to download the file from s3 to local : {0}/{1}".format(s3_bucket, prefix))
            sys.exit(os.EX_DATAERR)
        except TransferNotDoneError as e:
            self.logger.fatal("Unable to download the file {0}".format(e))
            sys.exit(os.EX_OSERR)
        return destination_location

    def get_file(self, file_type, source_location, destination_location):
        """Return a local path for *source_location*, downloading it first
        when it is an S3 URI; local paths are returned untouched."""
        if not source_location.startswith(("s3://", "s3a://")):
            return source_location
        self.logger.info("Downloading the {0} from {1} to {2}".format(file_type,
                                                                      source_location,
                                                                      destination_location))
        s3_bucket, prefix = self.split_s3_path(source_location)
        return self.download_input_from_s3(s3_bucket, prefix, destination_location)
import boto3
import sys
import os
from botocore.exceptions import ParamValidationError, ClientError
class zaprS3Client:
    """Thin wrapper around the boto3 S3 client/resource.

    Provides copy/list/delete/move helpers used to shuffle Athena table data
    between staging and final locations. All AWS failures are treated as
    fatal: they are logged and the process exits with a non-zero code.
    """

    client = None
    resource = None

    def __init__(self, logger):
        self.logger = logger
        self.client = boto3.client('s3')
        self.resource = boto3.resource('s3')

    def copy_object(self, source_bucket, source_file_prefix, destination_bucket, destination_file_prefix):
        """Copy a single object to another bucket/key via the managed transfer API."""
        copy_source = {
            'Bucket': source_bucket,
            'Key': source_file_prefix
        }
        try:
            self.resource.meta.client.copy(copy_source, destination_bucket, destination_file_prefix)
        except self.resource.meta.client.exceptions.ObjectNotInActiveTierError as e:
            # BUG FIX: this previously referenced `self.resource.Client`, which
            # does not exist on a boto3 resource; evaluating the except clause
            # would raise AttributeError and mask the real error. Also fixed
            # the "%e" format specifier (floats only) to "%s".
            self.logger.fatal("Error occurred while copying object into s3 %s " % e)
            sys.exit(os.EX_OSERR)
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error in copying object into s3 : %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            self.logger.fatal("Unexpected error in copying object into s3: %s" % e)
            sys.exit(os.EX_OSERR)

    def list_objects(self, bucket, prefix):
        """Return the raw ListObjects response for bucket/prefix.

        NOTE(review): list_objects returns at most 1000 keys per call; larger
        prefixes would need pagination — confirm expected data volumes.
        """
        try:
            return self.client.list_objects(Bucket=bucket, Prefix=prefix)
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error in getting objects from s3 : %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            self.logger.fatal("Unexpected error in getting objects from s3 : %s" % e)
            sys.exit(os.EX_OSERR)

    def delete_objects(self, bucket, prefix):
        """Delete every listed object under bucket/prefix.

        The prefix is normalized to end with '/' so it is treated as a
        "directory" rather than a key substring.
        """
        if not prefix.endswith('/'):
            prefix = prefix + '/'
        try:
            response = self.client.list_objects(
                Bucket=bucket,
                Prefix=prefix)
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error in getting objects from s3 : %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            self.logger.fatal("Unexpected error in getting objects from s3 : %s" % e)
            sys.exit(os.EX_OSERR)
        if 'Contents' in response:
            for s3_object in response['Contents']:
                # Kept as print() to preserve the original console output.
                print('Deleting', s3_object['Key'])
                try:
                    self.client.delete_object(Bucket=bucket, Key=s3_object['Key'])
                except ParamValidationError as e:
                    self.logger.fatal("Parameter validation error in deleting object from s3 : %s" % e)
                    sys.exit(os.EX_OSERR)
                except ClientError as e:
                    self.logger.fatal("Unexpected error in deleting object from s3 : %s" % e)
                    sys.exit(os.EX_OSERR)
        else:
            self.logger.warning("Not able to delete data in s3 since there is no content ")

    def move_objects(self,
                     source_bucket,
                     source_prefix,
                     destination_bucket,
                     destination_prefix):
        """Move every object under the source prefix to the destination prefix.

        Clears the destination first, copies each file, then deletes the
        source prefix.
        """
        response = self.list_objects(bucket=source_bucket,
                                     prefix=source_prefix)
        if 'Contents' in response:
            list_objects = response['Contents']
            length_objects = len(list_objects)
            if length_objects > 0:
                self.logger.info("Deleting data from Destination location before copying....{0}".format(
                    destination_bucket + "/" + destination_prefix))
                self.delete_objects(destination_bucket, destination_prefix)
                # Now move the data from the staging location.
                self.logger.info("Copying objects from Source to destination location")
                for entry in list_objects:
                    location = entry['Key']
                    # NOTE(review): str.replace removes *all* occurrences of
                    # the prefix; keys whose tail repeats the prefix would be
                    # mangled — confirm key layout.
                    file_name = location.replace(source_prefix, "")
                    self.logger.debug("\nPrefix of the file {0}".format(location))
                    self.logger.debug("File name {0}".format(file_name))
                    self.logger.info("Copy....{0}".format(file_name))
                    self.copy_object(source_bucket=source_bucket,
                                     source_file_prefix=source_prefix + file_name,
                                     destination_bucket=destination_bucket,
                                     destination_file_prefix=destination_prefix + file_name)
                # BUG FIX: this log previously formatted source_prefix twice
                # instead of bucket + "/" + prefix.
                self.logger.info("Deleting data from source location .....{0}".format(
                    source_bucket + "/" + source_prefix))
                self.delete_objects(source_bucket, source_prefix)
            else:
                self.logger.info("There are no files in the staging table location to copy..Reason : Zero Objects")
        else:
            self.logger.info("There are no contents in the staging table location to copy..Reason : no contents")
import boto3
from ..utils.Utils import Utils
from .Glue import Glue
from .zaprs3client import zaprS3Client
import sys
import os
from botocore.exceptions import ParamValidationError, ClientError
import time
import re
class Athena:
    """Runs SQL statements on AWS Athena and emulates drop/overwrite semantics.

    Wraps the boto3 Athena client with synchronous polling and adds two
    behaviours on top of plain Athena:

    * ``drop_table_query`` can also delete the dropped table's S3 data.
    * ``insert_into_table_query`` can emulate an "insert overwrite" by
      redirecting the insert into a temporary staging table and then
      replacing the destination table's data in S3.
    """
    # Athena query-execution states as reported in
    # QueryExecution.Status.State (polled by sync_get_status).
    RUNNING = "RUNNING"
    SUCCEEDED = "SUCCEEDED"
    QUEUED = "QUEUED"
    FAILED = "FAILED"
    CANCELLED = "CANCELLED"
    def __init__(self,
                 logger,
                 region,
                 work_group,
                 athena_result_location,
                 staging_database):
        """Build the Athena client and its collaborators.

        :param logger: logger used for all diagnostics.
        :param region: AWS region for the Athena/Glue/S3 clients.
        :param work_group: Athena work group queries are submitted under.
        :param athena_result_location: S3 URI where Athena writes results;
            also used as the storage root for staging tables.
        :param staging_database: database holding temporary staging tables.
        """
        self.logger = logger
        self.client = boto3.client('athena', region_name=region)
        self.work_group = work_group
        self.athena_result_location = athena_result_location
        self.staging_database = staging_database
        self.utils = Utils(logger)
        self.glue = Glue(logger, region)
        self.zapr_s3_client = zaprS3Client(logger)
    def submit_athena_query(self, query_string):
        """Start a query execution and return its QueryExecutionId.

        Exits the process on any boto3 submission error.
        """
        try:
            athena_response = self.client.start_query_execution(
                QueryString=query_string,
                ResultConfiguration={
                    'OutputLocation': self.athena_result_location
                },
                WorkGroup=self.work_group)
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error: %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            self.logger.fatal("Unexpected error: %s" % e)
            sys.exit(os.EX_OSERR)
        self.logger.info("Response from Athena submission {0}".format(athena_response))
        query_id = athena_response['QueryExecutionId']
        self.logger.info("Query id {0}".format(query_id))
        return query_id
    def sync_get_status(self, query_id):
        """Block until the given query execution finishes.

        Polls every 5 seconds with no timeout; returns on SUCCEEDED and
        exits the process on FAILED or CANCELLED.
        """
        while True:
            try:
                response = self.client.get_query_execution(
                    QueryExecutionId=query_id)
            except ParamValidationError as e:
                self.logger.fatal("Parameter validation error: %s" % e)
                sys.exit(os.EX_OSERR)
            status = response['QueryExecution']['Status']['State']
            self.logger.info("Current Status for {0} (state: {1})".format(query_id, status))
            if status == self.SUCCEEDED:
                manifest = self.athena_result_location + query_id + "-manifest.csv"
                # NOTE(review): this concatenation yields "<query-id>metadata"
                # with no separator; Athena normally writes
                # "<query-id>.csv.metadata" -- verify the intended object name.
                result_metadata = self.athena_result_location + query_id + "metadata"
                self.logger.debug(
                    "You can download the meta data of the results from {0}, {1}".format(manifest, result_metadata))
                break
            elif status == self.FAILED or status == self.CANCELLED:
                self.logger.error("Query {0}....".format(status))
                self.logger.error("REASON : {0}".format(response['QueryExecution']['Status']['StateChangeReason']))
                sys.exit(os.EX_OSERR)
            else:
                # Still QUEUED/RUNNING: wait before polling again.
                time.sleep(5)
                continue
    def drop_table_query(self, query, enable_external_table_drop):
        """Execute a DROP TABLE statement and wait for it.

        :param query: the DROP TABLE statement.
        :param enable_external_table_drop: string flag -- note the comparison
            is against the literal string 'False', not a boolean; any other
            value also deletes the table's S3 data (location resolved from
            Glue before the drop).
        """
        if enable_external_table_drop == 'False':
            query_execution_id = self.submit_athena_query(query_string=query)
            self.sync_get_status(query_execution_id)
        else:
            table_name = self.utils.get_table_name_drop_table(query)
            if table_name is None:
                self.logger.error("Error in drop table query {0}".format(query))
                sys.exit(os.EX_IOERR)
            database, table = self.utils.get_database_table(table_name)
            # Resolve the data location *before* dropping the table, while
            # the Glue entry still exists.
            table_data_location = self.glue.get_table_data_location(database, table)
            self.logger.info("Deleting table {0}".format(table_name))
            query_execution_id = self.submit_athena_query(query_string=query)
            self.sync_get_status(query_execution_id)
            if table_data_location is not None:
                self.logger.info("Deleting the data from s3....{0}".format(table_data_location))
                bucket, prefix = self.utils.split_s3_path(table_data_location)
                self.zapr_s3_client.delete_objects(bucket, prefix)
    def create_table_query(self, query):
        """Execute a CREATE TABLE statement and wait for it to finish."""
        query_execution_id = self.submit_athena_query(query_string=query)
        self.sync_get_status(query_execution_id)
    def insert_into_table_query(self, query, enable_insert_overwrite):
        """Execute an INSERT INTO statement.

        :param query: the INSERT INTO statement.
        :param enable_insert_overwrite: string flag -- compared against the
            literal string 'False'; any other value turns the insert into an
            "insert overwrite": the query is redirected into a staging table
            and the produced data then replaces the destination table's data
            (per resulting partition when the table is partitioned).
        """
        if enable_insert_overwrite == 'False':
            query_execution_id = self.submit_athena_query(query_string=query)
            self.sync_get_status(query_execution_id)
        else:
            """
            insert into table in athena is appending the data in the existing table. But we need to replace the old data
            with new data. We are solving this via creating a staging table and insert into it and replace the data in
            the destination location
            """
            table_name = self.utils.get_table_name_from_insert_query(insert_into_query=query)
            if table_name is None:
                self.logger.info("Not able to figure out the table from INSERT INTO query")
                sys.exit(os.EX_OSERR)
            data_base, table_name = self.utils.get_database_table(table_name=table_name)
            table_property = self.glue.get_table_property(database=data_base,
                                                          table=table_name)
            if table_property is None:
                self.logger.error("Table is not present in the aws glue")
                sys.exit(os.EX_OSERR)
            self.logger.info("Table Property : {0}".format(table_property))
            """ create a staging table in the staging database with same table property as the given table.
            The changes in the staging table are storage location of the staging table and table name
            ex : location will be in athena result location and table name will be staging_given_table_current_timestamp
            """
            table_storage_descriptor = table_property['Table']['StorageDescriptor']
            storage_location = table_storage_descriptor['Location']
            # Normalise away a trailing '/' so prefix arithmetic below works.
            if storage_location.endswith('/'):
                storage_location = storage_location[:-1]
            partition_keys = table_property['Table']['PartitionKeys']
            # Millisecond timestamp keeps concurrent staging table names unique.
            millis = int(round(time.time() * 1000))
            staging_table_name = 'STAGING_' + table_name + '_' + str(millis)
            self.logger.info("STAGING Table name : {0}".format(staging_table_name))
            staging_table_property, staging_table_storage_location = self.utils.set_staging_table_property(
                staging_table_name=staging_table_name,
                athena_result_location=self.athena_result_location,
                table_storage_descriptor=table_storage_descriptor,
                table_partition_keys=partition_keys)
            self.logger.info("Creating staging table with property : {0} and name : {1} ".format(staging_table_property,
                                                                                                 staging_table_name))
            # todo get repsonse code
            self.glue.create_table_in_glue(self.staging_database, staging_table_property)
            """
            Inserting data into the staging table
            """
            self.logger.info("Inserting into staging table.....")
            # NOTE(review): the first argument of re.sub is a regex, so the
            # '.' between database and table matches any character -- verify
            # table names cannot collide through this pattern.
            query = re.sub(data_base + "." + table_name, self.staging_database + "." + staging_table_name, query,
                           count=1)
            self.logger.info("Insert query for staging table {0} : ".format(query))
            query_execution_id = self.submit_athena_query(query_string=query)
            self.sync_get_status(query_execution_id)
            if partition_keys is None or len(partition_keys)==0:
                """If the given table does not have partition
                then we do not need to get the resultant partitions from staging table"""
                """Move the data from staging table storage location to final table storage location"""
                staging_table_storage_bucket, staging_table_storage_s3_prefix = self.utils.split_s3_path(
                    staging_table_storage_location)
                final_table_storage_bucket, final_table_storage_s3_prefix = self.utils.split_s3_path(storage_location)
                self.zapr_s3_client.move_objects(source_bucket=staging_table_storage_bucket,
                                                 source_prefix=staging_table_storage_s3_prefix,
                                                 destination_bucket=final_table_storage_bucket,
                                                 destination_prefix=final_table_storage_s3_prefix)
                self.logger.debug("Dropping the staging table : {0}.{1}".format(self.staging_database, staging_table_name))
                drop_table_query = "drop table {0}.{1}".format(self.staging_database, staging_table_name)
                query_execution_id = self.submit_athena_query(drop_table_query)
                self.sync_get_status(query_execution_id)
            else:
                """ get the partitions from the staging table.
                so we can replace those partition's data in the given original table"""
                staging_table_partition_response = self.glue.get_partitions(database=self.staging_database,
                                                                            table=staging_table_name)
                staging_table_partitions = staging_table_partition_response['Partitions']
                """ If there are no partitions in the staging table response, then there are 2 possibilities.
                result of insert into query did not generate any data(empty result) or
                the given table itself is un partitioned table"""
                final_table_storage_bucket, final_table_storage_s3_prefix = self.utils.split_s3_path(storage_location)
                if staging_table_partitions:
                    length = len(staging_table_partitions)
                    for i in range(length):
                        ''''sample: table partition s3://source-bucket/s3prefix/date=1900-01-01'''
                        ''''sample: table partition s3://source-bucket/s3prefix/date=1900-01-02'''
                        table_partition = staging_table_partitions[i]['StorageDescriptor']['Location']
                        # Relative partition path, keeps its leading '/'
                        # (add_partition strips it again).
                        partition = table_partition.replace(staging_table_storage_location, "")
                        '''staging_table_storage_bucket : s3://source-bucket/
                        staging_table_storage_s3_prefix : s3prefix/date=1900-01-01
                        '''
                        staging_table_storage_bucket, staging_table_storage_s3_prefix = self.utils.split_s3_path(
                            table_partition)
                        destination_table_storage_s3_prefix = final_table_storage_s3_prefix + partition
                        self.zapr_s3_client.move_objects(source_bucket=staging_table_storage_bucket,
                                                         source_prefix=staging_table_storage_s3_prefix,
                                                         destination_bucket=final_table_storage_bucket,
                                                         destination_prefix=destination_table_storage_s3_prefix)
                        self.add_partition(table=data_base + "." + table_name,
                                           partition=partition,
                                           bucket=final_table_storage_bucket,
                                           prefix=destination_table_storage_s3_prefix)
                else:
                    self.logger.info("There are no data to move to final table.")
                """"
                dropping the staging table after copying the data
                """
                drop_table_query = "drop table {0}.{1}".format(self.staging_database, staging_table_name)
                self.logger.info("Dropping staging table ...{0}".format(drop_table_query))
                query_execution_id = self.submit_athena_query(query_string=drop_table_query)
                self.sync_get_status(query_execution_id)
    def add_partition(self, table, partition, bucket, prefix):
        """Register a partition on ``table`` via ALTER TABLE ADD PARTITION.

        :param table: fully qualified table name ("db.table").
        :param partition: partition path with a leading '/', e.g.
            "/date=1900-01-01" (possibly nested, "/k1=v1/k2=v2").
        :param bucket: destination S3 bucket of the partition data.
        :param prefix: destination S3 prefix of the partition data.
        """
        add_partition_query = "alter table {0} add if not exists partition({1}) location '{2}'"
        location = "s3://" + bucket + "/" + prefix
        # Turn "/k1=v1/k2=v2" into "k1='v1',k2='v2'": drop the leading '/',
        # separate levels with ',', then quote the values.
        partitions = partition[1:].replace("/", ",").replace("=", "='").replace(",", "',") + "'"
        self.logger.info(partitions)
        add_partition_query = add_partition_query.format(table, partitions, location)
        self.logger.info("Add partition query {0}".format(add_partition_query))
        query_execution_id = self.submit_athena_query(add_partition_query)
        self.sync_get_status(query_execution_id)
import boto3
import os
import sys
from botocore.exceptions import ParamValidationError, ClientError
class Glue:
    """Thin wrapper around the AWS Glue data-catalog client.

    Unrecoverable failures are logged through the injected logger and
    terminate the process with ``os.EX_OSERR``, matching the error-handling
    style of the other AWS wrappers in this package.
    """
    def __init__(self, logger, region):
        """Create a Glue client for the given AWS region.

        :param logger: logger used for all diagnostics.
        :param region: AWS region name passed to boto3.
        """
        self.logger = logger
        self.client = boto3.client('glue', region_name=region)
    def create_table_in_glue(self, database, table_property):
        """Create a table in the Glue catalog.

        :param database: name of the Glue database.
        :param table_property: ``TableInput`` structure (must contain 'Name').
        Exits the process on any Glue error or non-200 response.
        """
        try:
            response = self.client.create_table(DatabaseName=database,
                                                TableInput=table_property)
            self.logger.info(response)
            if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                self.logger.info("Staging table has been created ..... %s" % table_property['Name'])
            else:
                self.logger.fatal("Unexpected response from Glue ..")
                sys.exit(os.EX_OSERR)
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error: %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            self.logger.fatal("Unexpected error: %s" % e)
            sys.exit(os.EX_OSERR)
    def get_table_data_location(self, database, table):
        """Return the S3 storage location of ``database.table``.

        Returns None when the table does not exist in the Glue catalog.
        """
        table_property = self.get_table_property(database=database,
                                                 table=table)
        if table_property is None:
            self.logger.warn("Table is not present in the AWS Glue..")
            return None
        self.logger.info("Table Properties : {0}".format(table_property))
        # The output location lives inside the table's storage descriptor.
        table_storage_descriptor = table_property['Table']['StorageDescriptor']
        table_data_location = table_storage_descriptor['Location']
        return table_data_location
    def get_table_property(self, database, table):
        """Return the raw ``get_table`` response for ``database.table``.

        Returns None when the table does not exist (EntityNotFoundException);
        exits the process on any other Glue client error.
        """
        try:
            response = self.client.get_table(
                DatabaseName=database,
                Name=table)
            return response
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error: %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            # A missing table is an expected, recoverable outcome: report it
            # and let the caller decide. Only genuinely unexpected client
            # errors are fatal (previously "Unexpected error" was logged at
            # fatal level even for the handled not-found case).
            if e.response['Error']['Code'] == 'EntityNotFoundException':
                self.logger.info("EntityNotFoundException in Aws Glue")
                return None
            self.logger.fatal("Unexpected error: %s" % e)
            sys.exit(os.EX_OSERR)
    def get_partitions(self, database, table):
        """Return the ``get_partitions`` response for ``database.table``.

        Exits the process on any Glue error.
        """
        try:
            response = self.client.get_partitions(
                DatabaseName=database,
                TableName=table)
        except ParamValidationError as e:
            self.logger.fatal("Parameter validation error: %s" % e)
            sys.exit(os.EX_OSERR)
        except ClientError as e:
            self.logger.fatal("Unexpected error: %s" % e)
            sys.exit(os.EX_OSERR)
        return response
# ZAP Python API
[](https://pypi.python.org/pypi/zaproxy)
[](https://www.apache.org/licenses/LICENSE-2.0.html)
The Python implementation to access the [ZAP API](https://www.zaproxy.org/docs/api/). For more information
about ZAP consult the (main) [ZAP project](https://github.com/zaproxy/zaproxy/).
## How to Obtain
The latest released version can be downloaded from the [https://pypi.python.org/pypi/zaproxy](https://pypi.python.org/pypi/zaproxy) using:
pip install zaproxy
## Getting Help
For help using the ZAP API, refer to:
* [Examples](https://github.com/zaproxy/zap-api-python/tree/main/src/examples) - collection of examples using the library;
* [API Documentation](https://www.zaproxy.org/docs/api/);
* [ZAP User Group](https://groups.google.com/group/zaproxy-users) - for asking questions.
## Issues
To report issues related to ZAP API, bugs and enhancements requests, use the [issue tracker of the main ZAP project](https://github.com/zaproxy/zaproxy/issues).
## Updating the Generated Files
Most of the API code is generated from the ZAP java source code.
To regenerate the API code you will need the repos [zaproxy](https://github.com/zaproxy/zaproxy) and [zap-extensions](https://github.com/zaproxy/zap-extensions) checked out at the same level as this one.
You should typically generate the core API calls from the latest release tag e.g.:
```
cd zaproxy
git fetch upstream -t
git checkout tags/v2.13.0
./gradlew generatePythonApiEndpoints
cd ..
```
The add-on APIs can be generated from the zap-extensions `main` branch:
```
cd zap-extensions
git pull upstream main
./gradlew generatePythonZapApiClientFiles --continue
cd ..
```
The above commands will update the files in `src/zapv2`.
If any new files are created then they should be manually added to `src/zapv2/__init__.py` as per the existing files.
Note that you should also update the `CHANGELOG.md` file to state whatever has been changed.
| zaproxy | /zaproxy-0.1.1.tar.gz/zaproxy-0.1.1/README.md | README.md |
import six
class pnh(object):
    """Client for the Plug-n-Hack (pnh) endpoints of the ZAP API.

    Every method forwards to the corresponding pnh endpoint; the pnh add-on
    is optional, so these calls only work when it is installed in ZAP.
    """

    def __init__(self, zap):
        self.zap = zap

    def monitor(self, id, message, apikey=''):
        """Invoke the pnh 'monitor' action.

        This component is optional and therefore the API will only work if it is installed.
        """
        params = {'id': id, 'message': message, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'pnh/action/monitor/', params)
        return six.next(six.itervalues(response))

    def oracle(self, id, apikey=''):
        """Invoke the pnh 'oracle' action.

        This component is optional and therefore the API will only work if it is installed.
        """
        params = {'id': id, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'pnh/action/oracle/', params)
        return six.next(six.itervalues(response))

    def start_monitoring(self, url, apikey=''):
        """Invoke the pnh 'startMonitoring' action for the given URL.

        This component is optional and therefore the API will only work if it is installed.
        """
        params = {'url': url, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'pnh/action/startMonitoring/', params)
        return six.next(six.itervalues(response))

    def stop_monitoring(self, id, apikey=''):
        """Invoke the pnh 'stopMonitoring' action for the given id.

        This component is optional and therefore the API will only work if it is installed.
        """
        params = {'id': id, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'pnh/action/stopMonitoring/', params)
        return six.next(six.itervalues(response))

    def pnh(self, apikey=''):
        """Fetch the 'pnh' other endpoint.

        This component is optional and therefore the API will only work if it is installed.
        """
        return self.zap._request_other(self.zap.base_other + 'pnh/other/pnh/', {'apikey': apikey})

    def manifest(self, apikey=''):
        """Fetch the 'manifest' other endpoint.

        This component is optional and therefore the API will only work if it is installed.
        """
        return self.zap._request_other(self.zap.base_other + 'pnh/other/manifest/', {'apikey': apikey})

    def service(self, apikey=''):
        """Fetch the 'service' other endpoint.

        This component is optional and therefore the API will only work if it is installed.
        """
        return self.zap._request_other(self.zap.base_other + 'pnh/other/service/', {'apikey': apikey})

    def fx__pnh_xpi(self, apikey=''):
        """Fetch the 'fx_pnh.xpi' other endpoint.

        This component is optional and therefore the API will only work if it is installed.
        """
        return self.zap._request_other(self.zap.base_other + 'pnh/other/fx_pnh.xpi/', {'apikey': apikey})
import six
class spider(object):
    def __init__(self, zap):
        """Keep a reference to the ZAPv2 client used to issue the requests."""
        self.zap = zap
def status(self, scanid=None):
"""
This component is optional and therefore the API will only work if it is installed
"""
params = {}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/status/', params)))
def results(self, scanid=None):
"""
This component is optional and therefore the API will only work if it is installed
"""
params = {}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/results/', params)))
def full_results(self, scanid):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/fullResults/', {'scanId': scanid})))
    # ---- read-only views over the spider's scans and option values; the
    # option_* accessors return the spider option named by each endpoint ----
    @property
    def scans(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/scans/')))
    @property
    def excluded_from_scan(self):
        """
        Gets the regexes of URLs excluded from the spider scans.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/excludedFromScan/')))
    @property
    def all_urls(self):
        """
        Returns a list of unique URLs from the history table based on HTTP messages added by the Spider.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/allUrls/')))
    # Not a property: takes an optional scan id.
    def added_nodes(self, scanid=None):
        """
        Returns a list of the names of the nodes added to the Sites tree by the specified scan.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {}
        if scanid is not None:
            params['scanId'] = scanid
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/addedNodes/', params)))
    @property
    def domains_always_in_scope(self):
        """
        Gets all the domains that are always in scope. For each domain the following are shown: the index, the value (domain), if enabled, and if specified as a regex.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/domainsAlwaysInScope/')))
    @property
    def option_domains_always_in_scope(self):
        """
        Use view domainsAlwaysInScope instead.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionDomainsAlwaysInScope/')))
    @property
    def option_domains_always_in_scope_enabled(self):
        """
        Use view domainsAlwaysInScope instead.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionDomainsAlwaysInScopeEnabled/')))
    @property
    def option_handle_parameters(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionHandleParameters/')))
    @property
    def option_max_children(self):
        """
        Gets the maximum number of child nodes (per node) that can be crawled, 0 means no limit.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxChildren/')))
    @property
    def option_max_depth(self):
        """
        Gets the maximum depth the spider can crawl, 0 if unlimited.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxDepth/')))
    @property
    def option_max_duration(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxDuration/')))
    @property
    def option_max_parse_size_bytes(self):
        """
        Gets the maximum size, in bytes, that a response might have to be parsed.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxParseSizeBytes/')))
    @property
    def option_max_scans_in_ui(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxScansInUI/')))
    @property
    def option_request_wait_time(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionRequestWaitTime/')))
    @property
    def option_skip_url_string(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionSkipURLString/')))
    @property
    def option_thread_count(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionThreadCount/')))
    @property
    def option_user_agent(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionUserAgent/')))
    @property
    def option_accept_cookies(self):
        """
        Gets whether or not a spider process should accept cookies while spidering.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionAcceptCookies/')))
    @property
    def option_handle_o_data_parameters_visited(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionHandleODataParametersVisited/')))
    @property
    def option_parse_comments(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionParseComments/')))
    @property
    def option_parse_ds_store(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionParseDsStore/')))
    @property
    def option_parse_git(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionParseGit/')))
    @property
    def option_parse_robots_txt(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionParseRobotsTxt/')))
    @property
    def option_parse_svn_entries(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionParseSVNEntries/')))
    @property
    def option_parse_sitemap_xml(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionParseSitemapXml/')))
    @property
    def option_post_form(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionPostForm/')))
    @property
    def option_process_form(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionProcessForm/')))
    @property
    def option_send_referer_header(self):
        """
        Gets whether or not the 'Referer' header should be sent while spidering.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionSendRefererHeader/')))
    @property
    def option_show_advanced_dialog(self):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionShowAdvancedDialog/')))
def scan(self, url=None, maxchildren=None, recurse=None, contextname=None, subtreeonly=None, apikey=''):
"""
Runs the spider against the given URL (or context). Optionally, the 'maxChildren' parameter can be set to limit the number of children scanned, the 'recurse' parameter can be used to prevent the spider from seeding recursively, the parameter 'contextName' can be used to constrain the scan to a Context and the parameter 'subtreeOnly' allows to restrict the spider under a site's subtree (using the specified 'url').
This component is optional and therefore the API will only work if it is installed
"""
params = {'apikey': apikey}
if url is not None:
params['url'] = url
if maxchildren is not None:
params['maxChildren'] = maxchildren
if recurse is not None:
params['recurse'] = recurse
if contextname is not None:
params['contextName'] = contextname
if subtreeonly is not None:
params['subtreeOnly'] = subtreeonly
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/scan/', params)))
def scan_as_user(self, contextid, userid, url=None, maxchildren=None, recurse=None, subtreeonly=None, apikey=''):
"""
Runs the spider from the perspective of a User, obtained using the given Context ID and User ID. See 'scan' action for more details.
This component is optional and therefore the API will only work if it is installed
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if url is not None:
params['url'] = url
if maxchildren is not None:
params['maxChildren'] = maxchildren
if recurse is not None:
params['recurse'] = recurse
if subtreeonly is not None:
params['subtreeOnly'] = subtreeonly
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/scanAsUser/', params)))
def pause(self, scanid, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/pause/', {'scanId': scanid, 'apikey': apikey})))
def resume(self, scanid, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/resume/', {'scanId': scanid, 'apikey': apikey})))
def stop(self, scanid=None, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
params = {'apikey': apikey}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/stop/', params)))
def remove_scan(self, scanid, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/removeScan/', {'scanId': scanid, 'apikey': apikey})))
    # ---- bulk scan-management actions (apply to all spider scans) ----
    def pause_all_scans(self, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/pauseAllScans/', {'apikey': apikey})))
    def resume_all_scans(self, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/resumeAllScans/', {'apikey': apikey})))
    def stop_all_scans(self, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/stopAllScans/', {'apikey': apikey})))
    def remove_all_scans(self, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/removeAllScans/', {'apikey': apikey})))
    def clear_excluded_from_scan(self, apikey=''):
        """
        Clears the regexes of URLs excluded from the spider scans.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/clearExcludedFromScan/', {'apikey': apikey})))
def exclude_from_scan(self, regex, apikey=''):
"""
Adds a regex of URLs that should be excluded from the spider scans.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/excludeFromScan/', {'regex': regex, 'apikey': apikey})))
def add_domain_always_in_scope(self, value, isregex=None, isenabled=None, apikey=''):
"""
Adds a new domain that's always in scope, using the specified value. Optionally sets if the new entry is enabled (default, true) and whether or not the new value is specified as a regex (default, false).
This component is optional and therefore the API will only work if it is installed
"""
params = {'value': value, 'apikey': apikey}
if isregex is not None:
params['isRegex'] = isregex
if isenabled is not None:
params['isEnabled'] = isenabled
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/addDomainAlwaysInScope/', params)))
def modify_domain_always_in_scope(self, idx, value=None, isregex=None, isenabled=None, apikey=''):
"""
Modifies a domain that's always in scope. Allows to modify the value, if enabled or if a regex. The domain is selected with its index, which can be obtained with the view domainsAlwaysInScope.
This component is optional and therefore the API will only work if it is installed
"""
params = {'idx': idx, 'apikey': apikey}
if value is not None:
params['value'] = value
if isregex is not None:
params['isRegex'] = isregex
if isenabled is not None:
params['isEnabled'] = isenabled
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/modifyDomainAlwaysInScope/', params)))
    # ---- always-in-scope domain management ----
    def remove_domain_always_in_scope(self, idx, apikey=''):
        """
        Removes a domain that's always in scope, with the given index. The index can be obtained with the view domainsAlwaysInScope.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/removeDomainAlwaysInScope/', {'idx': idx, 'apikey': apikey})))
    def enable_all_domains_always_in_scope(self, apikey=''):
        """
        Enables all domains that are always in scope.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/enableAllDomainsAlwaysInScope/', {'apikey': apikey})))
    def disable_all_domains_always_in_scope(self, apikey=''):
        """
        Disables all domains that are always in scope.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/disableAllDomainsAlwaysInScope/', {'apikey': apikey})))
def set_option_handle_parameters(self, string, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionHandleParameters/', {'String': string, 'apikey': apikey})))
def set_option_skip_url_string(self, string, apikey=''):
    """Set the spider's 'skipURLString' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionSkipURLString/', {'String': string, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_user_agent(self, string, apikey=''):
    """Set the user agent string used by the spider.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionUserAgent/', {'String': string, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_accept_cookies(self, boolean, apikey=''):
    """Set whether or not a spider process should accept cookies while spidering.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionAcceptCookies/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_handle_o_data_parameters_visited(self, boolean, apikey=''):
    """Set the spider's 'handleODataParametersVisited' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionHandleODataParametersVisited/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_max_children(self, integer, apikey=''):
    """Set the maximum number of child nodes (per node) that can be crawled; 0 means no limit.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionMaxChildren/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_max_depth(self, integer, apikey=''):
    """Set the maximum depth the spider can crawl; 0 for unlimited depth.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionMaxDepth/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_max_duration(self, integer, apikey=''):
    """Set the spider's 'maxDuration' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionMaxDuration/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_max_parse_size_bytes(self, integer, apikey=''):
    """Set the maximum size, in bytes, that a response might have to be parsed.

    This allows the spider to skip big responses/files.
    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionMaxParseSizeBytes/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_max_scans_in_ui(self, integer, apikey=''):
    """Set the spider's 'maxScansInUI' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionMaxScansInUI/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_parse_comments(self, boolean, apikey=''):
    """Set the spider's 'parseComments' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionParseComments/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_parse_ds_store(self, boolean, apikey=''):
    """Set the spider's 'parseDsStore' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionParseDsStore/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_parse_git(self, boolean, apikey=''):
    """Set the spider's 'parseGit' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionParseGit/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_parse_robots_txt(self, boolean, apikey=''):
    """Set the spider's 'parseRobotsTxt' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionParseRobotsTxt/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_parse_svn_entries(self, boolean, apikey=''):
    """Set the spider's 'parseSVNEntries' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionParseSVNEntries/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_parse_sitemap_xml(self, boolean, apikey=''):
    """Set the spider's 'parseSitemapXml' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionParseSitemapXml/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_post_form(self, boolean, apikey=''):
    """Set the spider's 'postForm' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionPostForm/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_process_form(self, boolean, apikey=''):
    """Set the spider's 'processForm' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionProcessForm/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_request_wait_time(self, integer, apikey=''):
    """Set the spider's 'requestWaitTime' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionRequestWaitTime/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_send_referer_header(self, boolean, apikey=''):
    """Set whether or not the 'Referer' header should be sent while spidering.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionSendRefererHeader/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_show_advanced_dialog(self, boolean, apikey=''):
    """Set the spider's 'showAdvancedDialog' option.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionShowAdvancedDialog/', {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_thread_count(self, integer, apikey=''):
    """Set the number of threads used by the spider.

    This component is optional and therefore the API will only work if it is installed
    """
    response = self.zap._request(self.zap.base + 'spider/action/setOptionThreadCount/', {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
import six
class context(object):
    """Client for the ZAP 'context' API component (session contexts)."""

    def __init__(self, zap):
        self.zap = zap

    def _api(self, path, params=None):
        """Call the given API endpoint and unwrap the single-valued response."""
        if params is None:
            result = self.zap._request(self.zap.base + path)
        else:
            result = self.zap._request(self.zap.base + path, params)
        return six.next(six.itervalues(result))

    @property
    def context_list(self):
        """List context names of the current session."""
        return self._api('context/view/contextList/')

    def exclude_regexs(self, contextname):
        """List excluded regexs for the named context."""
        return self._api('context/view/excludeRegexs/', {'contextName': contextname})

    def include_regexs(self, contextname):
        """List included regexs for the named context."""
        return self._api('context/view/includeRegexs/', {'contextName': contextname})

    def context(self, contextname):
        """List the information about the named context."""
        return self._api('context/view/context/', {'contextName': contextname})

    @property
    def technology_list(self):
        """List the names of all built in technologies."""
        return self._api('context/view/technologyList/')

    def included_technology_list(self, contextname):
        """List the names of all technologies included in a context."""
        return self._api('context/view/includedTechnologyList/', {'contextName': contextname})

    def excluded_technology_list(self, contextname):
        """List the names of all technologies excluded from a context."""
        return self._api('context/view/excludedTechnologyList/', {'contextName': contextname})

    def urls(self, contextname):
        """List the URLs accessed through/by ZAP that belong to the named context."""
        return self._api('context/view/urls/', {'contextName': contextname})

    def exclude_from_context(self, contextname, regex, apikey=''):
        """Add an exclude regex to the named context."""
        return self._api('context/action/excludeFromContext/', {'contextName': contextname, 'regex': regex, 'apikey': apikey})

    def include_in_context(self, contextname, regex, apikey=''):
        """Add an include regex to the named context."""
        return self._api('context/action/includeInContext/', {'contextName': contextname, 'regex': regex, 'apikey': apikey})

    def set_context_regexs(self, contextname, incregexs, excregexs, apikey=''):
        """Set the regexs to include and exclude for a context, both supplied as JSON string arrays."""
        return self._api('context/action/setContextRegexs/', {'contextName': contextname, 'incRegexs': incregexs, 'excRegexs': excregexs, 'apikey': apikey})

    def set_context_checking_strategy(self, contextname, checkingstrategy, pollurl=None, polldata=None, pollheaders=None, pollfrequency=None, pollfrequencyunits=None, apikey=''):
        """Set the checking strategy for a context - how ZAP checks that a request is authenticated."""
        payload = {'contextName': contextname, 'checkingStrategy': checkingstrategy, 'apikey': apikey}
        # Optional polling parameters are only sent when explicitly provided.
        optional = (('pollUrl', pollurl), ('pollData', polldata), ('pollHeaders', pollheaders),
                    ('pollFrequency', pollfrequency), ('pollFrequencyUnits', pollfrequencyunits))
        for name, value in optional:
            if value is not None:
                payload[name] = value
        return self._api('context/action/setContextCheckingStrategy/', payload)

    def new_context(self, contextname, apikey=''):
        """Create a new context with the given name in the current session."""
        return self._api('context/action/newContext/', {'contextName': contextname, 'apikey': apikey})

    def remove_context(self, contextname, apikey=''):
        """Remove a context from the current session."""
        return self._api('context/action/removeContext/', {'contextName': contextname, 'apikey': apikey})

    def export_context(self, contextname, contextfile, apikey=''):
        """Export the named context to a file.

        A relative file path is resolved against the "contexts" directory in
        the ZAP "home" dir.
        """
        return self._api('context/action/exportContext/', {'contextName': contextname, 'contextFile': contextfile, 'apikey': apikey})

    def import_context(self, contextfile, apikey=''):
        """Import a context from a file.

        A relative file path is resolved against the "contexts" directory in
        the ZAP "home" dir.
        """
        return self._api('context/action/importContext/', {'contextFile': contextfile, 'apikey': apikey})

    def include_context_technologies(self, contextname, technologynames, apikey=''):
        """Include technologies with the given names (comma separated) in a context."""
        return self._api('context/action/includeContextTechnologies/', {'contextName': contextname, 'technologyNames': technologynames, 'apikey': apikey})

    def include_all_context_technologies(self, contextname, apikey=''):
        """Include all built in technologies in a context."""
        return self._api('context/action/includeAllContextTechnologies/', {'contextName': contextname, 'apikey': apikey})

    def exclude_context_technologies(self, contextname, technologynames, apikey=''):
        """Exclude technologies with the given names (comma separated) from a context."""
        return self._api('context/action/excludeContextTechnologies/', {'contextName': contextname, 'technologyNames': technologynames, 'apikey': apikey})

    def exclude_all_context_technologies(self, contextname, apikey=''):
        """Exclude all built in technologies from a context."""
        return self._api('context/action/excludeAllContextTechnologies/', {'contextName': contextname, 'apikey': apikey})

    def set_context_in_scope(self, contextname, booleaninscope, apikey=''):
        """Set a context to in scope (contexts are in scope by default)."""
        return self._api('context/action/setContextInScope/', {'contextName': contextname, 'booleanInScope': booleaninscope, 'apikey': apikey})
import six
class reports(object):
    """Client for the ZAP 'reports' API component (report generation)."""

    def __init__(self, zap):
        self.zap = zap

    def _api(self, path, params=None):
        """Call the given API endpoint and unwrap the single-valued response."""
        if params is None:
            result = self.zap._request(self.zap.base + path)
        else:
            result = self.zap._request(self.zap.base + path, params)
        return six.next(six.itervalues(result))

    @property
    def templates(self):
        """View available templates.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._api('reports/view/templates/')

    def template_details(self, template):
        """View details of the specified template.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._api('reports/view/templateDetails/', {'template': template})

    def generate(self, title, template, theme=None, description=None, contexts=None, sites=None, sections=None, includedconfidences=None, includedrisks=None, reportfilename=None, reportfilenamepattern=None, reportdir=None, display=None, apikey=''):
        """Generate a report with the supplied parameters.

        This component is optional and therefore the API will only work if it is installed
        """
        payload = {'title': title, 'template': template, 'apikey': apikey}
        # Optional parameters are only sent when explicitly provided.
        optional = (('theme', theme), ('description', description), ('contexts', contexts),
                    ('sites', sites), ('sections', sections),
                    ('includedConfidences', includedconfidences), ('includedRisks', includedrisks),
                    ('reportFileName', reportfilename), ('reportFileNamePattern', reportfilenamepattern),
                    ('reportDir', reportdir), ('display', display))
        for name, value in optional:
            if value is not None:
                payload[name] = value
        return self._api('reports/action/generate/', payload)
import six
class exim(object):
    """Client for the ZAP 'exim' API component (import/export of messages)."""

    def __init__(self, zap):
        self.zap = zap

    def _action(self, path, params):
        """Call an API action endpoint and unwrap the single-valued response."""
        result = self.zap._request(self.zap.base + path, params)
        return six.next(six.itervalues(result))

    def _other(self, path, params):
        """Call an 'other' endpoint, which returns raw (non-JSON-wrapped) data."""
        return self.zap._request_other(self.zap.base_other + path, params)

    def import_har(self, filepath, apikey=''):
        """Import a HAR file.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._action('exim/action/importHar/', {'filePath': filepath, 'apikey': apikey})

    def import_urls(self, filepath, apikey=''):
        """Import URLs (one per line) from the file with the given file system path.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._action('exim/action/importUrls/', {'filePath': filepath, 'apikey': apikey})

    def import_zap_logs(self, filepath, apikey=''):
        """Import previously exported ZAP messages from the file with the given file system path.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._action('exim/action/importZapLogs/', {'filePath': filepath, 'apikey': apikey})

    def import_modsec_2_logs(self, filepath, apikey=''):
        """Import ModSecurity2 logs from the file with the given file system path.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._action('exim/action/importModsec2Logs/', {'filePath': filepath, 'apikey': apikey})

    def export_har(self, baseurl=None, start=None, count=None, apikey=''):
        """Get the HTTP messages sent through/by ZAP, in HAR format.

        Optionally filtered by URL and paginated with 'start' position and
        'count' of messages.
        This component is optional and therefore the API will only work if it is installed
        """
        payload = {'apikey': apikey}
        for name, value in (('baseurl', baseurl), ('start', start), ('count', count)):
            if value is not None:
                payload[name] = value
        return self._other('exim/other/exportHar/', payload)

    def export_har_by_id(self, ids, apikey=''):
        """Get the HTTP messages with the given IDs, in HAR format.

        This component is optional and therefore the API will only work if it is installed
        """
        return self._other('exim/other/exportHarById/', {'ids': ids, 'apikey': apikey})

    def send_har_request(self, request, followredirects=None, apikey=''):
        """Send the first HAR request entry, optionally following redirections.

        Returns, in HAR format, the request sent and response received and
        followed redirections, if any. The Mode is enforced when sending the
        request (and following redirections); custom manual requests are not
        allowed in 'Safe' mode nor in 'Protected' mode if out of scope.
        This component is optional and therefore the API will only work if it is installed
        """
        payload = {'request': request, 'apikey': apikey}
        if followredirects is not None:
            payload['followRedirects'] = followredirects
        return self._other('exim/other/sendHarRequest/', payload)
import six
class brk(object):
    """Client for the ZAP 'break' API component (request/response interception)."""

    def __init__(self, zap):
        self.zap = zap

    def _api(self, path, params=None):
        """Call the given API endpoint and unwrap the single-valued response."""
        if params is None:
            result = self.zap._request(self.zap.base + path)
        else:
            result = self.zap._request(self.zap.base + path, params)
        return six.next(six.itervalues(result))

    @property
    def is_break_all(self):
        """Return True if ZAP will break on both requests and responses."""
        return self._api('break/view/isBreakAll/')

    @property
    def is_break_request(self):
        """Return True if ZAP will break on requests."""
        return self._api('break/view/isBreakRequest/')

    @property
    def is_break_response(self):
        """Return True if ZAP will break on responses."""
        return self._api('break/view/isBreakResponse/')

    @property
    def http_message(self):
        """Return the HTTP message currently intercepted (if any)."""
        return self._api('break/view/httpMessage/')

    def brk(self, type, state, scope=None, apikey=''):
        """Control the global break functionality.

        The type may be one of: http-all, http-request or http-response. The
        state may be true (for turning break on for the specified type) or
        false (for turning break off). Scope is not currently used.
        """
        payload = {'type': type, 'state': state, 'apikey': apikey}
        if scope is not None:
            payload['scope'] = scope
        return self._api('break/action/break/', payload)

    def set_http_message(self, httpheader, httpbody=None, apikey=''):
        """Overwrite the currently intercepted message with the data provided."""
        payload = {'httpHeader': httpheader, 'apikey': apikey}
        if httpbody is not None:
            payload['httpBody'] = httpbody
        return self._api('break/action/setHttpMessage/', payload)

    def cont(self, apikey=''):
        """Submit the currently intercepted message and unset the global request/response breakpoints."""
        return self._api('break/action/continue/', {'apikey': apikey})

    def step(self, apikey=''):
        """Submit the currently intercepted message; the next request or response will automatically be intercepted."""
        return self._api('break/action/step/', {'apikey': apikey})

    def drop(self, apikey=''):
        """Drop the currently intercepted message."""
        return self._api('break/action/drop/', {'apikey': apikey})

    def add_http_breakpoint(self, string, location, match, inverse, ignorecase, apikey=''):
        """Add a custom HTTP breakpoint.

        The string is the string to match. Location may be one of: url,
        request_header, request_body, response_header or response_body. Match
        may be: contains or regex. Inverse (match) may be true or false.
        Lastly, ignorecase (when matching the string) may be true or false.
        """
        return self._api('break/action/addHttpBreakpoint/', {'string': string, 'location': location, 'match': match, 'inverse': inverse, 'ignorecase': ignorecase, 'apikey': apikey})

    def remove_http_breakpoint(self, string, location, match, inverse, ignorecase, apikey=''):
        """Remove the specified breakpoint."""
        return self._api('break/action/removeHttpBreakpoint/', {'string': string, 'location': location, 'match': match, 'inverse': inverse, 'ignorecase': ignorecase, 'apikey': apikey})
import six
class search(object):
    """Client for the ZAP 'search' API component (regex search over messages)."""

    def __init__(self, zap):
        self.zap = zap

    @staticmethod
    def _build(regex, baseurl, start, count, apikey=None):
        """Build the request parameters, omitting optional values not supplied.

        Insertion order matches the hand-written originals: regex first,
        apikey (when present) second, then baseurl/start/count.
        """
        params = {'regex': regex}
        if apikey is not None:
            params['apikey'] = apikey
        for name, value in (('baseurl', baseurl), ('start', start), ('count', count)):
            if value is not None:
                params[name] = value
        return params

    def _view(self, path, params):
        """Call a view endpoint and unwrap the single-valued response."""
        result = self.zap._request(self.zap.base + path, params)
        return six.next(six.itervalues(result))

    def _other(self, path, params):
        """Call an 'other' endpoint, which returns raw (non-JSON-wrapped) data."""
        return self.zap._request_other(self.zap.base_other + path, params)

    def urls_by_url_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the URLs of the HTTP messages whose URL matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/urlsByUrlRegex/', self._build(regex, baseurl, start, count))

    def urls_by_request_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the URLs of the HTTP messages whose request matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/urlsByRequestRegex/', self._build(regex, baseurl, start, count))

    def urls_by_response_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the URLs of the HTTP messages whose response matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/urlsByResponseRegex/', self._build(regex, baseurl, start, count))

    def urls_by_header_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the URLs of the HTTP messages whose header(s) match the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/urlsByHeaderRegex/', self._build(regex, baseurl, start, count))

    def messages_by_url_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the HTTP messages whose URL matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/messagesByUrlRegex/', self._build(regex, baseurl, start, count))

    def messages_by_request_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the HTTP messages whose request matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/messagesByRequestRegex/', self._build(regex, baseurl, start, count))

    def messages_by_response_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the HTTP messages whose response matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/messagesByResponseRegex/', self._build(regex, baseurl, start, count))

    def messages_by_header_regex(self, regex, baseurl=None, start=None, count=None):
        """Return the HTTP messages whose header(s) match the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._view('search/view/messagesByHeaderRegex/', self._build(regex, baseurl, start, count))

    def har_by_url_regex(self, regex, baseurl=None, start=None, count=None, apikey=''):
        """Return, in HAR format, the HTTP messages whose URL matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._other('search/other/harByUrlRegex/', self._build(regex, baseurl, start, count, apikey))

    def har_by_request_regex(self, regex, baseurl=None, start=None, count=None, apikey=''):
        """Return, in HAR format, the HTTP messages whose request matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._other('search/other/harByRequestRegex/', self._build(regex, baseurl, start, count, apikey))

    def har_by_response_regex(self, regex, baseurl=None, start=None, count=None, apikey=''):
        """Return, in HAR format, the HTTP messages whose response matches the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._other('search/other/harByResponseRegex/', self._build(regex, baseurl, start, count, apikey))

    def har_by_header_regex(self, regex, baseurl=None, start=None, count=None, apikey=''):
        """Return, in HAR format, the HTTP messages whose header(s) match the given regex, optionally filtered by URL and paginated with 'start' position and 'count' of messages."""
        return self._other('search/other/harByHeaderRegex/', self._build(regex, baseurl, start, count, apikey))
import six
class pscan(object):
    """Client for the ZAP 'pscan' API component (passive scanning)."""

    def __init__(self, zap):
        self.zap = zap

    def _api(self, path, params=None):
        """Call the given API endpoint and unwrap the single-valued response."""
        if params is None:
            result = self.zap._request(self.zap.base + path)
        else:
            result = self.zap._request(self.zap.base + path, params)
        return six.next(six.itervalues(result))

    @property
    def scan_only_in_scope(self):
        """Tell whether or not the passive scan should be performed only on messages that are in scope."""
        return self._api('pscan/view/scanOnlyInScope/')

    @property
    def records_to_scan(self):
        """The number of records the passive scanner still has to scan."""
        return self._api('pscan/view/recordsToScan/')

    @property
    def scanners(self):
        """List all passive scan rules with their ID, name, enabled state, and alert threshold."""
        return self._api('pscan/view/scanners/')

    @property
    def current_rule(self):
        """Show information about the passive scan rule currently being run (if any)."""
        return self._api('pscan/view/currentRule/')

    @property
    def current_tasks(self):
        """Show information about the passive scan tasks currently being run (if any)."""
        return self._api('pscan/view/currentTasks/')

    @property
    def max_alerts_per_rule(self):
        """Get the maximum number of alerts a passive scan rule should raise."""
        return self._api('pscan/view/maxAlertsPerRule/')

    def set_enabled(self, enabled, apikey=''):
        """Set whether or not the passive scanning is enabled (Note: the enabled state is not persisted)."""
        return self._api('pscan/action/setEnabled/', {'enabled': enabled, 'apikey': apikey})

    def set_scan_only_in_scope(self, onlyinscope, apikey=''):
        """Set whether or not the passive scan should be performed only on messages that are in scope."""
        return self._api('pscan/action/setScanOnlyInScope/', {'onlyInScope': onlyinscope, 'apikey': apikey})

    def enable_all_scanners(self, apikey=''):
        """Enable all passive scan rules."""
        return self._api('pscan/action/enableAllScanners/', {'apikey': apikey})

    def disable_all_scanners(self, apikey=''):
        """Disable all passive scan rules."""
        return self._api('pscan/action/disableAllScanners/', {'apikey': apikey})

    def enable_scanners(self, ids, apikey=''):
        """Enable all passive scan rules with the given IDs (comma separated list of IDs)."""
        return self._api('pscan/action/enableScanners/', {'ids': ids, 'apikey': apikey})

    def disable_scanners(self, ids, apikey=''):
        """Disable all passive scan rules with the given IDs (comma separated list of IDs)."""
        return self._api('pscan/action/disableScanners/', {'ids': ids, 'apikey': apikey})

    def set_scanner_alert_threshold(self, id, alertthreshold, apikey=''):
        """Set the alert threshold of the passive scan rule with the given ID.

        Accepted values for alert threshold: OFF, DEFAULT, LOW, MEDIUM and HIGH.
        """
        return self._api('pscan/action/setScannerAlertThreshold/', {'id': id, 'alertThreshold': alertthreshold, 'apikey': apikey})

    def set_max_alerts_per_rule(self, maxalerts, apikey=''):
        """Set the maximum number of alerts a passive scan rule should raise."""
        return self._api('pscan/action/setMaxAlertsPerRule/', {'maxAlerts': maxalerts, 'apikey': apikey})

    def disable_all_tags(self, apikey=''):
        """Disable all passive scan tags."""
        return self._api('pscan/action/disableAllTags/', {'apikey': apikey})

    def enable_all_tags(self, apikey=''):
        """Enable all passive scan tags."""
        return self._api('pscan/action/enableAllTags/', {'apikey': apikey})

    def clear_queue(self, apikey=''):
        """Clear the passive scan queue."""
        return self._api('pscan/action/clearQueue/', {'apikey': apikey})
import six
class stats(object):
    """Client-side bindings for the ZAP 'stats' API component."""

    def __init__(self, zap):
        self.zap = zap

    def _call(self, endpoint, params=None):
        # Send the API request and unwrap the single value of the JSON reply.
        if params is None:
            reply = self.zap._request(self.zap.base + endpoint)
        else:
            reply = self.zap._request(self.zap.base + endpoint, params)
        return six.next(six.itervalues(reply))

    def stats(self, keyprefix=None):
        """Statistics, optionally filtered by a key prefix."""
        params = {} if keyprefix is None else {'keyPrefix': keyprefix}
        return self._call('stats/view/stats/', params)

    def all_sites_stats(self, keyprefix=None):
        """Get all of the site based statistics, optionally filtered by a key prefix."""
        params = {} if keyprefix is None else {'keyPrefix': keyprefix}
        return self._call('stats/view/allSitesStats/', params)

    def site_stats(self, site, keyprefix=None):
        """Get the statistics for the given site, optionally filtered by a key prefix."""
        params = {'site': site}
        if keyprefix is not None:
            params['keyPrefix'] = keyprefix
        return self._call('stats/view/siteStats/', params)

    @property
    def option_statsd_host(self):
        """The Statsd service hostname."""
        return self._call('stats/view/optionStatsdHost/')

    @property
    def option_statsd_port(self):
        """The Statsd service port."""
        return self._call('stats/view/optionStatsdPort/')

    @property
    def option_statsd_prefix(self):
        """The prefix applied to all stats sent to the configured Statsd service."""
        return self._call('stats/view/optionStatsdPrefix/')

    @property
    def option_in_memory_enabled(self):
        """'true' if in memory statistics are enabled, otherwise 'false'."""
        return self._call('stats/view/optionInMemoryEnabled/')

    @property
    def option_statsd_enabled(self):
        """'true' if a Statsd server has been correctly configured, otherwise 'false'."""
        return self._call('stats/view/optionStatsdEnabled/')

    def clear_stats(self, keyprefix=None, apikey=''):
        """Clear all of the statistics, optionally filtered by a key prefix."""
        params = {'apikey': apikey}
        if keyprefix is not None:
            params['keyPrefix'] = keyprefix
        return self._call('stats/action/clearStats/', params)

    def set_option_statsd_host(self, string, apikey=''):
        """Set the Statsd service hostname; supply an empty string to stop using Statsd."""
        return self._call('stats/action/setOptionStatsdHost/', {'String': string, 'apikey': apikey})

    def set_option_statsd_prefix(self, string, apikey=''):
        """Set the prefix applied to all stats sent to the configured Statsd service."""
        return self._call('stats/action/setOptionStatsdPrefix/', {'String': string, 'apikey': apikey})

    def set_option_in_memory_enabled(self, boolean, apikey=''):
        """Set whether in memory statistics are enabled."""
        return self._call('stats/action/setOptionInMemoryEnabled/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_statsd_port(self, integer, apikey=''):
        """Set the Statsd service port."""
        return self._call('stats/action/setOptionStatsdPort/', {'Integer': integer, 'apikey': apikey})
import six
class autoupdate(object):
    """Client-side bindings for the ZAP 'autoupdate' API component."""

    def __init__(self, zap):
        self.zap = zap

    def _call(self, endpoint, params=None):
        # Send the API request and unwrap the single value of the JSON reply.
        if params is None:
            reply = self.zap._request(self.zap.base + endpoint)
        else:
            reply = self.zap._request(self.zap.base + endpoint, params)
        return six.next(six.itervalues(reply))

    @property
    def latest_version_number(self):
        """The latest version number."""
        return self._call('autoupdate/view/latestVersionNumber/')

    @property
    def is_latest_version(self):
        """'true' if ZAP is on the latest version."""
        return self._call('autoupdate/view/isLatestVersion/')

    @property
    def installed_addons(self):
        """A list of all of the installed add-ons."""
        return self._call('autoupdate/view/installedAddons/')

    @property
    def local_addons(self):
        """A list with all local add-ons, installed or not."""
        return self._call('autoupdate/view/localAddons/')

    @property
    def new_addons(self):
        """Add-ons added to the Marketplace since the last check for updates."""
        return self._call('autoupdate/view/newAddons/')

    @property
    def updated_addons(self):
        """Add-ons changed in the Marketplace since the last check for updates."""
        return self._call('autoupdate/view/updatedAddons/')

    @property
    def marketplace_addons(self):
        """All of the add-ons on the ZAP Marketplace (read once and then cached)."""
        return self._call('autoupdate/view/marketplaceAddons/')

    @property
    def option_addon_directories(self):
        """ZAP API view 'optionAddonDirectories'."""
        return self._call('autoupdate/view/optionAddonDirectories/')

    @property
    def option_day_last_checked(self):
        """ZAP API view 'optionDayLastChecked'."""
        return self._call('autoupdate/view/optionDayLastChecked/')

    @property
    def option_day_last_install_warned(self):
        """ZAP API view 'optionDayLastInstallWarned'."""
        return self._call('autoupdate/view/optionDayLastInstallWarned/')

    @property
    def option_day_last_update_warned(self):
        """ZAP API view 'optionDayLastUpdateWarned'."""
        return self._call('autoupdate/view/optionDayLastUpdateWarned/')

    @property
    def option_download_directory(self):
        """ZAP API view 'optionDownloadDirectory'."""
        return self._call('autoupdate/view/optionDownloadDirectory/')

    @property
    def option_check_addon_updates(self):
        """ZAP API view 'optionCheckAddonUpdates'."""
        return self._call('autoupdate/view/optionCheckAddonUpdates/')

    @property
    def option_check_on_start(self):
        """ZAP API view 'optionCheckOnStart'."""
        return self._call('autoupdate/view/optionCheckOnStart/')

    @property
    def option_download_new_release(self):
        """ZAP API view 'optionDownloadNewRelease'."""
        return self._call('autoupdate/view/optionDownloadNewRelease/')

    @property
    def option_install_addon_updates(self):
        """ZAP API view 'optionInstallAddonUpdates'."""
        return self._call('autoupdate/view/optionInstallAddonUpdates/')

    @property
    def option_install_scanner_rules(self):
        """ZAP API view 'optionInstallScannerRules'."""
        return self._call('autoupdate/view/optionInstallScannerRules/')

    @property
    def option_report_alpha_addons(self):
        """ZAP API view 'optionReportAlphaAddons'."""
        return self._call('autoupdate/view/optionReportAlphaAddons/')

    @property
    def option_report_beta_addons(self):
        """ZAP API view 'optionReportBetaAddons'."""
        return self._call('autoupdate/view/optionReportBetaAddons/')

    @property
    def option_report_release_addons(self):
        """ZAP API view 'optionReportReleaseAddons'."""
        return self._call('autoupdate/view/optionReportReleaseAddons/')

    def download_latest_release(self, apikey=''):
        """Download the latest release, if any."""
        return self._call('autoupdate/action/downloadLatestRelease/', {'apikey': apikey})

    def install_addon(self, id, apikey=''):
        """Install or update the specified add-on, returning when complete (i.e. not asynchronously)."""
        return self._call('autoupdate/action/installAddon/', {'id': id, 'apikey': apikey})

    def install_local_addon(self, file, apikey=''):
        """ZAP API action 'installLocalAddon'."""
        return self._call('autoupdate/action/installLocalAddon/', {'file': file, 'apikey': apikey})

    def uninstall_addon(self, id, apikey=''):
        """Uninstall the specified add-on."""
        return self._call('autoupdate/action/uninstallAddon/', {'id': id, 'apikey': apikey})

    def set_option_check_addon_updates(self, boolean, apikey=''):
        """ZAP API action 'setOptionCheckAddonUpdates'."""
        return self._call('autoupdate/action/setOptionCheckAddonUpdates/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_check_on_start(self, boolean, apikey=''):
        """ZAP API action 'setOptionCheckOnStart'."""
        return self._call('autoupdate/action/setOptionCheckOnStart/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_download_new_release(self, boolean, apikey=''):
        """ZAP API action 'setOptionDownloadNewRelease'."""
        return self._call('autoupdate/action/setOptionDownloadNewRelease/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_install_addon_updates(self, boolean, apikey=''):
        """ZAP API action 'setOptionInstallAddonUpdates'."""
        return self._call('autoupdate/action/setOptionInstallAddonUpdates/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_install_scanner_rules(self, boolean, apikey=''):
        """ZAP API action 'setOptionInstallScannerRules'."""
        return self._call('autoupdate/action/setOptionInstallScannerRules/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_report_alpha_addons(self, boolean, apikey=''):
        """ZAP API action 'setOptionReportAlphaAddons'."""
        return self._call('autoupdate/action/setOptionReportAlphaAddons/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_report_beta_addons(self, boolean, apikey=''):
        """ZAP API action 'setOptionReportBetaAddons'."""
        return self._call('autoupdate/action/setOptionReportBetaAddons/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_report_release_addons(self, boolean, apikey=''):
        """ZAP API action 'setOptionReportReleaseAddons'."""
        return self._call('autoupdate/action/setOptionReportReleaseAddons/', {'Boolean': boolean, 'apikey': apikey})
import six
class ascan(object):
def __init__(self, zap):
self.zap = zap
def status(self, scanid=None):
"""
"""
params = {}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/status/', params)))
def scan_progress(self, scanid=None):
"""
"""
params = {}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scanProgress/', params)))
def messages_ids(self, scanid):
"""
Gets the IDs of the messages sent during the scan with the given ID. A message can be obtained with 'message' core view.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/messagesIds/', {'scanId': scanid})))
def alerts_ids(self, scanid):
"""
Gets the IDs of the alerts raised during the scan with the given ID. An alert can be obtained with 'alert' core view.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/alertsIds/', {'scanId': scanid})))
@property
def scans(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scans/')))
@property
def scan_policy_names(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scanPolicyNames/')))
@property
def excluded_from_scan(self):
"""
Gets the regexes of URLs excluded from the active scans.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/excludedFromScan/')))
def scanners(self, scanpolicyname=None, policyid=None):
"""
Gets the scan rules, optionally, of the given scan policy or scanner policy/category ID.
"""
params = {}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
if policyid is not None:
params['policyId'] = policyid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scanners/', params)))
def policies(self, scanpolicyname=None, policyid=None):
"""
"""
params = {}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
if policyid is not None:
params['policyId'] = policyid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/policies/', params)))
@property
def attack_mode_queue(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/attackModeQueue/')))
@property
def excluded_params(self):
"""
Gets all the parameters that are excluded. For each parameter the following are shown: the name, the URL, and the parameter type.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/excludedParams/')))
@property
def option_excluded_param_list(self):
"""
Use view excludedParams instead.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionExcludedParamList/')))
@property
def excluded_param_types(self):
"""
Gets all the types of excluded parameters. For each type the following are shown: the ID and the name.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/excludedParamTypes/')))
@property
def option_attack_policy(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionAttackPolicy/')))
@property
def option_default_policy(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionDefaultPolicy/')))
@property
def option_delay_in_ms(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionDelayInMs/')))
@property
def option_handle_anti_csrf_tokens(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionHandleAntiCSRFTokens/')))
@property
def option_host_per_scan(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionHostPerScan/')))
@property
def option_max_alerts_per_rule(self):
"""
Gets the maximum number of alerts that a rule can raise before being skipped.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxAlertsPerRule/')))
@property
def option_max_chart_time_in_mins(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxChartTimeInMins/')))
@property
def option_max_results_to_list(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxResultsToList/')))
@property
def option_max_rule_duration_in_mins(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxRuleDurationInMins/')))
@property
def option_max_scan_duration_in_mins(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxScanDurationInMins/')))
@property
def option_max_scans_in_ui(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxScansInUI/')))
@property
def option_target_params_enabled_rpc(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionTargetParamsEnabledRPC/')))
@property
def option_target_params_injectable(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionTargetParamsInjectable/')))
@property
def option_thread_per_host(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionThreadPerHost/')))
@property
def option_add_query_param(self):
"""
Tells whether or not the active scanner should add a query parameter to GET request that don't have parameters to start with.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionAddQueryParam/')))
@property
def option_allow_attack_on_start(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionAllowAttackOnStart/')))
@property
def option_inject_plugin_id_in_header(self):
"""
Tells whether or not the active scanner should inject the HTTP request header X-ZAP-Scan-ID, with the ID of the scan rule that's sending the requests.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionInjectPluginIdInHeader/')))
@property
def option_prompt_in_attack_mode(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionPromptInAttackMode/')))
@property
def option_prompt_to_clear_finished_scans(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionPromptToClearFinishedScans/')))
@property
def option_rescan_in_attack_mode(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionRescanInAttackMode/')))
@property
def option_scan_headers_all_requests(self):
"""
Tells whether or not the HTTP Headers of all requests should be scanned. Not just requests that send parameters, through the query or request body.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionScanHeadersAllRequests/')))
@property
def option_scan_null_json_values(self):
"""
Tells whether or not the active scanner should scan null JSON values.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionScanNullJsonValues/')))
@property
def option_show_advanced_dialog(self):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionShowAdvancedDialog/')))
def scan(self, url=None, recurse=None, inscopeonly=None, scanpolicyname=None, method=None, postdata=None, contextid=None, apikey=''):
"""
Runs the active scanner against the given URL or Context. Optionally, the 'recurse' parameter can be used to scan URLs under the given URL, the parameter 'inScopeOnly' can be used to constrain the scan to URLs that are in scope (ignored if a Context is specified), the parameter 'scanPolicyName' allows to specify the scan policy (if none is given it uses the default scan policy), the parameters 'method' and 'postData' allow to select a given request in conjunction with the given URL.
"""
params = {'apikey': apikey}
if url is not None:
params['url'] = url
if recurse is not None:
params['recurse'] = recurse
if inscopeonly is not None:
params['inScopeOnly'] = inscopeonly
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
if method is not None:
params['method'] = method
if postdata is not None:
params['postData'] = postdata
if contextid is not None:
params['contextId'] = contextid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/scan/', params)))
def scan_as_user(self, url=None, contextid=None, userid=None, recurse=None, scanpolicyname=None, method=None, postdata=None, apikey=''):
"""
Active Scans from the perspective of a User, obtained using the given Context ID and User ID. See 'scan' action for more details.
"""
params = {'apikey': apikey}
if url is not None:
params['url'] = url
if contextid is not None:
params['contextId'] = contextid
if userid is not None:
params['userId'] = userid
if recurse is not None:
params['recurse'] = recurse
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
if method is not None:
params['method'] = method
if postdata is not None:
params['postData'] = postdata
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/scanAsUser/', params)))
def pause(self, scanid, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/pause/', {'scanId': scanid, 'apikey': apikey})))
def resume(self, scanid, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/resume/', {'scanId': scanid, 'apikey': apikey})))
def stop(self, scanid, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/stop/', {'scanId': scanid, 'apikey': apikey})))
def remove_scan(self, scanid, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeScan/', {'scanId': scanid, 'apikey': apikey})))
def pause_all_scans(self, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/pauseAllScans/', {'apikey': apikey})))
def resume_all_scans(self, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/resumeAllScans/', {'apikey': apikey})))
def stop_all_scans(self, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/stopAllScans/', {'apikey': apikey})))
def remove_all_scans(self, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeAllScans/', {'apikey': apikey})))
def clear_excluded_from_scan(self, apikey=''):
"""
Clears the regexes of URLs excluded from the active scans.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/clearExcludedFromScan/', {'apikey': apikey})))
def exclude_from_scan(self, regex, apikey=''):
"""
Adds a regex of URLs that should be excluded from the active scans.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/excludeFromScan/', {'regex': regex, 'apikey': apikey})))
def enable_all_scanners(self, scanpolicyname=None, apikey=''):
"""
Enables all scan rules of the scan policy with the given name, or the default if none given.
"""
params = {'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/enableAllScanners/', params)))
def disable_all_scanners(self, scanpolicyname=None, apikey=''):
"""
Disables all scan rules of the scan policy with the given name, or the default if none given.
"""
params = {'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/disableAllScanners/', params)))
def enable_scanners(self, ids, scanpolicyname=None, apikey=''):
"""
Enables the scan rules with the given IDs (comma separated list of IDs) of the scan policy with the given name, or the default if none given.
"""
params = {'ids': ids, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/enableScanners/', params)))
def disable_scanners(self, ids, scanpolicyname=None, apikey=''):
"""
Disables the scan rules with the given IDs (comma separated list of IDs) of the scan policy with the given name, or the default if none given.
"""
params = {'ids': ids, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/disableScanners/', params)))
def set_enabled_policies(self, ids, scanpolicyname=None, apikey=''):
"""
"""
params = {'ids': ids, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setEnabledPolicies/', params)))
def set_policy_attack_strength(self, id, attackstrength, scanpolicyname=None, apikey=''):
"""
"""
params = {'id': id, 'attackStrength': attackstrength, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setPolicyAttackStrength/', params)))
def set_policy_alert_threshold(self, id, alertthreshold, scanpolicyname=None, apikey=''):
"""
"""
params = {'id': id, 'alertThreshold': alertthreshold, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setPolicyAlertThreshold/', params)))
def set_scanner_attack_strength(self, id, attackstrength, scanpolicyname=None, apikey=''):
"""
"""
params = {'id': id, 'attackStrength': attackstrength, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setScannerAttackStrength/', params)))
def set_scanner_alert_threshold(self, id, alertthreshold, scanpolicyname=None, apikey=''):
"""
"""
params = {'id': id, 'alertThreshold': alertthreshold, 'apikey': apikey}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setScannerAlertThreshold/', params)))
def add_scan_policy(self, scanpolicyname, alertthreshold=None, attackstrength=None, apikey=''):
"""
"""
params = {'scanPolicyName': scanpolicyname, 'apikey': apikey}
if alertthreshold is not None:
params['alertThreshold'] = alertthreshold
if attackstrength is not None:
params['attackStrength'] = attackstrength
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/addScanPolicy/', params)))
def remove_scan_policy(self, scanpolicyname, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeScanPolicy/', {'scanPolicyName': scanpolicyname, 'apikey': apikey})))
def update_scan_policy(self, scanpolicyname, alertthreshold=None, attackstrength=None, apikey=''):
"""
"""
params = {'scanPolicyName': scanpolicyname, 'apikey': apikey}
if alertthreshold is not None:
params['alertThreshold'] = alertthreshold
if attackstrength is not None:
params['attackStrength'] = attackstrength
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/updateScanPolicy/', params)))
def import_scan_policy(self, path, apikey=''):
"""
Imports a Scan Policy using the given file system path.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/importScanPolicy/', {'path': path, 'apikey': apikey})))
def add_excluded_param(self, name, type=None, url=None, apikey=''):
"""
Adds a new parameter excluded from the scan, using the specified name. Optionally sets if the new entry applies to a specific URL (default, all URLs) and sets the ID of the type of the parameter (default, ID of any type). The type IDs can be obtained with the view excludedParamTypes.
"""
params = {'name': name, 'apikey': apikey}
if type is not None:
params['type'] = type
if url is not None:
params['url'] = url
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/addExcludedParam/', params)))
def modify_excluded_param(self, idx, name=None, type=None, url=None, apikey=''):
"""
Modifies a parameter excluded from the scan. Allows to modify the name, the URL and the type of parameter. The parameter is selected with its index, which can be obtained with the view excludedParams.
"""
params = {'idx': idx, 'apikey': apikey}
if name is not None:
params['name'] = name
if type is not None:
params['type'] = type
if url is not None:
params['url'] = url
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/modifyExcludedParam/', params)))
def remove_excluded_param(self, idx, apikey=''):
"""
Removes a parameter excluded from the scan, with the given index. The index can be obtained with the view excludedParams.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeExcludedParam/', {'idx': idx, 'apikey': apikey})))
def skip_scanner(self, scanid, scannerid, apikey=''):
"""
Skips the scan rule using the given IDs of the scan and the scan rule.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/skipScanner/', {'scanId': scanid, 'scannerId': scannerid, 'apikey': apikey})))
def set_option_attack_policy(self, string, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionAttackPolicy/', {'String': string, 'apikey': apikey})))
def set_option_default_policy(self, string, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionDefaultPolicy/', {'String': string, 'apikey': apikey})))
def set_option_add_query_param(self, boolean, apikey=''):
"""
Sets whether or not the active scanner should add a query param to GET requests which do not have parameters to start with.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionAddQueryParam/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_allow_attack_on_start(self, boolean, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionAllowAttackOnStart/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_delay_in_ms(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionDelayInMs/', {'Integer': integer, 'apikey': apikey})))
def set_option_handle_anti_csrf_tokens(self, boolean, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionHandleAntiCSRFTokens/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_host_per_scan(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionHostPerScan/', {'Integer': integer, 'apikey': apikey})))
def set_option_inject_plugin_id_in_header(self, boolean, apikey=''):
"""
Sets whether or not the active scanner should inject the HTTP request header X-ZAP-Scan-ID, with the ID of the scan rule that's sending the requests.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionInjectPluginIdInHeader/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_max_alerts_per_rule(self, integer, apikey=''):
"""
Sets the maximum number of alerts that a rule can raise before being skipped.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxAlertsPerRule/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_chart_time_in_mins(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxChartTimeInMins/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_results_to_list(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxResultsToList/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_rule_duration_in_mins(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxRuleDurationInMins/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_scan_duration_in_mins(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxScanDurationInMins/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_scans_in_ui(self, integer, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxScansInUI/', {'Integer': integer, 'apikey': apikey})))
def set_option_prompt_in_attack_mode(self, boolean, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionPromptInAttackMode/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_prompt_to_clear_finished_scans(self, boolean, apikey=''):
"""
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionPromptToClearFinishedScans/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_rescan_in_attack_mode(self, boolean, apikey=''):
    """Set the active scanner's 'rescanInAttackMode' option via the ZAP API."""
    endpoint = self.zap.base + 'ascan/action/setOptionRescanInAttackMode/'
    response = self.zap._request(endpoint, {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_scan_headers_all_requests(self, boolean, apikey=''):
    """
    Sets whether or not the HTTP Headers of all requests should be scanned. Not just requests that send parameters, through the query or request body.
    """
    endpoint = self.zap.base + 'ascan/action/setOptionScanHeadersAllRequests/'
    response = self.zap._request(endpoint, {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_scan_null_json_values(self, boolean, apikey=''):
    """
    Sets whether or not the active scanner should scan null JSON values.
    """
    endpoint = self.zap.base + 'ascan/action/setOptionScanNullJsonValues/'
    response = self.zap._request(endpoint, {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_show_advanced_dialog(self, boolean, apikey=''):
    """Set the active scanner's 'showAdvancedDialog' option via the ZAP API."""
    endpoint = self.zap.base + 'ascan/action/setOptionShowAdvancedDialog/'
    response = self.zap._request(endpoint, {'Boolean': boolean, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_target_params_enabled_rpc(self, integer, apikey=''):
    """Set the active scanner's 'targetParamsEnabledRPC' option (integer value; presumably a bitmask of RPC parameter types -- confirm against ZAP docs)."""
    endpoint = self.zap.base + 'ascan/action/setOptionTargetParamsEnabledRPC/'
    response = self.zap._request(endpoint, {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_target_params_injectable(self, integer, apikey=''):
    """Set the active scanner's 'targetParamsInjectable' option (integer value; presumably a bitmask of injectable parameter locations -- confirm against ZAP docs)."""
    endpoint = self.zap.base + 'ascan/action/setOptionTargetParamsInjectable/'
    response = self.zap._request(endpoint, {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
def set_option_thread_per_host(self, integer, apikey=''):
    """Set the active scanner's 'threadPerHost' option via the ZAP API."""
    endpoint = self.zap.base + 'ascan/action/setOptionThreadPerHost/'
    response = self.zap._request(endpoint, {'Integer': integer, 'apikey': apikey})
    return six.next(six.itervalues(response))
import six
class httpSessions(object):
    """API client for the ZAP 'httpSessions' component.

    Each method issues an HTTP call to the corresponding ZAP API endpoint via
    ``self.zap._request`` and returns the first value of the mapping that the
    call returns.
    """

    def __init__(self, zap):
        # The ZAPv2 client instance used to issue all API requests.
        self.zap = zap

    @property
    def sites(self):
        """
        Gets all of the sites that have sessions.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/view/sites/')))

    def sessions(self, site, session=None):
        """
        Gets the sessions for the given site. Optionally returning just the session with the given name.
        """
        # 'session' is only sent when the caller asks for a specific session.
        params = {'site': site}
        if session is not None:
            params['session'] = session
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/view/sessions/', params)))

    def active_session(self, site):
        """
        Gets the name of the active session for the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/view/activeSession/', {'site': site})))

    def session_tokens(self, site):
        """
        Gets the names of the session tokens for the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/view/sessionTokens/', {'site': site})))

    @property
    def default_session_tokens(self):
        """
        Gets the default session tokens.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/view/defaultSessionTokens/')))

    def create_empty_session(self, site, session=None, apikey=''):
        """
        Creates an empty session for the given site. Optionally with the given name.
        """
        params = {'site': site, 'apikey': apikey}
        if session is not None:
            params['session'] = session
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/createEmptySession/', params)))

    def remove_session(self, site, session, apikey=''):
        """
        Removes the session from the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/removeSession/', {'site': site, 'session': session, 'apikey': apikey})))

    def set_active_session(self, site, session, apikey=''):
        """
        Sets the given session as active for the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/setActiveSession/', {'site': site, 'session': session, 'apikey': apikey})))

    def unset_active_session(self, site, apikey=''):
        """
        Unsets the active session of the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/unsetActiveSession/', {'site': site, 'apikey': apikey})))

    def add_session_token(self, site, sessiontoken, apikey=''):
        """
        Adds the session token to the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/addSessionToken/', {'site': site, 'sessionToken': sessiontoken, 'apikey': apikey})))

    def remove_session_token(self, site, sessiontoken, apikey=''):
        """
        Removes the session token from the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/removeSessionToken/', {'site': site, 'sessionToken': sessiontoken, 'apikey': apikey})))

    def set_session_token_value(self, site, session, sessiontoken, tokenvalue, apikey=''):
        """
        Sets the value of the session token of the given session for the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/setSessionTokenValue/', {'site': site, 'session': session, 'sessionToken': sessiontoken, 'tokenValue': tokenvalue, 'apikey': apikey})))

    def rename_session(self, site, oldsessionname, newsessionname, apikey=''):
        """
        Renames the session of the given site.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/renameSession/', {'site': site, 'oldSessionName': oldsessionname, 'newSessionName': newsessionname, 'apikey': apikey})))

    def add_default_session_token(self, sessiontoken, tokenenabled=None, apikey=''):
        """
        Adds a default session token with the given name and enabled state.
        """
        params = {'sessionToken': sessiontoken, 'apikey': apikey}
        if tokenenabled is not None:
            params['tokenEnabled'] = tokenenabled
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/addDefaultSessionToken/', params)))

    def set_default_session_token_enabled(self, sessiontoken, tokenenabled, apikey=''):
        """
        Sets whether or not the default session token with the given name is enabled.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/setDefaultSessionTokenEnabled/', {'sessionToken': sessiontoken, 'tokenEnabled': tokenenabled, 'apikey': apikey})))

    def remove_default_session_token(self, sessiontoken, apikey=''):
        """
        Removes the default session token with the given name.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'httpSessions/action/removeDefaultSessionToken/', {'sessionToken': sessiontoken, 'apikey': apikey})))
import six
class core(object):
    """API client for the ZAP 'core' component.

    View and action methods call ZAP API endpoints via ``self.zap._request``
    and return the first value of the returned mapping; the ``*_har``,
    report, and other ``core/other/*`` methods use ``self.zap._request_other``
    and return the raw response.
    """

    def __init__(self, zap):
        # The ZAPv2 client instance used to issue all API requests.
        self.zap = zap

    @property
    def hosts(self):
        """
        Gets the name of the hosts accessed through/by ZAP
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/hosts/')))

    @property
    def sites(self):
        """
        Gets the sites accessed through/by ZAP (scheme and domain)
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/sites/')))

    def urls(self, baseurl=None):
        """
        Gets the URLs accessed through/by ZAP, optionally filtering by (base) URL.
        """
        params = {}
        if baseurl is not None:
            params['baseurl'] = baseurl
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/urls/', params)))

    def child_nodes(self, url=None):
        """
        Gets the child nodes underneath the specified URL in the Sites tree
        """
        params = {}
        if url is not None:
            params['url'] = url
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/childNodes/', params)))

    def message(self, id):
        """
        Gets the HTTP message with the given ID. Returns the ID, request/response headers and bodies, cookies, note, type, RTT, and timestamp.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/message/', {'id': id})))

    def messages(self, baseurl=None, start=None, count=None):
        """
        Gets the HTTP messages sent by ZAP, request and response, optionally filtered by URL and paginated with 'start' position and 'count' of messages
        """
        params = {}
        if baseurl is not None:
            params['baseurl'] = baseurl
        if start is not None:
            params['start'] = start
        if count is not None:
            params['count'] = count
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/messages/', params)))

    def messages_by_id(self, ids):
        """
        Gets the HTTP messages with the given IDs.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/messagesById/', {'ids': ids})))

    def number_of_messages(self, baseurl=None):
        """
        Gets the number of messages, optionally filtering by URL
        """
        params = {}
        if baseurl is not None:
            params['baseurl'] = baseurl
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/numberOfMessages/', params)))

    @property
    def mode(self):
        """
        Gets the mode
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/mode/')))

    @property
    def version(self):
        """
        Gets ZAP version
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/version/')))

    @property
    def excluded_from_proxy(self):
        """
        Gets the regular expressions, applied to URLs, to exclude from the local proxies.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/excludedFromProxy/')))

    @property
    def session_location(self):
        """
        Gets the location of the current session file
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/sessionLocation/')))

    @property
    def proxy_chain_excluded_domains(self):
        """
        Gets all the domains that are excluded from the outgoing proxy. For each domain the following are shown: the index, the value (domain), if enabled, and if specified as a regex.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/proxyChainExcludedDomains/')))

    @property
    def zap_home_path(self):
        """
        Gets the path to ZAP's home directory.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/zapHomePath/')))

    @property
    def option_maximum_alert_instances(self):
        """
        Gets the maximum number of alert instances to include in a report.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionMaximumAlertInstances/')))

    @property
    def option_merge_related_alerts(self):
        """
        Gets whether or not related alerts will be merged in any reports generated.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionMergeRelatedAlerts/')))

    @property
    def option_alert_overrides_file_path(self):
        """
        Gets the path to the file with alert overrides.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionAlertOverridesFilePath/')))

    @property
    def home_directory(self):
        """
        Gets the 'homeDirectory' value reported by ZAP. NOTE(review): relation to zapHomePath not verified here.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/homeDirectory/')))

    @property
    def option_proxy_chain_skip_name(self):
        """
        Use view proxyChainExcludedDomains instead.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainSkipName/')))

    @property
    def option_proxy_excluded_domains(self):
        """
        Use view proxyChainExcludedDomains instead.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomains/')))

    @property
    def option_proxy_excluded_domains_enabled(self):
        """
        Use view proxyChainExcludedDomains instead.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomainsEnabled/')))

    def alert(self, id):
        """
        Gets the alert with the given ID, the corresponding HTTP message can be obtained with the 'messageId' field and 'message' API method
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/alert/', {'id': id})))

    def alerts(self, baseurl=None, start=None, count=None, riskid=None):
        """
        Gets the alerts raised by ZAP, optionally filtering by URL or riskId, and paginating with 'start' position and 'count' of alerts
        """
        params = {}
        if baseurl is not None:
            params['baseurl'] = baseurl
        if start is not None:
            params['start'] = start
        if count is not None:
            params['count'] = count
        if riskid is not None:
            params['riskId'] = riskid
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/alerts/', params)))

    def alerts_summary(self, baseurl=None):
        """
        Gets number of alerts grouped by each risk level, optionally filtering by URL
        """
        params = {}
        if baseurl is not None:
            params['baseurl'] = baseurl
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/alertsSummary/', params)))

    def number_of_alerts(self, baseurl=None, riskid=None):
        """
        Gets the number of alerts, optionally filtering by URL or riskId
        """
        params = {}
        if baseurl is not None:
            params['baseurl'] = baseurl
        if riskid is not None:
            params['riskId'] = riskid
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/numberOfAlerts/', params)))

    @property
    def option_default_user_agent(self):
        """
        Gets the user agent that ZAP should use when creating HTTP messages (for example, spider messages or CONNECT requests to outgoing proxy).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionDefaultUserAgent/')))

    @property
    def option_dns_ttl_successful_queries(self):
        """
        Gets the TTL (in seconds) of successful DNS queries.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionDnsTtlSuccessfulQueries/')))

    @property
    def option_http_state(self):
        """
        Gets the 'httpState' option.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionHttpState/')))

    @property
    def option_http_state_enabled(self):
        """
        Gets whether or not the HTTP state is enabled (boolean option).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionHttpStateEnabled/')))

    @property
    def option_proxy_chain_name(self):
        """
        Gets the address/hostname of the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainName/')))

    @property
    def option_proxy_chain_password(self):
        """
        Gets the password used to authenticate to the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainPassword/')))

    @property
    def option_proxy_chain_port(self):
        """
        Gets the port of the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainPort/')))

    @property
    def option_proxy_chain_prompt(self):
        """
        Gets the 'proxyChainPrompt' option (presumably whether to prompt for outgoing proxy credentials -- confirm).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainPrompt/')))

    @property
    def option_proxy_chain_realm(self):
        """
        Gets the authentication realm of the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainRealm/')))

    @property
    def option_proxy_chain_user_name(self):
        """
        Gets the user name used to authenticate to the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionProxyChainUserName/')))

    @property
    def option_single_cookie_request_header(self):
        """
        Gets the 'singleCookieRequestHeader' option (boolean).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionSingleCookieRequestHeader/')))

    @property
    def option_timeout_in_secs(self):
        """
        Gets the connection time out (in seconds).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionTimeoutInSecs/')))

    @property
    def option_use_proxy_chain(self):
        """
        Gets whether or not the outgoing proxy should be used.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionUseProxyChain/')))

    @property
    def option_use_proxy_chain_auth(self):
        """
        Gets whether or not authentication to the outgoing proxy should be used.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionUseProxyChainAuth/')))

    @property
    def option_use_socks_proxy(self):
        """
        Gets whether or not the SOCKS proxy should be used.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/view/optionUseSocksProxy/')))

    def access_url(self, url, followredirects=None, apikey=''):
        """
        Convenient and simple action to access a URL, optionally following redirections. Returns the request sent and response received and followed redirections, if any. Other actions are available which offer more control on what is sent, like, 'sendRequest' or 'sendHarRequest'.
        """
        params = {'url': url, 'apikey': apikey}
        if followredirects is not None:
            params['followRedirects'] = followredirects
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/accessUrl/', params)))

    def shutdown(self, apikey=''):
        """
        Shuts down ZAP
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/shutdown/', {'apikey': apikey})))

    def new_session(self, name=None, overwrite=None, apikey=''):
        """
        Creates a new session, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
        """
        params = {'apikey': apikey}
        if name is not None:
            params['name'] = name
        if overwrite is not None:
            params['overwrite'] = overwrite
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/newSession/', params)))

    def load_session(self, name, apikey=''):
        """
        Loads the session with the given name. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/loadSession/', {'name': name, 'apikey': apikey})))

    def save_session(self, name, overwrite=None, apikey=''):
        """
        Saves the session.
        """
        params = {'name': name, 'apikey': apikey}
        if overwrite is not None:
            params['overwrite'] = overwrite
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/saveSession/', params)))

    def snapshot_session(self, name=None, overwrite=None, apikey=''):
        """
        Snapshots the session, optionally with the given name, and overwriting existing files. If no name is specified the name of the current session with a timestamp appended is used. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
        """
        params = {'apikey': apikey}
        if name is not None:
            params['name'] = name
        if overwrite is not None:
            params['overwrite'] = overwrite
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/snapshotSession/', params)))

    def clear_excluded_from_proxy(self, apikey=''):
        """
        Clears the regexes of URLs excluded from the local proxies.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/clearExcludedFromProxy/', {'apikey': apikey})))

    def exclude_from_proxy(self, regex, apikey=''):
        """
        Adds a regex of URLs that should be excluded from the local proxies.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/excludeFromProxy/', {'regex': regex, 'apikey': apikey})))

    def set_home_directory(self, dir, apikey=''):
        """
        Sets the home directory used by ZAP.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setHomeDirectory/', {'dir': dir, 'apikey': apikey})))

    def set_mode(self, mode, apikey=''):
        """
        Sets the mode, which may be one of [safe, protect, standard, attack]
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setMode/', {'mode': mode, 'apikey': apikey})))

    def generate_root_ca(self, apikey=''):
        """
        Generates a new Root CA certificate for the local proxies.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/generateRootCA/', {'apikey': apikey})))

    def send_request(self, request, followredirects=None, apikey=''):
        """
        Sends the HTTP request, optionally following redirections. Returns the request sent and response received and followed redirections, if any. The Mode is enforced when sending the request (and following redirections), custom manual requests are not allowed in 'Safe' mode nor in 'Protected' mode if out of scope.
        """
        params = {'request': request, 'apikey': apikey}
        if followredirects is not None:
            params['followRedirects'] = followredirects
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/sendRequest/', params)))

    def run_garbage_collection(self, apikey=''):
        """
        Runs garbage collection in the ZAP process.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/runGarbageCollection/', {'apikey': apikey})))

    def delete_site_node(self, url, method=None, postdata=None, apikey=''):
        """
        Deletes the site node found in the Sites Tree on the basis of the URL, HTTP method, and post data (if applicable and specified).
        """
        params = {'url': url, 'apikey': apikey}
        if method is not None:
            params['method'] = method
        if postdata is not None:
            params['postData'] = postdata
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/deleteSiteNode/', params)))

    def add_proxy_chain_excluded_domain(self, value, isregex=None, isenabled=None, apikey=''):
        """
        Adds a domain to be excluded from the outgoing proxy, using the specified value. Optionally sets if the new entry is enabled (default, true) and whether or not the new value is specified as a regex (default, false).
        """
        params = {'value': value, 'apikey': apikey}
        if isregex is not None:
            params['isRegex'] = isregex
        if isenabled is not None:
            params['isEnabled'] = isenabled
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/addProxyChainExcludedDomain/', params)))

    def modify_proxy_chain_excluded_domain(self, idx, value=None, isregex=None, isenabled=None, apikey=''):
        """
        Modifies a domain excluded from the outgoing proxy. Allows to modify the value, if enabled or if a regex. The domain is selected with its index, which can be obtained with the view proxyChainExcludedDomains.
        """
        params = {'idx': idx, 'apikey': apikey}
        if value is not None:
            params['value'] = value
        if isregex is not None:
            params['isRegex'] = isregex
        if isenabled is not None:
            params['isEnabled'] = isenabled
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/modifyProxyChainExcludedDomain/', params)))

    def remove_proxy_chain_excluded_domain(self, idx, apikey=''):
        """
        Removes a domain excluded from the outgoing proxy, with the given index. The index can be obtained with the view proxyChainExcludedDomains.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/removeProxyChainExcludedDomain/', {'idx': idx, 'apikey': apikey})))

    def enable_all_proxy_chain_excluded_domains(self, apikey=''):
        """
        Enables all domains excluded from the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/enableAllProxyChainExcludedDomains/', {'apikey': apikey})))

    def disable_all_proxy_chain_excluded_domains(self, apikey=''):
        """
        Disables all domains excluded from the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/disableAllProxyChainExcludedDomains/', {'apikey': apikey})))

    def set_option_maximum_alert_instances(self, numberofinstances, apikey=''):
        """
        Sets the maximum number of alert instances to include in a report. A value of zero is treated as unlimited.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionMaximumAlertInstances/', {'numberOfInstances': numberofinstances, 'apikey': apikey})))

    def set_option_merge_related_alerts(self, enabled, apikey=''):
        """
        Sets whether or not related alerts will be merged in any reports generated.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionMergeRelatedAlerts/', {'enabled': enabled, 'apikey': apikey})))

    def set_option_alert_overrides_file_path(self, filepath=None, apikey=''):
        """
        Sets (or clears, if empty) the path to the file with alert overrides.
        """
        params = {'apikey': apikey}
        if filepath is not None:
            params['filePath'] = filepath
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionAlertOverridesFilePath/', params)))

    def enable_pkcs_12_client_certificate(self, filepath, password, index=None, apikey=''):
        """
        Enables use of a PKCS12 client certificate for the certificate with the given file system path, password, and optional index.
        """
        params = {'filePath': filepath, 'password': password, 'apikey': apikey}
        if index is not None:
            params['index'] = index
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/enablePKCS12ClientCertificate/', params)))

    def disable_client_certificate(self, apikey=''):
        """
        Disables the option for use of client certificates.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/disableClientCertificate/', {'apikey': apikey})))

    def delete_all_alerts(self, apikey=''):
        """
        Deletes all alerts of the current session.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/deleteAllAlerts/', {'apikey': apikey})))

    def delete_alert(self, id, apikey=''):
        """
        Deletes the alert with the given ID.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/deleteAlert/', {'id': id, 'apikey': apikey})))

    def set_option_default_user_agent(self, string, apikey=''):
        """
        Sets the user agent that ZAP should use when creating HTTP messages (for example, spider messages or CONNECT requests to outgoing proxy).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionDefaultUserAgent/', {'String': string, 'apikey': apikey})))

    def set_option_dns_ttl_successful_queries(self, integer, apikey=''):
        """
        Sets the TTL (in seconds) of successful DNS queries (applies after ZAP restart).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionDnsTtlSuccessfulQueries/', {'Integer': integer, 'apikey': apikey})))

    def set_option_http_state_enabled(self, boolean, apikey=''):
        """
        Sets whether or not the HTTP state is enabled.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionHttpStateEnabled/', {'Boolean': boolean, 'apikey': apikey})))

    def set_option_proxy_chain_name(self, string, apikey=''):
        """
        Sets the address/hostname of the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainName/', {'String': string, 'apikey': apikey})))

    def set_option_proxy_chain_password(self, string, apikey=''):
        """
        Sets the password used to authenticate to the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPassword/', {'String': string, 'apikey': apikey})))

    def set_option_proxy_chain_port(self, integer, apikey=''):
        """
        Sets the port of the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPort/', {'Integer': integer, 'apikey': apikey})))

    def set_option_proxy_chain_prompt(self, boolean, apikey=''):
        """
        Sets the 'proxyChainPrompt' option (presumably whether to prompt for outgoing proxy credentials -- confirm).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainPrompt/', {'Boolean': boolean, 'apikey': apikey})))

    def set_option_proxy_chain_realm(self, string, apikey=''):
        """
        Sets the authentication realm of the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainRealm/', {'String': string, 'apikey': apikey})))

    def set_option_proxy_chain_skip_name(self, string, apikey=''):
        """
        Use actions [add|modify|remove]ProxyChainExcludedDomain instead.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainSkipName/', {'String': string, 'apikey': apikey})))

    def set_option_proxy_chain_user_name(self, string, apikey=''):
        """
        Sets the user name used to authenticate to the outgoing proxy.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionProxyChainUserName/', {'String': string, 'apikey': apikey})))

    def set_option_single_cookie_request_header(self, boolean, apikey=''):
        """
        Sets the 'singleCookieRequestHeader' option (boolean).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionSingleCookieRequestHeader/', {'Boolean': boolean, 'apikey': apikey})))

    def set_option_timeout_in_secs(self, integer, apikey=''):
        """
        Sets the connection time out (in seconds).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionTimeoutInSecs/', {'Integer': integer, 'apikey': apikey})))

    def set_option_use_proxy_chain(self, boolean, apikey=''):
        """
        Sets whether or not the outgoing proxy should be used. The address/hostname of the outgoing proxy must be set to enable this option.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionUseProxyChain/', {'Boolean': boolean, 'apikey': apikey})))

    def set_option_use_proxy_chain_auth(self, boolean, apikey=''):
        """
        Sets whether or not authentication to the outgoing proxy should be used.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionUseProxyChainAuth/', {'Boolean': boolean, 'apikey': apikey})))

    def set_option_use_socks_proxy(self, boolean, apikey=''):
        """
        Sets whether or not the SOCKS proxy should be used.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'core/action/setOptionUseSocksProxy/', {'Boolean': boolean, 'apikey': apikey})))

    def proxy_pac(self, apikey=''):
        """
        Gets the response of the 'proxy.pac' endpoint (presumably a proxy auto-configuration file -- confirm).
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/proxy.pac/', {'apikey': apikey}))

    def rootcert(self, apikey=''):
        """
        Gets the Root CA certificate used by the local proxies.
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/rootcert/', {'apikey': apikey}))

    def setproxy(self, proxy, apikey=''):
        """
        Sets the outgoing proxy via the 'setproxy' endpoint. NOTE(review): format of the 'proxy' argument not documented here -- confirm against ZAP API docs.
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/setproxy/', {'proxy': proxy, 'apikey': apikey}))

    def xmlreport(self, apikey=''):
        """
        Generates a report in XML format
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/xmlreport/', {'apikey': apikey}))

    def htmlreport(self, apikey=''):
        """
        Generates a report in HTML format
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/htmlreport/', {'apikey': apikey}))

    def jsonreport(self, apikey=''):
        """
        Generates a report in JSON format
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/jsonreport/', {'apikey': apikey}))

    def mdreport(self, apikey=''):
        """
        Generates a report in Markdown format
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/mdreport/', {'apikey': apikey}))

    def message_har(self, id, apikey=''):
        """
        Gets the message with the given ID in HAR format
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/messageHar/', {'id': id, 'apikey': apikey}))

    def messages_har(self, baseurl=None, start=None, count=None, apikey=''):
        """
        Gets the HTTP messages sent through/by ZAP, in HAR format, optionally filtered by URL and paginated with 'start' position and 'count' of messages
        """
        params = {'apikey': apikey}
        if baseurl is not None:
            params['baseurl'] = baseurl
        if start is not None:
            params['start'] = start
        if count is not None:
            params['count'] = count
        return (self.zap._request_other(self.zap.base_other + 'core/other/messagesHar/', params))

    def messages_har_by_id(self, ids, apikey=''):
        """
        Gets the HTTP messages with the given IDs, in HAR format.
        """
        return (self.zap._request_other(self.zap.base_other + 'core/other/messagesHarById/', {'ids': ids, 'apikey': apikey}))

    def send_har_request(self, request, followredirects=None, apikey=''):
        """
        Sends the first HAR request entry, optionally following redirections. Returns, in HAR format, the request sent and response received and followed redirections, if any. The Mode is enforced when sending the request (and following redirections), custom manual requests are not allowed in 'Safe' mode nor in 'Protected' mode if out of scope.
        """
        params = {'request': request, 'apikey': apikey}
        if followredirects is not None:
            params['followRedirects'] = followredirects
        return (self.zap._request_other(self.zap.base_other + 'core/other/sendHarRequest/', params))
import six
class replacer(object):
    """Client for the Replacer add-on API.

    The add-on is optional: every call fails unless it is installed in ZAP.
    Uses builtin ``next(iter(d.values()))`` instead of the legacy ``six``
    shim; the unwrapping behavior is identical.
    """

    def __init__(self, zap):
        # Shared ZAP client used to issue the underlying HTTP API requests.
        self.zap = zap

    def _result(self, path, params=None):
        # Issue the API request and unwrap the single value of the JSON reply.
        if params is None:
            response = self.zap._request(self.zap.base + path)
        else:
            response = self.zap._request(self.zap.base + path, params)
        return next(iter(response.values()))

    @property
    def rules(self):
        """Full details of all configured replacer rules."""
        return self._result('replacer/view/rules/')

    def add_rule(self, description, enabled, matchtype, matchregex, matchstring, replacement=None, initiators=None, url=None, apikey=''):
        """Add a replacer rule.

        description: user friendly description. enabled: 'true'/'false'.
        matchtype: one of REQ_HEADER, REQ_HEADER_STR, REQ_BODY_STR,
        RESP_HEADER, RESP_HEADER_STR, RESP_BODY_STR. matchregex: 'true' when
        matchstring should be treated as a regex. replacement: replacement
        string. initiators: blank (all) or a comma-separated list of
        HttpSender initiator integers. url: optional URL pattern.
        """
        params = {'description': description, 'enabled': enabled, 'matchType': matchtype, 'matchRegex': matchregex, 'matchString': matchstring, 'apikey': apikey}
        for key, value in (('replacement', replacement), ('initiators', initiators), ('url', url)):
            if value is not None:
                params[key] = value
        return self._result('replacer/action/addRule/', params)

    def remove_rule(self, description, apikey=''):
        """Remove the rule with the given description."""
        return self._result('replacer/action/removeRule/', {'description': description, 'apikey': apikey})

    def set_enabled(self, description, bool, apikey=''):
        """Enable or disable the rule with the given description.

        NOTE: the parameter name 'bool' (shadowing the builtin) is kept for
        backward compatibility with callers that pass it by keyword.
        """
        return self._result('replacer/action/setEnabled/', {'description': description, 'bool': bool, 'apikey': apikey})
import six
class websocket(object):
    """Client for the WebSockets add-on API.

    The add-on is optional: every call fails unless it is installed in ZAP.
    Uses builtin ``next(iter(d.values()))`` instead of the legacy ``six``
    shim; the unwrapping behavior is identical.
    """

    def __init__(self, zap):
        # Shared ZAP client used to issue the underlying HTTP API requests.
        self.zap = zap

    def _result(self, path, params=None):
        # Issue the API request and unwrap the single value of the JSON reply.
        if params is None:
            response = self.zap._request(self.zap.base + path)
        else:
            response = self.zap._request(self.zap.base + path, params)
        return next(iter(response.values()))

    @property
    def channels(self):
        """All registered web socket channels."""
        return self._result('websocket/view/channels/')

    def message(self, channelid, messageid):
        """Full details of the message addressed by *channelid* and *messageid*."""
        return self._result('websocket/view/message/', {'channelId': channelid, 'messageId': messageid})

    def messages(self, channelid=None, start=None, count=None, payloadpreviewlength=None):
        """Messages matching the (all optional) criteria.

        channelid: channel identifier. start: offset (from 0). count: number
        of messages (default unlimited). payloadpreviewlength: maximum number
        of payload bytes to return.
        """
        params = {}
        for key, value in (('channelId', channelid), ('start', start), ('count', count), ('payloadPreviewLength', payloadpreviewlength)):
            if value is not None:
                params[key] = value
        return self._result('websocket/view/messages/', params)

    @property
    def break_text_message(self):
        """Text representation of the currently intercepted websockets message."""
        return self._result('websocket/view/breakTextMessage/')

    def send_text_message(self, channelid, outgoing, message, apikey=''):
        """Send *message* on the given channel.

        Sent to the server when *outgoing* is 'True', to the client when 'False'.
        """
        return self._result('websocket/action/sendTextMessage/', {'channelId': channelid, 'outgoing': outgoing, 'message': message, 'apikey': apikey})

    def set_break_text_message(self, message, outgoing, apikey=''):
        """Set the text of the currently intercepted websockets message."""
        return self._result('websocket/action/setBreakTextMessage/', {'message': message, 'outgoing': outgoing, 'apikey': apikey})
import six
class ajaxSpider(object):
    """Client for the AJAX Spider add-on API.

    The add-on is optional: every call fails unless it is installed in ZAP.
    Uses builtin ``next(iter(d.values()))`` instead of the legacy ``six``
    shim; the unwrapping behavior is identical.
    """

    def __init__(self, zap):
        # Shared ZAP client used to issue the underlying HTTP API requests.
        self.zap = zap

    def _result(self, path, params=None):
        # Issue the API request and unwrap the single value of the JSON reply.
        if params is None:
            response = self.zap._request(self.zap.base + path)
        else:
            response = self.zap._request(self.zap.base + path, params)
        return next(iter(response.values()))

    @staticmethod
    def _add_optional(params, optional):
        # Merge (key, value) pairs into *params*, skipping unset (None) values.
        for key, value in optional:
            if value is not None:
                params[key] = value
        return params

    @property
    def allowed_resources(self):
        """Allowed resources: always fetched even when out of scope (e.g. 3rd-party scripts)."""
        return self._result('ajaxSpider/view/allowedResources/')

    def excluded_elements(self, contextname):
        """Excluded elements of a context; never clicked while crawling (e.g. to avoid logging out)."""
        return self._result('ajaxSpider/view/excludedElements/', {'contextName': contextname})

    @property
    def status(self):
        """Current crawler status: 'Stopped' or 'Running'."""
        return self._result('ajaxSpider/view/status/')

    def results(self, start=None, count=None):
        """Current crawl results, optionally paginated with *start*/*count*."""
        params = self._add_optional({}, (('start', start), ('count', count)))
        return self._result('ajaxSpider/view/results/', params)

    @property
    def number_of_results(self):
        """Number of resources found so far."""
        return self._result('ajaxSpider/view/numberOfResults/')

    @property
    def full_results(self):
        """Full crawl content: 'inScope' URLs, 'outOfScope' URLs and 'errors' of the last/current run."""
        return self._result('ajaxSpider/view/fullResults/')

    @property
    def option_browser_id(self):
        """Browser configured for crawling."""
        return self._result('ajaxSpider/view/optionBrowserId/')

    @property
    def option_event_wait(self):
        """Wait after an event, in milliseconds (e.g. for a hover menu to display)."""
        return self._result('ajaxSpider/view/optionEventWait/')

    @property
    def option_max_crawl_depth(self):
        """Configured maximum crawl depth."""
        return self._result('ajaxSpider/view/optionMaxCrawlDepth/')

    @property
    def option_max_crawl_states(self):
        """Configured maximum number of crawl states allowed."""
        return self._result('ajaxSpider/view/optionMaxCrawlStates/')

    @property
    def option_max_duration(self):
        """Configured maximum crawl duration, in minutes."""
        return self._result('ajaxSpider/view/optionMaxDuration/')

    @property
    def option_number_of_browsers(self):
        """Configured number of browsers to use."""
        return self._result('ajaxSpider/view/optionNumberOfBrowsers/')

    @property
    def option_reload_wait(self):
        """Configured wait after a page reload, in milliseconds."""
        return self._result('ajaxSpider/view/optionReloadWait/')

    @property
    def option_click_default_elems(self):
        """Whether only default elements ('a', 'button', 'input', ...) are clicked."""
        return self._result('ajaxSpider/view/optionClickDefaultElems/')

    @property
    def option_click_elems_once(self):
        """Whether each element is clicked only once."""
        return self._result('ajaxSpider/view/optionClickElemsOnce/')

    @property
    def option_random_inputs(self):
        """Whether random values are used in form fields while crawling."""
        return self._result('ajaxSpider/view/optionRandomInputs/')

    def scan(self, url=None, inscope=None, contextname=None, subtreeonly=None, apikey=''):
        """Run the AJAX Spider against a target."""
        params = self._add_optional({'apikey': apikey}, (('url', url), ('inScope', inscope), ('contextName', contextname), ('subtreeOnly', subtreeonly)))
        return self._result('ajaxSpider/action/scan/', params)

    def scan_as_user(self, contextname, username, url=None, subtreeonly=None, apikey=''):
        """Run the AJAX Spider from the perspective of a User of the web application."""
        params = self._add_optional({'contextName': contextname, 'userName': username, 'apikey': apikey}, (('url', url), ('subtreeOnly', subtreeonly)))
        return self._result('ajaxSpider/action/scanAsUser/', params)

    def stop(self, apikey=''):
        """Stop the AJAX Spider."""
        return self._result('ajaxSpider/action/stop/', {'apikey': apikey})

    def add_allowed_resource(self, regex, enabled=None, apikey=''):
        """Add an allowed resource."""
        params = self._add_optional({'regex': regex, 'apikey': apikey}, (('enabled', enabled),))
        return self._result('ajaxSpider/action/addAllowedResource/', params)

    def add_excluded_element(self, contextname, description, element, xpath=None, text=None, attributename=None, attributevalue=None, enabled=None, apikey=''):
        """Add an excluded element to a context."""
        params = self._add_optional(
            {'contextName': contextname, 'description': description, 'element': element, 'apikey': apikey},
            (('xpath', xpath), ('text', text), ('attributeName', attributename), ('attributeValue', attributevalue), ('enabled', enabled)))
        return self._result('ajaxSpider/action/addExcludedElement/', params)

    def modify_excluded_element(self, contextname, description, element, descriptionnew=None, xpath=None, text=None, attributename=None, attributevalue=None, enabled=None, apikey=''):
        """Modify an excluded element of a context."""
        params = self._add_optional(
            {'contextName': contextname, 'description': description, 'element': element, 'apikey': apikey},
            (('descriptionNew', descriptionnew), ('xpath', xpath), ('text', text), ('attributeName', attributename), ('attributeValue', attributevalue), ('enabled', enabled)))
        return self._result('ajaxSpider/action/modifyExcludedElement/', params)

    def remove_excluded_element(self, contextname, description, apikey=''):
        """Remove an excluded element from a context."""
        return self._result('ajaxSpider/action/removeExcludedElement/', {'contextName': contextname, 'description': description, 'apikey': apikey})

    def remove_allowed_resource(self, regex, apikey=''):
        """Remove an allowed resource."""
        return self._result('ajaxSpider/action/removeAllowedResource/', {'regex': regex, 'apikey': apikey})

    def set_enabled_allowed_resource(self, regex, enabled, apikey=''):
        """Enable or disable an allowed resource."""
        return self._result('ajaxSpider/action/setEnabledAllowedResource/', {'regex': regex, 'enabled': enabled, 'apikey': apikey})

    def set_option_browser_id(self, string, apikey=''):
        """Select one of the supported browsers for crawling."""
        return self._result('ajaxSpider/action/setOptionBrowserId/', {'String': string, 'apikey': apikey})

    def set_option_click_default_elems(self, boolean, apikey=''):
        """Set whether only the default HTML elements are clicked."""
        return self._result('ajaxSpider/action/setOptionClickDefaultElems/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_click_elems_once(self, boolean, apikey=''):
        """Set whether the crawler interacts with each element (e.g. clicks) only once."""
        return self._result('ajaxSpider/action/setOptionClickElemsOnce/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_event_wait(self, integer, apikey=''):
        """Set the wait after an event, in milliseconds."""
        return self._result('ajaxSpider/action/setOptionEventWait/', {'Integer': integer, 'apikey': apikey})

    def set_option_max_crawl_depth(self, integer, apikey=''):
        """Set the maximum depth the crawler can reach."""
        return self._result('ajaxSpider/action/setOptionMaxCrawlDepth/', {'Integer': integer, 'apikey': apikey})

    def set_option_max_crawl_states(self, integer, apikey=''):
        """Set the maximum number of states the crawler should crawl."""
        return self._result('ajaxSpider/action/setOptionMaxCrawlStates/', {'Integer': integer, 'apikey': apikey})

    def set_option_max_duration(self, integer, apikey=''):
        """Set the maximum time the crawler is allowed to run, in minutes."""
        return self._result('ajaxSpider/action/setOptionMaxDuration/', {'Integer': integer, 'apikey': apikey})

    def set_option_number_of_browsers(self, integer, apikey=''):
        """Set the number of browser windows to be used by the AJAX Spider."""
        return self._result('ajaxSpider/action/setOptionNumberOfBrowsers/', {'Integer': integer, 'apikey': apikey})

    def set_option_random_inputs(self, boolean, apikey=''):
        """Set whether random values are inserted into form fields."""
        return self._result('ajaxSpider/action/setOptionRandomInputs/', {'Boolean': boolean, 'apikey': apikey})

    def set_option_reload_wait(self, integer, apikey=''):
        """Set the wait after the page is loaded before interacting with it, in milliseconds."""
        return self._result('ajaxSpider/action/setOptionReloadWait/', {'Integer': integer, 'apikey': apikey})
import six
class accessControl(object):
    """Client for the Access Control add-on API.

    The add-on is optional: every call fails unless it is installed in ZAP.
    Uses builtin ``next(iter(d.values()))`` instead of the legacy ``six``
    shim; the unwrapping behavior is identical.
    """

    def __init__(self, zap):
        # Shared ZAP client used to issue the underlying HTTP API requests.
        self.zap = zap

    def _result(self, path, params):
        # Issue the API request and unwrap the single value of the JSON reply.
        response = self.zap._request(self.zap.base + path, params)
        return next(iter(response.values()))

    def get_scan_progress(self, contextid):
        """Access Control scan progress (percentage integer) for the given context ID."""
        return self._result('accessControl/view/getScanProgress/', {'contextId': contextid})

    def get_scan_status(self, contextid):
        """Access Control scan status (description string) for the given context ID."""
        return self._result('accessControl/view/getScanStatus/', {'contextId': contextid})

    def scan(self, contextid, userid, scanasunauthuser=None, raisealert=None, alertrisklevel=None, apikey=''):
        """Start an Access Control scan for the given context and user.

        Optional: scanasunauthuser (also scan as the unauthenticated user),
        raisealert (whether alerts are raised) and alertrisklevel. Assumes the
        Access Control rules were previously set up and the Context imported.
        """
        params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
        for key, value in (('scanAsUnAuthUser', scanasunauthuser), ('raiseAlert', raisealert), ('alertRiskLevel', alertrisklevel)):
            if value is not None:
                params[key] = value
        return self._result('accessControl/action/scan/', params)

    def write_htm_lreport(self, contextid, filename, apikey=''):
        """Generate an Access Control HTML report for the context and save it to *filename*.

        NOTE: the method name ('htm_lreport') is kept as-is for backward
        compatibility with existing callers.
        """
        return self._result('accessControl/action/writeHTMLreport/', {'contextId': contextid, 'fileName': filename, 'apikey': apikey})
import six
class alert(object):
    """Client for the alert-related ZAP API endpoints.

    Uses builtin ``next(iter(d.values()))`` instead of the legacy ``six``
    shim; the unwrapping behavior is identical.
    """

    def __init__(self, zap):
        # Shared ZAP client used to issue the underlying HTTP API requests.
        self.zap = zap

    def _result(self, path, params=None):
        # Issue the API request and unwrap the single value of the JSON reply.
        if params is None:
            response = self.zap._request(self.zap.base + path)
        else:
            response = self.zap._request(self.zap.base + path, params)
        return next(iter(response.values()))

    @staticmethod
    def _add_optional(params, optional):
        # Merge (key, value) pairs into *params*, skipping unset (None) values.
        for key, value in optional:
            if value is not None:
                params[key] = value
        return params

    def alert(self, id):
        """Alert with the given ID; its HTTP message is reachable via the 'messageId' field.

        NOTE: the parameter name 'id' (shadowing the builtin) is kept for
        backward compatibility with callers that pass it by keyword.
        """
        return self._result('alert/view/alert/', {'id': id})

    def alerts(self, baseurl=None, start=None, count=None, riskid=None):
        """Alerts raised by ZAP, optionally filtered by URL or risk ID and paginated."""
        params = self._add_optional({}, (('baseurl', baseurl), ('start', start), ('count', count), ('riskId', riskid)))
        return self._result('alert/view/alerts/', params)

    def alerts_summary(self, baseurl=None):
        """Number of alerts grouped by risk level, optionally filtered by URL."""
        params = self._add_optional({}, (('baseurl', baseurl),))
        return self._result('alert/view/alertsSummary/', params)

    def number_of_alerts(self, baseurl=None, riskid=None):
        """Count of alerts, optionally filtered by URL or risk ID."""
        params = self._add_optional({}, (('baseurl', baseurl), ('riskId', riskid)))
        return self._result('alert/view/numberOfAlerts/', params)

    def alerts_by_risk(self, url=None, recurse=None):
        """Alert summary, optionally filtered by *url*.

        When *recurse* is true, alerts for all URLs starting with *url* are
        included; otherwise only the exact URL (ignoring query parameters).
        """
        params = self._add_optional({}, (('url', url), ('recurse', recurse)))
        return self._result('alert/view/alertsByRisk/', params)

    def alert_counts_by_risk(self, url=None, recurse=None):
        """Alert counts, filtered as per alerts_by_risk."""
        params = self._add_optional({}, (('url', url), ('recurse', recurse)))
        return self._result('alert/view/alertCountsByRisk/', params)

    def delete_all_alerts(self, apikey=''):
        """Delete all alerts of the current session."""
        return self._result('alert/action/deleteAllAlerts/', {'apikey': apikey})

    def delete_alert(self, id, apikey=''):
        """Delete the alert with the given ID."""
        return self._result('alert/action/deleteAlert/', {'id': id, 'apikey': apikey})

    def update_alerts_confidence(self, ids, confidenceid, apikey=''):
        """Update the confidence of the given alerts."""
        return self._result('alert/action/updateAlertsConfidence/', {'ids': ids, 'confidenceId': confidenceid, 'apikey': apikey})

    def update_alerts_risk(self, ids, riskid, apikey=''):
        """Update the risk of the given alerts."""
        return self._result('alert/action/updateAlertsRisk/', {'ids': ids, 'riskId': riskid, 'apikey': apikey})

    def update_alert(self, id, name, riskid, confidenceid, description, param=None, attack=None, otherinfo=None, solution=None, references=None, evidence=None, cweid=None, wascid=None, apikey=''):
        """Update the alert with the given ID with the provided details."""
        params = {'id': id, 'name': name, 'riskId': riskid, 'confidenceId': confidenceid, 'description': description, 'apikey': apikey}
        self._add_optional(params, (('param', param), ('attack', attack), ('otherInfo', otherinfo), ('solution', solution), ('references', references), ('evidence', evidence), ('cweId', cweid), ('wascId', wascid)))
        return self._result('alert/action/updateAlert/', params)

    def add_alert(self, messageid, name, riskid, confidenceid, description, param=None, attack=None, otherinfo=None, solution=None, references=None, evidence=None, cweid=None, wascid=None, apikey=''):
        """Add an alert associated with the given message ID; returns the new alert's ID."""
        params = {'messageId': messageid, 'name': name, 'riskId': riskid, 'confidenceId': confidenceid, 'description': description, 'apikey': apikey}
        self._add_optional(params, (('param', param), ('attack', attack), ('otherInfo', otherinfo), ('solution', solution), ('references', references), ('evidence', evidence), ('cweId', cweid), ('wascId', wascid)))
        return self._result('alert/action/addAlert/', params)
import six
class graphql(object):
def __init__(self, zap):
self.zap = zap
@property
def option_args_type(self):
"""
Returns how arguments are currently specified.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionArgsType/')))
@property
def option_lenient_max_query_depth_enabled(self):
"""
Returns whether or not lenient maximum query generation depth is enabled.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionLenientMaxQueryDepthEnabled/')))
@property
def option_max_additional_query_depth(self):
"""
Returns the current maximum additional query generation depth.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionMaxAdditionalQueryDepth/')))
@property
def option_max_args_depth(self):
"""
Returns the current maximum arguments generation depth.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionMaxArgsDepth/')))
@property
def option_max_query_depth(self):
"""
Returns the current maximum query generation depth.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionMaxQueryDepth/')))
@property
def option_optional_args_enabled(self):
"""
Returns whether or not optional arguments are currently specified.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionOptionalArgsEnabled/')))
@property
def option_query_gen_enabled(self):
"""
Returns whether the query generator is enabled.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionQueryGenEnabled/')))
@property
def option_query_split_type(self):
"""
Returns the current level for which a single query is generated.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionQuerySplitType/')))
@property
def option_request_method(self):
"""
Returns the current request method.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionRequestMethod/')))
def import_file(self, endurl, file, apikey=''):
"""
Imports a GraphQL Schema from a File.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/importFile/', {'endurl': endurl, 'file': file, 'apikey': apikey})))
def import_url(self, endurl, url=None, apikey=''):
"""
Imports a GraphQL Schema from a URL.
This component is optional and therefore the API will only work if it is installed
"""
params = {'endurl': endurl, 'apikey': apikey}
if url is not None:
params['url'] = url
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/importUrl/', params)))
def set_option_args_type(self, string, apikey=''):
"""
Sets how arguments are specified.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionArgsType/', {'String': string, 'apikey': apikey})))
def set_option_query_split_type(self, string, apikey=''):
"""
Sets the level for which a single query is generated.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionQuerySplitType/', {'String': string, 'apikey': apikey})))
def set_option_request_method(self, string, apikey=''):
"""
Sets the request method.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionRequestMethod/', {'String': string, 'apikey': apikey})))
def set_option_lenient_max_query_depth_enabled(self, boolean, apikey=''):
"""
Sets whether or not Maximum Query Depth is enforced leniently.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionLenientMaxQueryDepthEnabled/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_max_additional_query_depth(self, integer, apikey=''):
"""
Sets the maximum additional query generation depth (used if enforced leniently).
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionMaxAdditionalQueryDepth/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_args_depth(self, integer, apikey=''):
"""
Sets the maximum arguments generation depth.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionMaxArgsDepth/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_query_depth(self, integer, apikey=''):
    """
    Sets the maximum query generation depth.
    This component is optional and therefore the API will only work if it is installed
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionMaxQueryDepth/', {'Integer': integer, 'apikey': apikey})))
def set_option_optional_args_enabled(self, boolean, apikey=''):
    """
    Sets whether or not Optional Arguments should be specified.
    This component is optional and therefore the API will only work if it is installed
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionOptionalArgsEnabled/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_query_gen_enabled(self, boolean, apikey=''):
    """
    Sets whether the query generator is enabled.
    This component is optional and therefore the API will only work if it is installed
    """
    # NOTE(review): the original line carried extraction residue ("| zaproxy | ...")
    # fused after the return statement; it has been removed.
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/action/setOptionQueryGenEnabled/', {'Boolean': boolean, 'apikey': apikey})))
import six
class script(object):
    """Client API wrapper for the ZAP 'script' component (views and actions)."""

    def __init__(self, zap):
        # zap: the shared ZAPv2 client used to issue API requests
        self.zap = zap

    @property
    def list_engines(self):
        """
        Lists the script engines available
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/listEngines/')))

    @property
    def list_types(self):
        """
        Lists the script types available.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/listTypes/')))

    @property
    def list_scripts(self):
        """
        Lists the scripts available, with its engine, name, description, type and error state.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/listScripts/')))

    def global_var(self, varkey):
        """
        Gets the value of the global variable with the given key. Returns an API error (DOES_NOT_EXIST) if no value was previously set.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/globalVar/', {'varKey': varkey})))

    def global_custom_var(self, varkey):
        """
        Gets the value (string representation) of a global custom variable. Returns an API error (DOES_NOT_EXIST) if no value was previously set.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/globalCustomVar/', {'varKey': varkey})))

    @property
    def global_vars(self):
        """
        Gets all the global variables (key/value pairs).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/globalVars/')))

    @property
    def global_custom_vars(self):
        """
        Gets all the global custom variables (key/value pairs, the value is the string representation).
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/globalCustomVars/')))

    def script_var(self, scriptname, varkey):
        """
        Gets the value of the variable with the given key for the given script. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists or if no value was previously set.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/scriptVar/', {'scriptName': scriptname, 'varKey': varkey})))

    def script_custom_var(self, scriptname, varkey):
        """
        Gets the value (string representation) of a custom variable. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists or if no value was previously set.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/scriptCustomVar/', {'scriptName': scriptname, 'varKey': varkey})))

    def script_vars(self, scriptname):
        """
        Gets all the variables (key/value pairs) of the given script. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/scriptVars/', {'scriptName': scriptname})))

    def script_custom_vars(self, scriptname):
        """
        Gets all the custom variables (key/value pairs, the value is the string representation) of a script. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/view/scriptCustomVars/', {'scriptName': scriptname})))

    def enable(self, scriptname, apikey=''):
        """
        Enables the script with the given name
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/enable/', {'scriptName': scriptname, 'apikey': apikey})))

    def disable(self, scriptname, apikey=''):
        """
        Disables the script with the given name
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/disable/', {'scriptName': scriptname, 'apikey': apikey})))

    def load(self, scriptname, scripttype, scriptengine, filename, scriptdescription=None, charset=None, apikey=''):
        """
        Loads a script into ZAP from the given local file, with the given name, type and engine, optionally with a description, and a charset name to read the script (the charset name is required if the script is not in UTF-8, for example, in ISO-8859-1).
        """
        params = {'scriptName': scriptname, 'scriptType': scripttype, 'scriptEngine': scriptengine, 'fileName': filename, 'apikey': apikey}
        # Optional parameters are only sent when explicitly provided.
        if scriptdescription is not None:
            params['scriptDescription'] = scriptdescription
        if charset is not None:
            params['charset'] = charset
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/load/', params)))

    def remove(self, scriptname, apikey=''):
        """
        Removes the script with the given name
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/remove/', {'scriptName': scriptname, 'apikey': apikey})))

    def run_stand_alone_script(self, scriptname, apikey=''):
        """
        Runs the stand alone script with the given name
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/runStandAloneScript/', {'scriptName': scriptname, 'apikey': apikey})))

    def clear_global_var(self, varkey, apikey=''):
        """
        Clears the global variable with the given key.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/clearGlobalVar/', {'varKey': varkey, 'apikey': apikey})))

    def clear_global_custom_var(self, varkey, apikey=''):
        """
        Clears a global custom variable.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/clearGlobalCustomVar/', {'varKey': varkey, 'apikey': apikey})))

    def clear_global_vars(self, apikey=''):
        """
        Clears the global variables.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/clearGlobalVars/', {'apikey': apikey})))

    def clear_script_var(self, scriptname, varkey, apikey=''):
        """
        Clears the variable with the given key of the given script. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/clearScriptVar/', {'scriptName': scriptname, 'varKey': varkey, 'apikey': apikey})))

    def clear_script_custom_var(self, scriptname, varkey, apikey=''):
        """
        Clears a script custom variable.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/clearScriptCustomVar/', {'scriptName': scriptname, 'varKey': varkey, 'apikey': apikey})))

    def clear_script_vars(self, scriptname, apikey=''):
        """
        Clears the variables of the given script. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/clearScriptVars/', {'scriptName': scriptname, 'apikey': apikey})))

    def set_script_var(self, scriptname, varkey, varvalue=None, apikey=''):
        """
        Sets the value of the variable with the given key of the given script. Returns an API error (DOES_NOT_EXIST) if no script with the given name exists.
        """
        params = {'scriptName': scriptname, 'varKey': varkey, 'apikey': apikey}
        if varvalue is not None:
            params['varValue'] = varvalue
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/setScriptVar/', params)))

    def set_global_var(self, varkey, varvalue=None, apikey=''):
        """
        Sets the value of the global variable with the given key.
        """
        params = {'varKey': varkey, 'apikey': apikey}
        if varvalue is not None:
            params['varValue'] = varvalue
        # NOTE(review): extraction residue ("| zaproxy | ...") fused after this
        # return statement in the source has been removed.
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'script/action/setGlobalVar/', params)))
import six
class authentication(object):
    """Client API wrapper for the ZAP 'authentication' component (views and actions)."""

    def __init__(self, zap):
        # zap: the shared ZAPv2 client used to issue API requests
        self.zap = zap

    @property
    def get_supported_authentication_methods(self):
        """
        Gets the name of the authentication methods.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/view/getSupportedAuthenticationMethods/')))

    def get_authentication_method_config_params(self, authmethodname):
        """
        Gets the configuration parameters for the authentication method with the given name.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/view/getAuthenticationMethodConfigParams/', {'authMethodName': authmethodname})))

    def get_authentication_method(self, contextid):
        """
        Gets the name of the authentication method for the context with the given ID.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/view/getAuthenticationMethod/', {'contextId': contextid})))

    def get_logged_in_indicator(self, contextid):
        """
        Gets the logged in indicator for the context with the given ID.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/view/getLoggedInIndicator/', {'contextId': contextid})))

    def get_logged_out_indicator(self, contextid):
        """
        Gets the logged out indicator for the context with the given ID.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/view/getLoggedOutIndicator/', {'contextId': contextid})))

    def set_authentication_method(self, contextid, authmethodname, authmethodconfigparams=None, apikey=''):
        """
        Sets the authentication method for the context with the given ID.
        """
        params = {'contextId': contextid, 'authMethodName': authmethodname, 'apikey': apikey}
        # Optional config params are only sent when explicitly provided.
        if authmethodconfigparams is not None:
            params['authMethodConfigParams'] = authmethodconfigparams
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/action/setAuthenticationMethod/', params)))

    def set_logged_in_indicator(self, contextid, loggedinindicatorregex, apikey=''):
        """
        Sets the logged in indicator for the context with the given ID.
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/action/setLoggedInIndicator/', {'contextId': contextid, 'loggedInIndicatorRegex': loggedinindicatorregex, 'apikey': apikey})))

    def set_logged_out_indicator(self, contextid, loggedoutindicatorregex, apikey=''):
        """
        Sets the logged out indicator for the context with the given ID.
        """
        # NOTE(review): extraction residue ("| zaproxy | ...") fused after this
        # return statement in the source has been removed.
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authentication/action/setLoggedOutIndicator/', {'contextId': contextid, 'loggedOutIndicatorRegex': loggedoutindicatorregex, 'apikey': apikey})))
import six
class alertFilter(object):
    """Client API wrapper for the ZAP 'alertFilter' add-on component (views and actions)."""

    def __init__(self, zap):
        # zap: the shared ZAPv2 client used to issue API requests
        self.zap = zap

    def alert_filter_list(self, contextid):
        """
        Lists the alert filters of the context with the given ID.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/view/alertFilterList/', {'contextId': contextid})))

    @property
    def global_alert_filter_list(self):
        """
        Lists the global alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/view/globalAlertFilterList/')))

    def add_alert_filter(self, contextid, ruleid, newlevel, url=None, urlisregex=None, parameter=None, enabled=None, parameterisregex=None, attack=None, attackisregex=None, evidence=None, evidenceisregex=None, methods=None, apikey=''):
        """
        Adds a new alert filter for the context with the given ID.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'contextId': contextid, 'ruleId': ruleid, 'newLevel': newlevel, 'apikey': apikey}
        # Optional filter criteria are only sent when explicitly provided.
        if url is not None:
            params['url'] = url
        if urlisregex is not None:
            params['urlIsRegex'] = urlisregex
        if parameter is not None:
            params['parameter'] = parameter
        if enabled is not None:
            params['enabled'] = enabled
        if parameterisregex is not None:
            params['parameterIsRegex'] = parameterisregex
        if attack is not None:
            params['attack'] = attack
        if attackisregex is not None:
            params['attackIsRegex'] = attackisregex
        if evidence is not None:
            params['evidence'] = evidence
        if evidenceisregex is not None:
            params['evidenceIsRegex'] = evidenceisregex
        if methods is not None:
            params['methods'] = methods
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/addAlertFilter/', params)))

    def remove_alert_filter(self, contextid, ruleid, newlevel, url=None, urlisregex=None, parameter=None, enabled=None, parameterisregex=None, attack=None, attackisregex=None, evidence=None, evidenceisregex=None, methods=None, apikey=''):
        """
        Removes an alert filter from the context with the given ID.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'contextId': contextid, 'ruleId': ruleid, 'newLevel': newlevel, 'apikey': apikey}
        if url is not None:
            params['url'] = url
        if urlisregex is not None:
            params['urlIsRegex'] = urlisregex
        if parameter is not None:
            params['parameter'] = parameter
        if enabled is not None:
            params['enabled'] = enabled
        if parameterisregex is not None:
            params['parameterIsRegex'] = parameterisregex
        if attack is not None:
            params['attack'] = attack
        if attackisregex is not None:
            params['attackIsRegex'] = attackisregex
        if evidence is not None:
            params['evidence'] = evidence
        if evidenceisregex is not None:
            params['evidenceIsRegex'] = evidenceisregex
        if methods is not None:
            params['methods'] = methods
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/removeAlertFilter/', params)))

    def add_global_alert_filter(self, ruleid, newlevel, url=None, urlisregex=None, parameter=None, enabled=None, parameterisregex=None, attack=None, attackisregex=None, evidence=None, evidenceisregex=None, methods=None, apikey=''):
        """
        Adds a new global alert filter.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'ruleId': ruleid, 'newLevel': newlevel, 'apikey': apikey}
        if url is not None:
            params['url'] = url
        if urlisregex is not None:
            params['urlIsRegex'] = urlisregex
        if parameter is not None:
            params['parameter'] = parameter
        if enabled is not None:
            params['enabled'] = enabled
        if parameterisregex is not None:
            params['parameterIsRegex'] = parameterisregex
        if attack is not None:
            params['attack'] = attack
        if attackisregex is not None:
            params['attackIsRegex'] = attackisregex
        if evidence is not None:
            params['evidence'] = evidence
        if evidenceisregex is not None:
            params['evidenceIsRegex'] = evidenceisregex
        if methods is not None:
            params['methods'] = methods
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/addGlobalAlertFilter/', params)))

    def remove_global_alert_filter(self, ruleid, newlevel, url=None, urlisregex=None, parameter=None, enabled=None, parameterisregex=None, attack=None, attackisregex=None, evidence=None, evidenceisregex=None, methods=None, apikey=''):
        """
        Removes a global alert filter.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'ruleId': ruleid, 'newLevel': newlevel, 'apikey': apikey}
        if url is not None:
            params['url'] = url
        if urlisregex is not None:
            params['urlIsRegex'] = urlisregex
        if parameter is not None:
            params['parameter'] = parameter
        if enabled is not None:
            params['enabled'] = enabled
        if parameterisregex is not None:
            params['parameterIsRegex'] = parameterisregex
        if attack is not None:
            params['attack'] = attack
        if attackisregex is not None:
            params['attackIsRegex'] = attackisregex
        if evidence is not None:
            params['evidence'] = evidence
        if evidenceisregex is not None:
            params['evidenceIsRegex'] = evidenceisregex
        if methods is not None:
            params['methods'] = methods
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/removeGlobalAlertFilter/', params)))

    def apply_all(self, apikey=''):
        """
        Applies all currently enabled Global and Context alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/applyAll/', {'apikey': apikey})))

    def apply_context(self, apikey=''):
        """
        Applies all currently enabled Context alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/applyContext/', {'apikey': apikey})))

    def apply_global(self, apikey=''):
        """
        Applies all currently enabled Global alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/applyGlobal/', {'apikey': apikey})))

    def test_all(self, apikey=''):
        """
        Tests all currently enabled Global and Context alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/testAll/', {'apikey': apikey})))

    def test_context(self, apikey=''):
        """
        Tests all currently enabled Context alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/testContext/', {'apikey': apikey})))

    def test_global(self, apikey=''):
        """
        Tests all currently enabled Global alert filters.
        This component is optional and therefore the API will only work if it is installed
        """
        # NOTE(review): extraction residue ("| zaproxy | ...") fused after this
        # return statement in the source has been removed.
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'alertFilter/action/testGlobal/', {'apikey': apikey})))
import six
class network(object):
    """Client API wrapper for the ZAP 'network' add-on component (views and actions)."""

    def __init__(self, zap):
        # zap: the shared ZAPv2 client used to issue API requests
        self.zap = zap

    @property
    def get_root_ca_cert_validity(self):
        """
        Gets the Root CA certificate validity, in days. Used when generating a new Root CA certificate.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getRootCaCertValidity/')))

    @property
    def get_server_cert_validity(self):
        """
        Gets the server certificate validity, in days. Used when generating server certificates.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getServerCertValidity/')))

    @property
    def get_aliases(self):
        """
        Gets the aliases used to identify the local servers/proxies.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getAliases/')))

    @property
    def get_local_servers(self):
        """
        Gets the local servers/proxies.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getLocalServers/')))

    @property
    def get_pass_throughs(self):
        """
        Gets the authorities that will pass-through the local proxies.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getPassThroughs/')))

    @property
    def get_connection_timeout(self):
        """
        Gets the connection timeout, in seconds.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getConnectionTimeout/')))

    @property
    def get_default_user_agent(self):
        """
        Gets the default user-agent.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getDefaultUserAgent/')))

    @property
    def get_dns_ttl_successful_queries(self):
        """
        Gets the TTL (in seconds) of successful DNS queries.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getDnsTtlSuccessfulQueries/')))

    @property
    def get_http_proxy(self):
        """
        Gets the HTTP proxy.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getHttpProxy/')))

    @property
    def get_http_proxy_exclusions(self):
        """
        Gets the HTTP proxy exclusions.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getHttpProxyExclusions/')))

    @property
    def get_socks_proxy(self):
        """
        Gets the SOCKS proxy.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getSocksProxy/')))

    @property
    def is_http_proxy_auth_enabled(self):
        """
        Tells whether or not the HTTP proxy authentication is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/isHttpProxyAuthEnabled/')))

    @property
    def is_http_proxy_enabled(self):
        """
        Tells whether or not the HTTP proxy is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/isHttpProxyEnabled/')))

    @property
    def is_socks_proxy_enabled(self):
        """
        Tells whether or not the SOCKS proxy is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/isSocksProxyEnabled/')))

    @property
    def is_use_global_http_state(self):
        """
        Tells whether or not to use global HTTP state.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/isUseGlobalHttpState/')))

    @property
    def get_rate_limit_rules(self):
        """
        List of rate limit rules.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/view/getRateLimitRules/')))

    def generate_root_ca_cert(self, apikey=''):
        """
        Generates a new Root CA certificate, used to issue server certificates.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/generateRootCaCert/', {'apikey': apikey})))

    def import_root_ca_cert(self, filepath, apikey=''):
        """
        Imports a Root CA certificate to be used to issue server certificates.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/importRootCaCert/', {'filePath': filepath, 'apikey': apikey})))

    def set_root_ca_cert_validity(self, validity, apikey=''):
        """
        Sets the Root CA certificate validity. Used when generating a new Root CA certificate.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setRootCaCertValidity/', {'validity': validity, 'apikey': apikey})))

    def set_server_cert_validity(self, validity, apikey=''):
        """
        Sets the server certificate validity. Used when generating server certificates.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setServerCertValidity/', {'validity': validity, 'apikey': apikey})))

    def add_alias(self, name, enabled=None, apikey=''):
        """
        Adds an alias for the local servers/proxies.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'name': name, 'apikey': apikey}
        # Optional parameters are only sent when explicitly provided.
        if enabled is not None:
            params['enabled'] = enabled
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/addAlias/', params)))

    def add_local_server(self, address, port, api=None, proxy=None, behindnat=None, decoderesponse=None, removeacceptencoding=None, apikey=''):
        """
        Adds a local server/proxy.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'address': address, 'port': port, 'apikey': apikey}
        if api is not None:
            params['api'] = api
        if proxy is not None:
            params['proxy'] = proxy
        if behindnat is not None:
            params['behindNat'] = behindnat
        if decoderesponse is not None:
            params['decodeResponse'] = decoderesponse
        if removeacceptencoding is not None:
            params['removeAcceptEncoding'] = removeacceptencoding
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/addLocalServer/', params)))

    def add_pass_through(self, authority, enabled=None, apikey=''):
        """
        Adds an authority to pass-through the local proxies.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'authority': authority, 'apikey': apikey}
        if enabled is not None:
            params['enabled'] = enabled
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/addPassThrough/', params)))

    def remove_alias(self, name, apikey=''):
        """
        Removes an alias.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/removeAlias/', {'name': name, 'apikey': apikey})))

    def remove_local_server(self, address, port, apikey=''):
        """
        Removes a local server/proxy.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/removeLocalServer/', {'address': address, 'port': port, 'apikey': apikey})))

    def remove_pass_through(self, authority, apikey=''):
        """
        Removes a pass-through.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/removePassThrough/', {'authority': authority, 'apikey': apikey})))

    def set_alias_enabled(self, name, enabled, apikey=''):
        """
        Sets whether or not an alias is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setAliasEnabled/', {'name': name, 'enabled': enabled, 'apikey': apikey})))

    def set_pass_through_enabled(self, authority, enabled, apikey=''):
        """
        Sets whether or not a pass-through is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setPassThroughEnabled/', {'authority': authority, 'enabled': enabled, 'apikey': apikey})))

    def set_connection_timeout(self, timeout, apikey=''):
        """
        Sets the timeout, for reads and connects.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setConnectionTimeout/', {'timeout': timeout, 'apikey': apikey})))

    def set_default_user_agent(self, useragent, apikey=''):
        """
        Sets the default user-agent.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setDefaultUserAgent/', {'userAgent': useragent, 'apikey': apikey})))

    def set_dns_ttl_successful_queries(self, ttl, apikey=''):
        """
        Sets the TTL of successful DNS queries.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setDnsTtlSuccessfulQueries/', {'ttl': ttl, 'apikey': apikey})))

    def add_http_proxy_exclusion(self, host, enabled=None, apikey=''):
        """
        Adds a host to be excluded from the HTTP proxy.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'host': host, 'apikey': apikey}
        if enabled is not None:
            params['enabled'] = enabled
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/addHttpProxyExclusion/', params)))

    def remove_http_proxy_exclusion(self, host, apikey=''):
        """
        Removes an HTTP proxy exclusion.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/removeHttpProxyExclusion/', {'host': host, 'apikey': apikey})))

    def set_http_proxy(self, host, port, realm=None, username=None, password=None, apikey=''):
        """
        Sets the HTTP proxy configuration.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'host': host, 'port': port, 'apikey': apikey}
        if realm is not None:
            params['realm'] = realm
        if username is not None:
            params['username'] = username
        if password is not None:
            params['password'] = password
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setHttpProxy/', params)))

    def set_http_proxy_auth_enabled(self, enabled, apikey=''):
        """
        Sets whether or not the HTTP proxy authentication is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setHttpProxyAuthEnabled/', {'enabled': enabled, 'apikey': apikey})))

    def set_http_proxy_enabled(self, enabled, apikey=''):
        """
        Sets whether or not the HTTP proxy is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setHttpProxyEnabled/', {'enabled': enabled, 'apikey': apikey})))

    def set_http_proxy_exclusion_enabled(self, host, enabled, apikey=''):
        """
        Sets whether or not an HTTP proxy exclusion is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setHttpProxyExclusionEnabled/', {'host': host, 'enabled': enabled, 'apikey': apikey})))

    def set_socks_proxy(self, host, port, version=None, usedns=None, username=None, password=None, apikey=''):
        """
        Sets the SOCKS proxy configuration.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'host': host, 'port': port, 'apikey': apikey}
        if version is not None:
            params['version'] = version
        if usedns is not None:
            params['useDns'] = usedns
        if username is not None:
            params['username'] = username
        if password is not None:
            params['password'] = password
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setSocksProxy/', params)))

    def set_socks_proxy_enabled(self, enabled, apikey=''):
        """
        Sets whether or not the SOCKS proxy is enabled.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setSocksProxyEnabled/', {'enabled': enabled, 'apikey': apikey})))

    def set_use_global_http_state(self, use, apikey=''):
        """
        Sets whether or not to use the global HTTP state.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setUseGlobalHttpState/', {'use': use, 'apikey': apikey})))

    def add_pkcs_12_client_certificate(self, filepath, password, index=None, apikey=''):
        """
        Adds a client certificate contained in a PKCS#12 file, the certificate is automatically set as active and used.
        This component is optional and therefore the API will only work if it is installed
        """
        params = {'filePath': filepath, 'password': password, 'apikey': apikey}
        if index is not None:
            params['index'] = index
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/addPkcs12ClientCertificate/', params)))

    def set_use_client_certificate(self, use, apikey=''):
        """
        Sets whether or not to use the active client certificate.
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setUseClientCertificate/', {'use': use, 'apikey': apikey})))

    def add_rate_limit_rule(self, description, enabled, matchregex, matchstring, requestspersecond, groupby, apikey=''):
        """
        Adds a rate limit rule
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/addRateLimitRule/', {'description': description, 'enabled': enabled, 'matchRegex': matchregex, 'matchString': matchstring, 'requestsPerSecond': requestspersecond, 'groupBy': groupby, 'apikey': apikey})))

    def remove_rate_limit_rule(self, description, apikey=''):
        """
        Remove a rate limit rule
        This component is optional and therefore the API will only work if it is installed
        """
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/removeRateLimitRule/', {'description': description, 'apikey': apikey})))
def set_rate_limit_rule_enabled(self, description, enabled, apikey=''):
"""
Set enabled state for a rate limit rule.
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'network/action/setRateLimitRuleEnabled/', {'description': description, 'enabled': enabled, 'apikey': apikey})))
def proxy_pac(self, apikey=''):
"""
Provides a PAC file, proxying through the main proxy.
This component is optional and therefore the API will only work if it is installed
"""
return (self.zap._request_other(self.zap.base_other + 'network/other/proxy.pac/', {'apikey': apikey}))
def set_proxy(self, proxy, apikey=''):
"""
Sets the HTTP proxy configuration.
This component is optional and therefore the API will only work if it is installed
"""
return (self.zap._request_other(self.zap.base_other + 'network/other/setProxy/', {'proxy': proxy, 'apikey': apikey}))
def root_ca_cert(self, apikey=''):
"""
Gets the Root CA certificate used to issue server certificates. Suitable to import into client applications (e.g. browsers).
This component is optional and therefore the API will only work if it is installed
"""
return (self.zap._request_other(self.zap.base_other + 'network/other/rootCaCert/', {'apikey': apikey})) | zaproxy | /zaproxy-0.1.1.tar.gz/zaproxy-0.1.1/src/zapv2/network.py | network.py |
import six
class importLogFiles(object):
    """Client for the optional "Import Log Files" ZAP add-on API.

    Each method maps onto one add-on endpoint; the add-on must be installed
    in ZAP for any of these calls to succeed.
    """

    def __init__(self, zap):
        # The owning ZAPv2 client, which performs the actual HTTP requests.
        self.zap = zap

    def import_zap_log_from_file(self, filepath, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        request_params = {'FilePath': filepath, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'importLogFiles/action/ImportZAPLogFromFile/', request_params)
        return six.next(six.itervalues(response))

    def import_mod_security_log_from_file(self, filepath, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        request_params = {'FilePath': filepath, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'importLogFiles/action/ImportModSecurityLogFromFile/', request_params)
        return six.next(six.itervalues(response))

    def import_zap_http_request_response_pair(self, httprequest, httpresponse, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        request_params = {'HTTPRequest': httprequest, 'HTTPResponse': httpresponse, 'apikey': apikey}
        response = self.zap._request(self.zap.base + 'importLogFiles/action/ImportZAPHttpRequestResponsePair/', request_params)
        return six.next(six.itervalues(response))

    def post_mod_security_audit_event(self, auditeventstring=None, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        request_params = {'apikey': apikey}
        # The audit event payload is optional; omit the parameter when absent.
        if auditeventstring is not None:
            request_params['AuditEventString'] = auditeventstring
        response = self.zap._request(self.zap.base + 'importLogFiles/action/PostModSecurityAuditEvent/', request_params)
        return six.next(six.itervalues(response))

    def other_post_mod_security_audit_event(self, auditeventstring, apikey=''):
        """
        This component is optional and therefore the API will only work if it is installed
        """
        request_params = {'AuditEventString': auditeventstring, 'apikey': apikey}
        return self.zap._request_other(self.zap.base_other + 'importLogFiles/other/OtherPostModSecurityAuditEvent/', request_params)
# Module metadata: docstring markup format and the package version string.
__docformat__ = 'restructuredtext'
__version__ = '0.1.1'
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from .accessControl import accessControl
from .acsrf import acsrf
from .alert import alert
from .alertFilter import alertFilter
from .ascan import ascan
from .ajaxSpider import ajaxSpider
from .authentication import authentication
from .authorization import authorization
from .automation import automation
from .autoupdate import autoupdate
from .brk import brk
from .context import context
from .core import core
from .exim import exim
from .exportreport import exportreport
from .forcedUser import forcedUser
from .graphql import graphql
from .httpSessions import httpSessions
from .importLogFiles import importLogFiles
from .importurls import importurls
from .localProxies import localProxies
from .network import network
from .openapi import openapi
from .params import params
from .pnh import pnh
from .pscan import pscan
from .replacer import replacer
from .reports import reports
from .retest import retest
from .reveal import reveal
from .revisit import revisit
from .ruleConfig import ruleConfig
from .script import script
from .search import search
from .selenium import selenium
from .sessionManagement import sessionManagement
from .soap import soap
from .spider import spider
from .stats import stats
from .users import users
from .wappalyzer import wappalyzer
from .websocket import websocket
class ZAPv2(object):
    """
    Client API implementation for integrating with ZAP v2.

    All API calls are sent to the special in-proxy hostname ``http://zap/``,
    which ZAP itself answers; the configured proxies therefore must point at
    the running ZAP instance.
    """
    # Base URLs for the JSON API and the "OTHER" (non-JSON, e.g. file/binary) API.
    base = 'http://zap/JSON/'
    base_other = 'http://zap/OTHER/'
    def __init__(self, proxies=None, apikey=None, validate_status_code=False):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.
           - `apikey`: the ZAP API key; when set it is sent with every API
             request (as the ``X-ZAP-API-Key`` header and ``apikey`` param).
           - `validate_status_code`: when True, API responses with status
             >= 300 raise an Exception instead of being returned.

        Note that all of the other classes in this directory are generated
        new ones will need to be manually added to this file
        """
        # Default to a ZAP instance listening locally on port 8080.
        self.__proxies = proxies or {
            'http': 'http://127.0.0.1:8080',
            'https': 'http://127.0.0.1:8080'
        }
        self.__apikey = apikey
        self.__validate_status_code=validate_status_code
        # One client object per API component; each generated class stores this
        # ZAPv2 instance and routes its calls through _request/_request_other.
        self.accessControl = accessControl(self)
        self.acsrf = acsrf(self)
        self.alert = alert(self)
        self.alertFilter = alertFilter(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.authorization = authorization(self)
        self.automation = automation(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.exim = exim(self)
        self.exportreport = exportreport(self)
        self.forcedUser = forcedUser(self)
        self.graphql = graphql(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.importurls = importurls(self)
        self.localProxies = localProxies(self)
        self.network = network(self)
        self.openapi = openapi(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.replacer = replacer(self)
        self.reports = reports(self)
        self.retest = retest(self)
        self.reveal = reveal(self)
        self.revisit = revisit(self)
        self.ruleConfig = ruleConfig(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.soap = soap(self)
        self.spider = spider(self)
        self.stats = stats(self)
        self.users = users(self)
        self.wappalyzer = wappalyzer(self)
        self.websocket = websocket(self)
        # not very nice, but prevents warnings when accessing the ZAP API via https
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        # Currently create a new session for each request to prevent request failing
        # e.g. when polling the spider status
        #self.session = requests.Session()
        #if apikey is not None:
        #    self.session.headers['X-ZAP-API-Key'] = apikey
    def urlopen(self, url, *args, **kwargs):
        """
        Opens a url forcing the proxies to be used.

        Returns the response body as text. Extra positional/keyword arguments
        are forwarded to ``requests.get``.

        :Parameters:
           - `args`: all non-keyword arguments.
           - `kwargs`: all other keyword arguments.
        """
        # Must never leak the API key via proxied requests
        return requests.get(url, proxies=self.__proxies, verify=False, *args, **kwargs).text
    def _request_api(self, url, query=None):
        """
        Shortcut for an API request. Will always add the apikey (if defined)

        Returns the raw ``requests`` Response object.

        :Parameters:
           - `url`: the url to GET at (must start with ``http://zap/``).
           - `query`: optional dict of query parameters.

        Raises ValueError for non-API urls; raises Exception on 3xx/4xx/5xx
        responses when `validate_status_code` was enabled in the constructor.
        """
        if not url.startswith('http://zap/'):
            # Only allow requests to the API so that we never leak the apikey
            raise ValueError('A non ZAP API url was specified ' + url)
        # In theory we should be able to reuse the session,
        # but there have been problems with that
        self.session = requests.Session()
        if self.__apikey is not None:
            self.session.headers['X-ZAP-API-Key'] = self.__apikey
        query = query or {}
        if self.__apikey is not None:
            # Add the apikey to get params for backwards compatibility
            if not query.get('apikey'):
                query['apikey'] = self.__apikey
        # verify=False: the ZAP API may be reached over ZAP's own certificate.
        response = self.session.get(url, params=query, proxies=self.__proxies, verify=False)
        if (self.__validate_status_code and response.status_code >= 300 and response.status_code < 500):
            raise Exception("Non-successful status code returned from ZAP, which indicates a bad request: "
                            + str(response.status_code)
                            + "response: " + response.text )
        elif (self.__validate_status_code and response.status_code >= 500):
            raise Exception("Non-successful status code returned from ZAP, which indicates a ZAP internal error: "
                            + str(response.status_code)
                            + "response: " + response.text )
        return response
    def _request(self, url, get=None):
        """
        Shortcut for a GET request.

        Returns the API response parsed from JSON.

        :Parameters:
           - `url`: the url to GET at.
           - `get`: the dictionary to turn into GET variables.
        """
        data = self._request_api(url, get)
        return data.json()
    def _request_other(self, url, get=None):
        """
        Shortcut for an API OTHER GET request.

        Returns the API response body as text (OTHER endpoints are not JSON).

        :Parameters:
           - `url`: the url to GET at.
           - `get`: the dictionary to turn into GET variables.
        """
        data = self._request_api(url, get)
        return data.text
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.