response | instruction |
---|---|
clones branch <branch_name> of repo_url | def local_git_branch(local_cxn, repo_url, branch_name, log_dir):
"""clones branch <branch_name> of repo_url"""
local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
local_cxn.local('cd %s && git clone %s letsencrypt --branch %s --single-branch'%
(log_dir, repo_url, branch_name))
local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % log_dir) |
clones specified pull request from repo_url and optionally merges into master | def local_git_PR(local_cxn, repo_url, PRnumstr, log_dir, merge_master=True):
"""clones specified pull request from repo_url and optionally merges into master"""
local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % log_dir)
local_cxn.local('cd %s && git clone %s letsencrypt' % (log_dir, repo_url))
local_cxn.local('cd %s && cd letsencrypt && '
'git fetch origin pull/%s/head:lePRtest' % (log_dir, PRnumstr))
local_cxn.local('cd %s && cd letsencrypt && git checkout lePRtest' % log_dir)
if merge_master:
local_cxn.local('cd %s && cd letsencrypt && git remote update origin' % log_dir)
local_cxn.local('cd %s && cd letsencrypt && '
'git merge origin/master -m "testmerge"' % log_dir)
local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % log_dir) |
copies local tarball of repo to remote | def local_repo_to_remote(cxn, log_dir):
"""copies local tarball of repo to remote"""
filename = 'le.tar.gz'
local_path = os.path.join(log_dir, filename)
cxn.put(local=local_path, remote='')
cxn.run('tar xzf %s' % filename) |
delete tarball | def local_repo_clean(local_cxn, log_dir):
"""delete tarball"""
filename = 'le.tar.gz'
local_path = os.path.join(log_dir, filename)
local_cxn.local('rm %s' % local_path) |
copies to remote and executes local script | def deploy_script(cxn, scriptpath, *args):
"""copies to remote and executes local script"""
cxn.put(local=scriptpath, remote='', preserve_mode=True)
scriptfile = os.path.split(scriptpath)[1]
args_str = ' '.join(args)
cxn.run('./'+scriptfile+' '+args_str) |
grabs letsencrypt.log via cat into logged stdout | def grab_certbot_log(cxn):
"grabs letsencrypt.log via cat into logged stdout"
cxn.sudo('/bin/bash -l -i -c \'if [ -f "/var/log/letsencrypt/letsencrypt.log" ]; then ' +
'cat "/var/log/letsencrypt/letsencrypt.log"; else echo "[novarlog]"; fi\'')
    # Fall back to a local certbot.log in case /var/log was not writable.
cxn.sudo('/bin/bash -l -i -c \'if [ -f ./certbot.log ]; then ' +
'cat ./certbot.log; else echo "[nolocallog]"; fi\'') |
Create a single client instance for running tests. | def create_client_instance(ec2_client, target, security_group_id, subnet_id, self_destruct):
"""Create a single client instance for running tests."""
if 'machine_type' in target:
machine_type = target['machine_type']
elif target['virt'] == 'hvm':
machine_type = 't2.medium'
else:
# 32 bit systems
machine_type = 'c1.medium'
name = 'le-%s'%target['name']
print(name, end=" ")
return make_instance(ec2_client,
name,
target['ami'],
KEYNAME,
machine_type=machine_type,
security_group_id=security_group_id,
subnet_id=subnet_id,
self_destruct=self_destruct) |
Return the version number stamped in certbot/__init__.py. | def certbot_version(letstest_scripts_dir):
"""Return the version number stamped in certbot/__init__.py."""
return re.search('''^__version__ = ['"](.+)['"].*''',
file_contents(join(dirname(dirname(letstest_scripts_dir)),
'certbot',
'certbot',
'__init__.py')),
re.M).group(1) |
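As an illustrative aside, the version regex simply picks out whatever sits between the quotes on the __version__ line; the sample file contents below are hypothetical, not taken from the Certbot repository:

import re

sample = '"""Certbot client."""\n__version__ = \'1.7.0.dev0\'\n'
match = re.search(r'''^__version__ = ['"](.+)['"].*''', sample, re.M)
print(match.group(1))  # -> 1.7.0.dev0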
Parse command line arguments.
:param args: command line arguments with the program name removed. This is
usually taken from sys.argv[1:].
:type args: `list` of `str`
:returns: parsed arguments
:rtype: argparse.Namespace | def parse_args(args):
"""Parse command line arguments.
:param args: command line arguments with the program name removed. This is
usually taken from sys.argv[1:].
:type args: `list` of `str`
:returns: parsed arguments
:rtype: argparse.Namespace
"""
# Use the file's docstring for the help text and don't let argparse reformat it.
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
return parser.parse_args(args) |
Confirms that snapcraft is logged in to an account.
:raises SystemExit: if the command snapcraft is unavailable or it
isn't logged into an account | def assert_logged_into_snapcraft():
"""Confirms that snapcraft is logged in to an account.
:raises SystemExit: if the command snapcraft is unavailable or it
isn't logged into an account
"""
cmd = 'snapcraft whoami'.split()
try:
subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, universal_newlines=True)
except (subprocess.CalledProcessError, OSError):
print("Please make sure that the command line tool snapcraft is")
print("installed and that you have logged in to an account by running")
print("'snapcraft login'. If that fails, your credentials may have expired")
print("and you should run `snapcraft logout` followed by 'snapcraft login'.")
sys.exit(1) |
Finds the revisions for the snap and version in the given channel.
If you call this function without being logged in with snapcraft, it
will hang with no output.
:param str snap: the name of the snap on the snap store
:param str channel: snap channel to pull revisions from
:param str version: snap version number, e.g. 1.7.0
:returns: list of revision numbers
:rtype: `list` of `str`
:raises subprocess.CalledProcessError: if the snapcraft command
fails
:raises AssertionError: if the expected snaps are not found | def get_snap_revisions(snap, channel, version):
"""Finds the revisions for the snap and version in the given channel.
If you call this function without being logged in with snapcraft, it
will hang with no output.
:param str snap: the name of the snap on the snap store
:param str channel: snap channel to pull revisions from
:param str version: snap version number, e.g. 1.7.0
:returns: list of revision numbers
:rtype: `list` of `str`
:raises subprocess.CalledProcessError: if the snapcraft command
fails
:raises AssertionError: if the expected snaps are not found
"""
print('Getting revision numbers for', snap, version)
cmd = ['snapcraft', 'status', snap]
process = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True)
    pattern = rf'^\s+{channel}\s+{version}\s+(\d+)\s*'
revisions = re.findall(pattern, process.stdout, re.MULTILINE)
assert len(revisions) == SNAP_ARCH_COUNT, f'Unexpected number of snaps found for {channel} {snap} {version} (expected {SNAP_ARCH_COUNT}, found {len(revisions)})'
return revisions |
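For illustration, here is how that pattern pulls revision numbers out of a status table. The fragment below is a made-up stand-in for snapcraft's output, shaped the way the regex expects (leading whitespace, channel, version, revision), not a captured run:

import re

sample_status = (
    "    candidate   1.7.0   1234\n"
    "    candidate   1.7.0   1235\n"
    "    beta        1.7.0   1230\n"
)
pattern = rf'^\s+candidate\s+{re.escape("1.7.0")}\s+(\d+)\s*'
print(re.findall(pattern, sample_status, re.MULTILINE))  # -> ['1234', '1235']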
Promotes the given snaps from source_channel to the stable channel.
If the snaps have already been released to the stable channel, this
function will try to release them again which has no effect.
:param snaps: snap package names to be promoted
:type snaps: `list` of `str`
:param str source_channel: snap channel to promote from
:param str version: the version number that should be found in the
candidate channel, e.g. 1.7.0
:param progressive_percentage: specifies the percentage of a progressive
deployment
:type progressive_percentage: int or None
:raises SystemExit: if the command snapcraft is unavailable or it
isn't logged into an account
:raises subprocess.CalledProcessError: if a snapcraft command fails
for another reason | def promote_snaps(snaps, source_channel, version, progressive_percentage=None):
"""Promotes the given snaps from source_channel to the stable channel.
If the snaps have already been released to the stable channel, this
function will try to release them again which has no effect.
:param snaps: snap package names to be promoted
:type snaps: `list` of `str`
:param str source_channel: snap channel to promote from
:param str version: the version number that should be found in the
candidate channel, e.g. 1.7.0
:param progressive_percentage: specifies the percentage of a progressive
deployment
:type progressive_percentage: int or None
:raises SystemExit: if the command snapcraft is unavailable or it
isn't logged into an account
:raises subprocess.CalledProcessError: if a snapcraft command fails
for another reason
"""
assert_logged_into_snapcraft()
for snap in snaps:
revisions = get_snap_revisions(snap, source_channel, version)
# The loop below is kind of slow, so let's print some output about what
# it is doing.
print('Releasing', snap, 'snaps to the stable channel')
for revision in revisions:
cmd = ['snapcraft', 'release', snap, revision, 'stable']
if progressive_percentage:
cmd.extend(f'--progressive {progressive_percentage}'.split())
try:
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True)
except subprocess.CalledProcessError as e:
print("The command", f"'{' '.join(cmd)}'", "failed.")
print("The output printed to stdout was:")
print(e.stdout)
raise |
Retrieve version number for release from Azure Pipelines
:param major_version: only consider releases for the specified major
version
:type major_version: str or None
:returns: version number | def fetch_version_number(major_version=None):
"""Retrieve version number for release from Azure Pipelines
:param major_version: only consider releases for the specified major
version
:type major_version: str or None
:returns: version number
"""
# Create a connection to the azure org
organization_url = 'https://dev.azure.com/certbot'
connection = Connection(base_url=organization_url)
# Find the build artifacts
build_client = connection.clients.get_build_client()
builds = build_client.get_builds('certbot', definitions='3')
for build in builds:
version = build_client.get_build('certbot', build.id).source_branch.split('v')[1]
if major_version is None or version.split('.')[0] == major_version:
return version
raise ValueError('Release not found on Azure Pipelines!') |
Find the relevant python executable that is of the given python major version.
Will test, in decreasing priority order:
* the current Python interpreter
* 'pythonX' executable in PATH (with X the given major version) if available
* 'python' executable in PATH if available
* Windows Python launcher 'py' executable in PATH if available
Incompatible python versions for Certbot will be evicted (e.g. Python 3
versions less than 3.7).
:rtype: str
:return: the relevant python executable path
:raise RuntimeError: if no relevant python executable path could be found | def find_python_executable() -> str:
"""
Find the relevant python executable that is of the given python major version.
Will test, in decreasing priority order:
* the current Python interpreter
* 'pythonX' executable in PATH (with X the given major version) if available
* 'python' executable in PATH if available
* Windows Python launcher 'py' executable in PATH if available
Incompatible python versions for Certbot will be evicted (e.g. Python 3
versions less than 3.7).
:rtype: str
:return: the relevant python executable path
:raise RuntimeError: if no relevant python executable path could be found
"""
    # First try, current python executable
if _check_version('{0}.{1}.{2}'.format(
sys.version_info[0], sys.version_info[1], sys.version_info[2])):
return sys.executable
# Second try, with python executables in path
for one_version in ('3', '',):
try:
one_python = 'python{0}'.format(one_version)
output = subprocess.check_output([one_python, '--version'],
universal_newlines=True, stderr=subprocess.STDOUT)
if _check_version(output.strip().split()[1]):
return subprocess.check_output([one_python, '-c',
'import sys; sys.stdout.write(sys.executable);'],
universal_newlines=True)
except (subprocess.CalledProcessError, OSError):
pass
# Last try, with Windows Python launcher
try:
output_version = subprocess.check_output(['py', '-3', '--version'],
universal_newlines=True, stderr=subprocess.STDOUT)
if _check_version(output_version.strip().split()[1]):
            return subprocess.check_output(['py', '-3', '-c',
'import sys; sys.stdout.write(sys.executable);'],
universal_newlines=True)
except (subprocess.CalledProcessError, OSError):
pass
    raise RuntimeError('Error, no compatible Python executable for Certbot could be found.') |
Determines the venv path and prepares it for use.
This function cleans up any Python eggs in the current working directory
and ensures the venv path is available for use. The path used is the
VENV_NAME environment variable if it is set and venv_name otherwise. If
there is already a directory at the desired path, the existing directory is
renamed by appending a timestamp to the directory name.
:param str venv_name: The name or path where the virtual
environment should be created if VENV_NAME isn't set.
:returns: path where the virtual environment should be created
:rtype: str | def prepare_venv_path(venv_name):
"""Determines the venv path and prepares it for use.
This function cleans up any Python eggs in the current working directory
and ensures the venv path is available for use. The path used is the
VENV_NAME environment variable if it is set and venv_name otherwise. If
there is already a directory at the desired path, the existing directory is
renamed by appending a timestamp to the directory name.
    :param str venv_name: The name or path where the virtual
        environment should be created if VENV_NAME isn't set.
:returns: path where the virtual environment should be created
:rtype: str
"""
for path in glob.glob('*.egg-info'):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
env_venv_name = os.environ.get('VENV_NAME')
if env_venv_name:
print('Creating venv at {0}'
' as specified in VENV_NAME'.format(env_venv_name))
venv_name = env_venv_name
if os.path.isdir(venv_name):
os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))
return venv_name |
Installs packages in the given venv.
:param str venv_name: The name or path where the virtual
environment should be created.
:param pip_args: Command line arguments that should be given to
pip to install packages
:type pip_args: `list` of `str` | def install_packages(venv_name, pip_args):
"""Installs packages in the given venv.
    :param str venv_name: The name or path where the virtual
        environment should be created.
:param pip_args: Command line arguments that should be given to
pip to install packages
:type pip_args: `list` of `str`
"""
    # Use the python executable from the venv so the following commands run inside it.
py_venv = get_venv_python_path(venv_name)
command = [py_venv, os.path.abspath('tools/pip_install.py')]
command.extend(pip_args)
subprocess_with_print(command)
if os.path.isdir(os.path.join(venv_name, 'bin')):
# Linux/OSX specific
print('-------------------------------------------------------------------')
print('Please run the following command to activate developer environment:')
print('source {0}/bin/activate'.format(venv_name))
print('-------------------------------------------------------------------')
elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
# Windows specific
print('---------------------------------------------------------------------------')
print('Please run one of the following commands to activate developer environment:')
print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
print('---------------------------------------------------------------------------')
else:
raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name)) |
Create a Python virtual environment at venv_path.
:param str venv_path: path where the venv should be created | def create_venv(venv_path):
"""Create a Python virtual environment at venv_path.
:param str venv_path: path where the venv should be created
"""
python = find_python_executable()
command = [python, '-m', 'venv', venv_path]
subprocess_with_print(command) |
Return all clients calls made in provided source code.
:returns: A dict of service_name -> set([client calls]).
Example: {"s3": set(["list_objects", "create_bucket"]),
"dynamodb": set(["describe_table"])} | def get_client_calls(source_code):
# type: (str) -> APICallT
"""Return all clients calls made in provided source code.
:returns: A dict of service_name -> set([client calls]).
Example: {"s3": set(["list_objects", "create_bucket"]),
"dynamodb": set(["describe_table"])}
"""
parsed = parse_code(source_code)
t = SymbolTableTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls |
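A hedged usage sketch, assuming get_client_calls is importable from its module; the source string is made up and the commented result only illustrates the dict-of-sets format described in the docstring:

source = """
import boto3
d = boto3.client('dynamodb')
d.describe_table(TableName='mytable')
"""
print(get_client_calls(source))  # e.g. {'dynamodb': set(['describe_table'])}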
Return client calls for a chalice app.
This is similar to ``get_client_calls`` except it will
automatically traverse into chalice views with the assumption
that they will be called. | def get_client_calls_for_app(source_code):
# type: (str) -> APICallT
"""Return client calls for a chalice app.
This is similar to ``get_client_calls`` except it will
automatically traverse into chalice views with the assumption
that they will be called.
"""
parsed = parse_code(source_code)
t = AppViewTypeInfer(parsed)
binder = t.bind_types()
collector = APICallCollector(binder)
api_calls = collector.collect_api_calls(parsed.parsed_ast)
return api_calls |
Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately. | def to_cfn_resource_name(name: str) -> str:
"""Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately.
"""
if not name:
raise ValueError("Invalid name: %r" % name)
word_separators = ['-', '_']
for word_separator in word_separators:
word_parts = [p for p in name.split(word_separator) if p]
name = ''.join([w[0].upper() + w[1:] for w in word_parts])
return re.sub(r'[^A-Za-z0-9]+', '', name) |
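A short usage sketch (the sample names are hypothetical), showing the CamelCase conversion and the final stripping of non-alphanumeric characters:

# '-' and '_' act as word separators; anything non-alphanumeric is dropped at the end
assert to_cfn_resource_name('my-app_handler') == 'MyAppHandler'
assert to_cfn_resource_name('rest_api') == 'RestApi'
assert to_cfn_resource_name('123-func!') == '123Func'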
Delete a top level key from the deployed JSON file. | def remove_stage_from_deployed_values(key: str, filename: str) -> None:
"""Delete a top level key from the deployed JSON file."""
final_values: Dict[str, Any] = {}
try:
with open(filename, 'r') as f:
final_values = json.load(f)
except IOError:
        # If there is no file to delete from, then this function is a no-op.
return
try:
del final_values[key]
with open(filename, 'wb') as outfile:
data = serialize_to_json(final_values)
outfile.write(data.encode('utf-8'))
except KeyError:
        # If the key didn't exist then there is nothing to remove.
pass |
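A minimal end-to-end sketch against a throwaway file; the stage names are illustrative and it assumes serialize_to_json (defined below) is available alongside the function:

import json
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'deployed.json')
with open(path, 'w') as f:
    json.dump({'dev': {'api_handler_name': 'demo'}, 'prod': {}}, f)

remove_stage_from_deployed_values('dev', path)
with open(path) as f:
    print(json.load(f))  # -> {'prod': {}}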
Record deployed values to a JSON file.
This allows subsequent deploys to lookup previously deployed values. | def record_deployed_values(
deployed_values: Dict[str, Any], filename: str
) -> None:
"""Record deployed values to a JSON file.
This allows subsequent deploys to lookup previously deployed values.
"""
final_values: Dict[str, Any] = {}
if os.path.isfile(filename):
with open(filename, 'r') as f:
final_values = json.load(f)
final_values.update(deployed_values)
with open(filename, 'wb') as outfile:
data = serialize_to_json(final_values)
outfile.write(data.encode('utf-8')) |
Serialize to pretty printed JSON.
This includes using 2 space indentation, no trailing whitespace, and
including a newline at the end of the JSON document. Useful when you want
to serialize JSON to disk. | def serialize_to_json(data: Any) -> str:
"""Serialize to pretty printed JSON.
This includes using 2 space indentation, no trailing whitespace, and
including a newline at the end of the JSON document. Useful when you want
to serialize JSON to disk.
"""
return json.dumps(data, indent=2, separators=(',', ': ')) + '\n' |
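A one-line illustration of the formatting contract (two-space indent, no trailing whitespace, newline at the end):

assert serialize_to_json({'a': 1}) == '{\n  "a": 1\n}\n'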
Create a zip file from a source input directory.
This function is intended to be an equivalent to
`zip -r`. You give it a source directory, `source_dir`,
and it will recursively zip up the files into a zipfile
specified by the `outfile` argument. | def create_zip_file(source_dir: str, outfile: str) -> None:
"""Create a zip file from a source input directory.
This function is intended to be an equivalent to
`zip -r`. You give it a source directory, `source_dir`,
and it will recursively zip up the files into a zipfile
specified by the `outfile` argument.
"""
with ChaliceZipFile(
outfile, 'w', compression=zipfile.ZIP_DEFLATED, osutils=OSUtils()
) as z:
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
full_name = os.path.join(root, filename)
archive_name = os.path.relpath(full_name, source_dir)
z.write(full_name, archive_name) |
Development and debugging commands for chalice.
All the commands under the "chalice dev" namespace are provided
to help chalice developers introspect the internals of chalice.
They are also useful for users to better understand the chalice
deployment process.
These commands are provided for informational purposes only.
There is NO guarantee of backwards compatibility for any
"chalice dev" commands. Do not rely on the output of these commands.
These commands allow introspection of chalice internals, and the
internals of chalice are subject to change as needed. | def dev():
# type: () -> None
"""Development and debugging commands for chalice.
All the commands under the "chalice dev" namespace are provided
to help chalice developers introspect the internals of chalice.
They are also useful for users to better understand the chalice
deployment process.
These commands are provided for informational purposes only.
There is NO guarantee of backwards compatibility for any
"chalice dev" commands. Do not rely on the output of these commands.
These commands allow introspection of chalice internals, and the
internals of chalice are subject to change as needed.
""" |
Generate and display deployment plan.
This command will calculate and pretty print the deployment plan
without actually executing the plan. It's primarily used to better
understand the chalice deployment process. | def plan(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
"""Generate and display deployment plan.
This command will calculate and pretty print the deployment plan
without actually executing the plan. It's primarily used to better
understand the chalice deployment process.
"""
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session()
ui = UI()
d = factory.create_plan_only_deployer(
session=session, config=config, ui=ui)
d.deploy(config, chalice_stage_name=stage) |
Generate and display the application graph. | def appgraph(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
"""Generate and display the application graph."""
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
graph_build = ApplicationGraphBuilder()
graph = graph_build.build(config, stage)
ui = UI()
GraphPrettyPrint(ui).display_graph(graph) |
Invoke the deployed lambda function NAME.
Reads payload from STDIN. | def invoke(ctx, name, profile, stage):
# type: (click.Context, str, str, str) -> None
"""Invoke the deployed lambda function NAME.
Reads payload from STDIN.
"""
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
try:
invoke_handler = factory.create_lambda_invoke_handler(name, stage)
payload = factory.create_stdin_reader().read()
invoke_handler.invoke(payload)
except NoSuchFunctionError as e:
err = click.ClickException(
"could not find a lambda function named %s." % e.name)
err.exit_code = 2
raise err
except botocore.exceptions.ClientError as e:
error = e.response['Error']
err = click.ClickException(
"got '%s' exception back from Lambda\n%s"
% (error['Code'], error['Message']))
err.exit_code = 1
raise err
except UnhandledLambdaError:
err = click.ClickException(
"Unhandled exception in Lambda function, details above.")
err.exit_code = 1
raise err
except ReadTimeout as e:
err = click.ClickException(e.message)
err.exit_code = 1
raise err |
Generate a model from Chalice routes.
Currently only supports generating Swagger 2.0 models. | def generate_models(ctx, stage):
# type: (click.Context, str) -> None
"""Generate a model from Chalice routes.
Currently only supports generating Swagger 2.0 models.
"""
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
if not config.chalice_app.routes:
click.echo('No REST API found to generate model from.')
raise click.Abort()
swagger_generator = TemplatedSwaggerGenerator()
model = swagger_generator.generate_swagger(
config.chalice_app,
)
ui = UI()
ui.write(json.dumps(model, indent=4, cls=PlanEncoder))
ui.write('\n') |
Generate a cloudformation template for a starter CD pipeline.
This command will write a starter cloudformation template to
the filename you provide. It contains a CodeCommit repo,
a CodeBuild stage for packaging your chalice app, and a
CodePipeline stage to deploy your application using cloudformation.
You can use any AWS SDK or the AWS CLI to deploy this stack.
Here's an example using the AWS CLI:
$ chalice generate-pipeline pipeline.json
$ aws cloudformation deploy --stack-name mystack
--template-file pipeline.json --capabilities CAPABILITY_IAM | def generate_pipeline(ctx, pipeline_version, codebuild_image, source,
buildspec_file, filename):
# type: (click.Context, str, str, str, str, str) -> None
"""Generate a cloudformation template for a starter CD pipeline.
This command will write a starter cloudformation template to
the filename you provide. It contains a CodeCommit repo,
a CodeBuild stage for packaging your chalice app, and a
CodePipeline stage to deploy your application using cloudformation.
You can use any AWS SDK or the AWS CLI to deploy this stack.
Here's an example using the AWS CLI:
\b
$ chalice generate-pipeline pipeline.json
$ aws cloudformation deploy --stack-name mystack \b
--template-file pipeline.json --capabilities CAPABILITY_IAM
"""
from chalice import pipeline
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj()
p = cast(pipeline.BasePipelineTemplate, None)
if pipeline_version == 'v1':
p = pipeline.CreatePipelineTemplateLegacy()
else:
p = pipeline.CreatePipelineTemplateV2()
params = pipeline.PipelineParameters(
app_name=config.app_name,
lambda_python_version=config.lambda_python_version,
codebuild_image=codebuild_image,
code_source=source,
pipeline_version=pipeline_version,
)
output = p.create_template(params)
if buildspec_file:
extractor = pipeline.BuildSpecExtractor()
buildspec_contents = extractor.extract_buildspec(output)
with open(buildspec_file, 'w') as f:
f.write(buildspec_contents)
with open(filename, 'w') as f:
f.write(serialize_to_json(output)) |
Validate app configuration.
The purpose of this method is to provide a fail fast mechanism
for anything we know is going to fail deployment.
We can detect common error cases and provide the user with helpful
error messages. | def validate_configuration(config):
# type: (Config) -> None
"""Validate app configuration.
The purpose of this method is to provide a fail fast mechanism
for anything we know is going to fail deployment.
We can detect common error cases and provide the user with helpful
error messages.
"""
routes = config.chalice_app.routes
validate_routes(routes)
validate_route_content_types(routes, config.chalice_app.api.binary_types)
validate_minimum_compression_size(config)
_validate_manage_iam_role(config)
validate_python_version(config)
validate_unique_function_names(config)
validate_feature_flags(config.chalice_app)
validate_endpoint_type(config)
validate_resource_policy(config)
validate_sqs_configuration(config.chalice_app)
validate_environment_variables_type(config) |
Validate configuration matches a specific python version.
If the ``actual_py_version`` is not provided, it will default
to the major/minor version of the currently running python
interpreter.
:param actual_py_version: The major/minor python version in
the form "pythonX.Y", e.g "python2.7", "python3.6". | def validate_python_version(config, actual_py_version=None):
# type: (Config, Optional[str]) -> None
"""Validate configuration matches a specific python version.
If the ``actual_py_version`` is not provided, it will default
to the major/minor version of the currently running python
interpreter.
:param actual_py_version: The major/minor python version in
the form "pythonX.Y", e.g "python2.7", "python3.6".
"""
lambda_version = config.lambda_python_version
if actual_py_version is None:
actual_py_version = 'python%s.%s' % sys.version_info[:2]
if actual_py_version != lambda_version:
# We're not making this a hard error for now, but we may
# turn this into a hard fail.
warnings.warn("You are currently running %s, but the closest "
"supported version on AWS Lambda is %s\n"
"Please use %s, otherwise you may run into "
"deployment issues. " %
(actual_py_version, lambda_version, lambda_version),
stacklevel=2) |
Ensure no local AWS configuration is used.
This is useful for unit/functional tests so we
can ensure that local configuration does not affect
the results of the test. | def no_local_config(monkeypatch):
"""Ensure no local AWS configuration is used.
This is useful for unit/functional tests so we
can ensure that local configuration does not affect
the results of the test.
"""
monkeypatch.setenv('AWS_DEFAULT_REGION', 'us-west-2')
monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'foo')
monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'bar')
monkeypatch.delenv('AWS_PROFILE', raising=False)
monkeypatch.delenv('AWS_DEFAULT_PROFILE', raising=False)
# Ensure that the existing ~/.aws/{config,credentials} file
# don't influence test results.
monkeypatch.setenv('AWS_CONFIG_FILE', '/tmp/asdfasdfaf/does/not/exist')
monkeypatch.setenv('AWS_SHARED_CREDENTIALS_FILE',
'/tmp/asdfasdfaf/does/not/exist2') |
Generator of sequential increasing numbers | def counter():
"""Generator of sequential increasing numbers"""
count = 1
while True:
yield count
count += 1 |
Get numbers from DynamoDB in the format written by testwebsocketapp.
| def get_numbers_from_dynamodb(temp_dirname):
"""Get numbers from DynamoDB in the format written by testwebsocketapp.
"""
factory = CLIFactory(temp_dirname)
session = factory.create_botocore_session()
ddb = session.create_client('dynamodb')
paginator = ddb.get_paginator('scan')
numbers = sorted([
int(item['entry']['N'])
for page in paginator.paginate(
TableName=RANDOM_APP_NAME,
ConsistentRead=True,
)
for item in page['Items']
])
return numbers |
Find non-sequential gaps in a sequence of numbers
:type numbers: Iterable of ints
:param numbers: Iterable to check for gaps
:returns: List of tuples with the gaps in the format
[(start_of_gap, end_of_gap, ...)]. If the list is empty then there
are no gaps. | def find_skips_in_seq(numbers):
"""Find non-sequential gaps in a sequence of numbers
:type numbers: Iterable of ints
:param numbers: Iterable to check for gaps
:returns: List of tuples with the gaps in the format
[(start_of_gap, end_of_gap, ...)]. If the list is empty then there
are no gaps.
"""
last = numbers[0] - 1
skips = []
for elem in numbers:
if elem != last + 1:
skips.append((last, elem))
last = elem
return skips |
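A quick usage sketch with illustrative sequences:

# A jump from 3 to 7 is reported as the gap (3, 7)
assert find_skips_in_seq([1, 2, 3, 7, 8]) == [(3, 7)]
# A fully sequential run has no gaps
assert find_skips_in_seq([5, 6, 7]) == []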
Single line docstring. | def single_doc():
"""Single line docstring."""
return {'docstring': 'single'} |
Multi-line docstring.
And here is another line. | def multi_doc():
"""Multi-line docstring.
And here is another line.
"""
return {'docstring': 'multi'} |
Joins individual filters into one css filter. | def element_removal(selectors: List[str], html_content):
"""Joins individual filters into one css filter."""
selector = ",".join(selectors)
return subtractive_css_selector(selector, html_content) |
change elementpath.select results to string type
# The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati)
# https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038 | def elementpath_tostring(obj):
"""
change elementpath.select results to string type
# The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati)
# https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038
"""
import elementpath
from decimal import Decimal
import math
if obj is None:
return ''
# https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select
elif isinstance(obj, elementpath.XPathNode):
return obj.string_value
elif isinstance(obj, bool):
return 'true' if obj else 'false'
elif isinstance(obj, Decimal):
value = format(obj, 'f')
if '.' in value:
return value.rstrip('0').rstrip('.')
return value
elif isinstance(obj, float):
if math.isnan(obj):
return 'NaN'
elif math.isinf(obj):
return str(obj).upper()
value = str(obj)
if '.' in value:
value = value.rstrip('0').rstrip('.')
if '+' in value:
value = value.replace('+', '')
if 'e' in value:
return value.upper()
return value
return str(obj) |
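A few illustrative conversions, assuming the elementpath package is installed (the function imports it) and the function above is in scope:

from decimal import Decimal

assert elementpath_tostring(None) == ''
assert elementpath_tostring(True) == 'true'
assert elementpath_tostring(Decimal('1.500')) == '1.5'
assert elementpath_tostring(3.0) == '3'
assert elementpath_tostring(float('inf')) == 'INF'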
Converts html string to a string with just the text. If rendering
anchor tag content is enabled, the anchor tag content is also
included in the text
:param html_content: string with html content
:param render_anchor_tag_content: boolean flag indicating whether to extract
hyperlinks (the anchor tag content) together with text. This refers to the
'href' inside 'a' tags.
Anchor tag content is rendered in the following manner:
'[ text ](anchor tag content)'
:return: extracted text from the HTML | def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False) -> str:
"""Converts html string to a string with just the text. If ignoring
rendering anchor tag content is enable, anchor tag content are also
included in the text
:param html_content: string with html content
:param render_anchor_tag_content: boolean flag indicating whether to extract
hyperlinks (the anchor tag content) together with text. This refers to the
'href' inside 'a' tags.
Anchor tag content is rendered in the following manner:
'[ text ](anchor tag content)'
:return: extracted text from the HTML
"""
# if anchor tag content flag is set to True define a config for
# extracting this content
if render_anchor_tag_content:
parser_config = ParserConfig(
annotation_rules={"a": ["hyperlink"]},
display_links=True
)
# otherwise set config to None/default
else:
parser_config = None
# RSS Mode - Inscriptis will treat `title` as something else.
# Make it as a regular block display element (//item/title)
# This is a bit of a hack - the real way it to use XSLT to convert it to HTML #1874
if is_rss:
html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content)
html_content = re.sub(r'</title>', r'</h1>', html_content)
text_content = get_text(html_content, config=parser_config)
return text_content |
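A usage sketch; the exact whitespace depends on the installed Inscriptis version, so the comments only indicate the general shape of the output:

html = '<p>See <a href="/docs">the docs</a> for details.</p>'
print(html_to_text(html))                                  # plain text, link target dropped
print(html_to_text(html, render_anchor_tag_content=True))  # link target kept, e.g. ...(/docs)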
Some sites are using sneaky tactics to make prices and other information un-renderable by Inscriptis
This could go into its own Pip package in the future, for faster updates | def workarounds_for_obfuscations(content):
"""
Some sites are using sneaky tactics to make prices and other information un-renderable by Inscriptis
This could go into its own Pip package in the future, for faster updates
"""
# HomeDepot.com style <span>$<!-- -->90<!-- -->.<!-- -->74</span>
# https://github.com/weblyzard/inscriptis/issues/45
if not content:
return content
    content = re.sub(r'<!--\s+-->', '', content)
return content |
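For example, the HomeDepot-style obfuscation mentioned in the comment collapses back into a readable price:

html = '<span>$<!-- -->90<!-- -->.<!-- -->74</span>'
print(workarounds_for_obfuscations(html))  # -> <span>$90.74</span>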
Basic setting of user-agent
NOTE!!!!!! The service that does the actual Chrome fetching should handle any anti-robot techniques
THERE ARE MANY WAYS THAT IT CAN BE DETECTED AS A ROBOT!!
This does not take care of
- Scraping of 'navigator' (platform, productSub, vendor, oscpu etc etc) browser object (navigator.appVersion) etc
- TCP/IP fingerprint JA3 etc
- Graphic rendering fingerprinting
- Your IP being obviously in a pool of bad actors
- Too many requests
- Scraping of SCH-UA browser replies (thanks google!!)
- Scraping of ServiceWorker, new window calls etc
See https://filipvitas.medium.com/how-to-set-user-agent-header-with-puppeteer-js-and-not-fail-28c7a02165da
Puppeteer requests https://github.com/dgtlmoon/pyppeteerstealth
:param page:
:param headers:
:return: | def manage_user_agent(headers, current_ua=''):
"""
Basic setting of user-agent
NOTE!!!!!! The service that does the actual Chrome fetching should handle any anti-robot techniques
THERE ARE MANY WAYS THAT IT CAN BE DETECTED AS A ROBOT!!
This does not take care of
- Scraping of 'navigator' (platform, productSub, vendor, oscpu etc etc) browser object (navigator.appVersion) etc
- TCP/IP fingerprint JA3 etc
- Graphic rendering fingerprinting
- Your IP being obviously in a pool of bad actors
- Too many requests
- Scraping of SCH-UA browser replies (thanks google!!)
- Scraping of ServiceWorker, new window calls etc
See https://filipvitas.medium.com/how-to-set-user-agent-header-with-puppeteer-js-and-not-fail-28c7a02165da
Puppeteer requests https://github.com/dgtlmoon/pyppeteerstealth
:param page:
:param headers:
:return:
"""
    # Prefer a user-agent supplied in custom headers; otherwise, if the browser reports HeadlessChrome, rewrite it to look like regular Chrome.
ua_in_custom_headers = next((v for k, v in headers.items() if k.lower() == "user-agent"), None)
if ua_in_custom_headers:
return ua_in_custom_headers
if not ua_in_custom_headers and current_ua:
current_ua = current_ua.replace('HeadlessChrome', 'Chrome')
return current_ua
return None |
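A couple of illustrative calls (the header and UA strings are made up):

# A user-agent supplied in custom headers always wins
assert manage_user_agent({'User-Agent': 'MyBot/1.0'}, current_ua='HeadlessChrome/120.0') == 'MyBot/1.0'
# Otherwise an obvious headless UA is rewritten to look like regular Chrome
assert manage_user_agent({}, current_ua='Mozilla/5.0 HeadlessChrome/120.0') == 'Mozilla/5.0 Chrome/120.0'
# With neither, nothing is returned
assert manage_user_agent({}) is None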
Create application for the tests. | def app(request):
"""Create application for the tests."""
datastore_path = "./test-datastore"
# So they don't delay in fetching
os.environ["MINIMUM_SECONDS_RECHECK_TIME"] = "0"
try:
os.mkdir(datastore_path)
except FileExistsError:
pass
cleanup(datastore_path)
app_config = {'datastore_path': datastore_path, 'disable_checkver' : True}
cleanup(app_config['datastore_path'])
logger_level = 'TRACE'
logger.remove()
log_level_for_stdout = { 'DEBUG', 'SUCCESS' }
logger.configure(handlers=[
{"sink": sys.stdout, "level": logger_level,
"filter" : lambda record: record['level'].name in log_level_for_stdout},
{"sink": sys.stderr, "level": logger_level,
"filter": lambda record: record['level'].name not in log_level_for_stdout},
])
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], include_default_watches=False)
app = changedetection_app(app_config, datastore)
# Disable CSRF while running tests
app.config['WTF_CSRF_ENABLED'] = False
app.config['STOP_THREADS'] = True
def teardown():
datastore.stop_thread = True
app.config.exit.set()
cleanup(app_config['datastore_path'])
request.addfinalizer(teardown)
yield app |
Testing that the link changes are detected when
render_anchor_tag_content setting is set to true | def test_render_anchor_tag_content_true(client, live_server):
"""Testing that the link changes are detected when
render_anchor_tag_content setting is set to true"""
sleep_time_for_fetch_thread = 3
# Give the endpoint time to spin up
time.sleep(1)
# set original html text
set_original_ignore_response()
    # Go to the settings page, choose to ignore links (don't select/send "application-render_anchor_tag_content")
res = client.post(
url_for("settings_page"),
data={
"requests-time_between_check-minutes": 180,
"application-fetch_backend": "html_requests",
},
follow_redirects=True,
)
assert b"Settings updated." in res.data
# Add our URL to the import page
test_url = url_for("test_endpoint", _external=True)
res = client.post(
url_for("import_page"), data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
time.sleep(sleep_time_for_fetch_thread)
# Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True)
# set a new html text with a modified link
set_modified_ignore_response()
time.sleep(sleep_time_for_fetch_thread)
# Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread)
# We should not see the rendered anchor tag
res = client.get(url_for("preview_page", uuid="first"))
assert '(/modified_link)' not in res.data.decode()
    # Go to the settings page, ENABLE render anchor tag
res = client.post(
url_for("settings_page"),
data={
"requests-time_between_check-minutes": 180,
"application-render_anchor_tag_content": "true",
"application-fetch_backend": "html_requests",
},
follow_redirects=True,
)
assert b"Settings updated." in res.data
# Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread)
# check that the anchor tag content is rendered
res = client.get(url_for("preview_page", uuid="first"))
assert '(/modified_link)' in res.data.decode()
# since the link has changed, and we chose to render anchor tag content,
# we should detect a change (new 'unviewed' class)
res = client.get(url_for("index"))
assert b"unviewed" in res.data
assert b"/test-endpoint" in res.data
# Cleanup everything
res = client.get(url_for("form_delete", uuid="all"),
follow_redirects=True)
assert b'Deleted' in res.data |
Test that an Excel spreadsheet can be uploaded and the watches are created correctly | def test_import_custom_xlsx(client, live_server):
    """Test that an Excel spreadsheet can be uploaded and the watches are created correctly"""
#live_server_setup(live_server)
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'import/spreadsheet.xlsx')
with open(filename, 'rb') as f:
data= {
'file_mapping': 'custom',
'custom_xlsx[col_0]': '1',
'custom_xlsx[col_1]': '3',
'custom_xlsx[col_2]': '5',
'custom_xlsx[col_3]': '4',
'custom_xlsx[col_type_0]': 'title',
'custom_xlsx[col_type_1]': 'url',
'custom_xlsx[col_type_2]': 'include_filters',
'custom_xlsx[col_type_3]': 'interval_minutes',
'xlsx_file': (io.BytesIO(f.read()), 'spreadsheet.xlsx')
}
res = client.post(
url_for("import_page"),
data=data,
follow_redirects=True,
)
assert b'4 imported from custom .xlsx' in res.data
# Because this row was actually just a header with no usable URL, we should get an error
assert b'Error processing row number 1' in res.data
res = client.get(
url_for("index")
)
assert b'Somesite results ABC' in res.data
assert b'City news results' in res.data
# Just find one to check over
for uuid, watch in live_server.app.config['DATASTORE'].data['watching'].items():
if watch.get('title') == 'Somesite results ABC':
filters = watch.get('include_filters')
assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]'
assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0}
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data |
Test that an Excel spreadsheet can be uploaded and the watches are created correctly | def test_import_watchete_xlsx(client, live_server):
    """Test that an Excel spreadsheet can be uploaded and the watches are created correctly"""
#live_server_setup(live_server)
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'import/spreadsheet.xlsx')
with open(filename, 'rb') as f:
data= {
'file_mapping': 'wachete',
'xlsx_file': (io.BytesIO(f.read()), 'spreadsheet.xlsx')
}
res = client.post(
url_for("import_page"),
data=data,
follow_redirects=True,
)
assert b'4 imported from Wachete .xlsx' in res.data
res = client.get(
url_for("index")
)
assert b'Somesite results ABC' in res.data
assert b'City news results' in res.data
# Just find one to check over
for uuid, watch in live_server.app.config['DATASTORE'].data['watching'].items():
if watch.get('title') == 'Somesite results ABC':
filters = watch.get('include_filters')
assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]'
assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0}
assert watch.get('fetch_backend') == 'html_requests' # Has inactive 'dynamic wachet'
if watch.get('title') == 'JS website':
assert watch.get('fetch_backend') == 'html_webdriver' # Has active 'dynamic wachet'
if watch.get('title') == 'system default website':
assert watch.get('fetch_backend') == 'system' # uses default if blank
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data |
Create or load index from json path. | def _create_or_load_index(
index_type_str: Optional[str] = None,
index_json_path: Optional[str] = None,
index_type_to_index_cls: Optional[dict[str, Type[BaseGPTIndex]]] = None,
) -> BaseGPTIndex:
"""Create or load index from json path."""
index_json_path = index_json_path or INDEX_JSON_PATH
index_type_to_index_cls = (
index_type_to_index_cls or INDEX_STRUCT_TYPE_TO_INDEX_CLASS
)
index_type_str = index_type_str or INDEX_STRUCT_TYPE_STR
index_type = IndexStructType(index_type_str)
if index_type not in index_type_to_index_cls:
raise ValueError(f"Unknown index type: {index_type}")
if index_type in EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES:
raise ValueError("Please use vector store directly.")
index_cls = index_type_to_index_cls[index_type]
if index_json_path is None:
return index_cls(nodes=[]) # Create empty index
else:
return index_cls.load_from_disk(index_json_path) |
Create or load query kwargs from json path. | def _create_or_load_query_kwargs(
query_kwargs_json_path: Optional[str] = None,
) -> Optional[dict]:
"""Create or load query kwargs from json path."""
query_kwargs_json_path = query_kwargs_json_path or QUERY_KWARGS_JSON_PATH
    query_kwargs: Optional[dict] = None
    if query_kwargs_json_path is not None:
        with open(query_kwargs_json_path, "r") as f:
            query_kwargs = json.load(f)
    return query_kwargs |
Convert document chunk to Node | def _doc_chunk_to_node(doc_chunk: DocumentChunk, source_doc_id: str) -> Node:
"""Convert document chunk to Node"""
return Node(
doc_id=doc_chunk.id,
text=doc_chunk.text,
embedding=doc_chunk.embedding,
extra_info=doc_chunk.metadata.dict(),
relationships={DocumentRelationship.SOURCE: source_doc_id},
) |
Split a text into chunks of ~CHUNK_SIZE tokens, based on punctuation and newline boundaries.
Args:
text: The text to split into chunks.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A list of text chunks, each of which is a string of ~CHUNK_SIZE tokens. | def get_text_chunks(text: str, chunk_token_size: Optional[int]) -> List[str]:
"""
Split a text into chunks of ~CHUNK_SIZE tokens, based on punctuation and newline boundaries.
Args:
text: The text to split into chunks.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A list of text chunks, each of which is a string of ~CHUNK_SIZE tokens.
"""
# Return an empty list if the text is empty or whitespace
if not text or text.isspace():
return []
# Tokenize the text
tokens = tokenizer.encode(text, disallowed_special=())
# Initialize an empty list of chunks
chunks = []
# Use the provided chunk token size or the default one
chunk_size = chunk_token_size or CHUNK_SIZE
# Initialize a counter for the number of chunks
num_chunks = 0
# Loop until all tokens are consumed
while tokens and num_chunks < MAX_NUM_CHUNKS:
# Take the first chunk_size tokens as a chunk
chunk = tokens[:chunk_size]
# Decode the chunk into text
chunk_text = tokenizer.decode(chunk)
# Skip the chunk if it is empty or whitespace
if not chunk_text or chunk_text.isspace():
# Remove the tokens corresponding to the chunk text from the remaining tokens
tokens = tokens[len(chunk) :]
# Continue to the next iteration of the loop
continue
# Find the last period or punctuation mark in the chunk
last_punctuation = max(
chunk_text.rfind("."),
chunk_text.rfind("?"),
chunk_text.rfind("!"),
chunk_text.rfind("\n"),
)
# If there is a punctuation mark, and the last punctuation index is before MIN_CHUNK_SIZE_CHARS
if last_punctuation != -1 and last_punctuation > MIN_CHUNK_SIZE_CHARS:
# Truncate the chunk text at the punctuation mark
chunk_text = chunk_text[: last_punctuation + 1]
# Remove any newline characters and strip any leading or trailing whitespace
chunk_text_to_append = chunk_text.replace("\n", " ").strip()
if len(chunk_text_to_append) > MIN_CHUNK_LENGTH_TO_EMBED:
# Append the chunk text to the list of chunks
chunks.append(chunk_text_to_append)
# Remove the tokens corresponding to the chunk text from the remaining tokens
tokens = tokens[len(tokenizer.encode(chunk_text, disallowed_special=())) :]
# Increment the number of chunks
num_chunks += 1
# Handle the remaining tokens
if tokens:
remaining_text = tokenizer.decode(tokens).replace("\n", " ").strip()
if len(remaining_text) > MIN_CHUNK_LENGTH_TO_EMBED:
chunks.append(remaining_text)
return chunks |
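A hedged usage sketch: it assumes the module-level tokenizer (e.g. a tiktoken encoding) and the CHUNK_SIZE / MIN_* constants referenced by the function are defined as in the surrounding module, so the printed values are only illustrative:

text = ('The quick brown fox jumps over the lazy dog. ' * 100).strip()
chunks = get_text_chunks(text, chunk_token_size=64)
for chunk in chunks[:3]:
    # Each chunk holds roughly 64 tokens and, where possible, ends on punctuation or a newline
    print(len(tokenizer.encode(chunk, disallowed_special=())), repr(chunk[:40]))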
Create a list of document chunks from a document object and return the document id.
Args:
doc: The document object to create chunks from. It should have a text attribute and optionally an id and a metadata attribute.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A tuple of (doc_chunks, doc_id), where doc_chunks is a list of document chunks, each of which is a DocumentChunk object with an id, a document_id, a text, and a metadata attribute,
and doc_id is the id of the document object, generated if not provided. The id of each chunk is generated from the document id and a sequential number, and the metadata is copied from the document object. | def create_document_chunks(
doc: Document, chunk_token_size: Optional[int]
) -> Tuple[List[DocumentChunk], str]:
"""
Create a list of document chunks from a document object and return the document id.
Args:
doc: The document object to create chunks from. It should have a text attribute and optionally an id and a metadata attribute.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A tuple of (doc_chunks, doc_id), where doc_chunks is a list of document chunks, each of which is a DocumentChunk object with an id, a document_id, a text, and a metadata attribute,
and doc_id is the id of the document object, generated if not provided. The id of each chunk is generated from the document id and a sequential number, and the metadata is copied from the document object.
"""
# Check if the document text is empty or whitespace
if not doc.text or doc.text.isspace():
return [], doc.id or str(uuid.uuid4())
# Generate a document id if not provided
doc_id = doc.id or str(uuid.uuid4())
# Split the document text into chunks
text_chunks = get_text_chunks(doc.text, chunk_token_size)
metadata = (
DocumentChunkMetadata(**doc.metadata.__dict__)
if doc.metadata is not None
else DocumentChunkMetadata()
)
metadata.document_id = doc_id
# Initialize an empty list of chunks for this document
doc_chunks = []
# Assign each chunk a sequential number and create a DocumentChunk object
for i, text_chunk in enumerate(text_chunks):
chunk_id = f"{doc_id}_{i}"
doc_chunk = DocumentChunk(
id=chunk_id,
text=text_chunk,
metadata=metadata,
)
# Append the chunk object to the list of chunks for this document
doc_chunks.append(doc_chunk)
# Return the list of chunks and the document id
return doc_chunks, doc_id |
Convert a list of documents into a dictionary from document id to list of document chunks.
Args:
documents: The list of documents to convert.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A dictionary mapping each document id to a list of document chunks, each of which is a DocumentChunk object
with text, metadata, and embedding attributes. | def get_document_chunks(
documents: List[Document], chunk_token_size: Optional[int]
) -> Dict[str, List[DocumentChunk]]:
"""
Convert a list of documents into a dictionary from document id to list of document chunks.
Args:
documents: The list of documents to convert.
chunk_token_size: The target size of each chunk in tokens, or None to use the default CHUNK_SIZE.
Returns:
A dictionary mapping each document id to a list of document chunks, each of which is a DocumentChunk object
with text, metadata, and embedding attributes.
"""
# Initialize an empty dictionary of lists of chunks
chunks: Dict[str, List[DocumentChunk]] = {}
# Initialize an empty list of all chunks
all_chunks: List[DocumentChunk] = []
# Loop over each document and create chunks
for doc in documents:
doc_chunks, doc_id = create_document_chunks(doc, chunk_token_size)
# Append the chunks for this document to the list of all chunks
all_chunks.extend(doc_chunks)
# Add the list of chunks for this document to the dictionary with the document id as the key
chunks[doc_id] = doc_chunks
# Check if there are no chunks
if not all_chunks:
return {}
# Get all the embeddings for the document chunks in batches, using get_embeddings
embeddings: List[List[float]] = []
for i in range(0, len(all_chunks), EMBEDDINGS_BATCH_SIZE):
# Get the text of the chunks in the current batch
batch_texts = [
chunk.text for chunk in all_chunks[i : i + EMBEDDINGS_BATCH_SIZE]
]
# Get the embeddings for the batch texts
batch_embeddings = get_embeddings(batch_texts)
# Append the batch embeddings to the embeddings list
embeddings.extend(batch_embeddings)
# Update the document chunk objects with the embeddings
for i, chunk in enumerate(all_chunks):
# Assign the embedding from the embeddings list to the chunk object
chunk.embedding = embeddings[i]
return chunks |
Convert a date string to a unix timestamp (seconds since epoch).
Args:
date_str: The date string to convert.
Returns:
The unix timestamp corresponding to the date string.
If the date string cannot be parsed as a valid date format, returns the current unix timestamp and prints a warning. | def to_unix_timestamp(date_str: str) -> int:
"""
Convert a date string to a unix timestamp (seconds since epoch).
Args:
date_str: The date string to convert.
Returns:
The unix timestamp corresponding to the date string.
If the date string cannot be parsed as a valid date format, returns the current unix timestamp and prints a warning.
"""
# Try to parse the date string using arrow, which supports many common date formats
try:
date_obj = arrow.get(date_str)
return int(date_obj.timestamp())
except arrow.parser.ParserError:
# If the parsing fails, return the current unix timestamp and print a warning
logger.info(f"Invalid date format: {date_str}")
return int(arrow.now().timestamp()) |
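Two illustrative calls (assuming arrow is installed and the function above is in scope):

# Midnight UTC on 1970-01-02 is exactly one day past the epoch
assert to_unix_timestamp('1970-01-02T00:00:00+00:00') == 86400
# An unparseable string falls back to the current time and logs the problem
print(to_unix_timestamp('not-a-date'))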
Return the text content of a file given its filepath. | def extract_text_from_filepath(filepath: str, mimetype: Optional[str] = None) -> str:
"""Return the text content of a file given its filepath."""
if mimetype is None:
# Get the mimetype of the file based on its extension
mimetype, _ = mimetypes.guess_type(filepath)
if not mimetype:
if filepath.endswith(".md"):
mimetype = "text/markdown"
else:
raise Exception("Unsupported file type")
try:
with open(filepath, "rb") as file:
extracted_text = extract_text_from_file(file, mimetype)
except Exception as e:
logger.error(e)
raise e
return extracted_text |
Embed texts using OpenAI's ada model.
Args:
texts: The list of texts to embed.
Returns:
A list of embeddings, each of which is a list of floats.
Raises:
Exception: If the OpenAI API call fails. | def get_embeddings(texts: List[str]) -> List[List[float]]:
"""
Embed texts using OpenAI's ada model.
Args:
texts: The list of texts to embed.
Returns:
A list of embeddings, each of which is a list of floats.
Raises:
Exception: If the OpenAI API call fails.
"""
# Call the OpenAI API to get the embeddings
# NOTE: Azure Open AI requires deployment id
deployment = os.environ.get("OPENAI_EMBEDDINGMODEL_DEPLOYMENTID")
response = {}
if deployment is None:
response = openai.Embedding.create(input=texts, model=EMBEDDING_MODEL, dimensions=EMBEDDING_DIMENSION)
else:
response = openai.Embedding.create(input=texts, deployment_id=deployment)
# Extract the embedding data from the response
data = response["data"] # type: ignore
# Return the embeddings as a list of lists of floats
return [result["embedding"] for result in data] |
Generate a chat completion using OpenAI's chat completion API.
Args:
messages: The list of messages in the chat history.
model: The name of the model to use for the completion. Default is gpt-3.5-turbo, which is a fast, cheap and versatile model. Use gpt-4 for higher quality but slower results.
Returns:
A string containing the chat completion.
Raises:
Exception: If the OpenAI API call fails. | def get_chat_completion(
messages,
model="gpt-3.5-turbo", # use "gpt-4" for better results
deployment_id=None,
):
"""
Generate a chat completion using OpenAI's chat completion API.
Args:
messages: The list of messages in the chat history.
model: The name of the model to use for the completion. Default is gpt-3.5-turbo, which is a fast, cheap and versatile model. Use gpt-4 for higher quality but slower results.
Returns:
A string containing the chat completion.
Raises:
Exception: If the OpenAI API call fails.
"""
# call the OpenAI chat completion API with the given messages
# Note: Azure Open AI requires deployment id
response = {}
if deployment_id is None:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
)
else:
response = openai.ChatCompletion.create(
deployment_id=deployment_id,
messages=messages,
)
choices = response["choices"] # type: ignore
completion = choices[0].message.content.strip()
logger.info(f"Completion: {completion}")
return completion |
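# Illustrative usage sketch (same legacy openai client assumed; the deployment id is a
# hypothetical placeholder for Azure OpenAI):
#
#   messages = [
#       {"role": "system", "content": "You are a concise assistant."},
#       {"role": "user", "content": "Summarize CutMix in one sentence."},
#   ]
#   answer = get_chat_completion(messages)                                     # defaults to gpt-3.5-turbo
#   azure_answer = get_chat_completion(messages, deployment_id="my-deployment")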
Reads the Azure CosmosDB environment variables for the .env file.
Returns:
dict: The Azure CosmosDB environment variables | def azure_cosmos_db_settings_from_dot_env() -> dict:
"""
Reads the Azure CosmosDB environment variables for the .env file.
Returns:
dict: The Azure CosmosDB environment variables
"""
config = dotenv_values(".env")
env_variables = {
"DATASTORE": "azurecosmosdb",
"AZCOSMOS_API": config.get(
("AZCOSMOS_API")
), # Right now CosmosDB only supports vector search in Mongo vCore.
"AZCOSMOS_CONNSTR": config.get("AZCOSMOS_CONNSTR"),
"AZCOSMOS_DATABASE_NAME": config.get("AZCOSMOS_DATABASE_NAME"),
"AZCOSMOS_CONTAINER_NAME": config.get("AZCOSMOS_CONTAINER_NAME"),
}
return env_variables |
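# Illustrative usage sketch: exporting the .env values into the process environment
# (keys must exist in a local .env file; missing values are skipped):
#
#   for key, value in azure_cosmos_db_settings_from_dot_env().items():
#       if value is not None:
#           os.environ[key] = value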
List of documents representing data to be embedded in the datastore.
Minimum requirements for Documents in the /upsert endpoint's UpsertRequest. | def documents():
""" List of documents representing data to be embedded in the datastore.
Minimum requirements for Documents in the /upsert endpoint's UpsertRequest.
"""
return [
{"text": "The quick brown fox jumped over the slimy green toad."},
{"text": "The big brown bear jumped over the lazy dog."},
{"text": "Toads are frogs."},
{"text": "Green toads are basically red frogs."},
] |
TestClient makes requests to FastAPI service. | def client():
"""TestClient makes requests to FastAPI service."""
endpoint_url = "http://127.0.0.1:8000"
headers = {"Authorization": f"Bearer {os.environ['BEARER_TOKEN']}"}
with TestClient(app=app, base_url=endpoint_url, headers=headers) as client:
yield client |
Drop existing documents from the collection | def delete(client) -> bool:
"""Drop existing documents from the collection"""
response = client.request("DELETE", "/delete", json={"delete_all": True})
sleep(2)
return response |
Upload documents to the datastore via plugin's REST API. | def upsert(delete, documents, client) -> bool:
"""Upload documents to the datastore via plugin's REST API."""
response = client.post("/upsert", json={"documents": documents})
sleep(2) # At this point, the Vector Search Index is being built
return response |
Simply confirm that delete fixture ran successfully | def test_delete(delete) -> None:
"""Simply confirm that delete fixture ran successfully"""
assert delete.status_code == 200
assert delete.json()['success'] |
Simply confirm that upsert fixture has run successfully | def test_upsert(upsert) -> None:
"""Simply confirm that upsert fixture has run successfully"""
assert upsert.status_code == 200
assert len(upsert.json()['ids']) == 4 |
Test queries produce reasonable results,
now that datastore contains embedded data which has been indexed | def test_query(upsert, client) -> None: # upsert,
"""Test queries produce reasonable results,
now that datastore contains embedded data which has been indexed
"""
question = "What did the fox jump over?"
n_requested = 2 # top N results per query
got_response = False
retries = 5
query_result = {}
while retries and not got_response:
response = client.post("/query", json={'queries': [{"query": question, "top_k": n_requested}]})
assert isinstance(response, Response)
assert response.status_code == 200
assert len(response.json()) == 1
query_result = response.json()['results'][0]
if len(query_result['results']) == n_requested:
got_response = True
else:
retries -= 1
sleep(5)
assert got_response # we got n_requested responses
assert query_result['query'] == question
answers = []
scores = []
for result in query_result['results']:
answers.append(result['text'])
scores.append(round(result['score'], 2))
assert 0.8 < scores[0] < 0.9
assert answers[0] == "The quick brown fox jumped over the slimy green toad." |
Confirm that the environment has all it needs | def test_required_vars() -> None:
"""Confirm that the environment has all it needs"""
required_vars = {'BEARER_TOKEN', 'OPENAI_API_KEY', 'DATASTORE', 'EMBEDDING_DIMENSION', 'EMBEDDING_MODEL',
'MONGODB_COLLECTION', 'MONGODB_DATABASE', 'MONGODB_INDEX', 'MONGODB_URI'}
assert os.environ["DATASTORE"] == 'mongodb'
missing = required_vars - set(os.environ)
assert len(missing) == 0 |
Confirm that the connection to the datastore works. | def test_mongodb_connection() -> None:
"""Confirm that the connection to the datastore works."""
client = MongoClient(os.environ["MONGODB_URI"])
assert client.admin.command('ping')['ok'] |
Check that we can call OpenAI Embedding models. | def test_openai_connection() -> None:
"""Check that we can call OpenAI Embedding models."""
openai.api_key = os.environ["OPENAI_API_KEY"]
models = openai.Model.list()
model_names = [model["id"] for model in models['data']]
for model_name in model_names:
try:
response = openai.Embedding.create(input=["Some input text"], model=model_name)
assert len(response['data'][0]['embedding']) >= int(os.environ['EMBEDDING_DIMENSION'])
except Exception:
# not every listed model supports the embeddings endpoint, so skip those that fail
pass
ID of an unchunked document | def document_id():
"""ID of an unchunked document"""
return "a5991f75a315f755c3365ab2" |
IDs of chunks | def chunk_ids(document_id):
"""IDs of chunks"""
return [f"{document_id}_{i}" for i in range(3)] |
Represents output of services.chunks.get_document_chunks
-> Dict[str, List[DocumentChunk]]
called on a list containing a single Document | def one_documents_chunks(document_id, chunk_ids):
"""Represents output of services.chunks.get_document_chunks
-> Dict[str, List[DocumentChunk]]
called on a list containing a single Document
"""
n_chunks = len(chunk_ids)
texts = [
"Aenean euismod bibendum laoreet",
"Vivamus non enim vitae tortor",
"Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae",
]
sources = [Source.email, Source.file, Source.chat]
created_ats = [
"1929-10-28T09:30:00-05:00",
"2009-01-03T16:39:57-08:00",
"2021-01-21T10:00:00-02:00",
]
authors = ["Fred Smith", "Bob Doe", "Appleton Doe"]
embeddings = sample_embeddings(n_chunks)
doc_chunks = []
for i in range(n_chunks):
chunk = DocumentChunk(
id=chunk_ids[i],
text=texts[i],
metadata=DocumentChunkMetadata(
document_id=document_id,
source=sources[i],
created_at=created_ats[i],
author=authors[i],
),
embedding=embeddings[i], # type: ignore
)
doc_chunks.append(chunk)
return {document_id: doc_chunks} |
Create a polar axes containing the matplotlib radar plot.
Parameters
----------
fig : matplotlib.figure.Figure
The figure to draw into.
ax_position : (float, float, float, float)
The position of the created Axes in figure coordinates as
(x, y, width, height).
lw_bars : float
The linewidth of the bars.
lw_grid : float
The linewidth of the grid.
lw_border : float
The linewidth of the Axes border.
rgrid : array-like
Positions of the radial grid.
Returns
-------
ax : matplotlib.axes.Axes
The created Axes. | def create_icon_axes(fig, ax_position, lw_bars, lw_grid, lw_border, rgrid):
"""
Create a polar axes containing the matplotlib radar plot.
Parameters
----------
fig : matplotlib.figure.Figure
The figure to draw into.
ax_position : (float, float, float, float)
The position of the created Axes in figure coordinates as
(x, y, width, height).
lw_bars : float
The linewidth of the bars.
lw_grid : float
The linewidth of the grid.
lw_border : float
The linewidth of the Axes border.
rgrid : array-like
Positions of the radial grid.
Returns
-------
ax : matplotlib.axes.Axes
The created Axes.
"""
with plt.rc_context({'axes.edgecolor': MPL_BLUE,
'axes.linewidth': lw_border}):
ax = fig.add_axes(ax_position, projection='polar')
ax.set_axisbelow(True)
N = 7
arc = 2. * np.pi
theta = np.arange(0.0, arc, arc / N)
radii = np.array([2, 6, 8, 7, 4, 5, 8])
width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3])
bars = ax.bar(theta, radii, width=width, bottom=0.0, align='edge',
edgecolor='0.3', lw=lw_bars)
for r, bar in zip(radii, bars):
color = *cm.jet(r / 10.)[:3], 0.6 # color from jet with alpha=0.6
bar.set_facecolor(color)
ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax.grid(lw=lw_grid, color='0.9')
ax.set_rmax(9)
ax.set_yticks(rgrid)
# the actual visible background - extends a bit beyond the axis
ax.add_patch(Rectangle((0, 0), arc, 9.58,
facecolor='white', zorder=0,
clip_on=False, in_layout=False))
return ax |
Create an axes in *fig* that contains 'matplotlib' as Text. | def create_text_axes(fig, height_px):
"""Create an axes in *fig* that contains 'matplotlib' as Text."""
ax = fig.add_axes((0, 0, 1, 1))
ax.set_aspect("equal")
ax.set_axis_off()
path = TextPath((0, 0), "matplotlib", size=height_px * 0.8,
prop=get_font_properties())
fp = get_font_properties()
fp.set_weight('light')
path1 = TextPath((80, -13), 'Cheat sheet', size=height_px * 0.12,
prop=fp)
path2 = TextPath((310, -13), f'Version {matplotlib.__version__}',
size=height_px * 0.12,
prop=fp)
angle = 4.25 # degrees
trans = mtrans.Affine2D().skew_deg(angle, 0)
patch = PathPatch(path, transform=trans + ax.transData, color=MPL_BLUE,
lw=0)
patch1 = PathPatch(path1, transform=trans + ax.transData, color=MPL_BLUE,
lw=0)
patch2 = PathPatch(path2, color=MPL_BLUE,
lw=0)
ax.add_patch(patch)
ax.add_patch(patch1)
ax.add_patch(patch2)
ax.autoscale() |
Create a full figure with the Matplotlib logo.
Parameters
----------
height_px : int
Height of the figure in pixel.
lw_bars : float
The linewidth of the bar border.
lw_grid : float
The linewidth of the grid.
lw_border : float
The linewidth of icon border.
rgrid : sequence of float
The radial grid positions.
with_text : bool
Whether to draw only the icon or to include 'matplotlib' as text. | def make_logo(height_px, lw_bars, lw_grid, lw_border, rgrid, with_text=False):
"""
Create a full figure with the Matplotlib logo.
Parameters
----------
height_px : int
Height of the figure in pixel.
lw_bars : float
The linewidth of the bar border.
lw_grid : float
The linewidth of the grid.
lw_border : float
The linewidth of icon border.
rgrid : sequence of float
The radial grid positions.
with_text : bool
Whether to draw only the icon or to include 'matplotlib' as text.
"""
dpi = 100
height = height_px / dpi
figsize = (5 * height, height) if with_text else (height, height)
fig = plt.figure(figsize=figsize, dpi=dpi)
fig.patch.set_alpha(0)
if with_text:
create_text_axes(fig, height_px)
ax_pos = (0.535, 0.12, .17, 0.75) if with_text else (0.03, 0.03, .94, .94)
ax = create_icon_axes(fig, ax_pos, lw_bars, lw_grid, lw_border, rgrid)
fig.savefig('mpl-logo2.pdf')
return fig, ax |
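# Illustrative call (parameter values are examples, not the canonical logo settings):
#
#   fig, ax = make_logo(height_px=110, lw_bars=0.7, lw_grid=0.5, lw_border=1,
#                       rgrid=[1, 3, 5, 7], with_text=True)
#   plt.show()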
Set up Axes with just an x-Axis. | def setup(ax):
"""Set up Axes with just an x-Axis."""
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('none')
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(which='major', width=1.00, length=5)
ax.tick_params(which='minor', width=0.75, length=2.5, labelsize=10)
ax.set_xlim(0, 5)
ax.set_ylim(0, 1)
ax.patch.set_alpha(0.0) |
Return formatted value with 2 decimal places. | def major_formatter(x, pos):
"""Return formatted value with 2 decimal places."""
return "[%.2f]" % x |
Builds a :class:`ClassyDataset` from a config.
This assumes a 'name' key in the config which is used to determine what
dataset class to instantiate. For instance, a config `{"name": "my_dataset",
"folder": "/data"}` will find a class that was registered as "my_dataset"
(see :func:`register_dataset`) and call .from_config on it. | def build_dataset(config, *args, **kwargs):
"""Builds a :class:`ClassyDataset` from a config.
This assumes a 'name' key in the config which is used to determine what
dataset class to instantiate. For instance, a config `{"name": "my_dataset",
"folder": "/data"}` will find a class that was registered as "my_dataset"
(see :func:`register_dataset`) and call .from_config on it."""
dataset = DATASET_REGISTRY[config["name"]].from_config(config, *args, **kwargs)
num_workers = config.get("num_workers")
if num_workers is not None:
dataset.set_num_workers(num_workers)
return dataset |
Registers a :class:`ClassyDataset` subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyDataset from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyDataset subclass like this:
.. code-block:: python
@register_dataset("my_dataset")
class MyDataset(ClassyDataset):
...
To instantiate a dataset from a configuration file, see
:func:`build_dataset`. | def register_dataset(name, bypass_checks=False):
"""Registers a :class:`ClassyDataset` subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyDataset from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyDataset subclass like this:
.. code-block:: python
@register_dataset("my_dataset")
class MyDataset(ClassyDataset):
...
To instantiate a dataset from a configuration file, see
:func:`build_dataset`."""
def register_dataset_cls(cls):
if not bypass_checks:
if name in DATASET_REGISTRY:
msg = "Cannot register duplicate dataset ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, DATASET_REGISTRY_TB[name]))
if not issubclass(cls, ClassyDataset):
raise ValueError(
"Dataset ({}: {}) must extend ClassyDataset".format(
name, cls.__name__
)
)
if cls.__name__ in DATASET_CLASS_NAMES:
msg = (
"Cannot register dataset with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, DATASET_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
DATASET_REGISTRY[name] = cls
DATASET_CLASS_NAMES.add(cls.__name__)
DATASET_REGISTRY_TB[name] = tb
DATASET_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_dataset_cls |
Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate | def rand_bbox(img_shape, lam, margin=0.0, count=1):
"""Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = math.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = torch.randint(0 + margin_y, img_h - margin_y, (count,))
cx = torch.randint(0 + margin_x, img_w - margin_x, (count,))
yl = torch.clamp(cy - cut_h // 2, 0, img_h)
yh = torch.clamp(cy + cut_h // 2, 0, img_h)
xl = torch.clamp(cx - cut_w // 2, 0, img_w)
xh = torch.clamp(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh |
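# Worked example of the lam -> box-size relation (illustrative): for a 224x224 image and
# lam = 0.75, ratio = sqrt(1 - 0.75) = 0.5, so the cut region is about 112x112, i.e. the
# box covers roughly (1 - lam) = 25% of the image before clipping at the borders.
#
#   yl, yh, xl, xh = rand_bbox((3, 224, 224), lam=0.75)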
Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
count (int): Number of bbox to generate | def rand_bbox_minmax(img_shape, minmax, count=1):
"""Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
count (int): Number of bbox to generate
"""
assert len(minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(
int(img_h * minmax[0]), int(img_h * minmax[1]), size=count
)
cut_w = np.random.randint(
int(img_w * minmax[0]), int(img_w * minmax[1]), size=count
)
# torch's randint does not accept a vector of max values
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return [torch.from_numpy(a) for a in [yl, yu, xl, xu]] |
Generate bbox and apply lambda correction. | def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=1):
"""Generate bbox and apply lambda correction."""
if ratio_minmax is not None:
yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
else:
yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
if correct_lam or ratio_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = (1.0 - bbox_area / float(img_shape[-2] * img_shape[-1])).item()
return (yl, yu, xl, xu), lam |
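# Minimal sketch of how these helpers fit into a CutMix step (illustrative, not the full
# mixing recipe; `images` is an assumed NCHW batch and lam comes from a Beta draw):
#
#   lam = float(np.random.beta(1.0, 1.0))
#   (yl, yu, xl, xu), lam = cutmix_bbox_and_lam(images.shape, lam)
#   yl, yu, xl, xu = int(yl), int(yu), int(xl), int(xu)   # count=1, so each bound is a 1-element tensor
#   perm = torch.randperm(images.size(0))
#   images[:, :, yl:yu, xl:xu] = images[perm][:, :, yl:yu, xl:xu]
#   # targets would then be mixed with weights lam and (1 - lam)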
Returns a ApplyTransformToKey which applies a transform on the specified key.
The transform is built from the config, if it is not None.
Otherwise, uses one of the two mutually exclusive args: If
default_transform is not None, it is used. If split is not None,
imagenet transforms are used, using augmentation for "train", no
augmentation otherwise.
This function also provides an additional
function for mapping from tuples (or other keys) to a desired set
of keys
Args:
config: field transform config
default_transform: used if config is None
split: split for dataset, e.g. "train" or "test"
key: Key to apply transform to
key_map_transform: Used to produce desired map / keys
(e.g. for torchvision datasets, default samples is a
tuple so this argument can be used to map
(input, target) -> {"input": input, "target": target}) | def build_field_transform_default_imagenet(
config: Optional[List[Dict[str, Any]]],
default_transform: Optional[Callable] = None,
split: Optional[str] = None,
key: Union[int, str] = "input",
key_map_transform: Optional[Callable] = DEFAULT_KEY_MAP,
) -> Callable:
"""Returns a ApplyTransformToKey which applies a transform on the specified key.
The transform is built from the config, if it is not None.
Otherwise, uses one of the two mutually exclusive args: If
default_transform is not None, it is used. If split is not None,
imagenet transforms are used, using augmentation for "train", no
augmentation otherwise.
This function also provides an additional
function for mapping from tuples (or other keys) to a desired set
of keys
Args:
config: field transform config
default_transform: used if config is None
split: split for dataset, e.g. "train" or "test"
key: Key to apply transform to
key_map_transform: Used to produce desired map / keys
(e.g. for torchvision datasets, default samples is a
tuple so this argument can be used to map
(input, target) -> {"input": input, "target": target})
"""
assert (
default_transform is None or split is None
), "Can only specify one of default_transform and split"
if config is None:
if default_transform is not None:
transform = default_transform
elif split is not None:
transform = (
ImagenetAugmentTransform()
if split == "train"
else ImagenetNoAugmentTransform()
)
else:
raise ValueError("No transform config provided with no defaults")
else:
transform = build_transforms(config)
transform = ApplyTransformToKey(transform, key=key)
if key_map_transform is None:
return transform
return transforms.Compose([key_map_transform, transform]) |
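# Illustrative calls (config values are examples); `default_transform` and `split` are
# mutually exclusive and only consulted when `config` is None:
#
#   train_tx = build_field_transform_default_imagenet(config=None, split="train")
#   custom_tx = build_field_transform_default_imagenet(
#       config=[{"name": "CenterCrop", "size": 224}, {"name": "ToTensor"}]
#   )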
Default unnormalization transform which undoes "transforms.Normalize".
Specifically, it cancels out mean subtraction and standard deviation division. | def default_unnormalize(img):
Args:
img (torch.Tensor): image data to which the transform will be applied | def default_unnormalize(img):
"""Default unnormalization transform which undo the "transforms.Normalize".
Specially, it cancels out mean subtraction and standard deviation division.
Args:
img (torch.Tensor): image data to which the transform will be applied
"""
# TODO T39752655: Allow this to be configurable
img = img.clone()
for channel, std, mean in zip(img, ImagenetConstants.STD, ImagenetConstants.MEAN):
channel.mul_(std).add_(mean)
return img |
Returns transform that first maps sample to video keys, then
returns a transform on the specified key in dict.
Converts tuple (list, etc) sample to dict with input / target keys.
For a dict sample, verifies that dict has input / target keys.
For all other samples throws.
Args:
config: If provided, it is a dict where key is the data modality, and
value is a dict specifying the transform config
split: the split of the data to which the transform will be applied
key: the key in data sample of type dict whose corresponding value will
undergo the transform | def build_video_field_transform_default(
config: Optional[Dict[str, List[Dict[str, Any]]]],
split: str = "train",
key: Optional[str] = "input",
key_map_transform: Optional[Callable] = DEFAULT_KEY_MAP,
) -> Callable:
"""Returns transform that first maps sample to video keys, then
returns a transform on the specified key in dict.
Converts tuple (list, etc) sample to dict with input / target keys.
For a dict sample, verifies that dict has input / target keys.
For all other samples throws.
Args:
config: If provided, it is a dict where key is the data modality, and
value is a dict specifying the transform config
split: the split of the data to which the transform will be applied
key: the key in data sample of type dict whose corresponding value will
undergo the transform
"""
if config is None and split is None:
raise ValueError("No transform config provided with no defaults")
transforms_for_type = {
"video": VideoDefaultAugmentTransform()
if split == "train"
else VideoDefaultNoAugmentTransform(),
"audio": DummyAudioTransform(),
}
if config is not None:
transforms_for_type.update(
{
mode: build_transforms(modal_config)
for mode, modal_config in config.items()
}
)
transform = transforms.Compose(
[
ApplyTransformToKey(default_transform, key=mode)
for mode, default_transform in transforms_for_type.items()
]
)
if key is not None:
transform = ApplyTransformToKey(
transforms.Compose([TupleToMapTransform(["video", "audio"]), transform]),
key=key,
)
if key_map_transform is None:
return transform
return transforms.Compose([key_map_transform, transform]) |
Builds a :class:`ClassyTransform` from a config.
This assumes a 'name' key in the config which is used to determine what
transform class to instantiate. For instance, a config `{"name":
"my_transform", "foo": "bar"}` will find a class that was registered as
"my_transform" (see :func:`register_transform`) and call .from_config on
it.
In addition to transforms registered with :func:`register_transform`, we
also support instantiating transforms available in the
`torchvision.transforms <https://pytorch.org/docs/stable/torchvision/
transforms.html>`_ module. Any keys in the config will get expanded
to parameters of the transform constructor. For instance, the following
call will instantiate a :class:`torchvision.transforms.CenterCrop`:
.. code-block:: python
build_transform({"name": "CenterCrop", "size": 224}) | def build_transform(transform_config: Dict[str, Any]) -> Callable:
"""Builds a :class:`ClassyTransform` from a config.
This assumes a 'name' key in the config which is used to determine what
transform class to instantiate. For instance, a config `{"name":
"my_transform", "foo": "bar"}` will find a class that was registered as
"my_transform" (see :func:`register_transform`) and call .from_config on
it.
In addition to transforms registered with :func:`register_transform`, we
also support instantiating transforms available in the
`torchvision.transforms <https://pytorch.org/docs/stable/torchvision/
transforms.html>`_ module. Any keys in the config will get expanded
to parameters of the transform constructor. For instance, the following
call will instantiate a :class:`torchvision.transforms.CenterCrop`:
.. code-block:: python
build_transform({"name": "CenterCrop", "size": 224})
"""
assert (
"name" in transform_config
), f"name not provided for transform: {transform_config}"
name = transform_config["name"]
transform_args = {k: v for k, v in transform_config.items() if k != "name"}
if name in TRANSFORM_REGISTRY:
transform = TRANSFORM_REGISTRY[name].from_config(transform_args)
else:
# the name should be available in torchvision.transforms
# if users specify the torchvision transform name in snake case,
# we need to convert it to title case.
if not (hasattr(transforms, name) or (name in TRANSFORM_VIDEO)):
name = name.title().replace("_", "")
assert hasattr(transforms, name) or (name in TRANSFORM_VIDEO), (
f"{name} isn't a registered tranform"
", nor is it available in torchvision.transforms"
)
if hasattr(transforms, name):
transform = getattr(transforms, name)(**transform_args)
else:
import torchvision.transforms._transforms_video as transforms_video
transform = getattr(transforms_video, name)(**transform_args)
log_class_usage("Transform", transform.__class__)
return transform |
Builds a transform from the list of transform configurations. | def build_transforms(transforms_config: List[Dict[str, Any]]) -> Callable:
"""
Builds a transform from the list of transform configurations.
"""
transform_list = [build_transform(config) for config in transforms_config]
return transforms.Compose(transform_list) |
Registers a :class:`ClassyTransform` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyTransform` from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyTransform subclass like this:
.. code-block:: python
@register_transform("my_transform")
class MyTransform(ClassyTransform):
...
To instantiate a transform from a configuration file, see
:func:`build_transform`. | def register_transform(name: str, bypass_checks=False):
"""Registers a :class:`ClassyTransform` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyTransform` from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyTransform subclass like this:
.. code-block:: python
@register_transform("my_transform")
class MyTransform(ClassyTransform):
...
To instantiate a transform from a configuration file, see
:func:`build_transform`."""
def register_transform_cls(cls: Callable[..., Callable]):
if not bypass_checks:
if name in TRANSFORM_REGISTRY:
msg = "Cannot register duplicate transform ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, TRANSFORM_REGISTRY_TB[name]))
if hasattr(transforms, name) or (name in TRANSFORM_VIDEO):
raise ValueError(
"{} has existed in torchvision.transforms, Please change the name!".format(
name
)
)
TRANSFORM_REGISTRY[name] = cls
tb = "".join(traceback.format_stack())
TRANSFORM_REGISTRY_TB[name] = tb
return cls
return register_transform_cls |
Helper function parsing the command line options.
@retval ArgumentParser | def parse_args():
"""Helper function parsing the command line options.
@retval ArgumentParser
"""
parser = ArgumentParser(
description="Classy Vision distributed training launch "
"helper utility that will spawn up multiple nodes using Ray"
)
# Optional arguments for the launch helper
parser.add_argument(
"--nnodes",
type=int,
default=1,
help="The number of nodes to use for distributed training",
)
parser.add_argument(
"--nproc_per_node",
type=int,
default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.",
)
parser.add_argument(
"--use_env",
default=False,
action="store_true",
help="Use environment variable to pass "
"'local rank'."
"If set to True, the script will not pass "
"--local_rank as argument, and will instead set LOCAL_RANK.",
)
parser.add_argument(
"-m",
"--module",
default=False,
action="store_true",
help="Changes each process to interpret the launch script "
"as a python module, executing with the same behavior as"
"'python -m'.",
)
parser.add_argument(
"--no_python",
default=False,
action="store_true",
help='Do not prepend the training script with "python" - just exec '
"it directly. Useful when the script is not a Python script.",
)
# Ray-related arguments
group = parser.add_argument_group("Ray related arguments")
group.add_argument("--ray-address", default="auto", type=str)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args() |
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This helper function converts to the correct
device and returns the tensor + original device. | def convert_to_distributed_tensor(tensor: torch.Tensor) -> Tuple[torch.Tensor, str]:
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This helper function converts to the correct
device and returns the tensor + original device.
"""
orig_device = "cpu" if not tensor.is_cuda else "gpu"
if (
torch.distributed.is_available()
and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
and not tensor.is_cuda
):
tensor = tensor.cuda()
return (tensor, orig_device) |
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This converts the tensor back to original device. | def convert_to_normal_tensor(tensor: torch.Tensor, orig_device: str) -> torch.Tensor:
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This converts the tensor back to original device.
"""
if tensor.is_cuda and orig_device == "cpu":
tensor = tensor.cpu()
return tensor |
Returns True if this is rank 0 of a distributed training job OR if it is
a single trainer job. Otherwise False. | def is_primary() -> bool:
"""
Returns True if this is rank 0 of a distributed training job OR if it is
a single trainer job. Otherwise False.
"""
return get_rank() == _PRIMARY_RANK |
Wrapper over torch.distributed.all_reduce for performing mean reduction
of tensor over all processes. | def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing mean reduction
of tensor over all processes.
"""
return all_reduce_op(
tensor,
torch.distributed.ReduceOp.SUM,
lambda t: t / torch.distributed.get_world_size(),
) |
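# Illustrative sketch: averaging a per-replica metric so every worker sees the same value.
# Outside a distributed run the wrapper simply returns the tensor unchanged
# (`loss` here is a hypothetical per-replica scalar):
#
#   global_loss = all_reduce_mean(loss.detach().clone())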
Wrapper over torch.distributed.all_reduce for performing sum
reduction of tensor over all processes in both distributed /
non-distributed scenarios. | def all_reduce_sum(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing sum
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
return all_reduce_op(tensor, torch.distributed.ReduceOp.SUM) |
Wrapper over torch.distributed.all_reduce for performing min
reduction of tensor over all processes in both distributed /
non-distributed scenarios. | def all_reduce_min(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing min
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
return all_reduce_op(tensor, torch.distributed.ReduceOp.MIN) |
Wrapper over torch.distributed.all_reduce for performing max
reduction of tensor over all processes in both distributed /
non-distributed scenarios. | def all_reduce_max(tensor: torch.Tensor) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing min
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
return all_reduce_op(tensor, torch.distributed.ReduceOp.MAX) |
Wrapper over torch.distributed.all_reduce for performing
reduction of tensor over all processes in both distributed /
non-distributed scenarios. | def all_reduce_op(
tensor: torch.Tensor,
op: torch.distributed.ReduceOp,
after_op_func: Callable[[torch.Tensor], torch.Tensor] = None,
) -> torch.Tensor:
"""
Wrapper over torch.distributed.all_reduce for performing
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.all_reduce(tensor, op)
if after_op_func is not None:
tensor = after_op_func(tensor)
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor |
Wrapper over torch.distributed.all_gather for performing
'gather' of 'tensor' over all processes in both distributed /
non-distributed scenarios. | def gather_tensors_from_all(tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Wrapper over torch.distributed.all_gather for performing
'gather' of 'tensor' over all processes in both distributed /
non-distributed scenarios.
"""
if tensor.ndim == 0:
# 0 dim tensors cannot be gathered. so unsqueeze
tensor = tensor.unsqueeze(0)
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
gathered_tensors = [
torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(gathered_tensors, tensor)
gathered_tensors = [
convert_to_normal_tensor(_tensor, orig_device)
for _tensor in gathered_tensors
]
else:
gathered_tensors = [tensor]
return gathered_tensors |
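# Illustrative sketch: gathering a per-rank scalar on every process; 0-dim tensors are
# unsqueezed, so each gathered entry has shape (1,). In a non-distributed run the list
# has a single element:
#
#   local_correct = torch.tensor(42.0)
#   per_rank = gather_tensors_from_all(local_correct)
#   total_correct = torch.cat(per_rank).sum()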
Wrapper over torch.distributed.broadcast for broadcasting a tensor from the source
to all processes in both distributed / non-distributed scenarios. | def broadcast(tensor: torch.Tensor, src: int = 0) -> torch.Tensor:
"""
Wrapper over torch.distributed.broadcast for broadcasting a tensor from the source
to all processes in both distributed / non-distributed scenarios.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.broadcast(tensor, src)
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor |
Wrapper over torch.distributed.barrier, returns without waiting
if the distributed process group is not initialized instead of throwing error. | def barrier() -> None:
"""
Wrapper over torch.distributed.barrier, returns without waiting
if the distributed process group is not initialized instead of throwing error.
"""
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return
torch.distributed.barrier() |
Simple wrapper for correctly getting world size in both distributed
/ non-distributed settings | def get_world_size() -> int:
"""
Simple wrapper for correctly getting world size in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_world_size()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 1
) |