"""
Shared methods for Batch pipelines.
Batch docs: https://hail.is/docs/batch/api/batch/hailtop.batch.job.Job.html#hailtop.batch.job.Job
"""
import collections
import contextlib
import itertools
import logging
import os
import subprocess
from typing import List, Union
import configargparse
import hailtop.batch as hb
import hailtop.batch_client.client as bc
from hailtop.batch.job import Job
_GCLOUD_PROJECT = None
class HG38_REF_PATHS:
fasta = (
"gs://gcp-public-data--broad-references/hg38/v0/Homo_sapiens_assembly38.fasta"
)
fai = "gs://gcp-public-data--broad-references/hg38/v0/Homo_sapiens_assembly38.fasta.fai"
dict = "gs://gcp-public-data--broad-references/hg38/v0/Homo_sapiens_assembly38.dict"
gencode_v36_gtf = "gs://macarthurlab-rnaseq/ref/gencode.v36.annotation.gtf"
class HG37_REF_PATHS:
fasta = (
"gs://gcp-public-data--broad-references/hg19/v0/Homo_sapiens_assembly19.fasta"
)
fai = "gs://gcp-public-data--broad-references/hg19/v0/Homo_sapiens_assembly19.fasta.fai"
dict = "gs://gcp-public-data--broad-references/hg19/v0/Homo_sapiens_assembly19.dict"
def set_gcloud_project(gcloud_project):
global _GCLOUD_PROJECT
_GCLOUD_PROJECT = gcloud_project
def init_arg_parser(
default_billing_project="tgg-rare-disease",
default_temp_bucket="macarthurlab-cromwell",
default_cpu=1,
default_memory=3.75,
parser=configargparse.ArgumentParser(
formatter_class=configargparse.ArgumentDefaultsRawHelpFormatter
),
gsa_key_file=None,
):
"""Initializes and returns an argparse instance with common pipeline args pre-defined."""
local_or_cluster_grp = parser.add_mutually_exclusive_group(required=True)
local_or_cluster_grp.add_argument(
"--local", action="store_true", help="Batch: run locally"
)
local_or_cluster_grp.add_argument(
"--cluster", action="store_true", help="Batch: submit to cluster"
)
parser.add_argument(
"-r",
"--raw",
action="store_true",
help="Batch: run directly on the machine, without using a docker image",
)
parser.add_argument(
"--gsa-key-file",
default=gsa_key_file,
help="Batch: path of gcloud service account .json "
"key file. If provided, Batch will mount this file into the docker image so gcloud commands can run as this service account.",
)
parser.add_argument(
"--batch-billing-project",
default=default_billing_project,
help="Batch: this billing project will be "
"charged when running jobs on the Batch cluster. To set up a billing project name, contact the hail team.",
)
parser.add_argument(
"--batch-temp-bucket",
default=default_temp_bucket,
help="Batch: bucket where it stores temp "
"files. The batch service-account must have Admin permissions for this bucket. These can be added by running "
"gsutil iam ch serviceAccount:[SERVICE_ACCOUNT_NAME]:objectAdmin gs://[BUCKET_NAME]",
)
parser.add_argument(
"-t",
"--cpu",
type=float,
default=default_cpu,
choices=[0.25, 0.5, 1, 2, 4, 8, 16],
help="Batch: number of CPUs (eg. 0.5)",
)
parser.add_argument(
"-m",
"--memory",
type=float,
default=default_memory,
help="Batch: memory in gigabytes (eg. 3.75)",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="Recompute and overwrite cached or previously computed data",
)
parser.add_argument(
"--start-with", type=int, help="Start from this step in the pipeline"
)
parser.add_argument(
"--dry-run", action="store_true", help="Don't run commands, just print them."
)
parser.add_argument("--verbose", action="store_true", help="Verbose log output.")
return parser
@contextlib.contextmanager
def run_batch(args, batch_name=None):
"""Wrapper around creating, running, and then closing a Batch run.
:param args: Parsed args from the ArgumentParser created via the init_arg_parser method
:param batch_name: (optional) batch label which will show up in the Batch web UI
Usage:
with run_batch(args) as batch:
... batch job definitions ...
"""
if args.local:
backend = (
hb.LocalBackend()
if args.raw
else hb.LocalBackend(gsa_key_file=args.gsa_key_file)
)
else:
backend = hb.ServiceBackend(
billing_project=args.batch_billing_project, bucket=args.batch_temp_bucket
)
try:
batch = hb.Batch(backend=backend, name=batch_name)
batch.batch_utils_temp_bucket = args.batch_temp_bucket
yield batch # returned to with ... as batch:
# run on end of with..: block
batch.run(dry_run=args.dry_run, verbose=args.verbose)
finally:
if isinstance(backend, hb.ServiceBackend):
backend.close()
def init_job(
batch,
name: str = None,
image: str = None,
cpu: float = None,
memory: float = None,
disk_size: float = None,
):
"""Common job init steps
:param batch: Batch object
:param name: job label which will show up in the Batch web UI
:param image: docker image name (e.g. "weisburd/image-name@sha256:aa19845da5")
:param cpu: number of CPUs (between 0.25 and 16)
:param memory: amount of RAM in GiB (e.g. 3.75)
:param disk_size: amount of disk in GiB (e.g. 50)
:return: new job object
"""
j = batch.new_job(name=name)
if image:
j.image(image)
if cpu:
if cpu < 0.25 or cpu > 16:
raise ValueError(
f"CPU arg is {cpu}. This is outside the range of 0.25 to 16 CPUs"
)
j.cpu(cpu) # Batch default is 1
if memory:
if memory < 0.1 or memory > 60:
raise ValueError(
f"Memory arg is {memory}. This is outside the range of 0.1 to 60 Gb"
)
j.memory(f"{memory}Gi") # Batch default is 3.75G
if disk_size:
if disk_size < 1 or disk_size > 1000:
raise ValueError(
f"Disk size arg is {disk_size}. This is outside the range of 1 to 1000 Gb"
)
j.storage(f"{disk_size}Gi")
j.command(
"set -euxo pipefail"
) # set bash options for easier debugging and to make command execution more robust
return j
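# Hedged usage sketch (added for illustration, not part of the original module): how
# init_arg_parser, run_batch, and init_job are typically wired together. The batch name,
# docker image, and echo command below are placeholders.
def _example_pipeline():  # illustrative only; never called by this module
    parser = init_arg_parser()
    args = parser.parse_args()
    with run_batch(args, batch_name="example-pipeline") as batch:
        j = init_job(batch, name="step1", image="ubuntu:20.04", cpu=args.cpu, memory=args.memory)
        j.command("echo 'hello from Batch'")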
def switch_gcloud_auth_to_user_account(
batch_job: Job,
gs_path_of_gcloud_credentials: str,
gcloud_user_account: str,
gcloud_project: str = None,
):
"""This method adds some shell commands to your Batch job to switch gcloud auth from the Batch-provided service
account to your user account.
This can be used to access all of your Google buckets without first having to grant access to the Batch service account.
For this to work, you must first
1) create a google bucket that only you have access to - for example: gs://weisburd-gcloud-secrets/
2) on your local machine, make sure you're logged in to gcloud by running
gcloud auth login
3) copy your local ~/.config directory (which caches your gcloud auth credentials) to the secrets bucket from step 1
gsutil -m cp -r ~/.config/ gs://weisburd-gcloud-secrets/
4) grant your default Batch service-account read access to your secrets bucket so it can download these credentials
into each docker container.
5) make sure gcloud & gsutil are installed inside the docker images you use for your Batch jobs
6) call this method at the beginning of your batch job:
Example:
switch_gcloud_auth_to_user_account(
batch_job,
"gs://weisburd-gcloud-secrets",
"<EMAIL>",
"seqr-project")
:param batch_job: Batch job object
:param gs_path_of_gcloud_credentials: google bucket path that contains your .config folder
:param gcloud_user_account: user account to activate
:param gcloud_project: (optional) set this as the default gcloud project
:return:
"""
batch_job.command(f"gcloud auth list")
batch_job.command(
f"gcloud auth activate-service-account --key-file /gsa-key/key.json"
)
batch_job.command(
f"gsutil -m cp -r {os.path.join(gs_path_of_gcloud_credentials, '.config')} /tmp/"
)
batch_job.command(f"rm -rf ~/.config")
batch_job.command(f"mv /tmp/.config ~/")
batch_job.command(f"gcloud config set account {gcloud_user_account}")
if gcloud_project or _GCLOUD_PROJECT:
batch_job.command(
f"gcloud config set project {gcloud_project or _GCLOUD_PROJECT}"
)
batch_job.command(
f"gcloud auth list"
) # print auth list again to show that 'gcloud config set account' succeeded.
# attach credentials to batch_job object
batch_job._batch_utils_gs_path_of_gcloud_credentials = gs_path_of_gcloud_credentials
batch_job._batch_utils_gcloud_user_account = gcloud_user_account
class StorageBucketRegionException(Exception):
pass
def check_storage_bucket_region(
google_storage_paths: Union[str, list],
gcloud_project: str = None,
verbose: bool = True,
):
"""Checks whether the given google storage path(s) are stored in US-CENTRAL1 - the region where the hail Batch
cluster is located. Localizing data from other regions will be slower and result in egress charges.
:param google_storage_paths: a gs:// path or a list of gs:// paths to check.
:param gcloud_project: (optional) if specified, it will be added to the gsutil command with the -u arg.
:param verbose: (optional) if True, print a confirmation for each bucket that passes the check.
:raises StorageBucketRegionException: If the given path(s) are not stored in the same region as the Batch cluster.
"""
if isinstance(google_storage_paths, str):
google_storage_paths = [google_storage_paths]
buckets = set([path.split("/")[2] for path in google_storage_paths])
for bucket in buckets:
gsutil_command = f"gsutil"
if gcloud_project or _GCLOUD_PROJECT:
gsutil_command += f" -u {gcloud_project or _GCLOUD_PROJECT}"
output = subprocess.check_output(
f"{gsutil_command} ls -L -b gs://{bucket}", shell=True, encoding="UTF-8"
)
for line in output.split("\n"):
if "Location constraint:" in line:
location = line.strip().split()[-1]
break
else:
raise StorageBucketRegionException(
f"ERROR: Couldn't determine gs://{bucket} bucket region."
)
if location not in {"US", "US-CENTRAL1"}:
raise StorageBucketRegionException(
f"ERROR: gs://{bucket} is located in {location}. This may cause egress "
f"charges when copying files to the Batch cluster which is in US-CENTRAL."
)
if verbose:
print(f"Confirmed gs://{bucket} is in {location}")
# dictionary that maps a job id to the set of buckets that have been gcsfuse-mounted into this job, to avoid mounting
# the same bucket 2x
_GCSFUSE_MOUNTED_BUCKETS_PER_JOB = collections.defaultdict(set)
def localize_file(
job, google_storage_path: str, gcloud_project: str = None, use_gcsfuse: bool = False
) -> str:
"""Copies a file from a google bucket to the local filesystem and returns the new absolute local path.
Requires gsutil to exist inside the docker container.
:param job: batch Job object
:param google_storage_path: gs:// path of file to localize
:param gcloud_project: (optional) if specified, it will be added to the gsutil command with the -u arg.
:param use_gcsfuse: instead of copying the file, use gcsfuse to mount the bucket containing this file.
:returns: Local file path after localization.
"""
path = google_storage_path.replace("gs://", "")
dirname = os.path.dirname(path)
bucket_name = path.split("/")[0]
if use_gcsfuse:
root_dir = "/gcsfuse_mounts"
local_bucket_dir = os.path.join(root_dir, bucket_name)
local_file_path = os.path.join(root_dir, path)
job_hash = hash(job)
if bucket_name not in _GCSFUSE_MOUNTED_BUCKETS_PER_JOB[job_hash]:
job.command(f"mkdir -p {local_bucket_dir}")
job.gcsfuse(bucket_name, local_bucket_dir, read_only=True)
_GCSFUSE_MOUNTED_BUCKETS_PER_JOB[job_hash].add(bucket_name)
else:
gsutil_command = f"gsutil"
if gcloud_project or _GCLOUD_PROJECT:
gsutil_command += f" -u {gcloud_project or _GCLOUD_PROJECT}"
root_dir = "/localized"
local_dir = os.path.join(root_dir, dirname)
local_file_path = os.path.join(root_dir, path)
job.command(
f"mkdir -p '{local_dir}'; time {gsutil_command} -m cp -r '{google_storage_path}' '{local_file_path}'"
)
job.command(f"ls -lh '{local_file_path}'") # make sure file exists
return local_file_path
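# Hedged usage sketch (illustrative; assumes gsutil is installed in the job's docker image,
# as noted in the docstring above): localize the hg38 reference FASTA via a gsutil copy,
# and the GENCODE GTF by gcsfuse-mounting its bucket read-only instead of copying.
#
#   local_fasta = localize_file(j, HG38_REF_PATHS.fasta)
#   local_gtf = localize_file(j, HG38_REF_PATHS.gencode_v36_gtf, use_gcsfuse=True)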
# dictionary that maps a job
MOL 2 24.096 12.184 15.639 1.00 0.00 C \n',
'ATOM 834 H1 MOL 2 24.205 9.449 21.095 1.00 0.00 H1- \n',
'ATOM 835 H2 MOL 2 22.553 12.378 19.986 1.00 0.00 H1- \n',
'ATOM 836 H3 MOL 2 23.476 12.911 18.186 1.00 0.00 H1- \n',
'ATOM 837 H4 MOL 2 22.898 11.048 16.874 1.00 0.00 H1- \n',
'ATOM 838 H5 MOL 2 24.391 10.557 16.869 1.00 0.00 H1- \n',
'ATOM 839 H6 MOL 2 23.910 11.666 14.840 1.00 0.00 H1- \n',
'ATOM 840 H7 MOL 2 23.515 12.963 15.634 1.00 0.00 H1- \n',
'ATOM 841 N1 MOL 2 13.304 13.515 6.890 1.00 0.00 N3- \n',
'ATOM 842 C1 MOL 2 13.731 15.133 9.221 1.00 0.00 C \n',
'ATOM 843 C2 MOL 2 14.496 14.027 8.885 1.00 0.00 C \n',
'ATOM 844 C3 MOL 2 14.173 13.212 7.715 1.00 0.00 C \n',
'ATOM 845 C4 MOL 2 13.107 12.648 5.739 1.00 0.00 C \n',
'ATOM 846 C5 MOL 2 13.377 13.456 4.479 1.00 0.00 C \n',
'ATOM 847 C6 MOL 2 13.104 12.616 3.239 1.00 0.00 C \n',
'ATOM 848 H1 MOL 2 12.995 15.351 8.695 1.00 0.00 H1- \n',
'ATOM 849 H2 MOL 2 14.647 12.422 7.586 1.00 0.00 H1- \n',
'ATOM 850 H3 MOL 2 13.724 11.889 5.786 1.00 0.00 H1- \n',
'ATOM 851 H4 MOL 2 14.302 13.752 4.474 1.00 0.00 H1- \n',
'ATOM 852 H5 MOL 2 12.809 14.243 4.469 1.00 0.00 H1- \n',
'ATOM 853 H6 MOL 2 13.290 13.134 2.440 1.00 0.00 H1- \n',
'ATOM 854 H7 MOL 2 13.685 11.837 3.234 1.00 0.00 H1- \n',
'ATOM 855 N1 MOL 2 13.304 1.115 19.290 1.00 0.00 N3- \n',
'ATOM 856 C1 MOL 2 13.731 2.733 21.621 1.00 0.00 C \n',
'ATOM 857 C2 MOL 2 14.496 1.627 21.285 1.00 0.00 C \n',
'ATOM 858 C3 MOL 2 14.173 0.812 20.115 1.00 0.00 C \n',
'ATOM 859 C4 MOL 2 13.107 0.248 18.139 1.00 0.00 C \n',
'ATOM 860 C5 MOL 2 13.377 1.056 16.879 1.00 0.00 C \n',
'ATOM 861 C6 MOL 2 13.104 0.216 15.639 1.00 0.00 C \n',
'ATOM 862 H1 MOL 2 12.995 2.951 21.095 1.00 0.00 H1- \n',
'ATOM 863 H2 MOL 2 14.647 0.022 19.986 1.00 0.00 H1- \n',
'ATOM 864 H3 MOL 2 13.724 24.289 18.186 1.00 0.00 H1- \n',
'ATOM 865 H4 MOL 2 14.302 1.352 16.874 1.00 0.00 H1- \n',
'ATOM 866 H5 MOL 2 12.809 1.843 16.869 1.00 0.00 H1- \n',
'ATOM 867 H6 MOL 2 13.290 0.734 14.840 1.00 0.00 H1- \n',
'ATOM 868 H7 MOL 2 13.685 24.237 15.634 1.00 0.00 H1- \n',
'ATOM 869 N1 MOL 2 0.904 13.515 19.290 1.00 0.00 N3- \n',
'ATOM 870 C1 MOL 2 1.331 15.133 21.621 1.00 0.00 C \n',
'ATOM 871 C2 MOL 2 2.096 14.027 21.285 1.00 0.00 C \n',
'ATOM 872 C3 MOL 2 1.773 13.212 20.115 1.00 0.00 C \n',
'ATOM 873 C4 MOL 2 0.707 12.648 18.139 1.00 0.00 C \n',
'ATOM 874 C5 MOL 2 0.977 13.456 16.879 1.00 0.00 C \n',
'ATOM 875 C6 MOL 2 0.704 12.616 15.639 1.00 0.00 C \n',
'ATOM 876 H1 MOL 2 0.595 15.351 21.095 1.00 0.00 H1- \n',
'ATOM 877 H2 MOL 2 2.247 12.422 19.986 1.00 0.00 H1- \n',
'ATOM 878 H3 MOL 2 1.324 11.889 18.186 1.00 0.00 H1- \n',
'ATOM 879 H4 MOL 2 1.902 13.752 16.874 1.00 0.00 H1- \n',
'ATOM 880 H5 MOL 2 0.409 14.243 16.869 1.00 0.00 H1- \n',
'ATOM 881 H6 MOL 2 0.890 13.134 14.840 1.00 0.00 H1- \n',
'ATOM 882 H7 MOL 2 1.285 11.837 15.634 1.00 0.00 H1- \n',
'ATOM 883 N1 MOL 2 0.904 1.115 6.890 1.00 0.00 N3- \n',
'ATOM 884 C1 MOL 2 1.331 2.733 9.221 1.00 0.00 C \n',
'ATOM 885 C2 MOL 2 2.096 1.627 8.885 1.00 0.00 C \n',
'ATOM 886 C3 MOL 2 1.773 0.812 7.715 1.00 0.00 C \n',
'ATOM 887 C4 MOL 2 0.707 0.248 5.739 1.00 0.00 C \n',
'ATOM 888 C5 MOL 2 0.977 1.056 4.479 1.00 0.00 C \n',
'ATOM 889 C6 MOL 2 0.704 0.216 3.239 1.00 0.00 C \n',
'ATOM 890 H1 MOL 2 0.595 2.951 8.695 1.00 0.00 H1- \n',
'ATOM 891 H2 MOL 2 2.247 0.022 7.586 1.00 0.00 H1- \n',
'ATOM 892 H3 MOL 2 1.324 24.289 5.786 1.00 0.00 H1- \n',
'ATOM 893 H4 MOL 2 1.902 1.352 4.474 1.00 0.00 H1- \n',
'ATOM 894 H5 MOL 2 0.409 1.843 4.469 1.00 0.00 H1- \n',
'ATOM 895 H6 MOL 2 0.890 0.734 2.440 1.00 0.00 H1- \n',
'ATOM 896 H7 MOL 2 1.285 24.237 3.234 1.00 0.00 H1- \n',
'ATOM 897 N1 MOL 2 13.515 6.890 13.304 1.00 0.00 N3- \n',
'ATOM 898 C1 MOL 2 15.133 9.221 13.731 1.00 0.00 C \n',
'ATOM 899 C2 MOL 2 14.027 8.885 14.496 1.00 0.00 C \n',
'ATOM 900 C3 MOL 2 13.212 7.715 14.173 1.00 0.00 C \n',
'ATOM 901 C4 MOL 2 12.648 5.739 13.107 1.00 0.00 C \n',
'ATOM 902 C5 MOL 2 13.456 4.479 13.377 1.00 0.00 C \n',
'ATOM 903 C6 MOL 2 12.616 3.239 13.104 1.00 0.00 C \n',
'ATOM 904 H1 MOL 2 15.351 8.695 12.995 1.00 0.00 H1- \n',
'ATOM 905 H2 MOL 2 12.422 7.586 14.647 1.00 0.00 H1- \n',
'ATOM 906 H3 MOL 2 11.889 5.786 13.724 1.00 0.00 H1- \n',
'ATOM 907 H4 MOL 2 13.752 4.474 14.302 1.00 0.00 H1- \n',
'ATOM 908 H5 MOL 2 14.243 4.469 12.809 1.00 0.00 H1- \n',
'ATOM 909 H6 MOL 2 13.134 2.440 13.290 1.00 0.00 H1- \n',
'ATOM 910 H7 MOL 2 11.837 3.234 13.685 1.00 0.00 H1- \n',
'ATOM 911 N1 MOL 2 13.515 19.290 0.904 1.00 0.00 N3- \n',
'ATOM 912 C1 MOL 2 15.133 21.621 1.331 1.00 0.00 C \n',
'ATOM 913 C2 MOL 2 14.027 21.285 2.096 1.00 0.00 C \n',
'ATOM 914 C3 MOL 2 13.212 20.115 1.773 1.00 0.00 C \n',
'ATOM 915 C4 MOL 2 12.648 18.139 0.707 1.00 0.00 C \n',
'ATOM 916 C5 MOL 2 13.456 16.879 0.977 1.00 0.00 C \n',
'ATOM 917 C6 MOL 2 12.616 15.639 0.704 1.00 0.00 C \n',
'ATOM 918 H1 MOL 2 15.351 21.095 0.595 1.00 0.00 H1- \n',
'ATOM 919 H2 MOL 2 12.422 19.986 2.247 1.00 0.00 H1- \n',
'ATOM 920 H3 MOL 2 11.889 18.186 1.324 1.00 0.00 H1- \n',
'ATOM 921 H4 MOL 2 13.752 16.874 1.902 1.00 0.00 H1- \n',
'ATOM 922 H5 MOL 2 14.243 16.869 0.409 1.00 0.00 H1- \n',
'ATOM 923 H6 MOL 2 13.134 14.840 0.890 1.00 0.00 H1- \n',
'ATOM 924 H7 MOL 2 11.837 15.634 1.285 1.00 0.00 H1- \n',
'ATOM 925 N1 MOL 2 1.115 6.890 0.904 1.00 0.00 N3- \n',
'ATOM 926 C1 MOL 2 2.733 9.221 1.331 1.00 0.00 C \n',
'ATOM 927 C2 MOL 2 1.627 8.885 2.096 1.00 0.00 C \n',
'ATOM 928 C3 MOL 2 0.812 7.715 1.773 1.00 0.00 C \n',
'ATOM 929 C4 MOL 2 0.248 5.739 0.707 1.00 0.00 C \n',
'ATOM 930 C5 MOL 2 1.056 4.479 0.977 1.00 0.00 C \n',
'ATOM 931 C6 MOL 2 0.216 3.239 0.704 1.00 0.00 C \n',
'ATOM 932 H1 MOL 2 2.951 8.695 0.595 1.00 0.00 H1- \n',
'ATOM 933 H2 MOL 2 0.022 7.586 2.247 1.00 0.00 H1- \n',
'ATOM 934 H3 MOL 2 24.289 5.786 1.324 1.00 0.00 H1- \n',
'ATOM 935 H4 MOL 2 1.352 4.474 1.902 1.00 0.00 H1- \n',
'ATOM 936 H5 MOL 2 1.843 4.469 0.409 1.00 0.00 H1- \n',
'ATOM 937 H6 MOL 2 0.734 2.440 0.890 1.00 0.00 H1- \n',
'ATOM 938 H7 MOL 2 24.237 3.234 1.285 1.00 0.00 H1- \n',
'ATOM    939  N1  MOL     2       1.115  19.290  13.304  1.00  0.00          N3-
# for the prefix check, it is important that the compared paths both have trailing slashes,
# so that a path /foobar will NOT be accepted with --restrict-to-path /foo option.
path_with_sep = os.path.join(path, '') # make sure there is a trailing slash (os.sep)
for restrict_to_path in self.restrict_to_paths:
restrict_to_path_with_sep = os.path.join(os.path.realpath(restrict_to_path), '') # trailing slash
if path_with_sep.startswith(restrict_to_path_with_sep):
break
else:
raise PathNotAllowed(path)
self.repository = Repository(path, create, lock_wait=lock_wait, lock=lock,
append_only=self.append_only or append_only,
exclusive=exclusive)
self.repository.__enter__() # clean exit handled by serve() method
return self.repository.id
def inject_exception(self, kind):
kind = kind.decode()
s1 = 'test string'
s2 = 'test string2'
if kind == 'DoesNotExist':
raise Repository.DoesNotExist(s1)
elif kind == 'AlreadyExists':
raise Repository.AlreadyExists(s1)
elif kind == 'CheckNeeded':
raise Repository.CheckNeeded(s1)
elif kind == 'IntegrityError':
raise IntegrityError(s1)
elif kind == 'PathNotAllowed':
raise PathNotAllowed()
elif kind == 'ObjectNotFound':
raise Repository.ObjectNotFound(s1, s2)
elif kind == 'InvalidRPCMethod':
raise InvalidRPCMethod(s1)
elif kind == 'divide':
0 // 0
class SleepingBandwidthLimiter:
def __init__(self, limit):
if limit:
self.ratelimit = int(limit * RATELIMIT_PERIOD)
self.ratelimit_last = time.monotonic()
self.ratelimit_quota = self.ratelimit
else:
self.ratelimit = None
def write(self, fd, to_send):
if self.ratelimit:
now = time.monotonic()
if self.ratelimit_last + RATELIMIT_PERIOD <= now:
self.ratelimit_quota += self.ratelimit
if self.ratelimit_quota > 2 * self.ratelimit:
self.ratelimit_quota = 2 * self.ratelimit
self.ratelimit_last = now
if self.ratelimit_quota == 0:
tosleep = self.ratelimit_last + RATELIMIT_PERIOD - now
time.sleep(tosleep)
self.ratelimit_quota += self.ratelimit
self.ratelimit_last = time.monotonic()
if len(to_send) > self.ratelimit_quota:
to_send = to_send[:self.ratelimit_quota]
written = os.write(fd, to_send)
if self.ratelimit:
self.ratelimit_quota -= written
return written
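# Hedged usage sketch (illustrative; the 500 KiB/s figure is a placeholder): the limiter is
# constructed with a bytes-per-second limit (internally converted to a quota of
# limit * RATELIMIT_PERIOD bytes per period) and then wraps os.write() calls on the remote
# process's stdin, sleeping whenever the current quota is exhausted.
#
#   limiter = SleepingBandwidthLimiter(500 * 1024)   # ~500 KiB/s
#   n = limiter.write(stdin_fd, chunk)               # may sleep; returns bytes actually written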
def api(*, since, **kwargs_decorator):
"""Check version requirements and use self.call to do the remote method call.
<since> specifies the version in which borg introduced this method,
calling this method when connected to an older version will fail without transmitting
anything to the server.
Further kwargs can be used to encode version specific restrictions.
If a previous hardcoded behaviour is parameterized in a version, this allows calls that
use the previously hardcoded behaviour to pass through and generates an error if another
behaviour is requested by the client.
e.g. when 'append_only' was introduced in 1.0.7 the previous behaviour was what now is append_only=False.
Thus @api(..., append_only={'since': parse_version('1.0.7'), 'previously': False}) allows calls
with append_only=False for all versions but rejects calls using append_only=True on versions older than 1.0.7.
"""
def decorator(f):
@functools.wraps(f)
def do_rpc(self, *args, **kwargs):
sig = inspect.signature(f)
bound_args = sig.bind(self, *args, **kwargs)
named = {} # Arguments for the remote process
extra = {} # Arguments for the local process
for name, param in sig.parameters.items():
if name == 'self':
continue
if name in bound_args.arguments:
if name == 'wait':
extra[name] = bound_args.arguments[name]
else:
named[name] = bound_args.arguments[name]
else:
if param.default is not param.empty:
named[name] = param.default
if self.server_version < since:
raise self.RPCServerOutdated(f.__name__, format_version(since))
for name, restriction in kwargs_decorator.items():
if restriction['since'] <= self.server_version:
continue
if 'previously' in restriction and named[name] == restriction['previously']:
continue
raise self.RPCServerOutdated("{0} {1}={2!s}".format(f.__name__, name, named[name]),
format_version(restriction['since']))
return self.call(f.__name__, named, **extra)
return do_rpc
return decorator
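# Hedged illustration (assumed, mirroring the append_only example in the docstring above):
# how a remote method would be declared with the @api decorator. The method body stays
# empty because the actual remoting is done by the do_rpc wrapper built in the decorator.
#
#   @api(since=parse_version('1.0.0'),
#        append_only={'since': parse_version('1.0.7'), 'previously': False})
#   def open(self, path, create=False, lock_wait=None, lock=True, exclusive=False,
#            append_only=False):
#       """actual remoting is done by the decorator via self.call('open', ...)"""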
class RemoteRepository:
extra_test_args = []
class RPCError(Exception):
def __init__(self, unpacked):
# for borg < 1.1: unpacked only has b'exception_class' as key
# for borg 1.1+: unpacked has keys: b'exception_args', b'exception_full', b'exception_short', b'sysinfo'
self.unpacked = unpacked
def get_message(self):
if b'exception_short' in self.unpacked:
return b'\n'.join(self.unpacked[b'exception_short']).decode()
else:
return self.exception_class
@property
def exception_class(self):
return self.unpacked[b'exception_class'].decode()
@property
def exception_full(self):
if b'exception_full' in self.unpacked:
return b'\n'.join(self.unpacked[b'exception_full']).decode()
else:
return self.get_message() + '\nRemote Exception (see remote log for the traceback)'
@property
def sysinfo(self):
if b'sysinfo' in self.unpacked:
return self.unpacked[b'sysinfo'].decode()
else:
return ''
class RPCServerOutdated(Error):
"""Borg server is too old for {}. Required version {}"""
@property
def method(self):
return self.args[0]
@property
def required_version(self):
return self.args[1]
# If compatibility with 1.0.x is no longer needed, replace all checks of this with True and simplify the code
dictFormat = False # outside of __init__ for testing of legacy free protocol
def __init__(self, location, create=False, exclusive=False, lock_wait=None, lock=True, append_only=False, args=None):
self.location = self._location = location
self.preload_ids = []
self.msgid = 0
self.to_send = b''
self.chunkid_to_msgids = {}
self.ignore_responses = set()
self.responses = {}
self.ratelimit = SleepingBandwidthLimiter(args.remote_ratelimit * 1024 if args and args.remote_ratelimit else 0)
self.unpacker = get_limited_unpacker('client')
self.server_version = parse_version('1.0.8') # fallback version if server is too old to send version information
self.p = None
testing = location.host == '__testsuite__'
borg_cmd = self.borg_cmd(args, testing)
env = dict(os.environ)
if not testing:
borg_cmd = self.ssh_cmd(location) + borg_cmd
# pyinstaller binary modifies LD_LIBRARY_PATH=/tmp/_ME... but we do not want
# the system's ssh binary to pick up (non-matching) libraries from there.
# thus we restore the original LD_LIBRARY_PATH, from before pyinstaller modified it:
lp_key = 'LD_LIBRARY_PATH'
lp_orig = env.get(lp_key + '_ORIG') # pyinstaller >= 20160820 has this
if lp_orig is not None:
env[lp_key] = lp_orig
else:
env.pop(lp_key, None)
env.pop('BORG_PASSPHRASE', None) # security: do not give secrets to subprocess
env['BORG_VERSION'] = __version__
logger.debug('SSH command line: %s', borg_cmd)
self.p = Popen(borg_cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
self.stdin_fd = self.p.stdin.fileno()
self.stdout_fd = self.p.stdout.fileno()
self.stderr_fd = self.p.stderr.fileno()
fcntl.fcntl(self.stdin_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdin_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(self.stdout_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdout_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(self.stderr_fd, fcntl.F_SETFL, fcntl.fcntl(self.stderr_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
self.r_fds = [self.stdout_fd, self.stderr_fd]
self.x_fds = [self.stdin_fd, self.stdout_fd, self.stderr_fd]
try:
try:
version = self.call('negotiate', {'client_data': {b'client_version': BORG_VERSION}})
except ConnectionClosed:
raise ConnectionClosedWithHint('Is borg working on the server?') from None
if version == RPC_PROTOCOL_VERSION:
self.dictFormat = False
elif isinstance(version, dict) and b'server_version' in version:
self.dictFormat = True
self.server_version = version[b'server_version']
else:
raise Exception('Server insisted on using unsupported protocol version %s' % version)
def do_open():
self.id = self.open(path=self.location.path, create=create, lock_wait=lock_wait,
lock=lock, exclusive=exclusive, append_only=append_only)
if self.dictFormat:
do_open()
else:
# Ugly detection of versions prior to 1.0.7: If open throws it has to be 1.0.6 or lower
try:
do_open()
except self.RPCError as err:
if err.exception_class != 'TypeError':
raise
msg = """\
Please note:
If you see a TypeError complaining about the number of positional arguments
given to open(), you can ignore it if it comes from a borg version < 1.0.7.
This TypeError is a cosmetic side effect of the compatibility code borg
clients >= 1.0.7 have to support older borg servers.
This problem will go away as soon as the server has been upgraded to 1.0.7+.
"""
# emit this msg in the same way as the 'Remote: ...' lines that show the remote TypeError
sys.stderr.write(msg)
self.server_version = parse_version('1.0.6')
compatMap['open'] = ('path', 'create', 'lock_wait', 'lock', ),
# try again with corrected version and compatMap
do_open()
except Exception:
self.close()
raise
def __del__(self):
if len(self.responses):
logging.debug('still %d cached responses left in RemoteRepository' % (len(self.responses),))
if self.p:
self.close()
assert False, 'cleanup happened in Repository.__del__'
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.location.canonical_path())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is not None:
self.rollback()
finally:
# in any case, we want to cleanly close the repo, even if the
# rollback can not succeed (e.g. because the connection was
# already closed) and raised another exception:
self.close()
@property
def id_str(self):
return bin_to_hex(self.id)
def borg_cmd(self, args, testing):
"""return a borg serve command line"""
# give some args/options to 'borg serve' process as they were given to us
opts = []
if args is not None:
opts.append('--umask=%03o' % args.umask)
root_logger = logging.getLogger()
if root_logger.isEnabledFor(logging.DEBUG):
opts.append('--debug')
elif root_logger.isEnabledFor(logging.INFO):
opts.append('--info')
elif root_logger.isEnabledFor(logging.WARNING):
pass # warning is default
elif root_logger.isEnabledFor(logging.ERROR):
opts.append('--error')
elif root_logger.isEnabledFor(logging.CRITICAL):
opts.append('--critical')
else:
raise ValueError('log level missing, fix this code')
env_vars = []
if yes(env_var_override='BORG_HOSTNAME_IS_UNIQUE', env_msg=None, prompt=False):
env_vars.append('BORG_HOSTNAME_IS_UNIQUE=yes')
if testing:
return env_vars + [sys.executable, '-m', 'borg.archiver', 'serve'] + opts + self.extra_test_args
else: # pragma: no cover
remote_path = args.remote_path or os.environ.get('BORG_REMOTE_PATH', 'borg')
remote_path = replace_placeholders(remote_path)
return env_vars + [remote_path, 'serve'] + opts
def ssh_cmd(self, location):
"""return a ssh command line that can be prefixed to a borg command line"""
args = shlex.split(os.environ.get('BORG_RSH', 'ssh'))
if location.port:
args += ['-p', str(location.port)]
if location.user:
args.append('%s@%s' % (location.user, location.host))
else:
args.append('%s' % location.host)
return args
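# Hedged example (illustrative values): with BORG_RSH='ssh -i ~/.ssh/borg_key' and a
# location like user@host with port 1234, ssh_cmd() would return roughly
#   ['ssh', '-i', '~/.ssh/borg_key', '-p', '1234', 'user@host']
# which is then prefixed to the 'borg serve' command line built by borg_cmd().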
def named_to_positional(self, method, kwargs):
return [kwargs[name] for name in compatMap[method]]
def call(self, cmd, args, **kw):
for resp in self.call_many(cmd, [args], **kw):
return resp
def call_many(self, cmd, calls, wait=True, is_preloaded=False):
if not calls:
return
def pop_preload_msgid(chunkid):
msgid = self.chunkid_to_msgids[chunkid].pop(0)
if not self.chunkid_to_msgids[chunkid]:
del self.chunkid_to_msgids[chunkid]
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.common.types import metrics as gagc_metrics
from google.ads.googleads.v7.common.types import segments as gagc_segments
from google.ads.googleads.v7.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v7.enums.types import (
summary_row_setting as gage_summary_row_setting,
)
from google.ads.googleads.v7.resources.types import (
account_budget as gagr_account_budget,
)
from google.ads.googleads.v7.resources.types import (
account_budget_proposal as gagr_account_budget_proposal,
)
from google.ads.googleads.v7.resources.types import (
account_link as gagr_account_link,
)
from google.ads.googleads.v7.resources.types import ad_group as gagr_ad_group
from google.ads.googleads.v7.resources.types import (
ad_group_ad as gagr_ad_group_ad,
)
from google.ads.googleads.v7.resources.types import (
ad_group_ad_asset_view as gagr_ad_group_ad_asset_view,
)
from google.ads.googleads.v7.resources.types import (
ad_group_ad_label as gagr_ad_group_ad_label,
)
from google.ads.googleads.v7.resources.types import (
ad_group_asset as gagr_ad_group_asset,
)
from google.ads.googleads.v7.resources.types import (
ad_group_audience_view as gagr_ad_group_audience_view,
)
from google.ads.googleads.v7.resources.types import (
ad_group_bid_modifier as gagr_ad_group_bid_modifier,
)
from google.ads.googleads.v7.resources.types import (
ad_group_criterion as gagr_ad_group_criterion,
)
from google.ads.googleads.v7.resources.types import (
ad_group_criterion_label as gagr_ad_group_criterion_label,
)
from google.ads.googleads.v7.resources.types import (
ad_group_criterion_simulation as gagr_ad_group_criterion_simulation,
)
from google.ads.googleads.v7.resources.types import (
ad_group_extension_setting as gagr_ad_group_extension_setting,
)
from google.ads.googleads.v7.resources.types import (
ad_group_feed as gagr_ad_group_feed,
)
from google.ads.googleads.v7.resources.types import (
ad_group_label as gagr_ad_group_label,
)
from google.ads.googleads.v7.resources.types import (
ad_group_simulation as gagr_ad_group_simulation,
)
from google.ads.googleads.v7.resources.types import (
ad_parameter as gagr_ad_parameter,
)
from google.ads.googleads.v7.resources.types import (
ad_schedule_view as gagr_ad_schedule_view,
)
from google.ads.googleads.v7.resources.types import (
age_range_view as gagr_age_range_view,
)
from google.ads.googleads.v7.resources.types import asset as gagr_asset
from google.ads.googleads.v7.resources.types import batch_job as gagr_batch_job
from google.ads.googleads.v7.resources.types import (
bidding_strategy as gagr_bidding_strategy,
)
from google.ads.googleads.v7.resources.types import (
bidding_strategy_simulation as gagr_bidding_strategy_simulation,
)
from google.ads.googleads.v7.resources.types import (
billing_setup as gagr_billing_setup,
)
from google.ads.googleads.v7.resources.types import call_view as gagr_call_view
from google.ads.googleads.v7.resources.types import campaign as gagr_campaign
from google.ads.googleads.v7.resources.types import (
campaign_asset as gagr_campaign_asset,
)
from google.ads.googleads.v7.resources.types import (
campaign_audience_view as gagr_campaign_audience_view,
)
from google.ads.googleads.v7.resources.types import (
campaign_bid_modifier as gagr_campaign_bid_modifier,
)
from google.ads.googleads.v7.resources.types import (
campaign_budget as gagr_campaign_budget,
)
from google.ads.googleads.v7.resources.types import (
campaign_criterion as gagr_campaign_criterion,
)
from google.ads.googleads.v7.resources.types import (
campaign_criterion_simulation as gagr_campaign_criterion_simulation,
)
from google.ads.googleads.v7.resources.types import (
campaign_draft as gagr_campaign_draft,
)
from google.ads.googleads.v7.resources.types import (
campaign_experiment as gagr_campaign_experiment,
)
from google.ads.googleads.v7.resources.types import (
campaign_extension_setting as gagr_campaign_extension_setting,
)
from google.ads.googleads.v7.resources.types import (
campaign_feed as gagr_campaign_feed,
)
from google.ads.googleads.v7.resources.types import (
campaign_label as gagr_campaign_label,
)
from google.ads.googleads.v7.resources.types import (
campaign_shared_set as gagr_campaign_shared_set,
)
from google.ads.googleads.v7.resources.types import (
campaign_simulation as gagr_campaign_simulation,
)
from google.ads.googleads.v7.resources.types import (
carrier_constant as gagr_carrier_constant,
)
from google.ads.googleads.v7.resources.types import (
change_event as gagr_change_event,
)
from google.ads.googleads.v7.resources.types import (
change_status as gagr_change_status,
)
from google.ads.googleads.v7.resources.types import (
click_view as gagr_click_view,
)
from google.ads.googleads.v7.resources.types import (
combined_audience as gagr_combined_audience,
)
from google.ads.googleads.v7.resources.types import (
conversion_action as gagr_conversion_action,
)
from google.ads.googleads.v7.resources.types import (
conversion_custom_variable as gagr_conversion_custom_variable,
)
from google.ads.googleads.v7.resources.types import (
currency_constant as gagr_currency_constant,
)
from google.ads.googleads.v7.resources.types import (
custom_audience as gagr_custom_audience,
)
from google.ads.googleads.v7.resources.types import (
custom_interest as gagr_custom_interest,
)
from google.ads.googleads.v7.resources.types import customer as gagr_customer
from google.ads.googleads.v7.resources.types import (
customer_asset as gagr_customer_asset,
)
from google.ads.googleads.v7.resources.types import (
customer_client as gagr_customer_client,
)
from google.ads.googleads.v7.resources.types import (
customer_client_link as gagr_customer_client_link,
)
from google.ads.googleads.v7.resources.types import (
customer_extension_setting as gagr_customer_extension_setting,
)
from google.ads.googleads.v7.resources.types import (
customer_feed as gagr_customer_feed,
)
from google.ads.googleads.v7.resources.types import (
customer_label as gagr_customer_label,
)
from google.ads.googleads.v7.resources.types import (
customer_manager_link as gagr_customer_manager_link,
)
from google.ads.googleads.v7.resources.types import (
customer_negative_criterion as gagr_customer_negative_criterion,
)
from google.ads.googleads.v7.resources.types import (
customer_user_access as gagr_customer_user_access,
)
from google.ads.googleads.v7.resources.types import (
customer_user_access_invitation as gagr_customer_user_access_invitation,
)
from google.ads.googleads.v7.resources.types import (
detail_placement_view as gagr_detail_placement_view,
)
from google.ads.googleads.v7.resources.types import (
display_keyword_view as gagr_display_keyword_view,
)
from google.ads.googleads.v7.resources.types import (
distance_view as gagr_distance_view,
)
from google.ads.googleads.v7.resources.types import (
domain_category as gagr_domain_category,
)
from google.ads.googleads.v7.resources.types import (
dynamic_search_ads_search_term_view as gagr_dynamic_search_ads_search_term_view,
)
from google.ads.googleads.v7.resources.types import (
expanded_landing_page_view as gagr_expanded_landing_page_view,
)
from google.ads.googleads.v7.resources.types import (
extension_feed_item as gagr_extension_feed_item,
)
from google.ads.googleads.v7.resources.types import feed as gagr_feed
from google.ads.googleads.v7.resources.types import feed_item as gagr_feed_item
from google.ads.googleads.v7.resources.types import (
feed_item_set as gagr_feed_item_set,
)
from google.ads.googleads.v7.resources.types import (
feed_item_set_link as gagr_feed_item_set_link,
)
from google.ads.googleads.v7.resources.types import (
feed_item_target as gagr_feed_item_target,
)
from google.ads.googleads.v7.resources.types import (
feed_mapping as gagr_feed_mapping,
)
from google.ads.googleads.v7.resources.types import (
feed_placeholder_view as gagr_feed_placeholder_view,
)
from google.ads.googleads.v7.resources.types import (
gender_view as gagr_gender_view,
)
from google.ads.googleads.v7.resources.types import (
geo_target_constant as gagr_geo_target_constant,
)
from google.ads.googleads.v7.resources.types import (
geographic_view as gagr_geographic_view,
)
from google.ads.googleads.v7.resources.types import (
group_placement_view as gagr_group_placement_view,
)
from google.ads.googleads.v7.resources.types import (
hotel_group_view as gagr_hotel_group_view,
)
from google.ads.googleads.v7.resources.types import (
hotel_performance_view as gagr_hotel_performance_view,
)
from google.ads.googleads.v7.resources.types import (
income_range_view as gagr_income_range_view,
)
from google.ads.googleads.v7.resources.types import (
keyword_plan as gagr_keyword_plan,
)
from google.ads.googleads.v7.resources.types import (
keyword_plan_ad_group as gagr_keyword_plan_ad_group,
)
from google.ads.googleads.v7.resources.types import (
keyword_plan_ad_group_keyword as gagr_keyword_plan_ad_group_keyword,
)
from google.ads.googleads.v7.resources.types import (
keyword_plan_campaign as gagr_keyword_plan_campaign,
)
from google.ads.googleads.v7.resources.types import (
keyword_plan_campaign_keyword as gagr_keyword_plan_campaign_keyword,
)
from google.ads.googleads.v7.resources.types import (
keyword_view as gagr_keyword_view,
)
from google.ads.googleads.v7.resources.types import label as gagr_label
from google.ads.googleads.v7.resources.types import (
landing_page_view as gagr_landing_page_view,
)
from google.ads.googleads.v7.resources.types import (
language_constant as gagr_language_constant,
)
from google.ads.googleads.v7.resources.types import (
life_event as gagr_life_event,
)
from google.ads.googleads.v7.resources.types import (
location_view as gagr_location_view,
)
from google.ads.googleads.v7.resources.types import (
managed_placement_view as gagr_managed_placement_view,
)
from google.ads.googleads.v7.resources.types import (
media_file as gagr_media_file,
)
from google.ads.googleads.v7.resources.types import (
mobile_app_category_constant as gagr_mobile_app_category_constant,
)
from google.ads.googleads.v7.resources.types import (
mobile_device_constant as gagr_mobile_device_constant,
)
from google.ads.googleads.v7.resources.types import (
offline_user_data_job as gagr_offline_user_data_job,
)
from google.ads.googleads.v7.resources.types import (
operating_system_version_constant as gagr_operating_system_version_constant,
)
from google.ads.googleads.v7.resources.types import (
paid_organic_search_term_view as gagr_paid_organic_search_term_view,
)
from google.ads.googleads.v7.resources.types import (
parental_status_view as gagr_parental_status_view,
)
from google.ads.googleads.v7.resources.types import (
product_bidding_category_constant as gagr_product_bidding_category_constant,
)
from google.ads.googleads.v7.resources.types import (
product_group_view as gagr_product_group_view,
)
from google.ads.googleads.v7.resources.types import (
recommendation as gagr_recommendation,
)
from google.ads.googleads.v7.resources.types import (
remarketing_action as gagr_remarketing_action,
)
from google.ads.googleads.v7.resources.types import (
search_term_view as gagr_search_term_view,
)
from google.ads.googleads.v7.resources.types import (
shared_criterion as gagr_shared_criterion,
)
from google.ads.googleads.v7.resources.types import (
shared_set as gagr_shared_set,
)
from google.ads.googleads.v7.resources.types import (
shopping_performance_view as gagr_shopping_performance_view,
)
from google.ads.googleads.v7.resources.types import (
third_party_app_analytics_link as gagr_third_party_app_analytics_link,
)
from google.ads.googleads.v7.resources.types import (
topic_constant as gagr_topic_constant,
)
from google.ads.googleads.v7.resources.types import (
topic_view as gagr_topic_view,
)
from google.ads.googleads.v7.resources.types import (
user_interest as gagr_user_interest,
)
from google.ads.googleads.v7.resources.types import user_list as gagr_user_list
from google.ads.googleads.v7.resources.types import (
user_location_view as gagr_user_location_view,
)
from google.ads.googleads.v7.resources.types import video as gagr_video
from google.ads.googleads.v7.resources.types import (
webpage_view as gagr_webpage_view,
)
from google.ads.googleads.v7.services.types import ad_group_ad_label_service
from google.ads.googleads.v7.services.types import ad_group_ad_service
from google.ads.googleads.v7.services.types import ad_group_asset_service
from google.ads.googleads.v7.services.types import ad_group_bid_modifier_service
from google.ads.googleads.v7.services.types import (
ad_group_criterion_label_service,
)
from google.ads.googleads.v7.services.types import ad_group_criterion_service
from google.ads.googleads.v7.services.types import (
ad_group_extension_setting_service,
)
from google.ads.googleads.v7.services.types import ad_group_feed_service
from google.ads.googleads.v7.services.types import ad_group_label_service
from google.ads.googleads.v7.services.types import ad_group_service
from google.ads.googleads.v7.services.types import ad_parameter_service
from google.ads.googleads.v7.services.types import ad_service
from google.ads.googleads.v7.services.types import asset_service
from google.ads.googleads.v7.services.types import bidding_strategy_service
from google.ads.googleads.v7.services.types import campaign_asset_service
from google.ads.googleads.v7.services.types import campaign_bid_modifier_service
from google.ads.googleads.v7.services.types import campaign_budget_service
from google.ads.googleads.v7.services.types import campaign_criterion_service
from google.ads.googleads.v7.services.types import campaign_draft_service
from google.ads.googleads.v7.services.types import campaign_experiment_service
from google.ads.googleads.v7.services.types import (
campaign_extension_setting_service,
)
from google.ads.googleads.v7.services.types import campaign_feed_service
from google.ads.googleads.v7.services.types import campaign_label_service
from google.ads.googleads.v7.services.types import campaign_service
from google.ads.googleads.v7.services.types import campaign_shared_set_service
from google.ads.googleads.v7.services.types import conversion_action_service
from google.ads.googleads.v7.services.types import (
conversion_custom_variable_service,
)
from google.ads.googleads.v7.services.types import customer_asset_service
from google.ads.googleads.v7.services.types import (
customer_extension_setting_service,
)
from google.ads.googleads.v7.services.types import customer_feed_service
from google.ads.googleads.v7.services.types import customer_label_service
from google.ads.googleads.v7.services.types import (
customer_negative_criterion_service,
)
from google.ads.googleads.v7.services.types import customer_service
from google.ads.googleads.v7.services.types import extension_feed_item_service
from google.ads.googleads.v7.services.types import feed_item_service
from google.ads.googleads.v7.services.types import feed_item_set_link_service
from google.ads.googleads.v7.services.types import feed_item_set_service
from google.ads.googleads.v7.services.types import feed_item_target_service
from google.ads.googleads.v7.services.types import feed_mapping_service
from google.ads.googleads.v7.services.types import feed_service
from google.ads.googleads.v7.services.types import (
keyword_plan_ad_group_keyword_service,
)
from google.ads.googleads.v7.services.types import keyword_plan_ad_group_service
from google.ads.googleads.v7.services.types import (
keyword_plan_campaign_keyword_service,
)
from google.ads.googleads.v7.services.types import keyword_plan_campaign_service
from google.ads.googleads.v7.services.types import keyword_plan_service
from google.ads.googleads.v7.services.types import label_service
from google.ads.googleads.v7.services.types import media_file_service
from google.ads.googleads.v7.services.types import remarketing_action_service
from google.ads.googleads.v7.services.types import shared_criterion_service
from google.ads.googleads.v7.services.types import shared_set_service
from google.ads.googleads.v7.services.types import user_list_service
from google.protobuf import field_mask_pb2 as gp_field_mask # type: ignore
from google.rpc import status_pb2 as status # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.services",
marshal="google.ads.googleads.v7",
manifest={
"SearchGoogleAdsRequest",
"SearchGoogleAdsResponse",
"SearchGoogleAdsStreamRequest",
"SearchGoogleAdsStreamResponse",
"GoogleAdsRow",
"MutateGoogleAdsRequest",
"MutateGoogleAdsResponse",
"MutateOperation",
"MutateOperationResponse",
},
)
class SearchGoogleAdsRequest(proto.Message):
r"""Request message for
[GoogleAdsService.Search][google.ads.googleads.v7.services.GoogleAdsService.Search].
Attributes:
customer_id (str):
Required. The ID of the customer being
queried.
query (str):
Required. The query string.
page_token (str):
Token of the page to retrieve. If not specified, the first
page of results will be returned. Use the value obtained
from ``next_page_token`` in the previous response in order
to request the next page of results.
page_size (int):
Number of elements to retrieve in a single
page. When too large a page is requested, the
server may decide to further limit the number of
returned resources.
validate_only (bool):
If true, the request is validated but not
executed.
return_total_results_count (bool):
If true, the total number of results that
match the query ignoring the LIMIT clause will
be included in the response. Default is false.
summary_row_setting (google.ads.googleads.v7.enums.types.SummaryRowSettingEnum.SummaryRowSetting):
Determines whether a summary row will be
returned. By default, summary row is not
returned. If requested, the summary row will be
sent in a response by itself after all other
query results are returned.
"""
customer_id = proto.Field(proto.STRING, number=1,)
query = proto.Field(proto.STRING, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
page_size = proto.Field(proto.INT32, number=4,)
validate_only = proto.Field(proto.BOOL, number=5,)
return_total_results_count = proto.Field(proto.BOOL, number=7,)
summary_row_setting = proto.Field(
proto.ENUM,
number=8,
enum=gage_summary_row_setting.SummaryRowSettingEnum.SummaryRowSetting,
)
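# Hedged usage sketch (illustrative; the customer ID and GAQL query are placeholders):
# constructing the request message directly. In typical client code this message is
# passed to the GoogleAdsService Search/SearchStream RPCs rather than used on its own.
#
#   request = SearchGoogleAdsRequest(
#       customer_id="1234567890",
#       query="SELECT campaign.id, metrics.clicks FROM campaign",
#       page_size=1000,
#   )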
class SearchGoogleAdsResponse(proto.Message):
r"""Response message for
[GoogleAdsService.Search][google.ads.googleads.v7.services.GoogleAdsService.Search].
Attributes:
results (Sequence[google.ads.googleads.v7.services.types.GoogleAdsRow]):
The list of rows that matched the query.
next_page_token (str):
Pagination token used to retrieve the next page of results.
Pass the content of this string as the ``page_token``
attribute of the next request. ``next_page_token`` is not
returned for the last page.
total_results_count (int):
Total number of results that match the query
ignoring the LIMIT clause.
field_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that represents what fields were
requested by the user.
summary_row (google.ads.googleads.v7.services.types.GoogleAdsRow):
Summary row that contains summary of metrics
in results. Summary
template = {"group": "", "alternate_configs": [{"members": [], "linked_song": []}]}
same_group_different_artists = [
{
"group": "StylipS",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [
13264,
13265,
20633,
13731,
13807,
13809,
14336,
21216,
14835,
15241,
],
}
],
},
{
"group": "Sanshuu Chuugaku Yuusha-bu",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [17758, 18468, 35129, 35130],
}
],
},
{
"group": "Oratorio The World God Only Knows",
"alternate_configs": [
{"members": ["ELISA", "Lia"], "linked_song": [11379]},
{"members": ["<NAME>"], "linked_song": [13321]},
{"members": ["big shrug"], "linked_song": [10881]},
],
},
{
"group": "Kalafina",
"alternate_configs": [
{
"members": [
"<NAME>",
"WAKANA",
"<NAME>",
"Maya (Kalafina)",
],
"linked_song": [31731, 31732, 8677, 8678],
},
{
"members": ["<NAME>", "WAKANA"],
"linked_song": [
31728,
31729,
31730,
31734,
31735,
8674,
8675,
8676,
],
},
],
},
{
"group": "ClariS",
"alternate_configs": [
{
"members": ["Clara (ClariS)", "Alice (ClariS)"],
"linked_song": [
10843,
"reunion",
12406,
"Connect",
"Naisho no Hanashi",
12530,
12533,
12536,
12742,
13352,
13354,
17899,
17900,
],
},
],
},
{
"group": "MYTH & ROID",
"alternate_configs": [
{
"members": ["<NAME>"],
"linked_song": [
"STYX HELIX",
"STRAIGHT BET",
"Paradisus-Paradoxum",
15595,
18567,
"JINGO JUNGLE",
"L.L.L.",
],
},
],
},
{
"group": "Colors",
"alternate_configs": [
{
"members": ["<NAME>", "<NAME>"],
"linked_song": [13319],
},
{
"members": ["<NAME>", "<NAME>"],
"linked_song": [20855],
},
],
},
{
"group": "eyelis",
"alternate_configs": [
{"members": ["<NAME>"], "linked_song": [12766, 12772]},
],
},
{
"group": "Zukkoke Girls",
"alternate_configs": [
{
"members": ["<NAME>", "<NAME>", "SUZUTOMO"],
"linked_song": [21827],
},
],
},
{
"group": "Needless\u2605Girls+",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [10209],
},
],
},
{
"group": "supercell",
"alternate_configs": [
{"members": ["<NAME>"], "linked_song": ["Black★Rock Shooter"]},
{
"members": ["<NAME>"],
"linked_song": [8521, 9031, "Kimi no Shiranai Monogatari", 13030],
},
{"members": ["Ann", "gaku"], "linked_song": [27198]},
{"members": ["Ann"], "linked_song": [27214]},
],
},
{
"group": "My Melodies",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [7583, 7588],
},
],
},
{
"group": "Nagarekawa Girls",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [14058],
},
{
"members": [
"<NAME>",
"<NAME>",
"The rest of the fucking town",
],
"linked_song": [22287],
},
],
},
{
"group": "Almost The Entire Fucking Cast",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [27281],
},
],
},
{
"group": "Uchujin",
"alternate_configs": [
{"members": ["Noko"], "linked_song": [12639]},
{"members": ["<NAME>"], "linked_song": [12640]},
{"members": ["<NAME>"], "linked_song": [12641]},
],
},
{
"group": "fripSide",
"alternate_configs": [
{"members": ["nao"], "linked_song": [9498, 22427]},
{
"members": ["<NAME>"],
"linked_song": [
"only my railgun",
"LEVEL5-judgelight-",
10918,
11105,
11276,
12751,
12752,
"eternal reality",
"Sister's Noise",
"black bullet",
15090,
15382,
15473,
16069,
16070,
16403,
22117,
19126,
16777,
23903,
21794,
27440,
30373,
31181,
35102,
35820,
],
},
],
},
{
"group": "Veil",
"alternate_configs": [
{"members": ["<NAME>"], "linked_song": [23669]},
{"members": ["<NAME>"], "linked_song": [23668]},
{"members": ["Lia"], "linked_song": [10780]},
],
},
{
"group": "Shirahamazaka Koukou Gasshou-bu",
"alternate_configs": [
{"members": ["<NAME>", "<NAME>"], "linked_song": [12390]},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [18033, 18066],
},
],
},
{
"group": "FAKY",
"alternate_configs": [
{
"members": [
"<NAME>",
"Mikako (FAKY)",
"Anna (FAKY)",
"HARUKI (FAKY)",
"Tina (FAKY)",
],
"linked_song": [13861],
},
{
"members": [
"<NAME>",
"Mikako (FAKY)",
"Akina (FAKY)",
"Anna (FAKY)",
],
"linked_song": [16713, 21041],
},
],
},
{
"group": "MAHO-dou",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [
30995,
31128,
3818,
30681,
30725,
5975,
5976,
5977,
33397,
34466,
],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [
31032,
31057,
31065,
31234,
30562,
30680,
30683,
30724,
3820,
3821,
30519,
30911,
6160,
],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [3814, 3817, 3817, 31061, 6163, 6164],
},
],
},
{
"group": "LizNoir",
"alternate_configs": [
{
"members": ["<NAME>", "<NAME>"],
"linked_song": [32447, 33141],
},
],
},
{
"group": "FranChouChou",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [
32926,
33454,
33478,
33547,
33616,
33617,
33932,
34165,
34166,
34167,
],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [34531, 33784],
},
],
},
{
"group": "POLKA DOTS",
"alternate_configs": [{"members": ["<NAME>"], "linked_song": [16561]}],
},
{
"group": "9nine",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [16595, 22106],
}
],
},
{
"group": "Shiritsu Ebisu Chuugaku",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [13736],
}
],
},
{
"group": "THE IDOLM@STER CINDERELLA GIRLS LITTLE STARS!",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [16501],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [19166],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [23989],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [23990],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [23991],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [23992],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [23993],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [23994],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [24014],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [24541],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [24681],
},
],
},
{
"group": "BiS",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Muropanako",
"Mewclub",
"<NAME>",
"YUiNA EMPiRE",
],
"linked_song": [24023],
}
],
},
{
"group": "ARCANA PROJECT",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [34417, 35029],
}
],
},
{
"group": "EMPiRE",
"alternate_configs": [
{
"members": [
"YU-KI EMPiRE",
"YUKA EMPiRE",
"MAYU EMPiRE",
"MiDORiKO EMPiRE",
"MAHO EMPiRE",
"MiKiNA EMPiRE",
],
"linked_song": [23196],
},
{
"members": [
"YU-KI EMPiRE",
"MAYU EMPiRE",
"MiDORiKO EMPiRE",
"MAHO EMPiRE",
"MiKiNA EMPiRE",
"NOW EMPiRE",
],
"linked_song": [26470, 32830],
},
],
},
{
"group": "Dempagumi.inc",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [11355],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [18079],
},
],
},
{
"group": "Niji no Conquistador",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [26627],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [27660],
},
],
},
{
"group": "<NAME>!",
"alternate_configs": [
{
"members": ["<NAME>", "<NAME>", "<NAME>"],
"linked_song": [10513],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [10514],
},
],
},
{
"group": "<NAME>",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [7180],
},
],
},
{
"group": "NEWS",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [15392, 30852],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [32166, 35048],
},
],
},
{
"group": "Hey! Say! JUMP",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [5856, 5881],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [15971, 27133],
},
],
},
{
"group": "KinKi Kids",
"alternate_configs": [
{
"members": [
"<NAME>",
],
"linked_song": [29685],
},
{
"members": [
"<NAME>",
],
"linked_song": [29686],
},
],
},
{
"group": "Matsuri nine.",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [34322],
},
],
},
{
"group": "BOYS AND MEN",
"alternate_configs": [
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Yuuhi",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [17760, 30117],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Yuuhi",
"<NAME>",
"<NAME>",
"<NAME>",
],
"linked_song": [26587],
},
{
"members": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Yuuhi",
"<NAME>",
"<NAME>",
"""
Getter method for idle, mapped from YANG variable /system/cpus/cpu/state/idle (container)
YANG Description: Percentage of CPU time spent idle.
"""
return self.__idle
def _set_idle(self, v, load=False):
"""
Setter method for idle, mapped from YANG variable /system/cpus/cpu/state/idle (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_idle is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_idle() directly.
YANG Description: Percentage of CPU time spent idle.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """idle must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
})
self.__idle = t
if hasattr(self, '_set'):
self._set()
def _unset_idle(self):
self.__idle = YANGDynClass(base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_wait(self):
"""
Getter method for wait, mapped from YANG variable /system/cpus/cpu/state/wait (container)
YANG Description: Percentage of CPU time spent waiting for I/O.
"""
return self.__wait
def _set_wait(self, v, load=False):
"""
Setter method for wait, mapped from YANG variable /system/cpus/cpu/state/wait (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait() directly.
YANG Description: Percentage of CPU time spent waiting for I/O.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """wait must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
})
self.__wait = t
if hasattr(self, '_set'):
self._set()
def _unset_wait(self):
self.__wait = YANGDynClass(base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_hardware_interrupt(self):
"""
Getter method for hardware_interrupt, mapped from YANG variable /system/cpus/cpu/state/hardware_interrupt (container)
YANG Description: Percentage of CPU time spent servicing hardware interrupts.
"""
return self.__hardware_interrupt
def _set_hardware_interrupt(self, v, load=False):
"""
Setter method for hardware_interrupt, mapped from YANG variable /system/cpus/cpu/state/hardware_interrupt (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hardware_interrupt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hardware_interrupt() directly.
YANG Description: Percentage of CPU time spent servicing hardware interrupts.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hardware_interrupt must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
})
self.__hardware_interrupt = t
if hasattr(self, '_set'):
self._set()
def _unset_hardware_interrupt(self):
self.__hardware_interrupt = YANGDynClass(base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_software_interrupt(self):
"""
Getter method for software_interrupt, mapped from YANG variable /system/cpus/cpu/state/software_interrupt (container)
YANG Description: Percentage of CPU time spent servicing software interrupts
"""
return self.__software_interrupt
def _set_software_interrupt(self, v, load=False):
"""
Setter method for software_interrupt, mapped from YANG variable /system/cpus/cpu/state/software_interrupt (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_software_interrupt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_software_interrupt() directly.
YANG Description: Percentage of CPU time spent servicing software interrupts
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """software_interrupt must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
})
self.__software_interrupt = t
if hasattr(self, '_set'):
self._set()
def _unset_software_interrupt(self):
self.__software_interrupt = YANGDynClass(base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
index = __builtin__.property(_get_index)
total = __builtin__.property(_get_total)
user = __builtin__.property(_get_user)
kernel = __builtin__.property(_get_kernel)
nice = __builtin__.property(_get_nice)
idle = __builtin__.property(_get_idle)
wait = __builtin__.property(_get_wait)
hardware_interrupt = __builtin__.property(_get_hardware_interrupt)
software_interrupt = __builtin__.property(_get_software_interrupt)
_pyangbind_elements = OrderedDict([('index', index), ('total', total), ('user', user), ('kernel', kernel), ('nice', nice), ('idle', idle), ('wait', wait), ('hardware_interrupt', hardware_interrupt), ('software_interrupt', software_interrupt), ])
from . import total
from . import user
from . import kernel
from . import nice
from . import idle
from . import wait
from . import hardware_interrupt
from . import software_interrupt
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-system - based on the path /system/cpus/cpu/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for the system CPU(s)
"""
__slots__ = ('_path_helper', '_extmethods', '__index','__total','__user','__kernel','__nice','__idle','__wait','__hardware_interrupt','__software_interrupt',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods = False
self.__index = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)
self.__total = YANGDynClass(base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__user = YANGDynClass(base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__kernel = YANGDynClass(base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__nice = YANGDynClass(base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__idle = YANGDynClass(base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__wait = YANGDynClass(base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__hardware_interrupt = YANGDynClass(base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
self.__software_interrupt = YANGDynClass(base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['system', 'cpus', 'cpu', 'state']
def _get_index(self):
"""
Getter method for index, mapped from YANG variable /system/cpus/cpu/state/index (union)
YANG Description: The CPU index for each processor core on the system. On a
single-core system, the index should be zero. The ALL
index signifies an aggregation of the CPU utilization
statistics over all cores in the system.
"""
return self.__index
def _set_index(self, v, load=False):
"""
Setter method for index, mapped from YANG variable /system/cpus/cpu/state/index (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_index() directly.
YANG Description: The CPU index for each processor core on the system. On a
single-core system, the index should be zero. The ALL
index signifies an aggregation of the CPU utilization
statistics over all cores in the system.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """index must be of a type compatible with union""",
'defined-type': "openconfig-system:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)""",
})
self.__index = t
if hasattr(self, '_set'):
self._set()
def _unset_index(self):
self.__index = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)
def _get_total(self):
"""
Getter method for total, mapped from YANG variable /system/cpus/cpu/state/total (container)
YANG Description: Total CPU utilization.
"""
    return self.__total
indexes'''
self.__open_db()
self.__open_collections()
u, v = self.endpoint_names
self.edges.create_index(
[
(u, ASCENDING),
(v, ASCENDING)
],
name='incident',
unique=True)
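        # Note (illustrative, not from the original source): because of the
        # compound 'incident' index above, edge documents are keyed by their
        # (u, v) endpoint pair, so a lookup such as
        #     self.edges.find({u: 42, v: 43})
        # is index-backed, and unique=True rejects a second document for the
        # same ordered endpoint pair.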
def __check_metadata(self, metadata):
'''Checks if the provided metadata matches the existing
metadata in the meta collection'''
if self.directed is None:
assert metadata['directed'] is not None,\
"Meta collection exists but does not contain "\
"directed information"
self.directed = metadata['directed']
elif metadata['directed'] != self.directed:
raise ValueError((
"Input parameter directed={} does not match"
"directed value {} already in stored metadata")
.format(self.directed, metadata['directed']))
if self.total_roi is None:
if 'total_roi_offset' in metadata\
and 'total_roi_shape' in metadata:
offset = metadata['total_roi_offset']
shape = metadata['total_roi_shape']
self.total_roi = Roi(offset, shape)
else:
offset = self.total_roi.get_offset()
if list(offset) != metadata['total_roi_offset']:
raise ValueError((
"Input total_roi offset {} does not match"
"total_roi offset {} already stored in metadata")
.format(
self.total_roi.get_offset(),
metadata['total_roi_offset']))
if list(self.total_roi.get_shape()) != metadata['total_roi_shape']:
raise ValueError((
"Input total_roi shape {} does not match"
"total_roi shape {} already stored in metadata")
.format(
self.total_roi.get_shape(),
metadata['total_roi_shape']))
def __set_metadata(self):
'''Sets the metadata in the meta collection to the provided values'''
if not self.directed:
# default is false
self.directed = False
meta_data = {'directed': self.directed}
# if total_roi not specified, don't write it
if self.total_roi:
meta_data['total_roi_offset'] = self.total_roi.get_offset()
meta_data['total_roi_shape'] = self.total_roi.get_shape()
self.__open_collections()
# It's possible that another worker has already inserted the metadata -
# upsert to keep only one document in the collection
self.meta.replace_one(meta_data, meta_data, upsert=True)
def __pos_query(self, roi):
'''Generates a mongo query for position'''
begin = roi.get_begin()
end = roi.get_end()
if type(self.position_attribute) == list:
assert len(self.position_attribute) == roi.dims, (
'Number of position attributes does not match number of '
'dimensions')
return {
key: {
k: v
for k, v in zip(
["$gte", "$lt"],
[
b if b is not None else float("-inf"),
e if e is not None else float("inf"),
],
)
}
for key, b, e in zip(self.position_attribute, begin, end)
}
else:
return {
"position.%d"
% d: {
k: v
for k, v in zip(
["$gte", "$lt"],
[
b if b is not None else float("-inf"),
e if e is not None else float("inf"),
],
)
}
for d, (b, e) in enumerate(zip(begin, end))
}
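    # Illustration (added; values are hypothetical): with
    # position_attribute = ['z', 'y', 'x'] and an ROI spanning
    # (0, 0, 0) -> (10, 20, 30), __pos_query returns
    #     {'z': {'$gte': 0, '$lt': 10},
    #      'y': {'$gte': 0, '$lt': 20},
    #      'x': {'$gte': 0, '$lt': 30}}
    # while the scalar-attribute branch keys on 'position.0', 'position.1',
    # ... instead. Unbounded ROI sides fall back to +/-inf, so open ROIs
    # still match every stored coordinate.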
class MongoDbSharedSubGraph(SharedSubGraph):
def __init__(
self,
graph_provider,
roi):
super().__init__()
self.provider = graph_provider
self.roi = roi
self.client = MongoClient(self.provider.host)
self.database = self.client[self.provider.db_name]
self.nodes_collection = self.database[
self.provider.nodes_collection_name]
self.edges_collection = self.database[
self.provider.edges_collection_name]
def write_nodes(
self,
roi=None,
attributes=None,
fail_if_exists=False,
fail_if_not_exists=False,
delete=False):
assert not delete, "Delete not implemented"
assert not(fail_if_exists and fail_if_not_exists),\
"Cannot have fail_if_exists and fail_if_not_exists simultaneously"
if self.provider.mode == 'r':
raise RuntimeError("Trying to write to read-only DB")
if roi is None:
roi = self.roi
logger.debug("Writing nodes")
nodes = []
for node_id, data in self.nodes(data=True):
if not self.__contains(roi, node_id):
logger.debug(
"Skipping node {} with data {} because not in roi {}"
.format(node_id, data, roi))
continue
node = {
'id': int(np.int64(node_id))
}
if not attributes:
node.update(data)
else:
for key in data:
if key in attributes:
node[key] = data[key]
nodes.append(node)
if len(nodes) == 0:
return
try:
self.__write(self.nodes_collection, ['id'], nodes,
fail_if_exists=fail_if_exists,
fail_if_not_exists=fail_if_not_exists,
delete=delete)
except BulkWriteError as e:
logger.error(e.details)
raise
def write_edges(
self,
roi=None,
attributes=None,
fail_if_exists=False,
fail_if_not_exists=False,
delete=False):
assert not delete, "Delete not implemented"
assert not(fail_if_exists and fail_if_not_exists),\
"Cannot have fail_if_exists and fail_if_not_exists simultaneously"
if self.provider.mode == 'r':
raise RuntimeError("Trying to write to read-only DB")
if roi is None:
roi = self.roi
logger.debug("Writing edges in %s", roi)
edges = []
u_name, v_name = self.provider.endpoint_names
for u, v, data in self.edges(data=True):
if not self.is_directed():
u, v = min(u, v), max(u, v)
if not self.__contains(roi, u):
logger.debug(
("Skipping edge with u {}, v {}," +
"and data {} because u not in roi {}")
.format(u, v, data, roi))
continue
edge = {
u_name: int(np.int64(u)),
v_name: int(np.int64(v)),
}
if not attributes:
edge.update(data)
else:
for key in data:
if key in attributes:
edge[key] = data[key]
edges.append(edge)
if len(edges) == 0:
logger.debug("No edges to insert in %s", roi)
return
try:
self.__write(self.edges_collection, [u_name, v_name], edges,
fail_if_exists=fail_if_exists,
fail_if_not_exists=fail_if_not_exists,
delete=delete)
except BulkWriteError as e:
logger.error(e.details)
raise
def update_node_attrs(
self,
roi=None,
attributes=None):
if self.provider.mode == 'r':
raise RuntimeError("Trying to write to read-only DB")
if roi is None:
roi = self.roi
logger.debug("Updating node attributes")
updates = []
for node_id, data in self.nodes(data=True):
if not self.__contains(roi, node_id):
logger.debug(
"Skipping node {} with data {} because not in roi {}"
.format(node_id, data, roi))
continue
_filter = {
'id': int(np.int64(node_id))
}
if not attributes:
update = {'$set': data}
else:
update = {}
for key in data:
if key in attributes:
update[key] = data[key]
if not update:
logger.info("Skipping node %s with data %s"
" - no attributes to update"
% (node_id, data))
continue
update = {'$set': update}
updates.append(UpdateOne(_filter, update))
if len(updates) == 0:
return
try:
self.nodes_collection.bulk_write(updates, ordered=False)
except BulkWriteError as e:
logger.error(e.details)
raise
def update_edge_attrs(
self,
roi=None,
attributes=None):
if self.provider.mode == 'r':
raise RuntimeError("Trying to write to read-only DB")
if roi is None:
roi = self.roi
logger.debug("Updating edge attributes")
updates = []
u_name, v_name = self.provider.endpoint_names
for u, v, data in self.edges(data=True):
if not self.is_directed():
u, v = min(u, v), max(u, v)
if not self.__contains(roi, u):
logger.debug(
("Skipping edge with u {}, v {}," +
"and data {} because u not in roi {}")
.format(u, v, data, roi))
continue
_filter = {
u_name: int(np.int64(u)),
v_name: int(np.int64(v)),
}
if not attributes:
update = {'$set': data}
else:
update = {}
for key in data:
if key in attributes:
update[key] = data[key]
if not update:
logger.info("Skipping edge %s -> %s with data %s"
"- no attributes to update"
% (u, v, data))
continue
update = {'$set': update}
updates.append(UpdateOne(_filter, update))
if len(updates) == 0:
logger.info("No updates in roi %s" % roi)
return
try:
self.edges_collection.bulk_write(updates, ordered=False)
except BulkWriteError as e:
logger.error(e.details)
raise
def get_connected_components(self):
'''Returns a list of connected components as networkx (di)graphs'''
subgraphs = []
if self.is_directed():
node_set_generator = nx.weakly_connected_components(self)
else:
node_set_generator = nx.connected_components(self)
for node_set in node_set_generator:
edge_set = self.edges(node_set, data=True)
if self.is_directed():
g = nx.DiGraph()
else:
g = nx.Graph()
g.add_nodes_from([(node, self.nodes[node]) for node in node_set])
g.add_edges_from(edge_set)
subgraphs.append(g)
return subgraphs
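    # Usage sketch (assumed call pattern, not from the original file): after
    # reading a subgraph for some ROI,
    #     for component in subgraph.get_connected_components():
    #         print(component.number_of_nodes(), component.number_of_edges())
    # Each component is a plain networkx Graph/DiGraph copy, so mutating it
    # does not touch the shared MongoDB-backed subgraph.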
def __write(self, collection, match_fields, docs,
fail_if_exists=False, fail_if_not_exists=False, delete=False):
        '''Writes documents to provided mongo collection, checking for restrictions.
Args:
collection (``pymongo.collection``):
The collection to write the documents into.
match_fields (``list`` of ``string``):
The set of fields to match to be considered the same document.
docs (``dict`` or ``bson``):
The documents to insert into the collection
fail_if_exists, fail_if_not_exists, delete (``bool``):
see write_nodes or write_edges for explanations of these flags
'''
assert not delete, "Delete not implemented"
match_docs = []
for doc in docs:
match_doc = {}
for field in match_fields:
match_doc[field] = doc[field]
match_docs.append(match_doc)
if fail_if_exists:
self.__write_fail_if_exists(collection, match_docs, docs)
elif fail_if_not_exists:
self.__write_fail_if_not_exists(collection, match_docs, docs)
else:
self.__write_no_flags(collection, match_docs, docs)
def __write_no_flags(self, collection, old_docs, new_docs):
bulk_query = [ReplaceOne(old, new, upsert=True)
for old, new in zip(old_docs, new_docs)]
collection.bulk_write(bulk_query, ordered=False)
def __write_fail_if_exists(self, collection, old_docs, new_docs):
for old in old_docs:
            # find_one() returns None when no matching document exists
            if collection.find_one(old) is not None:
raise WriteError(
"Found existing doc %s and fail_if_exists set to True."
" Aborting write for all docs." % old)
collection.insert_many(new_docs)
def __write_fail_if_not_exists(self, collection, old_docs, new_docs):
for old in old_docs:
            if collection.find_one(old) is None:
raise WriteError(
"Did not find existing doc %s and fail_if_not_exists "
"set to True. Aborting write for all docs." % old)
bulk_query = [ReplaceOne(old, new, upsert=False)
for old, new in zip(old_docs, new_docs)]
result = collection.bulk_write(bulk_query, ordered=False)
assert len(new_docs) == result.matched_count,\
("Supposed to replace %s docs, but only replaced %s"
% (len(new_docs), result.matched_count))
def __contains(self, roi, node):
'''Determines if the given node is inside the given roi'''
node_data = self.nodes[node]
# Some nodes are outside of the originally requested ROI (they have
# been pulled in by edges leaving the ROI). These nodes have no
# attributes, so we can't perform an inclusion test. However, we
# know they are outside of the subgraph ROI, and therefore also
# outside of 'roi', whatever it is.
coordinate = []
if type(self.provider.position_attribute) == list:
for pos_attr in self.provider.position_attribute:
if pos_attr not in node_data:
return False
coordinate.append(node_data[pos_attr])
else:
if self.provider.position_attribute not in node_data:
return False
coordinate = node_data[self.provider.position_attribute]
logger.debug("Checking if coordinate {} is inside roi {}"
                     .format(coordinate, roi))
        # assumes the Roi class used above (Roi(offset, shape)) exposes a
        # point-containment test; adjust if the geometry API differs
        return roi.contains(coordinate)
116.50 SOURCE3_SOURCE5 720 1.3034
c3-c -ca 64.2 118.40 SOURCE4_SOURCE5 749 1.4991
c3-c -cc 65.0 117.29 CORR_SOURCE5 118 1.7737
c3-c -cd 65.0 117.29 CORR_SOURCE5 118 1.7737
c3-c -ce 64.9 116.44 CORR_SOURCE5 543 1.3559
c3-c -cf 64.9 116.44 CORR_SOURCE5 543 1.3559
c3-c -cg 66.0 115.00 SOURCE2 1
c3-c -ch 66.0 115.00 SOURCE2 1
c3-c -cl 71.2 111.99 SOURCE3 2 0.0125
c3-c -f 88.4 110.70 SOURCE2 1
c3-c -h4 46.1 114.64 SOURCE4_SOURCE5 193 0.4989
c3-c -ha 46.0 115.22 SOURCE3 15 0.3181
c3-c -i 60.4 112.94 SOURCE3 1
c3-c -n2 83.5 114.53 SOURCE3 1
c3-c -n4 81.1 112.26 SOURCE3 2
c3-c -n 84.3 115.18 SOURCE3_SOURCE5 2997 1.3885
c3-c -ne 84.9 112.61 CORR_SOURCE5 19 2.4426
c3-c -nf 84.9 112.61 CORR_SOURCE5 19 2.4426
c3-c -o 84.6 123.20 SOURCE3_SOURCE5 10083 1.8011
c3-c -oh 85.8 112.73 SOURCE3_SOURCE5 1989 1.3796
c3-c -os 86.4 110.72 SOURCE3_SOURCE5 1786 0.9391
c3-c -p3 77.8 116.42 SOURCE3 3 0.1291
c3-c -p5 77.0 118.90 SOURCE3 1
c3-c -pe 77.4 114.85 SOURCE3 1
c3-c -pf 77.4 114.85 SOURCE3 1
c3-c -px 77.4 115.60 SOURCE3 1
c3-c -py 77.7 118.16 SOURCE3 3 1.0735
c3-c -s4 61.4 114.79 SOURCE3 1
c3-c -s6 61.4 114.72 SOURCE3 1
c3-c -s 63.9 123.15 SOURCE3_SOURCE5 66 1.3121
c3-c -sh 63.8 112.65 SOURCE3_SOURCE5 9 1.5127
c3-c -ss 63.4 113.51 SOURCE3_SOURCE5 65 0.9334
c3-c -sx 61.2 113.97 SOURCE3 3 0.0610
c3-c -sy 61.6 114.28 SOURCE3 3 0.7341
ca-c -ca 65.0 118.11 SOURCE4_SOURCE5 506 1.8633
ca-c -cc 66.1 116.00 CORR_SOURCE5 670 1.7109
ca-c -cd 66.1 116.00 CORR_SOURCE5 670 1.7109
ca-c -ce 65.0 119.02 CORR_SOURCE5 83 1.3943
ca-c -cf 65.0 119.02 CORR_SOURCE5 83 1.3943
ca-c -h4 46.9 115.14 SOURCE4_SOURCE5 122 0.7683
ca-c -ha 47.2 114.12 SOURCE3 1
ca-c -n 85.4 115.25 SOURCE4_SOURCE5 1494 1.4889
ca-c -ne 85.3 114.71 SOURCE4_SOURCE5 14 0.5855
ca-c -o 86.2 122.60 SOURCE3_SOURCE5 3960 1.5802
ca-c -oh 86.7 113.45 SOURCE4_SOURCE5 656 0.8414
ca-c -os 87.0 112.44 SOURCE3_SOURCE5 493 0.8365
ca-c -s 64.6 122.68 SOURCE4_SOURCE5 32 1.3788
ca-c -sh 62.5 118.63 SOURCE3 1
ca-c -ss 63.4 115.05 SOURCE4_SOURCE5 37 1.0695
br-cc-c 65.2 116.28 SOURCE4_SOURCE5 32 1.1116
br-cc-cc 63.4 124.05 SOURCE4_SOURCE5 31 1.9388
br-cc-cd 63.7 124.23 SOURCE4_SOURCE5 116 2.3356
br-cc-na 80.6 121.58 SOURCE4_SOURCE5 19 0.8500
c2-cc-c3 65.3 126.11 SOURCE3 2
c2-cc-ca 67.0 124.42 CORR_SOURCE5 25 1.8245
c2-cc-cc 68.3 122.19 CORR_SOURCE5 46 2.3853
c2-cc-cd 71.3 117.02 SOURCE3 2 0.0703
c2-cc-ha 49.2 122.72 SOURCE3 2 0.0092
c2-cc-n 86.2 124.91 SOURCE3_SOURCE5 5 1.6803
c2-cc-os 88.0 121.42 CORR_SOURCE5 24 0.9570
c -c -c3 63.6 116.17 SOURCE3_SOURCE5 58 1.1332
c3-cc-ca 63.3 126.52 CORR_SOURCE5 370 1.8946
c3-cc-cc 66.7 115.97 SOURCE3 4 3.0507
c3-cc-cd 66.8 119.45 SOURCE3 35 8.2040
c3-cc-cf 67.4 117.84 CORR 2
c3-cc-ha 45.5 121.52 SOURCE3 32 3.2091
c3-cc-n2 83.3 125.69 CORR_SOURCE5 12 1.9935
c3-cc-n 83.6 119.19 CORR_SOURCE5 107 2.1078
c3-cc-na 82.4 122.73 CORR_SOURCE5 961 1.6482
c3-cc-nc 83.2 120.95 CORR_SOURCE5 456 0.8756
c3-cc-nd 83.9 122.41 CORR_SOURCE5 653 1.6992
c3-cc-os 84.9 116.80 CORR_SOURCE5 306 0.8990
c3-cc-ss 62.7 121.53 CORR_SOURCE5 270 1.0948
c -c -c 64.4 111.68 SOURCE3 2 6.1226
c -c -ca 63.6 118.60 SOURCE4_SOURCE5 90 1.0263
ca-cc-cc 69.3 111.04 SOURCE3 9 7.9455
ca-cc-cd 69.8 113.51 SOURCE3 26 7.4229
ca-cc-ce 64.3 127.01 SOURCE4_SOURCE5 38 1.6763
ca-cc-h4 45.4 129.25 SOURCE3_SOURCE5 54 1.5632
ca-cc-ha 46.3 124.04 SOURCE3 34 3.6691
ca-cc-n 85.6 117.67 CORR 18
ca-cc-nc 84.9 120.59 CORR_SOURCE5 224 1.0853
ca-cc-nd 85.3 123.24 CORR_SOURCE5 246 2.3557
ca-cc-nh 84.3 122.13 SOURCE4_SOURCE5 20 1.7636
ca-cc-oh 86.6 117.55 CORR_SOURCE5 35 1.9318
ca-cc-os 87.2 114.75 CORR_SOURCE5 247 2.0579
ca-cc-ss 63.4 120.80 CORR_SOURCE5 80 2.1212
c -cc-c2 67.5 121.17 CORR_SOURCE5 28 1.6484
c -cc-c3 65.4 117.76 CORR_SOURCE5 566 1.9588
c -cc-c 65.2 121.07 CORR_SOURCE5 128 0.8902
c -c -cc 66.0 111.67 SOURCE3 4 5.5146
c -cc-ca 65.0 122.95 SOURCE3 1
c -cc-cc 65.7 122.69 SOURCE3 2
cc-c -cc 66.7 115.84 CORR_SOURCE5 115 1.4659
cc-cc-cc 70.1 110.70 SOURCE3 54 3.4091
cc-cc-cd 70.3 114.19 SOURCE3 517 6.5960
cc-cc-ce 64.9 127.06 CORR_SOURCE5 61 2.3233
cc-cc-cf 68.0 122.72 CORR_SOURCE5 66 1.9701
cc-cc-cg 65.8 125.91 CORR_SOURCE5 41 1.1646
c -cc-cd 67.2 121.35 CORR_SOURCE5 3554 2.2084
cc-c -cd 67.6 112.79 SOURCE3 1
c -cc-ce 65.4 121.57 CORR_SOURCE5 29 1.1305
cc-c -ce 66.4 115.57 SOURCE4_SOURCE5 14 1.2088
cc-cc-f 88.4 119.19 SOURCE4_SOURCE5 26 0.8983
c -cc-cg 67.0 117.88 SOURCE4_SOURCE5 26 0.6759
cc-cc-h4 46.3 127.96 SOURCE3_SOURCE5 391 2.1732
cc-cc-ha 47.6 121.07 CORR_SOURCE5 2414 2.2010
c -cc-cl 72.6 116.38 CORR_SOURCE5 50 1.2099
cc-cc-n2 87.4 122.21 CORR_SOURCE5 37 1.6493
cc-cc-n 85.8 119.89 SOURCE3 36 0.2095
cc-cc-na 86.5 117.77 SOURCE3_SOURCE5 865 1.5665
cc-cc-nc 85.3 121.98 CORR_SOURCE5 141 1.9633
cc-cc-nd 90.3 112.56 SOURCE3 141 4.2871
cc-cc-nh 86.0 119.72 CORR_SOURCE5 348 1.7785
cc-cc-oh 86.2 121.27 CORR_SOURCE5 11 2.2744
cc-cc-os 87.2 117.34 CORR_SOURCE5 217 1.9304
cc-cc-pd 84.5 115.36 SOURCE3 84
cc-cc-ss 63.9 120.21 CORR_SOURCE5 52 2.1160
cc-cc-sy 61.1 128.25 SOURCE4_SOURCE5 20 0.9014
c -c -cd 66.0 111.67 SOURCE3 4 5.5146
cd-cc-cd 70.0 120.08 CORR_SOURCE5 119 1.6139
cd-cc-ce 65.8 128.05 CORR_SOURCE5 350 2.4628
cd-cc-cl 71.7 123.41 CORR_SOURCE5 115 2.1217
cd-cc-f 89.6 121.19 SOURCE4_SOURCE5 82 0.7206
cd-cc-h4 47.8 128.48 SOURCE3_SOURCE5 3291 2.3189
cd-cc-ha 49.0 121.76 SOURCE3_SOURCE5 4433 1.8701
cd-cc-n 87.0 121.33 SOURCE3_SOURCE5 821 1.9126
cd-cc-na 92.7 106.99 SOURCE3_SOURCE5 3003 2.3845
cd-cc-nc 91.1 111.65 CORR_SOURCE5 1656 1.8430
cd-cc-nh 86.3 123.84 CORR_SOURCE5 152 2.2360
cd-cc-no 82.9 128.69 SOURCE4_SOURCE5 314 1.4409
cd-cc-oh 87.2 123.78 CORR_SOURCE5 251 1.1988
cd-cc-os 88.0 120.30 SOURCE3 64 5.4354
cd-cc-ss 66.9 111.55 CORR_SOURCE5 1048 1.8648
cd-cc-sy 62.5 124.55 CORR_SOURCE5 56 1.7107
ce-cc-na 83.4 124.35 CORR_SOURCE5 87 1.3591
ce-cc-nc 84.8 121.10 CORR_SOURCE5 43 1.2959
ce-cc-nd 85.9 121.70 CORR_SOURCE5 58 1.4179
ce-cc-os 85.8 118.76 CORR_SOURCE5 92 1.3159
ce-cc-ss 63.2 121.58 CORR_SOURCE5 54 1.3126
c -cc-f 87.8 116.98 SOURCE4_SOURCE5 49 0.4690
cg-cc-na 84.9 122.61 SOURCE4_SOURCE5 12 0.9695
cg-cc-ss 63.8 120.73 SOURCE4_SOURCE5 27 0.9221
cc-c -h4 47.6 114.83 SOURCE4_SOURCE5 25 0.5124
c -cc-ha 47.4 116.64 SOURCE3_SOURCE5 896 1.3075
cl-cc-na 90.5 121.12 SOURCE4_SOURCE5 37 0.7206
cl-cc-nd 91.0 122.07 CORR_SOURCE5 19 1.6973
cl-cc-ss 71.9 119.85 SOURCE4_SOURCE5 27 0.9529
c -cc-n2 85.2 123.93 CORR_SOURCE5 6 0.0993
c -cc-n 85.7 116.37 CORR_SOURCE5 41 2.4875
cc-c -n 87.1 112.70 SOURCE3_SOURCE5 1124 1.8431
c -cc-nc 83.5 123.32 CORR_SOURCE5 27 2.2025
cc-c -nd 85.6 116.24 CORR_SOURCE5 38 1.0053
c -cc-nd 85.3 121.88 CORR_SOURCE5 54 2.0672
c -cc-ne 84.5 119.88 SOURCE4 6 0.3139
cc-c -o 86.7 123.93 SOURCE3_SOURCE5 3463 2.3073
c -cc-oh 87.6 113.66 CORR_SOURCE5 190 1.6462
cc-c -oh 87.8 112.84 CORR_SOURCE5 184 0.7264
c -cc-os 85.1 119.26 CORR_SOURCE5 104 2.4145
cc-c -os 87.1 114.20 SOURCE3_SOURCE5 427 2.2749
cc-c -s 64.0 126.28 SOURCE4_SOURCE5 69 1.9867
cc-c -ss 64.4 112.40 SOURCE4_SOURCE5 42 0.9902
cx-cc-nd 83.2 127.88 5/2017 15 1.5594
cx-cc-os 85.5 118.06 5/2017 13 0.0898
cd-c -cd 66.7 115.84 CORR_SOURCE5 115 1.4659
cd-c -cx 65.4 117.42 5/2017 24 0.1441
cd-c -n 87.1 112.70 SOURCE3_SOURCE5 1124 1.8431
cd-c -nc 85.6 116.24 CORR_SOURCE5 38 1.0053
cd-c -nd 86.5 113.75 SOURCE4_SOURCE5 28 0.0860
cd-c -o 86.7 123.93 SOURCE3_SOURCE5 3463 2.3073
cd-c -oh 87.8 112.84 CORR_SOURCE5 184 0.7264
cd-c -os 87.1 114.20 SOURCE3_SOURCE5 427 2.2749
ce-c -ce 66.0 115.82 CORR_SOURCE5 103 0.7143
ce-c -cf 65.9 116.37 SOURCE4_SOURCE5 31 1.3157
ce-c -cx 65.8 114.98 5/2017 36 3.8282
ce-c -h4 47.2 114.89 SOURCE4_SOURCE5 113 0.4718
ce-c -ha 47.2 115.22 SOURCE3 7 2.4188
ce-c -n 85.7 115.22 CORR_SOURCE5 38 1.1173
ce-c -o 86.3 123.20 SOURCE3_SOURCE5 2306 2.0617
ce-c -oh 87.0 113.62 CORR_SOURCE5 273 1.4501
ce-c -os 87.8 110.93 CORR_SOURCE5 445 1.6899
ce-c -s 64.7 122.63 SOURCE3_SOURCE5 11 1.3034
ce-c -ss 64.8 110.49 SOURCE4_SOURCE5 13 0.5852
cf-c -cf 66.0 115.82 CORR_SOURCE5 103 0.7143
cf-c -ha 47.2 115.22 SOURCE3 7
cf-c -n 85.7 115.22 CORR_SOURCE5 38 1.1173
cf-c -o 86.3 123.20 SOURCE3_SOURCE5 2306 2.0617
cf-c -oh 87.0 113.62 CORR_SOURCE5 273 1.4501
cf-c -os 87.8 110.93 CORR_SOURCE5 445 1.6899
cf-c -s 64.7 122.63 SOURCE3_SOURCE5 11 1.3034
cg-c -cg 67.6 115.38 SOURCE3 1
cg-c -ha 48.3 113.90 SOURCE2 1
cg-c -o 88.2 121.78 SOURCE3_SOURCE5 13 0.8393
c -c -h4 45.2 115.80 SOURCE4_SOURCE5 17 0.7492
h4-cc-n 62.7 115.69 SOURCE3_SOURCE5 425 0.9142
h4-cc-na 61.5 120.53 SOURCE3_SOURCE5 1801 1.3882
h4-cc-nc 61.7 121.14 SOURCE3_SOURCE5 574 0.5658
h4-cc-nd 64.3 118.47 SOURCE3_SOURCE5 435 1.3360
h4-cc-os 63.6 114.90 SOURCE3_SOURCE5 456 0.8638
h4-cc-ss 42.5 119.97 SOURCE3_SOURCE5 496 0.7119
h5-cc-n 62.7 115.70 CORR_SOURCE5 41 0.7665
h5-cc-na 61.2 121.55 SOURCE3_SOURCE5 1138 0.7136
h5-cc-nc 61.3 122.92 SOURCE3_SOURCE5 136 0.3532
h5-cc-nd 62.5 125.52 SOURCE3_SOURCE5 1309 0.7276
h5-cc-os 63.1 116.83 SOURCE3_SOURCE5 42 1.3051
h5-cc-ss 42.3 121.02 SOURCE3_SOURCE5 46 0.6462
c -c -ha 45.4 115.43 SOURCE2 3 0.6549
ha-cc-na 61.2 121.50 SOURCE2 1
ha-cc-nc 62.9 116.54 SOURCE3 5 1.4482
ha-cc-nd 64.2 118.88 SOURCE3 20 2.8923
ha-cc-os 64.8 110.86 SOURCE3 7 1.3846
ha-cc-pd 54.9 121.76 SOURCE3 84
ha-cc-ss 42.2 121.64 SOURCE2 5 1.3276
ch-c -ch 67.6 115.38 SOURCE3 1
ch-c -ha 48.3 113.90 SOURCE2 1
ch-c -o 88.2 121.78 SOURCE3_SOURCE5 13 0.8393
cl-c -cl 80.7 111.30 SOURCE2 1
cl-c -f 93.2 112.00 SOURCE2 1
cl-c -ha 48.2 109.90 SOURCE2 1
cl-c -o 89.0 120.69 SOURCE3_SOURCE5 14 1.1076
cl-c -s 69.9 127.60 SOURCE2 1
c -c -n 84.3 112.74 SOURCE4_SOURCE5 157 2.1770
na-cc-nc 108.8 121.95 CORR_SOURCE5 321 1.6221
na-cc-nd 115.5 112.22 SOURCE3_SOURCE5 2726 1.5103
na-cc-no 105.3 124.59 SOURCE4_SOURCE5 162 0.8093
na-cc-oh 111.7 117.48 SOURCE4_SOURCE5 39 0.9806
na-cc-sx 79.7 117.02 SOURCE4_SOURCE5 32 0.3937
na-cc-sy 79.5 120.46 SOURCE4_SOURCE5 15 1.7292
nc-cc-nd 114.2 115.83 CORR_SOURCE5 309 1.2424
nc-cc-nh 111.3 117.23 CORR_SOURCE5 51 1.7463
nc-cc-no 106.9 121.73 SOURCE4_SOURCE5 17 0.8729
nc-cc-ss 79.9 122.64 SOURCE3_SOURCE5 10 1.3100
nd-cc-nd 110.8 128.07 SOURCE4_SOURCE5 17 0.2580
nd-cc-ne 107.8 129.01 SOURCE4_SOURCE5 20 1.2478
nd-cc-nh 111.7 120.65 SOURCE3_SOURCE5 554 1.6769
nd-cc-no 108.2 122.75 SOURCE4_SOURCE5 80 0.3006
nd-cc-oh                  112.7     121.12   CORR_SOURCE5
# network.py
import numpy as np
import tensorflow as tf
import re
import math
from config import *
from .deform_conv_layer import deform_conv_op as deform_conv_op
DEFAULT_PADDING = 'SAME'
DEFAULT_TYPE = tf.float32
def include_original(dec):
""" Meta decorator, which make the original function callable (via f._original() )"""
def meta_decorator(f):
decorated = dec(f)
decorated._original = f
return decorated
return meta_decorator
summary = True
def ActivationSummary(layer): #tensorBoard (jmfacil)
if summary:
TOWER_NAME = 'tower'
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', layer.op.name)
tf.summary.histogram(tensor_name + '/activations', layer)
@include_original
def layer(op):
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.inputs) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.inputs) == 1:
layer_input = self.inputs[0]
else:
layer_input = list(self.inputs)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
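# Example of the call chaining the @layer decorator enables (layer names here
# are hypothetical, for illustration only): each decorated op stores its
# output in self.layers under `name` and feeds it forward, so a subclass's
# setup() can be written as
#     (self.feed('data')
#          .conv(3, 3, 64, 1, 1, name='conv1')
#          .conv(3, 3, 64, 1, 1, name='conv2'))
# and any intermediate output can later be re-fed by name, e.g.
# self.feed('conv1').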
class Network(object):
def __init__(self, inputs, trainable=True, is_training = True,bs=16):#,reuse=None): #cfernandez
self.inputs = []
self.batch_size = bs
self.layers = dict(inputs)
self.trainable = trainable
self.is_training = is_training
self.setup()
def setup(self):
raise NotImplementedError('Must be subclassed.')
def load(self, data_path, session, ignore_missing=False):
def transform_names(k):
if k == 'mean':
return 'moving_mean'
if k == 'variance':
return 'moving_variance'
if k == 'scale':
return 'gamma'
if k == 'offset':
return 'beta'
return k
print(data_path)
data_dict = np.load(data_path,encoding='latin1').item()
for key in data_dict:
superkey=self.nname+"/"+key
with tf.variable_scope(superkey, reuse=True):
for subkey in data_dict[key]:
try:
nsubkey=transform_names(subkey)
var = tf.get_variable(nsubkey)
session.run(var.assign(data_dict[key][subkey]))
except ValueError:
print("ignore "+key,subkey)
print(superkey,tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=superkey))
if not ignore_missing:
raise
print ("Loaded weitghts")
def feed(self, *args):
assert len(args) != 0
self.inputs = []
for layer in args:
if isinstance(layer, str):
try:
layer = self.layers[layer]
print(layer)
except KeyError:
print(list(self.layers.keys()))
raise KeyError('Unknown layer name fed: %s' % layer)
self.inputs.append(layer)
return self
def get_output(self, layer):
try:
layer = self.layers[layer]
except KeyError:
print(list(self.layers.keys()))
raise KeyError('Unknown layer name fed: %s' % layer)
return layer
def get_layer_output(self, name):
return self.layers[name]
def get_unique_name(self, prefix):
id = sum(t.startswith(prefix) for t, _ in list(self.layers.items())) + 1
return '%s_%d' % (prefix, id)
def make_var(self, name, shape, initializer=None, trainable=True, regularizer=None):
return tf.get_variable(name, shape, initializer=initializer, trainable=trainable, regularizer=regularizer)
def validate_padding(self, padding):
assert padding in ('SAME', 'VALID')
def filler(self, params): #chema
#print "Filler: "+str(params)
value = params.get("value",0.0)
mean = params.get("mean",0.0)
std = params.get("std",0.1)
dtype = params.get("dtype",DEFAULT_TYPE)
name = params.get("name",None)
uniform = params.get("uniform",False)
return {
"xavier_conv2d" : tf.contrib.layers.xavier_initializer_conv2d(uniform = uniform),
"t_normal" : tf.truncated_normal_initializer(mean = mean, stddev = std, dtype = dtype) ,
"constant" : tf.constant_initializer(value = value, dtype = dtype)
}[params.get("type","t_normal")]
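        # Illustration (added comment): filler() maps a Caffe-style filler
        # spec onto a TensorFlow initializer, e.g.
        #     self.filler({"type": "constant", "value": 0.1})
        # returns tf.constant_initializer(0.1), while the default
        # {"type": "t_normal"} gives a truncated normal with std 0.1.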
@layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, rate=1, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True, initializer=None):
""" contribution by miraclebiu, and biased option"""
self.validate_padding(padding)
c_i = input.get_shape()[-1]
convolve = lambda i, k: tf.nn.convolution(
i, k, padding=padding, strides=[s_h, s_w], dilation_rate=[rate, rate])
with tf.variable_scope(name,reuse=False) as scope: #cfernandez reuse
# init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
            init_weights = tf.zeros_initializer() if initializer == 'zeros' else tf.contrib.layers.variance_scaling_initializer(
                factor=0.01, mode='FAN_AVG', uniform=False)
init_biases = tf.constant_initializer(0.0)
#kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable,
# regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // 1, c_o],initializer=self.filler({ "type" : "t_normal", #cfernandez
"mean" : 0.0,
"std" : 0.1
}),regularizer=self.l2_regularizer(args.weight_decay)) #0.0005 cfg.TRAIN.WEIGHT_DECAY
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
                if relu:
                    bias = tf.nn.bias_add(conv, biases)
                    output = tf.nn.relu(bias)
                else:
                    output = tf.nn.bias_add(conv, biases)
            else:
                conv = convolve(input, kernel)
                if relu:
                    output = tf.nn.relu(conv)
                else:
                    output = conv
return output
@staticmethod
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
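    # Sanity check for the quaternion-style rotation above (illustrative):
    # rotating the x axis by pi/2 about the z axis should give the y axis,
    #     R = Network.rotation_matrix((0, 0, 1), np.pi / 2)
    #     np.allclose(R.dot([1, 0, 0]), [0, 1, 0])   # -> True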
@staticmethod
def equi_coord(pano_W,pano_H,k_W,k_H,u,v):
""" contribution by cfernandez and jmfacil """
fov_w = k_W * np.deg2rad(360./float(pano_W))
focal = (float(k_W)/2) / np.tan(fov_w/2)
c_x = 0
c_y = 0
u_r, v_r = u, v
u_r, v_r = u_r-float(pano_W)/2.,v_r-float(pano_H)/2.
phi, theta = u_r/(pano_W) * (np.pi) *2, -v_r/(pano_H) * (np.pi)
ROT = Network.rotation_matrix((0,1,0),phi)
ROT = np.matmul(ROT,Network.rotation_matrix((1,0,0),theta))#np.eye(3)
h_range = np.array(range(k_H))
w_range = np.array(range(k_W))
w_ones = (np.ones(k_W))
h_ones = (np.ones(k_H))
h_grid = np.matmul(np.expand_dims(h_range,-1),np.expand_dims(w_ones,0))+0.5-float(k_H)/2
w_grid = np.matmul(np.expand_dims(h_ones,-1),np.expand_dims(w_range,0))+0.5-float(k_W)/2
K=np.array([[focal,0,c_x],[0,focal,c_y],[0.,0.,1.]])
inv_K = np.linalg.inv(K)
rays = np.stack([w_grid,h_grid,np.ones(h_grid.shape)],0)
rays = np.matmul(inv_K,rays.reshape(3,k_H*k_W))
rays /= np.linalg.norm(rays,axis=0,keepdims=True)
rays = np.matmul(ROT,rays)
rays=rays.reshape(3,k_H,k_W)
phi = np.arctan2(rays[0,...],rays[2,...])
theta = np.arcsin(np.clip(rays[1,...],-1,1))
x = (pano_W)/(2.*np.pi)*phi +float(pano_W)/2.
y = (pano_H)/(np.pi)*theta +float(pano_H)/2.
roi_y = h_grid+v_r +float(pano_H)/2.
roi_x = w_grid+u_r +float(pano_W)/2.
new_roi_y = (y)
new_roi_x = (x)
offsets_x = (new_roi_x - roi_x)
offsets_y = (new_roi_y - roi_y)
return offsets_x, offsets_y
@staticmethod
def equi_coord_fixed_resoltuion(pano_W,pano_H,k_W,k_H,u,v,pano_Hf = -1, pano_Wf=-1):
""" contribution by cfernandez and jmfacil """
pano_Hf = pano_H if pano_Hf<=0 else pano_H/pano_Hf
pano_Wf = pano_W if pano_Wf<=0 else pano_W/pano_Wf
fov_w = k_W * np.deg2rad(360./float(pano_Wf))
focal = (float(k_W)/2) / np.tan(fov_w/2)
c_x = 0
c_y = 0
u_r, v_r = u, v
u_r, v_r = u_r-float(pano_W)/2.,v_r-float(pano_H)/2.
phi, theta = u_r/(pano_W) * (np.pi) *2, -v_r/(pano_H) * (np.pi)
ROT = Network.rotation_matrix((0,1,0),phi)
ROT = np.matmul(ROT,Network.rotation_matrix((1,0,0),theta))#np.eye(3)
h_range = np.array(range(k_H))
w_range = np.array(range(k_W))
w_ones = (np.ones(k_W))
h_ones = (np.ones(k_H))
h_grid = np.matmul(np.expand_dims(h_range,-1),np.expand_dims(w_ones,0))+0.5-float(k_H)/2
w_grid = np.matmul(np.expand_dims(h_ones,-1),np.expand_dims(w_range,0))+0.5-float(k_W)/2
K=np.array([[focal,0,c_x],[0,focal,c_y],[0.,0.,1.]])
inv_K = np.linalg.inv(K)
rays = np.stack([w_grid,h_grid,np.ones(h_grid.shape)],0)
rays = np.matmul(inv_K,rays.reshape(3,k_H*k_W))
rays /= np.linalg.norm(rays,axis=0,keepdims=True)
rays = np.matmul(ROT,rays)
rays=rays.reshape(3,k_H,k_W)
phi = np.arctan2(rays[0,...],rays[2,...])
theta = np.arcsin(np.clip(rays[1,...],-1,1))
x = (pano_W)/(2.*np.pi)*phi +float(pano_W)/2.
y = (pano_H)/(np.pi)*theta +float(pano_H)/2.
roi_y = h_grid+v_r +float(pano_H)/2.
roi_x = w_grid+u_r +float(pano_W)/2.
new_roi_y = (y)
new_roi_x = (x)
offsets_x = (new_roi_x - roi_x)
offsets_y = (new_roi_y - roi_y)
return offsets_x, offsets_y
@staticmethod
def distortion_aware_map(pano_W, pano_H, k_W, k_H, s_width = 1, s_height = 1,bs = 16):
""" contribution by cfernandez and jmfacil """
n=1
offset = np.zeros(shape=[pano_H,pano_W,k_H*k_W*2])
print(offset.shape)
for v in range(0, pano_H, s_height):
for u in range(0, pano_W, s_width):
offsets_x, offsets_y = Network.equi_coord_fixed_resoltuion(pano_W,pano_H,k_W,k_H,u,v,1,1)
offsets = np.concatenate((np.expand_dims(offsets_y,-1),np.expand_dims(offsets_x,-1)),axis=-1)
total_offsets = offsets.flatten().astype("float32")
offset[v,u,:] = total_offsets
offset = tf.constant(offset)
offset = tf.expand_dims(offset, 0)
offset = tf.concat([offset for _ in range(bs)],axis=0)
offset = tf.cast(offset, tf.float32)
return offset
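    # Shape note (descriptive, added): for a batch of `bs` equirectangular
    # panoramas of size pano_H x pano_W and a k_H x k_W kernel, the returned
    # constant has shape [bs, pano_H, pano_W, k_H * k_W * 2] -- a (dy, dx)
    # offset for every kernel tap at every output location -- and equi_conv
    # below passes it to the deformable convolution op as fixed,
    # non-learned offsets.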
@layer
def equi_conv(self, input, k_h, k_w, c_o, s_h, s_w, num_deform_group, name, num_groups = 1, rate = 1, biased=True, relu=True,
padding=DEFAULT_PADDING, trainable=True, initializer=None):
""" contribution by cfernandez and jmfacil """
self.validate_padding(padding)
data = input
n,h,w,_ = tuple(data.get_shape().as_list())
data_shape = data.shape
offset = tf.stop_gradient(Network.distortion_aware_map(w, h, k_w, k_h, s_width = s_w, s_height = s_h,bs= self.batch_size))
c_i = data.get_shape()[-1]
trans2NCHW = lambda x:tf.transpose(x, [0, 3 ,1 ,2])
trans2NHWC = lambda x:tf.transpose(x, [0, 2 ,3, 1])
# deform conv only supports NCHW
data = trans2NCHW(data)
offset = trans2NCHW(offset)
dconvolve = lambda i, k, o: deform_conv_op.deform_conv_op(
i, k, o, strides = [1, 1, s_h, s_w], rates=[1, 1, rate, rate], padding=padding, num_groups=num_groups, deformable_group=num_deform_group)
with tf.variable_scope(name, reuse=False) as scope:
            init_weights = tf.zeros_initializer() if initializer == 'zeros' else tf.contrib.layers.variance_scaling_initializer(
                factor=0.01, mode='FAN_AVG', uniform=False)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable,
regularizer=self.l2_regularizer(args.weight_decay))
kernel = tf.transpose(kernel,[3,2,0,1])
ActivationSummary(offset)
print(data, kernel, offset)
dconv = trans2NHWC(dconvolve(data, kernel, offset))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
if relu:
bias = tf.nn.bias_add(dconv, biases)
return tf.nn.relu(bias)
return tf.nn.bias_add(dconv, biases)
else:
if relu:
return tf.nn.relu(dconv)
return dconv
@layer
def upconv(self, input, shape, c_o, ksize=4, stride=2, name='upconv', biased=False, relu=True, padding=DEFAULT_PADDING,
trainable=True, initializer=None):
""" up-conv"""
self.validate_padding(padding)
c_in = input.get_shape()[3].value
in_shape_d = tf.shape(input)
in_shape = input.shape.as_list()
if shape is None:
h = ((in_shape[1]) * stride)
w = ((in_shape[2]) * stride)
            new_shape = [in_shape_d[0], h, w, c_o]
import datetime
import json
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers import json as djson
from django.utils.encoding import force_unicode
from tastypie.bundle import Bundle
from tastypie.exceptions import UnsupportedFormat
from tastypie.utils import format_datetime, format_date, format_time, make_naive
try:
import lxml
from lxml.etree import parse as parse_xml
from lxml.etree import Element, tostring
except ImportError:
lxml = None
try:
import yaml
from django.core.serializers import pyyaml
except ImportError:
yaml = None
try:
import biplist
except ImportError:
biplist = None
# Ugh & blah.
# So doing a regular dump is generally fine, since Tastypie doesn't usually
# serialize advanced types. *HOWEVER*, it will dump out Python Unicode strings
# as a custom YAML tag, which of course ``yaml.safe_load`` can't handle.
if yaml is not None:
from yaml.constructor import SafeConstructor
from yaml.loader import Reader, Scanner, Parser, Composer, Resolver
class TastypieConstructor(SafeConstructor):
def construct_yaml_unicode_dammit(self, node):
value = self.construct_scalar(node)
try:
return value.encode('ascii')
except UnicodeEncodeError:
return value
TastypieConstructor.add_constructor(u'tag:yaml.org,2002:python/unicode', TastypieConstructor.construct_yaml_unicode_dammit)
class TastypieLoader(Reader, Scanner, Parser, Composer, TastypieConstructor, Resolver):
def __init__(self, stream):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
TastypieConstructor.__init__(self)
Resolver.__init__(self)
class Serializer(object):
"""
A swappable class for serialization.
This handles most types of data as well as the following output formats::
* json
* jsonp
* xml
* yaml
* html
* plist (see http://explorapp.com/biplist/)
It was designed to make changing behavior easy, either by overridding the
various format methods (i.e. ``to_json``), by changing the
``formats/content_types`` options or by altering the other hook methods.
"""
formats = ['json', 'jsonp', 'xml', 'yaml', 'html', 'plist']
content_types = {
'json': 'application/json',
'jsonp': 'text/javascript',
'xml': 'application/xml',
'yaml': 'text/yaml',
'html': 'text/html',
'plist': 'application/x-plist',
}
def __init__(self, formats=None, content_types=None, datetime_formatting=None):
self.supported_formats = []
self.datetime_formatting = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
if formats is not None:
self.formats = formats
if content_types is not None:
self.content_types = content_types
if datetime_formatting is not None:
self.datetime_formatting = datetime_formatting
for format in self.formats:
try:
self.supported_formats.append(self.content_types[format])
except KeyError:
raise ImproperlyConfigured("Content type for specified type '%s' not found. Please provide it at either the class level or via the arguments." % format)
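        # Example (hypothetical values): restricting a resource to JSON and
        # XML with RFC 2822 dates would be constructed as
        #     Serializer(formats=['json', 'xml'],
        #                datetime_formatting='rfc-2822')
        # and naming a format without a matching content type raises
        # ImproperlyConfigured here, at construction time.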
def get_mime_for_format(self, format):
"""
Given a format, attempts to determine the correct MIME type.
If not available on the current ``Serializer``, returns
``application/json`` by default.
"""
try:
return self.content_types[format]
except KeyError:
return 'application/json'
def format_datetime(self, data):
"""
A hook to control how datetimes are formatted.
Can be overridden at the ``Serializer`` level (``datetime_formatting``)
or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).
Default is ``iso-8601``, which looks like "2010-12-16T03:02:14".
"""
data = make_naive(data)
if self.datetime_formatting == 'rfc-2822':
return format_datetime(data)
return data.isoformat()
def format_date(self, data):
"""
A hook to control how dates are formatted.
Can be overridden at the ``Serializer`` level (``datetime_formatting``)
or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).
Default is ``iso-8601``, which looks like "2010-12-16".
"""
if self.datetime_formatting == 'rfc-2822':
return format_date(data)
return data.isoformat()
def format_time(self, data):
"""
A hook to control how times are formatted.
Can be overridden at the ``Serializer`` level (``datetime_formatting``)
or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).
Default is ``iso-8601``, which looks like "03:02:14".
"""
if self.datetime_formatting == 'rfc-2822':
return format_time(data)
return data.isoformat()
def serialize(self, bundle, format='application/json', options={}):
"""
Given some data and a format, calls the correct method to serialize
the data and returns the result.
"""
desired_format = None
for short_format, long_format in self.content_types.items():
if format == long_format:
if hasattr(self, "to_%s" % short_format):
desired_format = short_format
break
if desired_format is None:
raise UnsupportedFormat("The format indicated '%s' had no available serialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
serialized = getattr(self, "to_%s" % desired_format)(bundle, options)
return serialized
def deserialize(self, content, format='application/json'):
"""
Given some data and a format, calls the correct method to deserialize
the data and returns the result.
"""
desired_format = None
format = format.split(';')[0]
for short_format, long_format in self.content_types.items():
if format == long_format:
if hasattr(self, "from_%s" % short_format):
desired_format = short_format
break
if desired_format is None:
raise UnsupportedFormat("The format indicated '%s' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
deserialized = getattr(self, "from_%s" % desired_format)(content)
return deserialized
def to_simple(self, data, options):
"""
For a piece of data, attempts to recognize it and provide a simplified
form of something complex.
This brings complex Python data structures down to native types of the
serialization format(s).
"""
if isinstance(data, (list, tuple)):
return [self.to_simple(item, options) for item in data]
if isinstance(data, dict):
return dict((key, self.to_simple(val, options)) for (key, val) in data.iteritems())
elif isinstance(data, Bundle):
return dict((key, self.to_simple(val, options)) for (key, val) in data.data.iteritems())
elif hasattr(data, 'dehydrated_type'):
if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
if data.full:
return self.to_simple(data.fk_resource, options)
else:
return self.to_simple(data.value, options)
elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
if data.full:
return [self.to_simple(bundle, options) for bundle in data.m2m_bundles]
else:
return [self.to_simple(val, options) for val in data.value]
else:
return self.to_simple(data.value, options)
elif isinstance(data, datetime.datetime):
return self.format_datetime(data)
elif isinstance(data, datetime.date):
return self.format_date(data)
elif isinstance(data, datetime.time):
return self.format_time(data)
elif isinstance(data, bool):
return data
elif type(data) in (long, int, float):
return data
elif data is None:
return None
else:
return force_unicode(data)
def to_etree(self, data, options=None, name=None, depth=0):
"""
Given some data, converts that data to an ``etree.Element`` suitable
for use in the XML output.
"""
if isinstance(data, (list, tuple)):
element = Element(name or 'objects')
if name:
element = Element(name)
element.set('type', 'list')
else:
element = Element('objects')
for item in data:
element.append(self.to_etree(item, options, depth=depth+1))
elif isinstance(data, dict):
if depth == 0:
element = Element(name or 'response')
else:
element = Element(name or 'object')
element.set('type', 'hash')
for (key, value) in data.iteritems():
element.append(self.to_etree(value, options, name=key, depth=depth+1))
elif isinstance(data, Bundle):
element = Element(name or 'object')
for field_name, field_object in data.data.items():
element.append(self.to_etree(field_object, options, name=field_name, depth=depth+1))
elif hasattr(data, 'dehydrated_type'):
if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
if data.full:
return self.to_etree(data.fk_resource, options, name, depth+1)
else:
return self.to_etree(data.value, options, name, depth+1)
elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
if data.full:
element = Element(name or 'objects')
for bundle in data.m2m_bundles:
element.append(self.to_etree(bundle, options, bundle.resource_name, depth+1))
else:
element = Element(name or 'objects')
for value in data.value:
element.append(self.to_etree(value, options, name, depth=depth+1))
else:
return self.to_etree(data.value, options, name)
else:
element = Element(name or 'value')
simple_data = self.to_simple(data, options)
data_type = get_type_string(simple_data)
if data_type != 'string':
element.set('type', get_type_string(simple_data))
if data_type != 'null':
if isinstance(simple_data, unicode):
element.text = simple_data
else:
element.text = force_unicode(simple_data)
return element
def from_etree(self, data):
"""
Not the smartest deserializer on the planet. At the request level,
it first tries to output the deserialized subelement called "object"
or "objects" and falls back to deserializing based on hinted types in
the XML element attribute "type".
"""
if data.tag == 'request':
# if "object" or "objects" exists, return deserialized forms.
elements = data.getchildren()
for element in elements:
if element.tag in ('object', 'objects'):
return self.from_etree(element)
return dict((element.tag, self.from_etree(element)) for element in elements)
elif data.tag == 'object' or data.get('type') == 'hash':
return dict((element.tag, self.from_etree(element)) for element in data.getchildren())
elif data.tag == 'objects' or data.get('type') == 'list':
return [self.from_etree(element) for element in data.getchildren()]
else:
type_string = data.get('type')
if type_string in ('string', None):
return data.text
elif type_string == 'integer':
return int(data.text)
elif type_string == 'float':
return float(data.text)
elif type_string == 'boolean':
if data.text == 'True':
return True
else:
return False
else:
return None
def to_json(self, data, options=None):
"""
Given some Python data, produces JSON output.
"""
options = options or {}
data = self.to_simple(data, options)
return json.dumps(data, cls=djson.DjangoJSONEncoder, sort_keys=True, ensure_ascii=False)
def from_json(self, content):
"""
Given some JSON data, returns a Python dictionary of the decoded data.
"""
return json.loads(content)
def to_jsonp(self, data, options=None):
"""
Given some Python data, produces JSON output wrapped in the provided
callback.
"""
options = options or {}
return '%s(%s)' % (options['callback'], self.to_json(data, options))
def to_xml(self, data, options=None):
"""
Given some Python data, produces XML output.
"""
options = options or {}
if lxml is None:
raise ImproperlyConfigured("Usage of the XML aspects requires lxml.")
return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')
def from_xml(self, content):
"""
Given some XML data, returns a Python dictionary of the decoded data.
"""
if lxml is None:
raise ImproperlyConfigured("Usage of the XML aspects requires lxml.")
return self.from_etree(parse_xml(StringIO(content)).getroot())
def to_yaml(self, data, options=None):
"""
| |
kdt
:param random_kdt_num_archs: Number of architectures for random kdt
:return: None
"""
prefix_name = lambda prefix, name: name if prefix is None else f'{prefix}-{name}'
try:
fname = prefix_name(prefix, f'rank_change-{epoch}.pdf')
fig = process_rank_data_nasbench(save_data, os.path.join(self.exp_dir, fname))
if self.writer:
self.writer.add_figure(tag=fname.split('.')[0].replace('_','-'), figure=fig, global_step=epoch)
except Exception as e:
logging.warning(e)
try:
ranking_per_epoch = save_data['ranking_per_epoch']
except KeyError as e:
logging.warning("save_data parsed into _save_ranking_results expect having key ranking_per_epoch"
"; got {}. Using self.ranking_per_epoch instead".format(save_data.keys()))
ranking_per_epoch = self.ranking_per_epoch
# Compute Kendall tau for every epochs and save them into result.
# IPython.embed()
kd_tau, kd_tau_report = self._compute_kendall_tau(ranking_per_epoch)
save_data['kendaltau'] = kd_tau
if compute_kdt_before and 'ranking_per_epoch_before' in self.running_stats.keys():
kd_tau_before, kd_tau_before_report = self._compute_kendall_tau(
self.running_stats['ranking_per_epoch_before'])
save_data['kendaltau_before'] = kd_tau_before
if self.writer is not None:
p = sorted([elem[1] for elem in ranking_per_epoch[epoch]], key=itemgetter(2))
tensorboard_summarize_list(
[e[0] for e in p], self.writer, prefix_name(prefix, 'eval_acc'), epoch, ascending=False
)
tensorboard_summarize_list(
[e[1] for e in p], self.writer, prefix_name(prefix, 'eval_obj'), epoch, ascending=True
)
self.writer.add_scalar(prefix_name(prefix, 'eval_kendall_tau'), kd_tau_report, epoch)
if compute_kdt_before and 'ranking_per_epoch_before' in self.running_stats.keys():
self.writer.add_scalar(prefix_name(prefix, 'eval_kendall_tau/original'), kd_tau_before_report, epoch)
# add these and collect writer keys
if any([sparse_kdt, percentile, random_kdt]):
data = ranking_per_epoch[epoch]
# ranking by valid accuracy
model_ids = [elem[1][3] for elem in data]
model_perfs = [elem[1][0] for elem in data]
model_ids, model_perfs = sort_hash_perfs(model_ids, model_perfs)
model_gt_perfs = self.search_space.query_gt_perfs(model_ids)
sorted_indices = np.argsort(model_perfs)[::-1]
sorted_model_ids = [model_ids[i] for i in sorted_indices]
# IPython.embed(header='checking the saving here.')
add_metrics = {}
if sparse_kdt:
if not isinstance(sparse_kdt_threshold, (tuple, list)):
sparse_kdt_threshold = [sparse_kdt_threshold]
for th in sparse_kdt_threshold:
kdt = compute_sparse_kendalltau(model_ids, model_perfs, model_gt_perfs,
threshold=th)
add_metrics[prefix_name(prefix, f'eval_kendall_tau/sparse_{th}')] = kdt.correlation
if percentile:
for top_k in percentile_top_K:
res = compute_percentile(sorted_model_ids,
self.search_space.num_architectures,
top_k,
verbose=self.args.debug)
mname = prefix_name(prefix, 'percentile')
add_metrics[f'{mname}/min_{top_k}'] = res.min()
add_metrics[f'{mname}/median_{top_k}'] = np.median(res)
add_metrics[f'{mname}/max_{top_k}'] = res.max()
logging.info("{} of top {}: {} - {} - {}".format(
mname,
top_k, res.min(), np.median(res), res.max()))
if random_kdt:
for subsample in random_kdt_num_archs:
if subsample > len(sorted_model_ids):
continue
kdt_final = []
for _ in range(random_kdt_numrepeat):
sub_model_indices = sorted(
np.random.choice(np.arange(0, len(sorted_model_ids)), subsample, replace=False).tolist())
sub_model_ids = [sorted_model_ids[i] for i in sub_model_indices]
kdt = kendalltau(sub_model_ids, list(reversed(sorted(sub_model_ids))))
kdt_final.append(kdt.correlation)
kdt_final = np.asanyarray(kdt_final, dtype=np.float)
mname = prefix_name(prefix, 'eval_kendall_tau')
add_metrics[f'{mname}/random_{subsample}_min'] = kdt_final.min()
add_metrics[f'{mname}/random_{subsample}_max'] = kdt_final.max()
add_metrics[f'{mname}/random_{subsample}_mean'] = kdt_final.mean()
logging.info("Random subsample {} archs: kendall tau {} ({},{})".format(
subsample, kdt_final.mean(), kdt_final.min(), kdt_final.max()))
            # end of additional metrics
if self.writer:
for k, v in add_metrics.items():
self.writer.add_scalar(k, v, epoch)
return save_data
def _save_results(self, save_data, epoch, rank_details=False, filename='result.json', **kwargs):
if rank_details:
save_data = self._save_ranking_results(save_data, epoch, **kwargs)
utils.save_json(save_data, os.path.join(self.exp_dir, filename))
# if hasattr(self, 'writer') and self.writer is not None:
# self.writer.export_scalars_to_json(os.path.join(self.exp_dir, 'tb_scalars.json'))
def save_results(self, epoch, rank_details=True):
save_data = {
'ranking_per_epoch': self.ranking_per_epoch,
'trained_model_spec_per_steps': self.trained_model_spec_ids,
}
# for other to overwrite.
return self._save_results(save_data, epoch, rank_details, sparse_kdt=True, percentile=True, random_kdt=True)
def save_policy_results(self, epoch, sampled_arch_ids, sample_perfs=None):
"""Save policy results, used for policy such as DARTS, NAO and ENAS, to track the intermediate results
Parameters
----------
epoch : int
epoch number
sampled_arch_ids : list
List of sampled architecture IDs
"""
# Make sure this id pool is not None.
model_spec_id_pool = sampled_arch_ids
self.logger.info(f'saving policy sampled ID at epoch {epoch}')
# save the eval arch pool.
archs = []
for i in model_spec_id_pool:
if isinstance(i, int):
archs.append(self.search_space.topology_by_id(i))
else:
archs.append(i)
perfs = sample_perfs if sample_perfs else [0] * len(archs)
for i, pos in enumerate(model_spec_id_pool):
            self.logger.info(f'particle gen id: {i}, performance: {perfs[i]}, '
                             f'spec: {archs[i]}, pos {pos}')
self.save_arch_pool_performance(model_spec_id_pool, perfs, prefix='sampler')
self.save_duplicate_arch_pool(prefix='sampler', epoch=epoch)
if self.writer:
# process data into list
gt_perfs = self.search_space.query_gt_perfs(sampled_arch_ids)
if gt_perfs:
tensorboard_summarize_list(np.array(gt_perfs), writer=self.writer, key='policy/gt_acc', step=epoch, ascending=False)
percentile = np.array(sampled_arch_ids) / self.search_space.num_architectures
self.writer.add_scalar('policy/percentile_median', np.median(percentile), epoch)
self.writer.add_scalar('policy/percentile_max', np.max(percentile), epoch)
def check_should_save(self, epoch):
"""
invoke the evaluate step, this is also used to update epoch information.
:param epoch:
:return:
"""
self.running_stats['epoch'] = epoch
self.epoch
if self.args.extensive_save and epoch > 50:
return any([(epoch - i) % self.args.save_every_epoch == 0 for i in range(3)])
return epoch % self.args.save_every_epoch == 0 and epoch > 0
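    # Illustrative note (not part of the original class): with
    # ``save_every_epoch=10`` and ``extensive_save`` enabled, any epoch > 50
    # that is at most two steps past a multiple of 10 (e.g. 60, 61, 62, 70, 71,
    # 72, ...) triggers a save; otherwise only exact multiples do.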
# logging for normal one.
def logging_at_epoch(self, acc, obj, epoch, keyword, display_dict=None):
message = f'{keyword} at epoch {epoch} | loss: {obj:8.2f} | top_1_acc: {acc:8.2f}'
if display_dict:
for k, v in display_dict.items():
message += f' | {k}: {v} '
logging.info(message)
self.running_stats['epoch'] = epoch
if self.writer:
self.writer.add_scalar(f'{keyword}/loss', obj, epoch)
self.writer.add_scalar(f'{keyword}/top_1_acc', acc, epoch)
if display_dict:
for k, v in display_dict.items():
self.writer.add_scalar(f'{keyword}/{k}', v, epoch)
def logging_maml(self, acc, obj, epoch, keyword, **kwargs):
""" specifically for MAML procedures"""
if isinstance(acc, tuple) and len(acc) == 2:
self.logging_at_epoch(acc[0], obj[0], epoch, keyword + '_task', **kwargs)
self.logging_at_epoch(acc[1], obj[1], epoch, keyword + '_meta', **kwargs)
else:
return self.logging_at_epoch(acc, obj, epoch, keyword, **kwargs)
def save_checkpoint(self, model, optimizer, backup_epoch=None, other_dict=None):
d = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'misc': self.running_stats
}
if self.amp:
d['amp'] = self.amp.state_dict()
if self.scheduler:
d['scheduler'] = self.scheduler.state_dict()
if other_dict:
d.update(other_dict)
utils.save_checkpoint_v2(d, self.exp_dir + '/checkpoint.pt', backup_weights_epoch=backup_epoch)
def resume_from_checkpoint(self, path=None, epoch=None):
""" resume the training and restoring the statistics. """
path = path or self.exp_dir
if self.args.resume_path and os.path.exists(self.args.resume_path):
path = self.args.resume_path
if os.path.exists(os.path.join(path, 'checkpoint.pt')):
res_dict = torch.load(os.path.join(path, 'checkpoint.pt'))
elif os.path.exists(path) and '.pt' in path[-10:]:
res_dict = torch.load(path)
else:
try:
res_dict = utils.load_checkpoint_v2(path, epoch=epoch)
except FileNotFoundError:
return None
if 'darts_nds' in path:
# wrapping the keys to counter the changes.
res_dict['model'] = darts_nds_map_state_dict_keys_from_non_wsbn_to_wsbn(res_dict['model'])
# reload the running status.
self.running_stats = res_dict['misc']
self.running_stats['epoch'] += 1
logging.info("=" * 80)
logging.info(f"Resume training from epoch {self.epoch}... ")
# logging.info(f"Reload to start from epoch {self.epoch}")
if res_dict:
if hasattr(self.parallel_model, 'module'):
if not any([k.startswith('module') for k in res_dict['model'].keys()]):
mk, uk = self.parallel_model.module.load_state_dict(res_dict['model'], strict=False)
else:
mk, uk = self.parallel_model.load_state_dict(res_dict['model'], strict=False)
else:
mk, uk = self.parallel_model.load_state_dict(res_dict['model'], strict=False)
logging.info('model resumed...')
if len(mk) > 0 and not self.args.debug:
import warnings
warnings.warn("Loading model state dicts error: missing keys {}".format(mk))
self.optimizer.load_state_dict(res_dict['optimizer'])
logging.info(f'optimizer resumed...')
if 'scheduler' in res_dict.keys():
self.scheduler.load_state_dict(res_dict['scheduler'])
logging.info(f'LR scheduler resumed, lr={self.scheduler.get_last_lr()[0]}')
else:
# step to the epoch
logging.info(f'LR scheduler resume to epoch number {self.epoch}')
self.scheduler.step(self.epoch)
if isinstance(self.scheduler, torch.optim.lr_scheduler.CosineAnnealingLR):
self.scheduler.T_max = max(self.args.epochs_lr, self.args.epochs)
self.scheduler.eta_min = self.args.learning_rate_min
logging.info(self.scheduler.__dict__)
else:
# import ipdb; ipdb.set_trace()
# raise NotImplementedError(f"TO support correct resume by override T_max. {self.scheduler}")
logging.warn(f'Do not set the T_max for learning rate scheduler {self.scheduler}')
if 'amp' in res_dict.keys():
self.amp.load_state_dict(res_dict['amp'])
logging.info(f'amp resume')
else:
logging.info("No model file found here. start from scratch from epoch {}".format(self.epoch))
logging.info("=" * 80)
# load eval results.
result_path = os.path.join(path, 'result.json')
if os.path.exists(result_path):
save_data = utils.load_json(result_path)
logging.info(f'loading results {save_data.keys()}.')
for k, v in save_data.items():
self.running_stats[k] = v
# process named tuple.
logging.info("resume the Rank namedtuple")
rp_dict = self.ranking_per_epoch
self.running_stats['ranking_per_epoch'] = OrderedDict()
for k, v in rp_dict.items():
self.ranking_per_epoch[int(k)] = [[i1, Rank(*i2)] for i1, i2 in v]
return res_dict
def save_duplicate_arch_pool(self, prefix, epoch):
f_pool = os.path.join(self.exp_dir, f'{prefix}_arch_pool')
f_perf = os.path.join(self.exp_dir, f'{prefix}_arch_pool.perf')
if os.path.exists(f_pool):
shutil.copy(f_pool, f_pool + '.{}'.format(epoch))
if os.path.exists(f_perf):
shutil.copy(f_perf, f_perf + '.{}'.format(epoch))
def save_arch_pool_performance(self, archs, perfs, prefix='valid'):
old_archs_sorted_indices = np.argsort(perfs)[::-1]
old_archs = [archs[i] for i in old_archs_sorted_indices]
old_archs_perf = [perfs[i] for i in old_archs_sorted_indices]
with open(os.path.join(self.exp_dir, f'{prefix}_arch_pool'), 'w') as fa_latest:
with open(os.path.join(self.exp_dir, f'{prefix}_arch_pool.perf'), 'w') as fp_latest:
for arch_id, perf in zip(old_archs, old_archs_perf):
if isinstance(arch_id, int):
arch = self.search_space.process_archname_by_id(arch_id)
else:
arch = arch_id
fa_latest.write('{}\n'.format(arch))
fp_latest.write('{}\n'.format(perf))
class CNNWarmupSearchPolicy(CNNSearchPolicy):
# def sample_topologies_by_distance(self, num_architectures):
# num_root = num_architectures // 10
# num_arch_per_root = 10 - 1
# ids = []
# distance = self.args.landmark_sample_distance
# for _ in range(num_root):
# mid, spec = self.search_space.random_topology()
# arch = NAOParsingNASBench201.parse_model_spec_to_arch(spec)
# ids.append(mid)
# for _ in range(num_arch_per_root):
# dist, counter = 0, 0
# n_spec = spec
# nid = None
# while dist <= distance and counter < 50:
# counter += 1
# nid, n_spec = self.search_space.mutate_topology(n_spec)
# n_arch = NAOParsingNASBench201.parse_model_spec_to_arch(n_spec)
# dist = hamming_distance([n_arch], [arch])
# if nid:
# logging.debug(f'sample architecture distance {dist}: ({nid}) {n_spec}')
# ids.append(nid)
# logging.debug(f'Sampling landmark by distance: {ids}')
# return list(sorted(ids))
def initialize_misc(self, mode='warmup'):
args = self.args
# initialize path and logger
if not args.continue_train:
self.sub_directory_path = mode or 'warmup'
self.exp_dir = os.path.join(self.args.main_path, self.sub_directory_path)
utils.create_exp_dir(self.exp_dir)
utils.save_json(args, self.exp_dir + '/args.json')
if self.args.visualize:
self.viz_dir_path = utils.create_viz_dir(self.exp_dir)
if self.args.tensorboard:
self.tb_dir = self.exp_dir
tboard_dir = os.path.join(self.args.tboard_dir, self.sub_directory_path)
self.writer = SummaryWriter(tboard_dir)
# Set logger and directory.
self.logger = utils.get_logger(
"train_search",
file_handler=utils.get_file_handler(os.path.join(self.exp_dir, 'log.txt')),
level=logging.INFO if not args.debug else logging.DEBUG
)
def run(self):
"""
Procedure of training. This run describes the | |
import asyncio
import functools
import collections
import locale
import threading
from contextlib import contextmanager
__all__ = (
"with_timeout",
"StreamIO",
"Throttle",
"StreamThrottle",
"ThrottleStreamIO",
"END_OF_LINE",
"DEFAULT_BLOCK_SIZE",
"wrap_with_container",
"AsyncStreamIterator",
"AbstractAsyncLister",
"AsyncListerMixin",
"async_enterable",
"DEFAULT_PORT",
"DEFAULT_USER",
"DEFAULT_PASSWORD",
"DEFAULT_ACCOUNT",
"setlocale",
)
END_OF_LINE = "\r\n"
DEFAULT_BLOCK_SIZE = 8192
DEFAULT_PORT = 21
DEFAULT_USER = "anonymous"
DEFAULT_PASSWORD = "<PASSWORD>@"
DEFAULT_ACCOUNT = ""
def _with_timeout(name):
def decorator(f):
@functools.wraps(f)
def wrapper(cls, *args, **kwargs):
coro = f(cls, *args, **kwargs)
timeout = getattr(cls, name)
return asyncio.wait_for(coro, timeout, loop=cls.loop)
return wrapper
return decorator
def with_timeout(name):
"""
    Method decorator, wraps method with :py:func:`asyncio.wait_for`. The
    timeout value is taken from the attribute named by the `name` decorator
    argument (defaults to "timeout").
:param name: name of timeout attribute
:type name: :py:class:`str`
:raises asyncio.TimeoutError: if coroutine does not finished in timeout
Wait for `self.timeout`
::
>>> def __init__(self, ...):
...
... self.timeout = 1
...
... @with_timeout
... async def foo(self, ...):
...
... pass
Wait for custom timeout
::
>>> def __init__(self, ...):
...
... self.foo_timeout = 1
...
... @with_timeout("foo_timeout")
... async def foo(self, ...):
...
... pass
"""
if isinstance(name, str):
return _with_timeout(name)
else:
return _with_timeout("timeout")(name)
class AsyncStreamIterator:
def __init__(self, read_coro):
self.read_coro = read_coro
async def __aiter__(self):
return self
async def __anext__(self):
data = await self.read_coro()
if data:
return data
else:
raise StopAsyncIteration
class AsyncListerMixin:
"""
    Adds the ability for an `async for` context to collect its data into a
    list via `await`.
::
>>> class Context(AsyncListerMixin):
... ...
>>> results = await Context(...)
"""
async def _to_list(self):
items = []
async for item in self:
items.append(item)
return items
def __await__(self):
return self._to_list().__await__()
class AbstractAsyncLister(AsyncListerMixin):
"""
Abstract context with ability to collect all iterables into
:py:class:`list` via `await` with optional timeout (via
:py:func:`aioftp.with_timeout`)
:param timeout: timeout for __aiter__, __anext__ operations
:type timeout: :py:class:`None`, :py:class:`int` or :py:class:`float`
:param loop: loop to use for timeouts
:type loop: :py:class:`asyncio.BaseEventLoop`
::
>>> class Lister(AbstractAsyncLister):
...
... @with_timeout
... async def __aiter__(self):
... ...
... @with_timeout
... async def __anext__(self):
... ...
::
>>> async for block in Lister(...):
... ...
::
>>> result = await Lister(...)
>>> result
[block, block, block, ...]
"""
def __init__(self, *, timeout=None, loop=None):
self.timeout = timeout
self.loop = loop or asyncio.get_event_loop()
@with_timeout
async def __aiter__(self):
raise NotImplementedError
@with_timeout
async def __anext__(self):
raise NotImplementedError
def async_enterable(f):
"""
    Decorator. Brings the coroutine result up, so it can be used as an async
    context manager.
::
>>> async def foo():
...
... ...
... return AsyncContextInstance(...)
...
... ctx = await foo()
... async with ctx:
...
... # do
::
>>> @async_enterable
... async def foo():
...
... ...
... return AsyncContextInstance(...)
...
... async with foo() as ctx:
...
... # do
...
... ctx = await foo()
... async with ctx:
...
... # do
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
class AsyncEnterableInstance:
async def __aenter__(self):
self.context = await f(*args, **kwargs)
return await self.context.__aenter__()
async def __aexit__(self, *args, **kwargs):
await self.context.__aexit__(*args, **kwargs)
def __await__(self):
return f(*args, **kwargs).__await__()
return AsyncEnterableInstance()
return wrapper
def wrap_with_container(o):
if isinstance(o, str):
o = (o,)
return o
class StreamIO:
"""
Stream input/output wrapper with timeout.
:param reader: stream reader
:type reader: :py:class:`asyncio.StreamReader`
:param writer: stream writer
:type writer: :py:class:`asyncio.StreamWriter`
:param timeout: socket timeout for read/write operations
:type timeout: :py:class:`int`, :py:class:`float` or :py:class:`None`
:param read_timeout: socket timeout for read operations, overrides
`timeout`
:type read_timeout: :py:class:`int`, :py:class:`float` or :py:class:`None`
:param write_timeout: socket timeout for write operations, overrides
`timeout`
:type write_timeout: :py:class:`int`, :py:class:`float` or :py:class:`None`
:param loop: loop to use for creating connection and binding with streams
:type loop: :py:class:`asyncio.BaseEventLoop`
"""
def __init__(self, reader, writer, *, timeout=None, read_timeout=None,
write_timeout=None, loop=None):
self.reader = reader
self.writer = writer
self.read_timeout = read_timeout or timeout
self.write_timeout = write_timeout or timeout
self.loop = loop or asyncio.get_event_loop()
@with_timeout("read_timeout")
async def readline(self):
"""
:py:func:`asyncio.coroutine`
Proxy for :py:meth:`asyncio.StreamReader.readline`.
"""
return await self.reader.readline()
@with_timeout("read_timeout")
async def read(self, count=-1):
"""
:py:func:`asyncio.coroutine`
Proxy for :py:meth:`asyncio.StreamReader.read`.
:param count: block size for read operation
:type count: :py:class:`int`
"""
return await self.reader.read(count)
@with_timeout("write_timeout")
async def write(self, data):
"""
:py:func:`asyncio.coroutine`
Combination of :py:meth:`asyncio.StreamWriter.write` and
:py:meth:`asyncio.StreamWriter.drain`.
:param data: data to write
:type data: :py:class:`bytes`
"""
self.writer.write(data)
await self.writer.drain()
def close(self):
"""
Close connection.
"""
self.writer.close()
class Throttle:
"""
Throttle for streams.
:param loop: loop to use
:type loop: :py:class:`asyncio.BaseEventLoop`
:param limit: speed limit in bytes or :py:class:`None` for unlimited
:type limit: :py:class:`int` or :py:class:`None`
:param reset_rate: time in seconds for «round» throttle memory (to deal
        with float precision when dividing)
:type reset_rate: :py:class:`int` or :py:class:`float`
"""
def __init__(self, *, loop=None, limit=None, reset_rate=10):
self.loop = loop or asyncio.get_event_loop()
self._limit = limit
self.reset_rate = reset_rate
self._start = None
self._sum = 0
async def wait(self):
"""
:py:func:`asyncio.coroutine`
Wait until can do IO
"""
if self._limit is not None and self._limit > 0 and \
self._start is not None:
now = self.loop.time()
end = self._start + self._sum / self._limit
await asyncio.sleep(max(0, end - now), loop=self.loop)
def append(self, data, start):
"""
Count `data` for throttle
:param data: bytes of data for count
:type data: :py:class:`bytes`
:param start: start of read/write time from
:py:meth:`asyncio.BaseEventLoop.time`
:type start: :py:class:`float`
"""
if self._limit is not None and self._limit > 0:
if self._start is None:
self._start = start
if start - self._start > self.reset_rate:
self._sum -= round((start - self._start) * self._limit)
self._start = start
self._sum += len(data)
@property
def limit(self):
"""
Throttle limit
"""
return self._limit
@limit.setter
def limit(self, value):
"""
Set throttle limit
:param value: bytes per second
:type value: :py:class:`int` or :py:class:`None`
"""
self._limit = value
self._start = None
self._sum = 0
def clone(self):
"""
Clone throttle without memory
"""
return Throttle(
loop=self.loop,
limit=self._limit,
reset_rate=self.reset_rate
)
def __repr__(self):
return "{}(loop={!r}, limit={!r}, reset_rate={!r})".format(
self.__class__.__name__,
self.loop,
self._limit,
self.reset_rate
)
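# Illustrative note (not part of the original module; the numbers are made up):
# with ``limit=1024`` bytes/s, calling ``append(b'x' * 2048, t0)`` and then
# ``wait()`` right away sleeps roughly 2 seconds, because the projected end
# time is ``_start + _sum / _limit``.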
class StreamThrottle(collections.namedtuple("StreamThrottle", "read write")):
"""
Stream throttle with `read` and `write` :py:class:`aioftp.Throttle`
:param read: stream read throttle
:type read: :py:class:`aioftp.Throttle`
:param write: stream write throttle
:type write: :py:class:`aioftp.Throttle`
"""
def clone(self):
"""
Clone throttles without memory
"""
return StreamThrottle(
read=self.read.clone(),
write=self.write.clone()
)
@classmethod
def from_limits(cls, read_speed_limit=None, write_speed_limit=None, *,
loop=None):
"""
Simple wrapper for creation :py:class:`aioftp.StreamThrottle`
:param read_speed_limit: stream read speed limit in bytes or
:py:class:`None` for unlimited
:type read_speed_limit: :py:class:`int` or :py:class:`None`
:param write_speed_limit: stream write speed limit in bytes or
:py:class:`None` for unlimited
:type write_speed_limit: :py:class:`int` or :py:class:`None`
:param loop: loop to use
:type loop: :py:class:`asyncio.BaseEventLoop`
"""
loop = loop or asyncio.get_event_loop()
return cls(
read=Throttle(
loop=loop,
limit=read_speed_limit
),
write=Throttle(
loop=loop,
limit=write_speed_limit
),
)
class ThrottleStreamIO(StreamIO):
"""
Throttled :py:class:`aioftp.StreamIO`. `ThrottleStreamIO` is subclass of
:py:class:`aioftp.StreamIO`. `throttles` attribute is dictionary of `name`:
:py:class:`aioftp.StreamThrottle` pairs
:param *args: positional arguments for :py:class:`aioftp.StreamIO`
:param **kwargs: keyword arguments for :py:class:`aioftp.StreamIO`
:param throttles: dictionary of throttles
:type throttles: :py:class:`dict` with :py:class:`aioftp.Throttle` values
::
>>> self.stream = ThrottleStreamIO(
... reader,
... writer,
... throttles={
... "main": StreamThrottle(
... read=Throttle(...),
... write=Throttle(...)
... )
... },
... timeout=timeout,
... loop=loop
... )
"""
def __init__(self, *args, throttles={}, **kwargs):
super().__init__(*args, **kwargs)
self.throttles = throttles
async def wait(self, name):
"""
:py:func:`asyncio.coroutine`
Wait for all throttles
:param name: name of throttle to acquire ("read" or "write")
:type name: :py:class:`str`
"""
waiters = []
for throttle in self.throttles.values():
curr_throttle = getattr(throttle, name)
if curr_throttle.limit:
waiters.append(curr_throttle.wait())
if waiters:
await asyncio.wait(waiters, loop=self.loop)
def append(self, name, data, start):
"""
Update timeout for all throttles
:param name: name of throttle to append to ("read" or "write")
:type name: :py:class:`str`
:param data: bytes of data for count
:type data: :py:class:`bytes`
:param start: start of read/write time from
:py:meth:`asyncio.BaseEventLoop.time`
:type start: :py:class:`float`
"""
for throttle in self.throttles.values():
getattr(throttle, name).append(data, start)
async def read(self, count=-1):
"""
:py:func:`asyncio.coroutine`
:py:meth:`aioftp.StreamIO.read` proxy
"""
await self.wait("read")
start = self.loop.time()
data = await super().read(count)
self.append("read", data, start)
return data
async def readline(self):
"""
:py:func:`asyncio.coroutine`
:py:meth:`aioftp.StreamIO.readline` proxy
"""
await self.wait("read")
start = self.loop.time()
data = await super().readline()
self.append("read", data, start)
return data
async def write(self, data):
"""
:py:func:`asyncio.coroutine`
:py:meth:`aioftp.StreamIO.write` proxy
"""
await self.wait("write")
start = self.loop.time()
await super().write(data)
self.append("write", data, start)
async def __aenter__(self):
return self
async def __aexit__(self, *args):
self.close()
def iter_by_line(self):
"""
Read/iterate stream by line.
:rtype: :py:class:`aioftp.AsyncStreamIterator`
::
>>> async for line in stream.iter_by_line():
... ...
"""
return AsyncStreamIterator(self.readline)
def iter_by_block(self, count=DEFAULT_BLOCK_SIZE):
"""
Read/iterate stream by block.
:rtype: :py:class:`aioftp.AsyncStreamIterator`
::
>>> async for | |
z_range[1], nsim )
# Draw a random redshift from a uniform distribution
self.z = uniform( low=z_range[0], high=z_range[1], size=nsim )
lightcurvelist = []
peakabsmagRlist = []
modelparamlist = []
subclasslist = []
modelindexlist = []
sourcenamelist = []
t0list = []
if sntype=='Ia':
x0list = []
x1list = []
clist = []
else :
amplitudelist = []
for isim in range(self.nsim):
# Randomly draw an sncosmo model from the available list, according to
# the predefined probability list, setting the SN sub-class for this
# simulated SN
imodel = choice( np.arange(len(modelset)), replace=True, p=self.SourceprobSet )
model = modelset[imodel]
subclass = self.SubclassSet[imodel]
z = self.z[isim]
EBV = self.EBV[isim]
Rv = self.Rv[isim]
# Set the peak absolute magnitude according to the observed
# luminosity functions, as defined in Table 3 of Graur:2014a;
# and set the host extinction according to the 'mid' dust model
# of Rodney:2014a.
if subclass == 'Ia' :
MR = normal( -19.37, 0.47 )
elif subclass == 'Ib' :
MR = normal( -17.90, 0.90 )
elif subclass == 'Ic' :
MR = normal( -18.30, 0.60 )
elif subclass == 'IIP' :
MR = normal( -16.56, 0.80 )
elif subclass == 'IIL' :
MR = normal( -17.66, 0.42 )
elif subclass == 'IIn' :
MR = normal( -18.25, 1.00 )
model.set(z=z)
model.set_source_peakabsmag( MR, 'bessellr', 'vega', cosmo=self.cosmo)
modelindexlist.append( imodel )
subclasslist.append( subclass )
peakabsmagRlist.append( MR )
sourcenamelist.append( self.SourcenameSet[imodel] )
if subclass =='Ia' :
x0 = model.get('x0')
# TODO : use bifurcated gaussians for more realistic x1,c dist'ns
x1 = normal(0., 1.)
c = normal(0., 0.1)
t0 = uniform( t0_range[0], t0_range[1] )
modelparams = {'z':z, 't0':t0, 'x0':x0, 'x1':x1, 'c':c, 'hostebv':EBV, 'hostr_v':Rv}
t0list.append( t0 )
x0list.append( x0 )
x1list.append( x1 )
clist.append( c )
else :
amplitude = model.get('amplitude')
t0 = uniform( t0_range[0], t0_range[1] )
modelparams = {'z':z, 't0':t0, 'amplitude':amplitude, 'hostebv':EBV, 'hostr_v':Rv }
amplitudelist.append( amplitude )
t0list.append( t0 )
modelparamlist.append( modelparams )
# Generate one simulated SN:
snlc = sncosmo.realize_lcs(self.observations, model, [ modelparams ],
thresh=None)#, perfect=perfect )
lightcurvelist.append( snlc[0] )
self.lightcurves = lightcurvelist
self.t0 = np.array( t0list )
self.modelindex = np.array( modelindexlist )
self.sourcename = np.array( sourcenamelist )
self.subclass = np.array( subclasslist )
self.modelparam = np.array( modelparamlist )
self.peakabsmagR = np.array( peakabsmagRlist )
if sntype=='Ia':
self.x0 = np.array( x0list )
self.x1 = np.array( x1list )
self.c = np.array( clist )
else :
self.amplitude = np.array( amplitudelist )
return
def scumsum( a ):
"""
Sorted Cumulative Sum function :
Construct an array "sumabove" such that the cell at index i in sumabove
is equal to the sum of all cells from the input array "a" that have a
cell value higher than a[i]
"""
# Collapse the array into 1 dimension
sumabove = a.ravel()
# Sort the raveled array by descending cell value
iravelsorted = sumabove.argsort( axis=0 )[::-1]
# Reassign each cell to be the cumulative sum of all
# input array cells with a higher value :
sumabove[iravelsorted] = sumabove[iravelsorted].cumsum()
# Now unravel back into shape of original array and return
return( sumabove.reshape( a.shape ) )
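# Worked example (not part of the original module): for a normalized 2x2
# histogram, the cell holding the largest probability maps to its own value and
# the smallest cell maps to 1.0, which is what lets contourf draw
# "fraction of the population enclosed" levels below.
#
#   >>> scumsum(np.array([[0.1, 0.4], [0.3, 0.2]]))
#   # -> [[1.0, 0.4], [0.7, 0.9]]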
def sncosmo_sim( snroot='nebra', z_range=[1.4,2.3], t0_range=[-20,20],
filterset='hst',nsim=1000, verbose=True, clobber=False ):
""" Run sncosmo simulations for a color-color figure for SN Nebra
"""
import os
import cPickle
simIapkl='%s_SncosmoSim_Ia.pkl'%snroot
simIIpkl='%s_SncosmoSim_II.pkl'%snroot
simIbcpkl='%s_SncosmoSim_Ibc.pkl'%snroot
if os.path.isfile( simIapkl ) and not clobber>1 :
if verbose: print("Loading Ia simulation from pickle : %s"%simIapkl)
fin = open( simIapkl, 'rb' )
simIa = cPickle.load( fin )
fin.close()
else :
if verbose: print("Running a new Ia simulation, then saving to pickle : %s"%simIapkl)
simIa = SncosmoSim( 'Ia' , z_range=z_range, t0_range=t0_range, nsim=nsim, filterset=filterset )
fout = open( simIapkl, 'wb' )
cPickle.dump( simIa, fout, protocol=-1 )
fout.close()
if os.path.isfile( simIIpkl ) and not clobber>1 :
if verbose: print("Loading II simulation from pickle : %s"%simIIpkl)
fin = open( simIIpkl, 'rb' )
simII = cPickle.load(fin)
fin.close()
else :
if verbose: print("Running a new II simulation, then saving to pickle : %s"%simIIpkl)
simII = SncosmoSim( 'II' , z_range=z_range, t0_range=t0_range, nsim=nsim, filterset=filterset )
fout = open( simIIpkl, 'wb' )
cPickle.dump( simII, fout, protocol=-1 )
fout.close()
if os.path.isfile( simIbcpkl ) and not clobber>1 :
if verbose: print("Loading Ibc simulation from pickle : %s"%simIbcpkl)
fin = open( simIbcpkl, 'rb' )
simIbc = cPickle.load(fin)
fin.close()
else :
if verbose: print("Running a new Ibc simulation, then saving to pickle : %s"%simIbcpkl)
simIbc = SncosmoSim( 'Ibc' , z_range=z_range, t0_range=t0_range, nsim=nsim, filterset=filterset )
fout = open( simIbcpkl, 'wb' )
cPickle.dump( simIbc, fout, protocol=-1 )
fout.close()
return( simIa, simII, simIbc )
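# Illustrative usage sketch (not part of the original module; the keyword
# values are made up): build or reload the three cached simulations and
# overlay their color-color contours with ``plotcontours`` defined below.
#
#   simIa, simII, simIbc = sncosmo_sim(snroot='nebra', nsim=500)
#   ax = plotcontours(simIa, simII, simIbc,
#                     band1='f350lp', band2='f125w', band3='f160w')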
def _plot_colorcolor_singlesim(snsim, band1, band2, band3,
plotstyle='points',
nbins=None, **plotargs):
""" plot a color-color diagram
:param snsim:
:return:
"""
import numpy as np
from matplotlib import pyplot as pl, cm, ticker
igood = np.where([np.all(snlc['flux']>0) for snlc in snsim.lightcurves])[0]
# ibad = np.where([np.any(snlc['flux']<=0) for snlc in snsim.lightcurves])[0]
lclist = [snsim.lightcurves[i] for i in igood]
mag = np.array([
-2.5*np.log10( np.ma.masked_less_equal(snlc['flux'],0,copy=False) )\
+ snlc['zp'] for snlc in lclist ])
flt = np.array( [snlc['band'] for snlc in lclist] )
i1 = np.where((flt == band1))
i2 = np.where((flt == band2))
i3 = np.where((flt == band3))
ax = pl.gca()
if plotstyle=='points':
plotargfinal = {'marker':'o', 'alpha':0.3, 'color':'darkorange', 'ls':' '}
plotargfinal.update( **plotargs )
ax.plot( mag[i1]-mag[i2], mag[i2]-mag[i3], **plotargfinal )
elif plotstyle.startswith('contour') or plotstyle=='gradient':
xarray = mag[i2]-mag[i3]
nsim = len(xarray)
if nbins is None : nbins = int( np.sqrt( nsim ) )
if plotstyle.startswith('contour'):
plotargfinal = {'levels':[0.0,0.68,0.95],'colors':['r','g','b'],
'ls':'-','alpha':0.5, 'extend':'neither'}
else :
plotargfinal = {'levels':np.arange(0.68,0.99,0.01),
'cmap':cm.Greys,'ls':'-', 'alpha':0.5,
'extend':'neither'}
plotargfinal.update( **plotargs )
# Plot filled contours, showing the full extent of the population,
# and contour lines containing 68% of the population.
# First, bin the points into a 2-d histogram:
# (Note that we reverse the x-y order here to get the binned arrays
# plotted in the correct direction )
count,y,x = np.histogram2d( mag[i2]-mag[i3],mag[i1]-mag[i2],
bins=nbins, range=__CONTOUR_RANGE__ )
# Renormalize relative to the sum of all SNe in this class :
count /= count.sum()
# Now set up an array 'cabove' such that the cell value in cabove[i,j]
# is equal to the sum of all cells that have a value higher than c[i,j]
cabove = scumsum( count )
# solid lines give probability contours at specified levels
# (defaults to 0.68 for "1-sigma contours")
#ax.contour( x[:-1], y[:-1], cabove, **plotargfinal )
ax.contourf( x[:-1], y[:-1], cabove, **plotargfinal )
ax.set_xlabel( '%s - %s' % (band1.upper(), band2.upper()))
ax.set_ylabel( '%s - %s' % (band2.upper(), band3.upper()))
#ax.xaxis.set_major_locator( ticker.MultipleLocator( 0.1 ) )
#ax.xaxis.set_minor_locator( ticker.MultipleLocator( 0.05 ) )
#ax.yaxis.set_major_locator( ticker.MultipleLocator( 0.1 ) )
#ax.yaxis.set_minor_locator( ticker.MultipleLocator( 0.05 ) )
return( ax )
def plotcontours( sim1, sim2, sim3=None, band1='f350lp', band2='f125w',
band3='f160w', nbins=None, **plotargs ):
""" Make a circle diagram, i.e. a med-wide band pseudo-color-color plot,
showing both Type Ia and CC simulations over the given redshift range.
:param snsim:
:return:
"""
plotargs1 = { 'levels':[0.,0.68,0.95], 'colors':[_COLOR1], 'alpha':0.3 }
plotargs1.update( **plotargs )
plotargs2 = { 'levels':[0.,0.68,0.95], 'colors':[_COLOR2], 'alpha':0.3 }
plotargs2.update( **plotargs )
plotargs3 = { 'levels':[0.,0.68,0.95], 'colors':[_COLOR3], 'alpha':0.3 }
plotargs3.update( **plotargs )
ax = _plot_colorcolor_singlesim(sim1, band1, band2, band3, nbins=nbins, plotstyle='contourf', **plotargs1)
ax = _plot_colorcolor_singlesim(sim2, band1, band2, band3, nbins=nbins, plotstyle='contourf', **plotargs2)
if sim3 is not None :
ax = _plot_colorcolor_singlesim(sim3, band1, band2, band3, nbins=nbins, plotstyle='contourf', **plotargs3)
return( ax )
def plotgradient( sim1, sim2, sim3=None, band1='f350lp', band2='f125w',
band3='f160w', nbins=None, **plotargs ):
""" Make a circle diagram, i.e. a med-wide band pseudo-color-color plot,
showing both Type Ia and CC simulations over the given redshift range.
:param snsim:
:return:
"""
plotargs1 = { 'levels':[0.,0.68,0.95], 'colors':[_COLOR1], 'alpha':0.3 }
plotargs1.update( **plotargs )
plotargs2 = { 'levels':[0.,0.68,0.95], 'colors':[_COLOR2], 'alpha':0.3 }
plotargs2.update( **plotargs )
plotargs3 = { 'levels':[0.,0.68,0.95], 'colors':[_COLOR3], 'alpha':0.3 }
plotargs3.update( **plotargs )
ax = _plot_colorcolor_singlesim(sim1, band1, band2, band3, nbins=nbins, plotstyle='gradient', **plotargs1)
ax = _plot_colorcolor_singlesim(sim2, band1, band2, band3, nbins=nbins, plotstyle='gradient', **plotargs2)
if sim3 is not None :
ax = _plot_colorcolor_singlesim(sim3, band1, band2, band3, nbins=nbins, plotstyle='gradient', **plotargs3)
return( ax )
def plotpoints(sim1, sim2, sim3=None, band1='f350lp', band2='f125w',
band3='f160w', **plotargs):
""" Make a circle diagram, i.e. a med-wide band pseudo-color-color plot,
showing both Type Ia and CC simulations over the given redshift range.
:param snsim:
:return:
"""
| |
    @application(inputs=['input_image', 'states', 'cells'],
                 outputs=['output_policy', 'states', 'cells'])
def apply_policy(self, input_image, states, cells):
h, c = self.lstm_block.apply(inputs=self.linear_to_lstm.apply(
self.shared_a3c.apply(input_image)),
states=states, cells=cells)
h = h.sum(axis=1)
c = c.sum(axis=1)
output_policy = self.policy.apply(h)
return output_policy, h, c
@application(inputs=['input_image', 'states', 'cells'],
outputs=['output_value'])
def apply_value(self, input_image, states, cells):
h, c = self.lstm_block.apply(inputs=self.linear_to_lstm.apply(
self.shared_a3c.apply(input_image)),
states=states, cells=cells)
h = h.sum(axis=1)
c = c.sum(axis=1)
output_value = self.value.apply(h)
return output_value
@application(inputs=['input_image', 'input_actions', 'input_reward',
'states', 'cells'],
outputs=['total_error'])
def cost(self, input_image, input_actions, input_reward, states, cells):
h, c = self.lstm_block.apply(inputs=self.linear_to_lstm.apply(
self.shared_a3c.apply(input_image)),
states=states, cells=cells)
h = h.sum(axis=1)
c = c.sum(axis=1)
p_value = self.policy.apply(h)
log_prob = T.log(T.sum((p_value) * input_actions,
axis=1, keepdims=True))
v_value = self.value.apply(h)
p_loss = -log_prob * theano.gradient.disconnected_grad(
input_reward[:, None] - v_value)
entropy = -T.sum(p_value * T.log(p_value), axis=1,
keepdims=True)
        # encourage action diversity by subtracting entropy
p_loss = p_loss - self.beta * entropy
v_loss = T.sqr(input_reward[:, None] - v_value)
total_error = T.mean(p_loss + (0.5 * v_loss))
return total_error
def build_a3c_network(feature_maps=[16, 32],
conv_sizes=[8, 4],
pool_sizes=[4, 2],
# FIXME: used image_shape elsewhere
image_size=(80, 80),
step_size=[4, 2],
num_channels=10,
mlp_hiddens=[256],
num_actions=10,
lr=0.00025,
clip_c=0.8,
border_mode='full',
async_update=False):
""" Builds the agent networks/functions
Parameters:
-----------
    feature_maps : list of int
        number of feature maps (filters) at each convolutional layer
    conv_sizes: list of int # FIXME: change the name
        width/height of the (square) convolution filters at each layer
pooling sizes: list of int # FIXME: not used
size of the pooling layer. One element per convolutional layer
image_size : list of int
width and height shape of the resized image
step_size: list of int
typically called stride
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
mlp_hiddens: list of int
size of the output layer of the hidden layers. One element per
hidden layer.
num_actions: int
number of actions of the Actor (output of the policy network)
lr : float
learning rate of async rmsprop
clip_c : float
> 0 if gradient should be clipped. FIXME: actually not used
border_mode : str
full or valid are accepted by Blocks. Full will be usually employed.
async_update: bool
true if the network to be created is the shared worker or False if
it is just a worker.
"""
# Activation functions
conv_activations = [Rectifier() for _ in feature_maps]
mlp_activations = [Rectifier() for _ in mlp_hiddens]
conv_subsample = [[step, step] for step in step_size]
policy_and_value_net = PolicyAndValueA3C(
conv_activations,
num_channels,
image_size,
filter_sizes=zip(conv_sizes, conv_sizes),
feature_maps=feature_maps,
pooling_sizes=zip(pool_sizes, pool_sizes),
mlp_hiddens=mlp_hiddens,
number_actions=num_actions,
mlp_activations=mlp_activations,
conv_step=conv_subsample,
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(.0))
# We push initialization config to set different initialization schemes
# for convolutional layers.
policy_and_value_net.shared_a3c.push_initialization_config()
policy_and_value_net.push_initialization_config()
# Xavier initialization
for i in range(len(policy_and_value_net.shared_a3c.layers)):
if i == 0:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((image_size[0] *
image_size[1] *
num_channels)))
else:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((conv_sizes[(i-1)/2] *
conv_sizes[(i-1)/2] *
feature_maps[(i-1)/2])))
        policy_and_value_net.shared_a3c.layers[i].biases_init = Constant(.1)
for i in range(len(policy_and_value_net.shared_a3c.
top_mlp.linear_transformations)):
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
i].weights_init = Uniform(std=1.0/np.sqrt((conv_sizes[-1] *
conv_sizes[-1] *
feature_maps[-1])))
        policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
            i].biases_init = Constant(.0)
policy_and_value_net.policy.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.value.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.shared_a3c.initialize()
policy_and_value_net.initialize()
logging.info("Input dim: {} {} {}".format(
*policy_and_value_net.shared_a3c.children[0].get_dim('input_')))
for i, layer in enumerate(policy_and_value_net.shared_a3c.layers):
if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(
i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
th_input_image = T.tensor4('input_image')
th_reward = T.fvector('input_reward')
th_actions = T.imatrix('input_actions')
policy_network = policy_and_value_net.apply_policy(th_input_image)
value_network = policy_and_value_net.apply_value(th_input_image)
cost_network = policy_and_value_net.cost(th_input_image, th_actions,
th_reward)
# FIXME: added for debug, remove
extracost_network = policy_and_value_net.extra_cost(th_input_image,
th_actions,
th_reward) # DEBUG
cg_policy = ComputationGraph(policy_network)
cg_value = ComputationGraph(value_network)
# Perform some optimization step
cg = ComputationGraph(cost_network)
# FIXME: Remove
cg_extra = ComputationGraph(extracost_network) # DEBUG
# Print shapes of network parameters
shapes = [param.get_value().shape for param in cg.parameters]
logger.info("Parameter shapes: ")
for shape, count in Counter(shapes).most_common():
logger.info(' {:15}: {}'.format(shape, count))
logger.info("Total number of parameters: {}".format(len(shapes)))
# Set up training algorithm
logger.info("Initializing training algorithm")
cost_model = Model(cost_network)
value_model = Model(value_network)
if not async_update:
        # A threaded worker: steepest gradient descent
# A trick was done here to reuse existent bricks. The system performed
# steepest descent to aggregate the gradients. However, the gradients
# are averaged in a minibatch (instead of being just added). Therefore,
# the agent is going to perform the following operations in each
# minibatch:
        # 1) steepest descent with a learning rate of 1 to only aggregate the
# gradients.
# 2) undo the update operation to obtain the avg. gradient :
# gradient = parameter_before_minibatch - parameter_after_minibatch
# 3) Multiply the gradient by the length of the minibatch to obtain the
# exact gradient at each minibatch.
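        # Illustrative sketch of the gradient recovery done outside this
        # function (variable names here are hypothetical, not part of the code):
        #   grad_avg = params_before_minibatch - params_after_minibatch  # lr == 1
        #   grad_sum = grad_avg * minibatch_length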
algorithm = GradientDescent(
cost=cost_network, parameters=cg.parameters,
step_rule=Scale())
else:
# Async update for the shared worker
# The other part of the trick. A custom optimization block was
# developed
# here to receive as inputs the acc. gradients at each worker
algorithm = AsyncUpdate(parameters=cg.parameters,
inputs=cost_model.get_parameter_dict().keys(),
step_rule=AsyncRMSProp(learning_rate=lr,
# FIXME: put as
# parameter
decay_rate=0.99,
max_scaling=10))
algorithm.initialize()
f_cost = theano.function(inputs=cg.inputs, outputs=cg.outputs)
f_policy = theano.function(inputs=cg_policy.inputs,
outputs=cg_policy.outputs)
f_value = theano.function(inputs=cg_value.inputs, outputs=cg_value.outputs)
# f_extracost = theano.function(inputs=cg_extra.inputs,
# outputs=cg_extra.outputs)
return cost_model, f_policy, f_value, algorithm, f_cost
def build_a3c_network_lstm(feature_maps=[16, 32],
conv_sizes=[8, 4],
pool_sizes=[4, 2],
# FIXME: used image_shape elsewhere
image_size=(80, 80),
step_size=[4, 2],
num_channels=10,
mlp_hiddens=[256],
lstm_output_units=256,
num_actions=10,
lr=0.00025,
clip_c=0.8,
border_mode='full',
async_update=False):
""" Builds the agent networks/functions
Parameters:
-----------
    feature_maps : list of int
        number of feature maps (filters) at each convolutional layer
    conv_sizes: list of int # FIXME: change the name
        width/height of the (square) convolution filters at each layer
pooling sizes: list of int # FIXME: not used
size of the pooling layer. One element per convolutional layer
image_size : list of int
width and height shape of the resized image
step_size: list of int
typically called stride
num_channels : int
input channels in the first convolution layer. It is the number
of historic frames used as the input state of the agent.
mlp_hiddens: list of int
size of the output layer of the hidden layers. One element per
hidden layer.
lstm_output_units: int
number of units in the lstm output
num_actions: int
number of actions of the Actor (output of the policy network)
lr : float
learning rate of async rmsprop
clip_c : float
> 0 if gradient should be clipped. FIXME: actually not used
border_mode : str
full or valid are accepted by Blocks. Full will be usually employed.
async_update: bool
true if the network to be created is the shared worker or False if
it is just a worker.
"""
# Activation functions
conv_activations = [Rectifier() for _ in feature_maps]
mlp_activations = [Rectifier() for _ in mlp_hiddens]
conv_subsample = [[step, step] for step in step_size]
policy_and_value_net = PolicyAndValueA3CLSTM(
conv_activations,
num_channels,
image_size,
filter_sizes=zip(conv_sizes, conv_sizes),
feature_maps=feature_maps,
pooling_sizes=zip(pool_sizes, pool_sizes),
mlp_hiddens=mlp_hiddens,
lstm_output_units=lstm_output_units,
number_actions=num_actions,
mlp_activations=mlp_activations,
conv_step=conv_subsample,
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(.0))
# We push initialization config to set different initialization schemes
# for convolutional layers.
policy_and_value_net.shared_a3c.push_initialization_config()
policy_and_value_net.push_initialization_config()
# Xavier initialization
for i in range(len(policy_and_value_net.shared_a3c.layers)):
if i == 0:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((image_size[0] *
image_size[1] *
num_channels)))
else:
policy_and_value_net.shared_a3c.layers[i].weights_init = Uniform(
std=1.0/np.sqrt((conv_sizes[(i-1)/2] *
conv_sizes[(i-1)/2] *
feature_maps[(i-1)/2])))
        policy_and_value_net.shared_a3c.layers[i].biases_init = Constant(.1)
for i in range(len(policy_and_value_net.shared_a3c.
top_mlp.linear_transformations)):
policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
i].weights_init = Uniform(std=1.0/np.sqrt((conv_sizes[-1] *
conv_sizes[-1] *
feature_maps[-1])))
        policy_and_value_net.shared_a3c.top_mlp.linear_transformations[
            i].biases_init = Constant(.0)
policy_and_value_net.linear_to_lstm.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.linear_to_lstm.biases_init = Constant(.0)
policy_and_value_net.linear_to_lstm.initialize()
policy_and_value_net.lstm_block.weights_init = Uniform(
std=1.0/np.sqrt(mlp_hiddens[-1]))
policy_and_value_net.lstm_block.biases_init = Constant(.0)
policy_and_value_net.lstm_block.initialize()
policy_and_value_net.policy.weights_init = Uniform(
std=1.0/np.sqrt(lstm_output_units))
policy_and_value_net.value.weights_init = Uniform(
std=1.0/np.sqrt(lstm_output_units))
policy_and_value_net.shared_a3c.initialize()
policy_and_value_net.initialize()
logging.info("Input dim: {} {} {}".format(
*policy_and_value_net.shared_a3c.children[0].get_dim('input_')))
for i, layer in enumerate(policy_and_value_net.shared_a3c.layers):
if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(
i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
th_input_image = T.tensor4('input_image')
th_reward = T.fvector('input_reward')
th_actions = T.imatrix('input_actions')
th_states = T.matrix('states')
th_cells = T.matrix('cells')
policy_network = policy_and_value_net.apply_policy(th_input_image,
th_states,
th_cells)
value_network = policy_and_value_net.apply_value(th_input_image,
th_states,
th_cells)
cost_network = policy_and_value_net.cost(th_input_image, th_actions,
th_reward, th_states,
th_cells)
cg_policy = ComputationGraph(policy_network)
cg_value = ComputationGraph(value_network)
print "POLICY INPUTS ", cg_policy.inputs
print "VALUE INPUTS ", cg_value.inputs
print "POLICY OUTPUTS ", cg_policy.outputs
print "VALUE OUTPUTS ", cg_value.outputs
# Perform some optimization step
cg = ComputationGraph(cost_network)
# Print shapes of network parameters
shapes = [param.get_value().shape for param in cg.parameters]
logger.info("Parameter shapes: ")
for shape, count in Counter(shapes).most_common():
logger.info(' {:15}: {}'.format(shape, count))
logger.info("Total number of parameters: {}".format(len(shapes)))
# Set up | |
# tensorflow_model_analysis/view/util.py
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View API for Tensorflow Model Analysis."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import json
import os
from typing import Any, Dict, List, Optional, Text, Tuple, Union
from absl import logging
from tensorflow_model_analysis import config
from tensorflow_model_analysis.metrics import example_count
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import weighted_example_count
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.view import view_types
from google.protobuf import json_format
def get_slicing_metrics(
results: List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]],
slicing_column: Optional[Text] = None,
slicing_spec: Optional[slicer.SingleSliceSpec] = None,
) -> List[Dict[Text, Union[Dict[Text, Any], Text]]]:
"""Util function that extracts slicing metrics from the results.
If neither slicing_column nor slicing_spec is provided, get Overall. If
slicing_column is set, use it to filter metrics from results. Otherwise, use
slicing_spec for filtering.
Args:
results: A list of records. Each record is a tuple of (slice_name,
{output_name: {sub_key: {metric_name, metric_value}}}).
    slicing_column: The column to filter the results with.
    slicing_spec: The slicer.SingleSliceSpec to filter the results with.
Returns:
A list of {slice, metrics}
Raises:
ValueError: The provided slicing_column does not exist in results or more
than one set of overall result is found.
"""
if slicing_column:
data = find_all_slices(results,
slicer.SingleSliceSpec(columns=[slicing_column]))
elif not slicing_spec:
data = find_all_slices(results, slicer.SingleSliceSpec())
else:
data = find_all_slices(results, slicing_spec)
slice_count = len(data)
if not slice_count:
if not slicing_spec:
if not slicing_column:
slicing_column = slicer.OVERALL_SLICE_NAME
raise ValueError('No slices found for %s' % slicing_column)
else:
raise ValueError('No slices found for %s' % slicing_spec)
elif not slicing_column and not slicing_spec and slice_count > 1:
raise ValueError('More than one slice found for %s' %
slicer.OVERALL_SLICE_NAME)
else:
return data
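# Illustrative sketch (not part of the original module; the slice keys and
# metric values below are made up, and the exact 'slice' strings come from
# slicer.stringify_slice_key):
#
#   results = [
#       ((), {'': {'': {'accuracy': {'doubleValue': 0.83}}}}),
#       ((('country', 'US'),), {'': {'': {'accuracy': {'doubleValue': 0.87}}}}),
#       ((('country', 'CA'),), {'': {'': {'accuracy': {'doubleValue': 0.79}}}}),
#   ]
#   get_slicing_metrics(results, slicing_column='country')
#   # -> [{'slice': ..., 'metrics': {...}}, {'slice': ..., 'metrics': {...}}]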
def find_all_slices(
results: List[Tuple[slicer.SliceKeyType,
Dict[Text, Any]]], slicing_spec: slicer.SingleSliceSpec
) -> List[Dict[Text, Union[Dict[Text, Any], Text]]]:
"""Util function that extracts slicing metrics for the named column.
Args:
results: A list of records. Each record is a tuple of (slice_name,
{metric_name, metric_value}).
slicing_spec: The spec to slice on.
Returns:
A list of {slice, metrics}
"""
data = []
for (slice_key, metric_value) in results:
if slicing_spec.is_slice_applicable(slice_key):
data.append({
'slice': slicer.stringify_slice_key(slice_key),
'metrics': metric_value
})
return data # pytype: disable=bad-return-type
def get_time_series(
results: view_types.EvalResults, slicing_spec: slicer.SingleSliceSpec,
display_full_path: bool
) -> List[Dict[Text, Union[Dict[Union[float, Text], Any], Text]]]:
"""Util function that extracts time series data for the specified slice.
Args:
results: A collection of EvalResult whose metrics should be visualized in a
time series.
slicing_spec: The spec specifying the slice to show in the time series.
display_full_path: Whether to display the full path or just the file name.
Returns:
A list of dictionaries, where each dictionary contains the config and the
metrics for the specified slice for a single eval run.
Raises:
ValueError: if the given slice spec matches more than one slice for any eval
run in results or if the slicing spec matches nothing in all eval runs.
"""
data = []
for result in results.get_results():
matching_slices = find_all_slices(result.slicing_metrics, slicing_spec)
slice_count = len(matching_slices)
if slice_count == 1:
data.append({
'metrics': matching_slices[0]['metrics'],
'config': {
'modelIdentifier':
_get_identifier(result.model_location, display_full_path),
'dataIdentifier':
_get_identifier(result.data_location, display_full_path),
}
})
elif slice_count > 1:
raise ValueError('Given slice spec matches more than one slice.')
run_count = len(data)
if not run_count:
raise ValueError('Given slice spec has no matches in any eval run.')
return data # pytype: disable=bad-return-type
def _get_identifier(path: Text, use_full_path: bool) -> Text:
""""Returns the desired identifier based on the path to the file.
Args:
path: The full path to the file.
use_full_path: Whether to use the full path or just the file name as the
identifier.
Returns:
A string containing the identifier
"""
return path if use_full_path else os.path.basename(path)
# Passing the keys from python means that it is possible to reuse the plot UI
# with other data by overwriting the config on python side.
_SUPPORTED_PLOT_KEYS = {
'calibrationPlot': {
'metricName': 'calibrationHistogramBuckets',
'dataSeries': 'buckets',
},
'confusionMatrixPlot': {
'metricName': 'confusionMatrixAtThresholds',
'dataSeries': 'matrices',
},
'multiClassConfusionMatrixPlot': {
'metricName': 'multiClassConfusionMatrixAtThresholds',
'dataSeries': 'matrices',
},
'multiLabelConfusionMatrixPlot': {
'metricName': 'multiLabelConfusionMatrixAtThresholds',
'dataSeries': 'matrices',
}
}
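# Illustrative sketch (not part of the original module): a mapping with the
# same shape as _SUPPORTED_PLOT_KEYS can be handed to _replace_nan_with_none
# below to sanitize plot data that uses different metric names, e.g.
#   custom_keys = {
#       'myPlot': {'metricName': 'myHistogramBuckets', 'dataSeries': 'buckets'},
#   }
#   cleaned = _replace_nan_with_none(plot_data, custom_keys)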
def _replace_nan_with_none(
plot_data: Union[Dict[Text, Any], Text],
plot_keys: Dict[Text, Dict[Text, Text]]) -> Union[Dict[Text, Any], Text]:
"""Replaces all instances of nan with None in plot data.
This is necessary for Colab integration where we serialize the data into a
JSON string, as NaN is not supported by the JSON standard. Turning nan into
None will make the value null once parsed. The visualization already handles
falsy values by setting them to zero.
Args:
plot_data: The original plot data
plot_keys: A dictionary containing field names of plot data.
Returns:
Transformed plot data where all nan has been replaced with None.
"""
output_metrics = {}
for plot_type in plot_keys:
metric_name = plot_keys[plot_type]['metricName']
if metric_name in plot_data:
data_series_name = plot_keys[plot_type]['dataSeries']
if data_series_name in plot_data[metric_name]:
data_series = plot_data[metric_name][data_series_name]
outputs = []
for entry in data_series:
output = {}
for key in entry:
value = entry[key]
# When converting protocol buffer into dict, float value nan is
# automatically converted into the string 'NaN'.
output[key] = None if value == 'NaN' else value
outputs.append(output)
output_metrics[metric_name] = {data_series_name: outputs}
return output_metrics
def get_plot_data_and_config(
results: List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]],
slicing_spec: slicer.SingleSliceSpec,
output_name: Optional[Text] = None,
class_id: Optional[int] = None,
top_k: Optional[int] = None,
k: Optional[int] = None,
label: Optional[Text] = None,
) -> Tuple[Union[Dict[Text, Any], Text], Dict[Text, Union[Dict[Text, Dict[
Text, Text]], Text]]]:
"""Util function that extracts plot for a particular slice from the results.
Args:
results: A list of records. Each record is a tuple of (slice_name,
{metric_name, metric_value}).
slicing_spec: The slicer.SingleSliceSpec to identify the slice to fetch plot
for.
output_name: The name of the output.
class_id: An int representing the class id if model is multi-class.
top_k: The k used to compute prediction in the top k position.
k: The k used to compute prediction at the kth position.
label: A partial label used to match a set of plots in the results. This is
kept for backward compatibility.
Returns:
(plot_data, plot_config) for the specified slice.
Note that plot_data should be of type Dict[Text, Any]. However, PyType
can't figure it out. As a result, the annotation has to be
Union[Dict[Text, Any], Text].
Raises:
ValueError: The provided slicing_column does not exist in results or more
than one result is found; or there is no plot data available; or there are
multiple sets of plots while label is not provided; or label matches more
than one set of plots; or label does not match any set of plots.
"""
if label is not None and (output_name is not None or class_id is not None or
top_k is not None or k is not None):
# Plot key (specified by output_name and class_id / top k / k) and label (
# for backward compatibility only) should not be provided together.
raise ValueError('Do not specify both label and output_name / class_id')
sub_key_oneof_check = 0
sub_key_id = None
if class_id is not None:
sub_key_oneof_check = sub_key_oneof_check + 1
sub_key_id = 'classId:' + str(class_id)
if top_k is not None:
sub_key_oneof_check = sub_key_oneof_check + 1
sub_key_id = 'topK:' + str(top_k)
if k is not None:
sub_key_oneof_check = sub_key_oneof_check + 1
sub_key_id = 'k:' + str(k)
if sub_key_oneof_check > 1:
raise ValueError('Up to one of class_id, top_k and k can be provided.')
output_name = '' if output_name is None else output_name
matching_slices = find_all_slices(results, slicing_spec)
count = len(matching_slices)
if count == 0:
raise ValueError('No slice matching slicing spec is found.')
elif count > 1:
raise ValueError('More than one slice matching slicing spec is found.')
target_slice = matching_slices[0]
plot_config = {
'sliceName': target_slice['slice'],
'metricKeys': _SUPPORTED_PLOT_KEYS,
}
if output_name not in target_slice['metrics']:
if output_name:
raise ValueError('No plot data found for | |
E501
:return: The os_api_level of this Device. # noqa: E501
:rtype: integer
"""
return self._os_api_level
@os_api_level.setter
def os_api_level(self, os_api_level):
"""Sets the os_api_level of this Device.
API level when applicable like in Android (example: 15).
# noqa: E501
:param os_api_level: The os_api_level of this Device. # noqa: E501
:type: integer
"""
self._os_api_level = os_api_level
@property
def locale(self):
"""Gets the locale of this Device. # noqa: E501
Language code (example: en_US).
# noqa: E501
:return: The locale of this Device. # noqa: E501
:rtype: string
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this Device.
Language code (example: en_US).
# noqa: E501
:param locale: The locale of this Device. # noqa: E501
:type: string
"""
if locale is None:
raise ValueError("Invalid value for `locale`, must not be `None`") # noqa: E501
self._locale = locale
@property
def time_zone_offset(self):
"""Gets the time_zone_offset of this Device. # noqa: E501
The offset in minutes from UTC for the device time zone, including daylight savings time.
# noqa: E501
:return: The time_zone_offset of this Device. # noqa: E501
:rtype: integer
"""
return self._time_zone_offset
@time_zone_offset.setter
def time_zone_offset(self, time_zone_offset):
"""Sets the time_zone_offset of this Device.
The offset in minutes from UTC for the device time zone, including daylight savings time.
# noqa: E501
:param time_zone_offset: The time_zone_offset of this Device. # noqa: E501
:type: integer
"""
if time_zone_offset is None:
raise ValueError("Invalid value for `time_zone_offset`, must not be `None`") # noqa: E501
self._time_zone_offset = time_zone_offset
@property
def screen_size(self):
"""Gets the screen_size of this Device. # noqa: E501
Screen size of the device in pixels (example: 640x480).
# noqa: E501
:return: The screen_size of this Device. # noqa: E501
:rtype: string
"""
return self._screen_size
@screen_size.setter
def screen_size(self, screen_size):
"""Sets the screen_size of this Device.
Screen size of the device in pixels (example: 640x480).
# noqa: E501
:param screen_size: The screen_size of this Device. # noqa: E501
:type: string
"""
self._screen_size = screen_size
@property
def app_version(self):
"""Gets the app_version of this Device. # noqa: E501
Application version name, e.g. 1.1.0
# noqa: E501
:return: The app_version of this Device. # noqa: E501
:rtype: string
"""
return self._app_version
@app_version.setter
def app_version(self, app_version):
"""Sets the app_version of this Device.
Application version name, e.g. 1.1.0
# noqa: E501
:param app_version: The app_version of this Device. # noqa: E501
:type: string
"""
if app_version is None:
raise ValueError("Invalid value for `app_version`, must not be `None`") # noqa: E501
self._app_version = app_version
@property
def carrier_name(self):
"""Gets the carrier_name of this Device. # noqa: E501
Carrier name (for mobile devices).
# noqa: E501
:return: The carrier_name of this Device. # noqa: E501
:rtype: string
"""
return self._carrier_name
@carrier_name.setter
def carrier_name(self, carrier_name):
"""Sets the carrier_name of this Device.
Carrier name (for mobile devices).
# noqa: E501
:param carrier_name: The carrier_name of this Device. # noqa: E501
:type: string
"""
self._carrier_name = carrier_name
@property
def carrier_code(self):
"""Gets the carrier_code of this Device. # noqa: E501
Carrier country code (for mobile devices).
# noqa: E501
:return: The carrier_code of this Device. # noqa: E501
:rtype: string
"""
return self._carrier_code
@carrier_code.setter
def carrier_code(self, carrier_code):
"""Sets the carrier_code of this Device.
Carrier country code (for mobile devices).
# noqa: E501
:param carrier_code: The carrier_code of this Device. # noqa: E501
:type: string
"""
self._carrier_code = carrier_code
@property
def carrier_country(self):
"""Gets the carrier_country of this Device. # noqa: E501
Carrier country.
# noqa: E501
:return: The carrier_country of this Device. # noqa: E501
:rtype: string
"""
return self._carrier_country
@carrier_country.setter
def carrier_country(self, carrier_country):
"""Sets the carrier_country of this Device.
Carrier country.
# noqa: E501
:param carrier_country: The carrier_country of this Device. # noqa: E501
:type: string
"""
self._carrier_country = carrier_country
@property
def app_build(self):
"""Gets the app_build of this Device. # noqa: E501
The app's build number, e.g. 42.
# noqa: E501
:return: The app_build of this Device. # noqa: E501
:rtype: string
"""
return self._app_build
@app_build.setter
def app_build(self, app_build):
"""Sets the app_build of this Device.
The app's build number, e.g. 42.
# noqa: E501
:param app_build: The app_build of this Device. # noqa: E501
:type: string
"""
if app_build is None:
raise ValueError("Invalid value for `app_build`, must not be `None`") # noqa: E501
self._app_build = app_build
@property
def app_namespace(self):
"""Gets the app_namespace of this Device. # noqa: E501
The bundle identifier, package identifier, or namespace, depending on what the individual platforms use, e.g. com.microsoft.example.
# noqa: E501
:return: The app_namespace of this Device. # noqa: E501
:rtype: string
"""
return self._app_namespace
@app_namespace.setter
def app_namespace(self, app_namespace):
"""Sets the app_namespace of this Device.
The bundle identifier, package identifier, or namespace, depending on what the individual platforms use, e.g. com.microsoft.example.
# noqa: E501
:param app_namespace: The app_namespace of this Device. # noqa: E501
:type: string
"""
self._app_namespace = app_namespace
@property
def live_update_release_label(self):
"""Gets the live_update_release_label of this Device. # noqa: E501
Label that is used to identify application code 'version' released via Live Update beacon running on device
# noqa: E501
:return: The live_update_release_label of this Device. # noqa: E501
:rtype: string
"""
return self._live_update_release_label
@live_update_release_label.setter
def live_update_release_label(self, live_update_release_label):
"""Sets the live_update_release_label of this Device.
Label that is used to identify application code 'version' released via Live Update beacon running on device
# noqa: E501
:param live_update_release_label: The live_update_release_label of this Device. # noqa: E501
:type: string
"""
self._live_update_release_label = live_update_release_label
@property
def live_update_deployment_key(self):
"""Gets the live_update_deployment_key of this Device. # noqa: E501
Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.
# noqa: E501
:return: The live_update_deployment_key of this Device. # noqa: E501
:rtype: string
"""
return self._live_update_deployment_key
@live_update_deployment_key.setter
def live_update_deployment_key(self, live_update_deployment_key):
"""Sets the live_update_deployment_key of this Device.
Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.
# noqa: E501
:param live_update_deployment_key: The live_update_deployment_key of this Device. # noqa: E501
:type: string
"""
self._live_update_deployment_key = live_update_deployment_key
@property
def live_update_package_hash(self):
"""Gets the live_update_package_hash of this Device. # noqa: E501
Hash of all files (ReactNative or Cordova) deployed to the device via the LiveUpdate beacon. Helps identify the release version on the device and whether updates need to be downloaded in the future.
# noqa: E501
:return: The live_update_package_hash of this Device. # noqa: E501
:rtype: string
"""
return self._live_update_package_hash
@live_update_package_hash.setter
def live_update_package_hash(self, live_update_package_hash):
"""Sets the live_update_package_hash of this Device.
Hash of all files (ReactNative or Cordova) deployed to the device via the LiveUpdate beacon. Helps identify the release version on the device and whether updates need to be downloaded in the future.
# noqa: E501
:param live_update_package_hash: The live_update_package_hash of this Device. # noqa: E501
:type: string
"""
self._live_update_package_hash = live_update_package_hash
@property
def wrapper_runtime_version(self):
"""Gets the wrapper_runtime_version of this Device. # noqa: E501
Version of the wrapper technology framework (Xamarin runtime version or ReactNative or Cordova etc...). See wrapper_sdk_name to see if this version refers to Xamarin or ReactNative or other.
# noqa: E501
:return: The wrapper_runtime_version of this Device. # noqa: E501
:rtype: string
"""
return self._wrapper_runtime_version
@wrapper_runtime_version.setter
def wrapper_runtime_version(self, wrapper_runtime_version):
"""Sets the wrapper_runtime_version of this Device.
Version of the wrapper technology framework (Xamarin runtime version or ReactNative or Cordova etc...). See wrapper_sdk_name to see if this version refers to Xamarin or ReactNative or other.
# noqa: E501
:param wrapper_runtime_version: The wrapper_runtime_version of this Device. # noqa: E501
:type: string
"""
self._wrapper_runtime_version = wrapper_runtime_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects | |
def pid_query
def query_changes (self, * filter, ** kw) :
"""Return changes matching `filter` and `kw`"""
return self.ems.changes (* filter, ** kw)
# end def query_changes
@TFL.Meta.Lazy_Method_RLV
def r_incorrect (self, gauge = Gauge_Logger (), eiter = None) :
"""Returns all objects which are region-wise incorrect (i.e., violating
the object's `region` predicates).
"""
with self.as_active () :
return self._check_inv (gauge, "region", eiter)
# end def r_incorrect
def record_change (self, Change, * args, ** kw) :
"""Record the `Change` specified by `args` and `kw`"""
with self.ems.save_point () :
result = self.historian.record (Change, * args, ** kw)
if result is not None :
result.user = self.user
self.ems.register_change (result)
result.do_callbacks (self)
return result
# end def record_change
def remove (self, entity) :
"""Remove `entity` from scope `self`"""
assert (entity != self.root)
Change = MOM.SCM.Change.Destroy
with self.nested_change_recorder (Change, entity) :
entity._destroy ()
self.ems.remove (entity)
# end def remove
def rename (self, entity, new_epk, renamer) :
self.ems.rename (entity, new_epk, renamer)
# end def rename
def rollback (self, keep_zombies = False) :
"""Rollback and discard the outstanding changes."""
self.ems.rollback (keep_zombies)
self.count_change ()
# end def rollback
def rollback_pending_change (self) :
"""Rollback the last, not yet recorded, change but keep all earlier
outstanding changes.
"""
changes = tuple (self.uncommitted_changes.changes)
self.rollback (keep_zombies = True)
for c in changes :
c.redo (self)
# end def rollback_pending_change
def start_change_recorder (self) :
if not self.historian._rec_stack :
self.historian.push_recorder (MOM.SCM.Tracker.Preferred_Recorder)
# end def start_change_recorder
def stop_change_recorder (self) :
if self.historian._rec_stack :
self.historian.pop_recorder ()
# end def stop_change_recorder
@TFL.Contextmanager
def temp_change_recorder (self, Recorder) :
with self.historian.temp_recorder (Recorder) :
yield
# end def temp_change_recorder
def user_diff (self, other, ignore = ()) :
"""Return differences of entities `self` and `other` concerning user attributes."""
result = {}
seen = set ()
def diff (lhs, rhs) :
for e in lhs :
k = e.epk_raw
t = e.type_name
if k not in seen :
seen.add (k)
o = rhs [t].instance (* k, raw = True)
if o is None :
diff = "Present in %s, missing in %s" % (lhs, rhs)
else :
diff = e.user_diff (o, ignore)
if diff :
result [(t, k)] = diff
diff (self, other)
diff (other, self)
return result
# end def user_diff
def user_equal (self, other) :
"""Compare entities of `self` and `other` regarding user attributes."""
s_count = self.ems.count (self.MOM.Id_Entity.E_Type, strict = False)
o_count = other.ems.count (other.MOM.Id_Entity.E_Type, strict = False)
if s_count == o_count :
for e in self :
o = other [e.type_name].instance (* e.epk_raw, raw = True)
if not (o and e.user_equal (o)) :
break
else :
return True
return False
# end def user_equal
def _check_inv (self, gauge, kind, eiter = None) :
err_result = []
wrn_result = []
sk = self.MOM.Id_Entity.sort_key
if eiter is None :
eiter = self.entity_iter_gauge \
(gauge, label = "Checking %s invariants" % kind)
for e in eiter :
try :
ews = e._pred_man.check_kind (kind, e)
if ews.errors :
err_result.append (e)
if ews.warnings :
wrn_result.append (e)
except Exception :
print \
( "Error during evaluation of", kind, "invariant for ", e
, file = sys.stderr
)
traceback.print_exc ()
err_result.append (e)
return MOM.Pred.Err_and_Warn_List \
(sorted (err_result, key = sk), sorted (wrn_result, key = sk))
# end def _check_inv
def _get_etm (self, name) :
try :
result = self._etm [name]
except KeyError :
pn, _, rest = split_hst (name, ".")
try :
result = self._pkg_ns [pn]
except KeyError :
raise AttributeError (name)
for k in rest.split (".") :
result = getattr (result, k)
self._etm [name] = result
return result
# end def _get_etm
def _new_guid (self) :
return str (uuid.uuid4 ())
# end def _new_guid
def _new_id (self) :
result = Scope.__id
Scope.__id += 1
return result
# end def _new_id
def _outer_pgk_ns (self, outer, pns, _pkg_ns) :
while True :
outer, _, name = rsplit_hst (outer, ".")
if (not outer) or outer in _pkg_ns :
break
pns = pns._Outer
yield outer, pns
# end def _outer_pgk_ns
def _register_root (self, root) :
if root is not None :
if self.root is None :
self.root = self._roots [root.type_base_name] = root
self.root_pid = root.pid
self.bname = root.ui_display
else :
raise TypeError ("Root was already set to %r" % (self.root, ))
# end def _register_root
def _run_init_callbacks (self) :
for c in self.init_callback :
c (self)
self.app_type.run_init_callbacks (self)
# end def _run_init_callbacks
def _setup_pkg_ns (self, app_type) :
_pkg_ns = self._pkg_ns = {}
Pkg_NS = self.Pkg_NS
for name, pns in sorted \
(pyk.iteritems (app_type.PNS_Map), key = TFL.Getter [0]) :
_pkg_ns [name] = Pkg_NS (self, pns, name)
for outer, pns in self._outer_pgk_ns (name, pns, _pkg_ns):
_pkg_ns [outer] = Pkg_NS (self, pns, outer)
# end def _setup_pkg_ns
def _setup_root (self, app_type, root_spec) :
RT = self.Root_Type
if root_spec and RT :
if callable (root_spec) :
result = root_spec (self)
if not isinstance (result, RT.Essence) :
raise TypeError \
( "%s returned %s %r, expected %s"
% (root_spec, result.__class__, result, RT)
)
else :
result = RT (* root_spec)
self._register_root (result)
return result
# end def _setup_root
def __getattr__ (self, name) :
if name.startswith ("__") and name.endswith ("__") :
### Placate inspect.unwrap of Python 3.5,
### which accesses `__wrapped__` and eventually throws `ValueError`
return getattr (self.__super, name)
if "." in name :
if name in self._etm :
return self._etm [name]
else :
return self._get_etm (name)
else :
for dict in self._roots, self._pkg_ns :
try :
result = dict [name]
except KeyError :
pass
else :
setattr (self, name, result)
return result
return getattr (self.app_type, name)
# end def __getattr__
def __getitem__ (self, name) :
if not isinstance (name, pyk.string_types) :
name = name.type_name
try :
return self._get_etm (name)
except AttributeError :
raise KeyError (name)
# end def __getitem__
def __iter__ (self) :
"""Generate all essential instances stored in database"""
return iter (self.ems)
# end def __iter__
def __str__ (self) :
url = self._cleaned_url (str (self.db_url))
return "%s %s<%s>" % (self.__class__.__name__, self.bname, url)
# end def __str__
# end class Scope
atexit.register (Scope.destroy_all)
### «text» ### start of documentation
__doc__ = """
.. class:: Scope
`MOM.Scope` maps the object model of a specific derived
:class:`~_MOM.App_Type.App_Type` to a concrete database storing
instances of the essential objects and links.
`Scope` instances cannot be created by just calling the `Scope` class,
like normal Python types. Instead, :meth:`load` and :meth:`new` create
scopes connected to existing or newly created databases, respectively.
For each package namespace defining essential object types, `Scope`
provides an attribute with the name of the package namespace. That
attribute lets one access all essential types of the package namespace.
For instance, if the scope contains a package namespace ``PAP``, one can
access ``scope.PAP.Person`` or ``scope.PAP.Phone``. Each attribute of
``scope.PAP`` refers to the :class:`~_MOM.E_Type_Manager.Object` or
:class:`~_MOM.E_Type_Manager.Link` instance of the corresponding essential
type.
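A minimal usage sketch (``apt`` and ``db_url`` stand for an application
type and a database URL; the entity type and arguments are illustrative
only, not prescribed by this class)::
    scope  = MOM.Scope.new (apt, db_url)
    person = scope.PAP.Person ("Doe", "Jane")
    scope.commit ()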
**`Scope` provides the attributes:**
.. attribute:: app_type
The derived app_type of the scope.
.. attribute:: changes
The number of changes up to now.
.. attribute:: changes_to_save
The number of outstanding changes waiting to be committed (or
rolled back).
.. attribute:: db_meta_data
Meta data about the scope and its database.
.. attribute:: db_url
The URL of the database the scope is connected to.
.. attribute:: max_cid
The currently maximum change-id.
.. attribute:: max_pid
The currently maximum permanent id in use.
.. attribute:: relevant_roots
A list of all relevant roots of the application.
A relevant root is an etype that has its own table in the
database.
.. attribute:: uncommitted_changes
The list of outstanding changes waiting to be committed (or
rolled back).
**`Scope` provides the class methods:**
.. automethod:: load
.. automethod:: new
**`Scope` provides the class and instance methods:**
.. automethod:: add_after_commit_callback(* callbacks)
.. automethod:: add_init_callback(* callbacks)
.. automethod:: add_kill_callback(* callbacks)
**`Scope` provides the instance methods:**
.. automethod:: add
.. automethod:: as_active
.. automethod:: commit
.. automethod:: copy
.. automethod:: destroy
.. automethod:: entity_iter
.. automethod:: entity_iter_gauge
.. automethod:: entity_type
.. automethod:: g_incorrect()
.. automethod:: has_changed
.. automethod:: i_incorrect()
.. automethod:: nested_change_recorder
.. automethod:: | |
#!/usr/bin/env python3
"""
USBI2C AVR Miner 3.1 © MIT licensed
Modified by JK-Rolling
20220101
Full credit belong to
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2022
"""
from os import _exit, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from platform import system
import sys
from configparser import ConfigParser
from pathlib import Path
from json import load as jsonload
import json
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from re import sub
from socket import socket
from datetime import datetime
from statistics import mean
from signal import SIGINT, signal
from time import ctime, sleep, strptime, time
import pip
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread
from threading import Lock as thread_lock
from threading import Semaphore
import base64 as b64
import os
printlock = Semaphore(value=1)
serlock = Semaphore(value=1)
# Python <3.6 check: this bare f-string raises a SyntaxError on older interpreters
f"Your Python version is too old. Duino-Coin Miner requires version 3.6 or above. Update your packages and try again"
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
try:
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print("Pyserial is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pyserial")
install('pyserial')
try:
import requests
except ModuleNotFoundError:
print("Requests is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install requests")
install('requests')
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
def now():
return datetime.now()
def port_num(com):
#return str(''.join(filter(str.isdigit, com)))
return com
class Settings:
VER = '3.1'
SOC_TIMEOUT = 45
REPORT_TIME = 60
AVR_TIMEOUT = 10 # diff 16 * 100 / 269 h/s = 5.94 s
DELAY_START = 60 # 60 second start delay between workers to help kolka sync and reduce the efficiency drop
CRC8_EN = "y"
BAUDRATE = 115200
DATA_DIR = "Duino-Coin AVR Miner " + str(VER)
SEPARATOR = ","
USBI2C_SEPARATOR = ":"
USBI2C_EOL = "$"
ENCODING = "utf-8"
try:
# Raspberry Pi latin users can't display this character
"‖".encode(sys.stdout.encoding)
BLOCK = " ‖ "
except:
BLOCK = " | "
PICK = ""
COG = " @"
if (osname != "nt"
or bool(osname == "nt"
and os.environ.get("WT_SESSION"))):
# Windows' cmd does not support emojis, shame!
# Some codecs don't support emoji either, for example the Latin-1 encoding
try:
"⛏ ⚙".encode(sys.stdout.encoding) # if the terminal supports emoji
PICK = " ⛏"
COG = " ⚙"
except UnicodeEncodeError: # else
PICK = ""
COG = " @"
def check_mining_key(user_settings):
user_settings = user_settings["AVR Miner"]
if user_settings["mining_key"] != "None":
key = b64.b64decode(user_settings["mining_key"]).decode('utf-8')
else:
key = ''
response = requests.get(
"https://server.duinocoin.com/mining_key"
+ "?u=" + user_settings["username"]
+ "&k=" + key,
timeout=10
).json()
if response["success"] and not response["has_key"]: # if the user doesn't have a mining key
user_settings["mining_key"] = "None"
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
return
if not response["success"]:
if user_settings["mining_key"] == "None":
pretty_print(
"sys0",
get_string("mining_key_required"),
"warning")
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
check_mining_key(config)
else:
pretty_print(
"sys0",
get_string("invalid_mining_key"),
"error")
retry = input("You want to retry? (y/n): ")
if retry == "y" or retry == "Y":
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
check_mining_key(config)
else:
return
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
return s
def send(s, msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(s, limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
while True:
pretty_print("net0", " " + get_string("connection_search"),
"info")
try:
response = requests.get(
"https://server.duinocoin.com/getPool",
timeout=10).json()
if response["success"] == True:
pretty_print("net0", get_string("connecting_node")
+ response["name"],
"info")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
debug_output(f"Fetched pool: {response['name']}")
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(15)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
if "Expecting value" in str(e):
pretty_print("net0", get_string("node_picker_unavailable")
+ f"15s {Style.RESET_ALL}({e})",
"warning")
else:
pretty_print("net0", get_string("node_picker_error")
+ f"15s {Style.RESET_ALL}({e})",
"error")
sleep(15)
class Donate:
def load(donation_level):
if donation_level > 0:
if osname == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif osname == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if osname == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*3}')
elif osname == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*3}')
if donation_level <= 0:
pretty_print(
'sys0', Fore.YELLOW
+ get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning')
sleep(5)
if donation_level > 0:
debug_output(get_string('starting_donation'))
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print('sys0',
get_string('thanks_donation').replace("\n", "\n\t\t"),
'error')
shares = [0, 0, 0]
bad_crc8 = 0
i2c_retry_count = 0
hashrate_mean = []
ping_mean = []
diff = 0
shuffle_ports = "y"
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = 0
config = ConfigParser()
mining_start_time = time()
if not path.exists(Settings.DATA_DIR):
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + '/Translations.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url, timeout=5)
with open(Settings.DATA_DIR + '/Translations.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(Settings.DATA_DIR + '/Translations.json', 'r',
encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
try:
if not Path(Settings.DATA_DIR + '/Settings.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('de'):
lang = 'german'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('it'):
lang = 'italian'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
elif locale.startswith('az'):
lang = 'azerbaijani'
elif locale.startswith('nl'):
lang = 'dutch'
elif locale.startswith('ko'):
lang = 'korean'
elif locale.startswith("id"):
lang = "indonesian"
elif locale.startswith("cz"):
lang = "czech"
else:
lang = 'english'
else:
try:
config.read(Settings.DATA_DIR + '/Settings.cfg')
lang = config["AVR Miner"]['language']
except Exception:
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return string_name
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000), accuracy)) + " k"
else:
if symbol:
val = str(round(val)) + " "
else:
val = str(round(val))
return val + symbol
def debug_output(text: str):
if debug == 'y':
print(Style.RESET_ALL + Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ Style.NORMAL + f'DEBUG: {text}')
def ondemand_print(text: str):
print(Style.RESET_ALL + Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ Style.NORMAL + f'DEBUG: {text}')
def title(title: str):
if osname == 'nt':
"""
Changing the title in Windows' cmd
is easy - just use the built-in
title command
"""
ossystem('title ' + title)
else:
"""
Most *nix terminals use
this escape sequence to change
the console window title
"""
try:
print('\33]0;' + title +
# henriktao/pulumi-azure
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SubscriptionArgs', 'Subscription']
@pulumi.input_type
class SubscriptionArgs:
def __init__(__self__, *,
subscription_name: pulumi.Input[str],
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workload: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Subscription resource.
:param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
:param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
:param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
:param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
pulumi.set(__self__, "subscription_name", subscription_name)
if alias is not None:
pulumi.set(__self__, "alias", alias)
if billing_scope_id is not None:
pulumi.set(__self__, "billing_scope_id", billing_scope_id)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workload is not None:
pulumi.set(__self__, "workload", workload)
@property
@pulumi.getter(name="subscriptionName")
def subscription_name(self) -> pulumi.Input[str]:
"""
The Name of the Subscription. This is the Display Name in the portal.
"""
return pulumi.get(self, "subscription_name")
@subscription_name.setter
def subscription_name(self, value: pulumi.Input[str]):
pulumi.set(self, "subscription_name", value)
@property
@pulumi.getter
def alias(self) -> Optional[pulumi.Input[str]]:
"""
The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "alias")
@alias.setter
def alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alias", value)
@property
@pulumi.getter(name="billingScopeId")
def billing_scope_id(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
"""
return pulumi.get(self, "billing_scope_id")
@billing_scope_id.setter
def billing_scope_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "billing_scope_id", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Subscription. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the Subscription.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def workload(self) -> Optional[pulumi.Input[str]]:
"""
The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "workload")
@workload.setter
def workload(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workload", value)
@pulumi.input_type
class _SubscriptionState:
def __init__(__self__, *,
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
workload: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Subscription resources.
:param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
:param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
:param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
:param pulumi.Input[str] tenant_id: The ID of the Tenant to which the subscription belongs.
:param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
if alias is not None:
pulumi.set(__self__, "alias", alias)
if billing_scope_id is not None:
pulumi.set(__self__, "billing_scope_id", billing_scope_id)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if subscription_name is not None:
pulumi.set(__self__, "subscription_name", subscription_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if workload is not None:
pulumi.set(__self__, "workload", workload)
@property
@pulumi.getter
def alias(self) -> Optional[pulumi.Input[str]]:
"""
The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "alias")
@alias.setter
def alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alias", value)
@property
@pulumi.getter(name="billingScopeId")
def billing_scope_id(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
"""
return pulumi.get(self, "billing_scope_id")
@billing_scope_id.setter
def billing_scope_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "billing_scope_id", value)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Subscription. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "subscription_id")
@subscription_id.setter
def subscription_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_id", value)
@property
@pulumi.getter(name="subscriptionName")
def subscription_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the Subscription. This is the Display Name in the portal.
"""
return pulumi.get(self, "subscription_name")
@subscription_name.setter
def subscription_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the Subscription.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Tenant to which the subscription belongs.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter
def workload(self) -> Optional[pulumi.Input[str]]:
"""
The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
"""
return pulumi.get(self, "workload")
@workload.setter
def workload(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workload", value)
class Subscription(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alias: Optional[pulumi.Input[str]] = None,
billing_scope_id: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workload: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Alias for a Subscription - which adds an Alias to an existing Subscription, allowing it to be managed in the provider - or create a new Subscription with a new Alias.
> **NOTE:** Destroying a Subscription controlled by this resource will place the Subscription into a cancelled state. It is possible to re-activate a subscription within 90-days of cancellation, after which time the Subscription is irrevocably deleted, and the Subscription ID cannot be re-used. For further information see [here](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/cancel-azure-subscription#what-happens-after-subscription-cancellation). Users can optionally delete a Subscription once 72 hours have passed, however, this functionality is not suitable for this provider. A `Deleted` subscription cannot be reactivated.
> **NOTE:** It is not possible to destroy (cancel) a subscription if it contains resources. If resources are present that are not managed by the provider then these will need to be removed before the Subscription can be destroyed.
> **NOTE:** Azure supports Multiple Aliases per Subscription, however, to reliably manage this resource in this provider only a single Alias is supported.
## Example Usage
### Creating A New Alias And Subscription For An Enrollment Account
```python
import pulumi
import pulumi_azure as azure
example_enrollment_account_scope = azure.billing.get_enrollment_account_scope(billing_account_name="1234567890",
enrollment_account_name="0123456")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example EA Subscription",
billing_scope_id=example_enrollment_account_scope.id)
```
### Creating A New Alias And Subscription For A Microsoft Customer Account
```python
import pulumi
import pulumi_azure as azure
example_mca_account_scope = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
billing_profile_name="PE2Q-NOIT-BG7-TGB",
invoice_section_name="MTT4-OBS7-PJA-TGB")
example_subscription = azure.core.Subscription("exampleSubscription",
subscription_name="My Example MCA Subscription",
billing_scope_id=example_mca_account_scope.id)
```
### Creating A New Alias And Subscription For A Microsoft Partner Account
```python
import pulumi
import
})
// SV length
sizeChart
.width(plotw).height(ploth).gap(1)
.margins({top: 10, right: 50, bottom: 30, left: 40})
.x(d3.scaleLinear().domain([0, sizeKeys.length]))
.round(Math.floor)
.brushOn(true)
.elasticX(true)
.dimension(sizeDimension)
.group(index_group(nonEmptySizeGroup))
.elasticY(true)
.yAxisLabel('Count')
.filterPrinter(function (filters) {
var filter = filters[0]
return sizeKeys[filter[0]] + ' - ' + sizeKeys[filter[1]]
})
// adds left padding to plots inside filtering panel
.on('renderlet', function() {
d3.selectAll("svg")
.classed("pl-3", true)
})
// limit the number of labels along x-axis
sizeChart.xAxis().ticks(10)
sizeChart.yAxis().ticks(5)
// update labels from keys
sizeChart.xAxis().tickFormat(function(v) {
return sizeKeys[v]
})
// update the status format for this chart
sizeChart.filterHandler(function (dimension, filters) {
if (filters.length === 0) {
// the empty case (no filtering)
dimension.filter(null)
} else {
dimension.filterRange([sizeKeys[filters[0][0]], sizeKeys[filters[0][1]]])
}
return filters
})
// sv type
typeChart
.width(plotw).height(ploth).gap(1)
.margins({top: 10, right: 50, bottom: 30, left: 40})
.x(d3.scaleBand())
.xUnits(dc.units.ordinal)
.elasticX(true)
.elasticY(true)
.dimension(typeDimension)
.group(nonEmptyTypeGroup)
.yAxisLabel('Count')
typeChart.yAxis().ticks(5)
// chromosome
chromChart
.width(plotw).height(ploth).gap(1)
.margins({top: 10, right: 50, bottom: 30, left: 40})
.x(d3.scaleBand())
.xUnits(dc.units.ordinal)
.yAxisLabel('Count')
.elasticX(true)
.elasticY(true)
.dimension(chromDimension)
.group(nonEmptyChromGroup)
.ordering((d) => {
v = parseInt(d.key)
if (v) {
return v
} else {
return d.key
}
})
chromChart.yAxis().ticks(5)
// overlaps
if (annotation) {
var overlapsDimension = ndx.dimension((d) => { return d.overlaps })
var overlapsGroup = overlapsDimension.group().reduceCount()
var nonEmptyOverlapsGroup = remove_empty_bins(overlapsGroup)
overlapsChart = dc.barChart("#overlaps-chart")
overlapsChart
.width(plotw).height(ploth).gap(1)
.margins({top: 10, right: 50, bottom: 30, left: 40})
.x(d3.scaleBand())
.xUnits(dc.units.ordinal)
.elasticX(true)
.elasticY(true)
.dimension(overlapsDimension)
.group(nonEmptyOverlapsGroup)
.yAxisLabel('Count')
overlapsChart.yAxis().ticks(5)
}
variantCount
.crossfilter(ndx)
.groupAll(all)
// (_optional_) `.html` sets different html when some records or all records are selected.
// `.html` replaces everything in the anchor with the html given using the following function.
// `%filter-count` and `%total-count` are replaced with the values obtained.
.html({
some: '<strong>%filter-count</strong> selected out of <strong>%total-count</strong> records' +
' | <a href=\\'javascript:dc.filterAll(); dc.renderAll();\\'>Reset All</a>',
all: '<strong>%total-count</strong> records'
});
dc.renderAll()
})
</script>
</html>
"""
cmp_lookup = {
">": operator.gt, # e.g. DHFC < 0.5
"<": operator.lt,
"<=": operator.le,
">=": operator.ge,
"==": operator.eq,
"contains": operator.contains, # e.g. CSQ contains HIGH
"exists": lambda a, b: True, # e.g. exists smoove_gene
}
class Sample(object):
__slots__ = [
"family_id",
"id",
"paternal_id",
"maternal_id",
"mom",
"dad",
"kids",
"i",
]
def __init__(self, line):
toks = line.rstrip().split()
self.family_id = toks[0]
self.id = toks[1]
self.paternal_id = toks[2]
self.maternal_id = toks[3]
self.kids = []
self.i = -1 # index in the vcf.
def __repr__(self):
return "Sample(id:{id},paternal_id:{pid},maternal_id:{mid})".format(
id=self.id, pid=self.paternal_id, mid=self.maternal_id
)
def flatten(value, sep=","):
"""
>>> flatten([1,2,3,4])
'1,2,3,4'
>>> flatten((5,6))
'5,6'
>>> flatten(0.987654321)
'0.987654'
>>> flatten(7)
'7'
>>> flatten("flatten")
'flatten'
"""
flat = None
# tuple or list
if isinstance(value, tuple) or isinstance(value, list):
flat = sep.join([str(i) for i in value])
# reformats long float values
elif isinstance(value, float):
flat = "%.6f" % (value,)
# string and int
else:
flat = str(value)
return flat
def zip_lists(value):
"""
>>> zip_lists([[0,1,2], [3,4,5]])
['0 3', '1 4', '2 5']
"""
return [flatten(i, sep=" ") for i in zip(*value)]
def get_format_fields(ids, variant):
"""
args:
ids (list) - list of FORMAT field IDs, e.g. ['AS', 'AP', 'DHFFC']
variant (pysam.libcbcf.VariantRecord)
returns:
list
"""
fields = list()
for i in ids:
fields.append(
["%s=%s" % (i, flatten(j.get(i, ""))) for j in variant.samples.values()]
)
return zip_lists(fields)
def get_format_title(samples, ids, variant):
"""
args:
samples (list) - list of sample IDs in order of VCF annotations
ids (list) - list of FORMAT field IDs, e.g. ['AS', 'AP', 'DHFFC']
variant (pysam.libcbcf.VariantRecord)
returns:
dict
"""
fields = get_format_fields(ids, variant)
return dict(zip(samples, fields))
def make_plot_titles(samples, attr_values):
"""
keeping this method separate in the event we add more things to the title
args:
samples (list) - list of sample IDs
attr_values (str) - string of VCF FORMAT values
returns:
dict
>>> make_plot_titles(['s1', 's2', 's3'], {'s1': 'AS=0 AP=0', 's2': 'AS=0 AP=1', 's3': 'AS=1 AP=1'})
{'s1': "'s1 AS=0 AP=0'", 's2': "'s2 AS=0 AP=1'", 's3': "'s3 AS=1 AP=1'"}
"""
plot_titles = dict()
for sample in samples:
if sample in attr_values:
plot_titles[sample] = quote("%s %s" % (sample, attr_values[sample]))
return plot_titles
def get_overlap(
tabix,
chrom,
start,
end,
priority=["exon", "gene", "transcript", "cds"],
no_hit="intergenic",
fix_chr=True,
):
"""
args:
tabix (pysam.libctabix.TabixFile) - open TabixFile
chrom (str)
start (int)
end (int)
priority (Optional[list]) - order of preferred region annotation
no_hit (Optional[str]) - use this annotation if no matches among priority
fix_chr (Optional[bool]) - try to fetch a region using both non-'chr' and 'chr' prefix on failures
returns:
str
"""
overlaps = None
try:
overlaps = set(
[i.split("\t")[2].lower() for i in tabix.fetch(chrom, start, end)]
)
except IndexError:
# probably not a gff or gtf
print("Invalid annotation file specified for --gff")
overlaps = None
except ValueError:
if fix_chr:
# try removing chr
if chrom.startswith("chr"):
overlaps = get_overlap(
tabix, chrom[3:], start, end, priority, no_hit, False
)
# or adding chr
else:
overlaps = get_overlap(
tabix,
"chr{chrom}".format(chrom=chrom),
start,
end,
priority,
no_hit,
False,
)
except:
# bad regions
print(
"Error fetching {chrom}:{start}-{end}".format(
chrom=chrom, start=start, end=end
)
)
overlaps = None
overlap = ""
if overlaps:
for feature in priority:
if feature in overlaps:
overlap = feature
break
else:
# fetching overlaps failed
overlap = "unknown"
if not overlap and no_hit:
overlap = no_hit
return overlap
def parse_ped(path, vcf_samples=None):
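# Expects whitespace-delimited PED lines with at least four columns:
# family_id, sample_id, paternal_id, maternal_id (see the Sample class above).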
if path is None:
return {}
samples = []
look = {}
for line in open(path):
samples.append(Sample(line))
look[samples[-1].id] = samples[-1]
for s in samples:
s.dad = look.get(s.paternal_id)
if s.dad is not None:
s.dad.kids.append(s)
s.mom = look.get(s.maternal_id)
if s.mom is not None:
s.mom.kids.append(s)
# match these samples to the ones in the VCF.
if vcf_samples is not None:
result = []
for i, variant_sample in enumerate(vcf_samples):
if not variant_sample in look:
continue
result.append(next(s for s in samples if s.id == variant_sample))
result[-1].i = i
samples = result
return {s.id: s for s in samples}
def get_names_to_bams(bams, name_list=None):
"""
get mapping from names (read group samples) to bam paths
this is useful because the VCF has the names and we'll want the bam paths
for those samples
if name_list is passed in as a parameter those will be used instead
"""
names = {}
if name_list:
if len(name_list) != len(bams):
sys.exit("List of sample IDs does not match list of alignment files.")
for i, p in enumerate(bams):
names[name_list[i]] = p
else:
for p in bams:
b = pysam.AlignmentFile(p)
try:
names[b.header["RG"][0]["SM"]] = p
except:
sys.exit(
"No RG field in alignment file "
+ p
+ ". \nInclude ordered list of sample IDs to avoid this error"
)
return names
def tryfloat(v):
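"""
Return the float value of `v` when possible, otherwise return `v` unchanged.
>>> tryfloat("0.7")
0.7
>>> tryfloat("DEL")
'DEL'
"""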
try:
return float(v)
except:
return v
def to_exprs(astr):
"""
an expr is just a 3-tuple of ("name", fn, value),
e.g. ("DHFFC", operator.lt, 0.7)
>>> to_exprs("DHFFC < 0.5 & SVTYPE == 'DEL'")
[('DHFFC', <built-in function lt>, 0.5), ('SVTYPE', <built-in function eq>, 'DEL')]
>>> to_exprs("CSQ contains 'HIGH'")
[('CSQ', <built-in function contains>, 'HIGH')]
"""
astr = (x.strip() for x in astr.strip().split("&"))
result = []
for a in astr:
a = [x.strip() for x in a.split()]
if len(a) == 2:
assert a[1] == "exists", ("bad expression", a)
a.append("extra_arg")
assert len(a) == 3, ("bad expression", a)
assert a[1] in cmp_lookup, (
"comparison:"
+ a[1]
+ " not supported. must be one of:"
+ ",".join(cmp_lookup.keys())
)
result.append((a[0], cmp_lookup[a[1]], tryfloat(a[2].strip("'").strip('"'))))
return result
def check_expr(vdict, expr):
"""
>>> check_expr({"CSQ": "asdfHIGHasdf"}, to_exprs("CSQ contains 'HIGH'"))
True
>>> check_expr({"CSQ": "asdfHIGHasdf", "DHFC": 1.1}, to_exprs("CSQ contains 'HIGH' & DHFC < 0.5"))
False
>>> check_expr({"CSQ": "asdfHIGHasdf", "DHFC": 1.1}, to_exprs("CSQ contains 'HIGH' & DHFC < 1.5"))
True
>>> check_expr({"smoove_gene": "asdf"}, to_exprs("smoove_gene exists"))
True
>>> check_expr({"smooe_gene": "asdf"}, to_exprs("smoove_gene exists"))
False
>>> check_expr({"smoove_gene": ""}, to_exprs("smoove_gene exists"))
True
"""
# a single set of exprs must be "anded"
for name, fcmp, val in expr:
# NOTE: asking for a missing annotation will return false.
if not name in vdict:
return False
if not fcmp(vdict[name], val):
return False
return True
def make_single(vdict):
"""
>>> d = {"xx": (1,)}
>>> make_single(d)
{'xx': 1}
"""
for k in vdict.keys():
if isinstance(vdict[k], tuple) and len(vdict[k]) == 1:
vdict[k] = vdict[k][0]
return vdict
def get_dn_row(ped_samples):
for s in ped_samples.values():
if s.mom is not None and s.dad is not None:
return '{title:"de novo", field:"dn"}'
return ""
def read_important_regions(bedfilename):
important_regions = {}
with open(bedfilename, "r") as bedfile:
for line in bedfile:
pos_fields = line.strip().split()
region_string = "_".join(pos_fields[1:3])
if pos_fields[0] not in important_regions:
important_regions[pos_fields[0]] = []
important_regions[pos_fields[0]].append(region_string)
return important_regions
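# Hedged illustration (hypothetical BED content, not from this repository): for a BED file with
# the lines "chr1  100  200" and "chr1  500  900", read_important_regions() returns
# {"chr1": ["100_200", "500_900"]}, i.e. "start_end" strings grouped by chromosome.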
def var_in_important_regions(important_regions, | |
params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'month' is set
if ('month' not in params or
params['month'] is None):
raise ValueError("Missing the required parameter `month` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'month' in params:
path_params['month'] = params['month'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/growth-history/{month}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GrowthHistory', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_list_interest_categories(self, list_id, **kwargs): # noqa: E501
"""List interest categories # noqa: E501
Get information about a list's interest categories. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_interest_categories(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
        :param str type: Restrict results to a type of interest group
:return: InterestGroupings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_interest_categories_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_list_interest_categories_with_http_info(list_id, **kwargs) # noqa: E501
return data
def get_list_interest_categories_with_http_info(self, list_id, **kwargs): # noqa: E501
"""List interest categories # noqa: E501
Get information about a list's interest categories. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_interest_categories_with_http_info(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
        :param str type: Restrict results to a type of interest group
:return: InterestGroupings
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'fields', 'exclude_fields', 'count', 'offset', 'type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_list_interest_categories" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
if 'count' in params and params['count'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `count` when calling ``, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/interest-categories', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InterestGroupings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
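    # Hedged usage sketch (comments only; the list ID and field names are assumptions, and
    # `api` stands for an instance of this class built around a configured api_client):
    #   groups = api.get_list_interest_categories(
    #       "abc123def4", count=50, offset=0,
    #       fields=["categories.id", "categories.title"])
    #   thread = api.get_list_interest_categories("abc123def4", async_req=True)
    #   groups = thread.get()  # resolve the asynchronous request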
def get_interest_category(self, list_id, interest_category_id, **kwargs): # noqa: E501
"""Get interest category info # noqa: E501
Get information about a specific interest category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_interest_category(list_id, interest_category_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str interest_category_id: The unique ID for the interest category. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: InterestCategory
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_interest_category_with_http_info(list_id, interest_category_id, **kwargs) # noqa: E501
else:
(data) = self.get_interest_category_with_http_info(list_id, interest_category_id, **kwargs) # noqa: E501
return data
def get_interest_category_with_http_info(self, list_id, interest_category_id, **kwargs): # noqa: E501
"""Get interest category info # noqa: E501
Get information about a specific interest category. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_interest_category_with_http_info(list_id, interest_category_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str interest_category_id: The unique ID for the interest category. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: InterestCategory
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'interest_category_id', 'fields', 'exclude_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_interest_category" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'interest_category_id' is set
if ('interest_category_id' not in params or
params['interest_category_id'] is None):
raise ValueError("Missing the required parameter `interest_category_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'interest_category_id' in params:
path_params['interest_category_id'] = params['interest_category_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/interest-categories/{interest_category_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InterestCategory', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_interest_category_interests(self, list_id, interest_category_id, **kwargs): # noqa: E501
"""List interests in category # noqa: | |
AuiDockingGuideWindow(self, rectCenter, wx.CENTER, True, useAero)
# top-left diamond
tld = [wx.Point(rectTop.x, rectTop.y+rectTop.height-8),
wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y),
rectTop.GetBottomLeft()]
# bottom-left diamond
bld = [wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y+rectLeft.height),
wx.Point(rectBottom.x, rectBottom.y+8),
rectBottom.GetTopLeft()]
# top-right diamond
trd = [wx.Point(rectTop.x+rectTop.width, rectTop.y+rectTop.height-8),
wx.Point(rectRight.x+8, rectRight.y),
rectRight.GetTopLeft()]
# bottom-right diamond
brd = [wx.Point(rectRight.x+8, rectRight.y+rectRight.height),
wx.Point(rectBottom.x+rectBottom.width, rectBottom.y+8),
rectBottom.GetTopRight()]
self._triangles = [tld[0:2], bld[0:2],
[wx.Point(rectTop.x+rectTop.width-1, rectTop.y+rectTop.height-8),
wx.Point(rectRight.x+7, rectRight.y)],
[wx.Point(rectRight.x+7, rectRight.y+rectRight.height),
wx.Point(rectBottom.x+rectBottom.width-1, rectBottom.y+8)]]
region = wx.Region()
region.UnionRect(rectLeft)
region.UnionRect(rectTop)
region.UnionRect(rectRight)
region.UnionRect(rectBottom)
region.UnionRect(rectCenter)
region.UnionRegion(wx.RegionFromPoints(tld))
region.UnionRegion(wx.RegionFromPoints(bld))
region.UnionRegion(wx.RegionFromPoints(trd))
region.UnionRegion(wx.RegionFromPoints(brd))
elif useAero:
self._aeroBmp = aero_dock_pane.GetBitmap()
region = wx.RegionFromBitmap(self._aeroBmp)
self._allAeroBmps = [aero_dock_pane_left.GetBitmap(), aero_dock_pane_top.GetBitmap(),
aero_dock_pane_right.GetBitmap(), aero_dock_pane_bottom.GetBitmap(),
aero_dock_pane_center.GetBitmap(), aero_dock_pane.GetBitmap()]
self._deniedBitmap = aero_denied.GetBitmap()
self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]
self._valid = True
elif useWhidbey:
self._aeroBmp = whidbey_dock_pane.GetBitmap()
region = wx.RegionFromBitmap(self._aeroBmp)
self._allAeroBmps = [whidbey_dock_pane_left.GetBitmap(), whidbey_dock_pane_top.GetBitmap(),
whidbey_dock_pane_right.GetBitmap(), whidbey_dock_pane_bottom.GetBitmap(),
whidbey_dock_pane_center.GetBitmap(), whidbey_dock_pane.GetBitmap()]
self._deniedBitmap = whidbey_denied.GetBitmap()
self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]
self._valid = True
self.region = region
def SetGuideShape(self, event=None):
"""
Sets the correct shape for the docking guide window.
:param `event`: on wxGTK, a `wx.WindowCreateEvent` event to process.
"""
self.SetShape(self.region)
if event is not None:
# Skip the event on wxGTK
event.Skip()
wx.CallAfter(wx.SafeYield, self, True)
def UpdateDockGuide(self, pos):
"""
Updates the docking guides images depending on the mouse position, using focused
images if the mouse is inside the docking guide or unfocused images if it is
outside.
:param `pos`: a `wx.Point` mouse position.
"""
if not self._useAero:
for target in self.GetChildren():
target.UpdateDockGuide(pos)
else:
lenRects = len(self._aeroRects)
for indx, rect in enumerate(self._aeroRects):
if rect.Contains(pos):
if self._allAeroBmps[indx] != self._aeroBmp:
if indx < lenRects - 1 or (indx == lenRects - 1 and self._valid):
self._aeroBmp = self._allAeroBmps[indx]
self.Refresh()
else:
self._aeroBmp = self._allAeroBmps[-1]
self.Refresh()
return
if self._aeroBmp != self._allAeroBmps[-1]:
self._aeroBmp = self._allAeroBmps[-1]
self.Refresh()
def HitTest(self, x, y):
"""
Checks if the mouse position is inside the target windows rect.
:param `x`: the `x` mouse position;
:param `y`: the `y` mouse position.
"""
if not self._useAero:
if self.targetLeft.GetScreenRect().Contains((x, y)):
return wx.LEFT
if self.targetTop.GetScreenRect().Contains((x, y)):
return wx.UP
if self.targetRight.GetScreenRect().Contains((x, y)):
return wx.RIGHT
if self.targetBottom.GetScreenRect().Contains((x, y)):
return wx.DOWN
if self.targetCenter.IsValid() and self.targetCenter.GetScreenRect().Contains((x, y)):
return wx.CENTER
else:
constants = [wx.LEFT, wx.UP, wx.RIGHT, wx.DOWN, wx.CENTER]
lenRects = len(self._aeroRects)
for indx, rect in enumerate(self._aeroRects):
if rect.Contains((x, y)):
                    if indx < lenRects - 1 or (indx == lenRects - 1 and self._valid):
return constants[indx]
return -1
def ValidateNotebookDocking(self, valid):
"""
Sets whether a pane can be docked on top of another to create an automatic
L{AuiNotebook}.
:param `valid`: whether a pane can be docked on top to another to form an automatic
L{AuiNotebook}.
"""
if not self._useAero:
if self.targetCenter.IsValid() != valid:
self.targetCenter.SetValid(valid)
self.targetCenter.Refresh()
else:
if self._valid != valid:
self._valid = valid
self.Refresh()
def AeroMove(self, pos):
"""
Moves the docking guide window to the new position.
:param `pos`: the new docking guide position.
"""
if not self._useAero:
return
useWhidbey = (GetManager(self.GetParent()).GetFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES) != 0
if useWhidbey:
sizeX, sizeY = whidbeySizeX, whidbeySizeY
else:
sizeX, sizeY = aeroguideSizeX, aeroguideSizeY
size = self.GetSize()
leftRect, topRect, rightRect, bottomRect, centerRect = self._aeroRects
thePos = pos + wx.Point((size.x-sizeY)/2, (size.y-sizeX)/2)
centerRect.SetPosition(thePos)
leftRect.SetPosition(thePos + wx.Point(-sizeY, 0))
topRect.SetPosition(thePos + wx.Point(0, -sizeY))
rightRect.SetPosition(thePos + wx.Point(sizeX, 0))
bottomRect.SetPosition(thePos + wx.Point(0, sizeX))
def OnEraseBackground(self, event):
"""
Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{AuiCenterDockingGuide}.
:param `event`: `wx.EraseEvent` to be processed.
        :note: This is intentionally empty to reduce flickering while drawing.
"""
pass
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for L{AuiCenterDockingGuide}.
:param `event`: a `wx.PaintEvent` to be processed.
"""
dc = wx.AutoBufferedPaintDC(self)
if self._useAero:
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetPen(wx.TRANSPARENT_PEN)
else:
dc.SetBrush(wx.Brush(colourTargetBackground))
dc.SetPen(wx.Pen(colourTargetBorder))
rect = self.GetClientRect()
dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
if self._useAero:
dc.DrawBitmap(self._aeroBmp, 0, 0, True)
if not self._valid:
diff = (self._useAero == 2 and [1] or [0])[0]
bmpX, bmpY = self._deniedBitmap.GetWidth(), self._deniedBitmap.GetHeight()
xPos, yPos = (rect.x + (rect.width)/2 - bmpX/2), (rect.y + (rect.height)/2 - bmpY/2)
dc.DrawBitmap(self._deniedBitmap, xPos+1, yPos+diff, True)
return
dc.SetPen(wx.Pen(colourTargetBorder, 2))
for pts in self._triangles:
dc.DrawLinePoint(pts[0], pts[1])
# ----------------------------------------------------------------------------
# AuiDockingHintWindow
# ----------------------------------------------------------------------------
class AuiDockingHintWindow(wx.Frame):
""" The original wxAUI docking window hint. """
def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition,
size=wx.Size(1, 1), style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |
wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,
name="auiHintWindow"):
"""
Default class constructor. Used internally, do not call it in your code!
:param `parent`: the L{AuiDockingGuide} parent;
:param `id`: the window identifier. It may take a value of -1 to indicate a default value.
:param `title`: the caption to be displayed on the frame's title bar;
:param `pos`: the window position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform;
:param `size`: the window size. A value of (-1, -1) indicates a default size, chosen by
either the windowing system or wxPython, depending on platform;
:param `style`: the window style;
:param `name`: the name of the window. This parameter is used to associate a name with the
item, allowing the application user to set Motif resource values for individual windows.
"""
if wx.Platform == '__WXMAC__' and style & wx.FRAME_SHAPED:
# Having the shaped frame causes the frame to not be visible
# with the transparent style hints.
style -= wx.FRAME_SHAPED
wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)
self._blindMode = False
self.SetBackgroundColour(colourHintBackground)
# Can't set background colour on a frame on wxMac
# so add a panel to set the colour on.
if wx.Platform == '__WXMAC__':
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.panel = wx.Panel(self)
sizer.Add(self.panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.panel.SetBackgroundColour(colourHintBackground)
self.Bind(wx.EVT_SIZE, self.OnSize)
def MakeVenetianBlinds(self):
"""
Creates the "venetian blind" effect if L{AuiManager} has the ``AUI_MGR_VENETIAN_BLINDS_HINT``
flag set.
"""
amount = 128
size = self.GetClientSize()
region = wx.Region(0, 0, size.x, 1)
for y in xrange(size.y):
# Reverse the order of the bottom 4 bits
j = (y & 8 and [1] or [0])[0] | (y & 4 and [2] or [0])[0] | \
(y & 2 and [4] or [0])[0] | (y & 1 and [8] or [0])[0]
if 16*j+8 < amount:
region.Union(0, y, size.x, 1)
self.SetShape(region)
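    # Hedged explanation (comment added for clarity): MakeVenetianBlinds implements a simple
    # ordered dither. Reversing the low 4 bits of each row index spreads the kept rows evenly
    # down the window, and the `16*j + 8 < amount` test keeps roughly amount/256 of them
    # (amount=128 keeps every other row), which produces the striped "blind" shape.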
def SetBlindMode(self, flags):
"""
Sets whether venetian blinds or transparent hints will be shown as docking hint.
This depends on the L{AuiManager} flags.
:param `flags`: the L{AuiManager} flags.
"""
self._blindMode = (flags & AUI_MGR_VENETIAN_BLINDS_HINT) != 0
if self._blindMode or not self.CanSetTransparent():
self.MakeVenetianBlinds()
self.SetTransparent(255)
else:
self.SetShape(wx.Region())
if flags & AUI_MGR_HINT_FADE == 0:
self.SetTransparent(80)
else:
self.SetTransparent(0)
def SetShape(self, region):
"""
If the platform supports it, sets the shape of the window to that depicted by `region`.
The system will not display or respond to any mouse event for the pixels that lie
outside of the region. To reset the window to the normal rectangular shape simply call
L{SetShape} again with an empty region.
:param `region`: the shape of the frame.
:note: Overridden for wxMac.
"""
if wx.Platform == '__WXMAC__':
# HACK so we don't crash when SetShape is called
return
else:
super(AuiDockingHintWindow, self).SetShape(region)
def Show(self, show=True):
"""
Show the hint window.
:param `show`: whether to show or hide the frame.
"""
super(AuiDockingHintWindow, self).Show(show)
if wx.Platform == '__WXMAC__':
            # Need to manually do layout since it's a borderless frame.
self.Layout()
def OnSize(self, event):
"""
Handles the ``wx.EVT_SIZE`` event for L{AuiDockingHintWindow}.
:param `event`: a `wx.SizeEvent` to be processed.
"""
if self._blindMode or not self.CanSetTransparent():
self.MakeVenetianBlinds()
# ---------------------------------------------------------------------------- #
# -- AuiFloatingFrame class implementation --
class AuiFloatingFrame(wx.MiniFrame):
""" AuiFloatingFrame is the frame class that holds floating panes. """
def __init__(self, parent, owner_mgr, pane=None, id=wx.ID_ANY, title="",
style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |
wx.FRAME_NO_TASKBAR | wx.CLIP_CHILDREN):
"""
Default class constructor. Used internally, do not call it in your code!
:param `parent`: the L{AuiFloatingFrame} parent;
:param `owner_mgr`: the L{AuiManager} that manages the floating pane;
:param `pane`: the L{AuiPaneInfo} pane that is about to float;
:param `id`: the window identifier. It may take a value of -1 to indicate a default value.
:param `title`: the caption to be displayed on the frame's title bar.
:param `style`: the window style.
"""
if pane and pane.IsResizeable():
style += wx.RESIZE_BORDER
if pane:
self._is_toolbar = pane.IsToolbar()
self._useNativeMiniframes = False
if AuiManager_UseNativeMiniframes(owner_mgr):
# On wxMac we always use native miniframes
self._useNativeMiniframes = True
style += wx.CAPTION + wx.SYSTEM_MENU
if pane.HasCloseButton():
style += wx.CLOSE_BOX
if pane.HasMaximizeButton():
style += wx.MAXIMIZE_BOX
if pane.HasMinimizeButton():
style += wx.MINIMIZE_BOX
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import re
import sys
import six
import pytest
import requests
import mock
from os.path import dirname
from os.path import join
from apprise import Apprise
from apprise import AppriseAsset
from apprise import AppriseAttachment
from apprise import NotifyBase
from apprise import NotifyType
from apprise import NotifyFormat
from apprise import NotifyImageSize
from apprise import __version__
from apprise import URLBase
from apprise import PrivacyMode
from apprise.plugins import SCHEMA_MAP
from apprise.plugins import __load_matrix
from apprise.plugins import __reset_matrix
from apprise.utils import parse_list
import inspect
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
# Sending notifications requires the coroutines to be awaited, so we need to
# wrap the original function when mocking it. But don't import for Python 2.
if not six.PY2:
import apprise.py3compat.asyncio as py3aio
else:
class py3aio:
def notify():
pass
# Attachment Directory
TEST_VAR_DIR = join(dirname(__file__), 'var')
def test_apprise():
"""
API: Apprise() object
"""
def do_notify(server, *args, **kwargs):
return server.notify(*args, **kwargs)
apprise_test(do_notify)
@pytest.mark.skipif(sys.version_info.major <= 2, reason="Requires Python 3.x+")
def test_apprise_async():
"""
API: Apprise() object asynchronous methods
"""
def do_notify(server, *args, **kwargs):
return py3aio.tosync(server.async_notify(*args, **kwargs))
apprise_test(do_notify)
def apprise_test(do_notify):
    # Calling __load_matrix() a second time (it is an internal function) causes it
    # to skip over content already loaded into our matrix and therefore exercises
    # other if/else parts of the code that aren't otherwise called
__load_matrix()
a = Apprise()
# no items
assert len(a) == 0
# Apprise object can also be directly tested with 'if' keyword
# No entries results in a False response
assert not a
# Create an Asset object
asset = AppriseAsset(theme='default')
# We can load the device using our asset
a = Apprise(asset=asset)
# We can load our servers up front as well
servers = [
'faast://abcdefghijklmnop-abcdefg',
'kodi://kodi.server.local',
]
a = Apprise(servers=servers)
# 2 servers loaded
assert len(a) == 2
# Apprise object can also be directly tested with 'if' keyword
# At least one entry results in a True response
assert a
# We can retrieve our URLs this way:
assert len(a.urls()) == 2
# We can add another server
assert a.add('mmosts://mattermost.server.local/'
'3ccdd113474722377935511fc85d3dd4') is True
assert len(a) == 3
# Try adding nothing but delimiters
assert a.add(',, ,, , , , ,') is False
# The number of servers added doesn't change
assert len(a) == 3
    # We can pop an object off of our stack by its index value:
obj = a.pop(0)
assert isinstance(obj, NotifyBase) is True
assert len(a) == 2
# We can retrieve elements from our list too by reference:
assert isinstance(a[0].url(), six.string_types) is True
# We can iterate over our list too:
count = 0
for o in a:
assert isinstance(o.url(), six.string_types) is True
count += 1
# verify that we did indeed iterate over each element
assert len(a) == count
# We can empty our set
a.clear()
assert len(a) == 0
# An invalid schema
assert a.add('this is not a parseable url at all') is False
assert len(a) == 0
# An unsupported schema
assert a.add(
'invalid://we.just.do.not.support.this.plugin.type') is False
assert len(a) == 0
# A poorly formatted URL
assert a.add('json://user:@@@:bad?no.good') is False
assert len(a) == 0
# Add a server with our asset we created earlier
assert a.add('mmosts://mattermost.server.local/'
'3ccdd113474722377935511fc85d3dd4', asset=asset) is True
# Clear our server listings again
a.clear()
# No servers to notify
assert do_notify(a, title="my title", body="my body") is False
class BadNotification(NotifyBase):
def __init__(self, **kwargs):
super(BadNotification, self).__init__(**kwargs)
# We fail whenever we're initialized
raise TypeError()
def url(self):
# Support URL
return ''
@staticmethod
def parse_url(url, *args, **kwargs):
# always parseable
return NotifyBase.parse_url(url, verify_host=False)
class GoodNotification(NotifyBase):
def __init__(self, **kwargs):
super(GoodNotification, self).__init__(
notify_format=NotifyFormat.HTML, **kwargs)
def url(self):
# Support URL
return ''
def send(self, **kwargs):
# Pretend everything is okay
return True
@staticmethod
def parse_url(url, *args, **kwargs):
# always parseable
return NotifyBase.parse_url(url, verify_host=False)
# Store our bad notification in our schema map
SCHEMA_MAP['bad'] = BadNotification
# Store our good notification in our schema map
SCHEMA_MAP['good'] = GoodNotification
    # Just to explain what is happening here: the URL parses properly, but we
    # fail when we go to create an instance of the notification object.
assert a.add('bad://localhost') is False
assert len(a) == 0
# We'll fail because we've got nothing to notify
assert do_notify(
a, title="my title", body="my body") is False
# Clear our server listings again
a.clear()
assert a.add('good://localhost') is True
assert len(a) == 1
    # A bad notification type is still allowed, as it is presumed the user
    # knows what they're doing
assert do_notify(
a, title="my title", body="my body", notify_type='bad') is True
    # No title/body combos
assert do_notify(a, title=None, body=None) is False
assert do_notify(a, title='', body=None) is False
assert do_notify(a, title=None, body='') is False
# As long as one is present, we're good
assert do_notify(a, title=None, body='present') is True
assert do_notify(a, title='present', body=None) is True
assert do_notify(a, title="present", body="present") is True
# Send Attachment with success
attach = join(TEST_VAR_DIR, 'apprise-test.gif')
assert do_notify(
a, body='body', title='test', notify_type=NotifyType.INFO,
attach=attach) is True
# Send the attachment as an AppriseAttachment object
assert do_notify(
a, body='body', title='test', notify_type=NotifyType.INFO,
attach=AppriseAttachment(attach)) is True
    # test an invalid attachment
assert do_notify(
a, body='body', title='test', notify_type=NotifyType.INFO,
attach='invalid://') is False
    # Repeat the same tests as above, but this time access the object directly;
    # this yields the same results:
assert do_notify(
a[0], body='body', title='test', notify_type=NotifyType.INFO,
attach=attach) is True
# Send the attachment as an AppriseAttachment object
assert do_notify(
a[0], body='body', title='test', notify_type=NotifyType.INFO,
attach=AppriseAttachment(attach)) is True
    # test an invalid attachment
assert do_notify(
a[0], body='body', title='test', notify_type=NotifyType.INFO,
attach='invalid://') is False
class ThrowNotification(NotifyBase):
def notify(self, **kwargs):
            # Simulate a failure by raising an unexpected exception
raise TypeError()
def url(self):
# Support URL
return ''
class RuntimeNotification(NotifyBase):
def notify(self, **kwargs):
            # Simulate a failure by raising a RuntimeError
raise RuntimeError()
def url(self):
# Support URL
return ''
class FailNotification(NotifyBase):
def notify(self, **kwargs):
            # Simulate a failed notification by returning False
return False
def url(self):
# Support URL
return ''
# Store our bad notification in our schema map
SCHEMA_MAP['throw'] = ThrowNotification
# Store our good notification in our schema map
SCHEMA_MAP['fail'] = FailNotification
# Store our good notification in our schema map
SCHEMA_MAP['runtime'] = RuntimeNotification
for async_mode in (True, False):
# Create an Asset object
asset = AppriseAsset(theme='default', async_mode=async_mode)
# We can load the device using our asset
a = Apprise(asset=asset)
assert a.add('runtime://localhost') is True
assert a.add('throw://localhost') is True
assert a.add('fail://localhost') is True
assert len(a) == 3
# Test when our notify both throws an exception and or just
# simply returns False
assert do_notify(a, title="present", body="present") is False
    # Create a Notification that throws an unexpected exception
class ThrowInstantiateNotification(NotifyBase):
def __init__(self, **kwargs):
            # Simulate a failure during instantiation
raise TypeError()
def url(self):
# Support URL
return ''
SCHEMA_MAP['throw'] = ThrowInstantiateNotification
# Reset our object
a.clear()
assert len(a) == 0
# Test our socket details
# rto = Socket Read Timeout
# cto = Socket Connect Timeout
plugin = a.instantiate('good://localhost?rto=5.1&cto=10')
assert isinstance(plugin, NotifyBase)
assert plugin.socket_connect_timeout == 10.0
assert plugin.socket_read_timeout == 5.1
plugin = a.instantiate('good://localhost?rto=invalid&cto=invalid')
assert isinstance(plugin, NotifyBase)
assert | |
from __future__ import division
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import cPickle as pickle
import numpy as np
import os, sys
try:
from scipy.stats import linregress
except ImportError:
print "Didn't load linregress from scipy.stats"
class Collecting_clashscore_data(object):
def __init__(self):
"""
Collect and analyse clashscore test data
good test result should have only one line
x1::x2::x3::x4::x5
Where:
x1 (0): pdb_file_name
x2 (1): macro_molecule_cctbx_clashscore
x3 (2): symmetry_cctbx_clashscore
x4 (3): total_cctbx_clashscore
x5 (4): probe_clashscore
    if test results contain only -1: the PDB file was not found
    if test results contain only -2: there were issues getting the
    clashscore and a proper structure factor file was not available
    if test results start with "Sorry", set values to -3
"""
self.current_dir = os.getcwd()
self.working_path = ''
self.data_file_name = 'test_data'
self.clean_data_file_name = 'test_clean_data'
self.data_dict_file_name = 'test_data_dict'
self.files_with_problem_file_name = 'files_with_issues.txt'
self.data = []
self.clean_data = []
self.pdb_id_with_issues = []
self.data_dict = {}
self.queue_data_path = "/net/cci/youval/work/work/Clashes/queue_clash_compare"
# containers for results
self.data_sym_clashes = {}
self.data_outliers = {} # when cctbx very different than probe
#
self.structure_where_pdb_not_found = []
self.structure_with_error_when_processing_phenix_clashscore = []
self.other_issues_with_results = []
def set_working_path(self,all_data=True):
"""
Set working and data folders
When all_data=False use the data for the test on macro molecule (not
the complete model)
C:\Phenix\Dev\Work\work\Clashes\CCTBX_PROBE_compare
"""
r1 = r'C:\Users\Youval\Google Drive\Documents\LBNL\phenix\publications'
r1 += r'\news letter\clash score\related code'
data_1 = r1 + r'\pdb_scan_result_macro_molecule_files'
data_2 = r1 + r'\pdb_scan_result_complete_files'
osType = sys.platform
if osType.startswith('win'):
assert os.path.isdir(data_1)
assert os.path.isdir(data_2)
if all_data:
self.working_path = data_2
print data_2
else:
self.working_path = data_1
print data_1
else:
path = '/net/cci/youval/work/work/Clashes'
self.working_path = path
# convert the path to python format
self.working_path = os.path.realpath(self.working_path)
os.chdir(self.working_path)
def change_to_original_path(self):
""" change current directory to original one """
os.chdir(self.current_dir)
def get_test_data(self):
"""
    If the test_data and test_data_dict files exist, use them; otherwise create them
"data_dict" is a dictionary for the "clean_data"
"data" contains all data collected
"""
have_data = os.path.isfile(self.data_file_name)
have_data &= os.path.isfile(self.data_dict_file_name)
if have_data:
print 'using existing data files'
self.data = pickle.load(open(self.data_file_name,'r'))
self.clean_data = pickle.load(open(self.clean_data_file_name,'r'))
self.clean_data = [x for x in self.clean_data if x[1] >= 0 ]
self.data_dict = pickle.load(open(self.data_dict_file_name,'r'))
self.pdb_id_with_issues = open(
self.files_with_problem_file_name,'r').read().splitlines()
print "Number of good file with issues: ",len(self.pdb_id_with_issues)
print "Total number files processed: ",len(self.data)
else:
print 'getting new data from {}'.format(self.queue_data_path)
# check if data folder exist
if os.path.isdir(self.queue_data_path):
# Read files in directory_path
files = os.listdir(self.queue_data_path)
# collect only the files that starts with log_
files = [x for x in files if x.startswith('log_')]
print "Number of log files: ",len(files)
for fn in files:
d = open(os.path.join(self.queue_data_path, fn), "r").readlines()
if d:
raw_data = d[0].split('::')
else:
raw_data = []
if (len(d)==1) and (len(raw_data) == 5):
pdb_id = raw_data[0]
data = [round(float(x),1) for x in raw_data[1:]]
data = [pdb_id] + data
else:
# Some issue with results
pdb_id = fn[-4:]
data = [pdb_id,-3,-3,-3,-3]
self.data.append(data)
# clean data, collect good data
print 'Total number data records: {}'.format(len(self.data))
for d in self.data:
pdb_id = d[0]
if d[1] >= 0:
self.clean_data.append(d)
self.data_dict[pdb_id] = d
else:
self.pdb_id_with_issues.append(pdb_id)
print "Number of good records: ",len(self.clean_data)
pickle.dump(self.data, open(self.data_file_name,'w'))
pickle.dump(self.clean_data, open(self.clean_data_file_name,'w'))
pickle.dump(self.data_dict, open(self.data_dict_file_name,'w'))
pdb_id_with_issues = '\n'.join(self.pdb_id_with_issues)
open(self.files_with_problem_file_name,'w').write(pdb_id_with_issues)
print 'Number of good data points: {}'.format(len(self.clean_data))
for d in self.data:
pdb_id = d[0]
if d[1] == -1:
self.structure_where_pdb_not_found.append(pdb_id)
elif d[1] == -2:
self.structure_with_error_when_processing_phenix_clashscore.append(pdb_id)
elif d[1] == -3:
self.other_issues_with_results.append(pdb_id)
print "structure_where_pdb_not_found: ",len(self.structure_where_pdb_not_found)
print "structure_with_error_when_processing_phenix_clashscore: ",\
len(self.structure_with_error_when_processing_phenix_clashscore)
n_to_print = min(10,len(self.structure_with_error_when_processing_phenix_clashscore))
print self.structure_with_error_when_processing_phenix_clashscore[:n_to_print]
print "other_issues_with_results: ",len(self.other_issues_with_results)
print self.other_issues_with_results
print '-'*50
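  # Hedged usage sketch (comments only; the call sequence is an assumption based on the
  # methods defined in this class):
  #   c = Collecting_clashscore_data()
  #   c.set_working_path(all_data=True)    # choose the data folder
  #   c.get_test_data()                    # load cached results or collect them from queue logs
  #   c.plot_reference(ignore_delta=100)   # macro-molecule CCTBX vs PROBE clashscore
  #   c.plot_total(ignore_delta=100)       # total CCTBX vs PROBE clashscore
  #   c.change_to_original_path()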
def plot_reference(self,ignore_delta=100):
"""
    Compare CCTBX macro molecule non-bonded clashscore to PROBE clashscore
Args:
ignore_delta (float): ignore outlier points where
abs(cctbx_score - probe_score) > ignore_delta
"""
if self.clean_data:
# figure limits
max_x = 100
max_y = 100
fontsize = 20
# get data
cctbx_prob = [(x[4],x[1]) for x in self.clean_data
if (abs(x[1]-x[4])<ignore_delta)]
outliers = [x[0] for x in self.clean_data
if (abs(x[1]-x[4])>20) and (x[4] < 20)]
n_ignored = len(self.clean_data)-len(cctbx_prob)
n_all = len(self.clean_data)
print '(macro. mol.) Number of data points ignored: ',n_ignored
print outliers
cctbx_prob.sort()
# cctbx
cctbx_score = [x[1] for x in cctbx_prob]
probe_score = [x[0] for x in cctbx_prob]
# Get linear fitting parameters
x_fit = [0,max_x]
# m,b = plb.polyfit(cctbx_score, probe_score, 1)
m,b ,r_value,_,_ = linregress(cctbx_score, probe_score)
print 'r-value: ',r_value
y_fit = [b,m * max_x + b]
# gr = 1.61803398875 # Golden ratio
gr = 1
h = 12 # figure height
w = gr*h # figure width
fig = plt.figure(figsize=(w,h))
plt.plot(cctbx_score,probe_score,'.b',x_fit,y_fit,'y',linewidth=2)
plt.xticks(fontsize=fontsize - 2)
plt.yticks(fontsize=fontsize - 2)
# plt.title(
# 'CCBTX macro molecule vs PROBE non-bonded clashscore',
# fontsize=fontsize)
plt.ylabel('PROBE clashscore',fontsize=fontsize)
plt.xlabel('Non-bonded overlaps per 1000 atoms',fontsize=fontsize)
ignore_str = 'Ignore {} of {} outlier points where\n'
ignore_str += 'abs(macro_mol cctbx_score - probe_score) >= {}'
ignore_str = ignore_str.format(n_ignored,n_all,ignore_delta)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# plt.text(2,max_y - 2,ignore_str,
# fontsize=fontsize,verticalalignment='top',bbox=props)
plt.xlim([0,max_x])
plt.ylim([0,max_y])
plt.show()
fig.savefig('cctbx_macro_mol_vs_probe.png')
      print 'Linear fit info: PROBE = {} * CCTBX + {}'.format(m,b)
print ignore_str
print '-'*50
else:
print 'Load data before attempting to plot'
def plot_total(self,ignore_delta=100):
"""
    Compare CCTBX total_cctbx_clashscore to PROBE clashscore
Args:
ignore_delta (float): ignore outlier points where
abs(cctbx_score - probe_score) > ignore_delta
"""
if self.clean_data:
# figure limits
max_x = 100
max_y = 100
fontsize = 20
# get data
assert len(self.clean_data[0]) == 5
cctbx_prob = [(x[4],x[3]) for x in self.clean_data
if abs(x[4]-x[3])<ignore_delta]
n_ignored = len(self.clean_data)-len(cctbx_prob)
n_all = len(self.clean_data)
print 'Number of data points ignored: ',n_ignored
cctbx_prob.sort()
# cctbx
cctbx_score = [x[1] for x in cctbx_prob]
probe_score = [x[0] for x in cctbx_prob]
# Get linear fitting parameters
x_fit = [0,max_x]
# m,b = plb.polyfit(cctbx_score, probe_score, 1)
m,b ,r_value,_,_ = linregress(cctbx_score, probe_score)
print 'r-value: ',r_value
y_fit = [b,m * max_x + b]
#gr = 1.61803398875 # Golden ratio
gr = 1
h = 12 # figure height
w = gr*h # figure width
fig = plt.figure(figsize=(w,h))
plt.plot(cctbx_score,probe_score,'.b',x_fit,y_fit,'y',linewidth=2)
plt.xticks(fontsize=fontsize - 2)
plt.yticks(fontsize=fontsize - 2)
# plt.title(
# 'CCBTX total vs PROBE non-bonded clashscore',fontsize=fontsize)
plt.ylabel('PROBE clashscore',fontsize=fontsize)
plt.xlabel( 'Non-bonded overlaps per 1000 atoms', fontsize=fontsize)
ignore_str = 'Ignore {} of {} outlier points where\n'
ignore_str += 'abs(clashscore difference) >= {}'
ignore_str = ignore_str.format(n_ignored,n_all,ignore_delta)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# plt.text(2,max_y - 2,ignore_str,
# fontsize=fontsize,verticalalignment='top',bbox=props)
plt.xlim([0,max_x])
plt.ylim([0,max_y])
plt.show()
fig.savefig('cctbx_total_vs_probe.png')
      print 'Linear fit all data, info: PROBE = {} * CCTBX + {}'.format(m,b)
print ignore_str
print '-'*50
else:
print 'Load data before attempting to plot'
def plot_sym(self,ignore_delta=100):
"""
    Plot CCTBX total clashscore and symmetry clashscore vs. PROBE
    clashscore
Args:
ignore_delta (float): ignore outlier points where
abs(cctbx_score - probe_score) > ignore_delta
"""
if self.clean_data:
# figure limits
max_x = 100
max_y = 100
fontsize = 20
# get data
assert len(self.clean_data[0]) == 5
      # x[4] : probe_clashscore
      # x[3] : total_cctbx_clashscore
      # x[2] : symmetry_cctbx_clashscore
cctbx_prob = [(x[4],x[3],x[2]) for x in self.clean_data
if abs(x[4]-x[3])<ignore_delta]
n_ignored = len(self.clean_data)-len(cctbx_prob)
n_all = len(self.clean_data)
print 'Number of data points ignored: ',n_ignored
cctbx_prob.sort()
# cctbx
cctbx_score = [x[1] for x in cctbx_prob]
cctbx_sym = [x[2] for x in cctbx_prob]
probe_score = [x[0] for x in cctbx_prob]
# Get linear fitting parameters
x_fit = [0,max_x]
# m,b = plb.polyfit(cctbx_score, probe_score, 1)
m,b ,r_value,_,_ = linregress(cctbx_score, probe_score)
print 'r-value: ',r_value
y_fit = [b,m * max_x + b]
      print 'Linear fit info: PROBE = {} * CCTBX + {}'.format(m,b)
print '-'*50
# create plot
# gr = 1.61803398875 # Golden ratio
plt.close('all')
gr = 1
h = 4 # figure height
w = gr*2*h # figure width
# setup subplots
fig = plt.figure(figsize=(8.3,8.2))
# gs = gridspec.GridSpec(2,1,width_ratios=[1,1],height_ratios=[2,1])
gs = gridspec.GridSpec(2,1,height_ratios=[2,1])
# gs.update(left=0.05, right=0.48, wspace=0.05)
ax1 = plt.subplot(gs[0,0])
ax2 = plt.subplot(gs[1,0])
ax1.plot(cctbx_score,probe_score,'.b',x_fit,y_fit,'y',linewidth=2)
      ax1.tick_params(axis='both', labelsize=fontsize)
# ax1.set_title(
# 'Clashscores and symmetry related clashes',
# fontsize=fontsize)
ax1.set_ylabel('PROBE clashscore',fontsize=fontsize)
ax1.set_xlabel('Non-bonded overlaps per 1000 atoms',fontsize=fontsize)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text_params = {'fontsize':fontsize,'verticalalignment':'top','bbox':props}
ax1.text(2,max_y - 2,'a',**text_params)
ax1.set_xlim([0,max_x])
ax1.set_ylim([0,max_y])
# second plot
n_files = np.arange(0,len(cctbx_sym))
ax2.plot(cctbx_sym,n_files,'.g')
ax2.text(2,max_y - 2,'b',**text_params)
ax2.set_ylabel('Files',fontsize=fontsize)
ax2.set_xlabel('CCTBX symmetry clashscore',fontsize=fontsize)
ax2.tick_params(axis='both',labelsize=fontsize)
ax2.set_ylim([0,len(cctbx_sym)])
ax2.set_xlim([0,max_y])
ax2.set_yticks((5000,20000))
ax2.set_yticklabels(('5k','20k'))
plt.show()
# fig.savefig('cctbx_simple_vs_probe.png')
else:
print 'Load data before attempting to plot'
def hist_sym(self,prob_limit=1000):
"""
CCTBX symmetry clashscore histogram
- Considering clashes where PROBE clashscore is <= prob_limit
- All symmetry clashscores larger than 9 will | |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# TODO: import only necessary tensorflow functions
import tensorflow as tf
import tensorflow_datasets as tfds
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay,\
roc_curve, roc_auc_score, classification_report, accuracy_score, precision_score, recall_score
# TODO: Add docstrings
# Loads the Patch Camelyon dataset
def load_pcam(data_dir=None):
pcam, pcam_info = tfds.load("patch_camelyon", with_info=True, data_dir=data_dir)
print(pcam_info)
return pcam, pcam_info
# Converts images to prepare them for modelling
def convert_sample(sample):
# Credit: <NAME>
image, label = sample['image'], sample['label']
image = tf.image.convert_image_dtype(image, tf.float32)
label = tf.one_hot(label, 2, dtype=tf.float32)
return image, label
# Alternative to convert_sample which also converts images to grayscale
def convert_sample_grayscale(sample):
image, label = sample['image'], sample['label']
image = tf.image.rgb_to_grayscale(image, name=None)
image = tf.image.convert_image_dtype(image, tf.float32)
label = tf.one_hot(label, 2, dtype=tf.float32)
return image, label
# Substitute for ImageDataGenerator which gets along with the TensorFlow Dataset object
def build_pipelines(pcam, grayscale=False):
# Uses the grayscale version of convert_sample
if grayscale:
train_pipeline = pcam['train'].map(convert_sample_grayscale, num_parallel_calls=8).shuffle(1024).repeat().batch(64).prefetch(2)
valid_pipeline = pcam['validation'].map(convert_sample_grayscale, num_parallel_calls=8).repeat().batch(128).prefetch(2)
test_pipeline = pcam['test'].map(convert_sample_grayscale, num_parallel_calls=8).batch(128).prefetch(2)
# Uses the normal version of convert_sample
else:
# Credit: <NAME>
train_pipeline = pcam['train'].map(convert_sample, num_parallel_calls=8).shuffle(1024).repeat().batch(64).prefetch(2)
valid_pipeline = pcam['validation'].map(convert_sample, num_parallel_calls=8).repeat().batch(128).prefetch(2)
test_pipeline = pcam['test'].map(convert_sample, num_parallel_calls=8).batch(128).prefetch(2)
return train_pipeline, valid_pipeline, test_pipeline
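# Hedged training sketch (illustrative; the model architecture, epoch count, and file path are
# assumptions, not part of this module):
def _example_training_run(model, data_dir='data/'):
    """Wire the pipelines into Keras fit(); steps must be explicit because repeat() is infinite."""
    pcam, pcam_info = load_pcam(data_dir=data_dir)
    train_pipeline, valid_pipeline, test_pipeline = build_pipelines(pcam)
    # 262144 train images / batch 64 = 4096 steps; 32768 validation images / batch 128 = 256 steps
    history = model.fit(train_pipeline, validation_data=valid_pipeline,
                        steps_per_epoch=4096, validation_steps=256, epochs=10)
    save_history(pd.DataFrame(history.history), 'data/models/history/cnn1_history.csv')
    return history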
# Export the training history to a .csv file
def save_history(hist_df, filepath):
# Sample filepath: 'data/models/history/cnn1_history.csv'
hist_csv_file = filepath
with open(hist_csv_file, mode='w') as f:
hist_df.to_csv(f)
# Loads model training history .csv into a pandas dataframe
def load_history(filepath):
# Sample filepath: 'data/models/history/cnn1_history.csv'
hist_df = pd.read_csv(filepath, index_col=0)
return hist_df
# Plot the training accuracy and loss from training history
def plot_history(hist_df, figsize=(10,4), title=None, save=False, filepath=None):
# Create subplots
plt.subplots(1, 2, figsize=figsize)
# Creates a title for the whole plot
plt.suptitle(title, fontsize=24)
# Plot accuracies for train and validation sets
plt.subplot(1, 2, 1)
plt.plot(hist_df['accuracy'], label='Train', marker='o')
plt.plot(hist_df['val_accuracy'], label='Validation', marker='o')
plt.title('Training and Validation Accuracy', size=20)
plt.xlabel('Epoch', size=16)
plt.ylabel('Accuracy', size=16)
plt.legend()
# Plot losses
plt.subplot(1, 2, 2)
plt.plot(hist_df['loss'], label='Train', marker='o')
plt.plot(hist_df['val_loss'], label='Validation', marker='o')
plt.title('Training and Validation Loss', size=20)
plt.xlabel('Epoch', size=16)
plt.ylabel('Loss', size=16)
plt.legend()
# This ensures the subplots do not overlap
plt.tight_layout()
if save:
# Sample filepath: 'data/plots/cnn1_acc_loss_plot.png'
plt.savefig(filepath)
# Show the subplots
plt.show()
# Plot the confusion matrix for a model
def plot_cf_matrix(y_true, y_pred, normalize=True, save=False, filepath=None):
cf_matrix = confusion_matrix(y_true, y_pred)
# Turns the values in the confusion matrix into percentages
if normalize:
        # Divide each row by its true-class count (keepdims keeps the division row-wise)
        cf_matrix = cf_matrix / cf_matrix.sum(axis=1, keepdims=True)
ConfusionMatrixDisplay(cf_matrix, display_labels=['Healthy (0)', 'Cancer (1)']).plot()
if save:
# Sample filepath: 'data/plots/cnn1_cf_matrix.png'
plt.savefig(filepath)
plt.show()
# Plot the ROC curve and calculate AUC
def plot_roc_curve(y_true, y_proba, save=False, filepath=None):
if y_proba.shape[1] == 2:
# y_proba is still one-hot encoded, so grab only the class 1 probabilities
y_proba = np.array([i[1] for i in y_proba])
fprs, tprs, thresholds = roc_curve(y_true, y_proba)
roc_auc = roc_auc_score(y_true, y_proba)
plt.figure(figsize=(8, 6))
plt.plot(fprs, tprs, color='darkorange',
lw=2, label='AUC = %0.2f' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlabel('False Positive Rate (FPR)', size=16)
plt.ylabel('True Positive Rate (TPR)', size=16)
plt.title('ROC Curve for Cancer Detection', size=20)
plt.legend(loc="best")
if save:
# Sample filepath: 'data/plots/cnn1_roc.png'
plt.savefig(filepath)
plt.show()
print(f'Area under curve (AUC):{roc_auc}')
# Create a list of ground truth labels from a specified data split
def generate_y_true(pcam, split='test'):
# Initialize iterator so it starts from the beginning
iterator = pcam[split].__iter__()
# Create an empty list to store the labels
y_true = []
if split == 'train':
# There are 262144 images in the training set
for i in range(262144):
y_true.append(int(iterator.get_next()['label']))
else:
# There are 32768 images in the validation and test sets
for i in range(32768):
y_true.append(int(iterator.get_next()['label']))
return np.array(y_true)
# Get predictions as probabilities from a trained model
def generate_y_proba(model, test_pipeline, class_1=False, save=False, filepath=None):
y_proba = model.predict(test_pipeline)
if class_1:
# Return just the class_1 predictions rather than one-hot encoded predictions
y_proba = np.array([i[1] for i in y_proba])
# Save y_proba to a .csv file to load later without training the model
if save:
y_proba_df = pd.DataFrame(y_proba)
# Sample filepath: 'data/models/cnn1_y_proba.csv'
y_proba_csv_file = filepath
with open(y_proba_csv_file, mode='w') as f:
y_proba_df.to_csv(f)
return y_proba
# Load y_proba from a .csv file
def load_y_proba(filepath):
# Sample filepath: 'data/models/cnn1_y_proba.csv'
y_proba = pd.read_csv(filepath, index_col=0).to_numpy()
return y_proba
# Get predictions based on y_proba with the ability to change the decision threshold
def generate_y_pred(y_proba, threshold=0.5):
if y_proba.shape[1] == 2:
# y_proba is still one-hot encoded, so grab only the class 1 probabilities
y_proba = np.array([i[1] for i in y_proba])
# Predict the positive class when the probability exceeds the given threshold
y_pred = np.where(y_proba >= threshold, 1, 0)
return y_pred
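# Hedged evaluation sketch (illustrative; the threshold and file path are assumptions):
def _example_evaluation(model, pcam, test_pipeline):
    """Chain the helpers above to score a trained model on the test split."""
    y_true = generate_y_true(pcam, split='test')
    y_proba = generate_y_proba(model, test_pipeline, save=True,
                               filepath='data/models/cnn1_y_proba.csv')
    # a lower threshold trades precision for recall on the cancer (class 1) label
    y_pred = generate_y_pred(y_proba, threshold=0.3)
    plot_cf_matrix(y_true, y_pred)
    plot_roc_curve(y_true, y_proba)
    print_classification_report(y_true, y_pred)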
# Print test set accuracy score
def print_test_accuracy(y_true, y_pred):
print(accuracy_score(y_true, y_pred))
# Print the percentage the pathologist's workload has been reduced by pre-screening healthy images
def print_workload_reduction(y_pred):
size_of_test_set = 32768
# Cancerous images are class 1 predictions
cancer_images = np.count_nonzero(y_pred)
# Healthy images are class 0 predictions and are discarded
healthy_images = size_of_test_set - cancer_images
# Workload reduction is the percent of predicted healthy images expressed as a percentage of the test set
workload_reduction = round((100*healthy_images / size_of_test_set), 1)
print(f'{workload_reduction}%')
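# Hedged worked example (numbers are hypothetical): if 24576 of the 32768 test images are
# predicted cancerous, the remaining 8192 are discarded, so the workload is reduced by
# 8192/32768 = 25.0%.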
# Print the classification report to get precision, accuracy, and f1 score to 4 decimal places
def print_classification_report(y_true, y_pred):
print(classification_report(y_true, y_pred, digits=4))
# Plot a 3x3 grid of sample images from a given data split, with the option for grayscale and saving the figure
def plot_examples(pcam, split='train', grayscale=False, save=False, filepath=None):
iterator = pcam[split].__iter__()
fig, ax = plt.subplots(3, 3, figsize=(10,10))
# Plot title
plt.suptitle(split + ' set samples', size=20)
for i in range(9):
ax = plt.subplot(3, 3, i+1)
# Get the next image from the iterator
sample_image = iterator.get_next()
# Extract the image and its label
image = sample_image['image']
label = int(sample_image['label'])
# Convert the image to grayscale if specified
if grayscale:
image = tf.image.rgb_to_grayscale(image)
print(image.shape)
# Need to change the colormap of matplotlib to 'Greys_r' or else the images look yellow/green when plotted
ax.imshow(image, cmap='Greys_r')
else:
ax.imshow(image)
plt.title('Class Label: '+ str(label), size=16)
# Create a green rectangle patch to highlight the central 32 x 32 pixel region
# I couldn't find documentation for how the linewidth is extended, it's possible I've covered a couple pixels of the central region
rect = patches.Rectangle((31, 31), 32, 32, linewidth=3, edgecolor='g', facecolor='none')
# Add the patch to the axes
ax.add_patch(rect)
# Need to specify values for rect=[left, bottom, right, top] to ensure suptitle isn't overlapping the images
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
if save:
# Sample filepath: 'data/plots/example_images.png'
plt.savefig(filepath)
plt.show()
# Plot a 3x3 grid of images misclassified by the model, with the option to view different samples of images
def plot_misclassified_images(pcam, y_true, y_pred, grayscale=False, image_index=0, save=False, filepath=None):
# Create an iterator object to iterate through images in the test set
test_iterator = pcam['test'].__iter__()
images_plotted = 0
    # If image_index is set, skip ahead so the first image loaded inside the while loop is the one at that index
    # i.e. if image_index = 10, we consume the first 10 images here, so the next get_next() call returns the image at index 10
for i in range(image_index):
next_image = test_iterator.get_next()
fig, ax = plt.subplots(3, 3, figsize=(10,10))
# Title for the entire plot
plt.suptitle('Misclassified Images from the Test Set', size=20)
while True:
next_image = test_iterator.get_next()
image = next_image['image']
label = int(next_image['label'])
# If the image was misclassified
if y_true[image_index] != y_pred[image_index]:
ax = plt.subplot(3, 3, images_plotted+1)
if grayscale:
image = tf.image.rgb_to_grayscale(image)
# Need to change the colormap of matplotlib to 'Greys_r' or else the images look yellow/green when plotted
ax.imshow(image, cmap='Greys_r')
else:
ax.imshow(image)
# Title format for image #1 which was predicted class 1 but is really class 0:
# Image 1
# Predicted Label: 1 (0)
title = f'Image {str(image_index)}\nPredicted Label: {str(y_pred[image_index])} ({str(label)})'
plt.title(title, size=16)
# Create a green rectangle patch to highlight the central 32 x 32 pixel region
            # The linewidth is drawn centred on the rectangle's edge, so it may cover a couple of pixels of the central region
rect = patches.Rectangle((31, 31), 32, 32, linewidth=3, edgecolor='g', facecolor='none')
# Add the patch to the axes
ax.add_patch(rect)
images_plotted += 1
# Stop the loop after | |
""" Wrapper function for performing CCI analysis, varrying the analysis based on
the inputted data / state of the anndata object.
"""
import os
import numba
import numpy as np
import pandas as pd
from typing import Union
from anndata import AnnData
from .base import calc_neighbours, get_lrs_scores, calc_distance
from .permutation import perform_spot_testing
from .go import run_GO
from .het import (
count,
get_data_for_counting,
get_interaction_matrix,
get_interaction_pvals,
)
from statsmodels.stats.multitest import multipletests
################################################################################
# Functions related to Ligand-Receptor interactions #
################################################################################
def load_lrs(names: Union[str, list, None] = None, species: str = "human") -> np.array:
"""Loads inputted LR database, & concatenates into consistent database set of pairs without duplicates. If None loads 'connectomeDB2020_lit'.
Parameters
----------
names: list Databases to load, options: 'connectomeDB2020_lit' (literature verified), 'connectomeDB2020_put' (putative). If more than one specified, loads all & removes duplicates.
species: str Format of the LR genes, either 'human' or 'mouse'.
Returns
-------
lrs: np.array lr pairs from the database in format ['L1_R1', 'LN_RN']
"""
if type(names) == type(None):
names = ["connectomeDB2020_lit"]
if type(names) == str:
names = [names]
path = os.path.dirname(os.path.realpath(__file__))
dbs = [pd.read_csv(f"{path}/databases/{name}.txt", sep="\t") for name in names]
lrs_full = []
for db in dbs:
lrs = [f"{db.values[i,0]}_{db.values[i,1]}" for i in range(db.shape[0])]
lrs_full.extend(lrs)
lrs_full = np.unique(lrs_full)
# If dealing with mouse, need to reformat #
if species == "mouse":
genes1 = [lr_.split("_")[0] for lr_ in lrs_full]
genes2 = [lr_.split("_")[1] for lr_ in lrs_full]
lrs_full = np.array(
[
genes1[i][0]
+ genes1[i][1:].lower()
+ "_"
+ genes2[i][0]
+ genes2[i][1:].lower()
for i in range(len(lrs_full))
]
)
return lrs_full
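# Hedged usage sketch: load both connectomeDB2020 lists named in the docstring above and
# inspect the number of unique LR pairs; nothing beyond the documented arguments is assumed.
#   lrs = load_lrs(names=["connectomeDB2020_lit", "connectomeDB2020_put"], species="mouse")
#   print(len(lrs), lrs[:3])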
def grid(adata, n_row: int = 10, n_col: int = 10, use_label: str = None):
"""Creates a new anndata representing a gridded version of the data; can be
used upstream of CCI pipeline. NOTE: intended use is for single cell
spatial data, not Visium or other lower resolution tech.
Parameters
----------
adata: AnnData
The data object.
n_row: int
The number of rows in the grid.
n_col: int
The number of columns in the grid.
use_label: str
The cell type labels in adata.obs to join together & save as deconvolution data.
Returns
-------
grid_data: AnnData
        Expression data equivalent to adata, except values have been summed across the
        cells that fall within each grid bin.
"""
# Retrieving the coordinates of each grid #
n_squares = n_row * n_col
cell_bcs = adata.obs_names.values
xs, ys = adata.obs["imagecol"].values, adata.obs["imagerow"].values
grid_counts, xedges, yedges = np.histogram2d(xs, ys, bins=[n_col, n_row])
grid_expr = np.zeros((n_squares, adata.shape[1]))
grid_coords = np.zeros((n_squares, 2))
grid_bcs = []
grid_cell_counts = []
gridded_cells = []
cell_grid = []
# If use_label specified, then will generate deconvolution information
if type(use_label) != type(None):
cell_labels = adata.obs[use_label].values.astype(str)
cell_set = np.unique(cell_labels)
cell_info = np.zeros((n_squares, len(cell_set)))
# generate grids from top to bottom and left to right
n = 0
for i in range(n_col):
x_left, x_right = xedges[i], xedges[i + 1]
for j in range(n_row):
y_down, y_up = yedges[j], yedges[j + 1]
grid_coords[n, :] = [(x_right + x_left) / 2, (y_up + y_down) / 2]
# Now determining the cells within the gridded area #
if i != n_col - 1 and j == n_row - 1: # top left corner
x_true = (xs >= x_left) & (xs < x_right)
y_true = (ys <= y_up) & (ys > y_down)
            elif i == n_col - 1 and j != n_row - 1:  # bottom right corner
x_true = (xs > x_left) & (xs <= x_right)
y_true = (ys < y_up) & (ys >= y_down)
else: # average case
x_true = (xs >= x_left) & (xs < x_right)
y_true = (ys < y_up) & (ys >= y_down)
grid_cells = cell_bcs[x_true & y_true]
grid_cells_str = ",".join(grid_cells)
grid_bcs.append(grid_cells_str)
grid_cell_counts.append(len(grid_cells))
gridded_cells.extend(grid_cells)
cell_grid.extend([f"grid_{n}"] * len(grid_cells))
# Summing the expression across these cells to get the grid expression #
if len(grid_cells) > 0:
cell_bool = [cell in grid_cells for cell in cell_bcs]
grid_expr[n, :] = adata.X[cell_bool, :].sum(axis=0)
# If we have cell type information, will record #
if type(use_label) != type(None) and len(grid_cells) > 0:
grid_cell_types = cell_labels[cell_bool]
cell_info[n, :] = [
len(np.where(grid_cell_types == ct)[0]) / len(grid_cell_types)
for ct in cell_set
]
n += 1
# Creating gridded anndata #
grid_expr = pd.DataFrame(
grid_expr,
index=[f"grid_{i}" for i in range(n_squares)],
columns=adata.var_names.values.astype(str),
)
grid_data = AnnData(grid_expr)
grid_data.obs["imagecol"] = grid_coords[:, 0]
grid_data.obs["imagerow"] = grid_coords[:, 1]
grid_data.obs["n_cells"] = grid_cell_counts
grid_data.obsm["spatial"] = grid_coords
grid_data.uns["spatial"] = adata.uns["spatial"]
if type(use_label) != type(None):
grid_data.uns[use_label] = pd.DataFrame(
cell_info, index=grid_data.obs_names.values.astype(str), columns=cell_set
)
max_indices = np.apply_along_axis(np.argmax, 1, cell_info)
cell_set = np.unique(grid_data.uns[use_label].columns.values)
grid_data.obs[use_label] = [cell_set[index] for index in max_indices]
grid_data.obs[use_label] = grid_data.obs[use_label].astype("category")
# Subsetting to only gridded spots that contain cells #
grid_data = grid_data[grid_data.obs["n_cells"] > 0, :].copy()
if type(use_label) != type(None):
grid_data.uns[use_label] = grid_data.uns[use_label].loc[grid_data.obs_names, :]
grid_data.uns["grid_counts"] = grid_counts
grid_data.uns["grid_xedges"] = xedges
grid_data.uns["grid_yedges"] = yedges
return grid_data
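# Hedged usage sketch: grid a single-cell spatial AnnData into a 20x20 grid, carrying a
# hypothetical cell-type annotation stored under adata.obs["cell_type"] as deconvolution data.
#   grid_data = grid(adata, n_row=20, n_col=20, use_label="cell_type")
#   print(grid_data.shape, grid_data.obs["n_cells"].sum())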
def run(
adata: AnnData,
lrs: np.array,
min_spots: int = 10,
distance: int = None,
n_pairs: int = 1000,
n_cpus: int = None,
use_label: str = None,
adj_method: str = "fdr_bh",
pval_adj_cutoff: float = 0.05,
min_expr: float = 0,
save_bg: bool = False,
neg_binom: bool = False,
verbose: bool = True,
):
"""Performs stLearn LR analysis.
Parameters
-----------
adata: AnnData
The data object.
lrs: np.array
The LR pairs to score/test for enrichment (in format 'L1_R1').
min_spots: int
Minimum number of spots with an LR score for an LR to be considered for
further testing.
distance: int
Distance to determine the neighbours (default [None] is immediately
adjacent neighbours if using Visium), distance=0 means within spot
(only for non-single-cell spatial data).
n_pairs: int
Number of random pairs of genes to generate when creating the background
        distribution per LR pair; the higher the number, the more accurate the
        p-value estimation.
n_cpus: int
The number of cpus to use for multi-threading.
use_label: str
        The cell type deconvolution results stored in adata.uns to use for counting;
        if not specified, only LR expression is considered, without accounting for
        cell heterogeneity.
adj_method: str
Parsed to statsmodels.stats.multitest.multipletests for multiple
hypothesis testing correction; see there for other options.
pval_adj_cutoff: float
P-value below which LR is considered significant in spot neighbourhood.
min_expr: float
        Minimum gene expression of either the ligand or the receptor for a spot to be
        considered as expressing it.
save_bg: bool
Whether to save the background per LR pair; for method development only.
        Not recommended, since it requires a large amount of memory.
neg_binom: bool
Whether to fit a negative binomial distribution for all background
scores generated across spots per LR after discretising the random
scores. Can be extremely slow.
verbose: bool
        Whether to print progress messages during run-time.
Returns
--------
adata: AnnData
Relevant information stored:
adata.uns['lr_summary']
            Summary of significant spots detected per LR.
            The LRs listed in the index are in the same order as the LR columns of the
            results stored in adata.obsm below; hence this order must be maintained.
adata.obsm
Additional keys are added; 'lr_scores', 'lr_sig_scores', 'p_vals',
'p_adjs', '-log10(p_adjs)'. All are numpy matrices, with columns
referring to the LRs listed in adata.uns['lr_summary']. 'lr_scores'
is the raw scores, while 'lr_sig_scores' is the same except only for
significant scores; non-significant scores are set to zero.
adata.obsm['het']
Only if use_label specified; contains the counts of the cell types found per spot.
"""
    # Setting threads for parallelisation #
if type(n_cpus) != type(None):
numba.set_num_threads(n_cpus)
    # Making sure none of the var_names contains '_'; if any do, they will need
    # to be renamed.
prob_genes = [gene for gene in adata.var_names if '_' in gene]
if len(prob_genes)>0:
raise Exception("Detected '_' within some gene names, which breaks " +\
"internal string handling for the lrs in format 'L_R'.\n"+\
"Recommend to rename adata.var_names or remove these "+\
f"genes from adata:\n {prob_genes}")
# Calculating neighbour & storing #
distance = calc_distance(adata, distance)
neighbours = calc_neighbours(adata, distance, verbose=verbose)
adata.obsm["spot_neighbours"] = pd.DataFrame(
[",".join(x.astype(str)) for x in neighbours],
index=adata.obs_names,
columns=["neighbour_indices"],
)
spot_neighs_df = adata.obsm["spot_neighbours"]
spot_neigh_bcs = []
for i in range(spot_neighs_df.shape[0]):
neigh_indices = [
int(index)
for index in spot_neighs_df.values[i, 0].split(",")
if index != ""
]
neigh_bcs = [adata.obs_names[index] for index in neigh_indices]
spot_neigh_bcs.append(",".join(neigh_bcs))
spot_neigh_bcs_df = pd.DataFrame(
spot_neigh_bcs, index=spot_neighs_df.index, columns=["neighbour_bcs"]
)
# Important to store barcodes in-case adata subsetted #
adata.obsm["spot_neigh_bcs"] = spot_neigh_bcs_df
if verbose:
print(
"Spot neighbour indices | |
pulumi.get(self, "locations")
@locations.setter
def locations(self, value: pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerAllocationPolicyLocationArgs']]]):
pulumi.set(self, "locations", value)
@pulumi.input_type
class AlbLoadBalancerAllocationPolicyLocationArgs:
def __init__(__self__, *,
subnet_id: pulumi.Input[str],
zone_id: pulumi.Input[str],
disable_traffic: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] subnet_id: Provided by the client or computed automatically.
:param pulumi.Input[str] zone_id: ID of the zone that location is located at.
:param pulumi.Input[bool] disable_traffic: If set, will disable all L7 instances in the zone for request handling.
"""
pulumi.set(__self__, "subnet_id", subnet_id)
pulumi.set(__self__, "zone_id", zone_id)
if disable_traffic is not None:
pulumi.set(__self__, "disable_traffic", disable_traffic)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Input[str]:
"""
Provided by the client or computed automatically.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Input[str]:
"""
ID of the zone that location is located at.
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: pulumi.Input[str]):
pulumi.set(self, "zone_id", value)
@property
@pulumi.getter(name="disableTraffic")
def disable_traffic(self) -> Optional[pulumi.Input[bool]]:
"""
If set, will disable all L7 instances in the zone for request handling.
"""
return pulumi.get(self, "disable_traffic")
@disable_traffic.setter
def disable_traffic(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_traffic", value)
@pulumi.input_type
class AlbLoadBalancerListenerArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointArgs']]]] = None,
http: Optional[pulumi.Input['AlbLoadBalancerListenerHttpArgs']] = None,
tls: Optional[pulumi.Input['AlbLoadBalancerListenerTlsArgs']] = None):
"""
        :param pulumi.Input[str] name: Name of the listener.
:param pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointArgs']]] endpoints: Network endpoints (addresses and ports) of the listener. The structure is documented below.
:param pulumi.Input['AlbLoadBalancerListenerHttpArgs'] http: HTTP listener resource. The structure is documented below.
:param pulumi.Input['AlbLoadBalancerListenerTlsArgs'] tls: TLS listener resource. The structure is documented below.
"""
pulumi.set(__self__, "name", name)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if http is not None:
pulumi.set(__self__, "http", http)
if tls is not None:
pulumi.set(__self__, "tls", tls)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
        Name of the listener.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointArgs']]]]:
"""
Network endpoints (addresses and ports) of the listener. The structure is documented below.
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def http(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerHttpArgs']]:
"""
HTTP listener resource. The structure is documented below.
"""
return pulumi.get(self, "http")
@http.setter
def http(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerHttpArgs']]):
pulumi.set(self, "http", value)
@property
@pulumi.getter
def tls(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerTlsArgs']]:
"""
TLS listener resource. The structure is documented below.
"""
return pulumi.get(self, "tls")
@tls.setter
def tls(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerTlsArgs']]):
pulumi.set(self, "tls", value)
@pulumi.input_type
class AlbLoadBalancerListenerEndpointArgs:
def __init__(__self__, *,
addresses: pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointAddressArgs']]],
ports: pulumi.Input[Sequence[pulumi.Input[int]]]):
"""
:param pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointAddressArgs']]] addresses: Provided by the client or computed automatically.
:param pulumi.Input[Sequence[pulumi.Input[int]]] ports: One or more ports to listen on.
"""
pulumi.set(__self__, "addresses", addresses)
pulumi.set(__self__, "ports", ports)
@property
@pulumi.getter
def addresses(self) -> pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointAddressArgs']]]:
"""
Provided by the client or computed automatically.
"""
return pulumi.get(self, "addresses")
@addresses.setter
def addresses(self, value: pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerEndpointAddressArgs']]]):
pulumi.set(self, "addresses", value)
@property
@pulumi.getter
def ports(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
"""
One or more ports to listen on.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
pulumi.set(self, "ports", value)
@pulumi.input_type
class AlbLoadBalancerListenerEndpointAddressArgs:
def __init__(__self__, *,
external_ipv4_address: Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv4AddressArgs']] = None,
external_ipv6_address: Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv6AddressArgs']] = None,
internal_ipv4_address: Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressInternalIpv4AddressArgs']] = None):
"""
:param pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv4AddressArgs'] external_ipv4_address: External IPv4 address. The structure is documented below.
:param pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv6AddressArgs'] external_ipv6_address: External IPv6 address. The structure is documented below.
:param pulumi.Input['AlbLoadBalancerListenerEndpointAddressInternalIpv4AddressArgs'] internal_ipv4_address: Internal IPv4 address. The structure is documented below.
"""
if external_ipv4_address is not None:
pulumi.set(__self__, "external_ipv4_address", external_ipv4_address)
if external_ipv6_address is not None:
pulumi.set(__self__, "external_ipv6_address", external_ipv6_address)
if internal_ipv4_address is not None:
pulumi.set(__self__, "internal_ipv4_address", internal_ipv4_address)
@property
@pulumi.getter(name="externalIpv4Address")
def external_ipv4_address(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv4AddressArgs']]:
"""
External IPv4 address. The structure is documented below.
"""
return pulumi.get(self, "external_ipv4_address")
@external_ipv4_address.setter
def external_ipv4_address(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv4AddressArgs']]):
pulumi.set(self, "external_ipv4_address", value)
@property
@pulumi.getter(name="externalIpv6Address")
def external_ipv6_address(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv6AddressArgs']]:
"""
External IPv6 address. The structure is documented below.
"""
return pulumi.get(self, "external_ipv6_address")
@external_ipv6_address.setter
def external_ipv6_address(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressExternalIpv6AddressArgs']]):
pulumi.set(self, "external_ipv6_address", value)
@property
@pulumi.getter(name="internalIpv4Address")
def internal_ipv4_address(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressInternalIpv4AddressArgs']]:
"""
Internal IPv4 address. The structure is documented below.
"""
return pulumi.get(self, "internal_ipv4_address")
@internal_ipv4_address.setter
def internal_ipv4_address(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerEndpointAddressInternalIpv4AddressArgs']]):
pulumi.set(self, "internal_ipv4_address", value)
@pulumi.input_type
class AlbLoadBalancerListenerEndpointAddressExternalIpv4AddressArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] address: Provided by the client or computed automatically.
"""
if address is not None:
pulumi.set(__self__, "address", address)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Provided by the client or computed automatically.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@pulumi.input_type
class AlbLoadBalancerListenerEndpointAddressExternalIpv6AddressArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] address: Provided by the client or computed automatically.
"""
if address is not None:
pulumi.set(__self__, "address", address)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Provided by the client or computed automatically.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@pulumi.input_type
class AlbLoadBalancerListenerEndpointAddressInternalIpv4AddressArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] address: Provided by the client or computed automatically.
:param pulumi.Input[str] subnet_id: Provided by the client or computed automatically.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Provided by the client or computed automatically.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
Provided by the client or computed automatically.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class AlbLoadBalancerListenerHttpArgs:
def __init__(__self__, *,
handler: Optional[pulumi.Input['AlbLoadBalancerListenerHttpHandlerArgs']] = None,
redirects: Optional[pulumi.Input['AlbLoadBalancerListenerHttpRedirectsArgs']] = None):
"""
:param pulumi.Input['AlbLoadBalancerListenerHttpHandlerArgs'] handler: HTTP handler that sets plaintext HTTP router. The structure is documented below.
:param pulumi.Input['AlbLoadBalancerListenerHttpRedirectsArgs'] redirects: Shortcut for adding http > https redirects. The structure is documented below.
"""
if handler is not None:
pulumi.set(__self__, "handler", handler)
if redirects is not None:
pulumi.set(__self__, "redirects", redirects)
@property
@pulumi.getter
def handler(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerHttpHandlerArgs']]:
"""
HTTP handler that sets plaintext HTTP router. The structure is documented below.
"""
return pulumi.get(self, "handler")
@handler.setter
def handler(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerHttpHandlerArgs']]):
pulumi.set(self, "handler", value)
@property
@pulumi.getter
def redirects(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerHttpRedirectsArgs']]:
"""
Shortcut for adding http > https redirects. The structure is documented below.
"""
return pulumi.get(self, "redirects")
@redirects.setter
def redirects(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerHttpRedirectsArgs']]):
pulumi.set(self, "redirects", value)
@pulumi.input_type
class AlbLoadBalancerListenerHttpHandlerArgs:
def __init__(__self__, *,
allow_http10: Optional[pulumi.Input[bool]] = None,
http2_options: Optional[pulumi.Input['AlbLoadBalancerListenerHttpHandlerHttp2OptionsArgs']] = None,
http_router_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[bool] allow_http10: If set, will enable only HTTP1 protocol with HTTP1.0 support.
:param pulumi.Input['AlbLoadBalancerListenerHttpHandlerHttp2OptionsArgs'] http2_options: If set, will enable HTTP2 protocol for the handler. The structure is documented below.
:param pulumi.Input[str] http_router_id: HTTP router id.
"""
if allow_http10 is not None:
pulumi.set(__self__, "allow_http10", allow_http10)
if http2_options is not None:
pulumi.set(__self__, "http2_options", http2_options)
if http_router_id is not None:
pulumi.set(__self__, "http_router_id", http_router_id)
@property
@pulumi.getter(name="allowHttp10")
def allow_http10(self) -> Optional[pulumi.Input[bool]]:
"""
If set, will enable only HTTP1 protocol with HTTP1.0 support.
"""
return pulumi.get(self, "allow_http10")
@allow_http10.setter
def allow_http10(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_http10", value)
@property
@pulumi.getter(name="http2Options")
def http2_options(self) -> Optional[pulumi.Input['AlbLoadBalancerListenerHttpHandlerHttp2OptionsArgs']]:
"""
If set, will enable HTTP2 protocol for the handler. The structure is documented below.
"""
return pulumi.get(self, "http2_options")
@http2_options.setter
def http2_options(self, value: Optional[pulumi.Input['AlbLoadBalancerListenerHttpHandlerHttp2OptionsArgs']]):
pulumi.set(self, "http2_options", value)
@property
@pulumi.getter(name="httpRouterId")
def http_router_id(self) -> Optional[pulumi.Input[str]]:
"""
HTTP router id.
"""
return pulumi.get(self, "http_router_id")
@http_router_id.setter
def http_router_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "http_router_id", value)
@pulumi.input_type
class AlbLoadBalancerListenerHttpHandlerHttp2OptionsArgs:
def __init__(__self__, *,
max_concurrent_streams: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] max_concurrent_streams: Maximum number of concurrent streams.
"""
if max_concurrent_streams is not None:
pulumi.set(__self__, "max_concurrent_streams", max_concurrent_streams)
@property
@pulumi.getter(name="maxConcurrentStreams")
def max_concurrent_streams(self) -> Optional[pulumi.Input[int]]:
"""
Maximum number of concurrent streams.
"""
return pulumi.get(self, "max_concurrent_streams")
@max_concurrent_streams.setter
def max_concurrent_streams(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_concurrent_streams", value)
@pulumi.input_type
class AlbLoadBalancerListenerHttpRedirectsArgs:
def __init__(__self__, *,
http_to_https: Optional[pulumi.Input[bool]] = None):
if http_to_https is not None:
pulumi.set(__self__, "http_to_https", http_to_https)
@property
@pulumi.getter(name="httpToHttps")
def http_to_https(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "http_to_https")
@http_to_https.setter
def http_to_https(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "http_to_https", value)
@pulumi.input_type
class AlbLoadBalancerListenerTlsArgs:
def __init__(__self__, *,
default_handler: pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerArgs'],
sni_handlers: Optional[pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerArgs']]]] = None):
"""
:param pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerArgs'] default_handler: TLS handler resource. The structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerArgs']]] sni_handlers: SNI match resource. The structure is documented below.
"""
pulumi.set(__self__, "default_handler", default_handler)
if sni_handlers is not None:
pulumi.set(__self__, "sni_handlers", sni_handlers)
@property
@pulumi.getter(name="defaultHandler")
def default_handler(self) -> pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerArgs']:
"""
TLS handler resource. The structure is documented below.
"""
return pulumi.get(self, "default_handler")
@default_handler.setter
def default_handler(self, value: pulumi.Input['AlbLoadBalancerListenerTlsDefaultHandlerArgs']):
pulumi.set(self, "default_handler", value)
@property
@pulumi.getter(name="sniHandlers")
def sni_handlers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerArgs']]]]:
"""
SNI match resource. The structure is documented below.
"""
return pulumi.get(self, "sni_handlers")
@sni_handlers.setter
def sni_handlers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AlbLoadBalancerListenerTlsSniHandlerArgs']]]]):
| |
= ['_'.join(i.split('(')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split(')')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split('&')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = [i.split('.')[0] for i in sixyNineVideos]
for emot in sixyNineVideos:
summary_data_frame.loc[emot, '69Videos'] = 'Final'
if (date == '2018_Oct_10-Nov_15'): #videoPrefix == 'WithThirty':
clipDir = '/mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/block_For_30_Stimuli/Videos'
videoStims = glob.glob(os.path.join(clipDir, '*'))
videoStims = [i.split('/')[-1] for i in videoStims] ### Renaming the experiment Ids taken from csv file
videoStims = ['_'.join(i.split(' ')) for i in videoStims] ### Renaming the experiment Ids taken from csv file
videoStims = ['_'.join(i.split("'")) for i in videoStims] ### Renaming the experiment Ids taken from csv file
videoStims = ['_'.join(i.split('(')) for i in videoStims] ### Renaming the experiment Ids taken from csv file
videoStims = ['_'.join(i.split(')')) for i in videoStims] ### Renaming the experiment Ids taken from csv file
videoStims = ['_'.join(i.split('&')) for i in videoStims] ### Renaming the experiment Ids taken from csv file
videoStims = [i.split('.')[0] for i in videoStims]
for emot in videoStims:
summary_data_frame.loc[emot, '30Videos'] = 'Final'
elif videoPrefix == 'WithForty':
import pickle
### This file is created using the program /mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/Survey/knowingAboutBlocks.py.
fortyVideosDir = '/mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/Survey'
fortyVidoes = pickle.load(open(os.path.join(fortyVideosDir, 'WithAllVideos__BlockInformationForStimuli_Nov_1-Nov_15.pkl'), 'rb'))[1]
sixyNineVideos = [i.split('/')[-1] for i in summary_data_frame.index.values] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split(' ')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split("'")) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split('(')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split(')')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = ['_'.join(i.split('&')) for i in sixyNineVideos] ### Renaming the experiment Ids taken from csv file
sixyNineVideos = [i.split('.')[0] for i in sixyNineVideos]
summary_data_frame['Experiment_id'] = sixyNineVideos
summary_data_frame.set_index('Experiment_id', drop=True, inplace=True)
for emot in fortyVidoes:
summary_data_frame.loc[emot, '40Videos'] = 'Final'
os.chdir('..')
summary_data_frame.to_csv(os.path.join(_thisDir, 'NewTarget', 'summary_data_frame_'+cleaning_flag+date+'.csv'))
pd.DataFrame(greater_than_50).to_csv(os.path.join(_thisDir, 'NewTarget', 'Greater_Then_50_'+cleaning_flag+date+'.csv'))
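# Hedged refactoring sketch (not in the original script): the ID-sanitisation block above is
# repeated verbatim for several file lists; a small helper could replace it, e.g.
#   def sanitize_experiment_id(name):
#       name = name.split('/')[-1]
#       for char in (' ', "'", '(', ')', '&'):
#           name = '_'.join(name.split(char))
#       return name.split('.')[0]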
########################## Clustering
if cleaning_flag == 'after_cleaning':
import sklearn.cluster
dataToCluster = summary_data_frame[["mean_dist_origin", "VA_std"]]
dataToCluster.dropna(inplace=True)
clusterObject = sklearn.cluster.KMeans(n_clusters=2)
cluster = clusterObject.fit(dataToCluster)
cluster_1 = dataToCluster.index.values[np.where(cluster.labels_==0)[0]]
cluster_2 = dataToCluster.index.values[np.where(cluster.labels_==1)[0]]
summary_data_frame.loc[cluster_1, 'Cluster_Marker'] = '*'
summary_data_frame.loc[cluster_2, 'Cluster_Marker'] = '#'
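# Hedged note (an assumption, not in the original): KMeans initialisation is stochastic, so
# the 0/1 label-to-cluster mapping can swap between runs; passing a fixed seed, e.g.
# sklearn.cluster.KMeans(n_clusters=2, random_state=0), would make the '*'/'#' markers reproducible.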
if (date == '2018_Oct_10-Oct_20'): #videoPrefix == 'WithSixtyNine':
NaNVal = summary_data_frame.index.values[np.where(np.isnan(summary_data_frame['VideoId']))[0]]
summary_data_frame.drop(NaNVal, inplace=True, axis=0)
selectedForScatter = summary_data_frame.index.values[np.where(summary_data_frame['69Videos']=='Final')[0]]
elif (date == '2018_Oct_10-Nov_15') and (videoPrefix == 'With69Videos_'):
selectedForScatter = summary_data_frame.index.values[np.where(summary_data_frame['69Videos']=='Final')[0]]
elif (date == '2018_Oct_10-Nov_15'): #videoPrefix == 'WithThirty':
selectedForScatter = summary_data_frame.index.values[np.where(summary_data_frame['30Videos']=='Final')[0]]
summary_data_frame.to_csv(os.path.join(_thisDir, 'NewTarget', 'WithClustering_summary_data_frame_'+cleaning_flag+date+'.csv'))
from adjustText import adjust_text
if (videoPrefix == 'WithThirtyVideos_'):
ax = summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean']].plot.scatter(x='V_mean', y='A_mean', s=40, figsize=(20,10), fontsize=40)
texts = []
for v, a, emt in summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean','MostRated']].values:
texts.append(plt.text(v, a, emt, fontsize=35))
#ax.annotate(emt, (v,a), fontsize=30)
plt.ylabel('Arousal Mean', fontsize=30)
plt.xlabel('Valence Mean', fontsize=30)
adjust_text(texts, only_move={'points':'y', 'texts':'y'}, arrowprops=dict(arrowstyle="->", color='r', lw=1.5))
plt.savefig(os.path.join(_thisDir, 'NewTarget', '%s_Valence_ArousalRepresentationSelectedStimuli%s_%s.png' %(videoPrefix, date, cleaning_flag)), bbox_inches='tight')
plt.savefig(os.path.join(_thisDir, 'NewTarget', '%s_Valence_ArousalRepresentationSelectedStimuli%s_%s.pdf' %(videoPrefix, date, cleaning_flag)), bbox_inches='tight')
elif (videoPrefix == 'With69Videos_'):
plt.clf()
plt.close()
ax = summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean']].plot.scatter(x='V_mean', y='A_mean', s=40, fontsize=40)#, figsize=(15,15), fontsize=40)
texts = []
'''### Annotating data ponts with emotion names
for v, a, emt in summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean','MostRated']].values:
texts.append(plt.text(v, a, emt, fontsize=23))
#ax.annotate(emt, (v,a), fontsize=30)
adjust_text(texts, only_move={'points':'y', 'texts':'y'}, arrowprops=dict(arrowstyle="->", color='r', lw=1.5))'''
plt.ylabel('Arousal Mean', fontsize=30)
plt.xlabel('Valence Mean', fontsize=30)
plt.savefig(os.path.join(_thisDir, 'NewTarget', '%s_Valence_ArousalRepresentationSelectedStimuli%s_%s.png' %(videoPrefix, date, cleaning_flag)), bbox_inches='tight')
plt.savefig(os.path.join(_thisDir, 'NewTarget', '%s_Valence_ArousalRepresentationSelectedStimuli%s_%s.pdf' %(videoPrefix, date, cleaning_flag)), bbox_inches='tight')
plt.clf()
plt.close()
#elif videoPrefix == 'WithSixtyNine':
'''else:
pdb.set_trace()
ax = summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean']].plot.scatter(x='V_mean', y='A_mean', s=40, figsize=(20,10), fontsize=40)
texts = []
for v, a, emt in summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean','WarinnerName']].values:
texts.append(plt.text(v, a, emt, fontsize=35))
#ax.annotate(emt, (v,a), fontsize=30)
plt.ylabel('Arousal Mean', fontsize=30)
plt.xlabel('Valence Mean', fontsize=30)
adjust_text(texts, only_move={'points':'y', 'texts':'y'}, arrowprops=dict(arrowstyle="->", color='r', lw=1.5))
#summary_data_frame.loc[selectedForScatter, ['V_mean', 'A_mean']].plot.scatter(x='V_mean', y='A_mean', s=40, figsize=(20,10), fontsize=40)
#plt.ylabel('Arousal Mean', fontsize=30)
#plt.xlabel('Valence Mean', fontsize=30)
plt.savefig(os.path.join(_thisDir, 'NewTarget', 'Valence_ArousalRepresentationSelectedStimuli%s_%s.png' %(date, cleaning_flag)), bbox_inches='tight')
plt.savefig(os.path.join(_thisDir, 'NewTarget', 'Valence_ArousalRepresentationSelectedStimuli%s_%s.pdf' %(date, cleaning_flag)), bbox_inches='tight')'''
print("After this please go to VAD_Plotting for summarized data in this module or go for Data cleaning")
#################################### Include MAD information as well.
if cleaning_flag == 'after_cleaning':
MADFile = 'RProgram_MeanAbsoluteDifference_%s%s.csv' %(cleaning_flag, date)
MADFrame = pd.read_csv(os.path.join(_thisDir, 'NewTarget', MADFile), index_col = 0)
MADEmt = MADFrame.index.values
MADEmt = [i.split('/')[-1] for i in MADFrame.index.values] ### Renaming the experiment Ids taken from csv file
MADEmt = ['_'.join(i.split(' ')) for i in MADEmt] ### Renaming the experiment Ids taken from csv file
MADEmt = ['_'.join(i.split("'")) for i in MADEmt] ### Renaming the experiment Ids taken from csv file
MADEmt = ['_'.join(i.split('(')) for i in MADEmt] ### Renaming the experiment Ids taken from csv file
MADEmt = ['_'.join(i.split(')')) for i in MADEmt] ### Renaming the experiment Ids taken from csv file
MADEmt = ['_'.join(i.split('&')) for i in MADEmt] ### Renaming the experiment Ids taken from csv file
MADEmt = [i.split('.')[0] for i in MADEmt]
MADFrame['Experiment_id'] = MADEmt
MADFrame.set_index('Experiment_id', drop=True, inplace=True)
for vidStim in summary_data_frame.index.values:
try:
summary_data_frame.loc[vidStim, 'VMAD'] = MADFrame.loc[vidStim, 'VMAD']
summary_data_frame.loc[vidStim, 'AMAD'] = MADFrame.loc[vidStim, 'AMAD']
summary_data_frame.loc[vidStim, 'DMAD'] = MADFrame.loc[vidStim, 'DMAD']
summary_data_frame.loc[vidStim, 'LMAD'] = MADFrame.loc[vidStim, 'LMAD']
summary_data_frame.loc[vidStim, 'FMAD'] = MADFrame.loc[vidStim, 'FMAD']
except:
continue
####################### Concordance Results
## This file is created using R Program: /mnt/7CBFA0EC210FC340/ExperimentRelatedData/FromUbuntuAcerSystem/Experiment/Survey/IRRTest_KendallVegan_StimulationWise.R
CCCFile = 'AllStimuli_CCC_Test_Result_%s.csv' %(date.split('2018_')[1])
CCCFile_Cluster_1 = 'Cluster-1_CCC_Test_Result_%s.csv' %(date.split('2018_')[1])
CCCFile_Cluster_2 = 'Cluster-2_CCC_Test_Result_%s.csv' %(date.split('2018_')[1])
CCCFrame = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, CCCFile), index_col = 0)
CCCFrame_Cluster_1 = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, CCCFile_Cluster_1), index_col = 0)
CCCFrame_Cluster_2 = pd.read_csv(os.path.join(_thisDir, 'NewTarget', videoPrefix, CCCFile_Cluster_2), index_col = 0)
CCCEmt = CCCFrame.index.values
CCCEmt = [i.split('/')[-1] for i in CCCFrame.index.values] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(' ')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split("'")) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('(')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(')')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('&')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = [i.split('.')[0] for i in CCCEmt]
CCCFrame['Experiment_id'] = CCCEmt
CCCFrame.set_index('Experiment_id', drop=True, inplace=True)
CCCEmt = [i.split('/')[-1] for i in CCCFrame_Cluster_1.index.values] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(' ')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split("'")) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('(')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(')')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('&')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = [i.split('.')[0] for i in CCCEmt]
CCCFrame_Cluster_1['Experiment_id'] = CCCEmt
CCCFrame_Cluster_1.set_index('Experiment_id', drop=True, inplace=True)
CCCEmt = [i.split('/')[-1] for i in CCCFrame_Cluster_2.index.values] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(' ')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split("'")) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('(')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split(')')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = ['_'.join(i.split('&')) for i in CCCEmt] ### Renaming the experiment Ids taken from csv file
CCCEmt = [i.split('.')[0] for i in CCCEmt]
CCCFrame_Cluster_2['Experiment_id'] = CCCEmt
CCCFrame_Cluster_2.set_index('Experiment_id', drop=True, inplace=True)
for vidStim in summary_data_frame.index.values:
try:
summary_data_frame.loc[vidStim, 'AllStim_W'] = CCCFrame.loc[vidStim, 'Concord_W']
summary_data_frame.loc[vidStim, 'AllStim_F'] = CCCFrame.loc[vidStim, 'Concord_F']
summary_data_frame.loc[vidStim, 'AllStim_Prob.F'] = CCCFrame.loc[vidStim, 'Concord_Prob.F']
summary_data_frame.loc[vidStim, 'AllStim_Chi2'] = CCCFrame.loc[vidStim, 'Concord_Chi2']
summary_data_frame.loc[vidStim, 'AllStim_Prob.perm'] = CCCFrame.loc[vidStim, 'Concord_Prob.perm']
summary_data_frame.loc[vidStim, 'AllStim_Dimension'] = CCCFrame.loc[vidStim, 'Concord_Dimension']
summary_data_frame.loc[vidStim, 'AllStimCateg'] = CCCFrame.loc[vidStim, 'ConcordCateg']
summary_data_frame.loc[vidStim, 'Clust_1_W'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_W']
summary_data_frame.loc[vidStim, 'Clust_1_F'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_F']
summary_data_frame.loc[vidStim, 'Clust_1_Prob.F'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Prob.F']
summary_data_frame.loc[vidStim, 'Clust_1_Chi2'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Chi2']
summary_data_frame.loc[vidStim, 'Clust_1_Prob.perm'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Prob.perm']
summary_data_frame.loc[vidStim, 'Clust_1_Dimension'] = CCCFrame_Cluster_1.loc[vidStim, 'Concord_Dimension']
summary_data_frame.loc[vidStim, 'Clust_1Categ'] = CCCFrame_Cluster_1.loc[vidStim, 'ConcordCateg']
summary_data_frame.loc[vidStim, 'Clust_2_W'] = CCCFrame_Cluster_2.loc[vidStim, 'Concord_W']
summary_data_frame.loc[vidStim, 'Clust_2_F'] | |
fourier_amplitude[numpy.where(fourier_amplitude > 1.0)] = 1.0
fourier_amplitude[2:][numpy.where(numpy.greater(fourier_amplitude[2:], fourier_amplitude[1:-1]))] = 0
else:
if fourier_amplitude > 1.0 : return 1.0
if fourier_amplitude < 0.0 : return 0.0
#check the previous
return fourier_amplitude
######################################################################
# STRAIN
######################################################################
# INVARIANT PAH --------------------------------
def strain_invariant_function_pah(L, h, k, l, lattice_parameter, a, b, C_hkl):
s_hkl = Utilities.s_hkl(lattice_parameter, h, k, l)
return numpy.exp(-((2*numpy.pi**2)/((s_hkl**2)*(lattice_parameter**4))) * C_hkl * (a*L + b*(L**2)))
def displacement_invariant_pah(L, h, k, l, a, b, C_hkl):
return numpy.sqrt((C_hkl*(a*L + b*(L**2)))/((h**2+k**2+l**2)**2))
# Krivoglaz-Wilkens --------------------------------
from scipy import integrate
from numpy import pi, log, sqrt, arcsin, sin, cos # TO SHORTEN FORMULAS
def clausen_integral_inner_function(t):
return log(2*sin(t/2))
def clausen_integral(x=0.0):
_v_integrate_quad = numpy.vectorize(integrate.quad)
return -1*(_v_integrate_quad(lambda t: clausen_integral_inner_function(t), 0.0, x)[0])
def f_star(eta, use_simplified_calculation=True):
is_array = isinstance(eta, list) or isinstance(eta, numpy.ndarray)
if not is_array:
if eta >= 1:
return (256/(45*pi*eta)) - ((11/24) + (log(2) - log(eta))/4)/(eta**2)
else:
if use_simplified_calculation:
return (7/4) - log(2) - log(eta) + ((eta**2)/6) - (32*(eta**3))/(225*pi)
else:
return (256/(45*pi*eta)) \
+ ((eta**2)/6) - log(2) - log(eta) \
+ -eta*sqrt(1-(eta**2))*(769 + 4*(eta**2)*(20.5 + (eta**2)))/(180*pi*(eta**2)) \
+ -((45 - 180*eta**2)*clausen_integral(2*arcsin(eta)) \
+ (15*arcsin(eta)*(11 + 4*(eta**2)*(10.5 + (eta**2)) + (6 - 24*(eta**2))*(log(2) + log(eta)))))/(180*pi*(eta**2))
else:
result = numpy.zeros(len(eta))
cursor_1 = numpy.where(eta >= 1)
cursor_2 = numpy.where(eta < 1)
eta1 = eta[cursor_1]
eta2 = eta[cursor_2]
result[cursor_1] = (256/(45*pi*eta1)) - ((11/24) + (log(2) - log(eta1))/4)/(eta1**2)
if use_simplified_calculation:
result[cursor_2] = (7/4) - log(2) - log(eta2) + ((eta2**2)/6) - (32*(eta2**3))/(225*pi)
else:
result[cursor_2] = (256/(45*pi*eta2)) \
+ ((eta2**2)/6) - log(2) - log(eta2) \
+ -eta2*sqrt(1-(eta2**2))*(769 + 4*(eta2**2)*(20.5 + (eta2**2)))/(180*pi*(eta2**2)) \
+ -((45 - 180*eta2**2)*clausen_integral(2*arcsin(eta2)) \
+ (15*arcsin(eta2)*(11 + 4*(eta2**2)*(10.5 + (eta2**2)) + (6 - 24*(eta2**2))*(log(2) + log(eta2)))))/(180*pi*(eta2**2))
return result
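# Hedged sanity-check sketch: for small eta the simplified and full Wilkens f* branches above
# should agree closely, and for eta >= 1 they are identical by construction, e.g.
#   for eta in (0.1, 0.5, 0.9, 1.5):
#       print(eta, f_star(eta, use_simplified_calculation=True),
#             f_star(eta, use_simplified_calculation=False))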
def C_hkl_krivoglaz_wilkens(h, k, l, Ae, Be, As, Bs, mix):
H_2 = __H_invariant_square(h, k, l)
C_hkl_edge = Ae + Be*H_2
C_hkl_screw = As + Bs*H_2
return mix*C_hkl_edge + (1-mix)*C_hkl_screw
def strain_krivoglaz_wilkens(L, h, k, l, lattice_parameter, rho, Re, Ae, Be, As, Bs, mix, b):
s_hkl = Utilities.s_hkl(lattice_parameter, h, k, l)
C_hkl = C_hkl_krivoglaz_wilkens(h, k, l, Ae, Be, As, Bs, mix)
return numpy.exp(-(0.5*pi*(s_hkl**2)*(b**2)*rho*C_hkl*(L**2)*f_star(L/Re)))
def displacement_krivoglaz_wilkens(L, h, k, l, rho, Re, Ae, Be, As, Bs, mix, b):
C_hkl = C_hkl_krivoglaz_wilkens(h, k, l, Ae, Be, As, Bs, mix)
return numpy.sqrt(rho*C_hkl*(b**2)*(L**2)*f_star(L/Re)/(4*numpy.pi))
# WARREN MODEL --------------------------------
def load_warren_files():
delta_l_dict = {}
delta_l2_dict = {}
path = os.path.join(os.path.dirname(__file__), "data")
path = os.path.join(path, "delta_l_files")
filenames = os.listdir(path)
for filename in filenames:
if filename.endswith('FTinfo'):
hkl = filename[0:3]
name = os.path.join(path, filename)
data = numpy.loadtxt(name)
L = data[:,0]
delta_l_dict[hkl] = [L, data[:, 1]] # deltal_fun
delta_l2_dict[hkl] = [L, data[:,2]] # deltal2_fun
return delta_l_dict, delta_l2_dict
delta_l_dict, delta_l2_dict = load_warren_files()
def modify_delta_l(l, delta_l, lattice_parameter, average_lattice_parameter):
return delta_l - (average_lattice_parameter/lattice_parameter -1)*l
def modify_delta_l2(l, delta_l, delta_l2, lattice_parameter, average_lattice_parameter):
return delta_l2 - 2*delta_l*(average_lattice_parameter/lattice_parameter -1)*l \
+ ((average_lattice_parameter/lattice_parameter -1)*l)**2
def re_warren_strain(s_hkl, delta_l2):
return numpy.exp(-0.5*((s_hkl*2*numpy.pi)**2)*delta_l2)
def im_warren_strain(s_hkl, delta_l):
return (s_hkl*2*numpy.pi)*delta_l
def strain_warren_function(L, h, k, l, lattice_parameter, average_lattice_parameter):
hkl = str(h) + str(k) + str(l)
if hkl not in delta_l_dict.keys():
return numpy.ones(len(L)), numpy.zeros(len(L))
delta_l_entry = delta_l_dict[hkl]
delta_l2_entry = delta_l2_dict[hkl]
l_local = delta_l_entry[0]
delta_l = delta_l_entry[1]
delta_l2 = delta_l2_entry[1]
new_delta_l = modify_delta_l(l_local, delta_l, lattice_parameter, average_lattice_parameter)
new_delta_l2 = modify_delta_l2(l_local, delta_l, delta_l2, lattice_parameter, average_lattice_parameter)
new_delta_l = numpy.interp(L, l_local, new_delta_l)
new_delta_l2 = numpy.interp(L, l_local, new_delta_l2)
s_hkl = Utilities.s_hkl(average_lattice_parameter, h, k, l)
return re_warren_strain(s_hkl, new_delta_l2), im_warren_strain(s_hkl, new_delta_l)
######################################################################
# STRUCTURE
######################################################################
def __load_atomic_scattering_factor_coefficients():
atomic_scattering_factor_coefficients = {}
path = os.path.join(os.path.dirname(__file__), "data")
file_name = os.path.join(path, "atomic_scattering_factor_coefficients.dat")
file = open(file_name, "r")
rows = file.readlines()
for row in rows:
tokens = numpy.array(row.strip().split(sep=" "))
tokens = tokens[numpy.where(tokens != '')]
if not tokens is None and len(tokens) == 10:
element = tokens[0].strip()
coefficients =[[[float(tokens[1].strip()), float(tokens[2].strip())],
[float(tokens[3].strip()), float(tokens[4].strip())],
[float(tokens[5].strip()), float(tokens[6].strip())],
[float(tokens[7].strip()), float(tokens[8].strip())]],
float(tokens[9].strip())]
atomic_scattering_factor_coefficients[element] = coefficients
file.close()
return atomic_scattering_factor_coefficients
atomic_scattering_factor_coefficients = __load_atomic_scattering_factor_coefficients()
def multiplicity_cubic(h, k, l):
p = [6, 12, 24, 8, 24, 48]
hkl = sorted([h, k, l], reverse=True)
h, k, l = hkl[0], hkl[1], hkl[2]
if (h != 0 and k == 0 and l ==0):
return p[0]
elif (h == k and l == 0):
return p[1]
elif ((h == k and l != h and l != k) or (k==l and h != k and h != l)):
return p[2]
elif (h == k and k == l):
return p[3]
elif (h != k and l == 0):
return p[4]
elif (h != k and k != l and h!=l):
return p[5]
def atomic_scattering_factor(s, element):
coefficients = atomic_scattering_factor_coefficients[str(element).upper()]
ab = coefficients[0]
c = coefficients[1]
f_s = numpy.zeros(numpy.size(s))
s_angstrom = s/10 # to angstrom-1
for index in range(0, len(ab)):
a = ab[index][0]
b = ab[index][1]
f_s += a*numpy.exp(-b*((0.5*s_angstrom)**2))
    # TODO: add DFi and DFii (anomalous scattering corrections)
return f_s + c
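# Note on the expression above: this is the standard four-Gaussian parametrisation
# f(s) = sum_i a_i * exp(-b_i * (s/2)^2) + c, evaluated with s converted from nm^-1 to
# Angstrom^-1 (the s/10 factor).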
def structure_factor(s, formula, h, k, l, symmetry):
elements = ChemicalFormulaParser.parse_formula(formula)
if len(elements) == 1: #TODO: this is valid for Cubic materials only
if symmetry == Symmetry.FCC:
return 4*atomic_scattering_factor(s, elements[0]._element)
elif symmetry == Symmetry.BCC:
return 2*atomic_scattering_factor(s, elements[0]._element)
elif symmetry == Symmetry.SIMPLE_CUBIC:
return atomic_scattering_factor(s, elements[0]._element)
else:
total_weight = 0.0
total_structure_factor = 0.0
cell = get_cell(symmetry)
for element in elements:
weight = element._n_atoms
element_structure_factor = 0.0
for atom in cell:
element_structure_factor += atomic_scattering_factor(s, element._element) * numpy.exp(2 * numpy.pi * 1j * (numpy.dot(atom, [h, k ,l])))
element_structure_factor *= weight
total_weight += weight
total_structure_factor += element_structure_factor
total_structure_factor /= total_weight
return total_structure_factor
def get_cell(symmetry=Symmetry.FCC):
if symmetry == Symmetry.SIMPLE_CUBIC:
return [[0, 0, 0]]
elif symmetry == Symmetry.BCC:
return [[0, 0, 0], [0.5, 0.5, 0.5]]
elif symmetry == Symmetry.FCC:
return [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]]
def squared_modulus_structure_factor(s, formula, h, k, l, symmetry=Symmetry.FCC):
return numpy.absolute(structure_factor(s, formula, h, k, l, symmetry))**2
def saxs(s, D, a0, formula, symmetry, normalize_to):
f = atomic_scattering_factor(s, ChemicalFormulaParser.parse_formula(formula)[0]._element)
Z = 4 if symmetry == Symmetry.FCC else 2 if symmetry == Symmetry.BCC else 1
N = (Z*pi*(D**3))/(6*(a0**3))
if normalize_to == Normalization.NORMALIZE_TO_N:
normalization = N*(numpy.absolute(f)**2)
elif normalize_to == Normalization.NORMALIZE_TO_N2:
normalization = (N*numpy.absolute(f))**2
x = pi*D*s
saxs = normalization*(3*(sin(x)-x*cos(x))/(x**3))**2
saxs[numpy.where(numpy.isnan(saxs))] = 1.0
return saxs
######################################################################
# INSTRUMENTAL
######################################################################
def caglioti_eta(a, b, c, theta): # input: radians
eta = a + b * theta + c * theta**2
if isinstance(eta, numpy.float64):
eta = 0 if eta < 0 else 1 if eta > 1 else eta
else:
eta[numpy.where(eta < 0)] = 0
eta[numpy.where(eta > 1)] = 1
return eta
def caglioti_fwhm(U, V, W, theta): # input: radians, output: degrees
return numpy.sqrt(W + V * numpy.tan(theta) + U * (numpy.tan(theta)**2))
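# Note on the two instrumental functions above: caglioti_fwhm implements the Caglioti relation
# FWHM(theta) = sqrt(W + V*tan(theta) + U*tan^2(theta)), while caglioti_eta is a simple
# polynomial in theta clipped to the physically meaningful [0, 1] mixing range.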
def delta_two_theta_lab6(ax, bx, cx, dx, ex, theta): # input: radians
tan_theta = numpy.tan(theta)
delta_twotheta = numpy.radians(ax*(1/tan_theta) + bx + cx*tan_theta + dx*tan_theta**2 + ex*tan_theta**3)
delta_twotheta[numpy.where(numpy.isnan(delta_twotheta))] = 0.0
delta_twotheta[numpy.where(numpy.isinf(delta_twotheta))] = 0.0
return delta_twotheta
def delta_two_theta_specimen_displacement(goniometer_radius, displacement, theta):
return -(2*displacement/goniometer_radius)*cos(theta)
def lab6_tan_correction(theta, wavelength, ax, bx, cx, dx, ex):
delta_twotheta = delta_two_theta_lab6(ax, bx, cx, dx, ex, theta)
return delta_twotheta*numpy.cos(theta)/wavelength
def specimen_displacement(theta, wavelength, goniometer_radius, displacement): # input radians
delta_twotheta = delta_two_theta_specimen_displacement(goniometer_radius, displacement, theta)
return delta_twotheta*numpy.cos(theta)/wavelength
def instrumental_function(L, h, k, l, lattice_parameter, wavelength, U, V, W, a, b, c):
theta = Utilities.theta_hkl(lattice_parameter, h, k, l, wavelength)
eta = caglioti_eta(a, b, c, numpy.degrees(theta))
sigma = numpy.radians(caglioti_fwhm(U, V, W, theta))*0.5*(numpy.cos(theta)/wavelength)
k = eta * numpy.sqrt(numpy.pi*numpy.log(2))
k /= k + (1-eta)
exponent = numpy.pi * sigma * L
return k*numpy.exp(-2.0*exponent) + (1-k)*numpy.exp(-(exponent**2)/numpy.log(2))
######################################################################
# CALCULATION OF INTEGRAL BREADTH
######################################################################
def __instrumental_function(L, reflection, lattice_parameter, wavelength, instrumental_profile_parameters, ib_total=False):
if instrumental_profile_parameters is None:
return 1.0 if ib_total else 0.0
else:
return instrumental_function(L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
def __size_function(L, reflection, size_parameters, ib_total=False):
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.LOGNORMAL:
if size_parameters.shape == Shape.WULFF:
return size_function_wulff_solids_lognormal(L, reflection.h, reflection.k, reflection.l,
size_parameters.sigma.value, size_parameters.mu.value,
size_parameters.truncation.value, size_parameters.cube_face)
else:
return size_function_lognormal(L, size_parameters.sigma.value, size_parameters.mu.value)
elif size_parameters.distribution == Distribution.DELTA:
return size_function_delta(L, size_parameters.mu.value)
elif size_parameters.distribution == Distribution.GAMMA:
return size_function_gamma(L, size_parameters.sigma.value, size_parameters.mu.value)
else:
return 1.0 if ib_total else 0.0
else:
return 1.0 if ib_total else 0.0
def __strain_function(L, reflection, lattice_parameter, strain_parameters, ib_total=False):
if not strain_parameters is None and strain_parameters.active:
if isinstance(strain_parameters, InvariantPAH):
return strain_invariant_function_pah(L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
elif isinstance(strain_parameters, KrivoglazWilkensModel):
return strain_krivoglaz_wilkens(L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
else:
return 1.0 if ib_total else 0.0
else:
return 1.0 if ib_total else 0.0
def integral_breadth_instrumental_function(reflection, lattice_parameter, wavelength, instrumental_profile_parameters):
return 1 / (2 * integrate.quad(lambda L: __instrumental_function(L, reflection, lattice_parameter, wavelength, instrumental_profile_parameters), 0, numpy.inf)[0])
def integral_breadth_size(reflection, size_parameters):
if size_parameters.active: return 1 / (2 * integrate.quad(lambda L: __size_function(L, reflection, size_parameters), 0, numpy.inf)[0])
else: return numpy.nan
def integral_breadth_strain(reflection, lattice_parameter, | |
#!/usr/bin/env python
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_<EMAIL>
# Normal mail:
# <NAME>
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
import sys, os
import pKarun.pKarun_main as pKarun
from pKarun.pKa_utility_functions import *
import pKaIO
class recordstate:
"""Class for keeping track of states in f.ex. MC sampling"""
def __init__(self):
self.states=[]
return
def recordstate(self,state):
self.states.append((state.copy()))
return
#
# Dummy function for accessing pKarun routines
#
class pKacalc(pKarun.pKarun):
def dummy_function(self):
return
#
# ------
#
class Monte_Carlo(pKaIO.pKaIO):
#
# Monte Carlo algorithm and base class for the other pKa calc routines
#
def acid_base(self,group):
#
# Return 1 for a base, return -1 for an acid
#
import string
return self.acidbase[string.split(group,':')[-1]]
#
# -----------------------
#
def get_modelpKa(self,group):
#
# Return the model pKa value for the group
#
import string
group=string.split(group,':')[-1]
return self.modelpKas[group]
#
# -------------------------
#
def prepare_matrix(self):
"""
# Prepare the matrix
#
# Precompute full term
"""
self.intene={}
for key in self.matrix.keys():
self.intene[key]={}
for key2 in self.matrix[key].keys():
#print key,key2,self.matrix[key][key2]
self.intene[key][key2]= self.matrix[key][key2][0] \
-self.matrix[key][key2][1] \
-self.matrix[key][key2][2] \
+self.matrix[key][key2][3]
#
# Make sure that the matrix is symmetrical
#
residues=self.intene.keys()
residues.sort()
for key in residues:
for key2 in residues:
if not self.intene[key].has_key(key2):
print 'prepare matrix failed'
print 'Matrix[%s] is missing key; %s' %(key,key2)
print key
print self.intene[key].keys()
raise Exception('Matrix[%s] is missing key; %s' %(key,key2))
E12=self.intene[key][key2]
E21=self.intene[key2][key]
new_ene=min(E12,E21)
self.intene[key][key2]=new_ene
self.intene[key2][key]=new_ene
return
#
# ----------------------
#
def calc_intpKas(self):
#
# Set a few constants
#
# Check that all dictionaries have been filled
#
self.groups=self.desolv.keys()
if self.groups!=self.backgr.keys():
print
print 'Inconsistent desolv and backgr'
print
raise Exception('Error in Python Monte Carlo routine')
#
# Calculate the intrinsic pKa values
#
import math
self.ln10=math.log(10)
self.intrinsic_pKa={}
self.groups.sort()
for group in self.groups:
resname=get_resname(group)
self.intrinsic_pKa[group]=self.get_modelpKa(group)+ \
-float(self.acid_base(group))*self.desolv[group]/self.ln10 + \
float(self.acid_base(group))*self.backgr[group]/self.ln10
return
#
# -----------------
#
def calc_pKas(self,mcsteps=2000,phstep=0.1,phstart=2.0,phend=14.0,verbose=1,complete_pka=None,exp_pHs=[]):
"""Calculate pKa values for the system"""
#
# Init
#
# Set a few constants
#
# KBol and Temp are equal to 1.0 everywhere in this derived class!
#
# Check that all dictionaries have been filled
#
self.groups=self.desolv.keys()
self.groups.sort()
b_groups=self.backgr.keys()
b_groups.sort()
m_groups=self.matrix.keys()
m_groups.sort()
#
if self.groups!=b_groups or self.groups!=m_groups:
print
print 'Inconsistent desolv, backgr and matrix dictionaries'
print
ndes=len(self.desolv.keys())
nback=len(self.backgr.keys())
nmat=len(self.matrix.keys())
print 'Groups in desolv: %3d, groups in backgr: %3d, groups in matrix: %3d \n' %(ndes,nback,nmat)
groups=self.backgr.keys()+self.desolv.keys()+self.matrix.keys()
g_dict={}
for group in groups:
g_dict[group]=1
groups=g_dict.keys()
groups.sort()
for group in groups:
has=['-','-','-']
if self.desolv.has_key(group):
has[0]='+'
if self.backgr.has_key(group):
has[1]='+'
if self.matrix.has_key(group):
has[2]='+'
print '%14s Desolv: %s, Backgr: %s, Matrix: %s' %(group,has[0],has[1],has[2])
print 'Done with ',group
print 'Totally done'
import sys
sys.stdout.flush()
raise Exception('Error in Python Monte Carlo routine')
#
# Prepare the matrix
#
self.prepare_matrix()
#
# Calculate the intrinsic pKa values
#
self.calc_intpKas()
#
# Calculate pKa values
#
return self._calc_pKas(mcsteps,phstep,phstart,phend,verbose,complete_pka,exp_pHs=exp_pHs)
#
# ------
#
def _calc_pKas(self,mcsteps=200000,phstep=0.1,phstart=1.0,phend=20.0,verbose=1,complete_pka=None,exp_pHs=[]):
"""Calculate pKa values from intrinsic pKa values and interaction energies. No checking done"""
#
# KBol and Temp are equal to 1.0 everywhere
#
import math
self.ln10=math.log(10)
#
# Start calculating protonation states
#
self.mcsteps=mcsteps
pHs=range(int(phstart*100.0),int(phend*100.0),int(phstep*100.0))
pHvalues=[]
for pH in pHs:
pHvalues.append(float(pH)/100.0)
pHvalues.extend(exp_pHs)
self.pHvalues=pHvalues
self.prot_states_tmp={}
#
# Calculate protonation states at each pH value
#
self.all_states={}
import copy
for pH in pHvalues:
tmp,states=self.calc_fractional_charge(pH)
if tmp=={}:
print pH
print 'No result'
raise Exception('calc_fractional_charge returned no result at pH %s' %pH)
self.prot_states_tmp[pH]=copy.deepcopy(tmp)
self.all_states[pH]=copy.deepcopy(states)
if verbose>1:
print
#
# Determine pKas
#
pkavalues=self.determine_pKa_values()
#
# Reformat the titration data
#
self.prot_states={}
for group in self.groups:
self.prot_states[group]={}
for ph in pHvalues:
self.prot_states[group][ph]=self.prot_states_tmp[ph][group]
self.prot_states[group]['pKa']=pkavalues[group]
self.prot_states_tmp=None
#
# ----------
#
self.pka={}
for group in pkavalues.keys():
self.pka[group]={'pKa':pkavalues[group]}
if complete_pka:
self.complete_pka()
return pkavalues,self.prot_states
#
# --------------------------
#
def determine_pKa_values(self):
"""Determine pKa values as half-points of titration from titration data"""
pkavalues={}
pHvalues=self.pHvalues
pHvalues.sort()
for group in self.groups:
pka=-99.9
last_crg=self.prot_states_tmp[pHvalues[0]][group]
phstep=float(pHvalues[1]-pHvalues[0])
for ph in pHvalues:
try:
crg=self.prot_states_tmp[ph][group]
except KeyError:
grps=self.prot_states_tmp[ph].keys()
grps.sort()
print grps
print group
raise Exception('No titration data for group %s at pH %s' %(group,ph))
#
# ----
#
if crg<last_crg:
if self.acid_base(group)==1:
if crg<=0.5 and last_crg>0.5:
pka=(last_crg-0.5)/(last_crg-crg)*phstep+(ph-phstep)
break
else:
if crg<=-0.5 and last_crg>-0.5:
pka=(last_crg-(-0.5))/(last_crg-crg)*phstep+(ph-phstep)
break
last_crg=crg
if pka<-90.0:
if self.acid_base(group)==1:
if last_crg>0.5:
pka=99.9
else:
pka=-99.9
else:
if last_crg>-0.5:
pka=99.9
else:
pka=-99.9
pkavalues[group]=pka
return pkavalues
#
# --------------------------
#
def calc_fractional_charge(self,pH):
#
# Calculate the fractional charge for all residues
# at this pH
#
# Get all the groups
#
# Define the Monte Carlo parameters
#
eqsteps=self.mcsteps/10
#
# Initialise the random number generator
#
import random, math
rand=random.Random(198984)
#
# Initialise helper MC class
#
X=recordstate()
#
# Construct the starting state
# State is a dictionary. For each group the value
# is either 1 (charged) or 0 (neutral)
#
state={}
old_cha={}
for group in self.groups:
state[group]=rand.randint(0,1)
curE=self.get_energy(pH,state)
#
# Start the MC steps
#
for step in range(self.mcsteps):
#
# Construct the new state
#
change_group=rand.choice(self.groups)
new_state=state.copy()
new_state[change_group]=abs(new_state[change_group]-1)
#
# Calculate the new energy
#
newE=self.get_energy(pH,new_state)
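#
# Metropolis acceptance with kT = 1 (KBol and Temp are 1.0 throughout this
# class, as noted in calc_pKas): moves that lower the energy are always
# accepted, otherwise they are accepted with probability exp(-deltaE). The
# deltaE<50.0 guard just skips the exp() call for moves that are effectively
# impossible to accept (exp(-50) ~ 2e-22).
#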
if newE<=curE:
state=new_state.copy()
curE=newE
else:
deltaE=newE-curE
if deltaE<50.0:
if rand.random()<=math.exp(-deltaE):
state=new_state.copy()
curE=newE
else:
pass
else:
pass
if step>eqsteps:
X.recordstate(state)
#
# Find the fractional degree of protonation
#
sumstate={}
for group in self.groups:
sum=0
for state in X.states:
sum=sum+state[group]
sumstate[group]=float(sum)/float(len(X.states))
if isacid(group):
sumstate[group]=-sumstate[group]
#
# Done
#
return sumstate,{}
#
# --------------------
#
def get_energy(self,pH,state):
#
# Get the energy for this state
#
energy=0.0
for group in self.groups:
#
# Add the effect of the non-titratable environment
#
if state[group]==1:
energy=energy+float(self.acid_base(group))*self.ln10* \
(pH-self.intrinsic_pKa[group])
#
# Add the effect of all other titratable groups in the system
#
for group2 in self.groups:
if state[group2]==1 and group!=group2:
energy=energy+self.intene[group][group2]/2.0
#
# If we have a non-system groups to take into account, then we include
# that here
#
if hasattr(self,'non_system_groups'):
energy=energy+self.non_system_groups[group][round(pH,1)]
return energy
#
# -----
#
def complete_pka(self):
"""Complete the self.pka dictionary. Insert delec,ddesolv,dbackgr,dpka and intpka values"""
for group in self.pka.keys():
self.pka[group]['intpka']=self.intrinsic_pKa[group]
self.pka[group]['modelpK']=self.get_modelpKa(group)
self.pka[group]['desolv']=-float(self.acid_base(group))*self.desolv[group]/self.ln10
self.pka[group]['backgr']=float(self.acid_base(group))*self.backgr[group]/self.ln10
self.pka[group]['delec']=self.pka[group]['pKa']-self.pka[group]['intpka']
return
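#
# Usage sketch (hypothetical values; assumes the desolv, backgr, matrix,
# modelpKas and acidbase dictionaries have already been filled by the calling
# code, e.g. after reading a pKa calculation with pKaIO):
#
# mc = Monte_Carlo()
# mc.desolv = desolv_dict
# mc.backgr = backgr_dict
# mc.matrix = matrix_dict
# pkas, titration_curves = mc.calc_pKas(mcsteps=20000, phstart=2.0, phend=12.0)
# print pkas
#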
#
# -------------------------------
#
class Monte_Carlo_CPP(Monte_Carlo):
"""
# C++ implementation of the Monte Carlo alg
"""
def test(self):
"""Test if we can import the C++ module"""
import pMC
return
#
# --------
#
def make_matrix_linear(self):
"""Change the matrix to linear form - this makes it easy to pass it to the C++ code"""
linear=[]
residues=self.intene.keys()
residues.sort()
for group1 in residues:
for group2 in residues:
linear.append(self.intene[group1][group2])
return linear
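#
# Example (hypothetical two-group system): if the sorted residues are
# [':0001:ASP', ':0035:GLU'], the list returned above is row-major over that
# order, i.e. [E(ASP,ASP), E(ASP,GLU), E(GLU,ASP), E(GLU,GLU)], which is the
# layout the C++ module is assumed to expect.
#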
#
# -----------------
#
def calc_pKas(self,mcsteps=200000,phstep=0.1,phstart=0.0,phend=14,verbose=1,complete_pka=None,exp_pHs=[],monitor_states=None):
"""
# Calculate pKa values
"""
# Init
#
# Set constants
# KBol and Temp are equal to 1.0 everywhere in this derived class!
#
# Check that all dictionaries have been filled
#
self.groups=self.desolv.keys()
self.groups.sort()
b_groups=self.backgr.keys()
b_groups.sort()
m_groups=self.matrix.keys()
m_groups.sort()
if self.groups!=b_groups or self.groups!=m_groups:
print
print 'Inconsistent desolv, backgr and matrix dictionaries'
print
ndes=len(self.desolv.keys())
nback=len(self.backgr.keys())
nmat=len(self.matrix.keys())
print 'Groups in desolv: %3d, groups in backgr: %3d, groups in matrix: %3d \n' %(ndes,nback,nmat)
groups=self.backgr.keys()+self.desolv.keys()+self.matrix.keys()
g_dict={}
for group in groups:
g_dict[group]=1
groups=g_dict.keys()
groups.sort()
for group in groups:
has=['-','-','-']
if self.desolv.has_key(group):
has[0]='+'
if self.backgr.has_key(group):
has[1]='+'
if self.matrix.has_key(group):
has[2]='+'
print '%14s Desolv: %s, Backgr: %s, Matrix: %s' %(group,has[0],has[1],has[2])
print 'Totally done here'
import sys
sys.stdout.flush()
raise Exception('Error in C++ Monte Carlo module')
#
# Prepare the matrix
#
self.prepare_matrix()
#
# Calculate the intrinsic pKa values
#
self.calc_intpKas()
return self._calc_pKas(mcsteps,phstep,phstart,phend,verbose,complete_pka,monitor_states=monitor_states)
def allok(self,list):
for value in list:
if not value and value!=0.0:
return None
return 1
#
# ----
#
def _calc_pKas(self,mcsteps=200000,phstep=0.1,phstart=1.0,phend=20.0,verbose=1,complete_pka=None,exp_pHs=[],
monitor_groups=None,monitor_states=None):
"""Do the pKa calculation with the CPP module"""
#
# Do specific CPP setup
#
import time
starttime=time.time()
residues=self.intrinsic_pKa.keys()
residues.sort()
intpkas=[]
acidbase=[]
for residue
####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_rountines'))
####Please do not remove lines above####
####Import your modules below if needed####
from FormFactors.Sphere import Sphere
from Chemical_Formula import Chemical_Formula
from PeakFunctions import LogNormal, Gaussian
from utils import find_minmax
class Sphere_Uniform: #Please put the class name same as the function name
def __init__(self, x=0, Np=10, flux=1e13, dist='Gaussian', Energy=None, relement='Au', NrDep=1, norm=1.0, bkg=0.0, mpar={'Material':['Au','H2O'],'Density':[19.32,1.0],'Sol_Density':[1.0,1.0],'Rmoles':[1.0,0.0],'R':[1.0,0.0],'Rsig':[0.0,0.0]}):
"""
Documentation
Calculates the Energy dependent form factor of multilayered nanoparticles with different materials
x : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array
relement : Resonant element of the nanoparticle. Default: 'Au'
Energy : Energy of X-rays in keV at which the form-factor is calculated. Default: None
Np : No. of points with which the size distribution will be computed. Default: 10
NrDep : Energy dependence of the non-resonant element. Default= 1 (Energy Dependent), 0 (Energy independent)
dist : The probability distribution function for the radii of different interfaces in the nanoparticles. Default: Gaussian
norm : The density of the nanoparticles in Molar (Moles/Liter)
bkg : Constant incoherent background
flux : Total X-ray flux to calculate the errorbar to simulate the errorbar for the fitted data
mpar : Multi-parameter which defines the following including the solvent/bulk medium which is the last one. Default: 'H2O'
Material ('Materials' using chemical formula),
Density ('Density' in gm/cubic-cms),
Density of solvent ('Sol_Density' in gm/cubic-cms) of the particular layer
Mole-fraction ('Rmoles') of resonant element in the material)
Radii ('R' in Angs), and
Widths of the distributions ('Rsig' in Angs) of radii of all the interfaces present in the nanoparticle system. Default: [0.0]
"""
if type(x)==list:
self.x=np.array(x)
else:
self.x=x
self.norm=norm
self.bkg=bkg
self.dist=dist
self.Np=Np
self.Energy=Energy
self.relement=relement
self.NrDep=NrDep
#self.rhosol=rhosol
self.flux=flux
self.__mpar__=mpar #If there is any multivalued parameter
self.choices={'dist':['Gaussian','LogNormal']} #If there are choices available for any fixed parameters
self.init_params()
self.__cf__=Chemical_Formula()
self.__fit__=False
def init_params(self):
"""
Define all the fitting parameters like
self.param.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)
"""
self.params=Parameters()
self.params.add('norm',value=self.norm,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)
self.params.add('bkg',value=self.bkg,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)
for key in self.__mpar__.keys():
if key!='Material':
for i in range(len(self.__mpar__[key])):
self.params.add('__%s__%03d'%(key,i),value=self.__mpar__[key][i],vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=None)
def calc_rho(self,material=['Au','H2O'], density=[19.3,1.0], sol_density=[1.0,1.0], Rmoles=[1.0,0.0], Energy=None, NrDep=1):
"""
Calculates the complex electron density of core-shell type multilayered particles in el/Angstroms^3
R :: list of Radii and subsequent shell thicknesses in Angstroms of the nanoparticle system
material :: list of material of all the shells starting from the core to outside
density :: list of density of all the materials in gm/cm^3 starting from the inner core to outside
rmoles :: mole-fraction of the resonant element in the materials
Energy :: Energy in keV
"""
self.output_params['scaler_parameters']={}
if len(material) == len(density):
Nl = len(material)
rho = []
adensity = [] # Density of anomalous element
eirho = [] # Energy independent electron density
for i in range(Nl):
mat=material[i].split(':')
if len(mat)==2:
solute,solvent=mat
solute_formula=self.__cf__.parse(solute)
if self.relement in solute_formula.keys():
self.__cf__.formula_dict[self.relement] = Rmoles[i]
solute_elements=self.__cf__.elements()
solute_mw=self.__cf__.molecular_weight()
solute_mv=self.__cf__.molar_volume()
solute_mole_ratio=self.__cf__.element_mole_ratio()
solvent_formula=self.__cf__.parse(solvent)
solvent_elements=self.__cf__.elements()
solvent_mw=self.__cf__.molecular_weight()
solvent_mole_ratio=self.__cf__.element_mole_ratio()
solvent_moles=sol_density[i]/solvent_mw
solute_moles=density[i]/solute_mw
total_moles=solvent_moles+solute_moles
solvent_mole_fraction=solvent_moles/total_moles
solute_mole_fraction=solute_moles/total_moles
comb_material=''
for ele in solute_mole_ratio.keys():
comb_material+='%s%.6f'%(ele,solute_mole_ratio[ele]*solute_mole_fraction)
for ele in solvent_mole_ratio.keys():
comb_material+='%s%.6f'%(ele,solvent_mole_ratio[ele]*solvent_mole_fraction)
tdensity=density[i]+sol_density[i]*(1-solute_mv*density[i]/solute_mw)
self.output_params['scaler_parameters']['density[%s]' % material[i]]=tdensity
else:
formula=self.__cf__.parse(material[i])
if self.relement in formula.keys():
self.__cf__.formula_dict[self.relement]=Rmoles[i]
mole_ratio=self.__cf__.element_mole_ratio()
comb_material=''
for ele in mole_ratio.keys():
comb_material+='%s%.6f'%(ele,mole_ratio[ele])
#comb_material=material[i]
tdensity=density[i]
self.output_params['scaler_parameters']['density[%s]' % material[i]] = tdensity
formula = self.__cf__.parse(comb_material)
molwt = self.__cf__.molecular_weight()
elements = self.__cf__.elements()
mole_ratio = self.__cf__.element_mole_ratio()
# numbers=np.array(chemical_formula.get_element_numbers(material[i]))
moles = [mole_ratio[ele] for ele in elements]
nelectrons = 0.0
felectrons = complex(0.0, 0.0)
aden=0.0
for j in range(len(elements)):
f0 = self.__cf__.xdb.f0(elements[j], 0.0)[0]
nelectrons = nelectrons + moles[j] * f0
if Energy is not None:
if elements[j]!=self.relement:
if NrDep==1:
f1 = self.__cf__.xdb.f1_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
f2 = self.__cf__.xdb.f2_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
felectrons = felectrons + moles[j] * complex(f1, f2)
else:
f1 = self.__cf__.xdb.f1_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
f2 = self.__cf__.xdb.f2_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
felectrons = felectrons + moles[j] * complex(f1, f2)
if elements[j]==self.relement:
aden+=0.6023 * moles[j]*tdensity/molwt
adensity.append(aden)# * np.where(r > Radii[i - 1], 1.0, 0.0) * pl.where(r <= Radii[i], 1.0, 0.0) / molwt
eirho.append(0.6023 * (nelectrons) * tdensity/molwt)# * np.where(r > Radii[i - 1], 1.0,0.0) * pl.where(r <= Radii[i], 1.0,0.0) / molwt
rho.append(0.6023 * (nelectrons + felectrons) * tdensity/molwt)# * np.where(r > Radii[i - 1], 1.0,0.0) * pl.where(r <= Radii[i], 1.0, 0.0) / molwt
# else:
# eirho.append(0.6023 * (nelectrons) * density[i]/molwt)# * np.where(r <= Radii[i], 1.0, 0.0) / molwt
# rho.append(0.6023 * (nelectrons + felectrons) * density[i]/molwt)# * np.where(r <= Radii[i], 1.0,0.0) / molwt
self.output_params['scaler_parameters']['rho[%s]' % material[i]]=rho[-1]
self.output_params['scaler_parameters']['eirho[%s]' % material[i]] = eirho[-1]
self.output_params['scaler_parameters']['adensity[%s]' % material[i]] = adensity[-1]
return rho, eirho, adensity
def calc_form(self, q, r, rho):
"""
Calculates the isotropic form factor in cm^-1 using the isotropic electron density as a function of radial distance
q :: scalar or array of reciprocal wave vectors in inv. Angstroms at which the form factor needs to be calculated
r :: array of radial distances at which the electron density is known, in Angstroms
rho :: array of electron densities as a function of radial distance in el/Angstroms^3. Note: The electron density should decay to zero at the last radial distance
"""
dr = r[1] - r[0]
amp = np.zeros_like(q)
rho = rho - rho[-1]
for r1, rho1 in zip(r, rho):
amp = amp + 4 * np.pi * r1 * rho1 * np.sin(q * r1) / q
form = 2.818e-5 ** 2 * np.absolute(amp) ** 2 * dr ** 2 * 1e-16
return form, 2.818e-5 * amp * dr * 1e-8
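# The loop above is a direct discretisation of the spherically symmetric
# scattering amplitude A(q) = 4*pi * integral r^2 rho(r) sin(q*r)/(q*r) dr,
# accumulated as sum_i 4*pi*r_i*rho_i*sin(q*r_i)/q and multiplied by dr, after
# shifting rho so that it decays to zero at the last radial point. The factor
# 2.818e-5 is the Thomson scattering length in Angstroms, and 1e-16 (or 1e-8
# for the amplitude) converts Angstrom^2 (or Angstrom) to cm^2 (or cm).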
def calc_mesh(self,R=[1.0],Rsig=[0.0],Np=100):
"""
Computes a multi-dimensional meshgrid of radii (R) of interfaces with a finite widths (Rsig>0.001) of distribution
:param R:
:param Rsig:
:return:
"""
r1 = 'np.meshgrid('
for (i, r) in enumerate(R):
if Rsig[i] > 0.001:
lgn = eval(self.dist+'.'+self.dist+'(x=0.001, pos=r, wid=Rsig[i])')
rmin, rmax = find_minmax(lgn, r, Rsig[i])
r1 = r1 + 'np.linspace(%f,%f,%d),' % (rmin, rmax, Np)
else:
r1 = r1 + '[%f],' % r
r1 = r1[:-1] + ')'
return (eval(r1))
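# Example (hypothetical): for R=[10.0, 5.0], Rsig=[1.0, 0.0] and Np=100 the
# string assembled above looks like
#   "np.meshgrid(np.linspace(rmin,rmax,100),[5.000000])"
# i.e. only interfaces with a finite width (Rsig > 0.001) get a sampled axis,
# while sharp interfaces collapse to a single fixed radius.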
def sphere(self,q, R, dist, sdist, rho, eirho, adensity):
form = np.zeros_like(R[0])
eiform = np.zeros_like(R[0])
aform = np.zeros_like(R[0])
r1 = np.zeros_like(R[0])
for i, r in enumerate(R):
drho = rho[i] - rho[i + 1]
deirho = eirho[i] - eirho[i+1]
darho = adensity[i] - adensity[i+1]
r1 += r
fact=4* np.pi * 2.818e-5*1.0e-8*(np.sin(q * r1) - q * r1 * np.cos(q * r1)) / q ** 3
form = form + drho * fact
eiform = eiform + deirho*fact
aform = aform + darho*fact
return np.sum(np.abs(form) ** 2 * dist) / sdist, np.sum(np.abs(eiform) ** 2 * dist) / sdist, np.sum(np.abs(aform) ** 2 * dist) / sdist, np.sum(eiform*aform*dist) / sdist #in cm^2
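# Each term accumulated in the loop above is the analytical amplitude of a
# uniform sphere of outer radius r1 with density contrast drho,
#   F(q) = 4*pi*drho*(sin(q*r1) - q*r1*cos(q*r1))/q**3,
# scaled by 2.818e-5*1e-8 to obtain cm units. Summing over shells, with
# contrasts taken as differences between adjacent layers, builds the core-shell
# amplitude; the final sums over 'dist' perform the size-distribution average.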
def sphere_dict(self,q, R, dist, sdist, rho, eirho, adensity,key='SAXS-term'):
form = np.zeros_like(R[0])
eiform = np.zeros_like(R[0])
aform = np.zeros_like(R[0])
r1 = np.zeros_like(R[0])
for i, r in enumerate(R):
drho = rho[i] - rho[i + 1]
deirho = eirho[i] - eirho[i+1]
darho = adensity[i] - adensity[i+1]
r1 += r
fact=4* np.pi * 2.818e-5*1.0e-8*(np.sin(q * r1) - q * r1 * np.cos(q * r1)) / q ** 3
eiform = eiform + deirho*fact
aform = aform + darho*fact
form = form + drho * fact
if key=='SAXS-term':
return np.sum(np.abs(eiform) ** 2 * dist) / sdist # in cm^2
elif key=='Resonant-term':
return np.sum(np.abs(aform) ** 2 * dist) / sdist # in cm^2
elif key=='Cross-term':
return np.sum(eiform * aform * dist) / sdist # in cm^2
elif key=='Total':
return np.sum(np.abs(form) ** 2 * dist) / sdist # in cm^2
def update_params(self):
self.norm=self.params['norm'].value
self.bkg=self.params['bkg'].value
key='Density'
self.__density__=[self.params['__%s__%03d'%(key,i)].value for i in range(len(self.__mpar__[key]))]
key='Sol_Density'
self.__sol_density__=[self.params['__%s__%03d'%(key,i)].value for i in range(len(self.__mpar__[key]))]
key='Rmoles'
self.__Rmoles__=[self.params['__%s__%03d'%(key,i)].value for i in range(len(self.__mpar__[key]))]
key='R'
self.__R__=[self.params['__%s__%03d'%(key,i)].value for i in range(len(self.__mpar__[key]))]
key='Rsig'
self.__Rsig__=[self.params['__%s__%03d'%(key,i)].value for i in range(len(self.__mpar__[key]))]
key='Material'
self.__material__=[self.__mpar__[key][i] for i in range(len(self.__mpar__[key]))]
def y(self):
"""
Define the function in terms of x to return some value
"""
self.output_params={}
self.update_params()
rho,eirho,adensity=self.calc_rho(material=self.__material__, density=self.__density__, sol_density=self.__sol_density__,Energy=self.Energy, Rmoles= self.__Rmoles__, NrDep=self.NrDep)
#rho.append(self.rhosol)
#eirho.append(self.rhosol)
#adensity.append(0.0)
r=self.calc_mesh(R=self.__R__[:-1],Rsig=self.__Rsig__,Np=self.Np)
adist = np.ones_like(r[0])
for
# Repository: zackbatist/QualCoder
# -*- coding: utf-8 -*-
'''
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Author: <NAME> (ccbogel)
https://github.com/ccbogel/QualCoder
https://qualcoder.wordpress.com/
'''
from copy import copy
import logging
from lxml import etree
import os
import shutil
import sys
import traceback
import uuid
import zipfile
from PyQt5 import QtWidgets
path = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def exception_handler(exception_type, value, tb_obj):
""" Global exception handler useful in GUIs.
tb_obj: exception.__traceback__ """
tb = '\n'.join(traceback.format_tb(tb_obj))
text = 'Traceback (most recent call last):\n' + tb + '\n' + exception_type.__name__ + ': ' + str(value)
print(text)
logger.error(_("Uncaught exception: ") + text)
#QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text)
class Refi(QtWidgets.QDialog):
"""
Create Rotterdam Exchange Format Initiative (refi) xml documents for codebook.xml and project.xml
NOTES:
https://stackoverflow.com/questions/299588/validating-with-an-xml-schema-in-python
http://infohost.nmt.edu/tcc/help/pubs/pylxml/web/index.html
"""
categories = []
codes = []
users = []
sources = []
guids = []
notes = [] # contains xml of guid and note (memo) text
variables = [] # contains dictionary of variable xml, guid, name
xml = ""
parent_textEdit = None
settings = None
tree = None
def __init__(self, settings, parent_textEdit):
""" """
sys.excepthook = exception_handler
self.settings = settings
self.parent_textEdit = parent_textEdit
self.get_categories()
self.get_codes()
self.get_users()
self.get_sources()
#self.codebook_xml()
#self.xml_validation("codebook")
self.project_xml()
self.xml_validation("project")
self.export_project()
print(self.notes)
exit(0)
def export_project(self):
'''
.qde file
Internal files are identified in the path attribute of the source element by the URL naming scheme internal://
/sources folder
Audio and video source file size:
The maximum size in bytes allowed for an internal file is 2,147,483,647 bytes (2^31−1 bytes, or 2 GiB
minus 1 byte). An exporting application must detect file size limit during export and inform the
user.
Source types:
Plain text, PDF
Images must be jpeg or png - although I will export all types
Create an unzipped folder with a /sources folder and project.qde xml document
Then create zip with suffix .qdpx
'''
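# Rough layout of what is assembled below (sketch; actual names depend on the
# project being exported):
#   ~/.qualcoder/<project_name>/
#       <project_name>.qde   <- the project XML held in self.xml
#       sources/             <- copies of the text/PDF/media sources
# The folder is then zipped with shutil.make_archive and renamed to a .qdpx
# package at <export_path>.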
project_name = self.settings['projectName'][:-4]
prep_path = os.path.expanduser('~') + '/.qualcoder/' + project_name
print(prep_path)
try:
shutil.rmtree(prep_path)
except FileNotFoundError:
pass
try:
os.mkdir(prep_path)
os.mkdir(prep_path + "/sources")
except Exception as e:
logger.error(_("Project export error ") + str(e))
QtWidgets.QMessageBox.warning(None, _("Project"), _("Project not exported. Exiting. ") + str(e))
exit(0)
try:
with open(prep_path +'/' + project_name + '.qde', 'w') as f:
f.write(self.xml)
except Exception as e:
QtWidgets.QMessageBox.warning(None, _("Project"), _("Project not exported. Exiting. ") + str(e))
print(e)
exit(0)
for s in self.sources:
#print(s)
destination = '/sources/' + s['filename']
if s['mediapath'] is not None:
try:
if s['external'] is None:
shutil.copyfile(self.settings['path'] + s['mediapath'],
prep_path + destination)
else:
shutil.copyfile(self.settings['path'] + s['mediapath'],
self.settings['directory'] + '/' + s['filename'])
except FileNotFoundError as e:
print(e)
if s['mediapath'] is None: # a document
try:
shutil.copyfile(self.settings['path'] + '/documents/' + s['name'],
prep_path + destination)
except FileNotFoundError as e:
with open(prep_path + destination, 'w') as f:
f.write(s['fulltext'])
# Also need to add the plain text file as a source
# plaintext has different guid from richtext
with open(prep_path + '/sources/' + s['plaintext_filename'], 'w') as f:
f.write(s['fulltext'])
export_path = self.settings['path'][:-4]
shutil.make_archive(export_path, 'zip', prep_path)
os.rename(export_path + ".zip", export_path + ".qdpx")
try:
shutil.rmtree(prep_path)
except FileNotFoundError:
pass
msg = export_path + ".qdpx\n"
msg += "Journals, most memos and variables are not exported. "
msg += "GIFs (if present) are not converted to jpg on export, which does not meet the exchange standard. "
msg += "This project exchange is not fully compliant with the exchange standard."
QtWidgets.QMessageBox.information(None, _("Project exported"), _(msg))
def user_guid(self, username):
""" Requires a username. returns matching guid """
for u in self.users:
if u['name'] == username:
return u['guid']
return ""
def code_guid(self, code_id):
""" Requires a code id. returns matching guid """
for c in self.codes:
if c['cid'] == code_id:
return c['guid']
return ""
def project_xml(self):
""" Creates the xml for the .qde file.
base path for external sources is set to the settings directory. """
self.xml = '<?xml version="1.0" standalone="yes"?>\n' #encoding="UTF-8"?>\n'
self.xml += '<Project '
self.xml += 'xmlns="urn:QDA-XML:project:1.0" '
guid = self.create_guid()
self.xml += 'creatingUserGUID="' + guid + '" ' # there is no creating user in QualCoder
cur = self.settings['conn'].cursor()
cur.execute("select date,memo from project")
result = cur.fetchone()
dtime = result[0].replace(" ", "T")
self.xml += 'creationDateTime="' + dtime + '" '
#self.xml += 'basePath="' + self.settings['directory'] + '" '
self.xml += 'name="' + self.settings['projectName'] + '" '
self.xml += 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
self.xml += 'origin="Qualcoder-1.3" '
self.xml += 'xsi:schemaLocation="urn:QDA-XML:project:1.0 http://schema.qdasoftware.org/versions/Project/v1.0/Project.xsd"'
self.xml += '>\n'
# add users
self.xml += "<Users>\n"
for row in self.users:
self.xml += '<User guid="' + row['guid'] + '" name="' + row['name'] + '"/>\n'
self.xml += "</Users>\n"
self.xml += self.codebook_xml()
self.xml += self.variables_xml()
self.xml += self.cases_xml()
self.xml += self.sources_xml()
self.xml += self.notes_xml()
#self.sets_xml()
self.xml += '</Project>'
def variables_xml(self):
""" Variables are associated with Sources and Cases """
self.variables = []
xml = ""
cur = self.settings['conn'].cursor()
cur.execute("select name, date ,owner, memo, caseOrFile,valuetype from attribute_type")
results = cur.fetchall()
if results == []:
return xml
xml = '<Variables>\n'
for r in results:
guid = self.create_guid()
xml += '<Variable guid="' + guid + '" '
xml += 'name="' + r[0] + '" '
xml += 'typeOfVariable="'
# Only two variable options in QualCoder
if r[5] == 'numeric':
xml += 'Float" '
else:
xml += 'Text" '
xml += '>\n'
xml += '</Variable>\n'
self.variables.append({'guid': guid, 'name': r[0], 'type': r[5], 'caseOrFile': r[4]})
xml += '</Variables>\n'
return xml
def create_note_xml(self, guid, text, user, datetime, name=""):
""" Create a Note xml for project, sources, cases, codes, etc
Appends xml in notes list.
name is used for names of journal entries.
returns a guid for a NoteRef """
guid = self.create_guid()
xml = '<Note guid="' + guid + '" '
xml += 'creatingUser="' + user + '" '
xml += 'creationDateTime="' + datetime + '" '
if name != "":
xml += 'name="' + name + '" '
xml += '>\n'
xml += '<PlainTextContent>' + text + '</PlainTextContent>\n'
xml += '</Note>\n'
self.notes.append(xml)
noteref = '<NoteRef targetGUID="' + guid + '" />\n'
return noteref
def notes_xml(self):
""" Collate note_xml list into final xml
<Notes><Note></Note></Notes>
Note xml requires a NoteRef in the source or case
returns xml """
if self.notes == []:
return ''
xml = '<Notes>\n'
for note in self.notes:
xml += note
xml += '</Notes>\n'
return xml
def cases_xml(self):
""" Create xml for cases.
Putting memo into description, but should I also create a Note xml too?
returns xml """
xml = ''
cur = self.settings['conn'].cursor()
cur.execute("select caseid, name, memo, owner, date from cases")
result = cur.fetchall()
if result == []:
return xml
xml = '<Cases>\n'
for r in result:
xml += '<Case guid="' + self.create_guid() + '" '
xml += 'name="' + r[1] + '">\n'
if r[2] != "":
xml += '<Description>' + r[2] + '</Description>\n'
xml += self.case_source_ref_xml(r[0])
#TODO unsure how this works as only has a targetRef
#xml += self.case_selection_xml(r[0])
#TODO unsure how this works
#xml += self.case_variables_xml(r[0])
xml += '</Case>\n'
xml += '</Cases>\n'
return xml
def case_source_ref_xml(self, caseid):
""" Find sources linked to this case, pos0 and pos1 must equal zero. """
xml = ''
cur = self.settings['conn'].cursor()
cur.execute("select fid, owner, date from case_text where caseid=? and pos0=0 and pos1=0", [caseid,])
result = cur.fetchall()
if result == []:
return xml
for row in result:
for s in self.sources:
if s['id'] == row[0]:
# put xml creation
# Repository: robot-acceleration/RBDReference, file: RBDReference.py
import numpy as np
import copy
np.set_printoptions(precision=4, suppress=True, linewidth = 100)
class RBDReference:
def __init__(self, robotObj):
self.robot = robotObj
def mxS(self, S, vec, alpha = 1.0):
if S[0] == 1:
return self.mx1(vec,alpha)
elif S[1] == 1:
return self.mx2(vec,alpha)
elif S[2] == 1:
return self.mx3(vec,alpha)
elif S[3] == 1:
return self.mx4(vec,alpha)
elif S[4] == 1:
return self.mx5(vec,alpha)
elif S[5] == 1:
return self.mx6(vec,alpha)
else:
return np.zeros((6))
def mx1(self, vec, alpha = 1.0):
vecX = np.zeros((6))
try:
vecX[1] = vec[2]*alpha
vecX[2] = -vec[1]*alpha
vecX[4] = vec[5]*alpha
vecX[5] = -vec[4]*alpha
except:
vecX[1] = vec[0,2]*alpha
vecX[2] = -vec[0,1]*alpha
vecX[4] = vec[0,5]*alpha
vecX[5] = -vec[0,4]*alpha
return vecX
def mx2(self, vec, alpha = 1.0):
vecX = np.zeros((6))
try:
vecX[0] = -vec[2]*alpha
vecX[2] = vec[0]*alpha
vecX[3] = -vec[5]*alpha
vecX[5] = vec[3]*alpha
except:
vecX[0] = -vec[0,2]*alpha
vecX[2] = vec[0,0]*alpha
vecX[3] = -vec[0,5]*alpha
vecX[5] = vec[0,3]*alpha
return vecX
def mx3(self, vec, alpha = 1.0):
vecX = np.zeros((6))
try:
vecX[0] = vec[1]*alpha
vecX[1] = -vec[0]*alpha
vecX[3] = vec[4]*alpha
vecX[4] = -vec[3]*alpha
except:
vecX[0] = vec[0,1]*alpha
vecX[1] = -vec[0,0]*alpha
vecX[3] = vec[0,4]*alpha
vecX[4] = -vec[0,3]*alpha
return vecX
def mx4(self, vec, alpha = 1.0):
vecX = np.zeros((6))
try:
vecX[4] = vec[2]*alpha
vecX[5] = -vec[1]*alpha
except:
vecX[4] = vec[0,2]*alpha
vecX[5] = -vec[0,1]*alpha
return vecX
def mx5(self, vec, alpha = 1.0):
vecX = np.zeros((6))
try:
vecX[3] = -vec[2]*alpha
vecX[5] = vec[0]*alpha
except:
vecX[3] = -vec[0,2]*alpha
vecX[5] = vec[0,0]*alpha
return vecX
def mx6(self, vec, alpha = 1.0):
vecX = np.zeros((6))
try:
vecX[3] = vec[1]*alpha
vecX[4] = -vec[0]*alpha
except:
vecX[3] = vec[0,1]*alpha
vecX[4] = -vec[0,0]*alpha
return vecX
def fxv(self, fxVec, timesVec):
# Fx(fxVec)*timesVec
# 0 -v(2) v(1) 0 -v(5) v(4)
# v(2) 0 -v(0) v(5) 0 -v(3)
#-v(1) v(0) 0 -v(4) v(3) 0
# 0 0 0 0 -v(2) v(1)
# 0 0 0 v(2) 0 -v(0)
# 0 0 0 -v(1) v(0) 0
result = np.zeros((6))
result[0] = -fxVec[2] * timesVec[1] + fxVec[1] * timesVec[2] - fxVec[5] * timesVec[4] + fxVec[4] * timesVec[5]
result[1] = fxVec[2] * timesVec[0] - fxVec[0] * timesVec[2] + fxVec[5] * timesVec[3] - fxVec[3] * timesVec[5]
result[2] = -fxVec[1] * timesVec[0] + fxVec[0] * timesVec[1] - fxVec[4] * timesVec[3] + fxVec[3] * timesVec[4]
result[3] = -fxVec[2] * timesVec[4] + fxVec[1] * timesVec[5]
result[4] = fxVec[2] * timesVec[3] - fxVec[0] * timesVec[5]
result[5] = -fxVec[1] * timesVec[3] + fxVec[0] * timesVec[4]
return result
def fxS(self, S, vec, alpha = 1.0):
return -self.mxS(S, vec, alpha)
def vxIv(self, vec, Imat):
temp = np.matmul(Imat,vec)
vecXIvec = np.zeros((6))
vecXIvec[0] = -vec[2]*temp[1] + vec[1]*temp[2] + -vec[2+3]*temp[1+3] + vec[1+3]*temp[2+3]
vecXIvec[1] = vec[2]*temp[0] + -vec[0]*temp[2] + vec[2+3]*temp[0+3] + -vec[0+3]*temp[2+3]
vecXIvec[2] = -vec[1]*temp[0] + vec[0]*temp[1] + -vec[1+3]*temp[0+3] + vec[0+3]*temp[1+3]
vecXIvec[3] = -vec[2]*temp[1+3] + vec[1]*temp[2+3]
vecXIvec[4] = vec[2]*temp[0+3] + -vec[0]*temp[2+3]
vecXIvec[5] = -vec[1]*temp[0+3] + vec[0]*temp[1+3]
return vecXIvec
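# vxIv computes the spatial bias force v x* (I v), i.e. the force cross-product
# of the spatial velocity with its own momentum; this is the Coriolis/centrifugal
# term that appears as f = I*a + v x* (I v) in the RNEA forward pass below.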
def rnea_fpass(self, q, qd, qdd = None, GRAVITY = -9.81):
# allocate memory
n = len(qd)
v = np.zeros((6,n))
a = np.zeros((6,n))
f = np.zeros((6,n))
gravity_vec = np.zeros((6))
gravity_vec[5] = -GRAVITY # a_base is gravity vec
# forward pass
for ind in range(n):
parent_ind = self.robot.get_parent_id(ind)
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
S = self.robot.get_S_by_id(ind)
# compute v and a
if parent_ind == -1: # parent is base
# v_base is zero so v[:,ind] remains 0
a[:,ind] = np.matmul(Xmat,gravity_vec)
else:
v[:,ind] = np.matmul(Xmat,v[:,parent_ind])
a[:,ind] = np.matmul(Xmat,a[:,parent_ind])
v[:,ind] += S*qd[ind]
a[:,ind] += self.mxS(S,v[:,ind],qd[ind])
if qdd is not None:
a[:,ind] += S*qdd[ind]
# compute f
Imat = self.robot.get_Imat_by_id(ind)
f[:,ind] = np.matmul(Imat,a[:,ind]) + self.vxIv(v[:,ind],Imat)
return (v,a,f)
def rnea_bpass(self, q, qd, f, USE_VELOCITY_DAMPING = False):
# allocate memory
n = len(q) # assuming len(q) = len(qd)
c = np.zeros(n)
# backward pass
for ind in range(n-1,-1,-1):
S = self.robot.get_S_by_id(ind)
# compute c
c[ind] = np.matmul(np.transpose(S),f[:,ind])
# update f if applicable
parent_ind = self.robot.get_parent_id(ind)
if parent_ind != -1:
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
temp = np.matmul(np.transpose(Xmat),f[:,ind])
f[:,parent_ind] = f[:,parent_ind] + temp.flatten()
# add velocity damping (defaults to 0)
if USE_VELOCITY_DAMPING:
for k in range(n):
c[k] += self.robot.get_damping_by_id(k) * qd[k]
return (c,f)
def rnea(self, q, qd, qdd = None, GRAVITY = -9.81, USE_VELOCITY_DAMPING = False):
# forward pass
(v,a,f) = self.rnea_fpass(q, qd, qdd, GRAVITY)
# backward pass
(c,f) = self.rnea_bpass(q, qd, f, USE_VELOCITY_DAMPING)
return (c,v,a,f)
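# Usage sketch (hypothetical robot object; it only needs to provide the
# accessors used above such as get_parent_id, get_S_by_id, get_Imat_by_id and
# get_Xmat_Func_by_id):
#
# ref = RBDReference(robot)
# q = np.zeros(n); qd = np.zeros(n); qdd = np.zeros(n)   # n = number of joints
# c, v, a, f = ref.rnea(q, qd, qdd)   # joint-space inverse dynamics bias term
# dc_du = ref.rnea_grad(q, qd, qdd)   # n x 2n gradient wrt [q, qd]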
def rnea_grad_fpass_dq(self, q, qd, v, a, GRAVITY = -9.81):
# allocate memory
n = len(qd)
dv_dq = np.zeros((6,n,n))
da_dq = np.zeros((6,n,n))
df_dq = np.zeros((6,n,n))
gravity_vec = np.zeros((6))
gravity_vec[5] = -GRAVITY # a_base is gravity vec
for ind in range(n):
parent_ind = self.robot.get_parent_id(ind)
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
S = self.robot.get_S_by_id(ind)
# dv_du = X * dv_du_parent + (if c == ind){mxS(Xvp)}
if parent_ind != -1: # note that v_base is zero so dv_du parent contribution is 0
dv_dq[:,:,ind] = np.matmul(Xmat,dv_dq[:,:,parent_ind])
dv_dq[:,ind,ind] += self.mxS(S,np.matmul(Xmat,v[:,parent_ind]))
# da_du = x*da_du_parent + mxS_onCols(dv_du)*qd + (if c == ind){mxS(Xap)}
if parent_ind != -1: # note that a_base is constant gravity so da_du parent contribution is 0
da_dq[:,:,ind] = np.matmul(Xmat,da_dq[:,:,parent_ind])
for c in range(n):
da_dq[:,c,ind] += self.mxS(S,dv_dq[:,c,ind],qd[ind])
if parent_ind != -1: # note that a_base is just gravity
da_dq[:,ind,ind] += self.mxS(S,np.matmul(Xmat,a[:,parent_ind]))
else:
da_dq[:,ind,ind] += self.mxS(S,np.matmul(Xmat,gravity_vec))
# df_du = I*da_du + fx_onCols(dv_du)*Iv + fx(v)*I*dv_du
Imat = self.robot.get_Imat_by_id(ind)
df_dq[:,:,ind] = np.matmul(Imat,da_dq[:,:,ind])
Iv = np.matmul(Imat,v[:,ind])
for c in range(n):
df_dq[:,c,ind] += self.fxv(dv_dq[:,c,ind],Iv)
df_dq[:,c,ind] += self.fxv(v[:,ind],np.matmul(Imat,dv_dq[:,c,ind]))
return (dv_dq, da_dq, df_dq)
def rnea_grad_fpass_dqd(self, q, qd, v):
# allocate memory
n = len(qd)
dv_dqd = np.zeros((6,n,n))
da_dqd = np.zeros((6,n,n))
df_dqd = np.zeros((6,n,n))
# forward pass
for ind in range(n):
parent_ind = self.robot.get_parent_id(ind)
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
S = self.robot.get_S_by_id(ind)
# dv_du = X * dv_du_parent + (if c == ind){S}
if parent_ind != -1: # note that v_base is zero so dv_du parent contribution is 0
dv_dqd[:,:,ind] = np.matmul(Xmat,dv_dqd[:,:,parent_ind])
dv_dqd[:,ind,ind] += S
# da_du = x*da_du_parent + mxS_onCols(dv_du)*qd + (if c == ind){mxS(v)}
if parent_ind != -1: # note that a_base is constant gravity so da_du parent contribution is 0
da_dqd[:,:,ind] = np.matmul(Xmat,da_dqd[:,:,parent_ind])
for c in range(n):
da_dqd[:,c,ind] += self.mxS(S,dv_dqd[:,c,ind],qd[ind])
da_dqd[:,ind,ind] += self.mxS(S,v[:,ind])
# df_du = I*da_du + fx_onCols(dv_du)*Iv + fx(v)*I*dv_du
Imat = self.robot.get_Imat_by_id(ind)
df_dqd[:,:,ind] = np.matmul(Imat,da_dqd[:,:,ind])
Iv = np.matmul(Imat,v[:,ind])
for c in range(n):
df_dqd[:,c,ind] += self.fxv(dv_dqd[:,c,ind],Iv)
df_dqd[:,c,ind] += self.fxv(v[:,ind],np.matmul(Imat,dv_dqd[:,c,ind]))
return (dv_dqd, da_dqd, df_dqd)
def rnea_grad_bpass_dq(self, q, f, df_dq):
# allocate memory
n = len(q) # assuming len(q) = len(qd)
dc_dq = np.zeros((n,n))
for ind in range(n-1,-1,-1):
# dc_du is S^T*df_du
S = self.robot.get_S_by_id(ind)
dc_dq[ind,:] = np.matmul(np.transpose(S),df_dq[:,:,ind])
# df_du_parent += X^T*df_du + (if ind == c){X^T*fxS(f)}
parent_ind = self.robot.get_parent_id(ind)
if parent_ind != -1:
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
df_dq[:,:,parent_ind] += np.matmul(np.transpose(Xmat),df_dq[:,:,ind])
delta_dq = np.matmul(np.transpose(Xmat),self.fxS(S,f[:,ind]))
for entry in range(6):
df_dq[entry,ind,parent_ind] += delta_dq[entry]
return dc_dq
def rnea_grad_bpass_dqd(self, q, df_dqd, USE_VELOCITY_DAMPING = False):
# allocate memory
n = len(q) # assuming len(q) = len(qd)
dc_dqd = np.zeros((n,n))
for ind in range(n-1,-1,-1):
# dc_du is S^T*df_du
S = self.robot.get_S_by_id(ind)
dc_dqd[ind,:] = np.matmul(np.transpose(S),df_dqd[:,:,ind])
# df_du_parent += X^T*df_du
parent_ind = self.robot.get_parent_id(ind)
if parent_ind != -1:
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
df_dqd[:,:,parent_ind] += np.matmul(np.transpose(Xmat),df_dqd[:,:,ind])
# add in the damping
if USE_VELOCITY_DAMPING:
for ind in range(n):
dc_dqd[ind,ind] += self.robot.get_damping_by_id(ind)
return dc_dqd
def rnea_grad(self, q, qd, qdd = None, GRAVITY = -9.81, USE_VELOCITY_DAMPING = False):
(c, v, a, f) = self.rnea(q, qd, qdd, GRAVITY)
# forward pass, dq
(dv_dq, da_dq, df_dq) = self.rnea_grad_fpass_dq(q, qd, v, a, GRAVITY)
# forward pass, dqd
(dv_dqd, da_dqd, df_dqd) = self.rnea_grad_fpass_dqd(q, qd, v)
# backward pass, dq
dc_dq = self.rnea_grad_bpass_dq(q, f, df_dq)
# backward pass, dqd
dc_dqd = self.rnea_grad_bpass_dqd(q, df_dqd, USE_VELOCITY_DAMPING)
dc_du = np.hstack((dc_dq,dc_dqd))
return dc_du
def minv_bpass(self, q):
# allocate memory
n = len(q)
Minv = np.zeros((n,n))
F = np.zeros((n,6,n))
U = np.zeros((n,6))
Dinv = np.zeros(n)
# set initial IA to I
IA = copy.deepcopy(self.robot.get_Imats_dict_by_id())
# backward pass
for ind in range(n-1,-1,-1):
# Compute U, D
S = self.robot.get_S_by_id(ind)
subtreeInds = self.robot.get_subtree_by_id(ind)
U[ind,:] = np.matmul(IA[ind],S)
Dinv[ind] = 1/np.matmul(S.transpose(),U[ind,:])
# Update Minv
Minv[ind,ind] = Dinv[ind]
for subInd in subtreeInds:
Minv[ind,subInd] -= Dinv[ind] * np.matmul(S.transpose(),F[ind,:,subInd])
# update parent if applicable
parent_ind = self.robot.get_parent_id(ind)
if parent_ind != -1:
Xmat = self.robot.get_Xmat_Func_by_id(ind)(q[ind])
# update F
for subInd in
#!/usr/bin/python
# -*- coding: utf-8 -*-
# # PyKOALA: KOALA data processing and analysis
# by <NAME> and <NAME>
# Extra work by <NAME> (MQ PACE student)
# Plus Taylah and Matt (sky subtraction)
from __future__ import absolute_import, division, print_function
from past.utils import old_div
version = "Version 0.72 - 13th February 2020"
import copy
import os.path as pth
import sys
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate
from scipy.ndimage.interpolation import shift
import scipy.signal as sig
from .constants import C, PARSEC as pc
from .utils.cube_alignment import offset_between_cubes, compare_cubes, align_n_cubes
from .utils.flux import search_peaks, fluxes, dfluxes, substract_given_gaussian
from .utils.io import read_table, save_rss_fits, save_fits_file
from .utils.moffat import fit_Moffat
from .utils.plots import (
plot_redshift_peaks, plot_weights_for_getting_smooth_spectrum,
plot_correction_in_fibre_p_fibre, plot_suspicious_fibres_graph, plot_skyline_5578,
plot_offset_between_cubes, plot_response, plot_telluric_correction, plot_plot
)
from .utils.sky_spectrum import scale_sky_spectrum, median_filter
from .utils.spectrum_tools import rebin_spec_shift, smooth_spectrum
from .utils.utils import (
FitsExt, FitsFibresIFUIndex, coord_range, median_absolute_deviation,
)
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# -----------------------------------------------------------------------------
# Define constants
# -----------------------------------------------------------------------------
DATA_PATH = pth.join(pth.dirname(__file__), "data")
# -----------------------------------------------------------------------------
# Define COLOUR scales
# -----------------------------------------------------------------------------
fuego_color_map = colors.LinearSegmentedColormap.from_list(
"fuego",
(
(0.25, 0, 0),
(0.5, 0, 0),
(1, 0, 0),
(1, 0.5, 0),
(1, 0.75, 0),
(1, 1, 0),
(1, 1, 1),
),
N=256,
gamma=1.0,
)
fuego_color_map.set_bad("lightgray")
plt.register_cmap(cmap=fuego_color_map)
projo = [0.25, 0.5, 1, 1.0, 1.00, 1, 1]
pverde = [0.00, 0.0, 0, 0.5, 0.75, 1, 1]
pazul = [0.00, 0.0, 0, 0.0, 0.00, 0, 1]
# -----------------------------------------------------------------------------
# RSS CLASS
# -----------------------------------------------------------------------------
class RSS(object):
"""
Collection of row-stacked spectra (RSS).
Attributes
----------
wavelength: np.array(float)
Wavelength, in Angstroms.
intensity: np.array(float)
Intensity :math:`I_\lambda` per unit wavelength.
variance: np.array(float)
Variance :math:`\sigma^2_\lambda` per unit wavelength
(note the square in the definition of the variance).
"""
# -----------------------------------------------------------------------------
def __init__(self):
self.description = "Undefined row-stacked spectra (RSS)"
self.n_spectra = 0
self.n_wave = 0
self.wavelength = np.zeros((0))
self.intensity = np.zeros((0, 0))
self.intensity_corrected = self.intensity
self.variance = np.zeros_like(self.intensity)
self.RA_centre_deg = 0.0
self.DEC_centre_deg = 0.0
self.offset_RA_arcsec = np.zeros((0))
self.offset_DEC_arcsec = np.zeros_like(self.offset_RA_arcsec)
self.ALIGNED_RA_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.ALIGNED_DEC_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.relative_throughput = np.ones((0)) # Added by ANGEL, 16 Sep
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def compute_integrated_fibre(
self,
list_spectra="all",
valid_wave_min=0,
valid_wave_max=0,
min_value=0.1,
plot=False,
title=" - Integrated values",
warnings=True,
text="...",
correct_negative_sky=False,
):
"""
Compute the integrated flux of a fibre in a particular range, valid_wave_min to valid_wave_max.
Parameters
----------
list_spectra: list of integers or "all" (default "all")
list with the number of fibres for computing integrated value
if using "all" it does all fibres
valid_wave_min, valid_wave_max : float
the integrated flux value will be computed in the range [valid_wave_min, valid_wave_max]
(default = 0; if both are 0 we use [self.valid_wave_min, self.valid_wave_max])
min_value: float (default 0.1)
For values lower than min_value, we set them as min_value
plot : Boolean (default = False)
Plot
title : string
Title for the plot
text: string
A bit of extra text
warnings : Boolean (default = True)
Write warnings, e.g. when the integrated flux is negative
correct_negative_sky : Boolean (default = False)
Corrects negative values making 0 the integrated flux of the lowest fibre
Example
----------
integrated_fibre_6500_6600 = star1r.compute_integrated_fibre(valid_wave_min=6500, valid_wave_max=6600,
title = " - [6500,6600]", plot = True)
"""
print("\n Computing integrated fibre values {}".format(text))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if valid_wave_min == 0:
valid_wave_min = self.valid_wave_min
if valid_wave_max == 0:
valid_wave_max = self.valid_wave_max
self.integrated_fibre = np.zeros(self.n_spectra)
region = np.where(
(self.wavelength > valid_wave_min) & (self.wavelength < valid_wave_max)
)
waves_in_region = len(region[0])
n_negative_fibres = 0
negative_fibres = []
for i in range(self.n_spectra):
self.integrated_fibre[i] = np.nansum(self.intensity_corrected[i, region])
if self.integrated_fibre[i] < 0:
if warnings:
print(
" WARNING: The integrated flux in fibre {:4} is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(
i, self.integrated_fibre[i]/waves_in_region
))
n_negative_fibres = n_negative_fibres + 1
# self.integrated_fibre[i] = min_value
negative_fibres.append(i)
if len(negative_fibres) != 0:
print("\n> Number of fibres with integrated flux < 0 : {:4}, that is the {:5.2f} % of the total !".format(
n_negative_fibres, n_negative_fibres * 100.0 / self.n_spectra
))
negative_fibres_sorted = []
integrated_intensity_sorted = np.argsort(
self.integrated_fibre/waves_in_region
)
for fibre_ in range(n_negative_fibres):
negative_fibres_sorted.append(integrated_intensity_sorted[fibre_])
# print "\n> Checking results using",n_negative_fibres,"fibres with the lowest integrated intensity"
# print " which are :",negative_fibres_sorted
if correct_negative_sky:
min_sky_value = self.integrated_fibre[negative_fibres_sorted[0]]
min_sky_value_per_wave = min_sky_value/waves_in_region
print(
"\n> Correcting negative values making 0 the integrated flux of the lowest fibre, which is {:4} with {:10.2f} counts/wave".format(
negative_fibres_sorted[0], min_sky_value_per_wave
))
# print self.integrated_fibre[negative_fibres_sorted[0]]
self.integrated_fibre = self.integrated_fibre - min_sky_value
for i in range(self.n_spectra):
self.intensity_corrected[i] = (
self.intensity_corrected[i] - min_sky_value_per_wave
)
else:
print(
"\n> Adopting integrated flux = {:5.2f} for all fibres with negative integrated flux (for presentation purposes)".format(
min_value
))
for i in negative_fibres_sorted:
self.integrated_fibre[i] = min_value
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0:
# if warnings: print " WARNING: The integrated flux in fibre {:4} STILL is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(i,self.integrated_fibre[i]/waves_in_region)
if plot:
# print"\n Plotting map with integrated values:"
self.RSS_map(
self.integrated_fibre,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
title=title,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def identify_el(
self,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
fibre=0,
broad=1.0,
verbose=True,
plot=True,
):
"""
Identify fibres with highest intensity (high_fibres=10).
Add all in a single spectrum.
Identify emission features.
These emission features should be those expected in all the cube!
Also, choosing fibre=number, it identifies el in a particular fibre.
Parameters
----------
high_fibres: float (default 10)
use the high_fibres highest intensity fibres for identifying
brightest_line : string (default "Ha")
string name with the emission line that is expected to be the brightest in integrated spectrum
cut: float (default 1.5)
The peak has to have a cut higher than cut to be considered as emission line
fibre: integer (default 0)
If fibre is given, it identifies emission lines in the given fibre
broad: float (default 1.0)
Broad (FWHM) of the expected emission lines
verbose : boolean (default = True)
Write results
plot : boolean (default = False)
Plot results
Example
----------
self.el=self.identify_el(high_fibres=10, brightest_line = "Ha",
cut=2., verbose=True, plot=True, fibre=0, broad=1.5)
"""
if fibre == 0:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
if verbose:
print("\n> Identifying emission lines using the {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
combined_high_spectrum = np.nansum(self.intensity_corrected[region], axis=0)
else:
combined_high_spectrum = self.intensity_corrected[fibre]
if verbose:
print("\n> Identifying emission lines in fibre {}".format(fibre))
# Search peaks
peaks, peaks_name, peaks_rest, continuum_limits = search_peaks(
self.wavelength,
combined_high_spectrum,
plot=plot,
cut=cut,
brightest_line=brightest_line,
verbose=False,
)
p_peaks_l = []
p_peaks_fwhm = []
# Do Gaussian fit and provide center & FWHM (flux could be also included, not at the moment as not abs. flux-cal done)
if verbose:
print("\n Emission lines identified:")
for eline in range(len(peaks)):
lowlow = continuum_limits[0][eline]
lowhigh = continuum_limits[1][eline]
highlow = continuum_limits[2][eline]
highhigh = continuum_limits[3][eline]
resultado = fluxes(
self.wavelength,
combined_high_spectrum,
peaks[eline],
verbose=False,
broad=broad,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
plot=plot,
fcal=False,
)
p_peaks_l.append(resultado[1])
p_peaks_fwhm.append(resultado[5])
if verbose:
print(" {:3}. {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(
eline + 1,
peaks_name[eline],
peaks_rest[eline],
p_peaks_l[eline],
p_peaks_fwhm[eline],
))
return [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def correct_high_cosmics_and_defects(
self,
step=50,
correct_high_cosmics=False,
fibre_p=0,
remove_5578=False, # if fibre_p=fibre plots the corrections in that fibre
clip_high=100,
warnings=False,
plot=True,
plot_suspicious_fibres=True,
verbose=False,
fig_size=12,
):
"""
Task for correcting high cosmics and CCD defects using median values of nearby pixels.
2dFdr corrects for (the majority) of the cosmic rays, usually correct_high_cosmics = False.
ANGEL COMMENT: Check, probably can be improved using MATT median running + plotting outside
Parameters
----------
correct_high_cosmics: boolean (default = False)
If False (the default), correct ONLY CCD defects
fibre_p: integer (default = 0)
Plots the corrections in fibre fibre_p
remove_5578: boolean (default = False)
Removes skyline 5578 (blue spectrum) using Gaussian fit
AND CHECK: This also MODIFIES the throughput correction correcting for flux_5578_medfilt /median_flux_5578_medfilt
step: integer (default = 50)
Number of points for calculating median value
clip_high : float (default = 100)
Minimum value of flux/median in a pixel to be considered as a cosmic
if s[wave] > clip_high*fit_median[wave] -> IT IS A COSMIC
verbose: boolean (default = False)
Write results
warnings: boolean (default = False)
Write warnings
plot: boolean (default = True)
Plot results
plot_suspicious_fibres: boolean (default = True)
Plots fibre(s) that could have a cosmic left (but it could
# Copyright 2014 Altera Corporation. All Rights Reserved.
# Copyright 2015-2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a base class derived from `unittest.TestClass`
for unit tests using the :py:class:`pyfakefs` module.
`fake_filesystem_unittest.TestCase` searches `sys.modules` for modules
that import the `os`, `io`, `path`, `shutil`, and `pathlib` modules.
The `setUpPyfakefs()` method binds these modules to the corresponding fake
modules from `pyfakefs`. Further, the `open()` built-in is bound to a fake
`open()`. In Python 2, built-in `file()` is similarly bound to the fake
`open()`.
It is expected that `setUpPyfakefs()` be invoked at the beginning of the
derived class' `setUp()` method. There is no need to add anything to the
derived class' `tearDown()` method.
During the test, everything uses the fake file system and modules. This means
that even in your test fixture, familiar functions like `open()` and
`os.makedirs()` manipulate the fake file system.
Existing unit tests that use the real file system can be retrofitted to use
pyfakefs by simply changing their base class from :py:class:`unittest.TestCase`
to :py:class:`pyfakefs.fake_filesystem_unittest.TestCase`.
"""
import doctest
import inspect
import sys
import tempfile
import unittest
from pyfakefs.deprecator import Deprecator
try:
from importlib.machinery import ModuleSpec
except ImportError:
ModuleSpec = object
try:
# python >= 3.4
from importlib import reload
except ImportError:
try:
# python 3.0 - 3.3
from imp import reload
except ImportError:
# python 2 - reload is built-in
pass
from pyfakefs import fake_filesystem
from pyfakefs import fake_filesystem_shutil
from pyfakefs import mox3_stubout
if sys.version_info >= (3, 4):
from pyfakefs import fake_pathlib
try:
import scandir # noqa: F401 import used to set has_scandir
import fake_scandir
has_scandir = True
except ImportError:
has_scandir = False
if sys.version_info < (3, ):
import __builtin__ as builtins # pylint: disable=import-error
else:
import builtins
def load_doctests(loader, tests, ignore, module,
additional_skip_names=None,
patch_path=True): # pylint: disable=unused-argument
"""Load the doctest tests for the specified module into unittest.
Args:
loader, tests, ignore : arguments passed in from `load_tests()`
module: module under test
additional_skip_names: see :py:class:`TestCase` for an explanation
patch_path: see :py:class:`TestCase` for an explanation
File `example_test.py` in the pyfakefs release provides a usage example.
"""
_patcher = Patcher(additional_skip_names=additional_skip_names,
patch_path=patch_path)
globs = _patcher.replace_globs(vars(module))
tests.addTests(doctest.DocTestSuite(module,
globs=globs,
setUp=_patcher.setUp,
tearDown=_patcher.tearDown))
return tests
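# --- Illustrative sketch (not part of pyfakefs) ------------------------------
# A test module can expose the doctests of a module under test to unittest
# discovery by defining the standard `load_tests` protocol hook and delegating
# to load_doctests() above. `my_module` is a hypothetical name used only for
# illustration; rename this function to `load_tests` in your own test module.
def _example_load_tests(loader, tests, ignore):
    import my_module  # hypothetical module under test -- replace with your own
    return load_doctests(loader, tests, ignore, my_module)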
class TestCaseMixin(object):
"""Test case mixin that automatically replaces file-system related
modules by fake implementations.
Attributes:
additional_skip_names: names of modules inside of which no module
replacement shall be performed, in addition to the names in
:py:attr:`fake_filesystem_unittest.Patcher.SKIPNAMES`.
patch_path: if False, modules named *path* will not be patched with
the fake ``os.path`` module. Set this to False when you need
to import some other module named ``path``, for example::
from my_module import path
Irrespective of patch_path, module ``os.path`` is still
correctly faked if imported the usual way using ``import
os`` or ``import os.path``.
modules_to_reload: A list of modules that need to be reloaded
to be patched dynamically; may be needed if the module
imports file system modules under an alias
.. note:: This is done independently of `use_dynamic_patch`
.. caution:: Reloading modules may have unwanted side effects.
use_dynamic_patch: If `True`, dynamic patching after setup is used
(for example for modules loaded locally inside of functions).
Can be switched off if it causes unwanted side effects.
modules_to_patch: A dictionary of fake modules mapped to the
patched module names. Can be used to add patching of modules
not provided by `pyfakefs`.
If you want to patch a class in a module imported using
`from some_module import SomeClass`, you have to specify
`some_module.SomeClass` as the key for the fake class.
If you specify attributes `additional_skip_names` or `patch_path` here
and you have DocTests, consider also specifying the same arguments to
:py:func:`load_doctests`.
Example usage in derived test classes::
from unittest import TestCase
from fake_filesystem_unittest import TestCaseMixin
class MyTestCase(TestCase, TestCaseMixin):
def __init__(self, methodName='runTest'):
super(MyTestCase, self).__init__(
methodName=methodName,
additional_skip_names=['posixpath'])
import sut
class AnotherTestCase(TestCase, TestCaseMixin):
def __init__(self, methodName='runTest'):
super(AnotherTestCase, self).__init__(
methodName=methodName, modules_to_reload=[sut])
"""
additional_skip_names = None
    patch_path = True
modules_to_reload = None
use_dynamic_patch = True
modules_to_patch = None
@property
def fs(self):
return self._stubber.fs
def setUpPyfakefs(self):
"""Bind the file-related modules to the :py:class:`pyfakefs` fake file
system instead of the real file system. Also bind the fake `open()`
function, and on Python 2, the `file()` function.
Invoke this at the beginning of the `setUp()` method in your unit test
class.
"""
self._stubber = Patcher(
additional_skip_names=self.additional_skip_names,
patch_path=self.patch_path,
use_dynamic_patch=self.use_dynamic_patch,
modules_to_reload=self.modules_to_reload,
modules_to_patch=self.modules_to_patch)
self._stubber.setUp()
self.addCleanup(self._stubber.tearDown)
class TestCase(unittest.TestCase, TestCaseMixin):
"""Test case class that automatically replaces file-system related
modules by fake implementations.
"""
def __init__(self, methodName='runTest',
additional_skip_names=None,
patch_path=True,
modules_to_reload=None,
use_dynamic_patch=True,
modules_to_patch=None):
"""Creates the test class instance and the stubber used to stub out
file system related modules.
Args:
methodName: The name of the test method (same as in
unittest.TestCase)
"""
super(TestCase, self).__init__(methodName)
self.additional_skip_names = additional_skip_names
self.patch_path = patch_path
self.modules_to_reload = modules_to_reload
self.use_dynamic_patch = use_dynamic_patch
self.modules_to_patch = modules_to_patch
@Deprecator('add_real_file')
def copyRealFile(self, real_file_path, fake_file_path=None,
create_missing_dirs=True):
"""Add the file `real_file_path` in the real file system to the same
path in the fake file system.
**This method is deprecated** in favor of
:py:meth:`FakeFilesystem.add_real_file`.
`copyRealFile()` is retained with limited functionality for backward
compatibility only.
Args:
real_file_path: Path to the file in both the real and fake
file systems
fake_file_path: Deprecated. Use the default, which is
`real_file_path`.
If a value other than `real_file_path` is specified, a `ValueError`
exception will be raised.
create_missing_dirs: Deprecated. Use the default, which creates
missing directories in the fake file system. If `False` is
specified, a `ValueError` exception is raised.
Returns:
The newly created FakeFile object.
Raises:
IOError: If the file already exists in the fake file system.
ValueError: If deprecated argument values are specified.
See:
:py:meth:`FakeFilesystem.add_real_file`
"""
if fake_file_path is not None and real_file_path != fake_file_path:
raise ValueError("CopyRealFile() is deprecated and no longer "
"supports different real and fake file paths")
if not create_missing_dirs:
raise ValueError("CopyRealFile() is deprecated and no longer "
"supports NOT creating missing directories")
return self._stubber.fs.add_real_file(real_file_path, read_only=False)
@DeprecationWarning
def tearDownPyfakefs(self):
"""This method is deprecated and exists only for backward
compatibility. It does nothing.
"""
pass
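# --- Illustrative sketch (not part of pyfakefs) ------------------------------
# An existing unit test can be retrofitted to the fake file system simply by
# deriving from the TestCase defined above and calling setUpPyfakefs() at the
# start of setUp(); afterwards the built-in open() is bound to the fake open(),
# so the file below only ever exists in memory. The path is purely illustrative.
class _ExampleFakeFsTest(TestCase):
    def setUp(self):
        self.setUpPyfakefs()

    def test_write_and_read(self):
        # open() writes to the fake file system after setUpPyfakefs()
        with open('/fake_data.txt', 'w') as f:
            f.write('hello')
        with open('/fake_data.txt') as f:
            self.assertEqual('hello', f.read())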
class Patcher(object):
"""
Instantiate a stub creator to bind and un-bind the file-related modules to
the :py:mod:`pyfakefs` fake modules.
The arguments are explained in :py:class:`TestCase`.
:py:class:`Patcher` is used in :py:class:`TestCase`. :py:class:`Patcher`
also works as a context manager for PyTest::
with Patcher():
doStuff()
"""
SKIPMODULES = {None, fake_filesystem, fake_filesystem_shutil, sys}
'''Stub nothing that is imported within these modules.
`sys` is included to prevent `sys.path` from being stubbed with the fake
`os.path`.
'''
assert None in SKIPMODULES, ("sys.modules contains 'None' values;"
" must skip them.")
HAS_PATHLIB = sys.version_info >= (3, 4)
IS_WINDOWS = sys.platform in ('win32', 'cygwin')
SKIPNAMES = {'os', 'path', 'io', 'genericpath'}
if HAS_PATHLIB:
SKIPNAMES.add('pathlib')
def __init__(self, additional_skip_names=None, patch_path=True,
modules_to_reload=None, use_dynamic_patch=True,
modules_to_patch=None):
"""For a description of the arguments, see TestCase.__init__"""
self._skipNames = self.SKIPNAMES.copy()
if additional_skip_names is not None:
self._skipNames.update(additional_skip_names)
self._patchPath = patch_path
if not patch_path:
self._skipNames.discard('path')
self._skipNames.discard('genericpath')
self.modules_to_reload = [tempfile]
if modules_to_reload is not None:
self.modules_to_reload.extend(modules_to_reload)
self._use_dynamic_patch = use_dynamic_patch
# Attributes set by _findModules()
# IMPORTANT TESTING NOTE: Whenever you add a new module below, test
# it by adding an attribute in fixtures/module_with_attributes.py
# and a test in fake_filesystem_unittest_test.py, class
# TestAttributesWithFakeModuleNames.
self._fake_module_classes = {
'os': fake_filesystem.FakeOsModule,
'shutil': fake_filesystem_shutil.FakeShutilModule,
'io': fake_filesystem.FakeIoModule,
}
if self.HAS_PATHLIB:
self._fake_module_classes[
'pathlib'] = fake_pathlib.FakePathlibModule
if has_scandir:
self._fake_module_classes[
'scandir'] = fake_scandir.FakeScanDirModule
self._class_modules = {}
if modules_to_patch is not None:
for name, fake_module in modules_to_patch.items():
if '.' in name:
module_name, name = name.split('.')
self._class_modules[name] = module_name
self._fake_module_classes[name] = fake_module
self._modules = {}
for name in self._fake_module_classes:
self._modules[name] = set()
if self._patchPath:
self._modules['path'] = set()
self._find_modules()
assert None not in vars(self).values(), \
"_findModules() missed the initialization of an instance variable"
# Attributes set by _refresh()
self._stubs = None
self.fs = None
self.fake_open = None
self.fake_modules = {}
self._dyn_patcher = None
# _isStale is set by tearDown(), reset by _refresh()
        self._isStale = True
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import wx
import wx.wizard
import shutil
import threading
import zipfile
import glob, os
import Core.Common.FGlobals as FGlobals
import Core.Common.FUtils as FUtils
from Core.Common.FConstants import *
from Core.Gui.Dialog.FAppSettingsDialog import *
from Core.Gui.Dialog.FBlessedViewerDialog import *
from Core.Gui.Dialog.FCompareSetupDialog import *
from Core.Gui.Dialog.FExecutionDialog import *
from Core.Gui.Dialog.FOpenDialog import *
from Core.Gui.Dialog.FPreferenceDialog import *
from Core.Gui.Dialog.FProgressDialog import *
from Core.Gui.Dialog.FRegExDialog import *
from Core.Gui.Dialog.FRunConfigDialog import *
from Core.Gui.Dialog.FSelectDataSetDialog import *
from Core.Gui.Dialog.FSettingDialog import *
from Core.Gui.Grid.FExecutionGrid import *
from Core.Gui.FMenuBar import *
from Core.Gui.FImageType import *
from Core.FTestSuite import *
from Core.FHtmlExporter import *
from Core.FCsvExporter import *
def makeArchive(fileList, archive):
"""
'fileList' is a list of file names - full path each name
'archive' is the file name for the archive with a full path
"""
try:
print "making archive: %s adopters: %s " % (archive, FGlobals.adoptersPackage)
if (FGlobals.adoptersPackage == 'True'):
typeList = [".png", ".dae", ".html", ".csv", ".sha", ".log", ".py", ".txt"]
else:
typeList = [".html", ".csv", ".sha", ".txt", ".py"]
print "TypeList: %s" % (typeList)
a = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)
for f in fileList:
found = False
for t in typeList:
pos = f.find(t)
if (pos > -1):
                    # Ensure it's the last thing (a true file extension)
flen = len(f)
tlen = len(t)
# print "flen: %s tlen: %s pos: %s" % (flen, tlen, pos)
if (pos == (flen - tlen)):
pos = f.find("blessed")
if (pos < 0):
found = True
if (found):
print "archiving file %s" % (f)
a.write(f)
else:
print "skipping file %s" % (f)
a.close()
print "Done making archive"
return True
except: return False
def dirEntries(dir_name, subdir, *args):
'''Return a list of file names found in directory 'dir_name'
If 'subdir' is True, recursively access subdirectories under 'dir_name'.
Additional arguments, if any, are file extensions to match filenames. Matched
file names are added to the list.
If there are no additional arguments, all files found in the directory are
added to the list.
Example usage: fileList = dirEntries(r'H:\TEMP', False, 'txt', 'py')
Only files with 'txt' and 'py' extensions will be added to the list.
Example usage: fileList = dirEntries(r'H:\TEMP', True)
All files and all the files in subdirectories under H:\TEMP will be added
to the list.
'''
fileList = []
for file in os.listdir(dir_name):
dirfile = os.path.join(dir_name, file)
if os.path.isfile(dirfile):
if not args:
fileList.append(dirfile)
else:
if os.path.splitext(dirfile)[1][1:] in args:
fileList.append(dirfile)
# recursively access file names in subdirectories
elif os.path.isdir(dirfile) and subdir:
print "Accessing directory:", dirfile
fileList.extend(dirEntries(dirfile, subdir, *args))
return fileList
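# Illustrative sketch (not part of the original test suite): dirEntries() and
# makeArchive() above are typically combined to package a results directory.
# The directory and archive paths passed in are hypothetical examples only.
def packageDirectoryExample(resultsDir, archivePath):
    fileList = dirEntries(resultsDir, True)
    if makeArchive(fileList, archivePath):
        print "wrote archive:", archivePath
    else:
        print "failed to write archive:", archivePath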
class FSFrame(FTestSuite):
def __init__(self, MDIparent, createToolbar):
FTestSuite.__init__(self)
self.__MDIparent = MDIparent
self.menu = FMenuBar(self, createToolbar)
self.SetMenuBar(self.menu)
self.menu.Bind(FMenuBar.ID_NEW, self.__OnNew)
self.menu.Bind(FMenuBar.ID_EXIT, self.__OnExit)
self.menu.Bind(FMenuBar.ID_HELP, self.__OnHelp)
self.menu.Bind(FMenuBar.ID_ABOUT, self.__OnAbout)
self.menu.Bind(FMenuBar.ID_OPEN, self.__OnOpen)
def __OnNew(self, e):
dialog = FRunConfigDialog(self, self.applicationMap)
if (dialog.ShowModal() == wx.ID_OK):
testProcedure = self.SaveProcedure(dialog.title,
dialog.selectedRun, dialog.GetComments())
if (testProcedure != None):
child = RunTable(self.__MDIparent, wx.ID_ANY, testProcedure)
child.Maximize(True)
child.Show(True)
dialog.Destroy()
def __OnOpen(self, e):
fileChooser = FOpenDialog(self)
if (fileChooser.ShowModal() == wx.ID_OK):
self.OpenTestProcedure(fileChooser.GetPath())
def __BusyInfoOpenTestProcedure(self, filename):
busyInfo = wx.BusyInfo("Opening test procedure: loading. Please " +
"wait...")
return self.Load(filename)
def __BusyInfoCheckForNewTests(self, testProcedure, regExId):
busyInfo = wx.BusyInfo("Opening test procedure: checking regular " +
"expression. Please wait...")
return testProcedure.CheckForNewTests(regExId)
def OpenTestProcedure(self, filename):
testProcedure = self.__BusyInfoOpenTestProcedure(filename)
recovered = testProcedure.GetRecoveredTestIds()
if (recovered != ""):
FUtils.ShowWarning(self, "Encountered unfinished test " +
"executions. Recovering to previous finished execution " +
"for these tests:\n\n" + recovered)
for regExId in testProcedure.GetRegExIdGenerator():
dataSets = self.__BusyInfoCheckForNewTests(testProcedure, regExId)
if (len(dataSets) == 0): continue
displayDataSets = ""
for dataSet in dataSets:
displayDataSets = (displayDataSets +
FUtils.GetRelativePath(dataSet, MAIN_FOLDER) + "\n")
if (FUtils.ShowConfirmation(self,
"Found these missing data sets for " +
"Regular Expression " + str(regExId) + ": \n" +
testProcedure.GetRegExString(regExId) + "\n\n\n" +
displayDataSets + "\n\n" +
"Do you want to add them to the test procedure? " +
"Selecting \"No\" will also ignore them from future " +
"confirmations.", False)):
settings = testProcedure.GetRegExSettings(regExId)
for dataSet in dataSets:
testProcedure.AddTest(dataSet, settings)
else:
ignored = testProcedure.GetIgnoredRegExList(regExId)
if (len(ignored) == 0):
ignored.append("") # len(dataSet) != 0
for dataSet in dataSets:
displayedFilename = FUtils.GetRelativePath(dataSet,
MAIN_FOLDER)
regExPath = FUtils.NormalizeRegEx(displayedFilename)
newIgnored = ignored[-1]
if (newIgnored != ""):
newIgnored = newIgnored + "|"
newIgnored = newIgnored + regExPath
if (len(newIgnored) < 30000):
ignored[-1] = newIgnored
else:
ignored.append(regExPath)
testProcedure.SetRegEx(regExId,
testProcedure.GetRegExList(regExId), ignored)
busyInfo = wx.BusyInfo("Opening test procedure: Creating grid. " +
"Please wait...")
child = RunTable(self.__MDIparent, wx.ID_ANY, testProcedure)
child.Maximize(True)
child.Show(True)
def __OnExit(self, e):
self.__MDIparent.Destroy()
def __OnHelp(self, e):
# XXX: this is windows only
os.startfile(DOCUMENTATION)
def __OnAbout(self, e):
message = ("COLLADA Conformance Test Suite v" + str(VERSION) +"\n\n" +
"Copyright (C) 2006-2010 Khronos Group\n" +
"Available only to Khronos members.\n")
wx.MessageDialog(self, message, "About COLLADA Conformance Test Suite",
style = wx.OK).ShowModal()
class RunTable(FSFrame, wx.MDIChildFrame):
def __init__(self, parent, id, testProcedure):
wx.MDIChildFrame.__init__(self, parent, id, testProcedure.GetName(),
size = (400, 320),
style = wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)
FSFrame.__init__(self, self.GetParent(), True)
self.menu.Bind(FMenuBar.ID_SAVEAS, self.__OnSaveAs)
self.menu.Bind(FMenuBar.ID_EXPORT_ALL_CSV, self.__OnExportAllCsv)
self.menu.Bind(FMenuBar.ID_EXPORT_ALL, self.__OnExportAll)
self.menu.Bind(FMenuBar.ID_EXPORT_SELECTED, self.__OnExportSelected)
self.menu.Bind(FMenuBar.ID_CLOSE, self.__OnClose)
self.menu.Bind(FMenuBar.ID_PACKAGE_RESULTS, self.__OnPackageResults)
self.Bind(wx.EVT_CLOSE, self.__OnClose)
self.menu.Bind(FMenuBar.ID_RELOAD, self.__OnReload)
self.menu.Bind(FMenuBar.ID_PREFERENCES, self.__OnPreference)
self.menu.Bind(FMenuBar.ID_ADD_TEST, self.__OnAddTest)
self.menu.Bind(FMenuBar.ID_RUN_SELECTED, self.__OnRunSelected)
self.menu.Bind(FMenuBar.ID_RUN_ALL, self.__OnRunAll)
self.menu.Bind(FMenuBar.ID_RUN_UNRAN, self.__OnRunUnran)
self.menu.Bind(FMenuBar.ID_SELECT_ALL, self.__OnSelectAll)
self.menu.Bind(FMenuBar.ID_REFRESH, self.__OnRefreshTable)
self.menu.Bind(FMenuBar.ID_REFRESH_SELECTED, self.__OnRefreshSelected)
self.menu.Bind(FMenuBar.ID_ANIMATE, self.__OnAnimate)
self.menu.Bind(FMenuBar.ID_REGEX, self.__OnRegEx)
self.CreateStatusBar()
self.__mdiId = self.GetParent().AddTestProcedure(testProcedure)
self.__testProcedure = testProcedure
self.__csvExporter = FCsvExporter()
self.__htmlExporter = FHtmlExporter()
self.__animateAll = False
self.__grid = self.__CreateGrid()
self.__grid.SortColumn(0, True)
for test in self.__testProcedure.GetTestGenerator():
id = test.GetTestId()
self.__grid.AddExecution(id, test, test.GetCurrentExecution())
self.__grid.PartialRefreshAdd(test)
self.__grid.PartialRefreshDone()
def SetStatistics(self, total, passed, failed):
self.menu.SetTotal(total)
self.menu.SetPassed(passed)
self.menu.SetFailed(failed)
def SetBadgesEarned(self, badgesEarned):
self.menu.SetBadgesEarned(badgesEarned)
def __OnAnimate(self, e):
e.Skip()
newValue = not e.IsChecked()
if (self.__animateAll != newValue):
self.__animateAll = newValue
self.__grid.SetAnimateAll(newValue)
def __OnSelectAll(self, e):
e.Skip()
self.__grid.SelectAll()
def __CreateGrid(self):
grid = FExecutionGrid(self, self.__testProcedure, False,
self.configDict["feelingViewerGUI"],
self.configDict["pythonExecutable"])
grid.AppendContext("Run Selected", self.__OnContextRun)
grid.AppendContext(None, None)
grid.AppendContext("Show Previous", self.__OnContextShowPrevious)
grid.AppendContext("Compare Execution With", self.__OnCompare)
grid.AppendContext(None, None)
grid.AppendContext("View Settings", self.__OnViewSettings)
grid.AppendContext("Change Settings", self.__OnChangeSettings)
grid.AppendContext(None, None)
grid.AppendContext("Delete Execution", self.__OnContextDeleteExecution)
grid.AppendContext("Delete Test", self.__OnContextDeleteTest)
grid.AppendContext(None, None)
grid.AppendContext("View Blessed", self.__OnContextViewBlessed)
grid.AppendExecutionContext()
return grid
def __OnRegEx(self, e):
dialog = FRegExDialog(self, self.__testProcedure, self.applicationMap)
dialog.ShowModal()
def __OnContextViewBlessed(self, e):
if (len(self.__grid.GetSelectedKeys()) == 0): return
if (len(self.__grid.GetSelectedKeys()) > 1):
FUtils.ShowWarning(self, "Select only one test to view settings.")
return
key = self.__grid.GetSelectedKeys()[0]
test = self.__testProcedure.GetTest(key)
# FBlessedViewerDialog may unbless.
self.__grid.PartialRefreshRemove(test)
FBlessedViewerDialog(self, test.GetDataSetPath()).ShowModal()
self.__grid.PartialRefreshAdd(test)
self.__grid.PartialRefreshDone()
def __OnViewSettings(self, e):
if (len(self.__grid.GetSelectedKeys()) == 0): return
if (len(self.__grid.GetSelectedKeys()) > 1):
FUtils.ShowWarning(self, "Select only one test to view settings.")
return
key = self.__grid.GetSelectedKeys()[0]
setting = self.__testProcedure.GetTest(key).GetSettings()
FSettingDialog(self, self.__testProcedure, self.applicationMap, False,
setting).ShowModal()
def __OnChangeSettings(self, e):
if (len(self.__grid.GetSelectedKeys()) == 0): return
settings = []
for step, app, op, setting in self.__testProcedure.GetStepGenerator():
settings.append(None)
for key in self.__grid.GetSelectedKeys():
test = self.__testProcedure.GetTest(key)
if (settings[-1] == None):
settings[-1] = test.GetSettings()[step]
else:
if (settings[-1] != test.GetSettings()[step]):
settings[-1] = None
break
dialog = FSettingDialog(self, self.__testProcedure,
self.applicationMap, True, settings)
if (dialog.ShowModal() == wx.ID_OK):
newSettings = dialog.GetSettings()
addedTestDesc = []
removedTestKeys = []
for key in self.__grid.GetSelectedKeys():
test = self.__testProcedure.GetTest(key)
settings = test.GetSettings()
changed = False
# should be same length or else something is seriously wrong
for i in range(len(newSettings)):
if (newSettings[i] != settings[i]):
in the header
3
>>> field_expected_1 = 'project.s1'+util.str_source_variable_separator+'a'
>>> field_expected_1 in header_combined
True
>>> f_expect2 = 'project.s1'+util.str_source_variable_separator+'b'
>>> f_expect3 = 'project.s2'+util.str_source_variable_separator+'z'
>>> all([f_expect3 in header_combined, f_expect2 in header_combined])
True
>>> result3 = [('big','set')] #Try adding a large result set
>>> result3.extend([(3.141,'2999-12-31T23:59:59Z') for i in range(9990000)])
>>> dict_results['project2.big_set'] = result3
>>> combined = get_source_results_combined( dict_results)
>>> len(combined) #Expect header row + 9990003 data row
9990004
"""
# Combine resultsets
# FIXME: Below may be possible with SQLalchemy ORM, but either way
# Warehouse db schema is expected to obviate the need for this function.
header_row_combined = []
data_rows_combined = numpy.array([], dtype=object)#conserve memory w/ NumPy
for str_source_id in dict_results_by_id.keys():
#1) Add columns from this resultset to header row
#No dataset fields have been identified as compatible/shared dimensions
#.. so we make no attempt to combine/consolidate them.
result = dict_results_by_id[ str_source_id]
if result is None:
continue #No results Skipping.
try:
header_result = result[0]
header_prefixed = []
for str_field in header_result:
str_field_prefixed =util.prefix_field_name(str_field, str_source_id)
header_prefixed.append( str_field_prefixed)
header_row_combined.extend( header_prefixed)
except IndexError:
#source resultset has no header! Skipping.
continue
#2) Extend all combined data rows w/ null values for the new header fields
int_starting_combined_width = 0
if len(data_rows_combined) > 0:
int_rows, int_starting_combined_width = data_rows_combined.shape
int_header_len = len(header_result)
tuple_col_padding_size = (int_rows, int_header_len)
array_padding = numpy.empty(tuple_col_padding_size, dtype=object)
array_padding.fill(str_no_data_placeholder)
list_add_columns = [data_rows_combined, array_padding]
data_rows_combined = numpy.column_stack(list_add_columns)
#3) Add rows from this resultset to combined data rows
array_result_data_rows = numpy.array(result[1:], dtype=object)
if int_starting_combined_width > 0:
# extend the new data rows with additional columns of padding
int_pad_width = int_starting_combined_width#offset new fields
int_padding_rows = len(array_result_data_rows)
tuple_padding_size = (int_padding_rows, int_pad_width)
array_padding_new_rows = numpy.empty(tuple_padding_size, dtype=object)
array_padding_new_rows.fill(str_no_data_placeholder)
list_add_columns = [array_padding_new_rows, array_result_data_rows]
array_result_data_rows = numpy.column_stack(list_add_columns)
if int_starting_combined_width == 0:
data_rows_combined = array_result_data_rows
continue
#Otherwise,now that new rows size is same as master list: append
list_add_new_rows = [data_rows_combined, array_result_data_rows]
data_rows_combined = numpy.concatenate( list_add_new_rows)
# convert header list, plus list of data-row lists
# into one list of tuples
result_combined = []
result_combined.append( tuple(header_row_combined) )
for list_data_row in data_rows_combined:
result_combined.append( tuple(list_data_row) )
return result_combined
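def _example_pad_and_append():
    """
    Minimal sketch (not part of the original module) of the padding scheme
    used above: when a new resultset introduces columns, the previously
    combined rows are right-padded and the new rows are left-padded with the
    no-data placeholder, so every row ends up with the same width.
    """
    existing = numpy.array([(1, 2)], dtype=object)                # 1 row, 2 cols
    new_rows = numpy.array([(9,)], dtype=object)                  # 1 row, 1 col
    pad_existing = numpy.empty((existing.shape[0], new_rows.shape[1]), dtype=object)
    pad_existing.fill(str_no_data_placeholder)
    pad_new = numpy.empty((new_rows.shape[0], existing.shape[1]), dtype=object)
    pad_new.fill(str_no_data_placeholder)
    widened_existing = numpy.column_stack([existing, pad_existing])
    widened_new = numpy.column_stack([pad_new, new_rows])
    return numpy.concatenate([widened_existing, widened_new])     # 2 rows, 3 cols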
def get_list_of_warehouse_variables():
"""
Returns a list of names, representing all available Warehouse variables
"""
list_variables = []
loader = api.config_loader
for dict_source in loader.get_list_of_etl_dicts():
str_dataset_id = dict_source['id']
# retrieve & decode the configured list of fields+types
str_field_types_json = dict_source['python_types']
dict_field_types = json.loads( str_field_types_json)
# add the field names, to our list
for str_source_variable in dict_field_types.keys():
str_warehouse_variable = util.prefix_field_name(str_source_variable, str_dataset_id)
list_variables.append( str_warehouse_variable)
return list_variables
def get_source_tables():
"""
Returns list of 'table' warehouse support DTOs representing all configured
data sources.
"""
with get_source_model_session() as available_model:
return available_model['tables']
def get_source_variables():
"""
Returns 'variable' DWSupport objects representing all configured fields
"""
with get_source_model_session() as available_model:
return available_model['variables']
@contextlib.contextmanager
def get_source_model_session():
"""
returns a dict, representing the complete DWSupport configuration
"""
yield prefetch_cache.get_model()
def get_sql_filtered(source_table, python_types, filters
,empty_fact_dimensions = []):
"""
Returns String,representing an inline view definition for retrieving source
Keyword Parameters:
source_table -- a 'table' warehouse support DTO,representing the source
returned SQL will retrieve.
python_types -- JSON encoded string representing a Dict that maps
field names to Python type constructors
filters -- list of Strings, representing Selection filter expressions
empty_fact_dimensions -- list of Strings representing Dimension
tables (or OLAP-Roles) which are to be OUTER JOINED to produce
empty Fact value cells for all Dimensional values not found in
the fact.
TODO: provide unittest coverage
"""
try:
table_type.validate( source_table['type'])
except table_type.ValidateUnexpectedValue as e:
raise NotImplementedError('No SQL Generation method, for type: {}'.format(source_table)) from e #TODO: make this into a local class
schema = "dw" #Default, for now all warehoused tables are in same schema
#if source is a Fact table, join on its dimensions & alias all dimensional fields
with get_source_model_session() as current_model:
if source_table['type'] == 'fact':
# compose sql:
fact_name = source_table['name']
# get variable lists & associations, by parent table_name
variables, associations_by_parent, aliases, nonselects = get_fact_children(fact_name, current_model)
return sqlgenerator.get_fact_sql(fact_name, schema, variables
,associations_by_parent, aliases, current_model
,nonselects, filters, empty_fact_dimensions)
#source is a Dimension or OLAP-Role
# get dimension fields
dimension_name = source_table['name']
associations = current_model['associations']
if source_table['type'] == 'dimension role':
role_name = source_table['name'] #Name is actually an alias
# locate base dimension name
dimension_name = _get_alias_base( role_name, associations)
variables = variable.get(dimension_name, connection_func=util.get_dwsupport_connection)
if source_table['type'] == 'dimension role':
variables = _get_aliased_variables(role_name, variables)
# compose SQL
return _get_dim_sql(dimension_name, schema, variables, python_types, current_model, filters)
def get_fact_children(fact_name, current_model):
"""
Returns Fact table variables, associations, OLAP-roles & role-support Dims
Variables & associations a represented as Dicts, indexed by parent
table_name. Dimension roles are returned as a list of names.
Keyword Parameters:
fact_name -- String, representing name of the fact table for which dicts
of variable lists and associations are to be retrieved.
current_model -- Dict, representing the full DWSupport configuration
"""
# get associations, by parent table_name
associations_all = current_model['associations']
associations_by_parent = {}
aliases = []
nonselect_tables = []
for a in associations_all:
if a['table'] == fact_name:
# add association to Dict,only if it relates to fact_name
dimension_name = a['parent']
associations_by_parent[ dimension_name] = a
if a['type'] == 'fact dimension role':
# Make a note, if the Dict is an alias("Role")
aliases.append( dimension_name)
# construct artificial Dimension relations,for any standalone OLAP-Roles
for table_name in aliases:
#fetch the Role information
role_association = associations_by_parent[table_name]
role_name = role_association['parent']
dimension_name = _get_alias_base( role_name, associations_all)
# check if base dimension is listed
if dimension_name not in associations_by_parent.keys():
# build a fake association, for the dimension
fake_base_association = dict(role_association)
fake_base_association['parent'] = dimension_name
fake_base_association['type'] = 'fact dimension'
# map artificial relation, to enable SQL generation
associations_by_parent[ dimension_name] = fake_base_association
nonselect_tables.append( dimension_name) #mark as artificial
# get variables, by table_name
relevant_tables = [fact_name] + list(associations_by_parent.keys())
variables_all = current_model['variables']
variables_by_parent = {}
variables_by_table_name = {} #also, prepare a map of variables
for v in variables_all:
table_name = v['table']
variables_by_table_name.setdefault(table_name, [])#map is for Aliases
variables_by_table_name[table_name].append(v)
if table_name in relevant_tables:
# add variable to our Dict lists, if it relates to this source
variables_by_parent.setdefault(table_name, [])#if new table,init our Dict
variables_by_parent[table_name].append(v)
# get variables for aliases (Aliases dont have their own variables)
# also, construct artificial Fact-to-dim associations for aliases.
for table_name in aliases:
role_association = associations_by_parent[table_name]
role_name = role_association['parent']
dimension_name = _get_alias_base( role_name, associations_all)
dimension_variables = variables_by_table_name[dimension_name]
alias_variables = _get_aliased_variables( role_name
,dimension_variables)
variables_by_parent[table_name] = alias_variables
# replace "role" association with an 'aliased' fact association
aliased_association = dict(role_association)
aliased_association['parent'] = dimension_name
associations_by_parent[table_name] = aliased_association
return variables_by_parent, associations_by_parent, aliases, nonselect_tables
def get_fact_variables(fact_table, current_model):
"""
Returns Dicts,representing fact's variable dtos & physical columns
Both dicts are indexed by variable's web API identifier (e.g.:
'fact_column_name', 'associated_dimension$dim_column_name', or
'custom_variable_identifier')
Keyword Parameters:
fact_table -- dwsupport DTO, representing the fact table for which the Dict of
python types is to be retrieved.
current_model -- Dict, representing the full DWSupport configuration
TODO: improve test coverage
"""
fact_name = fact_table['name']
variables, associations_garbage, alias_garbage, nonselect_tables = get_fact_children(fact_name, current_model)
fact_variables = {}
physical_columns ={}
for table_name in variables:
#retrieve variables list for all selectable Fact fields
if table_name in nonselect_tables:
continue # skip
variable_list = variables[table_name]
for var in variable_list:
final_var = dict(var)
variable_id = _get_fact_variable_name(var, fact_name)
final_var['column'] = variable_id
custom_id = util.get_custom_variable_name(
final_var
,current_model)
if custom_id is not None:
variable_id = custom_id
final_var['column'] = custom_id
fact_variables[variable_id] = final_var
physical_columns[variable_id] = var['column']
return fact_variables, physical_columns
def _get_fact_variable_name( source_variable, fact_name):
"""
Utility function,returning API identifier for the referenced variable
Keyword Parameters:
source_variable -- dwsupport DTO, representing a dimension or fact summary
field.
fact_name -- String, representing the name of the fact table fields are
associated with.
>>> fact_var1 = {'column':'measured_kg','title': 'Measured Fact1','table':'foo_fact'}
>>> _get_fact_variable_name( fact_var1, 'foo_fact')
'measured_kg'
>>> dim_var1 = {'column':'date','title': 'Trip Date','table':'date_dim'}
>>> _get_fact_variable_name( dim_var1, 'foo_fact')
'date_dim$date'
"""
identifier = source_variable['column'] #initialize
variable_table = source_variable['table']
if not variable_table == fact_name: #variable did not come from the Fact
        # table. Prefix with the name of its originating table, to orient the user
identifier = util.prefix_field_name( identifier, variable_table)
return identifier
def _get_dim_sql( dimension_name, schema, variables, python_types, model, filters):
"""
Utility function, to generate SQL select statement for a single dimension
Keyword Parameters:
dimension_name -- String representation of the dimension table name
utils.runscript(script, args)
assert os.path.exists(outfile)
return outfile
def test_filter_stoptags():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = os.path.dirname(infile)
stopfile = utils.get_temp_filename('stoptags', in_dir)
# first, copy test-abund-read-2.fa to 'test.fa' in the temp dir.
# now, create a file with some stop tags in it --
K = 18
kh = khmer._Nodegraph(K, [1])
kh.add_stop_tag('GTTGACGGGGCTCAGGGG')
kh.save_stop_tags(stopfile)
del kh
# finally, run filter-stoptags.
script = 'filter-stoptags.py'
args = ['-k', str(K), stopfile, infile, infile]
utils.runscript(script, args, in_dir)
# verify that the basic output file exists
outfile = infile + '.stopfilt'
assert os.path.exists(outfile), outfile
# it should contain only one unique sequence, because we've trimmed
# off everything after the beginning of the only long sequence in there.
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 1, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs, seqs
def test_filter_stoptags_fq():
infile = utils.copy_test_data('test-abund-read-2.fq')
in_dir = os.path.dirname(infile)
stopfile = utils.get_temp_filename('stoptags', in_dir)
# first, copy test-abund-read-2.fa to 'test.fa' in the temp dir.
# now, create a file with some stop tags in it --
K = 18
kh = khmer._Nodegraph(K, [1])
kh.add_stop_tag('GTTGACGGGGCTCAGGGG')
kh.save_stop_tags(stopfile)
del kh
# finally, run filter-stoptags.
script = 'filter-stoptags.py'
args = ['-k', str(K), stopfile, infile, infile]
utils.runscript(script, args, in_dir)
# verify that the basic output file exists
outfile = infile + '.stopfilt'
assert os.path.exists(outfile), outfile
# it should contain only one unique sequence, because we've trimmed
# off everything after the beginning of the only long sequence in there.
seqs = set([r.sequence for r in screed.open(outfile)])
assert len(seqs) == 1, seqs
assert 'GGTTGACGGGGCTCAGGG' in seqs, seqs
# make sure that record names are carried through unparsed
names = [r.name for r in screed.open(outfile)]
names = set(names)
assert 'seq 1::BAR' in names
def test_count_median():
infile = utils.copy_test_data('test-abund-read-2.fa')
outfile = infile + '.counts'
counting_ht = _make_counting(infile, K=8)
script = 'count-median.py'
args = [counting_ht, infile, outfile]
utils.runscript(script, args)
assert os.path.exists(outfile), outfile
data = [x.strip() for x in open(outfile).readlines()[1:]]
data = set(data)
assert len(data) == 2, data
assert 'seq,1001,1001.0,0.0,18' in data, data
assert '895:1:37:17593:9954/1,1,103.803741455,303.702941895,114' in data
def test_count_median_fq_csv():
infile = utils.copy_test_data('test-abund-read-2.fq')
outfile = infile + '.counts'
counting_ht = _make_counting(infile, K=8)
script = 'count-median.py'
args = [counting_ht, infile, outfile]
utils.runscript(script, args)
assert os.path.exists(outfile), outfile
data = [x.strip() for x in open(outfile)]
data = set(data)
assert len(data) == 4, data
assert 'name,median,average,stddev,seqlen' in data
assert 'seq,1001,1001.0,0.0,18' in data
# verify that sequence names remain unparsed
names = set([line.split(',')[0] for line in data])
assert '895:1:37:17593:9954 1::FOO' in names, names
def test_count_median_fq_csv_stdout():
infile = utils.copy_test_data('test-abund-read-2.fq')
outfile = '-'
counting_ht = _make_counting(infile, K=8)
script = 'count-median.py'
args = [counting_ht, infile, outfile]
(status, out, err) = utils.runscript(script, args)
assert 'name,median,average,stddev,seqlen' in out
assert 'seq,1001,1001.0,0.0,18' in out
def test_load_graph():
script = 'load-graph.py'
args = ['-x', '1e7', '-N', '2', '-k', '20']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args)
assert 'Total number of unique k-mers: 3960' in err, err
ht_file = outfile
assert os.path.exists(ht_file), ht_file
tagset_file = outfile + '.tagset'
assert os.path.exists(tagset_file), tagset_file
try:
ht = khmer.load_nodegraph(ht_file)
except OSError as err:
assert 0, str(err)
ht.load_tagset(tagset_file)
# check to make sure we get the expected result for this data set
# upon partitioning (all in one partition). This is kind of a
# roundabout way of checking that load-graph.py worked :)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (1, 0), x
@pytest.mark.known_failing
def test_oxli_build_graph():
script = 'oxli'
args = ['build-graph', '-x', '1e7', '-N', '2', '-k', '20']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args)
assert 'Total number of unique k-mers: 3960' in err, err
ht_file = outfile
assert os.path.exists(ht_file), ht_file
tagset_file = outfile + '.tagset'
assert os.path.exists(tagset_file), tagset_file
ht = khmer.load_nodegraph(ht_file)
ht.load_tagset(tagset_file)
# check to make sure we get the expected result for this data set
# upon partitioning (all in one partition). This is kind of a
# roundabout way of checking that load-graph.py worked :)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (1, 0), x
@pytest.mark.known_failing
def test_oxli_build_graph_unique_kmers_arg():
script = 'oxli'
args = ['build-graph', '-x', '1e7', '-N', '2', '-k', '20', '-U', '3960']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args)
assert 'Total number of unique k-mers: 3960' in err, err
assert 'INFO: set memory ceiling automatically' in err, err
assert 'Ceiling is: 1e+06 bytes' in err, err
ht_file = outfile
assert os.path.exists(ht_file), ht_file
tagset_file = outfile + '.tagset'
assert os.path.exists(tagset_file), tagset_file
ht = khmer.load_nodegraph(ht_file)
ht.load_tagset(tagset_file)
# check to make sure we get the expected result for this data set
# upon partitioning (all in one partition). This is kind of a
# roundabout way of checking that load-graph.py worked :)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (1, 0), x
@pytest.mark.known_failing
def test_oxli_nocommand():
script = 'oxli'
(status, out, err) = utils.runscript(script, [])
assert status == 0
def test_load_graph_no_tags():
script = 'load-graph.py'
args = ['-x', '1e7', '-N', '2', '-k', '20', '-n']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
utils.runscript(script, args)
ht_file = outfile
assert os.path.exists(ht_file), ht_file
tagset_file = outfile + '.tagset'
assert not os.path.exists(tagset_file), tagset_file
assert khmer.load_nodegraph(ht_file)
# can't think of a good way to make sure this worked, beyond just
# loading the ht file...
@pytest.mark.known_failing
def test_oxli_build_graph_no_tags():
script = 'oxli'
args = ['build-graph', '-x', '1e7', '-N', '2', '-k', '20', '-n']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
utils.runscript(script, args)
ht_file = outfile
assert os.path.exists(ht_file), ht_file
tagset_file = outfile + '.tagset'
assert not os.path.exists(tagset_file), tagset_file
assert khmer.load_nodegraph(ht_file)
# can't think of a good way to make sure this worked, beyond just
# loading the ht file...
def test_load_graph_fail():
script = 'load-graph.py'
args = ['-x', '1e3', '-N', '2', '-k', '20'] # use small HT
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args, fail_ok=True)
assert status == 1, status
assert "** ERROR: the graph structure is too small" in err
@pytest.mark.known_failing
def test_oxli_build_graph_fail():
script = 'oxli'
args = ['build-graph', '-x', '1e3', '-N', '2', '-k', '20'] # use small HT
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args, fail_ok=True)
assert status == 1, status
assert "** ERROR: the graph structure is too small" in err
@pytest.mark.known_failing
def test_oxli_build_graph_yuge():
script = 'oxli'
args = ['build-graph', '-M', '800T', '-k', '20']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args, fail_ok=True)
assert status != 0, status
assert 'ERROR: Not enough free space on disk' in err
def test_load_graph_write_fp():
script = 'load-graph.py'
args = ['-x', '1e5', '-N', '2', '-k', '20'] # use small HT
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args)
ht_file = outfile
assert os.path.exists(ht_file), ht_file
info_file = outfile + '.info'
assert os.path.exists(info_file), info_file
data = [x.strip() for x in open(info_file)]
data = set(data)
assert '3959 unique k-mers' in data, data
assert 'false positive rate estimated to be 0.002' in data
@pytest.mark.known_failing
def test_oxli_build_graph_write_fp():
script = 'oxli'
# use small HT
args = ['build-graph', '-x', '1e5', '-N', '2', '-k', '20']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args)
ht_file = outfile
assert os.path.exists(ht_file), ht_file
info_file = outfile + '.info'
assert os.path.exists(info_file), info_file
data = [x.strip() for x in open(info_file)]
data = set(data)
assert '3959 unique k-mers' in data
assert 'false positive rate estimated to be 0.002' in data
def test_load_graph_multithread():
script = 'load-graph.py'
outfile = utils.get_temp_filename('test')
infile = utils.get_test_data('test-reads.fa')
args = ['-N', '4', '-x', '1e7', '-T', '8', outfile, infile]
(status, out, err) = utils.runscript(script, args)
@pytest.mark.known_failing
def test_oxli_build_graph_multithread():
script = 'oxli'
outfile = utils.get_temp_filename('test')
infile = utils.get_test_data('test-reads.fa')
args = ['build-graph', '-N', '4', '-x', '1e7', '-T', '8', outfile, infile]
(status, out, err) = utils.runscript(script, args)
def test_load_graph_max_memory_usage_parameter():
script = 'load-graph.py'
args = ['-M', '2e7', '-k', '20', '-n']
outfile = utils.get_temp_filename('out')
infile = utils.get_test_data('random-20-a.fa')
args.extend([outfile, infile])
(status, out, err) = utils.runscript(script, args)
assert 'Total number of unique k-mers: 3960' in err, err
ht_file = outfile
assert os.path.exists(ht_file), ht_file
try:
ht = khmer.load_nodegraph(ht_file)
except OSError as err:
assert 0, str(err)
assert (sum(ht.hashsizes()) / 8.) < 2e7, ht.hashsizes()
def _make_graph(infilename, min_hashsize=1e7, n_hashes=2,
"""
PyContract
"""
import copy
import inspect
from abc import ABC
from dataclasses import dataclass
from typing import List, Set, Callable, Optional, Dict
import pyfiglet
def print_banner(text: str):
'''
Prints a banner as ASCII art in slant format.
See: https://github.com/pwaller/pyfiglet.
:param text: the text to be printed as ASCII art.
'''
ascii_banner = pyfiglet.figlet_format(text, font='slant')
print(ascii_banner)
def data(cls):
"""
Decorator for decorating events and states, allowing to
declare parameters more easily than with __init__.
Also, the unsafe_hash=True introduces a hash function needed
for storing states in sets.
"""
return dataclass(cls, unsafe_hash=True)
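# Illustrative sketch (not part of the original module): a class decorated with
# `data` behaves like a dataclass declared with unsafe_hash=True, so instances
# get a generated __init__, __repr__ and __hash__ and can be stored in sets.
@data
class _ExamplePoint:
    x: int
    y: int
# e.g. {_ExamplePoint(1, 2), _ExamplePoint(1, 2)} collapses to a single element.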
"""
Auxiliary types.
"""
Char = str
"""
When set to true output will contain debugging information.
"""
DEBUG: bool = False
"""
When set garbage collected states will be printed.
This can be used to study how garbage collection works.
"""
DEBUG_GC: bool = False
"""
When set progress will be reported for every `DEBUG_PROGRESS`
event. Default is `None` which means no progress reports.
"""
DEBUG_PROGRESS: int = None
"""
The current monitor being evaluated. Is used from within states
to access the monitor they are part of.
"""
__monitor__: object = None
def test(nr: int, txt: str, msg: str = ''):
"""
Prints error message. Used when locating a bug and temporary print statements
are needed. By calling this function instead of print, it is possible to quickly
locate all such calls when one is done bug hunting, so that they can be removed again.
:param nr: a number allowing to trace printed message back to code
:param txt: a headline explaining the message
:param msg: the message to be printed
"""
print(f'# test [{nr}] {txt}: {msg}')
def set_debug(value: bool):
"""
Sets the debug flag. When True, for each submitted event will be printed:
1. the event number and event
2. for each monitor:
2.1 internal transitions in the monitor
2.2 final set of states of the monitor
:param value: when True debugging information is printed.
"""
global DEBUG
DEBUG = value
def set_debug_gc(value: bool):
"""
Sets the garbage collection debug flag. When True, garbage collected
states will be printed
:param value: when True debugging information is printed.
"""
global DEBUG_GC
DEBUG_GC = value
def set_debug_progress(value: int):
"""
Sets the progress reporting debug value. When different from None,
a message is issued for every `value` event.
:param value: a message will be issued for every `value` event.
"""
global DEBUG_PROGRESS
DEBUG_PROGRESS = value
def debug_mode() -> bool:
"""
Returns value of DEBUG flag. Used in other modules where
the DEBUG variable is not accessible.
:return: the value of the DEBUG flag.
"""
return DEBUG
def debug(msg: str):
"""
Prints debugging information. By giving this function a special
name it is easier to locate debugging statements.
:param msg: the message to be printed.
"""
print(msg)
def debug_frame(symbol: Char, msg: str):
"""
Prints a message surrounded by a line of symbols before and after,
if the DEBUG flag is True.
:param symbol: the symbol to make up the line, as long as the message.
:param msg: the message to be printed.
"""
if DEBUG:
print_frame(symbol, msg)
def print_frame(symbol: Char, msg: str):
"""
Prints a message surrounded by a line of symbols before and after,
:param symbol: the symbol to make up the line, as long as the message.
:param msg: the message to be printed.
"""
length = len(msg)
print(symbol * length)
print(msg)
print(symbol * length)
print()
def mk_string(begin: str, separator: str, end: str, args: list) -> str:
"""
Turns a list of values into a string, surrounded by given begin and end strings, and
elements separated by a given separator.
:param begin: string to begin with.
:param separator: separator to separate list elements.
:param end: string to end with.
:param args: the list of values.
:return: the resulting string.
"""
result = begin
sep = ""
for arg in args:
result += f'{sep}{quote(arg)}'
sep = separator + ' '
result += end
return result
def is_state_class(member: object) -> bool:
"""
Returns true if the object member is a class and specifically a subclass of State.
Used for identifying the states in a monitor.
:param member: the member object to check.
:return: True iff the object member is a class and specifically a subclass of State.
"""
return inspect.isclass(member) and issubclass(member, State)
def is_transition_method(member: object) -> bool:
"""
Returns true if the object member is a method named `transition`.
Used for identifying the methods in a monitor that model transitions.
:param member: the member object to check.
:return: True iff the object member is a function named `transition`.
"""
return inspect.ismethod(member) and member.__name__ == 'transition'
def hasattr_really(obj: object, attr) -> bool:
"""
Examines whether an object really has an attribute, without calling
__getattr__, which in states looks up the attribute in the monitor
it is part of.
:param obj: the object to look for the attribute.
:param attr: the attribute.
:return: True iff the attribute is really defined in that object.
"""
try:
obj.__getattribute__(attr)
return True
except:
return False
def quote(arg: object) -> object:
"""
Puts quotes around a string (so it appears as a string in output). Otherwise returns
argument unchanged.
:param arg: argument to transform.
:return: argument quoted if a string, otherwise unchanged.
"""
if isinstance(arg,str):
return f"'{arg}'"
else:
return arg
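def _example_mk_string() -> str:
    """
    Tiny sketch (not part of the original module) combining mk_string and
    quote: integers pass through unchanged while strings gain quotes, so the
    call below returns "(1, 'a')".
    """
    return mk_string('(', ',', ')', [1, 'a'])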
@data
class Event:
pass
@data
class State:
"""
Objects of this class represent active states in the state machine.
It is defined as data state, which activates the hash function, used
for storing states in a hashset.
"""
def __init__(self):
"""
Will eventually point to the monitor instance this state instance is part of.
__init__ does not need to be called, but its definition removes some warnings
in PyCharm.
"""
self.monitor = None
"""
Data specific to a particular form of analysis can be stored here and printed
out when the `end()` method of the monitor containing the state is called.
"""
self.__data_object__: object = None
def __str__(self) -> str:
result = self.get_state_name()
if hasattr_really(self, '__init__'):
args = inspect.getfullargspec(self.__init__).args[1:]
result += mk_string('(', ',', ')', [getattr(self, arg) for arg in args])
if hasattr_really(self, '__data_object__') and self.__data_object__ is not None:
result += '\n' + self.__data_object__.__str__()
return result
def __getattr__(self, attr) -> object:
"""
Used for looking up an attribute in the monitor of a state, when
the attribute is not defined in the state. This is used for
transition methods that are defined at the outermost level, and which
get inserted into the anonymous always state. In that state the self argument
of these methods no longer refers to the monitor. One would have to write
self.monitor.attr in those methods which is annoying.
:param attr: the attribute to look up.
:return: the value of the attribute.
"""
return getattr(self.monitor, attr)
def __bool__(self):
"""
Allows a state to be used as a Boolean, which is True if
the state is in the state vector. Can be used for expressing
past time properties.
:return: True of the state is in the state vector.
"""
return __monitor__.contains_state(self)
def __del__(self):
"""
Called when a state object is garbage collected.
Prints the state if `DEBUG_GC` is True.
"""
if DEBUG_GC:
print(f'{str(self)} garbage collected')
def set_monitor_to(self, monitor: "Monitor"):
"""
Assigns the monitor instance this state instance is part of to the
variable self.monitor.
:param monitor: assumed to be the parent monitor instance of this state instance.
"""
self.monitor = monitor
def get_state_name(self) -> str:
"""
Returns the name of the state.
:return: the name of the state.
"""
return self.__class__.__name__
def exists(self, predicate: Callable[["State"], bool]) -> bool:
"""
Returns True iff there exists a state s in the state vector of the monitor,
such that predicate(s) is True.
:param predicate: the predicate which a state in the state vector must satisfy.
:return: True iff. a state in the state vector satisfies the predicate.
"""
return self.monitor.exists(predicate)
def transition(self, event) -> Optional[List["State"]]:
"""
Evaluates a state on an event. The result is an optional list of resulting states.
None is returned if either there is no transition corresponding to the event.
:param event: the event on which the state is evaluated.
:return: the optional list of resulting states, or None if there is no matching transition.
"1.7",
"пытлив": "1.7",
"укромн": "1.7",
"обдума": "1.7",
"ценостн": "1.7",
"звучн": "1.7",
"кокетлив": "1.7",
"неуязвим": "1.7",
"статусн": "1.7",
"горчичн": "1.7",
"обезжирен": "1.7",
"эталон": "1.7",
"разоблачительн": "1.7",
"прийшл": "1.7",
"юниорск": "1.7",
"зрим": "1.7",
"взыскательн": "1.7",
"этичн": "1.7",
"ельцинск": "1.7",
"прорывн": "1.7",
"голев": "1.7",
"диснеевск": "1.7",
"вялен": "1.7",
"отбивн": "1.7",
"доказательн": "1.7",
"ненаглядн": "1.7",
"стосовн": "1.7",
"хвалебн": "1.7",
"неберджаевск": "1.7",
"остыл": "1.7",
"безосновательн": "1.7",
"фантастичн": "1.7",
"грозненск": "1.7",
"наступательн": "1.7",
"поэтичн": "1.7",
"перуанск": "1.7",
"пятнадцатилетн": "1.7",
"монархическ": "1.7",
"скрупулезн": "1.7",
"пленительн": "1.7",
"педантичн": "1.7",
"излечим": "1.7",
"матвеевск": "1.7",
"ошеломительн": "1.7",
"путн": "1.7",
"лестн": "1.7",
"безрисков": "1.7",
"предусмотрительн": "1.7",
"нецелев": "1.7",
"артистичн": "1.7",
"мны": "1.7",
"темпераментн": "1.7",
"честолюбив": "1.7",
"эргономичн": "1.7",
"ультрасовремен": "1.7",
"поправим": "1.7",
"цапк": "1.7",
"быстроразвива": "1.7",
"гротескн": "1.7",
"поподробн": "1.7",
"вожделен": "1.7",
"утешительн": "1.7",
"высокоприбыльн": "1.7",
"закадычн": "1.7",
"мускулист": "1.7",
"могат": "1.7",
"пристойн": "1.7",
"суперсовремен": "1.7",
"интересненек": "1.7",
"ратн": "1.7",
"сносн": "1.7",
"доподлин": "1.7",
"высокопрофессиональн": "1.7",
"услужлив": "1.7",
"источникграмотн": "1.7",
"эмпирическ": "1.7",
"рентабельн": "1.7",
"позволительн": "1.7",
"высокоразвит": "1.7",
"натхнен": "1.7",
"изобильн": "1.7",
"богобоязнен": "1.7",
"пояснительн": "1.7",
"виключн": "1.7",
"затейлив": "1.7",
"статн": "1.7",
"полнофункциональн": "1.7",
"сберегательн": "1.7",
"высокопроизводительн": "1.7",
"проворн": "1.7",
"сильнодейств": "1.7",
"поскромн": "1.7",
"контртеррористическ": "1.7",
"лик": "1.7",
"неиспользова": "1.7",
"богослужебн": "1.7",
"проактивн": "1.7",
"плодовит": "1.7",
"недюжин": "1.7",
"непробива": "1.7",
"сприя": "1.7",
"простительн": "1.7",
"положен": "1.7",
"вальяжн": "1.7",
"неслаб": "1.7",
"урожайн": "1.7",
"полноразмерн": "1.7",
"прохладительн": "1.7",
"поступательн": "1.7",
"высокоточн": "1.7",
"визначен": "1.7",
"теологическ": "1.7",
"неистребим": "1.7",
"ретив": "1.7",
"лицензирова": "1.7",
"набожн": "1.7",
"здоровеньк": "1.7",
"фотогеничн": "1.7",
"целительск": "1.7",
"увеселительн": "1.7",
"увесист": "1.7",
"ликвидн": "1.7",
"несокрушим": "1.7",
"сохра": "1.7",
"оскароносн": "1.7",
"вумн": "1.7",
"антистрессов": "1.7",
"вольготн": "1.7",
"релевантн": "1.7",
"негнущ": "1.7",
"дифференцирова": "1.7",
"небанальн": "1.7",
"достатъчн": "1.7",
"стабилизацион": "1.7",
"богоявленск": "1.7",
"душещипательн": "1.7",
"горячительн": "1.7",
"щеняч": "1.7",
"неотступн": "1.7",
"антигитлеровск": "1.7",
"преогромн": "1.7",
"высокохудожествен": "1.7",
"беспреста": "1.7",
"знательн": "1.7",
"гармоническ": "1.7",
"жилист": "1.7",
"бескрайн": "1.7",
"уловим": "1.7",
"шаловлив": "1.7",
"хвален": "1.7",
"нелишн": "1.7",
"правельн": "1.7",
"стопудов": "1.7",
"неистощим": "1.7",
"беспроблемн": "1.7",
"обольстительн": "1.7",
"сверхбыстр": "1.7",
"непридума": "1.7",
"всемерн": "1.7",
"сверхтонк": "1.7",
"безотлагательн": "1.7",
"прагматическ": "1.7",
"обогатительн": "1.7",
"хватательн": "1.7",
"запаслив": "1.7",
"предрождественск": "1.7",
"объединительн": "1.7",
"позновательн": "1.7",
"вещн": "1.7",
"модернизацион": "1.7",
"экслюзивн": "1.7",
"розважальн": "1.7",
"противовирусн": "1.7",
"сакраментальн": "1.7",
"актуальненьк": "1.7",
"настощ": "1.7",
"интересненьк": "1.7",
"родненьк": "1.7",
"некисл": "1.7",
"гыг": "1.7",
"добавочн": "1.7",
"актуальненек": "1.7",
"поприличн": "1.7",
"соромн": "1.7",
"лесн": "1.7",
"восстановим": "1.7",
"всезна": "1.7",
"беспримерн": "1.7",
"импозантн": "1.7",
"неодноразов": "1.7",
"мимимишн": "1.7",
"миловидн": "1.7",
"противораков": "1.7",
"экзальтирова": "1.7",
"цивильн": "1.7",
"соразмерн": "1.7",
"неувяда": "1.7",
"бесхитростн": "1.7",
"великодержавн": "1.7",
"усидчив": "1.7",
"гыгыг": "1.7",
"влюбчив": "1.7",
"оглашен": "1.7",
"распрекрасн": "1.7",
"неповин": "1.7",
"тусовочн": "1.7",
"обличительн": "1.7",
"высокопрочн": "1.7",
"экстатическ": "1.7",
"деликатесн": "1.7",
"певуч": "1.7",
"побогат": "1.7",
"молодецк": "1.7",
"улюблен": "1.7",
"сладострастн": "1.7",
"богатеньк": "1.7",
"приснопамятн": "1.7",
"исполним": "1.7",
"ваа": "1.7",
"живоначальн": "1.7",
"професиональн": "1.7",
"небезынтересн": "1.7",
"незамутнен": "1.7",
"самокритичн": "1.7",
"самоуправля": "1.7",
"яншн": "1.7",
"непогрешим": "1.7",
"стоическ": "1.7",
"человеколюбив": "1.7",
"чудненьк": "1.7",
"смирительн": "1.7",
"преспокойн": "1.7",
"антивоен": "1.7",
"узкоспециализирова": "1.7",
"охренен": "1.7",
"понятлив": "1.7",
"кейн": "1.7",
"антидопингов": "1.7",
"амурн": "1.7",
"фартов": "1.7",
"бодренек": "1.7",
"неопасн": "1.7",
"суперпопулярн": "1.7",
"ессн": "1.7",
"фул": "1.7",
"сверхпрочн": "1.7",
"атличн": "1.7",
"естесствен": "1.7",
"серъезн": "1.7",
"ударопрочн": "1.7",
"бронебойн": "1.7",
"гигиеничн": "1.7",
"хэллоуинск": "1.7",
"шутовск": "1.7",
"беззлобн": "1.7",
"кульн": "1.7",
"правомочн": "1.7",
"нашенск": "1.7",
"просветительн": "1.7",
"папулярн": "1.7",
"пральн": "1.7",
"ахуительн": "1.7",
"гламурненьк": "1.7",
"платоническ": "1.7",
"анекдотичн": "1.7",
"мультифункциональн": "1.7",
"свабодн": "1.7",
"работн": "1.7",
"всепобежда": "1.7",
"беспплатн": "1.7",
"супергеройск": "1.7",
"зауважен": "1.7",
"свежеприготовлен": "1.7",
"каллиграфическ": "1.7",
"кавайн": "1.7",
"середн": "1.7",
"незапятна": "1.7",
"верноподда": "1.7",
"хардкорн": "1.7",
"заправск": "1.7",
"высок": "1.7",
"высокобюджетн": "1.7",
"клэшмоб": "1.7",
"ассортиментн": "1.7",
"подр": "1.7",
"союзническ": "1.7",
"фотокор": "1.7",
"высоколиквидн": "1.7",
"лавов": "1.7",
"одушевлен": "1.7",
"долгоживущ": "1.7",
"голеньк": "1.7",
"высокопородн": "1.7",
"сверхсовремен": "1.7",
"непобежден": "1.7",
"раздольн": "1.7",
"културн": "1.7",
"август": "1.7",
"скоромн": "1.7",
"всеблаг": "1.7",
"безубыточн": "1.7",
"патетическ": "1.7",
"уи": "1.7",
"моегобесплатногомультимедийн": "1.7",
"релаксацион": "1.7",
"неженск": "1.7",
"побудительн": "1.7",
"филосовск": "1.7",
"богородичн": "1.7",
"синергетическ": "1.7",
"жествен": "1.7",
"сверхпопулярн": "1.7",
"мишн": "1.7",
"еееее": "1.7",
"комплиментарн": "1.7",
"всеохватыва": "1.7",
"гутн": "1.7",
"конкурентноспособн": "1.7",
"высокорейтингов": "1.7",
"моногамн": "1.7",
"непоган": "1.7",
"смишн": "1.7",
"сверхприбыльн": "1.7",
"антиэкстремистск": "1.7",
"безотходн": "1.7",
"смехов": "1.7",
"эстетск": "1.7",
"хлебосольн": "1.7",
"свойск": "1.7",
"быстроходн": "1.7",
"хоробр": "1.7",
"обществен": "1.7",
"непротиворечив": "1.7",
"яхонтов": "1.7",
"гладеньк": "1.7",
"небезосновательн": "1.7",
"износостойк": "1.7",
"организован": "1.7",
"пригож": "1.7",
"духоподъемн": "1.7",
"долготерпелив": "1.7",
"всемирноизвестн": "1.7",
"нерастрачен": "1.7",
"великосветск": "1.7",
"небьющ": "1.7",
"апробирова": "1.7",
"забавненьк": "1.7",
"класическ": "1.7",
"первосортн": "1.7",
"мегапопулярн": "1.7",
"прогностическ": "1.7",
"итересн": "1.7",
"сорвремен": "1.7",
"коренаст": "1.7",
"контактн": "1.7",
"невиноват": "1.7",
"адмиральск": "1.7",
"легкоусвоя": "1.7",
"рассудочн": "1.7",
"зычн": "1.7",
"незамерза": "1.7",
"ии": "1.7",
"сверхбогат": "1.7",
"профориентацион": "1.7",
"крепеньк": "1.7",
"саморазвива": "1.7",
"разнобразн": "1.7",
"всепроща": "1.7",
"обеспечительн": "1.7",
"философическ": "1.7",
"живоносн": "1.7",
"субтильн": "1.7",
"смотрибельн": "1.7",
"менторск": "1.7",
"энергонезависим": "1.7",
"патрэбн": "1.7",
"реформистск": "1.7",
"острожн": "1.7",
"давольн": "1.7",
"стебловск": "1.7",
"сердешн": "1.7",
"благоугодн": "1.7",
"топиарн": "1.7",
"сверхэффективн": "1.7",
"животворн": "1.7",
"самоокупа": "1.7",
"издат": "1.7",
"съвършен": "1.7",
"обоятельн": "1.7",
"диетологическ": "1.7",
"экстенсивн": "1.7",
"нанотехнологичн": "1.7",
"многоопытн": "1.7",
"конгениальн": "1.7",
"цесн": "1.7",
"каноничн": "1.7",
"мягеньк": "1.7",
"супермодн": "1.7",
"душеспасительн": "1.7",
"обьективн": "1.7",
"проффесиональн": "1.7",
"веснян": "1.7",
"неповрежден": "1.7",
"травоньк": "1.7",
"мотивировочн": "1.7",
"удаленьк": "1.7",
"деятельностн": "1.7",
"трезвомысля": "1.7",
"поетичн": "1.7",
"обезательн": "1.7",
"ееееее": "1.7",
"сухоньк": "1.7",
"всамделишн": "1.7",
"легеньк": "1.7",
"празничн": "1.7",
"благовон": "1.7",
"богодухновен": "1.7",
"сейшн": "1.7",
"саморегулир": "1.7",
"лощен": "1.7",
"мироточив": "1.7",
"польз": "1.7",
"резистивн": "1.7",
"сверхцен": "1.7",
"шал": "2.5",
"доверчив": "2.5",
"застенчив": "2.5",
"страстн": "2.5",
"гладк": "2.5",
"деликатн": "2.5",
"уместн": "2.5",
"кротк": "2.5",
"комическ": "2.5",
"величав": "2.5",
"лиричн": "2.5",
"эротичн": "2.5",
"простодушн": "2.5",
"сочувствен": "2.5",
"культурн": "2.5",
"холен": "2.5",
"духовн": "2.5",
"благоуха": "2.5",
"полюбовн": "2.5",
"гарн": "2.5",
"бож": "2.5",
"мягк": "2.5",
"фантастическ": "2.5",
"сказочн": "2.5",
"тщательн": "2.5",
"горд": "2.5",
"стойк": "2.5",
"царск": "2.5",
"необычайн": "2.5",
"толков": "2.5",
"бережн": "2.5",
"радужн": "2.5",
"аппетитн": "2.5",
"величествен": "2.5",
"складн": "2.5",
"доходчив": "2.5",
"колоритн": "2.5",
"резв": "2.5",
"комичн": "2.5",
"геройск": "2.5",
"почтительн": "2.5",
"мирн": "2.5",
"стильн": "2.5",
"идеальн": "2.5",
"обязательн": "2.5",
"уверен": "2.5",
"творческ": "2.5",
"умн": "2.5",
"достойн": "2.5",
"талантлив": "2.5",
"душевн": "2.5",
"мудр": "2.5",
"разумн": "2.5",
"священ": "2.5",
"симпатичн": "2.5",
"красочн": "2.5",
"плодотворн": "2.5",
"щедр": "2.5",
"изыска": "2.5",
"изящн": "2.5",
"заботлив": "2.5",
"впечатля": "2.5",
"эффектн": "2.5",
"терпелив": "2.5",
"праведн": "2.5",
"героическ": "2.5",
"отважн": "2.5",
"доверительн": "2.5",
"преславн": "2.5",
"усердн": "2.5",
"приветлив": "2.5",
"добросовестн": "2.5",
"виртуозн": "2.5",
"ослепительн": "2.5",
"знатн": "2.5",
"доблестн": "2.5",
"феноменальн": "2.5",
"добротн": "2.5",
"хорошеньк": "2.5",
"выигрышн": "2.5",
"прилежн": "2.5",
"шутлив": "2.5",
"эстетичн": "2.5",
"дурач": "2.5",
"добива": "2.5",
"аплодирова": "2.5",
"безмерн": "2.5",
"беспрекословн": "2.5",
"благодарствова": "2.5",
"благополучч": "2.5",
"благоприятствова": "2.5",
"блиста": "2.5",
"богоблад": "2.5",
"боголепн": "2.5",
"боготвор": "2.5",
"будораж": "2.5",
"везт": "2.5",
"велич": "2.5",
"венча": "2.5",
"верифицирова": "2.5",
"верова": "2.5",
"взбодр": "2.5",
"внемл": "2.5",
"внима": "2.5",
"возблагодар": "2.5",
"возвеличива": "2.5",
"возвелич": "2.5",
"возвыс": "2.5",
"возда": "2.5",
"вознаград": "2.5",
"вознагражда": "2.5",
"возрожда": "2.5",
"воодушев": "2.5",
"воплоща": "2.5",
"воскреснут": "2.5",
"воспева": "2.5",
"воспет": "2.5",
"воспрянут": "2.5",
"впечатлет": "2.5",
"впечатл": "2.5",
"всенепремен": "2.5",
"вступа": "2.5",
"выигрыва": "2.5",
"выруча": "2.5",
"гармонизирова": "2.5",
"гармонирова": "2.5",
"гогота": "2.5",
"дар": "2.5",
"дарува": "2.5",
"дерза": "2.5",
"дзякова": "2.5",
"див": "2.5",
"доверя": "2.5",
"договарива": "2.5",
"долюб": "2.5",
"дорож": "2.5",
"достига": "2.5",
"друж": "2.5",
"дякова": "2.5",
"жалет": "2.5",
"животвор": "2.5",
"забавля": "2.5",
"заважают": "2.5",
"задобр": "2.5",
"заигрыва": "2.5",
"заинтригова": "2.5",
"замечта": "2.5",
"засия": "2.5",
"заслужива": "2.5",
"заслуж": "2.5",
"заступ": "2.5",
"затрепета": "2.5",
"затус": "2.5",
"захохота": "2.5",
"зачарова": "2.5",
"зачаровыва": "2.5",
"здраствова": "2.5",
"извиня": "2.5",
"излечива": "2.5",
"излюб": "2.5",
"изнеж": "2.5",
"изобилова": "2.5",
"изумля": "2.5",
"импонирова": "2.5",
"искорен": "2.5",
"искуп": "2.5",
"исправ": "2.5",
"исцеля": "2.5",
"красивит": "2.5",
"красова": "2.5",
"кураж": "2.5",
"лавстор": "2.5",
"лапул": "2.5",
"лелея": "2.5",
"лидирова": "2.5",
"лицезрет": "2.5",
"лобза": "2.5",
"лобыза": "2.5",
"лыб": "2.5",
"мамахохота": "2.5",
"ман": "2.5",
"мер": "2.5",
"милет": "2.5",
"милова": "2.5",
"мир": "2.5",
"млет": "2.5",
"мур": "2.5",
"мурлыка": "2.5",
"мурлыч": "2.5",
"навесел": "2.5",
"надея": "2.5",
"насмеш": "2.5",
"насмешлив": "2.5",
"насмея": "2.5",
"нафантазирова": "2.5",
"нахвалива": "2.5",
"начистот": "2.5",
"ниспосла": "2.5",
"нормализова": "2.5",
"обескураж": "2.5",
"облагоражива": "2.5",
"облагород": "2.5",
"облегча": "2.5",
"облюбова": "2.5",
"обнадежива": "2.5",
"обнадеж": "2.5",
"обогаща": "2.5",
"ободр": "2.5",
"обожеств": "2.5",
"обожествля": "2.5",
"обольща": "2.5",
"оборжа": "2.5",
"образум": "2.5",
"обхохота": "2.5",
"одарива": "2.5",
"одаря": "2.5",
"одобр": | |
'фагоцит',
'falsifika(ts)iya': 'фальсификация',
'farma(ts)evt': 'фармацевт',
'farma(ts)evtika': 'фармацевтика',
'farma(ts)iya': 'фармация',
'federa(ts)iya': 'федерация',
'fermenta(ts)iya': 'ферментация',
'film-kon(s)ert': 'фильм-концерт',
'filtra(ts)iya': 'фильтрация',
'fiton(s)id': 'фитонцид',
'forma(ts)iya': 'формация',
'frak(s)ion': 'фракцион',
'frak(s)ioner': 'фракционер',
'frak(s)iya': 'фракция',
'fran(s)iya': 'франция',
'fran(s)uz': 'француз',
'fran(s)uzlar': 'французлар',
'fran(s)uzcha': 'французча',
'fri(ts)': 'фриц',
'funk(s)ional': 'функционал',
'funk(s)iya': 'функция',
'xemosorb(s)iya': 'хемосорбция',
'xole(ts)istit': 'холецистит',
'(s)anga': 'цанга',
'(s)apfa': 'цапфа',
'(s)edra': 'цедра',
'(s)eziy': 'цезий',
'(s)eytnot': 'цейтнот',
'(s)ellofan': 'целлофан',
'(s)elluloid': 'целлулоид',
'(s)ellyuloza': 'целлюлоза',
'(s)elsiy': 'цельсий',
'(s)ement': 'цемент',
'(s)ementlamoq': 'цементламоқ',
'(s)enz': 'ценз',
'(s)enzor': 'цензор',
'(s)enzura': 'цензура',
'(s)ent': 'цент',
'(s)entner': 'центнер',
'(s)entnerli': 'центнерли',
'(s)entnerchi': 'центнерчи',
'(s)entralizm': 'централизм',
'(s)entrizm': 'центризм',
'(s)entrist': 'центрист',
'(s)entrifuga': 'центрифуга',
'(s)eriy': 'церий',
'(s)esarka': 'цесарка',
'(s)ex': 'цех',
'(s)ian': 'циан',
'(s)ianli': 'цианли',
'(s)iviliza(ts)iya': 'цивилизация',
'(s)igara': 'цигара',
'(s)ikl': 'цикл',
'(s)iklik': 'циклик',
'(s)ikllashtirmoq': 'цикллаштирмоқ',
'(s)iklli': 'циклли',
'(s)iklon': 'циклон',
'(s)iklotron': 'циклотрон',
'(s)ilindr': 'цилиндр',
'(s)ilindrik': 'цилиндрик',
'(s)ilindrli': 'цилиндрли',
'(s)inga': 'цинга',
'(s)ink': 'цинк',
'(s)inkograf': 'цинкограф',
'(s)inkografiya': 'цинкография',
'(s)irk': 'цирк',
'(s)irkoniy': 'цирконий',
'(s)irkul': 'циркуль',
'(s)irkulyar': 'циркуляр',
'(s)irkchi': 'циркчи',
'(s)irroz': 'цирроз',
'(s)isterna': 'цистерна',
'(s)isternali': 'цистернали',
'(s)istit': 'цистит',
'(s)itata': 'цитата',
'(s)itatabozlik': 'цитатабозлик',
'(s)ito-': 'цито-',
'(s)itodiagnostika': 'цитодиагностика',
'(s)itokimyo': 'цитокимё',
'(s)itoliz': 'цитолиз',
'(s)itologiya': 'цитология',
'(s)itrus': 'цитрус',
'(s)iferblat': 'циферблат',
'(s)iferblatli': 'циферблатли',
'(s)okol': 'цоколь',
'(s)unami': 'цунами',
'cherepi(ts)a': 'черепица',
'shvey(s)ar': 'швейцар',
'shmu(ts)titul': 'шмуцтитул',
'shni(ts)el': 'шницель',
'shpri(ts)': 'шприц',
'shtangen(s)irkul': 'штангенциркуль',
'evakua(ts)iya': 'эвакуация',
'evolyu(ts)ion': 'эволюцион',
'evolyu(ts)iya': 'эволюция',
'ego(ts)entrizm': 'эгоцентризм',
'eksguma(ts)iya': 'эксгумация',
'ekspedi(ts)ion': 'экспедицион',
'ekspedi(ts)iya': 'экспедиция',
'ekspedi(ts)iyachi': 'экспедициячи',
'ekspluata(ts)iya': 'эксплуатация',
'ekspluata(ts)iyachi': 'эксплуатациячи',
'ekspozi(ts)iya': 'экспозиция',
'ekspropria(ts)iya': 'экспроприация',
'ekstradi(ts)iya': 'экстрадиция',
'ekstrak(s)iya': 'экстракция',
'elektrifika(ts)iya': 'электрификация',
'elektrostan(s)iya': 'электростанция',
'emansipa(ts)iya': 'эмансипация',
'emigra(ts)iya': 'эмиграция',
'emo(ts)ional': 'эмоционал',
'emo(ts)ionallik': 'эмоционаллик',
'emo(ts)iya': 'эмоция',
'empiriokriti(ts)izm': 'эмпириокритицизм',
'en(s)efalit': 'энцефалит',
'en(s)efalogramma': 'энцефалограмма',
'en(s)iklopedik': 'энциклопедик',
'en(s)iklopedist': 'энциклопедист',
'en(s)iklopediya': 'энциклопедия',
'en(s)iklopediyachi': 'энциклопедиячи',
'epi(ts)entr': 'эпицентр',
'eritro(ts)itlar': 'эритроцитлар',
'erudi(ts)iya': 'эрудиция',
'eskala(ts)iya': 'эскалация',
'esmine(ts)': 'эсминец',
'essen(s)iya': 'эссенция',
'yurisdik(s)iya': 'юрисдикция',
'yurispruden(s)iya': 'юриспруденция',
'yusti(ts)iya': 'юстиция',
}
# These words cannot be reliably transliterated into cyrillic
E_WORDS = {
'bel(e)taj': 'бельэтаж',
'bugun-(e)rta': 'бугун-эрта',
'diqqat-(e)ʼtibor': 'диққат-эътибор',
'ich-(e)t': 'ич-эт',
'karat(e)': 'каратэ',
'm(e)r': 'мэр',
'obroʻ-(e)ʼtiborli': 'обрў-эътиборли',
'omon-(e)son': 'омон-эсон',
'r(e)ket': 'рэкет',
'sut(e)mizuvchilar': 'сутэмизувчилар',
'upa-(e)lik': 'упа-элик',
'xayr-(e)hson': 'хайр-эҳсон',
'qayn(e)gachi': 'қайнэгачи',
}
# Not to be confused with ш
SH_WORDS = {
'a(sh)ob': 'асҳоб',
'mu(sh)af': 'мусҳаф'
}
# Not to be confused with ё
YO_WORDS = {
'general-ma(yo)r': 'генерал-майор',
'(yo)g': 'йог',
'(yo)ga': 'йога',
'(yo)gurt': 'йогурт',
'(yo)d': 'йод',
'(yo)dlamoq': 'йодламоқ',
'(yo)dli': 'йодли',
'ma(yo)nez': 'майонез',
'mikrora(yo)n': 'микрорайон',
'ma(yo)r': 'майор',
'ra(yo)n': 'район',
}
YU_WORDS = {
'mo(yu)pa': 'мойупа',
'po(yu)stun': 'пойустун'
}
YA_WORDS = {
'po(ya)bzal': 'пойабзал',
'po(ya)ndoz': 'пойандоз',
'po(ya)fzal': 'пойафзал'
}
YE_WORDS = {
'i(ye)': 'ийе',
'konve(ye)r': 'конвейер',
'ple(ye)r': 'плейер',
'sta(ye)r': 'стайер',
'fo(ye)': 'фойе'
}
SOFT_SIGN_WORDS = {
'aviamodel': 'авиамодель',
'avtomagistral': 'автомагистраль',
'avtomobil': 'автомобиль',
'akvarel': 'акварель',
'alkogol': 'алкоголь',
'albatros': 'альбатрос',
'albom': 'альбом',
'alpinizm': 'альпинизм',
'alpinist': 'альпинист',
'alt': 'альт',
'alternativ': 'альтернатив',
'alternativa': 'альтернатива',
'altimetr': 'альтиметр',
'altchi': 'альтчи',
'alfa': 'альфа',
'alfa-zarralar': 'альфа-зарралар',
'alma-terapiya': 'альма-терапия',
'alyans': 'альянс',
'amalgama': 'амальгама',
'ansambl': 'ансамбль',
'apelsin': 'апельсин',
'aprel': 'апрель',
'artel': 'артель',
'artikl': 'артикль',
'arergard': 'арьергард',
'asfalt': 'асфальт',
'asfaltlamoq': 'асфальтламоқ',
'asfaltli': 'асфальтли',
'atele': 'ателье',
'bazalt': 'базальт',
'balzam': 'бальзам',
'balzamlash': 'бальзамлаш',
'balneolog': 'бальнеолог',
'balneologik': 'бальнеологик',
'balneologiya': 'бальнеология',
'balneoterapiya': 'бальнеотерапия',
'balneotexnika': 'бальнеотехника',
'banderol': 'бандероль',
'barelef': 'барельеф',
'barrel': 'баррель',
'barer': 'барьер',
'batalon': 'батальон',
'belveder': 'бельведер',
'belgiyalik': 'бельгиялик',
'belting': 'бельтинг',
'beletaj': 'бельэтаж',
'bilyard': 'бильярд',
'binokl': 'бинокль',
'biofiltr': 'биофильтр',
'bolonya': 'болонья',
'bolshevizm': 'большевизм',
'bolshevik': 'большевик',
'brakonerlik': 'браконьерлик',
'broneavtomobil': 'бронеавтомобиль',
'bron': 'бронь',
'budilnik': 'будильник',
'bulvar': 'бульвар',
'buldenej': 'бульденеж',
'buldog': 'бульдог',
'buldozer': 'бульдозер',
'buldozerchi': 'бульдозерчи',
'bulon': 'бульон',
'byulleten': 'бюллетень',
'valeryanka': 'валерьянка',
'valvatsiya': 'вальвация',
'vals': 'вальс',
'vanil': 'ваниль',
'varete': 'варьете',
'vedomost': 'ведомость',
'veksel': 'вексель',
'ventil': 'вентиль',
'vermishel': 'вермишель',
'verner': 'верньер',
'verf': 'верфь',
'vestibyul': 'вестибюль',
'videofilm': 'видеофильм',
'viklyuchatel': 'виключатель',
'vinetka': 'виньетка',
'violonchel': 'виолончель',
'vklyuchatel': 'включатель',
'vodevil': 'водевиль',
'volost': 'волость',
'volt': 'вольт',
'volta': 'вольта',
'voltli': 'вольтли',
'voltmetr': 'вольтметр',
'volfram': 'вольфрам',
'vulgar': 'вульгар',
'vulgarizm': 'вульгаризм',
'vulgarlashtirmoq': 'вульгарлаштирмоқ',
'gavan': 'гавань',
'galvanizatsiya': 'гальванизация',
'galvanik': 'гальваник',
'galvanometr': 'гальванометр',
'gantel': 'гантель',
'garmon': 'гармонь',
'gastrol': 'гастроль',
'gastrol-konsert': 'гастроль-концерт',
'gelmint': 'гельминт',
'gelmintoz': 'гельминтоз',
'gelmintologiya': 'гельминтология',
'geraldika': 'геральдика',
'gilza': 'гильза',
'giposulfit': 'гипосульфит',
'golf': 'гольф',
'gorelef': 'горельеф',
'gorizontal': 'горизонталь',
'gospital': 'госпиталь',
'grifel': 'грифель',
'guash': 'гуашь',
'daltonizm': 'дальтонизм',
'dvigatel': 'двигатель',
'devalvatsiya': 'девальвация',
'dekabr': 'декабрь',
'delta': 'дельта',
'delfin': 'дельфин',
'delfinariy': 'дельфинарий',
'delfinsimonlar': 'дельфинсимонлар',
'detal': 'деталь',
'diagonal': 'диагональ',
'diafilm': 'диафильм',
'dizel': 'дизель',
'dizel-motor': 'дизель-мотор',
'dirijabl': 'дирижабль',
'drel': 'дрель',
'duel': 'дуэль',
'jenshen': 'женьшень',
'impuls': 'импульс',
'inventar': 'инвентарь',
'insult': 'инсульт',
'intervyu': 'интервью',
'interer': 'интерьер',
'italyan': 'итальян',
'italyanlar': 'итальянлар',
'italyancha': 'итальянча',
'iyul': 'июль',
'iyun': 'июнь',
'kabel': 'кабель',
'kalendar': 'календарь',
'kalka': 'калька',
'kalkalamoq': 'калькаламоқ',
'kalkulyator': 'калькулятор',
'kalkulyatsiya': 'калькуляция',
'kalsiy': 'кальций',
'kanifol': 'канифоль',
'kapelmeyster': 'капельмейстер',
'kapsyul': 'капсюль',
'karamel': 'карамель',
'kartel': 'картель',
'kartech': 'картечь',
'karusel': 'карусель',
'karer': 'карьер',
'kastryul': 'кастрюль',
'kastryulka': 'кастрюлька',
'katapulta': 'катапульта',
'kafel': 'кафель',
'kinofestival': 'кинофестиваль',
'kinofilm': 'кинофильм',
'kisel': 'кисель',
'kitel': 'китель',
'knyaz': 'князь',
'kobalt': 'кобальт',
'kokil': 'кокиль',
'kokteyl': 'коктейль',
'kompyuter': 'компьютер',
'kompyuterlashtirmoq': 'компьютерлаштирмоқ',
'konsultant': 'консультант',
'konsultativ': 'консультатив',
'konsultatsiya': 'консультация',
'kontrol': 'контроль',
'konferanse': 'конферансье',
'konslager': 'концлагерь',
'kon': 'конь',
'konki': 'коньки',
'konkichi': 'конькичи',
'konyunktiva': 'коньюнктива',
'konyunktivit': 'коньюнктивит',
'konyunktura': 'коньюнктура',
'konyak': 'коньяк',
'korol': 'король',
'kreml': 'кремль',
'krovat': 'кровать',
'kulminatsion': 'кульминацион',
'kulminatsiya': 'кульминация',
'kultivator': 'культиватор',
'kultivatsiya': 'культивация',
'kulturizm': 'культуризм',
'kurer': 'курьер',
'kyat': 'кьят',
'lager': 'лагерь',
'latun': 'латунь',
'losos': 'лосось',
'loson': 'лосьон',
'magistral': 'магистраль',
'marseleza': 'марсельеза',
'mebel': 'мебель',
'medal': 'медаль',
'medalon': 'медальон',
'melxior': 'мельхиор',
'menshevizm': 'меньшевизм',
'menshevik': 'меньшевик',
'migren': 'мигрень',
'mikroinsult': 'микроинсульт',
'mikrofilm': 'микрофильм',
'model': 'модель',
'modeler': 'модельер',
'molbert': 'мольберт',
'monastir': 'монастирь',
'monokultura': 'монокультура',
'motel': 'мотель',
'multi-': 'мульти-',
'multimediya': 'мультимедия',
'multimillioner': 'мультимиллионер',
'multiplikatsion': 'мультипликацион',
'multiplikator': 'мультипликатор',
'multiplikatsiya': 'мультипликация',
'neft': 'нефть',
'nikel': 'никель',
'nimpalto': 'нимпальто',
'nippel': 'ниппель',
'nol': 'ноль',
'normal': 'нормаль',
'noyabr': 'ноябрь',
'oblast': 'область',
'okkultizm': 'оккультизм',
'oktabr': 'октябрь',
'otel': 'отель',
'oftalmologiya': 'офтальмология',
'ochered': 'очередь',
'pavilon': 'павильон',
'palma': 'пальма',
'palmazor': 'пальмазор',
'palpatsiya': 'пальпация',
'palto': 'пальто',
'paltobop': 'пальтобоп',
'paltolik': 'пальтолик',
'panel': 'панель',
'parallel': 'параллель',
'parol': 'пароль',
'patrul': 'патруль',
'pedal': 'педаль',
'penalti': 'пенальти',
'pechat': 'печать',
'pechene': 'печенье',
'pech': 'печь',
'plastir': 'пластирь',
'povest': 'повесть',
'polka': 'полька',
'portfel': 'портфель',
'porshen': 'поршень',
'pochtalon': 'почтальон',
'predoxranitel': 'предохранитель',
'premera': 'премьера',
'premer-ministr': 'премьер-министр',
'press-pape': 'пресс-папье',
'press-sekretar': 'пресс-секретарь',
'pristan': 'пристань',
'profil': 'профиль',
'pulverizator': 'пульверизатор',
'pulmonologiya': 'пульмонология',
'pulpa': 'пульпа',
'pulpit': 'пульпит',
'puls': 'пульс',
'pult': 'пульт',
'pesa': 'пьеса',
'radiospektakl': 'радиоспектакль',
'rante': 'рантье',
'revalvatsiya': 'ревальвация',
'revolver': 'револьвер',
'rezba': 'резьба',
'rezbali': 'резьбали',
'relef': 'рельеф',
'rels': 'рельс',
'relsli': 'рельсли',
'relssiz': 'рельссиз',
'retush': 'ретушь',
'riyel': 'риель',
'ritsar': 'рицарь',
'rol': 'роль',
'royal': 'рояль',
'rubilnik': 'рубильник',
'rubl': 'рубль',
'rul': 'руль',
'saldo': 'сальдо',
'salto': 'сальто',
'sekretar': 'секретарь',
'selderey': 'сельдерей',
'seld': 'сельдь',
'sentabr': 'сентябрь',
'senor': 'сеньор',
'senora': 'сеньора',
'sinka': 'синька',
'sinkalamoq': 'синькаламоқ',
'siren': 'сирень',
'skalpel': 'скальпель',
'slesar': 'слесарь',
'sobol': 'соболь',
'sol': 'соль',
'spektakl': 'спектакль',
'spiral': 'спираль',
'statya': 'статья',
'stelka': 'стелька',
'sterjen': 'стержень',
'stil': 'стиль',
'sudya': 'судья',
'sudyalik': 'судьялик',
'sulfat': 'сульфат',
'sulfatlar': 'сульфатлар',
'tabel': 'табель',
'talk': 'тальк',
'tekstil': 'текстиль',
'telefilm': 'телефильм',
'tigel': 'тигель',
'tokar': 'токарь',
'tol': 'толь',
'tonnel': 'тоннель',
'tunnel': 'туннель',
'tush': 'тушь',
'tyulen': 'тюлень',
'tyul': 'тюль',
'ultimatum': 'ультиматум',
'ultra-': 'ультра-',
'ultrabinafsha': 'ультрабинафша',
'ultramikroskop': 'ультрамикроскоп',
'ultratovush': 'ультратовуш',
'ultraqisqa': 'ультрақисқа',
'umivalnik': 'умивальник',
'util': 'утиль',
'fakultativ': 'факультатив',
'fakultet': 'факультет',
'fakultetlalaro': 'факультетлаларо',
'falsifikator': 'фальсификатор',
'falsifikatsiya': 'фальсификация',
'fevral': 'февраль',
'feldmarshal': 'фельдмаршал',
'feldsher': 'фельдшер',
'feldʼeger': 'фельдъегерь',
'feleton': 'фельетон',
'feletonchi': 'фельетончи',
'festival': 'фестиваль',
'fizkultura': 'физкультура',
'fizkulturachi': 'физкультурачи',
'film': 'фильм',
'film-konsert': 'фильм-концерт',
'filmoskop': 'фильмоскоп',
'filmoteka': 'фильмотека',
'filtr': 'фильтр',
'filtratsiya': 'фильтрация',
'filtrlamoq': 'фильтрламоқ',
'filtrli': 'фильтрли',
'folga': 'фольга',
'folklor': 'фольклор',
'folklorist': 'фольклорист',
'folkloristika': 'фольклористика',
'folklorchi': 'фольклорчи',
'folklorshunos': 'фольклоршунос',
'folklorshunoslik': 'фольклоршунослик',
'fonar': 'фонарь',
'fortepyano': 'фортепьяно',
'xolodilnik': 'холодильник',
'xrustal': 'хрусталь',
'selsiy': 'цельсий',
'sirkul': 'циркуль',
'sokol': 'цоколь',
'chizel': 'чизель',
'shagren': 'шагрень',
'shampun': 'шампунь',
'sherst': 'шерсть',
'shinel': 'шинель',
'shifoner': 'шифоньер',
'shnitsel': 'шницель',
'shpatel': 'шпатель',
'shpilka': 'шпилька',
'shpindel': 'шпиндель',
'shtangensirkul': 'штангенциркуль',
'shtapel': 'штапель',
'shtempel': 'штемпель',
'emal': 'эмаль',
'emulsiya': 'эмульсия',
'endshpil': 'эндшпиль',
'eskadrilya': 'эскадрилья',
'yuan': 'юань',
'yuriskonsult': 'юрисконсульт',
'yakor': 'якорь',
'yanvar': 'январь',
}
CYRILLIC_TO_LATIN = {
'а': 'a', 'А': 'A',
'б': 'b', 'Б': 'B',
'в': 'v', 'В': 'V',
'г': 'g', 'Г': 'G',
'д': 'd', 'Д': 'D',
'е': 'e', 'Е': 'E',
'ё': 'yo', 'Ё': 'Yo',
'ж': 'j', 'Ж': 'J',
'з': 'z', 'З': 'Z',
'и': 'i', 'И': 'I',
'й': 'y', 'Й': 'Y',
'к': 'k', 'К': 'K',
'л': 'l', 'Л': 'L',
'м': 'm', 'М': 'M',
'н': 'n', 'Н': 'N',
'о': 'o', 'О': 'O',
'п': 'p', 'П': 'P',
'р': 'r', 'Р': 'R',
'с': 's', 'С': 'S',
'т': 't', 'Т': 'T',
'у': 'u', 'У': 'U',
'ф': 'f', 'Ф': 'F',
'х': 'x', 'Х': 'X',
'ц': 's', 'Ц': 'S',
'ч': 'ch', 'Ч': 'Ch',
'ш': 'sh', 'Ш': 'Sh',
'ъ': 'ʼ', 'Ъ': 'ʼ',
'ь': '', 'Ь': '',
'э': 'e', 'Э': 'E',
'ю': 'yu', 'Ю': 'Yu',
'я': 'ya', 'Я': 'Ya',
'ў': 'oʻ', 'Ў': 'Oʻ',
'қ': 'q', 'Қ': 'Q',
'ғ': 'gʻ', 'Ғ': 'Gʻ',
'ҳ': 'h', 'Ҳ': 'H',
}
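# Illustrative usage sketch (not part of the original module): one way the table
# above could drive a character-level Cyrillic -> Latin pass. The helper name and
# the "keep unknown characters unchanged" fallback are assumptions; a complete
# converter would also consult the exception dictionaries defined earlier
# (E_WORDS, YO_WORDS, SOFT_SIGN_WORDS, ...) before falling back to this
# per-character mapping.
def _to_latin_sketch(text):
    """Map each character through CYRILLIC_TO_LATIN, leaving characters
    without an entry (digits, punctuation, Latin letters) unchanged."""
    return ''.join(CYRILLIC_TO_LATIN.get(ch, ch) for ch in text)

# Example: _to_latin_sketch('фольклор') -> 'folklor' (compare SOFT_SIGN_WORDS above).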
CYRILLIC_VOWELS = (
'а', 'А', 'е', 'Е', 'ё', | |
Markers', icon='RENDER_RESULT').tmarkers = True
elif len(marker_list_camera) == len(cameras):
if scene.frame_current>0 :
row.operator("cameramanager.render_all_camera",text='Render All Cameras', icon='RENDER_RESULT')
else:
row.operator("cameramanager.render_all_camera",text='Render All', icon='RENDER_RESULT')
row.operator("cameramanager.render_all_camera",text='Render Markers', icon='RENDER_RESULT').tmarkers = True
else:
if len(render_all_list) <2:
row.label(text='Choose at least two Cameras', icon ='ERROR')
elif 1 < len(render_all_list) < len(cameras) :
row.operator("cameramanager.render_all_camera",text='Render Selection: {0}'.format(len(render_all_list)), icon='RENDER_RESULT')
elif len(render_all_list) == len(cameras) :
row.operator("cameramanager.render_all_camera",text='Render All Cameras', icon='RENDER_RESULT')
elif len(cameras) > 2:
if rs.switchRenderSelection == True:
if len(render_all_list) <2:
row.label(text='Choose at least two Cameras', icon ='ERROR')
#Switch button for cameras listing for batch rendering
if len(cameras) > 2:
row.separator()
row.prop(rs,"switchRenderSelection",text='', icon='RESTRICT_SELECT_OFF')
### ]Buttons below Cameras List
row = layout.row(align=True)
else:
##
row = layout.row(align=True)
row.alignment='CENTER'
row.alert = True
row.label(text=" No cameras in this scene", icon ='ERROR')
row.alert = False
###Camera Manager Settings[ _____________________________________________________________________________________
else:
## Manager Options [-----------
row = layout.row(align=True)
box = layout.box()
row = box.row(align=True)
row.alert = True
row.alignment='CENTER'
row.label(text='Manager Options')
row = box.row(align=True)
row = row.box()
row = row.row(align=True)
row.label(text='Tools Toggles:')
row.prop(rs,"cmBut_Render",text="",icon='SEQ_PREVIEW')
row.prop(rs,"cmBut_AlignV",text="",icon='VIEW_PERSPECTIVE')
row.prop(rs,"cmBut_AlignO",text="",icon='CUBE')
row.prop(rs,"cmBut_Trackto",text="",icon='TRACKER')
row.prop(rs,"cmBut_Marker",text="",icon='MARKER')
row.prop(rs,"cmBut_AnimData",text="",icon='KEYTYPE_KEYFRAME_VEC')
box.use_property_split = True
box.use_property_decorate = False
row = layout.row(align=True)
row = box.row(align=True)
row = row.box()
row.prop(rs,'Manager_ShowSelect_Color',text='Selection Highlight')
## ]Manager Options
## New Camera Lens Settings [-----------
row = layout.row(align=True)
box = layout.box()
row = box.row(align=True)
row.alert = True
row.alignment='CENTER'
row.label(text='New Camera Lens Settings')
row = box.row(align=True)
row = row.box()
row.label(text='Camera Perspective',icon='VIEW_PERSPECTIVE')
row.prop(rs,"NewCam_lensPersp")
row = row.row(align=True)
row.prop(rs,"NewCam_ClipStart",text="Clip Start")
row.prop(rs,"NewCam_ClipEnd",text="End")
row = box.row(align=True)
row = row.box()
row.label(text='Camera Orthographic',icon='VIEW_ORTHO')
row.prop(rs,"NewCam_lensOrtho",text="Scale")
row = row.row(align=True)
row.prop(rs,"NewCam_ClipStartOrtho",text="Clip Start")
row.prop(rs,"NewCam_ClipEndOrtho",text="End")
## ]New Camera Lens Settings
# CAMERA QUICK SETTINGS ######################################################################################
class CAMMANAGER_PT_QuickSettings(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = "Render"
bl_label = "Settings :"
#bl_options = {'DEFAULT_CLOSED'}
bl_idname = "CAMMANAGER_PT_QuickSettings"
bl_parent_id = "CAMMANAGER_PT_Cammanager"
_selectedCam = []
@classmethod
def poll(cls, context):
return (context.active_object is not None and context.active_object==bpy.context.space_data.camera) and bpy.context.scene.RBTab_Settings.cmOptions == False
def draw_header_preset(self, context):
scene = context.scene
cameras = sorted([o for o in scene.objects if o.type == 'CAMERA'],key=lambda o: o.name)
ob = context.active_object
selectedObj = bpy.context.selected_objects
selectedCam = sorted([o for o in selectedObj if o.type == 'CAMERA'],key=lambda o: o.name)
noCustomDimCam = sorted([o for o in cameras if o.RBTab_obj_Settings.Custom_CamRes_prop == False],key=lambda o: o.name)
layout = self.layout
row = layout.row(align=True)
if len(cameras) > 0 and (ob in cameras):
if len(selectedCam) == 1 :
if ob in selectedCam:
chosen_camera = context.active_object
row.label(text="{0}".format(chosen_camera .name))
elif len(selectedCam) > 1:
if ob in selectedCam:
row.alert = True
chosen_camera = context.active_object
row.label(text="[..{0}..]".format(chosen_camera .name))
else:
row.active = False
chosen_camera = context.active_object
row.label(text="{0}".format(chosen_camera .name))
else:
chosen_camera = context.active_object
row.label(text="{0}".format(chosen_camera .name))
def draw(self, context):
scene = context.scene
rs = scene.RBTab_Settings
ob = context.active_object
cs = ob.RBTab_obj_Settings
render = scene.render
cameras = sorted([o for o in scene.objects if o.type == 'CAMERA'],key=lambda o: o.name)
view = context.space_data
chosen_camera = bpy.context.object.data
cam = chosen_camera
selectedObj = bpy.context.selected_objects
selectedCam = sorted([o for o in selectedObj if o.type == 'CAMERA'],key=lambda o: o.name)
noCustomDimCam = sorted([o for o in cameras if o.RBTab_obj_Settings.Custom_CamRes_prop == False],key=lambda o: o.name)
selectedCustomDimCam = list(set(selectedCam) - set(noCustomDimCam))
self._selectedCam = selectedCam
layout = self.layout
if len(cameras) > 0 and (ob in cameras):
row = layout.row(align=True)
# if len(selectedCam) > 1 and ob not in selectedCam:
# row.enabled = False
# layout.emboss = 'NONE'################
row.prop(cam, "type", text="")
row = layout.row(align=True)
#if len(selectedCam) > 1 and ob not in selectedCam: row.enabled = False ################
if cam.type == 'PERSP':
row.prop(cam, "lens", text="Focal")
elif cam.type == 'ORTHO':
row.prop(cam, "ortho_scale", text="Scale")
elif cam.type == 'PANO':
engine = context.engine
if engine == 'CYCLES':
ccam = cam.cycles
box = layout.box()  # box for the panorama settings; it was otherwise undefined in this branch
row = box.row()
row.prop(ccam, "panorama_type", text="")
row = box.row()
if ccam.panorama_type == 'FISHEYE_EQUIDISTANT':
row.prop(ccam, "fisheye_fov", text="FOV")
elif ccam.panorama_type == 'FISHEYE_EQUISOLID':
row.prop(ccam, "fisheye_lens", text="Lens")
row.prop(ccam, "fisheye_fov", text="FOV")
elif ccam.panorama_type == 'EQUIRECTANGULAR':
row = box.row()
row.label(text="Latitude:")
row = box.row()
row.prop(ccam, "latitude_min", text="Min")
row.prop(ccam, "latitude_max", text="Max")
row = box.row()
row.label(text="Longitude:")
row = box.row()
row.prop(ccam, "longitude_min", text="Min")
row.prop(ccam, "longitude_max", text="Max")
elif engine in {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}:
if cam.lens_unit == 'MILLIMETERS':
row.prop(cam, "lens")
elif cam.lens_unit == 'FOV':
row.prop(cam, "angle")
row.prop(cam, "lens_unit")
row = layout.row(align=True)
#if len(selectedCam) > 1 and ob not in selectedCam: row.enabled = False ################
row.prop(cam, "shift_x", text="Shift H")
row.prop(cam, "shift_y", text="V")
row = layout.row(align=True)
#if len(selectedCam) > 1 and ob not in selectedCam: row.enabled = False ################
row.prop(cam, "clip_start", text="Clip Start")
row.prop(cam, "clip_end", text="End")
layout.separator()
row = layout.row(align=True)
#if len(selectedCam) > 1 and ob not in selectedCam: row.enabled = False ################
if cs.Custom_CamRes_prop == False:
row.operator('cameramanager.custom_resolution',text="Save Custom Resolution",icon='FILE_TICK').crrefresh = False
elif cs.Custom_CamRes_prop == True and (cs.Custom_CamHRes_prop == render.resolution_x) and (cs.Custom_CamVRes_prop == render.resolution_y):
row.operator('cameramanager.custom_resolution',text="{0} x {1}".format(cs.Custom_CamHRes_prop,cs.Custom_CamVRes_prop), icon='LOCKED')
row.operator('cameramanager.custom_resolution',text="", icon='PANEL_CLOSE',emboss=False).crdel = True
elif cs.Custom_CamRes_prop == True and (cs.Custom_CamHRes_prop != render.resolution_x) or (cs.Custom_CamVRes_prop != render.resolution_y):
row.operator('cameramanager.custom_resolution',text="{0} x {1}".format(cs.Custom_CamHRes_prop,cs.Custom_CamVRes_prop), icon='LOCKED').crrefresh = True
row.operator('cameramanager.custom_resolution',text="", icon='PANEL_CLOSE',emboss=False).crdel = True
# CAMERA MANAGER FOOTER INFOS ######################################################################################
class CAMMANAGER_PT_InfosCamActiv(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = "Render"
bl_label = "Camera Infos"
bl_idname = "CAMMANAGER_PT_InfosCamActiv"
bl_options = {'HIDE_HEADER'}
bl_parent_id = "CAMMANAGER_PT_Cammanager"
@classmethod
def poll(cls, context):
return (context.active_object is not None
and context.active_object==bpy.context.space_data.camera
and bpy.context.scene.RBTab_Settings.cmOptions == False)
def draw(self, context):
scene = context.scene
ob = context.active_object
cs = ob.RBTab_obj_Settings
marker_list = context.scene.timeline_markers
chosen_camera = context.active_object
render = context.scene.render
cameras = sorted([o for o in scene.objects if o.type == 'CAMERA'],key=lambda o: o.name)
layout = self.layout
split = layout.split()
layout.use_property_split = True
layout.use_property_decorate = False
row = layout.row(align=True)
row.scale_y = 0.7
if (context.active_object is not None):
if len(cameras) > 0 and (ob in cameras):
_customDim = ""
_trackTo = ""
_markerName = ""
_markerFrame = ""
_infos = ""
if cs.Custom_CamRes_prop == True: _customDim = "{0}x{1} ".format(cs.Custom_CamHRes_prop,cs.Custom_CamVRes_prop)
if len(chosen_camera.constraints) > 0 and chosen_camera.constraints[0].target is not None: _trackTo = " [{0}] ".format(chosen_camera.constraints[0].target.name)
for marker in marker_list:
if marker.camera == chosen_camera and scene.frame_current != 0:
_markerName = " <{0}>".format(marker.camera.name)
_markerFrame = "({0})".format(marker.frame)
_infos = _customDim + _trackTo + _markerName + _markerFrame
if len(chosen_camera.constraints) > 0 and chosen_camera.constraints[0].target is None: _infos ="No Target"
if _infos != "":
if _infos == "No Target":
row.alert = True
row.label(text = "Track To Error : " + _infos, icon ='ERROR')
else: row.label(text = _infos, icon ='INFO')
# RENDER PRESET ######################################################################################
class RENDER_PT_presets(PresetPanel, Panel):
bl_label = "Render Presets"
preset_subdir = "render"
preset_operator = "script.execute_preset"
preset_add_operator = "render.preset_add"
# RENDER DIMENSIONS SUBPANEL ######################################################################################
class MYBIGBUTTONTAB_PT_RenderDimensions(Panel):
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Render"
bl_label = "Dimensions"
bl_options = {'DEFAULT_CLOSED'}
bl_idname = "MYBIGBUTTON_PT_RenderDimensions"
bl_parent_id = "MYBIGBUTTONTAB_PT_MyBigButton"
@classmethod
def poll(cls, context):
return bpy.context.scene.RBTab_Settings.mbbOptions == False
#return context.mode == 'OBJECT'
def draw_header_preset(self, _context):
RENDER_PT_presets.draw_panel_header(self.layout)
def draw(self, context):
scene = context.scene
rd = scene.render
rs = scene.RBTab_Settings
layout = self.layout
row = layout.row(align=True)
row.prop(scene.render, 'resolution_x', text="H")
row.operator("render.toggle_orientation", text="", icon='ARROW_LEFTRIGHT')
row.prop(scene.render, 'resolution_y', text="V")
if (rd.resolution_x != rs.Default_HRes_prop) or (rd.resolution_y != rs.Default_VRes_prop):
row.operator("render.store_as_defaultres", text="", icon='FILE_TICK',emboss=False)
layout.prop(context.scene.render, "resolution_percentage", text="")
row = layout.row(align=True)
area = next(area for area in bpy.context.screen.areas if area.type == 'VIEW_3D')
if area.spaces[0].region_3d.view_perspective == 'CAMERA':
row.active = True
row.enabled = True
row.prop(rd, "use_border", text="Render Region", icon='SHADING_BBOX')
if rd.use_border == True:
row.prop(rd, "use_crop_to_border", text="Crop Region", icon='IMAGE_PLANE')
else:
row.active = False
row.enabled = False
row.prop(rd, "use_border", text="Render Region", icon='SHADING_BBOX')
if rd.use_border == True:
row.prop(rd, "use_crop_to_border", text="Crop Region", icon='IMAGE_PLANE')
# visual alarm ######################################################################################
class MYBIGBUTTONTAB_PT_VisualAlarm(Panel):
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Render"
bl_label = "ALARME - ESC to Abort"
bl_options = {'HIDE_HEADER'}
bl_idname = "MYBIGBUTTONTAB_PT_VisualAlarm"
@classmethod
def poll(cls, context):
return bpy.context.scene.RBTab_Settings.alarmInProgress == True
def draw(self, context):
| |
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import scarlet
class TestProjections(object):
"""Test project_image
Because the behavior of projections is dependent on
whether the input image and the output image have an
even or odd number of pixels, we have tests for all
four different cases (odd-odd, even-even, odd-even, even-odd).
"""
def test_odd2odd(self):
project_image = scarlet.interpolation.project_image
img = np.arange(35).reshape(5, 7)
# smaller to bigger
shape = (11, 9)
result = project_image(img, shape)
truth = np.zeros(shape)
truth[3:-3, 1:-1] = img
assert_array_equal(result, truth)
# bigger to smaller
shape = (3, 3)
result = project_image(img, shape)
truth = img[1:-1, 2:-2]
assert_array_equal(result, truth)
# lower left smaller to bigger
shape = (11, 9)
result = project_image(img, shape, (-6, -6))
truth = np.zeros(shape)
truth[:4, :5] = img[-4:, -5:]
assert_array_equal(result, truth)
# lower left bigger to smaller
shape = (3, 3)
result = project_image(img, shape, (-4, -6))
truth = np.zeros(shape)
truth[:2, :2] = img[-2:, -2:]
assert_array_equal(result, truth)
# upper right smaller to bigger
shape = (11, 9)
result = project_image(img, shape, (4, 0))
truth = np.zeros(shape)
truth[-2:, -5:] = img[:2, :5]
assert_array_equal(result, truth)
# upper right bigger to smaller
shape = (3, 3)
result = project_image(img, shape, (0, 1))
truth = np.zeros(shape)
truth[-2:, -1:] = img[:2, :1]
assert_array_equal(result, truth)
def test_even2even(self):
project_image = scarlet.interpolation.project_image
img = np.arange(48).reshape(8, 6)
# smaller to bigger
shape = (12, 8)
result = project_image(img, shape)
truth = np.zeros(shape)
truth[2:-2, 1:-1] = img
assert_array_equal(result, truth)
# bigger to smaller
shape = (6, 4)
result = project_image(img, shape)
truth = img[1:-1, 1:-1]
assert_array_equal(result, truth)
# lower left smaller to bigger
shape = (14, 18)
result = project_image(img, shape, (-10, -11))
truth = np.zeros(shape)
truth[:5, :4] = img[-5:, -4:]
assert_array_equal(result, truth)
# lower left bigger to smaller
shape = (4, 4)
result = project_image(img, shape, (-1, -1))
truth = np.zeros(shape)
truth[-3:, -3:] = img[:3, :3]
assert_array_equal(result, truth)
# upper right smaller to bigger
shape = (12, 10)
result = project_image(img, shape, (3, 1))
truth = np.zeros(shape)
truth[-3:, -4:] = img[:3, :4]
assert_array_equal(result, truth)
# upper right bigger to smaller
shape = (4, 4)
result = project_image(img, shape, (0, -1))
truth = np.zeros(shape)
truth[-2:, -3:] = img[:2, :3]
assert_array_equal(result, truth)
def test_odd2even(self):
project_image = scarlet.interpolation.project_image
img = np.arange(35).reshape(5, 7)
# smaller to bigger
shape = (10, 8)
result = project_image(img, shape)
truth = np.zeros(shape)
truth[3:8, 1:] = img
assert_array_equal(result, truth)
# bigger to smaller
shape = (4, 4)
result = project_image(img, shape)
truth = img[:4, 1:-2]
assert_array_equal(result, truth)
# lower left smaller to bigger
shape = (14, 18)
result = project_image(img, shape, (-9, -11))
truth = np.zeros(shape)
truth[:3, :5] = img[-3:, -5:]
assert_array_equal(result, truth)
# lower left bigger to smaller
shape = (4, 4)
result = project_image(img, shape, (-4, -5))
truth = np.zeros(shape)
truth[:3, :4] = img[-3:, -4:]
assert_array_equal(result, truth)
# upper right smaller to bigger
shape = (12, 10)
result = project_image(img, shape, (3, 1))
truth = np.zeros(shape)
truth[-3:, -4:] = img[:3, :4]
assert_array_equal(result, truth)
# upper right bigger to smaller
shape = (4, 4)
result = project_image(img, shape, (1, 0))
truth = np.zeros(shape)
truth[-1:, -2:] = img[:1, :2]
assert_array_equal(result, truth)
def test_even2odd(self):
project_image = scarlet.interpolation.project_image
img = np.arange(48).reshape(8, 6)
# smaller to bigger
shape = (11, 9)
result = project_image(img, shape)
truth = np.zeros(shape)
truth[1:-2, 1:-2] = img
assert_array_equal(result, truth)
# bigger to smaller
shape = (3, 3)
result = project_image(img, shape)
truth = img[3:-2, 2:-1]
assert_array_equal(result, truth)
# lower left smaller to bigger
shape = (11, 9)
result = project_image(img, shape, (-9, -5))
truth = np.zeros(shape)
truth[:4, :5] = img[-4:, -5:]
assert_array_equal(result, truth)
# lower left bigger to smaller
shape = (3, 3)
result = project_image(img, shape, (-7, -5))
truth = np.zeros(shape)
truth[:2, :2] = img[-2:, -2:]
assert_array_equal(result, truth)
# upper right smaller to bigger
shape = (11, 9)
result = project_image(img, shape, (4, 0))
truth = np.zeros(shape)
truth[-2:, -5:] = img[:2, :5]
assert_array_equal(result, truth)
# upper right bigger to smaller
shape = (3, 3)
result = project_image(img, shape, (0, 1))
truth = np.zeros(shape)
truth[-2:, -1:] = img[:2, :1]
assert_array_equal(result, truth)
def test_zoom(self):
# Test that zooming out and in keeps a consistent center
kernel = np.arange(4).reshape(2, 2) + 1
p3 = scarlet.interpolation.project_image(kernel, (3, 3))
p6 = scarlet.interpolation.project_image(p3, (6, 6))
p5 = scarlet.interpolation.project_image(p6, (5, 5))
p2 = scarlet.interpolation.project_image(p3, (2, 2))
assert_array_equal(p2, kernel)
truth = [[1.0, 2.0, 0.0], [3.0, 4.0, 0.0], [0.0, 0.0, 0.0]]
assert_array_equal(p3, truth)
truth = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 3.0, 4.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
assert_array_equal(p6, truth)
truth = [
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 2.0, 0.0, 0.0],
[0.0, 3.0, 4.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
assert_array_equal(p5, truth)
def interpolate_comparison(func, zero_truth, positive_truth, **kwargs):
# zero shift
result = func(0, **kwargs)
truth = zero_truth
assert_almost_equal(result[0], truth[0])
assert_array_equal(result[1], truth[1])
# positive shift
result = func(0.103, **kwargs)
truth = positive_truth
assert_almost_equal(result[0], truth[0])
assert_array_equal(result[1], truth[1])
# negative shift
result = func(-0.103, **kwargs)
truth = (truth[0][::-1], -truth[1][::-1])
assert_almost_equal(result[0], truth[0])
assert_array_equal(result[1], truth[1])
with pytest.raises(ValueError):
scarlet.interpolation.lanczos(1.1)
with pytest.raises(ValueError):
scarlet.interpolation.lanczos(-1.1)
class TestConvolutions:
"""Test FFT convolutions and interpolation algorithms
"""
def test_bilinear(self):
zero_truth = (np.array([1, 0]), np.array([0, 1]))
positive_truth = (np.array([1 - 0.103, 0.103]), np.array([0, 1]))
interpolate_comparison(
scarlet.interpolation.bilinear, zero_truth, positive_truth
)
def test_cubic_spline(self):
zero_truth = (np.array([0.0, 1.0, 0.0, 0.0]), np.array([-1, 0, 1, 2]))
positive_truth = (
np.array([-0.08287473, 0.97987473, 0.11251627, -0.00951627]),
np.array([-1, 0, 1, 2]),
)
interpolate_comparison(
scarlet.interpolation.cubic_spline, zero_truth, positive_truth
)
def test_catmull_rom(self):
# Catmull Rom should be the same as the cubic spline
# with a=0.5 and b=0
zero_truth = scarlet.interpolation.cubic_spline(0, a=0.5)
positive_truth = scarlet.interpolation.cubic_spline(0.103, a=0.5)
interpolate_comparison(
scarlet.interpolation.catmull_rom, zero_truth, positive_truth
)
def test_mitchel_netravali(self):
# Mitchel Netravali should be the same as the cubic spline
# with a=1/3 and b=1/3
zero_truth = scarlet.interpolation.cubic_spline(0, a=1 / 3, b=1 / 3)
positive_truth = scarlet.interpolation.cubic_spline(0.103, a=1 / 3, b=1 / 3)
interpolate_comparison(
scarlet.interpolation.mitchel_netravali, zero_truth, positive_truth
)
def test_lanczos(self):
# test Lanczos 3
zero_truth = (np.array([0, 0, 1, 0, 0, 0]), np.arange(6) - 2)
positive_truth = (
np.array(
[
0.01763955,
-0.07267534,
0.98073579,
0.09695747,
-0.0245699,
0.00123974,
]
),
np.array([-2, -1, 0, 1, 2, 3]),
)
interpolate_comparison(
scarlet.interpolation.lanczos, zero_truth, positive_truth
)
# test Lanczos 5
_truth = np.zeros((10,))
_truth[4] = 1
zero_truth = (_truth, np.arange(10) - 4)
positive_truth = (
np.array(
[
5.11187895e-03,
-1.55432491e-02,
3.52955166e-02,
-8.45895745e-02,
9.81954247e-01,
1.06954413e-01,
-4.15882547e-02,
1.85994926e-02,
-6.77652513e-03,
4.34415682e-04,
]
),
np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4, 5]),
)
interpolate_comparison(
scarlet.interpolation.lanczos, zero_truth, positive_truth, a=5
)
def test_separable(self):
result = scarlet.interpolation.get_separable_kernel(0.103, 0.42)
truth = [
[
0.000506097,
-0.002566513,
0.012535221,
0.008810656,
-0.002073032,
0.000332194,
],
[
-0.002085129,
0.010574090,
-0.051645379,
-0.036300092,
0.008540937,
-0.001368644,
],
[
0.028138304,
-0.142694735,
0.696941621,
0.489860766,
-0.115257837,
0.018469518,
],
[
0.002781808,
-0.014107082,
0.068901018,
0.048428598,
-0.011394616,
0.001825933,
],
[
-0.000704935,
0.003574863,
-0.017460144,
-0.012272247,
0.002887499,
-0.000462708,
],
[
0.000035569,
-0.000180379,
0.000880996,
0.000619227,
-0.000145696,
0.000023347,
],
]
assert_almost_equal(result[0], truth)
assert_array_equal(result[1], [-2, -1, 0, 1, 2, 3])
assert_array_equal(result[2], [-2, -1, 0, 1, 2, 3])
result = scarlet.interpolation.get_separable_kernel(
0.103, -0.42, kernel=scarlet.interpolation.bilinear
)
truth = [[0.376740000, 0.520260000], [0.043260000, 0.059740000]]
assert_almost_equal(result[0], truth)
assert_array_equal(result[1], [0, 1])
assert_array_equal(result[2], [-1, 0])
result = scarlet.interpolation.get_separable_kernel(0.103, 0.42, a=5)
truth = [
[
0.0000458,
-0.0001796,
0.0004278,
-0.0009684,
0.0037091,
0.0026576,
-0.0008415,
0.0003764,
-0.0001524,
0.0000312,
],
[
-0.0001391,
0.0005461,
-0.0013007,
0.0029445,
-0.0112779,
-0.0080806,
0.0025588,
-0.0011444,
0.0004633,
-0.0000948,
],
[
0.0003160,
-0.0012401,
0.0029536,
-0.0066863,
0.0256097,
0.0183494,
-0.0058105,
0.0025986,
-0.0010520,
0.0002154,
],
[
-0.0007572,
0.0029722,
-0.0070786,
0.0160245,
-0.0613765,
-0.0439765,
0.0139254,
-0.0062278,
0.0025211,
-0.0005161,
],
[
0.0087903,
-0.0345021,
0.0821710,
-0.1860199,
0.7124863,
0.5104987,
-0.1616529,
0.0722953,
-0.0292664,
0.0059916,
],
[
0.0009574,
-0.0037580,
0.0089501,
-0.0202613,
0.0776040,
0.0556035,
-0.0176072,
0.0078744,
-0.0031877,
0.0006526,
],
[
-0.0003723,
0.0014613,
-0.0034802,
0.0078784,
-0.0301756,
-0.0216209,
0.0068464,
-0.0030619,
0.0012395,
-0.0002538,
],
[
0.0001665,
-0.0006535,
0.0015564,
-0.0035235,
0.0134954,
0.0096695,
-0.0030619,
0.0013694,
-0.0005543,
0.0001135,
],
[
-0.0000607,
0.0002381,
-0.0005671,
0.0012837,
-0.0049169,
-0.0035230,
0.0011156,
-0.0004989,
0.0002020,
-0.0000413,
],
[
0.0000039,
-0.0000153,
0.0000364,
-0.0000823,
0.0003152,
0.0002258,
-0.0000715,
0.0000320,
-0.0000129,
0.0000027,
| |
- mckin/mbkin)))/
(1216215*mbkin) + (64*mckin**2*(-72701 + 99099*np.log(2) +
99099*np.log(1 - mckin/mbkin)))/(405405*mbkin**2) -
(64*mckin**3*(-72701 + 99099*np.log(2) + 99099*np.log(1 - mckin/mbkin)))/
(110565*mbkin**3) + (32*mckin**4*(-72701 + 99099*np.log(2) +
99099*np.log(1 - mckin/mbkin)))/(22113*mbkin**4) -
(32*mckin**5*(-72701 + 99099*np.log(2) + 99099*np.log(1 - mckin/mbkin)))/
(12285*mbkin**5) + (128*mckin**6*(-72701 + 99099*np.log(2) +
99099*np.log(1 - mckin/mbkin)))/(36855*mbkin**6) -
(128*mckin**7*(-72701 + 99099*np.log(2) + 99099*np.log(1 - mckin/mbkin)))/
(36855*mbkin**7) + (32*mckin**8*(-72701 + 99099*np.log(2) +
99099*np.log(1 - mckin/mbkin)))/(12285*mbkin**8) -
(32*mckin**9*(-72701 + 99099*np.log(2) + 99099*np.log(1 - mckin/mbkin)))/
(22113*mbkin**9) + (64*mckin**10*(-72701 + 99099*np.log(2) +
99099*np.log(1 - mckin/mbkin)))/(110565*mbkin**10) -
(64*mckin**11*(-72701 + 99099*np.log(2) + 99099*np.log(1 - mckin/mbkin)))/
(405405*mbkin**11) + (32*mckin**12*(-72701 + 99099*np.log(2) +
99099*np.log(1 - mckin/mbkin)))/(1216215*mbkin**12) -
(32*mckin**13*(-72701 + 99099*np.log(2) + 99099*np.log(1 - mckin/mbkin)))/
(15810795*mbkin**13) + (-1182523 + 1552320*np.log(2) +
1552320*np.log(1 - mckin/mbkin))/7203735 -
(4*mckin*(-1182523 + 1552320*np.log(2) + 1552320*np.log(1 -
mckin/mbkin)))/(2401245*mbkin) +
(2*mckin**2*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(218295*mbkin**2) -
(4*mckin**3*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(130977*mbkin**3) +
(mckin**4*(-1182523 + 1552320*np.log(2) + 1552320*np.log(1 -
mckin/mbkin)))/(14553*mbkin**4) -
(8*mckin**5*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(72765*mbkin**5) +
(4*mckin**6*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(31185*mbkin**6) -
(8*mckin**7*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(72765*mbkin**7) +
(mckin**8*(-1182523 + 1552320*np.log(2) + 1552320*np.log(1 -
mckin/mbkin)))/(14553*mbkin**8) -
(4*mckin**9*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(130977*mbkin**9) +
(2*mckin**10*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(218295*mbkin**10) -
(4*mckin**11*(-1182523 + 1552320*np.log(2) + 1552320*
np.log(1 - mckin/mbkin)))/(2401245*mbkin**11) +
(mckin**12*(-1182523 + 1552320*np.log(2) + 1552320*np.log(1 -
mckin/mbkin)))/(7203735*mbkin**12) +
(8*(-20507983 + 28522494*np.log(2) + 28522494*np.log(1 - mckin/mbkin)))/
1217431215 - (16*mckin*(-20507983 + 28522494*np.log(2) +
28522494*np.log(1 - mckin/mbkin)))/(173918745*mbkin) +
(8*mckin**2*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(13378365*mbkin**2) -
(32*mckin**3*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(13378365*mbkin**3) +
(8*mckin**4*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(1216215*mbkin**4) -
(16*mckin**5*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(1216215*mbkin**5) +
(8*mckin**6*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(405405*mbkin**6) -
(64*mckin**7*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(2837835*mbkin**7) +
(8*mckin**8*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(405405*mbkin**8) -
(16*mckin**9*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(1216215*mbkin**9) +
(8*mckin**10*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(1216215*mbkin**10) -
(32*mckin**11*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(13378365*mbkin**11) +
(8*mckin**12*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(13378365*mbkin**12) -
(16*mckin**13*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(173918745*mbkin**13) +
(8*mckin**14*(-20507983 + 28522494*np.log(2) + 28522494*
np.log(1 - mckin/mbkin)))/(1217431215*mbkin**14) +
(-33813661 + 47893560*np.log(2) + 47893560*np.log(1 - mckin/mbkin))/
289864575 - (16*mckin*(-33813661 + 47893560*np.log(2) +
47893560*np.log(1 - mckin/mbkin)))/(289864575*mbkin) +
(8*mckin**2*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(19324305*mbkin**2) -
(16*mckin**3*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(8281845*mbkin**3) +
(4*mckin**4*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(637065*mbkin**4) -
(16*mckin**5*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(1061775*mbkin**5) +
(8*mckin**6*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(289575*mbkin**6) -
(16*mckin**7*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(405405*mbkin**7) +
(2*mckin**8*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(45045*mbkin**8) -
(16*mckin**9*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(405405*mbkin**9) +
(8*mckin**10*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(289575*mbkin**10) -
(16*mckin**11*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(1061775*mbkin**11) +
(4*mckin**12*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(637065*mbkin**12) -
(16*mckin**13*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(8281845*mbkin**13) +
(8*mckin**14*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(19324305*mbkin**14) -
(16*mckin**15*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(289864575*mbkin**15) +
(mckin**16*(-33813661 + 47893560*np.log(2) + 47893560*
np.log(1 - mckin/mbkin)))/(289864575*mbkin**16) +
(-253404061 + 356516160*np.log(2) + 356516160*np.log(1 - mckin/mbkin))/
2029052025 - (mckin*(-253404061 + 356516160*np.log(2) +
356516160*np.log(1 - mckin/mbkin)))/(135270135*mbkin) +
(mckin**2*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(19324305*mbkin**2) -
(mckin**3*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(4459455*mbkin**3) +
(mckin**4*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(1486485*mbkin**4) -
(mckin**5*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(675675*mbkin**5) +
(mckin**6*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(405405*mbkin**6) -
(mckin**7*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(315315*mbkin**7) +
(mckin**8*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(315315*mbkin**8) -
(mckin**9*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(405405*mbkin**9) +
(mckin**10*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(675675*mbkin**10) -
(mckin**11*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(1486485*mbkin**11) +
(mckin**12*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(4459455*mbkin**12) -
(mckin**13*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(19324305*mbkin**13) +
(mckin**14*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(135270135*mbkin**14) -
(mckin**15*(-253404061 + 356516160*np.log(2) + 356516160*
np.log(1 - mckin/mbkin)))/(2029052025*mbkin**15) +
(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin))/335083448700 -
(mckin*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(19710791100*mbkin) +
(2*mckin**2*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(4927697775*mbkin**2) -
(2*mckin**3*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(985539555*mbkin**3) +
(mckin**4*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(140791365*mbkin**4) -
(mckin**5*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(54150525*mbkin**5) +
(2*mckin**6*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(54150525*mbkin**6) -
(2*mckin**7*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(34459425*mbkin**7) +
(mckin**8*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(13783770*mbkin**8) -
(mckin**9*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(13783770*mbkin**9) +
(2*mckin**10*(-36724834687 + 52219630320*np.log(2) +
52219630320*np.log(1 - mckin/mbkin)))/(34459425*mbkin**10) -
(2*mckin**11*(-36724834687 + 52219630320*np.log(2) +
52219630320*np.log(1 - mckin/mbkin)))/(54150525*mbkin**11) +
(mckin**12*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(54150525*mbkin**12) -
(mckin**13*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(140791365*mbkin**13) +
(2*mckin**14*(-36724834687 + 52219630320*np.log(2) +
52219630320*np.log(1 - mckin/mbkin)))/(985539555*mbkin**14) -
(2*mckin**15*(-36724834687 + 52219630320*np.log(2) +
52219630320*np.log(1 - mckin/mbkin)))/(4927697775*mbkin**15) +
(mckin**16*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(19710791100*mbkin**16) -
(mckin**17*(-36724834687 + 52219630320*np.log(2) + 52219630320*
np.log(1 - mckin/mbkin)))/(335083448700*mbkin**17) +
(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin))/4691168281800 -
(mckin*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(260620460100*mbkin) +
(mckin**2*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(30661230600*mbkin**2) -
(2*mckin**3*(-485272020137 + 691512341520*np.log(2) +
691512341520*np.log(1 - mckin/mbkin)))/(11497961475*mbkin**3) +
(mckin**4*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(1533061530*mbkin**4) -
(mckin**5*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(547521975*mbkin**5) +
(mckin**6*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(252702450*mbkin**6) -
(2*mckin**7*(-485272020137 + 691512341520*np.log(2) +
691512341520*np.log(1 - mckin/mbkin)))/(294819525*mbkin**7) +
(mckin**8*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(107207100*mbkin**8) -
(mckin**9*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(96486390*mbkin**9) +
(mckin**10*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(107207100*mbkin**10) -
(2*mckin**11*(-485272020137 + 691512341520*np.log(2) +
691512341520*np.log(1 - mckin/mbkin)))/(294819525*mbkin**11) +
(mckin**12*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(252702450*mbkin**12) -
(mckin**13*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(547521975*mbkin**13) +
(mckin**14*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(1533061530*mbkin**14) -
(2*mckin**15*(-485272020137 + 691512341520*np.log(2) +
691512341520*np.log(1 - mckin/mbkin)))/(11497961475*mbkin**15) +
(mckin**16*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(30661230600*mbkin**16) -
(mckin**17*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(260620460100*mbkin**17) +
(mckin**18*(-485272020137 + 691512341520*np.log(2) + 691512341520*
np.log(1 - mckin/mbkin)))/(4691168281800*mbkin**18) +
(-4916068298621 + 7016274641376*np.log(2) + 7016274641376*
np.log(1 - mckin/mbkin))/52784781809760 -
(mckin*(-4916068298621 + 7016274641376*np.log(2) + 7016274641376*
np.log(1 - mckin/mbkin)))/(2639239090488*mbkin) +
(mckin**2*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(277814641104*mbkin**2) -
(mckin**3*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(46302440184*mbkin**3) +
(mckin**4*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(10894691808*mbkin**4) -
(mckin**5*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(3404591190*mbkin**5) +
(mckin**6*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(1361836476*mbkin**6) -
(mckin**7*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(680918238*mbkin**7) +
(mckin**8*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(419026608*mbkin**8) -
(mckin**9*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(314269956*mbkin**9) +
(mckin**10*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(285699960*mbkin**10) -
(mckin**11*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(314269956*mbkin**11) +
(mckin**12*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(419026608*mbkin**12) -
(mckin**13*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(680918238*mbkin**13) +
(mckin**14*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(1361836476*mbkin**14) -
(mckin**15*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(3404591190*mbkin**15) +
(mckin**16*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(10894691808*mbkin**16) -
(mckin**17*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(46302440184*mbkin**17) +
(mckin**18*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(277814641104*mbkin**18) -
(mckin**19*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(2639239090488*mbkin**19) +
(mckin**20*(-4916068298621 + 7016274641376*np.log(2) +
7016274641376*np.log(1 - mckin/mbkin)))/(52784781809760*mbkin**20) +
(-995781239706241 + 1420555140164160*np.log(2) + 1420555140164160*
np.log(1 - mckin/mbkin))/10161070498378800 -
(mckin*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(534793184125200*
mbkin) + (mckin**2*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(59421464902800*
mbkin**2) - (mckin**3*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(10486140865200*
mbkin**3) + (mckin**4*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(2621535216300*
mbkin**4) - (mckin**5*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(873845072100*mbkin**5) +
(mckin**6*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(374505030900*mbkin**6) -
(mckin**7*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(201656555100*mbkin**7) +
(mckin**8*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(134437703400*mbkin**8) -
(mckin**9*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(109994484600*mbkin**9) +
(mckin**10*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(109994484600*
mbkin**10) - (mckin**11*(-995781239706241 + 1420555140164160*
np.log(2) + 1420555140164160*np.log(1 - mckin/mbkin)))/
(134437703400*mbkin**11) + (mckin**12*(-995781239706241 +
1420555140164160*np.log(2) + 1420555140164160*np.log(1 -
mckin/mbkin)))/(201656555100*mbkin**12) -
(mckin**13*(-995781239706241 + 1420555140164160*np.log(2) +
1420555140164160*np.log(1 - mckin/mbkin)))/(374505030900*
mbkin**13) + (mckin**14*(-995781239706241 + 1420555140164160*
np.log(2) + 1420555140164160*np.log(1 - mckin/mbkin)))/
(873845072100*mbkin**14) - (mckin**15*(-995781239706241 +
1420555140164160*np.log(2) + 1420555140164160*np.log(1 -
    mckin/mbkin)))/(2621535216300*mbkin**15)
from abc import ABC, abstractmethod
from enum import Enum
from functools import partial
# from math import isinf
from typing import Union, Optional, Any
from typing import Callable, Tuple, Dict, List, Set, Type # noqa: F401
from ..builtin_values import Bool, ops_symbols
from ..abstract_value import AbstractValue
from ...abstract_domain import AbstractDomain
from ...errors import TypeCheckLogger
from .objects_ids import new_id
from ...miscelaneous import Pos
__all__ = ['PythonValue', 'PT', 'AbstractMutVal', 'Args']
class PT(Enum):
"""Python types supported in pytropos"""
# Undefined = 0
Top = 1
# Bottom = 2
InConstruction = 11
class PythonValue(AbstractDomain):
def __init__(self,
val: Union[AbstractValue, PT] = PT.Top
) -> None:
self.val = val
__top = None # type: PythonValue
@classmethod
def top(cls) -> 'PythonValue':
"""Returns the Top element from the lattice: Any?"""
if cls.__top is None:
cls.__top = PythonValue(PT.Top)
return cls.__top
def is_top(self) -> 'bool':
"""Returns True if this object is the top of the lattice, ie, if Any?"""
return self.val is PT.Top
def join(self, other: 'PythonValue') -> 'PythonValue':
if self.val is PT.Top or other.val is PT.Top:
return PythonValue.top()
assert isinstance(self.val, AbstractValue)
assert isinstance(other.val, AbstractValue)
if type(self.val) is type(other.val): # noqa: E721
return PythonValue(self.val.join(other.val))
return PythonValue.top()
def widen_op(self, other: 'PythonValue') -> 'Tuple[PythonValue, bool]':
# eg: PythonValue(Int(5)) == PythonValue(Int(5))
if self == other:
return self, True
# eg: PythonValue(PT.Top) and PythonValue(Int(5))
if self.val is PT.Top or other.val is PT.Top:
return PythonValue.top(), False
# eg: PythonValue(Float(3)) and PythonValue(Int(5))
if type(self.val) is not type(other.val): # noqa: E721
return PythonValue.top(), False
assert isinstance(self.val, AbstractValue)
assert isinstance(other.val, AbstractValue)
# eg: PythonValue(List([3])) and PythonValue(List([3,5]))
if self.__op_in_abstractvalue_overwritten(self.val.widen_op):
new_val, fix = self.val.widen_op(other.val)
# eg: PythonValue(Int(3)) and PythonValue(Int(5))
else:
new_val = self.val.join(other.val)
# TODO(helq): This is not how a widening operator is defined, actually we
# compare with <= not == !!!
fix = new_val == self.val
return PythonValue(new_val), fix
def is_mut(self) -> 'bool':
"""Checks if the object is mutable"""
return isinstance(self.val, AbstractMutVal)
@property
def mut_id(self) -> 'int':
"""Returns id of object if it is mutable"""
assert isinstance(self.val, AbstractMutVal)
return self.val.mut_id
def copy_mut(self,
mut_heap: 'Dict[int, PythonValue]'
) -> 'PythonValue':
"""Copies a mutable object recursively"""
assert isinstance(self.val, AbstractMutVal)
if self.is_top():
return self
if self.mut_id in mut_heap:
return mut_heap[self.mut_id]
else:
new_obj = mut_heap[self.mut_id] = PythonValue(PT.InConstruction)
new_obj.val = self.val.copy_mut(mut_heap)
return new_obj
def convert_into_top(self, converted: 'Set[int]') -> None:
"""Makes the underlying AbstractMutVal Top"""
assert isinstance(self.val, AbstractMutVal)
self.val.convert_into_top(converted)
self.val = self.val.top()
def new_vals_to_top(
self,
mut_heap: 'Dict[Tuple[str, int], Tuple[int, int, PythonValue]]',
side: str
) -> None:
"""Makes a mutable object Top"""
assert isinstance(self.val, AbstractMutVal)
self.val.new_vals_to_top(mut_heap, side)
def join_mut(self,
other: 'PythonValue',
mut_heap: 'Dict[Tuple[str, int], Tuple[int, int, PythonValue]]'
) -> 'PythonValue':
"""Joining two mutable PythonValues"""
assert isinstance(self.val, AbstractMutVal)
assert isinstance(other.val, AbstractMutVal)
left_iden = ('left', self.mut_id)
right_iden = ('right', other.mut_id)
        # Check whether we have already encountered this value
if (left_iden in mut_heap) or (right_iden in mut_heap):
# self and other have already been joined
if (left_iden in mut_heap) and mut_heap[left_iden][1] == other.mut_id:
# assert right_iden in mut_heap
assert mut_heap[right_iden][0] == self.mut_id
assert mut_heap[right_iden][2] is mut_heap[left_iden][2]
return mut_heap[left_iden][2]
            # left has already been joined with a different object
else:
self.new_vals_to_top(mut_heap, 'left')
other.new_vals_to_top(mut_heap, 'right')
return PythonValue.top()
if type(self.val) is not type(other.val): # noqa: E721
self.new_vals_to_top(mut_heap, 'left')
other.new_vals_to_top(mut_heap, 'right')
return PythonValue.top()
        # If either value is top, the result is top
if self.val.is_top():
other.new_vals_to_top(mut_heap, 'right')
return PythonValue(self.val.top())
if other.val.is_top():
            self.new_vals_to_top(mut_heap, 'left')
return PythonValue(self.val.top())
new_obj = PythonValue(PT.InConstruction)
mut_heap[left_iden] = mut_heap[right_iden] = \
(self.mut_id, other.mut_id, new_obj)
new_val = self.val.join_mut(other.val, mut_heap)
if new_obj.val == PT.InConstruction:
new_obj.val = new_val
# Notice that we don't change the value of the Object if it is not InConstruction.
# If a PythonValue is not anymore in construction it means that it has been made
# "top" by some call before it
return new_obj
# TODO(helq): This equality function is faulty (because of the underlying mutable
# variables). An equality function should be defined in Store, not here, to compare
# two different Stores. Similar to how `join_mut` is defined
def __eq__(self, other: Any) -> 'bool':
if self is other:
return True
if not isinstance(other, PythonValue):
return False
return self.val == other.val
__repr_visited = set() # type: Set[int]
def __repr__(self) -> str:
if self.val is PT.Top:
return "Top"
elif self.val is PT.InConstruction:
return "InConstruction"
        else:  # self.val holds an AbstractValue
assert not isinstance(self.val, PT)
if self.is_mut():
if self.mut_id in self.__repr_visited:
return 'Ref'
else:
self.__repr_visited.add(self.mut_id)
r = self.val.abstract_repr
self.__repr_visited.remove(self.mut_id)
return r
else:
return self.val.abstract_repr
# TODO(helq): Improve by checking if the given parameters correspond to the arguments
# the function receives, if not return Top
def call(self,
store: Any,
args: 'Args',
pos: Optional[Pos] = None) -> 'PythonValue':
if self.is_top():
return PythonValue.top()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"The type is {type(self.val)} but should have been an AbstractValue"
call_method = self.val.fun_call
if self.__op_in_abstractvalue_overwritten(call_method):
newval = call_method(store, args, pos) # type: PythonValue
assert isinstance(newval, PythonValue), "A function call didn't return a PythonValue"
else:
TypeCheckLogger().new_warning(
"E016",
f"TypeError: '{self.val.type_name}' object is not callable",
pos)
newval = PythonValue.top()
return newval
@property
def attr(self) -> 'AttrsContainer':
if self.is_top():
return AttrsTopContainer()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"The type is {type(self.val)} but should have been an AbstractValue"
call_method = self.val.get_attrs
if self.__op_in_abstractvalue_overwritten(call_method):
return call_method() # type: ignore
else:
return AttrsTopContainer()
def subs(self, pos: 'Optional[Pos]' = None) -> 'SubscriptsContainer':
if self.is_top():
return SubscriptsTopContainer()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"The type is {type(self.val)} but should have been an AbstractValue"
call_method = self.val.get_subscripts
if self.__op_in_abstractvalue_overwritten(call_method):
return call_method(pos) # type: ignore
else:
TypeCheckLogger().new_warning(
"E015",
f"TypeError: '{self.val.type_name}' object is not subscriptable",
pos)
return SubscriptsTopContainer()
def __getattr__(self, name: str) -> Any:
# Checking if name is add, mul, truediv
if name in ops_symbols.keys():
return partial(self.operate, name)
raise AttributeError(f"PythonValue has no attribute called '{name}'")
@staticmethod
def __op_in_abstractvalue_overwritten(method: Any) -> 'bool':
"""Checks whether the method (defined in AbstractValue) was overwriten or not"""
notoverwritten = hasattr(method, '__qualname__') and \
method.__qualname__.split('.')[0] == "AbstractValue"
return not notoverwritten # ie, True if method overwritten
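    # Illustration of the __qualname__ check above (class names are hypothetical,
    # assuming AbstractValue defines a default op_add that a concrete Int class
    # overrides):
    #
    #   AbstractValue.op_add.__qualname__  == 'AbstractValue.op_add'
    #   Int.op_add.__qualname__            == 'Int.op_add'
    #
    # The prefix before the first '.' therefore reveals whether the concrete
    # class supplied its own implementation or merely inherited the default one.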
def operate(self, op: str, other: 'PythonValue', pos: Optional[Pos] = None) -> 'PythonValue':
op_sym = ops_symbols[op]
if self.val is PT.Top or other.val is PT.Top:
return PythonValue.top()
# This assert is always true, it's just to keep Mypy from crying
assert isinstance(self.val, AbstractValue), \
f"Left type is {type(self.val)} but should have been an AbstractValue"
assert isinstance(other.val, AbstractValue), \
f"Left type is {type(other.val)} but should have been an AbstractValue"
# If both values have the same type use val.op_add(otherval)
if type(self.val) is type(other.val): # noqa: E721
            # Check whether op_{op} has been overridden by the concrete class of self.val
# If it hasn't, the operation result is Top
op_method = getattr(self.val, f'op_{op}')
if self.__op_in_abstractvalue_overwritten(op_method):
newval = op_method(other.val, pos)
else:
TypeCheckLogger().new_warning(
"E009",
f"TypeError: unsupported operand type(s) for {op_sym}: "
f"'{self.val.type_name}' and '{other.val.type_name}'",
pos)
newval = PT.Top
# If values have different type use val.op_add_OtherType(otherval)
        # or otherval.op_radd_Type(val)
else:
leftOpName = "op_r{op}_{class_name}".format(op=op, class_name=type(self.val).__name__)
rightOpName = "op_{op}_{class_name}".format(op=op, class_name=type(other.val).__name__)
try:
newval = getattr(self.val, rightOpName)(other.val, pos)
except AttributeError:
try:
newval = getattr(other.val, leftOpName)(self.val, pos)
except AttributeError:
TypeCheckLogger().new_warning(
"E009",
f"TypeError: unsupported operand type(s) for {op_sym}: "
f"'{self.val.type_name}' and '{other.val.type_name}'",
pos)
newval = PT.Top
if newval is None:
return PythonValue.top()
return PythonValue(newval)
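    # Sketch of how the dispatch above is reached (Int and Float abstract values
    # are assumed to exist in builtin_values; 'add' is assumed to be a key of
    # ops_symbols, so `v.add(w)` goes through __getattr__ to `v.operate('add', w)`):
    #
    #   v = PythonValue(Int(2)); w = PythonValue(Int(3))
    #   v.add(w)                        # same types: Int.op_add(other, pos) if overridden
    #   v.add(PythonValue(Float(1.0)))  # mixed types: op_add_Float, else op_radd_Int
    #   v.add(PythonValue.top())        # a Top operand short-circuits to PythonValue.top()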
def bool(self, pos: Optional[Pos] = None) -> 'PythonValue':
"""method documentation"""
if isinstance(self.val, Bool):
return self
if self.val is PT.Top:
return PythonValue(Bool.top())
assert isinstance(self.val, AbstractValue)
op_method = self.val.op_bool
if self.__op_in_abstractvalue_overwritten(op_method):
bool_val = op_method(pos)
# Checking bool_val is a boolean!
if not isinstance(bool_val, Bool):
TypeCheckLogger().new_warning(
"E010",
f"TypeError: __bool__ should return bool, returned {bool_val.val.type_name}",
pos)
return PythonValue(Bool.top())
return PythonValue(bool_val)
# TODO(helq): If the operation was not defined more stuff is to be done, like
# checking __len__.
# More info: https://docs.python.org/3/reference/datamodel.html#object.__bool__
return PythonValue(Bool.top())
def type(self) -> str:
"""Returns the type of the value hold self.val"""
if self.val is PT.Top:
return "Top"
elif self.val is PT.InConstruction:
return "InConstruction"
        else:  # self.val holds an AbstractValue
assert not isinstance(self.val, PT)
return str(self.val.type_name)
def __lt__(self, other: 'PythonValue') -> '__builtins__.bool':
        if
description_btn]
box244 = Box(children=row, layout=box_layout)
name_btn = Button(description='activated_speed', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float229 = FloatText(value='0.4', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='micron/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='speed after activation', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float229, units_btn, description_btn]
box245 = Box(children=row, layout=box_layout)
name_btn = Button(description='activated_cytokine_secretion_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float230 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
        description_btn = Button(description='rate of secreting pro-inflammatory cytokine after activation', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float230, units_btn, description_btn]
box246 = Box(children=row, layout=box_layout)
name_btn = Button(description='activated_immune_cell', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float231 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='used internally to track activation state', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float231, units_btn, description_btn]
box247 = Box(children=row, layout=box_layout)
name_btn = Button(description='antiinflammatory_cytokine_secretion_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float232 = FloatText(value='15', step='1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='secretion rate of anti-inflammatory from infected epithelium cell', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float232, units_btn, description_btn]
box248 = Box(children=row, layout=box_layout)
name_btn = Button(description='collagen_secretion_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float233 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='secretion rate of collagen from fibroblast', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float233, units_btn, description_btn]
box249 = Box(children=row, layout=box_layout)
self.cell_def_vbox1 = VBox([
div_row9, box125, box126, box127, box128, div_row10, death_model1,box129, box130, box131, box132, box133, box134, box135, death_model2,box136, box137, box138, box139, box140, box141, box142, div_row11, box143, box144, box145, box146, box147, box148, box149, box150, box151, div_row12, box152, box153, box154, box155, box156, div_row13, box157,box158,box159,self.bool7,self.bool8,chemotaxis_btn,self.bool9,box160,box161,div_row14, box162,box163,box164,box165,box166,box167,box168,box169,box170,box171,box172,box173,box174,box175,box176,box177,box178,box179,div_row15, div_row16, box180,
box181,
box182,
box183,
box184,
box185,
box186,
box187,
box188,
box189,
box190,
box191,
box192,
box193,
box194,
box195,
box196,
box197,
box198,
box199,
box200,
box201,
box202,
box203,
box204,
box205,
box206,
box207,
box208,
box209,
box210,
box211,
box212,
box213,
box214,
box215,
box216,
box217,
box218,
box219,
box220,
box221,
box222,
box223,
box224,
box225,
box226,
box227,
box228,
box229,
box230,
box231,
box232,
box233,
box234,
box235,
box236,
box237,
box238,
box239,
box240,
box241,
box242,
box243,
box244,
box245,
box246,
box247,
box248,
box249,
])
# ------------------------------------------
self.cell_def_vboxes.append(self.cell_def_vbox1)
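        # Every parameter row built above follows the same recipe: a disabled name
        # Button, a FloatText holding the value, a disabled units Button and, for
        # custom data, a disabled description Button, all packed into a Box; the
        # Boxes are then stacked into this cell definition's VBox.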
# >>>>>>>>>>>>>>>>> <cell_definition> = CD8 Tcell
# -------------------------
div_row17 = Button(description='phenotype:cycle (model: flow_cytometry_separated_cycle_model; code=6)', disabled=True, layout=divider_button_layout)
div_row17.style.button_color = 'orange'
name_btn = Button(description='Phase 0 -> Phase 1 transition rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float234 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float234, units_btn, ]
box250 = Box(children=row, layout=box_layout)
name_btn = Button(description='Phase 1 -> Phase 2 transition rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float235 = FloatText(value='0.00208333', step='0.0001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float235, units_btn, ]
box251 = Box(children=row, layout=box_layout)
name_btn = Button(description='Phase 2 -> Phase 3 transition rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float236 = FloatText(value='0.00416667', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float236, units_btn, ]
box252 = Box(children=row, layout=box_layout)
name_btn = Button(description='Phase 3 -> Phase 0 transition rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float237 = FloatText(value='0.0166667', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float237, units_btn, ]
box253 = Box(children=row, layout=box_layout)
# -------------------------
div_row18 = Button(description='phenotype:death', disabled=True, layout=divider_button_layout)
div_row18.style.button_color = 'orange'
death_model1 = Button(description='model: apoptosis', disabled=True, layout={'width':'30%'})
death_model1.style.button_color = '#ffde6b'
name_btn = Button(description='death rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float238 = FloatText(value='2.8e-4', step='1e-05', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float238, units_btn, ]
box254 = Box(children=row, layout=box_layout)
name_btn = Button(description='unlysed_fluid_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float239 = FloatText(value='0.05', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float239, units_btn, ]
box255 = Box(children=row, layout=box_layout)
name_btn = Button(description='lysed_fluid_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float240 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float240, units_btn, ]
box256 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float241 = FloatText(value='1.66667e-02', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float241, units_btn, ]
box257 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float242 = FloatText(value='5.83333e-03', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float242, units_btn, ]
box258 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcification_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float243 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float243, units_btn, ]
box259 = Box(children=row, layout=box_layout)
name_btn = Button(description='relative_rupture_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float244 = FloatText(value='2.0', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float244, units_btn, ]
box260 = Box(children=row, layout=box_layout)
death_model2 = Button(description='model: necrosis', disabled=True, layout={'width':'30%'})
death_model2.style.button_color = '#ffde6b'
name_btn = Button(description='death rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float245 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float245, units_btn, ]
box261 = Box(children=row, layout=box_layout)
name_btn = Button(description='unlysed_fluid_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float246 = FloatText(value='0.05', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float246, units_btn, ]
box262 = Box(children=row, layout=box_layout)
name_btn = Button(description='lysed_fluid_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float247 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float247, units_btn, ]
box263 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float248 = FloatText(value='1.66667e-02', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float248, units_btn, ]
box264 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float249 = FloatText(value='5.83333e-03', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float249, units_btn, ]
box265 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcification_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float250 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float250, units_btn, ]
box266 = Box(children=row, layout=box_layout)
name_btn = Button(description='relative_rupture_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float251 = FloatText(value='2.0', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float251, units_btn, ]
box267 = Box(children=row, layout=box_layout)
# -------------------------
div_row19 = Button(description='phenotype:volume', disabled=True, layout=divider_button_layout)
div_row19.style.button_color = 'orange'
name_btn = Button(description='total', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float252 = FloatText(value='478', step='10', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float252, units_btn, ]
box268 = Box(children=row, layout=box_layout)
name_btn = Button(description='fluid_fraction', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float253 = FloatText(value='0.75', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float253, units_btn, ]
box269 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float254 = FloatText(value='47.8', step='1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float254, units_btn, ]
box270 = Box(children=row, layout=box_layout)
name_btn = Button(description='fluid_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float255 = FloatText(value='0.05', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float255, units_btn, ]
box271 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float256 = FloatText(value='0.0045', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float256, units_btn, ]
box272 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float257 = FloatText(value='0.0055', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float257, units_btn, ]
box273 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcified_fraction', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float258 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float258, units_btn, ]
box274 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcification_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float259 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float259, units_btn, ]
box275 = Box(children=row, layout=box_layout)
name_btn = Button(description='relative_rupture_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
        self.float260
+ Ad[3, 2] * tp2_2 + Ad[3, 3] * tp2_3
)
values[n] = (
Phi0_0
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 0, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 0, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 0, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 0, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 0, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 0, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 0, i1 + 3, i2 + 3])
)
)
+ Phi0_1
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 1, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 1, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 1, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 1, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 1, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 1, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 1, i1 + 3, i2 + 3])
)
)
+ Phi0_2
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 2, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 2, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 2, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 2, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 2, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 2, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 2, i1 + 3, i2 + 3])
)
)
+ Phi0_3
* (
Phi1_0
* (
Phi2_0 * (coefs[i0 + 3, i1 + 0, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 0, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 0, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 0, i2 + 3])
)
+ Phi1_1
* (
Phi2_0 * (coefs[i0 + 3, i1 + 1, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 1, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 1, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 1, i2 + 3])
)
+ Phi1_2
* (
Phi2_0 * (coefs[i0 + 3, i1 + 2, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 2, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 2, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 2, i2 + 3])
)
+ Phi1_3
* (
Phi2_0 * (coefs[i0 + 3, i1 + 3, i2 + 0])
+ Phi2_1 * (coefs[i0 + 3, i1 + 3, i2 + 1])
+ Phi2_2 * (coefs[i0 + 3, i1 + 3, i2 + 2])
+ Phi2_3 * (coefs[i0 + 3, i1 + 3, i2 + 3])
)
)
)
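# The nested expression above evaluates a tensor-product cubic spline at one
# point: the 4x4x4 block of coefficients around the cell (i0, i1, i2) is
# contracted with the per-dimension basis weights, i.e.
#   values[n] = sum over a, b, c in 0..3 of
#               Phi0_a * Phi1_b * Phi2_c * coefs[i0 + a, i1 + b, i2 + c]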
@njit(cache=True)
def kernel(n, a, b, orders, coefs, points, values):
x0 = points[n, 0]
x1 = points[n, 1]
x2 = points[n, 2]
# common to all units
M0 = orders[0]
start0 = a[0]
dinv0 = (orders[0] - 1.0) / (b[0] - a[0])
M1 = orders[1]
start1 = a[1]
dinv1 = (orders[1] - 1.0) / (b[1] - a[1])
M2 = orders[2]
start2 = a[2]
dinv2 = (orders[2] - 1.0) / (b[2] - a[2])
# locate the point
u0 = (x0 - start0) * dinv0
i0 = int(floor(u0))
i0 = max(min(i0, M0 - 2), 0)
t0 = u0 - i0
u1 = (x1 - start1) * dinv1
i1 = int(floor(u1))
i1 = max(min(i1, M1 - 2), 0)
t1 = u1 - i1
u2 = (x2 - start2) * dinv2
i2 = int(floor(u2))
i2 = max(min(i2, M2 - 2), 0)
t2 = u2 - i2
tp0_0 = t0 * t0 * t0
tp0_1 = t0 * t0
tp0_2 = t0
tp0_3 = 1.0
tp1_0 = t1 * t1 * t1
tp1_1 = t1 * t1
tp1_2 = t1
tp1_3 = 1.0
tp2_0 = t2 * t2 * t2
tp2_1 = t2 * t2
tp2_2 = t2
tp2_3 = 1.0
Phi0_0 = 0
Phi0_1 = 0
Phi0_2 = 0
Phi0_3 = 0
if t0 < 0:
Phi0_0 = dAd[0, 3] * t0 + Ad[0, 3]
Phi0_1 = dAd[1, 3] * t0 + Ad[1, 3]
Phi0_2 = dAd[2, 3] * t0 + Ad[2, 3]
Phi0_3 = dAd[3, 3] * t0 + Ad[3, 3]
elif t0 > 1:
Phi0_0 = (3 * Ad[0, 0] + 2 * Ad[0, 1] + Ad[0, 2]) * (t0 - 1) + (
Ad[0, 0] + Ad[0, 1] + Ad[0, 2] + Ad[0, 3]
)
Phi0_1 = (3 * Ad[1, 0] + 2 * Ad[1, 1] + Ad[1, 2]) * (t0 - 1) + (
Ad[1, 0] + Ad[1, 1] + Ad[1, 2] + Ad[1, 3]
)
Phi0_2 = (3 * Ad[2, 0] + 2 * Ad[2, 1] + Ad[2, 2]) * (t0 - 1) + (
Ad[2, 0] + Ad[2, 1] + Ad[2, 2] + Ad[2, 3]
)
Phi0_3 = (3 * Ad[3, 0] + 2 * Ad[3, 1] + Ad[3, 2]) * (t0 - 1) + (
Ad[3, 0] + Ad[3, 1] + Ad[3, 2] + Ad[3, 3]
)
else:
Phi0_0 = (
Ad[0, 0] * tp0_0 + Ad[0, 1] * tp0_1 + Ad[0, 2] * tp0_2 + Ad[0, 3] * tp0_3
)
Phi0_1 = (
            Ad[1, 0] * tp0_0 + Ad[1, 1] * tp0_1 + Ad[1, 2] * tp0_2
import argparse
import glob
import re
import sys
from typing import List
print_columns = ["name", "total_synthesis_time"]
regel_columns = ["regel_time", "regel_timeout", "regel_sketch", "regel_solution"]
all_columns = ["name", "enumerator", "timed_out", "total_synthesis_time", "regex_synthesis_time",
"first_regex_time", "enumerated_regexes", "regex_interactions",
"regex_distinguishing_time", "cap_groups_synthesis_time", "enumerated_cap_groups",
"cap_conditions_synthesis_time", "enumerated_cap_conditions",
"cap_conditions_interactions", "cap_conditions_distinguishing_time", "solution",
"nodes", "first_regex", "cap_groups", "ground_truth", "regel_time", "regel_timeout",
"regel_sketch", "regel_solution"]
exclude_instances = ["datetime2.txt", "date3.txt"] # , "color.txt", "date.txt", "date7.txt", "id1.txt", "date3.txt"]
logs = {"nopruning": "log_10_22_mtnp", "dynamic": "log_10_28_dy",
"multitree": "log_10_22_mt",
"ktree": "log_10_22_kt", "lines": "log_10_22_li",
"multi-dist": "log_10_26_muti-dist"}
class Instance:
def __init__(self, name):
global all_columns
self.values = {}
for col in all_columns:
self.values[col] = "undefined"
self.values['name'] = name
def print_table(instances: List, regel: bool):
""" Print execution information for each instance (sorted by name) """
global print_columns, regel_columns
if regel:
print_columns.extend(regel_columns)
print(", ".join(print_columns))
for idx, instance in enumerate(instances):
row = []
for col_name in print_columns:
if col_name in ["solution", "cap_groups", "ground_truth", "regel_sketch", "regel_solution", "first_regex"]:
row.append(f'"{instance.values[col_name]}"')
else:
row.append(str(instance.values[col_name]))
print(', '.join(row))
def print_only_synthesis_times(instances):
for instance in instances:
time = instance.values["total_synthesis_time"]
if time == "undefined":
time = 4000
print(time)
def print_rank(instances):
""" Print execution time for each instance (sorted by time) """
ranked = sorted(instances,
key=lambda i: 4000 if i.values["total_synthesis_time"] == 'undefined' else
i.values["total_synthesis_time"])
print("instance, time, ranking")
for idx, instance in enumerate(ranked):
time = 4000 if instance.values["total_synthesis_time"] == "undefined" else \
instance.values["total_synthesis_time"]
print(f'{instance.values["name"]}, {time}, {idx + 1}')
def print_regel_rank(instances):
ranked = sorted(instances,
key=lambda i: 4000 if i.values["regel_time"] == 'undefined' else
i.values["regel_time"])
print("instance, time, ranking")
for idx, instance in enumerate(ranked):
time = 4000 if instance.values["regel_time"] == "undefined" else instance.values["regel_time"]
print(f'{instance.values["name"]}, {time}, {idx + 1}')
def print_compare_times():
global logs
instances = {}
for log in logs:
log_files = glob.glob(logs[log] + "/*.txt")
instances[log] = []
for log_file in log_files:
instance = read_log(log_file)
if instance is not None:
instances[log].append(instance)
instances[log] = sorted(instances[log], key=lambda i: i.values['name'])
columns = list(logs.keys())
print(", ".join(["instance"] + columns))
# get number of instances from any list in the dictionary
num_instances = len(next(iter(instances.values())))
for idx in range(num_instances):
row = []
instance_name = next(iter(instances.values()))[idx].values["name"]
row.append(instance_name)
for c in columns:
time = instances[c][idx].values["total_synthesis_time"]
if time == "undefined":
time = 4000
row.append(time)
print(", ".join(map(str, row)))
def print_count_solved(instances: List):
count = 0
for instance in instances:
if instance.values["solution"] != 'No solution' and instance.values["solution"] != 'undefined':
count += 1
print(count)
def print_count_solved_all(instances: List):
instances = list(filter(lambda i: i.values["solution"] != 'No solution'
and i.values["solution"] != 'undefined', instances))
count_3600 = 0
count_60 = 0
count_10 = 0
for instance in instances:
if instance.values["first_regex_time"] < 3600:
count_3600 += 1
if instance.values["first_regex_time"] < 60:
count_60 += 1
if instance.values["first_regex_time"] < 10:
count_10 += 1
print(count_10, count_60, count_3600)
def print_count_not_timeout(instances: List):
count = 0
for instance in instances:
if not instance.values['timed_out']:
count += 1
print(count)
def print_count_not_timeout_all(instances: List):
instances = list(filter(lambda i: not i.values['timed_out'], instances))
count_3600 = 0
count_60 = 0
count_10 = 0
for instance in instances:
if instance.values["total_synthesis_time"] < 3600:
count_3600 += 1
if instance.values["total_synthesis_time"] < 60:
count_60 += 1
if instance.values["total_synthesis_time"] < 10:
count_10 += 1
print(count_10, count_60, count_3600)
def print_regel_count_not_timeout_all(instances):
instances = list(filter(lambda i: not i.values['regel_timeout'], instances))
count_3600 = 0
count_60 = 0
count_10 = 0
for instance in instances:
if instance.values["regel_time"] < 3600:
count_3600 += 1
if instance.values["regel_time"] < 60:
count_60 += 1
if instance.values["regel_time"] < 10:
count_10 += 1
print(count_10, count_60, count_3600)
def main():
parser = argparse.ArgumentParser(description='Validations Synthesizer tester',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('log_dir', metavar='DIR', type=str, help="Logs directory", default='')
parser.add_argument('-r', '--regel-log-dir', metavar='DIR', type=str,
help="Regel logs directory", default='')
parser.add_argument('--rank', action="store_true", help="Rank instances according to synthesis time")
parser.add_argument('--count-solved', action="store_true",
help="Count number of instances that returned a solution (time out or not).")
parser.add_argument('--only-synthesis-times', action="store_true",
help="Print only the synthesis time for each instance.")
parser.add_argument('--count-solved-all', action="store_true",
help="Count number of instances that returned a solution (time out or not) in 10, 60 and 3600 "
"seconds.")
parser.add_argument('--count-not-timeout', action="store_true",
help="Count number of instances that did not time out.")
parser.add_argument('--count-not-timeout-all', action="store_true",
help="Count number of instances that did not time out in 10, 60 and 3600 seconds.")
parser.add_argument('--regel-count-not-timeout-all', action="store_true",
help="Count number of instances that did not time out with REGEL in 10, 60 and 3600 seconds.")
parser.add_argument('--rank-regel', action="store_true", help="Make REGEL time ranking")
parser.add_argument('--compare-times', action="store_true",
help="Make table comparing the synthesis time for different methods")
args = parser.parse_args()
log_dir = args.log_dir
regel_log_dir = args.regel_log_dir
log_files = glob.glob(log_dir + "/*.txt")
instances = []
for log_file in log_files:
instance = read_log(log_file)
if instance is not None:
instances.append(instance)
if len(regel_log_dir) > 0:
for instance in instances:
read_regel_log(instance, regel_log_dir)
instances = sorted(instances, key=lambda i: i.values['name'])
if args.rank:
print_rank(instances)
elif args.rank_regel:
assert len(regel_log_dir) > 0, "please indicate REGEL logs directory"
print_regel_rank(instances)
elif args.compare_times:
print_compare_times()
elif args.only_synthesis_times:
print_only_synthesis_times(instances)
elif args.count_solved:
print_count_solved(instances)
elif args.count_solved_all:
print_count_solved_all(instances)
elif args.count_not_timeout:
print_count_not_timeout(instances)
elif args.count_not_timeout_all:
print_count_not_timeout_all(instances)
elif args.regel_count_not_timeout_all:
assert len(regel_log_dir) > 0, "please indicate REGEL logs directory"
print_regel_count_not_timeout_all(instances)
else:
print_table(instances, len(regel_log_dir) > 0)
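# Example invocations (script and REGEL-log directory names are hypothetical):
#   python parse_logs.py log_10_22_mt --rank
#   python parse_logs.py log_10_22_mt -r regel_logs --count-not-timeout-all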
def read_regel_log(instance, regel_log_dir):
try:
with open(regel_log_dir + "/" + instance.values['name'] + "-1") as f:
for line in f:
if "Sketch" in line:
regex = r"Sketch: (.+)"
m = re.search(regex, line)
if m is not None:
instance.values["regel_sketch"] = m[1]
elif "Learned program" in line:
regex = r"Learned program: (.+): (?:\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values["regel_solution"] = m[1]
elif "Total time" in line:
regex = r"Total time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['regel_time'] = float(m[1])
instance.values['regel_timeout'] = False
except IOError:
try:
with open(regel_log_dir + "/" + instance.values['name'] + "-b") as f:
for line in f:
if "Learned program" in line:
regex = r"Learned program: (.+): (?:\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values["regel_solution"] = m[1]
elif "Total time" in line:
regex = r"Total time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['regel_time'] = float(m[1])
instance.values['regel_timeout'] = False
except IOError:
print("could not open", regel_log_dir + "/" + instance.values['name'] + "-1", file=sys.stderr)
def read_log(log_file):
instance_name = list(filter(None, log_file.split('/')))[-1]
for excluded in exclude_instances:
if excluded in instance_name:
return None
instance = Instance(instance_name)
with open(log_file) as f:
regex_synthesis = False
cap_groups_synthesis = False
cap_conditions_synthesis = False
solution_print = False
for line in f:
if "Enumerator" in line:
regex = "Enumerator: (.+)"
m = re.search(regex, line)
if m is not None:
instance.values['enumerator'] = m[1]
elif "Terminated" in line:
regex = "Terminated: (.+)"
m = re.search(regex, line)
if m is not None:
instance.values['timed_out'] = m[1] == 'True'
elif "Elapsed time" in line:
regex = r"Elapsed time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['total_synthesis_time'] = float(m[1])
elif "Time per depth" in line:
regex = "Time per depth: (.+)"
m = re.search(regex, line)
if m is not None:
instance.values['per_depth_times'] = m[1]
elif "Regex synthesis" in line:
regex_synthesis = True
continue
elif "Capturing groups synthesis" in line:
regex_synthesis = False
cap_groups_synthesis = True
continue
elif "Capturing conditions synthesis" in line:
cap_groups_synthesis = False
cap_conditions_synthesis = True
continue
elif "First regex:" in line:
regex = r"First regex: (.+)"
m = re.search(regex, line)
if m is not None:
instance.values['first_regex'] = m[1]
elif "Solution" in line:
cap_conditions_synthesis = False
solution_print = True
regex = r"Solution: (.+)"
m = re.search(regex, line)
if m is not None:
instance.values['solution'] = m[1]
continue
elif "No solution" in line:
cap_conditions_synthesis = False
solution_print = True
instance.values['solution'] = 'No solution'
instance.values['cap_groups'] = None
continue
elif regex_synthesis:
if "Regex time" in line:
regex = r"Regex time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['regex_synthesis_time'] = float(m[1])
elif "First regex time" in line:
regex = r"First regex time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['first_regex_time'] = float(m[1])
elif "Enumerated" in line:
regex = r"Enumerated: (\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['enumerated_regexes'] = int(m[1])
elif "Interactions" in line:
regex = r"Interactions: (\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['regex_interactions'] = int(m[1])
elif "Distinguish time" in line:
regex = r"Distinguish time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['regex_distinguishing_time'] = float(m[1])
elif cap_groups_synthesis:
if "Cap. groups time" in line:
regex = r"Cap. groups time: (\d+\.\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['cap_groups_synthesis_time'] = float(m[1])
elif "Enumerated" in line:
regex = r"Enumerated: (\d+)"
m = re.search(regex, line)
if m is not None:
instance.values['enumerated_cap_groups'] = int(m[1])
the form in order to minimize fetching of data. If the query parameter is omitted all variables are fetched. If the query parameter contains non-existent variable names, the variable names are ignored.
:param bool deserialize_values: Determines whether serializable variable values (typically variables that store custom Java objects) should be deserialized on server side (default true). If set to true, a serializable variable will be deserialized on server side and transformed to JSON using [Jackson's](http://jackson.codehaus.org/) POJO/bean property introspection feature. Note that this requires the Java classes of the variable value to be on the REST API's classpath. If set to false, a serializable variable will be returned in its serialized format. For example, a variable that is serialized as XML will be returned as a JSON string containing XML. **Note**: While true is the default value for reasons of backward compatibility, we recommend setting this parameter to false when developing web applications that are independent of the Java process applications deployed to the engine.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: dict(str, VariableValueDto)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_start_form_variables_with_http_info(id, **kwargs) # noqa: E501
def get_start_form_variables_with_http_info(self, id, **kwargs): # noqa: E501
"""Get Start Form Variables # noqa: E501
Retrieves the start form variables for a process definition (only if they are defined via the [Generated Task Form](https://docs.camunda.org/manual/7.13/user-guide/task-forms/#generated-task-forms) approach). The start form variables take form data specified on the start event into account. If form fields are defined, the variable types and default values of the form fields are taken into account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_start_form_variables_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the process definition to retrieve the variables for. (required)
        :param str variable_names: A comma-separated list of variable names. Allows restricting the list of requested variables to the variable names in the list. It is best practice to restrict the list of variables to the variables actually required by the form in order to minimize fetching of data. If the query parameter is omitted all variables are fetched. If the query parameter contains non-existent variable names, the variable names are ignored.
:param bool deserialize_values: Determines whether serializable variable values (typically variables that store custom Java objects) should be deserialized on server side (default true). If set to true, a serializable variable will be deserialized on server side and transformed to JSON using [Jackson's](http://jackson.codehaus.org/) POJO/bean property introspection feature. Note that this requires the Java classes of the variable value to be on the REST API's classpath. If set to false, a serializable variable will be returned in its serialized format. For example, a variable that is serialized as XML will be returned as a JSON string containing XML. **Note**: While true is the default value for reasons of backward compatibility, we recommend setting this parameter to false when developing web applications that are independent of the Java process applications deployed to the engine.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(dict(str, VariableValueDto), status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'variable_names',
'deserialize_values'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_start_form_variables" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_start_form_variables`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'variable_names' in local_var_params and local_var_params['variable_names'] is not None: # noqa: E501
query_params.append(('variableNames', local_var_params['variable_names'])) # noqa: E501
if 'deserialize_values' in local_var_params and local_var_params['deserialize_values'] is not None: # noqa: E501
query_params.append(('deserializeValues', local_var_params['deserialize_values'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/process-definition/{id}/form-variables', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, VariableValueDto)', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
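    # Illustrative usage sketch (editorial addition, not generated code): the
    # ``api`` instance, process definition id, and variable names below are
    # hypothetical; only the method name and its documented parameters come
    # from this client.
    #
    #   form_vars = api.get_start_form_variables(
    #       id='invoice:1:someProcessDefinitionId',
    #       variable_names='amount,creditor',
    #       deserialize_values=False)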
def get_start_form_variables_by_key(self, key, **kwargs): # noqa: E501
"""Get Start Form Variables # noqa: E501
Retrieves the start form variables for the latest process definition which belongs to no tenant (only if they are defined via the [Generated Task Form](https://docs.camunda.org/manual/7.13/user-guide/task-forms/#generated-task-forms) approach). The start form variables take form data specified on the start event into account. If form fields are defined, the variable types and default values of the form fields are taken into account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_start_form_variables_by_key(key, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str key: The key of the process definition (the latest version thereof) to be retrieved. (required)
:param str variable_names: A comma-separated list of variable names. Allows restricting the list of requested variables to the variable names in the list. It is best practice to restrict the list of variables to the variables actually required by the form in order to minimize fetching of data. If the query parameter is omitted all variables are fetched. If the query parameter contains non-existent variable names, the variable names are ignored.
:param bool deserialize_values: Determines whether serializable variable values (typically variables that store custom Java objects) should be deserialized on server side (default true). If set to true, a serializable variable will be deserialized on server side and transformed to JSON using [Jackson's](http://jackson.codehaus.org/) POJO/bean property introspection feature. Note that this requires the Java classes of the variable value to be on the REST API's classpath. If set to false, a serializable variable will be returned in its serialized format. For example, a variable that is serialized as XML will be returned as a JSON string containing XML. **Note**: While true is the default value for reasons of backward compatibility, we recommend setting this parameter to false when developing web applications that are independent of the Java process applications deployed to the engine.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: dict(str, VariableValueDto)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_start_form_variables_by_key_with_http_info(key, **kwargs) # noqa: E501
def get_start_form_variables_by_key_with_http_info(self, key, **kwargs): # noqa: E501
"""Get Start Form Variables # noqa: E501
Retrieves the start form variables for the latest process definition which belongs to no tenant (only if they are defined via the [Generated Task Form](https://docs.camunda.org/manual/7.13/user-guide/task-forms/#generated-task-forms) approach). The start form variables take form data specified on the start event into account. If form fields are defined, the variable types and default values of the form fields are taken into account. # noqa: E501
# Copyright (c) 2017, Teriks
# All rights reserved.
#
# pake is distributed under the following BSD 3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import os.path
import shutil
import subprocess
import sys
import os
import tempfile
import pake
import pake.process
import pake.program
import pake.util
import pake.returncodes as returncodes
import pake.conf
__all__ = ['export', 'subpake', 'SubpakeException', 'EXPORTS']
EXPORTS = dict()
"""
A dictionary object containing all current exports by name;
you are free to modify this dictionary directly.
See: :py:meth:`pake.export`, :py:meth:`pake.subpake` and :py:meth:`pake.TaskContext.subpake`.
Be careful and make sure it remains a dictionary object.
Export values must be able to **repr()** into parsable python literals.
"""
class SubpakeException(pake.process.StreamingSubprocessException):
"""
Raised upon encountering a non-zero return code from a subpake invocation.
This exception is raised from both :py:meth:`pake.subpake` and :py:meth:`pake.TaskContext.subpake`.
.. py:attribute:: cmd
Executed subpake command in list form.
.. py:attribute:: returncode
Process returncode.
.. py:attribute:: message
Optional message from the raising function, may be **None**
.. py:attribute:: filename
Filename describing the file from which the process call was initiated. (might be None)
.. py:attribute:: function_name
Function name describing the function which initiated the process call. (might be None)
.. py:attribute:: line_number
Line Number describing the line where the process call was initiated. (might be None)
"""
def __init__(self, cmd, returncode,
output=None,
output_stream=None,
message=None):
"""
:param cmd: Command in list form.
:param returncode: The command's returncode.
:param output: (Optional) All output from the command as bytes.
:param output_stream: (Optional) A file like object containing the process output, at **seek(0)**.
By providing this parameter instead of **output**, you give this object permission
to close the stream when it is garbage collected or when :py:meth:`pake.SubpakeException.write_info` is called.
:param message: Optional exception message.
"""
super().__init__(cmd=cmd,
returncode=returncode,
output=output,
output_stream=output_stream,
message=message)
def export(name, value):
"""
Exports a define that can be retrieved in subpake scripts via :py:func:`pake.Pake.get_define`.
This function can redefine the value of an existing export as well.
The :py:attr:`pake.EXPORTS` dictionary can also be manipulated directly.
Export values must be able to **repr()** into parsable python literals.
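    Example (illustrative sketch; the define name ``BUILD_MODE`` is made up, and the
    retrieval shown in the comments assumes the documented :py:func:`pake.Pake.get_define` API):
    .. code-block:: python
       pake.export('BUILD_MODE', 'debug')
       pake.subpake('lib/pakefile.py', 'build')
       # Inside lib/pakefile.py the value can then be read back, for example:
       # pk = pake.init()
       # build_mode = pk.get_define('BUILD_MODE', default='release')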
:param name: The name of the define.
:param value: The value of the define.
"""
EXPORTS[name] = value
def subpake(*args,
stdout=None,
silent=False,
ignore_errors=False,
call_exit=True,
readline=True,
collect_output=False,
collect_output_lock=None):
"""
Execute a ``pakefile.py`` script, changing directories if necessary.
This function should not be used inside tasks, use: :py:meth:`pake.TaskContext.subpake` instead.
A :py:meth:`pake.TaskContext` instance is passed into the single parameter of each task, usually named **ctx**.
:py:meth:`pake.subpake` allows similar syntax to :py:meth:`pake.TaskContext.call` for its **\*args** parameter.
    Subpake scripts do not inherit the **--jobs** argument from the parent script. If you want
to run them with multithreading enabled you need to pass your own **--jobs** argument manually.
Example:
.. code-block:: python
# These are all equivalent
pake.subpake('dir/pakefile.py', 'task_a', '-C', 'some_dir')
pake.subpake(['dir/pakefile.py', 'task_a', '-C', 'some_dir'])
# note the nested iterable containing string arguments
pake.subpake(['dir/pakefile.py', 'task_a', ['-C', 'some_dir']])
pake.subpake('dir/pakefile.py task_a -C some_dir')
:param args: The script, and additional arguments to pass to the script.
You may pass the command words as a single iterable, a string, or as
variadic arguments.
    :param stdout: The file object to write all of the pakefile's output to (defaults to :py:attr:`pake.conf.stdout`).
The pakefile's **stderr** will be redirected to its **stdout**, so the passed file object will
receive all output from the pakefile including error messages.
:param silent: Whether or not to silence all output from the subpake script.
:param ignore_errors: If this is **True**, this function will never call **exit** or throw
:py:exc:`pake.SubpakeException` if the executed pakefile returns with a
non-zero exit code. It will instead return the exit code from the
subprocess to the caller.
:param call_exit: Whether or not to print to :py:attr:`pake.conf.stderr` and immediately
call **exit** if the pakefile script encounters an error. The value
of this parameter will be disregarded when **ignore_errors=True**.
:param readline: Whether or not to use **readline** for reading process output when **ignore_errors**
                     and **silent** are **False**; this is necessary for live output in that case. When live
output to a terminal is not required, such as when writing to a file on disk, setting
                     this parameter to **False** results in more efficient writes. This parameter defaults to **True**.
:param collect_output: Whether or not to collect all subpake output to a temporary file
and then write it incrementally to the **stdout** parameter when
the process finishes. This can help prevent crashes when dealing with lots of output.
When you pass **True** to this parameter, the **readline** parameter is ignored.
See: :ref:`Output synchronization with ctx.call & ctx.subpake`
:param collect_output_lock: If you provide a lockable object such as :py:class:`threading.Lock` or
:py:class:`threading.RLock`, The subpake function will try to acquire the lock before
incrementally writing to the **stdout** parameter when **collect_output=True**.
The lock you pass is only required to implement a context manager and be usable
in a **with** statement, no methods are called on the lock. :py:meth:`pake.TaskContext.subpake`
will pass :py:attr:`pake.TaskContext.io_lock` for you if **collect_output=True**.
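    Example of output synchronization (illustrative sketch; the lock name is an
    assumption, any object usable in a **with** statement works as described above):
    .. code-block:: python
       import threading
       io_lock = threading.Lock()
       pake.subpake('sub/pakefile.py', 'build',
                    collect_output=True,
                    collect_output_lock=io_lock)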
:raises: :py:exc:`ValueError` if no command + optional command arguments are provided.
:raises: :py:exc:`FileNotFoundError` if the first argument *(the pakefile)* is not found.
:raises: :py:exc:`pake.SubpakeException` if the called pakefile script encounters an error
             and the following is true: **call_exit=False** and **ignore_errors=False**.
"""
args = pake.util.handle_shell_args(args)
if len(args) < 1:
raise ValueError('Not enough arguments provided, '
'must at least provide a pakefile.py script path as the first argument.')
script = args.pop(0)
if not os.path.isfile(script):
raise FileNotFoundError('pakefile: "{}" does not exist.'.format(script))
stdout = stdout if stdout is not None else pake.conf.stdout
script_dir = os.path.dirname(os.path.abspath(script))
try:
depth = pake.program.get_subpake_depth() + 1
except pake.program.PakeUninitializedException:
depth = 0
extra_args = ['--_subpake_depth', str(depth), '--stdin-defines']
if os.getcwd() != script_dir:
extra_args += ['--directory', script_dir]
args = [sys.executable, script] + extra_args + list(str(i) for i in args)
if ignore_errors:
return _subpake_ignore_errors(
args=args,
stdout=stdout,
silent=silent,
collect_output=collect_output,
collect_output_lock=collect_output_lock)
return _subpake_with_errors(args=args,
stdout=stdout,
silent=silent,
call_exit=call_exit,
readline=readline,
collect_output=collect_output,
collect_output_lock=collect_output_lock)
def _subpake_ignore_errors(args, stdout, silent, collect_output, collect_output_lock):
use_temp_file_for_collect = collect_output and not silent
if use_temp_file_for_collect:
p_stdout = tempfile.TemporaryFile(mode='w+', newline='\n')
elif silent:
p_stdout = subprocess.DEVNULL
else:
p_stdout = stdout
try:
with subprocess.Popen(args,
stdout=p_stdout,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True) as process:
process.stdin.write(repr(EXPORTS))
process.stdin.flush()
process.stdin.close()
try:
return process.wait()
except: # pragma: no cover
process.kill()
process.wait()
raise
finally:
if use_temp_file_for_collect:
# Rewind the temp file first
p_stdout.seek(0)
if collect_output_lock:
with collect_output_lock:
shutil.copyfileobj(p_stdout, stdout)
else:
shutil.copyfileobj(p_stdout, stdout)
p_stdout.close()
def _subpake_with_errors(args, stdout, silent, call_exit, readline, collect_output, collect_output_lock):
with subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True) as process:
process.stdin.write(repr(EXPORTS))
process.stdin.flush()
process.stdin.close()
output_copy_buffer = tempfile.TemporaryFile(mode='w+', newline='\n')
def do_collect_output(seek0_before, seek0_after):
if seek0_before:
output_copy_buffer.seek(0)
if collect_output and not silent:
if collect_output_lock:
with collect_output_lock:
shutil.copyfileobj(output_copy_buffer, stdout)
else:
shutil.copyfileobj(output_copy_buffer, stdout)
if seek0_after:
output_copy_buffer.seek(0)
try:
if not silent:
pake.util.copyfileobj_tee(process.stdout, [stdout, output_copy_buffer], readline=readline)
else:
# Only need to copy to the output_copy_buffer, for error reporting
# when silent = True
shutil.copyfileobj(process.stdout, output_copy_buffer)
# cblb/models_8bit_cblb.py
import numpy as np
from models import *
def MUX_8_1_model_ode(state, T, params):
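    """
    Right-hand side of the ODE model for the 8-to-1 multiplexer of the 8-bit CBLB.
    ``state`` packs, in order: the eight data inputs I0-I7 and the three select signals
    S0-S2, the eight intermediate outputs I*_out, the internal L_* variables of the NOT
    cells, the cell-population variables N_*, and the final output ``out`` (88 variables
    in total). ``T`` is the time argument required by the ODE-solver interface and is not
    used directly; ``params`` carries the kinetic and population-growth parameters
    unpacked below. Returns the vector of time derivatives in the same ordering as ``state``.
    """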
delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X = params
params_yes = gamma_x, n_y, theta_x, delta_x, rho_x
params_not = delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x
I0, I1, I2, I3, I4, I5, I6, I7, S0, S1, S2 = state[:11]
I0_out, I1_out, I2_out, I3_out, I4_out, I5_out, I6_out, I7_out = state[11:19]
L_I0_I0, L_I1_S2, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S1, L_I3_S2, L_I3_I3, L_I4_S0, L_I4_I4, L_I5_S0, L_I5_S2, L_I5_I5, L_I6_S0, L_I6_S1, L_I6_I6, L_I7_S0, L_I7_S1, L_I7_S2, L_I7_I7, L_I0, L_I1, L_I2, L_I3, L_I4, L_I5, L_I6, L_I7 = state[19:47]
N_I0_S0, N_I0_S1, N_I0_S2, N_I0_I0, N_I1_S0, N_I1_S1, N_I1_S2, N_I1_I1, N_I2_S0, N_I2_S1, N_I2_S2, N_I2_I2, N_I3_S0, N_I3_S1, N_I3_S2, N_I3_I3, N_I4_S0, N_I4_S1, N_I4_S2, N_I4_I4, N_I5_S0, N_I5_S1, N_I5_S2, N_I5_I5, N_I6_S0, N_I6_S1, N_I6_S2, N_I6_I6, N_I7_S0, N_I7_S1, N_I7_S2, N_I7_I7, N_I0, N_I1, N_I2, N_I3, N_I4, N_I5, N_I6, N_I7 = state[47:87]
out = state[87]
"""
I0
"""
dI0_out = 0
# yes S0: I0_S0
state_yes_I0_S0 = I0_out, S0, N_I0_S0
dI0_out += yes_cell_wrapper(state_yes_I0_S0, params_yes)
dN_I0_S0 = population(N_I0_S0, r_X)
# yes S1: I0_S1
state_yes_I0_S1 = I0_out, S1, N_I0_S1
dI0_out += yes_cell_wrapper(state_yes_I0_S1, params_yes)
dN_I0_S1 = population(N_I0_S1, r_X)
# yes S2: I0_S2
state_yes_I0_S2 = I0_out, S2, N_I0_S2
dI0_out += yes_cell_wrapper(state_yes_I0_S2, params_yes)
dN_I0_S2 = population(N_I0_S2, r_X)
# not I0: I0_I0
state_not_I0_I0 = L_I0_I0, I0_out, I0, N_I0_I0
dL_I0_I0, dd = not_cell_wrapper(state_not_I0_I0, params_not)
dI0_out += dd
dN_I0_I0 = population(N_I0_I0, r_X)
"""
I1
"""
dI1_out = 0
# yes S0: I1_S0
state_yes_I1_S0 = I1_out, S0, N_I1_S0
dI1_out += yes_cell_wrapper(state_yes_I1_S0, params_yes)
dN_I1_S0 = population(N_I1_S0, r_X)
# yes S1: I1_S1
state_yes_I1_S1 = I1_out, S1, N_I1_S1
dI1_out += yes_cell_wrapper(state_yes_I1_S1, params_yes)
dN_I1_S1 = population(N_I1_S1, r_X)
# not S2: I1_S2
state_not_I1_S2 = L_I1_S2, I1_out, S2, N_I1_S2
dL_I1_S2, dd = not_cell_wrapper(state_not_I1_S2, params_not)
dI1_out += dd
dN_I1_S2 = population(N_I1_S2, r_X)
# not I1: I1_I1
state_not_I1_I1 = L_I1_I1, I1_out, I1, N_I1_I1
dL_I1_I1, dd = not_cell_wrapper(state_not_I1_I1, params_not)
dI1_out += dd
dN_I1_I1 = population(N_I1_I1, r_X)
"""
I2
"""
dI2_out = 0
# yes S0: I2_S0
state_yes_I2_S0 = I2_out, S0, N_I2_S0
dI2_out += yes_cell_wrapper(state_yes_I2_S0, params_yes)
dN_I2_S0 = population(N_I2_S0, r_X)
# not S1: I2_S1
state_not_I2_S1 = L_I2_S1, I2_out, S1, N_I2_S1
dL_I2_S1, dd = not_cell_wrapper(state_not_I2_S1, params_not)
dI2_out += dd
dN_I2_S1 = population(N_I2_S1, r_X)
# yes S2: I2_S2
state_yes_I2_S2 = I2_out, S2, N_I2_S2
dI2_out += yes_cell_wrapper(state_yes_I2_S2, params_yes)
dN_I2_S2 = population(N_I2_S2, r_X)
# not I2: I2_I2
state_not_I2_I2 = L_I2_I2, I2_out, I2, N_I2_I2
dL_I2_I2, dd = not_cell_wrapper(state_not_I2_I2, params_not)
dI2_out += dd
dN_I2_I2 = population(N_I2_I2, r_X)
"""
I3
"""
dI3_out = 0
# yes S0: I3_S0
state_yes_I3_S0 = I3_out, S0, N_I3_S0
dI3_out += yes_cell_wrapper(state_yes_I3_S0, params_yes)
dN_I3_S0 = population(N_I3_S0, r_X)
# not S1: I3_S1
state_not_I3_S1 = L_I3_S1, I3_out, S1, N_I3_S1
dL_I3_S1, dd = not_cell_wrapper(state_not_I3_S1, params_not)
dI3_out += dd
dN_I3_S1 = population(N_I3_S1, r_X)
# not S2: I3_S2
state_not_I3_S2 = L_I3_S2, I3_out, S2, N_I3_S2
dL_I3_S2, dd = not_cell_wrapper(state_not_I3_S2, params_not)
dI3_out += dd
dN_I3_S2 = population(N_I3_S2, r_X)
# not I3: I3_I3
state_not_I3_I3 = L_I3_I3, I3_out, I3, N_I3_I3
dL_I3_I3, dd = not_cell_wrapper(state_not_I3_I3, params_not)
dI3_out += dd
dN_I3_I3 = population(N_I3_I3, r_X)
"""
I4
"""
dI4_out = 0
# not S0: I4_S0
state_not_I4_S0 = L_I4_S0, I4_out, S0, N_I4_S0
dL_I4_S0, dd = not_cell_wrapper(state_not_I4_S0, params_not)
dI4_out += dd
dN_I4_S0 = population(N_I4_S0, r_X)
# yes S1: I4_S1
state_yes_I4_S1 = I4_out, S1, N_I4_S1
dI4_out += yes_cell_wrapper(state_yes_I4_S1, params_yes)
dN_I4_S1 = population(N_I4_S1, r_X)
# yes S2: I4_S2
state_yes_I4_S2 = I4_out, S2, N_I4_S2
dI4_out += yes_cell_wrapper(state_yes_I4_S2, params_yes)
dN_I4_S2 = population(N_I4_S2, r_X)
# not I4: I4_I4
state_not_I4_I4 = L_I4_I4, I4_out, I4, N_I4_I4
dL_I4_I4, dd = not_cell_wrapper(state_not_I4_I4, params_not)
dI4_out += dd
dN_I4_I4 = population(N_I4_I4, r_X)
"""
I5
"""
dI5_out = 0
# not S0: I5_S0
state_not_I5_S0 = L_I5_S0, I5_out, S0, N_I5_S0
dL_I5_S0, dd = not_cell_wrapper(state_not_I5_S0, params_not)
dI5_out += dd
dN_I5_S0 = population(N_I5_S0, r_X)
# yes S1: I5_S1
state_yes_I5_S1 = I5_out, S1, N_I5_S1
dI5_out += yes_cell_wrapper(state_yes_I5_S1, params_yes)
dN_I5_S1 = population(N_I5_S1, r_X)
# not S2: I5_S2
state_not_I5_S2 = L_I5_S2, I5_out, S2, N_I5_S2
dL_I5_S2, dd = not_cell_wrapper(state_not_I5_S2, params_not)
dI5_out += dd
dN_I5_S2 = population(N_I5_S2, r_X)
# not I5: I5_I5
state_not_I5_I5 = L_I5_I5, I5_out, I5, N_I5_I5
dL_I5_I5, dd = not_cell_wrapper(state_not_I5_I5, params_not)
dI5_out += dd
dN_I5_I5 = population(N_I5_I5, r_X)
"""
I6
"""
dI6_out = 0
# not S0: I6_S0
state_not_I6_S0 = L_I6_S0, I6_out, S0, N_I6_S0
dL_I6_S0, dd = not_cell_wrapper(state_not_I6_S0, params_not)
dI6_out += dd
dN_I6_S0 = population(N_I6_S0, r_X)
# not S1: I6_S1
state_not_I6_S1 = L_I6_S1, I6_out, S1, N_I6_S1
dL_I6_S1, dd = not_cell_wrapper(state_not_I6_S1, params_not)
dI6_out += dd
dN_I6_S1 = population(N_I6_S1, r_X)
# yes S2: I6_S2
state_yes_I6_S2 = I6_out, S2, N_I6_S2
dI6_out += yes_cell_wrapper(state_yes_I6_S2, params_yes)
dN_I6_S2 = population(N_I6_S2, r_X)
# not I6: I6_I6
state_not_I6_I6 = L_I6_I6, I6_out, I6, N_I6_I6
dL_I6_I6, dd = not_cell_wrapper(state_not_I6_I6, params_not)
dI6_out += dd
dN_I6_I6 = population(N_I6_I6, r_X)
"""
I7
"""
dI7_out = 0
# not S0: I7_S0
state_not_I7_S0 = L_I7_S0, I7_out, S0, N_I7_S0
dL_I7_S0, dd = not_cell_wrapper(state_not_I7_S0, params_not)
dI7_out += dd
dN_I7_S0 = population(N_I7_S0, r_X)
# not S1: I7_S1
state_not_I7_S1 = L_I7_S1, I7_out, S1, N_I7_S1
dL_I7_S1, dd = not_cell_wrapper(state_not_I7_S1, params_not)
dI7_out += dd
dN_I7_S1 = population(N_I7_S1, r_X)
# not S2: I7_S2
state_not_I7_S2 = L_I7_S2, I7_out, S2, N_I7_S2
dL_I7_S2, dd = not_cell_wrapper(state_not_I7_S2, params_not)
dI7_out += dd
dN_I7_S2 = population(N_I7_S2, r_X)
# not I7: I7_I7
state_not_I7_I7 = L_I7_I7, I7_out, I7, N_I7_I7
dL_I7_I7, dd = not_cell_wrapper(state_not_I7_I7, params_not)
dI7_out += dd
dN_I7_I7 = population(N_I7_I7, r_X)
"""
out
"""
dout = 0
# not I0: I0
state_not_I0 = L_I0, out, I0_out, N_I0
dL_I0, dd = not_cell_wrapper(state_not_I0, params_not)
dout += dd
dN_I0 = population(N_I0, r_X)
# not I1: I1
state_not_I1 = L_I1, out, I1_out, N_I1
dL_I1, dd = not_cell_wrapper(state_not_I1, params_not)
dout += dd
dN_I1 = population(N_I1, r_X)
# not I2: I2
state_not_I2 = L_I2, out, I2_out, N_I2
dL_I2, dd = not_cell_wrapper(state_not_I2, params_not)
dout += dd
dN_I2 = population(N_I2, r_X)
# not I3: I3
state_not_I3 = L_I3, out, I3_out, N_I3
dL_I3, dd = not_cell_wrapper(state_not_I3, params_not)
dout += dd
dN_I3 = population(N_I3, r_X)
# not I4: I4
state_not_I4 = L_I4, out, I4_out, N_I4
dL_I4, dd = not_cell_wrapper(state_not_I4, params_not)
dout += dd
dN_I4 = population(N_I4, r_X)
# not I5: I5
state_not_I5 = L_I5, out, I5_out, N_I5
dL_I5, dd = not_cell_wrapper(state_not_I5, params_not)
dout += dd
dN_I5 = population(N_I5, r_X)
# not I6: I6
state_not_I6 = L_I6, out, I6_out, N_I6
dL_I6, dd = not_cell_wrapper(state_not_I6, params_not)
dout += dd
dN_I6 = population(N_I6, r_X)
# not I7: I7
state_not_I7 = L_I7, out, I7_out, N_I7
dL_I7, dd = not_cell_wrapper(state_not_I7, params_not)
dout += dd
dN_I7 = population(N_I7, r_X)
dI0, dI1, dI2, dI3, dI4, dI5, dI6, dI7, dS0, dS1, dS2 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
dstate = np.array([dI0, dI1, dI2, dI3, dI4, dI5, dI6, dI7, dS0, dS1, dS2,
dI0_out, dI1_out, dI2_out, dI3_out, dI4_out, dI5_out, dI6_out, dI7_out,
dL_I0_I0, dL_I1_S2, dL_I1_I1, dL_I2_S1, dL_I2_I2, dL_I3_S1, dL_I3_S2, dL_I3_I3, dL_I4_S0, dL_I4_I4,
dL_I5_S0, dL_I5_S2, dL_I5_I5, dL_I6_S0, dL_I6_S1, dL_I6_I6, dL_I7_S0, dL_I7_S1, dL_I7_S2, dL_I7_I7,
dL_I0, dL_I1, dL_I2, dL_I3, dL_I4, dL_I5, dL_I6, dL_I7,
                       dN_I0_S0, dN_I0_S1, dN_I0_S2, dN_I0_I0, dN_I1_S0, dN_I1_S1, dN_I1_S2, dN_I1_I1, dN_I2_S0, dN_I2_S1, dN_I2_S2, dN_I2_I2, dN_I3_S0, dN_I3_S1, dN_I3_S2, dN_I3_I3,
dN_I4_S0, dN_I4_S1, dN_I4_S2, dN_I4_I4, dN_I5_S0, dN_I5_S1, dN_I5_S2, dN_I5_I5, dN_I6_S0, dN_I6_S1, dN_I6_S2, dN_I6_I6, dN_I7_S0, dN_I7_S1, dN_I7_S2, dN_I7_I7,
dN_I0, dN_I1, dN_I2, dN_I3, dN_I4, dN_I5, dN_I6, dN_I7,
dout])
return dstate
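# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original model set): one possible way to
# integrate MUX_8_1_model_ode numerically with SciPy. The helper name and all
# numeric values below are illustrative placeholders, not calibrated
# parameters or validated initial conditions.
# ---------------------------------------------------------------------------
def _simulate_MUX_8_1_sketch():
    from scipy.integrate import odeint  # assumes SciPy is available
    # Placeholder parameters, in the order unpacked by MUX_8_1_model_ode:
    # delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, rho_x, gamma_x, theta_x, r_X
    params = (0.01, 0.1, 2.0, 1.0, 0.5, 0.1, 2.0, 0.01, 0.5, 0.1, 1.0, 0.1)
    y0 = np.zeros(88)  # 88 state variables, ordered exactly as unpacked in the model
    # A real simulation would set the inputs I0-I7 and selects S0-S2 (state[:11]) and
    # non-zero initial cell populations (the N_* entries) here.
    T = np.linspace(0, 100, 201)
    Y = odeint(MUX_8_1_model_ode, y0, T, args=(params,))
    return T, Y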
def MUX_8_1_generate_stoichiometry():
I0_out, I1_out, I2_out, I3_out, I4_out, I5_out, I6_out, I7_out = range(11, 19)
L_I0_I0, L_I1_S2, L_I1_I1, L_I2_S1, L_I2_I2, L_I3_S1, L_I3_S2, L_I3_I3, L_I4_S0, L_I4_I4, L_I5_S0, L_I5_S2, L_I5_I5, L_I6_S0, L_I6_S1, L_I6_I6, L_I7_S0, L_I7_S1, L_I7_S2, L_I7_I7, L_I0, L_I1, L_I2, L_I3, L_I4, L_I5, L_I6, L_I7 = range(19, 47)
out = 87
#
    # axis 0 (rows) ... species
    # axis 1 (columns) ... reactions
#
N = np.zeros((88, 176))
##################### I0
"""
# yes S0: I0_S0
"""
r = 0
# reaction 0
# 0 --> I0_out
N[I0_out, r] = 1
r += 1
# reaction 1
# I0_out --> 0
N[I0_out, r] = -1
r += 1
# reaction 2
# I0_out --> 0
N[I0_out, r] = -1
"""
# yes S1: I0_S1
"""
r += 1
# reaction 3
# 0 --> I0_out
N[I0_out, r] = 1
r += 1
# Source repository: mmabey/fhir.resources
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/CapabilityStatement
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import capabilitystatement
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class CapabilityStatementTests(unittest.TestCase):
def instantiate_from(self, filename):
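        """Load a CapabilityStatement fixture from the unit-test data directory.
        The directory comes from the FHIR_UNITTEST_DATADIR environment variable
        (falling back to the current directory); the JSON file is parsed, its
        resourceType is asserted, and the constructed CapabilityStatement is returned.
        """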
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("CapabilityStatement", js["resourceType"])
return capabilitystatement.CapabilityStatement(js)
def testCapabilityStatement1(self):
inst = self.instantiate_from("capabilitystatement-messagedefinition.json")
self.assertIsNotNone(
inst, "Must have instantiated a CapabilityStatement instance"
)
self.implCapabilityStatement1(inst)
js = inst.as_json()
self.assertEqual("CapabilityStatement", js["resourceType"])
inst2 = capabilitystatement.CapabilityStatement(js)
self.implCapabilityStatement1(inst2)
def implCapabilityStatement1(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].name), force_bytes("System Administrator")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value), force_bytes("<EMAIL>")
)
self.assertEqual(inst.date.date, FHIRDate("2012-01-04").date)
self.assertEqual(inst.date.as_json(), "2012-01-04")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Sample capability statement showing new MessageDefinition structure"
),
)
self.assertTrue(inst.experimental)
self.assertEqual(force_bytes(inst.fhirVersion), force_bytes("4.0.1"))
self.assertEqual(force_bytes(inst.format[0]), force_bytes("xml"))
self.assertEqual(force_bytes(inst.format[1]), force_bytes("json"))
self.assertEqual(force_bytes(inst.id), force_bytes("messagedefinition"))
self.assertEqual(
force_bytes(inst.implementation.description),
force_bytes("Acme Message endpoint"),
)
self.assertEqual(
force_bytes(inst.implementation.url),
force_bytes("http://acem.com/fhir/message-drop"),
)
self.assertEqual(force_bytes(inst.kind), force_bytes("instance"))
self.assertEqual(
force_bytes(inst.messaging[0].documentation),
force_bytes("ADT A08 equivalent for external system notifications"),
)
self.assertEqual(
force_bytes(inst.messaging[0].endpoint[0].address),
force_bytes("mllp:10.1.1.10:9234"),
)
self.assertEqual(
force_bytes(inst.messaging[0].endpoint[0].protocol.code),
force_bytes("mllp"),
)
self.assertEqual(
force_bytes(inst.messaging[0].endpoint[0].protocol.system),
force_bytes("http://terminology.hl7.org/CodeSystem/message-transport"),
)
self.assertEqual(inst.messaging[0].reliableCache, 30)
self.assertEqual(
force_bytes(inst.messaging[0].supportedMessage[0].definition),
force_bytes("MessageDefinition/example"),
)
self.assertEqual(
force_bytes(inst.messaging[0].supportedMessage[0].mode),
force_bytes("receiver"),
)
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(
force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
)
self.assertEqual(
force_bytes(inst.meta.tag[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("ACME Corporation"))
self.assertEqual(force_bytes(inst.software.name), force_bytes("EHR"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
def testCapabilityStatement2(self):
inst = self.instantiate_from("capabilitystatement-example.json")
self.assertIsNotNone(
inst, "Must have instantiated a CapabilityStatement instance"
)
self.implCapabilityStatement2(inst)
js = inst.as_json()
self.assertEqual("CapabilityStatement", js["resourceType"])
inst2 = capabilitystatement.CapabilityStatement(js)
self.implCapabilityStatement2(inst2)
def implCapabilityStatement2(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].name), force_bytes("System Administrator")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value), force_bytes("<EMAIL>")
)
self.assertEqual(
force_bytes(inst.copyright),
force_bytes("Copyright © Acme Healthcare and GoodCorp EHR Systems"),
)
self.assertEqual(inst.date.date, FHIRDate("2012-01-04").date)
self.assertEqual(inst.date.as_json(), "2012-01-04")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"This is the FHIR capability statement for the main EHR at ACME for the private interface - it does not describe the public interface"
),
)
self.assertEqual(
force_bytes(inst.document[0].documentation),
force_bytes("Basic rules for all documents in the EHR system"),
)
self.assertEqual(force_bytes(inst.document[0].mode), force_bytes("consumer"))
self.assertEqual(
force_bytes(inst.document[0].profile),
force_bytes(
"http://fhir.hl7.org/base/Profilebc054d23-75e1-4dc6-aca5-838b6b1ac81d/_history/b5fdd9fc-b021-4ea1-911a-721a60663796"
),
)
self.assertTrue(inst.experimental)
self.assertEqual(force_bytes(inst.fhirVersion), force_bytes("4.0.1"))
self.assertEqual(force_bytes(inst.format[0]), force_bytes("xml"))
self.assertEqual(force_bytes(inst.format[1]), force_bytes("json"))
self.assertEqual(force_bytes(inst.id), force_bytes("example"))
self.assertEqual(
force_bytes(inst.implementation.description),
force_bytes("main EHR at ACME"),
)
self.assertEqual(
force_bytes(inst.implementation.url), force_bytes("http://10.2.3.4/fhir")
)
self.assertEqual(
force_bytes(inst.implementationGuide[0]),
force_bytes("http://hl7.org/fhir/us/lab"),
)
self.assertEqual(
force_bytes(inst.instantiates[0]),
force_bytes("http://ihe.org/fhir/CapabilityStatement/pixm-client"),
)
self.assertEqual(
force_bytes(inst.jurisdiction[0].coding[0].code), force_bytes("US")
)
self.assertEqual(
force_bytes(inst.jurisdiction[0].coding[0].display),
force_bytes("United States of America (the)"),
)
self.assertEqual(
force_bytes(inst.jurisdiction[0].coding[0].system),
force_bytes("urn:iso:std:iso:3166"),
)
self.assertEqual(force_bytes(inst.kind), force_bytes("instance"))
self.assertEqual(
force_bytes(inst.messaging[0].documentation),
force_bytes("ADT A08 equivalent for external system notifications"),
)
self.assertEqual(
force_bytes(inst.messaging[0].endpoint[0].address),
force_bytes("mllp:10.1.1.10:9234"),
)
self.assertEqual(
force_bytes(inst.messaging[0].endpoint[0].protocol.code),
force_bytes("mllp"),
)
self.assertEqual(
force_bytes(inst.messaging[0].endpoint[0].protocol.system),
force_bytes("http://terminology.hl7.org/CodeSystem/message-transport"),
)
self.assertEqual(inst.messaging[0].reliableCache, 30)
self.assertEqual(
force_bytes(inst.messaging[0].supportedMessage[0].definition),
force_bytes("MessageDefinition/example"),
)
self.assertEqual(
force_bytes(inst.messaging[0].supportedMessage[0].mode),
force_bytes("receiver"),
)
self.assertEqual(force_bytes(inst.name), force_bytes("ACME-EHR"))
self.assertEqual(
force_bytes(inst.patchFormat[0]), force_bytes("application/xml-patch+xml")
)
self.assertEqual(
force_bytes(inst.patchFormat[1]), force_bytes("application/json-patch+json")
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("ACME Corporation"))
self.assertEqual(
force_bytes(inst.purpose),
force_bytes(
"Main EHR capability statement, published for contracting and operational support"
),
)
self.assertEqual(
force_bytes(inst.rest[0].compartment[0]),
force_bytes("http://hl7.org/fhir/CompartmentDefinition/patient"),
)
self.assertEqual(
force_bytes(inst.rest[0].documentation),
force_bytes("Main FHIR endpoint for acem health"),
)
self.assertEqual(
force_bytes(inst.rest[0].interaction[0].code), force_bytes("transaction")
)
self.assertEqual(
force_bytes(inst.rest[0].interaction[1].code), force_bytes("history-system")
)
self.assertEqual(force_bytes(inst.rest[0].mode), force_bytes("server"))
self.assertTrue(inst.rest[0].resource[0].conditionalCreate)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].conditionalDelete),
force_bytes("not-supported"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].conditionalRead),
force_bytes("full-support"),
)
self.assertFalse(inst.rest[0].resource[0].conditionalUpdate)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].documentation),
force_bytes("This server does not let the clients create identities."),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].code),
force_bytes("vread"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].documentation),
force_bytes("Only supported for patient records since 12-Dec 2012"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[2].code),
force_bytes("update"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[3].code),
force_bytes("history-instance"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[4].code),
force_bytes("create"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[5].code),
force_bytes("history-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].profile),
force_bytes(
"http://registry.fhir.org/r4/StructureDefinition/7896271d-57f6-4231-89dc-dcc91eab2416"
),
)
self.assertTrue(inst.rest[0].resource[0].readHistory)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchInclude[0]),
force_bytes("Organization"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Patient-identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].documentation),
force_bytes("Only supports search by institution MRN"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].name),
force_bytes("identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].definition),
force_bytes(
"http://hl7.org/fhir/SearchParameter/Patient-general-practitioner"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].name),
force_bytes("general-practitioner"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].type),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchRevInclude[0]),
force_bytes("Person"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].supportedProfile[0]),
force_bytes(
"http://registry.fhir.org/r4/StructureDefinition/00ab9e7a-06c7-4f77-9234-4154ca1e3347"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].type), force_bytes("Patient")
)
self.assertFalse(inst.rest[0].resource[0].updateCreate)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].versioning),
force_bytes("versioned-update"),
)
self.assertTrue(inst.rest[0].security.cors)
self.assertEqual(
force_bytes(inst.rest[0].security.description),
force_bytes("See Smart on FHIR documentation"),
)
self.assertEqual(
force_bytes(inst.rest[0].security.service[0].coding[0].code),
force_bytes("SMART-on-FHIR"),
)
self.assertEqual(
force_bytes(inst.rest[0].security.service[0].coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/restful-security-service"
),
)
self.assertEqual(force_bytes(inst.software.name), force_bytes("EHR"))
self.assertEqual(inst.software.releaseDate.date, FHIRDate("2012-01-04").date)
self.assertEqual(inst.software.releaseDate.as_json(), "2012-01-04")
self.assertEqual(
force_bytes(inst.software.version), force_bytes("0.00.020.2134")
)
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title), force_bytes("ACME EHR capability statement")
)
self.assertEqual(
force_bytes(inst.url),
force_bytes("urn:uuid:68D043B5-9ECF-4559-A57A-396E0D452311"),
)
self.assertEqual(
force_bytes(inst.useContext[0].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[0].code.system),
force_bytes("http://terminology.hl7.org/CodeSystem/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].code),
force_bytes("positive"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].system),
force_bytes("http://terminology.hl7.org/CodeSystem/variant-state"),
)
self.assertEqual(force_bytes(inst.version), force_bytes("20130510"))
def testCapabilityStatement3(self):
inst = self.instantiate_from("capabilitystatement-measure-processor.json")
self.assertIsNotNone(
inst, "Must have instantiated a CapabilityStatement instance"
)
self.implCapabilityStatement3(inst)
js = inst.as_json()
self.assertEqual("CapabilityStatement", js["resourceType"])
inst2 = capabilitystatement.CapabilityStatement(js)
self.implCapabilityStatement3(inst2)
def implCapabilityStatement3(self, inst):
self.assertEqual(force_bytes(inst.contact[0].name), force_bytes("FHIR Project"))
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("other")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(inst.date.date, FHIRDate("2016-09-16").date)
self.assertEqual(inst.date.as_json(), "2016-09-16")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Basic conformance statement for a Measure Processor Service. A server can support more functionality than defined here, but this is the minimum amount"
),
)
self.assertEqual(force_bytes(inst.fhirVersion), force_bytes("4.0.1"))
self.assertEqual(force_bytes(inst.format[0]), force_bytes("json"))
self.assertEqual(force_bytes(inst.format[1]), force_bytes("xml"))
self.assertEqual(force_bytes(inst.id), force_bytes("measure-processor"))
self.assertEqual(force_bytes(inst.kind), force_bytes("capability"))
self.assertEqual(
force_bytes(inst.name),
force_bytes("Measure Processor Service Conformance Statement"),
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7, Inc"))
self.assertEqual(
force_bytes(inst.rest[0].documentation),
force_bytes("RESTful Measure Processor Service"),
)
self.assertEqual(force_bytes(inst.rest[0].mode), force_bytes("server"))
self.assertEqual(
force_bytes(inst.rest[0].operation[0].definition),
force_bytes("OperationDefinition/Measure-evaluate-measure"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[0].name), force_bytes("evaluate-measure")
)
self.assertEqual(
force_bytes(inst.rest[0].operation[1].definition),
force_bytes("OperationDefinition/Measure-data-requirements"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[1].name),
force_bytes("data-requirements"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].documentation),
force_bytes(
"Read allows clients to get the logical definitions of the measures"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].documentation),
force_bytes(
"Search allows clients to filter measures based on a provided search parameter"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].profile),
force_bytes("StructureDefinition/Measure"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Measure-identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].name),
force_bytes("identifier"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Measure-status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].name),
force_bytes("status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[2].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/Measure-version"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[2].name),
force_bytes("version"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[2].type),
force_bytes("string"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].type), force_bytes("Measure")
)
self.assertTrue(inst.rest[0].security.cors)
self.assertEqual(
force_bytes(inst.rest[0].security.service[0].coding[0].code),
force_bytes("Certificates"),
)
self.assertEqual(
force_bytes(inst.rest[0].security.service[0].coding[0].system),
force_bytes(
"http://terminology.hl7.org/CodeSystem/restful-security-service"
),
)
self.assertEqual(
force_bytes(inst.software.name),
force_bytes("ACME Measure Processor Service"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url), force_bytes("http://hl7.org/fhir/measure-processor")
)
def testCapabilityStatement4(self):
inst = self.instantiate_from("capabilitystatement-terminology-server.json")
self.assertIsNotNone(
inst, "Must have instantiated a CapabilityStatement instance"
)
self.implCapabilityStatement4(inst)
js = inst.as_json()
self.assertEqual("CapabilityStatement", js["resourceType"])
inst2 = capabilitystatement.CapabilityStatement(js)
self.implCapabilityStatement4(inst2)
def implCapabilityStatement4(self, inst):
self.assertEqual(force_bytes(inst.contact[0].name), force_bytes("FHIR Project"))
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(inst.date.date, FHIRDate("2015-07-05").date)
self.assertEqual(inst.date.as_json(), "2015-07-05")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Basic capability statement for a Terminology Server. A server can support more fucntionality than defined here, but this is the minimum amount"
),
)
self.assertEqual(
force_bytes(inst.extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-supported-system"
),
)
self.assertEqual(
force_bytes(inst.extension[0].valueUri), force_bytes("http://loinc.org")
)
self.assertEqual(force_bytes(inst.fhirVersion), force_bytes("4.0.1"))
self.assertEqual(force_bytes(inst.format[0]), force_bytes("json"))
self.assertEqual(force_bytes(inst.format[1]), force_bytes("xml"))
self.assertEqual(force_bytes(inst.id), force_bytes("terminology-server"))
self.assertEqual(force_bytes(inst.kind), force_bytes("capability"))
self.assertEqual(
force_bytes(inst.name),
force_bytes("Terminology Service Capability Statement"),
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7, Inc"))
self.assertEqual(
force_bytes(inst.rest[0].documentation),
force_bytes("RESTful Terminology Server"),
)
self.assertEqual(force_bytes(inst.rest[0].mode), force_bytes("server"))
self.assertEqual(
force_bytes(inst.rest[0].operation[0].definition),
force_bytes("OperationDefinition/ValueSet-expand"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[0].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[0].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[0].name), force_bytes("expand")
)
self.assertEqual(
force_bytes(inst.rest[0].operation[1].definition),
force_bytes("OperationDefinition/CodeSystem-lookup"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[1].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[1].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[1].name), force_bytes("lookup")
)
self.assertEqual(
force_bytes(inst.rest[0].operation[2].definition),
force_bytes("OperationDefinition/ValueSet-validate-code"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[2].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[2].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[2].name), force_bytes("validate-code")
)
self.assertEqual(
force_bytes(inst.rest[0].operation[3].definition),
force_bytes("OperationDefinition/ConceptMap-translate"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[3].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[3].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[3].name), force_bytes("translate")
)
self.assertEqual(
force_bytes(inst.rest[0].operation[4].definition),
force_bytes("OperationDefinition/ConceptMap-closure"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[4].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[4].extension[0].valueCode),
force_bytes("SHOULD"),
)
self.assertEqual(
force_bytes(inst.rest[0].operation[4].name), force_bytes("closure")
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].documentation),
force_bytes(
"Read allows clients to get the logical definitions of the value sets"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[0].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].documentation),
force_bytes("Search allows clients to find value sets on the server"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].interaction[1].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].profile),
force_bytes("StructureDefinition/ValueSet"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ValueSet-date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].name),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[0].type),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ValueSet-name"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].name),
force_bytes("name"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[1].type),
force_bytes("string"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[2].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ValueSet-reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[2].name),
force_bytes("reference"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[2].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[3].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ValueSet-status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[3].name),
force_bytes("status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[3].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[4].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ValueSet-url"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[4].name),
force_bytes("url"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[4].type),
force_bytes("uri"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[5].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ValueSet-version"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[5].name),
force_bytes("version"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].searchParam[5].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[0].type), force_bytes("ValueSet")
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[0].code),
force_bytes("read"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[0].documentation),
force_bytes(
"Read allows clients to get the logical definitions of the concept maps"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[0].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[0].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[1].code),
force_bytes("search-type"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[1].documentation),
force_bytes("Search allows clients to find concept maps on the server"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[1].extension[0].url),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/capabilitystatement-expectation"
),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].interaction[1].extension[0].valueCode),
force_bytes("SHALL"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].profile),
force_bytes("StructureDefinition/ConceptMap"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[0].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ConceptMap-date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[0].name),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[0].type),
force_bytes("date"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[1].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ConceptMap-name"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[1].name),
force_bytes("name"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[1].type),
force_bytes("string"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[2].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ConceptMap-status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[2].name),
force_bytes("status"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[2].type),
force_bytes("token"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[3].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ConceptMap-source"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[3].name),
force_bytes("source"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[3].type),
force_bytes("uri"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[4].definition),
force_bytes("http://hl7.org/fhir/SearchParameter/ConceptMap-target"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[4].name),
force_bytes("target"),
)
self.assertEqual(
force_bytes(inst.rest[0].resource[1].searchParam[4].type),
force_bytes("uri"),
)
from abc import ABC, abstractmethod
from threading import Timer, Lock, Thread
import serial
import io
import logging
import json
import re
import queue
import time
import RPi.GPIO as GPIO
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
class PhysicalThing(object):
_logger = logging.getLogger(__name__)
def __init__(self, endpoint=None, thingName=None, rootCAPath=None, certificatePath=None, privateKeyPath=None, region=None, device=None, devices=None):
''' Initialize connection to AWS IOT shadow service '''
self._eventQueue = queue.Queue()
self._localShadow = dict() # dictionary of local property values
self._propertyHandlers = dict() # dictionary to set which device handles which property values
self._shadowHandler = self._iotConnect(endpoint, thingName, rootCAPath, certificatePath, privateKeyPath, region)
if device is not None and devices is not None:
self._logger.debug('Arguments for both device and devices have been provided. Normal usage is one or the other')
if device is not None:
self.registerDevice(device)
if devices is not None:
for d in devices:
self.registerDevice(d)
def _iotConnect(self, endpoint, thingName, rootCAPath, certificatePath, privateKeyPath, region):
''' Establish connection to the AWS IOT service '''
# Init AWSIoTMQTTShadowClient
myAWSIoTMQTTShadowClient = None
myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient('pyASHdenTV')
myAWSIoTMQTTShadowClient.configureEndpoint(endpoint, 8883)
myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTShadowClient configuration
myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10) # 10 sec
myAWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect to AWS IoT
myAWSIoTMQTTShadowClient.connect()
# Create a deviceShadow with persistent subscription
deviceShadowHandler = myAWSIoTMQTTShadowClient.createShadowHandlerWithName(thingName, True)
# Delete shadow JSON doc
deviceShadowHandler.shadowDelete(self._deleteCallback, 5)
# Listen on deltas
deviceShadowHandler.shadowRegisterDeltaCallback(self._deltaCallback)
return deviceShadowHandler
def registerDevice(self, device):
''' Register a device as the handler for the set of properties that the device implements '''
for property in device.properties:
if property in self._localShadow:
self._logger.warn('{0} is trying to register {1} which is a property that is already in use.'.format(device.__name__, property))
self._localShadow[property] = device.properties[property]
self._propertyHandlers[property] = device
device.start(self._eventQueue)
def _deleteCallback(self, payload, responseStatus, token):
''' Log result when a request to delete the IOT shadow has been made '''
if responseStatus == 'accepted':
self._logger.info("Delete request " + token + " accepted!")
return
self._logger.warn({
            'timeout': "Delete request " + token + " timed out!",
            'rejected': "Delete request " + token + " rejected!"
        }.get(responseStatus, "Delete request with token " + token + " contained unexpected response status " + responseStatus))
def _updateCallback(self, payload, responseStatus, token):
''' Log result when a request has been made to update the IOT shadow '''
if responseStatus == 'accepted':
payloadDict = json.loads(payload)
            self._logger.info("Update request " + token + " accepted: " + json.dumps(payloadDict))
return
self._logger.warn({
'timeout': "Update request " + token + " timed out!",
'rejected': "Update request " + token + " was rejected!"
        }.get(responseStatus, "Update request " + token + " contained unexpected response status " + responseStatus))
def _deltaCallback(self, payload, responseStatus, token):
''' Receive an delta message from IOT service and forward update requests for every included property to the event queue '''
print ('Delta message received with content: {0}'.format(payload))
payloadDict = json.loads(payload)
for property in payloadDict['state']:
self._logger.info('Delta Message: processing item [{0}][{1}]'.format(property, payloadDict['state'][property]))
self._eventQueue.put({'source': '__thing__', 'action': 'UPDATE', 'property': property, 'value': payloadDict['state'][property] })
def onChange(self, updatedProperties):
return None
def start(self):
self._main()
def _main(self):
while True:
messages = [ self._eventQueue.get() ]
self._eventQueue.task_done()
''' A new message has come in but it may be a batch of updates so wait for a short time and then read all pending messages '''
time.sleep(0.1)
try:
while True:
messages.append( self._eventQueue.get_nowait())
self._eventQueue.task_done()
except queue.Empty:
pass
''' Process all received messages '''
updatedProperties = dict()
for message in messages:
if message['action'] == 'EXIT':
''' If an EXIT message is received then stop processing messages and exit the main thing loop '''
return
if message['action'] == 'UPDATE':
if message['source'] == '__thing__':
''' Update is from IOT service. Determine which device supports the updated property and send an update request to it '''
self._propertyHandlers[message['property']].updateDevice(message['property'], message['value'])
else:
''' Update is from device. Add it to updatedProperties '''
updatedProperties[message['property']] = message['value']
localPropertyChanges = self.onChange(updatedProperties)
if localPropertyChanges:
for k, v in localPropertyChanges:
self._propertyHandlers[k].updateDevice(k,v)
''' If there are properties to report to the IOT service, send an update message '''
updateNeeded = False
payloadDict = { 'state': { 'reported': {}, 'desired': {} } }
for property, value in updatedProperties.items():
if self._localShadow[property] != value:
print ('IOT UPDATED: [{0}:{1}]'.format(property, value))
updateNeeded = True
payloadDict['state']['reported'] = updatedProperties
payloadDict['state']['desired'] = updatedProperties
if updateNeeded:
self._shadowHandler.shadowUpdate(json.dumps(payloadDict), self._updateCallback, 5)
class PhysicalDevice(ABC):
''' Device that makes up part of an IOT thing '''
_logger = logging.getLogger(__name__)
def __init__(self, name = None, stream = None, properties = None, eol='\n', timeout=5, synchronous=False):
''' Initialize device driver and set it to receive updates from the eventQueue '''
self._stream = stream
self._eol = eol
self._timeout = timeout
self._synchronous = synchronous
self.properties = properties # dictionary of the properties and starting values for device
self.__name__ = name if name is not None else self.__class__.__name__
self._deviceQueue = queue.Queue()
self.readlock = Lock()
self._waitFor = None # Are we waiting for a specific value from the device
self._exit = False # Set when a request has been made to exit the device driver
def __del__(self):
self.close()
def start(self, eventQueue):
self._eventQueue = eventQueue
# Starting event loops
_threadWrite = Thread(target=self._writeLoop)
_threadWrite.start()
# If device is asynchronous, start an independent read thread
if not self._synchronous:
_threadRead = Thread(target=self._readLoop)
_threadRead.start()
def updateDevice(self, property, value):
''' Send message to device to tell it to update one of its property values '''
self._deviceQueue.put({'source': '__thing__', 'action': 'UPDATE', 'property': property, 'value': value })
def updateThing(self, property, value):
''' Send message to thing telling it to update its thing shadow to reflect the device's reported state '''
self._eventQueue.put({'source': self.__name__, 'action': 'UPDATE', 'property': property, 'value': value })
# update local property value
self.properties[property] = value
def exit(self):
''' Shut down device driver '''
self._exit = True
self._deviceQueue.put({'action': 'EXIT'})
@classmethod
def deviceToProperty(cls, property, regex):
def decorateinterface(func):
transform = getattr(func, '__deviceToProperty__', {})
cre = re.compile(regex)
transform[cre] = (property, func)
func.__deviceToProperty__ = transform
return func
return decorateinterface
@classmethod
def propertyToDevice(cls, property, cmd):
def decorateinterface(func):
transform = getattr(func, '__propertyToDevice__', {})
transform[property] = (cmd, func)
func.__propertyToDevice__ = transform
return func
return decorateinterface
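# Illustrative sketch (not part of this module) of how a concrete driver could use the
# two decorators above; the class name, property name, regex and command format below
# are assumptions:
#
#   class ExampleReceiver(PhysicalDevice):
#
#       @PhysicalDevice.deviceToProperty('powerState', r'^PWR(ON|STANDBY)$')
#       def powerStateFromDevice(self, property, value):
#           # the regex match group is translated into the property value
#           return 'ON' if value == 'ON' else 'OFF'
#
#       @PhysicalDevice.propertyToDevice('powerState', 'PWR{0}')
#       def powerStateToDevice(self, value):
#           # the returned string is substituted into the command sent to the device
#           return 'ON' if value == 'ON' else 'STANDBY'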
@classmethod
def _deviceToProperty(cls, value):
for supercls in cls.__mro__: # This makes inherited Appliances work
for method in supercls.__dict__.values():
d2pList = getattr(method, '__deviceToProperty__', {})
for cre, (property, method) in d2pList.items():
match = cre.match(value)
if match:
return (property, method, match)
return None
@classmethod
def _propertyToDevice(cls, property):
for supercls in cls.__mro__: # This makes inherited Appliances work
for method in supercls.__dict__.values():
p2dList = getattr(method, '__propertyToDevice__', {})
if p2dList and property in p2dList:
return p2dList.get(property)
def _readLoop(self):
''' Main event loop for reading from device '''
print ('Starting {0} readLoop'.format(self.__name__))
while not self._exit:
val = self.read()
if val:
#print ('{0}:[{1}]'.format(self.__name__, val.replace('\r','\\r')))
self._processDeviceResponse(val)
def _processDeviceResponse(self, val):
ret = self._deviceToProperty(val) # Retrieve appropriate handler to translate device value into property value
if ret:
(property, method, match) = ret
if type(property) is not list: property = [ property ]
for i in range(len(property)):
# Extract out each match group and send to method to get it translated from the value from the device to the property value
mval = match.group(i+1)
xval = method(self, property[i], mval)
if self.properties[property[i]] != xval:
# Send updated property to Thing
self.updateThing(property[i], xval)
# else:
# print ('{0}:[{1}] Ignored'.format(self.__name__, val.replace('\r','\\r')))
def _writeLoop(self):
''' Main event loop for writing to device '''
print ('Starting {0} writeLoop'.format(self.__name__))
while not self._exit:
try:
# Wait for ready state to be reached
while not self.ready():
print ('{0} Sleeping ...'.format(self.__name__))
time.sleep(5)
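# Device not ready: raise queue.Empty so control falls through to the except block below and a status query is sent instead of a write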
raise queue.Empty
message = self._deviceQueue.get(block=True, timeout=5)
self._deviceQueue.task_done()
if message['action'].upper() == 'EXIT':
return
elif message['action'].upper() == 'UPDATE':
print ('IOT requests [{0}:{1}]'.format(message['property'], message['value']))
ret = self._propertyToDevice(message['property'])
if ret:
(cmd, method) = ret
# Send updated property to device
val = self.write(cmd.format(method(self,message['value'])))
# If device is synchronous, it likely returned a response from the command we just sent
if val:
# If so, process it
self._processDeviceResponse(val)
else:
self._logger.warn('{0} has no property that matches {1}'.format(self.__name__,message['property']))
except queue.Empty:
# If nothing waiting to be written or the device is not ready, send a query to get current device status
qs = self.queryStatus()
if qs:
# Get the query to send. If the query is a list, process each query individually
qs = qs if type(qs) is list else [ qs ]
for q in qs:
val = self.write(q)
import os
import unittest
import pikepdf
import pdf_preflight.rules as rules
import pdf_preflight.profiles as profiles
pdf_folder = os.path.join(os.path.dirname(__file__), "pdfs")
class TestPdfPreflight(unittest.TestCase):
def test_profile__pdfa1a(self):
filename = os.path.join(pdf_folder, "pdfa-1a.pdf")
self.assertEqual(None, profiles.Pdfa1a.check_preflight(filename))
filename = os.path.join(pdf_folder, "standard_14_font.pdf")
with self.assertRaisesRegex(Exception, "^PDF failed Preflight checks.*") as cm:
profiles.Pdfa1a.check_preflight(filename)
expected_exception = ("PDF failed Preflight checks with the following Issues & exceptions:\n"
"ISSUES:\n"
"Rule 'OnlyEmbeddedFonts' found an error on page 1: "
"All fonts must be embedded; found non-embedded font.\n")
self.assertTrue(str(cm.exception).startswith(expected_exception))
def test_profile__pdfx1a(self):
filename = os.path.join(pdf_folder, "pdfx-1a.pdf")
self.assertEqual(None, profiles.Pdfx1a.check_preflight(filename))
filename = os.path.join(pdf_folder, "fails_pdfx.pdf")
with self.assertRaisesRegex(Exception, "^PDF failed Preflight checks.*") as cm:
profiles.Pdfx1a.check_preflight(filename)
expected_exception = ("PDF failed Preflight checks with the following Issues & exceptions:\n"
"ISSUES:\n"
"Rule 'InfoHasKeys' found an error in document metadata: "
"Info dict missing required key '/ModDate'\n"
"Rule 'InfoHasKeys' found an error in document metadata: "
"Info dict missing required key '/Title'\n"
"Rule 'InfoSpecifiesTrapping' found an error in document metadata: "
"Info dict missing required key '/Trapped'.\n"
"Rule 'MatchInfoEntries' found an error in document metadata: "
"Info dict missing required key '/GTS_PDFXConformance'\n"
"Rule 'MatchInfoEntries' found an error in document metadata: "
"Info dict missing required key '/GTS_PDFXVersion'\n"
"Rule 'MaxVersion' found an error in document metadata: "
"PDF version should be 1.3 or lower.\n"
"Rule 'NoRgb' found an error on page 1-100: "
"Found RGB colorspace; RGB objects are prohibited.\n"
"Rule 'NoTransparency' found an error on page 1-100: "
"Found object with transparency; transparent objects are prohibited.\n"
"Rule 'OutputIntentForPdfx' found an error in document metadata: "
"OutputIntent with subtype '/GTS_PDFX' is required but was not found.\n"
"Rule 'PdfxOutputIntentHasKeys' found an error in document metadata: "
"GTS_PDFX OutputIntent not found, assumed to be missing all required keys "
"'['/OutputConditionIdentifier', '/Info']'.\n"
"Rule 'PrintBoxes' found an error on page 1-100: "
"ArtBox or TrimBox is required, but neither was found; TrimBox is preferred.\n"
"Rule 'RootHasKeys' found an error in document metadata: "
"Root dict missing required key '/OutputIntents'\n")
self.assertTrue(str(cm.exception).startswith(expected_exception))
def test_profile__pdfx1a2003(self):
filename = os.path.join(pdf_folder, "pdfx-1a-2003.pdf")
self.assertEqual(None, profiles.Pdfx1a2003.check_preflight(filename))
filename = os.path.join(pdf_folder, "fails_pdfx.pdf")
with self.assertRaisesRegex(Exception, "^PDF failed Preflight checks.*") as cm:
profiles.Pdfx1a2003.check_preflight(filename)
expected_exception = ("PDF failed Preflight checks with the following Issues & exceptions:\n"
"ISSUES:\n"
"Rule 'InfoHasKeys' found an error in document metadata: "
"Info dict missing required key '/ModDate'\n"
"Rule 'InfoHasKeys' found an error in document metadata: "
"Info dict missing required key '/Title'\n"
"Rule 'InfoSpecifiesTrapping' found an error in document metadata: "
"Info dict missing required key '/Trapped'.\n"
"Rule 'MatchInfoEntries' found an error in document metadata: "
"Info dict missing required key '/GTS_PDFXVersion'\n"
"Rule 'MaxVersion' found an error in document metadata: "
"PDF version should be 1.4 or lower.\n"
"Rule 'NoRgb' found an error on page 1-100: "
"Found RGB colorspace; RGB objects are prohibited.\n"
"Rule 'NoTransparency' found an error on page 1-100: "
"Found object with transparency; transparent objects are prohibited.\n"
"Rule 'OutputIntentForPdfx' found an error in document metadata: "
"OutputIntent with subtype '/GTS_PDFX' is required but was not found.\n"
"Rule 'PdfxOutputIntentHasKeys' found an error in document metadata: "
"GTS_PDFX OutputIntent not found, assumed to be missing all required keys "
"'['/OutputConditionIdentifier', '/Info']'.\n"
"Rule 'PrintBoxes' found an error on page 1-100: "
"ArtBox or TrimBox is required, but neither was found; TrimBox is preferred.\n"
"Rule 'RootHasKeys' found an error in document metadata: "
"Root dict missing required key '/OutputIntents'\n")
self.assertTrue(str(cm.exception).startswith(expected_exception))
######################################################################
def test_rule__box_nesting(self):
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.BoxNesting.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "bleedbox_larger_than_mediabox.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.BoxNesting.check(pdf)
issue = issues[0]
self.assertEqual(1, issue.page)
self.assertEqual("BoxNesting", issue.rule)
self.assertEqual("BleedBox must be smaller than MediaBox", issue.desc)
def test_rule__compression_algorithms(self):
filename = os.path.join(pdf_folder, "jbig2.pdf")
with pikepdf.open(filename) as pdf:
allowed_algorithms = ["/FlateDecode", "/JBIG2Decode"]
issues = rules.CompressionAlgorithms.check(pdf, allowed_algorithms)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "jbig2.pdf")
with pikepdf.open(filename) as pdf:
allowed_algorithms = ["/FlateDecode"]
issues = rules.CompressionAlgorithms.check(pdf, allowed_algorithms)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("CompressionAlgorithms", issue.rule)
self.assertEqual("File uses unwanted compression algorithm: '/JBIG2Decode'", issue.desc)
def test_rule__document_id(self):
filename = os.path.join(pdf_folder, "pdfx-1a-subsetting.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.DocumentId.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "no_document_id.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.DocumentId.check(pdf)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("DocumentId", issue.rule)
self.assertEqual("Document ID missing.", issue.desc)
def test_rule__info_has_keys(self):
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
entries = ["/Creator", "/Producer"]
issues = rules.InfoHasKeys.check(pdf, entries)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
entries = ["/GTS_PDFXVersion"]
issues = rules.InfoHasKeys.check(pdf, entries)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("InfoHasKeys", issue.rule)
self.assertEqual("Info dict missing required key '/GTS_PDFXVersion'", issue.desc)
def test_rule__info_specifies_trapping(self):
filename = os.path.join(pdf_folder, "trapped_false.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.InfoSpecifiesTrapping.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "trapped_true.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.InfoSpecifiesTrapping.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "trapped_broken.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.InfoSpecifiesTrapping.check(pdf)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("InfoSpecifiesTrapping", issue.rule)
self.assertEqual("Value of Info entry '/Trapped' must be True or False.", issue.desc)
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.InfoSpecifiesTrapping.check(pdf)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("InfoSpecifiesTrapping", issue.rule)
self.assertEqual("Info dict missing required key '/Trapped'.", issue.desc)
def test_rule__match_info_entries(self):
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
entries = {"/Creator": r"Prawn"}
issues = rules.MatchInfoEntries.check(pdf, entries)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
entries = {"/GTS_PDFXVersion": "^PDF/X.*"}
issues = rules.MatchInfoEntries.check(pdf, entries)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("MatchInfoEntries", issue.rule)
self.assertEqual("Info dict missing required key '/GTS_PDFXVersion'", issue.desc)
filename = os.path.join(pdf_folder, "72ppi.pdf")
with pikepdf.open(filename) as pdf:
entries = {"/Creator": r"Shrimp"}
issues = rules.MatchInfoEntries.check(pdf, entries)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("MatchInfoEntries", issue.rule)
self.assertEqual("Value of Info entry '/Creator' doesn't match regex 'Shrimp'", issue.desc)
def test_rule__max_version(self):
filename = os.path.join(pdf_folder, "version_1_3.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.MaxVersion.check(pdf, "1.3")
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "version_1_3.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.MaxVersion.check(pdf, "1.4")
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "version_1_4.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.MaxVersion.check(pdf, "1.4")
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "version_1_4.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.MaxVersion.check(pdf, "1.3")
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("MaxVersion", issue.rule)
self.assertEqual("PDF version should be 1.3 or lower.", issue.desc)
def test_rule__no_filespecs(self):
filename = os.path.join(pdf_folder, "rgb.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.NoFilespecs.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "filespec_to_external_file.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.NoFilespecs.check(pdf)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("NoFilespecs", issue.rule)
self.assertEqual("Found one or more filespecs; use of filespecs to reference external files is prohibited.",
issue.desc)
def test_rule__no_rgb(self):
filename = os.path.join(pdf_folder, "gray.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.NoRgb.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "rgb.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.NoRgb.check(pdf)
issue = issues[0]
self.assertEqual(1, issue.page)
self.assertEqual("NoRgb", issue.rule)
self.assertEqual("Found RGB colorspace; RGB objects are prohibited.",
issue.desc)
def test_rule__no_transparency(self):
filename = os.path.join(pdf_folder, "gray.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.NoTransparency.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "transparency.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.NoTransparency.check(pdf)
issue = issues[0]
self.assertEqual(1, issue.page)
self.assertEqual("NoTransparency", issue.rule)
self.assertEqual("Found object with transparency; transparent objects are prohibited.",
issue.desc)
def test_rule__only_embedded_fonts(self):
# pass a file with embedded fonts that don't have subsets
filename = os.path.join(pdf_folder, "pdfx-1a-no-subsetting.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.OnlyEmbeddedFonts.check(pdf)
self.assertEqual(None, issues)
# pass a file with embedded fonts that do have subsets
filename = os.path.join(pdf_folder, "pdfx-1a-subsetting.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.OnlyEmbeddedFonts.check(pdf)
self.assertEqual(None, issues)
# fail a file with a standard font that's not embedded
filename = os.path.join(pdf_folder, "standard_14_font.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.OnlyEmbeddedFonts.check(pdf)
issue = issues[0]
self.assertEqual(1, issue.page)
self.assertEqual("OnlyEmbeddedFonts", issue.rule)
self.assertEqual("All fonts must be embedded; found non-embedded font.", issue.desc)
def test_rule__output_intent_for_pdfx(self):
filename = os.path.join(pdf_folder, "pdfx-1a-subsetting.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.OutputIntentForPdfx.check(pdf)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "two_outputintents.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.OutputIntentForPdfx.check(pdf)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("OutputIntentForPdfx", issue.rule)
self.assertEqual("Exactly one OutputIntent with subtype '/GTS_PDFX' is required; found multiple.",
issue.desc)
filename = os.path.join(pdf_folder, "version_1_4.pdf")
with pikepdf.open(filename) as pdf:
issues = rules.OutputIntentForPdfx.check(pdf)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("OutputIntentForPdfx", issue.rule)
self.assertEqual("OutputIntent with subtype '/GTS_PDFX' is required but was not found.", issue.desc)
def test_rule__pdfx_output_intent_has_keys(self):
filename = os.path.join(pdf_folder, "pdfx-1a-subsetting.pdf")
with pikepdf.open(filename) as pdf:
entries = ["/OutputConditionIdentifier", "/Info"]
issues = rules.PdfxOutputIntentHasKeys.check(pdf, entries)
self.assertEqual(None, issues)
filename = os.path.join(pdf_folder, "pdfx-1a-subsetting.pdf")
with pikepdf.open(filename) as pdf:
entries = ["/Cheese"]
issues = rules.PdfxOutputIntentHasKeys.check(pdf, entries)
issue = issues[0]
self.assertEqual("Metadata", issue.page)
self.assertEqual("PdfxOutputIntentHasKeys", issue.rule)
self.assertEqual("GTS_PDFX OutputIntent missing required key '/Cheese'.",
issue.desc)
filename =
plot-metrics can be specified as keyword argument 'plot_metrics'. Its value can be:
- Empty: default value used is 'price'
- plot_metrics = a String 'method' corresponding to a valid '.method()' implemented by self.fin_inst object
- if plot_metrics == 'implied_volatility', method .plot_iv() is called (implemented only for options, not
portfolios)
- plot-details can be specified as keyword argument 'plot_details'. Its value can be:
- Empty: default value used is False
- plot_details = True or False
If True, we distinguish between the single-option (a) and portfolio (b) cases:
a) Single-option case: upper and lower price boundaries are shown if .plot_single_time() method is called.
b) Portfolio case: constituent instruments' details are shown if .plot_single_time() method is called.
- surf-plot can be specified as keyword argument 'surf_plot'. Its value can be:
- Empty: default value used is False
- surf_plot = True or False
If True, .plot_surf() is called in case of Iterable time-parameter,
otherwise .plot_multi_time() is called.
- view can be specified as keyword argument 'view'. Its value can be:
- Empty: default value used is (30, -60)
- view = Tuple of two numbers
It represents the pair of (elevation angle, azimuthal angle) of the plot view
in case .plot_surf() is called.
"""
# argument parsing and plot setup
plot_metrics = self.parse_plot_metrics(**kwargs)
if plot_metrics == "implied_volatility":
imp_vol = kwargs["IV"]
time_parameter = self.time_parameter_label(imp_vol.index)
self.plot_iv(imp_vol, time_parameter)
else:
x_axis = self.x_axis(*args, **kwargs)
time_parameter, time_label_parameter = self.time_parameter(*args, **kwargs)
surf_plot = self.parse_surf_plot(**kwargs)
if is_iterable_not_string(time_parameter) and not surf_plot:
self.plot_multi_time(x_axis, time_parameter, time_label_parameter, plot_metrics)
elif is_iterable_not_string(time_parameter):
plot_view = self.parse_surf_plot_view(**kwargs)
self.plot_surf(x_axis, time_parameter, time_label_parameter, plot_metrics, plot_view)
else:
plot_details = self.parse_plot_details(**kwargs)
self.plot_single_time(x_axis, time_parameter, time_label_parameter, plot_metrics, plot_details)
# -----------------------------------------------------------------------------#
class OptionPlotter(Plotter):
"""
Plotter class to plot the price/P&L of single options. Inherits from Plotter base-class.
It implements a composition with an underlying `PlainVanillaOption` or `DigitalOption` object to access
option-specific attributes.
Attributes:
-----------
public attributes inherited from Plotter class
Public Methods:
--------
public methods inherited from Plotter class
plot_iv:
Plot FinancialInstrument Black-Scholes implied-volatility as
multiple dates line plots and as surface plot.
plot_surf:
Plot FinancialInstrument values as a surface of underlying value(s) and multiple dates.
plot_multi_time:
Plot FinancialInstrument values against underlying value(s), possibly at multiple dates.
plot_single_time:
Plot FinancialInstrument values against underlying value(s) at fixed date.
Instantiation and Usage examples:
--------
- options_plot.py
- options_plot_other_params.py
- options_plot_IV.py
- options_plot_surface.py
"""
def __init__(self, *args, **kwargs):
# calling the Plotter initializer
super(OptionPlotter, self).__init__(*args, **kwargs)
def plot_iv(self, iv, time_labels):
"""
Plot FinancialInstrument Black-Scholes implied-volatility as multiple
dates line plots and as surface plot.
Parameter 'iv' is required to be a pd.DataFrame.
Usage examples:
- options_plot_IV.py
"""
plt.rcParams["axes.prop_cycle"] = plt.cycler("color", plt.cm.Blues(np.linspace(0, 1, len(iv.index))))
#
# Line plots
#
ax = iv.T.plot(figsize=(10, 6), colormap="Blues")
# set axis labels
ax.set_xlabel(iv.columns.name, fontsize=12)
ax.set_ylabel('Black-Scholes Implied Volatility', fontsize=12)
# set title
ax.set_title("Implied volatility of a " + self.get_title(), fontsize=12)
# add the legend ('best' loc parameters places the legend in the best position automatically)
ax.legend(datetime_obj_to_date_string(iv.index), loc='best', ncol=1)
#
# Surf plot
#
# define the figure
fig = plt.figure(figsize=(15, 10))
ax = fig.gca(projection='3d')
# grid points, if needed convert dates to numeric representation for plotting
times_numeric = self.fin_inst.time_to_maturity(t=iv.index) # date_to_number(iv.index)
K_grid, time_grid = np.meshgrid(iv.columns, times_numeric)
# surface plot
surf = ax.plot_surface(K_grid,
time_grid,
iv.values.astype('float64'), rstride=2, cstride=2,
cmap=plt.cm.Blues, linewidth=0.5, antialiased=True, zorder=1)
# plot the price for different underlying values, one line for each different date
plt.gca().set_prop_cycle(None)
i = 0
for iv_at_t in iv.itertuples():
t = self.fin_inst.time_to_maturity(t=iv_at_t.Index)
ax.plot(iv.columns, np.repeat(t, repeats=len(iv.columns)), iv_at_t[1:], '-', lw=1.5,
label=datetime_obj_to_date_string(iv_at_t.Index), zorder=2 + i)
i += 1
# set y ticks
ax.set_yticks(times_numeric)
ax.set_yticklabels(time_labels)
# set axis labels
ax.set_xlabel(iv.columns.name, fontsize=12)
ax.set_ylabel(r"Date" if is_date(iv.index) else r"Time-to-Maturity", fontsize=12)
ax.set_zlabel('Black-Scholes Implied Volatility', fontsize=12)
# set title
ax.set_title("Implied volatility of a " + self.get_title(), fontsize=12)
# add the legend ('best' loc parameters places the legend in the best position automatically)
ax.legend(loc='best', ncol=1)
# add a grid to ease visualization
plt.grid(True)
# draw a colorbar for color-reference
fig.colorbar(surf, orientation="horizontal", shrink=0.5, aspect=10, pad=0.05)
# show the plot
fig.tight_layout()
plt.show()
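# For reference (inferred from the code above): 'iv' is expected to be a pd.DataFrame
# with one row per valuation date/time (index) and one column per level of the x-axis
# variable (columns, whose .name is used as the x label), holding the Black-Scholes
# implied volatilities to plot.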
def plot_surf(self, x_axis_dict, times, time_labels, plot_metrics, view):
"""
Plot FinancialInstrument values as a surface of underlying value(s) and multiple dates.
Usage examples:
- options_plot_surface.py
"""
# identifier of the x-axis
x_id = x_axis_dict.pop('x_axis', 'x-id not found')
# other x-axis parameters
sigma_axis = x_axis_dict.pop('sigma_axis', False)
r_axis = x_axis_dict.pop('r_axis', False)
# x-axis values
x = x_axis_dict[x_id]
# number of times-to-maturity considered
n_times = len(times)
plt.rcParams["axes.prop_cycle"] = plt.cycler("color", plt.cm.Blues(np.linspace(0, 1, n_times)))
# define the figure
fig = plt.figure(figsize=(15, 10))
ax = fig.gca(projection='3d')
# define a dense grid of times
# in case of dates: from the most recent valuation date to expiration date
# in case of times-to-maturity: from the biggest tau to 0 (that is, expiration)
times_dense = self.make_dense(times)
n_times_dense = len(times_dense)
# if times are dates, we convert into their numeric representation. This is needed for plotting
times_numeric = date_to_number(times)
times_dense_numeric = date_to_number(times_dense)
# precompute surface (exploiting vectorization)
surface_metrics = getattr(self.fin_inst, plot_metrics)(
**{x_id: x, 't': times_dense, 'sigma_axis': sigma_axis, 'r_axis': r_axis}, np_output=False)
# grid points, if needed convert dates to numeric representation for plotting
x_axis_grid, time_grid = np.meshgrid(surface_metrics.columns, times_dense_numeric)
# surface plot
surf = ax.plot_surface(x_axis_grid, time_grid, surface_metrics.values.astype('float64'), rstride=2, cstride=2,
cmap=plt.cm.Blues, linewidth=0.5, antialiased=True, zorder=1)
# plot the price for different underlying values, one line for each different date
plt.gca().set_prop_cycle(None)
for i in range(n_times):
ax.plot(x, np.repeat(times_numeric[i], repeats=len(x)), surface_metrics.loc[times[i], :], '-', lw=1.5,
label=time_labels[i], zorder=1 + i + 1)
# precompute emission level metrics (exploiting vectorization)
if x_id == 'S':
x_emission = self.fin_inst.get_S()
elif x_id == 'K':
x_emission = self.fin_inst.get_K()
elif x_id == 'sigma':
x_emission = self.fin_inst.get_sigma()
elif x_id == 'r':
x_emission = self.fin_inst.get_r()
emission_metrics = getattr(self.fin_inst, plot_metrics)(**{x_id: x_emission, 't': times})
emission_metrics_dense = getattr(self.fin_inst, plot_metrics)(**{x_id: x_emission, 't': times_dense})
# blue dot at original underlying level for reference
ax.plot(x_emission + np.zeros(n_times), times_numeric, emission_metrics, 'b.', ms=10,
label=r"Emission level $" + x_id + r"={:.2f}$".format(x_emission), zorder=1 + i + 2)
ax.plot(x_emission + np.zeros(n_times_dense), times_dense_numeric, emission_metrics_dense, 'b--', lw=1.5,
zorder=1 + i + 2)
# part meaningful only if the x-axis is 'S' or 'K'
if x_id in ['S', 'K']:
# plot the red payoff line for different underlying values
if plot_metrics in ['price', 'PnL']:
ax.plot(x, np.repeat(times_dense_numeric[-1], repeats=len(x)), getattr(self.fin_inst, plot_metrics)(
**{x_id: x, 'tau': 0.0, 'sigma_axis': sigma_axis, 'r_axis': r_axis}), 'r-', lw=1.5,
label=plot_metrics + r" at maturity (" + self.fin_inst.get_docstring('payoff') + r")",
zorder=1 + i + 3)
# plot a dot to highlight the strike position and a reference zero line
ax.plot(np.array([self.fin_inst.get_K()]), np.array([times_dense_numeric[-1]]), np.array([0.0]), 'k.',
ms=15,
label="Strike $K={}$".format(self.fin_inst.get_K()), zorder=1 + i + 4)
ax.plot(self.fin_inst.get_K() + np.zeros(n_times_dense), times_dense_numeric, np.zeros_like(times_dense),
'k--', lw=1.5, zorder=1 + i + 5)
# include expiration time tick
times_numeric, time_labels = self.add_time_tick_and_label(time_parameter=times,
old_time_ticks=times_numeric,
old_time_ticks_label=time_labels)
# set y ticks
ax.set_yticks(times_numeric)
ax.set_yticklabels(time_labels)
# set axis labels
ax.set_xlabel(x_id, fontsize=12)
ax.set_ylabel(r"Date" if is_date(times) else r"Time-to-Maturity", fontsize=12)
ax.set_zlabel('Black-Scholes {}'.format(plot_metrics), fontsize=12)
# set title
ax.set_title(plot_metrics + " of a " + self.get_title(), fontsize=12)
# add the legend ('best' loc parameters places the legend in the best position automatically)
ax.legend(loc='best', ncol=1)
# add a grid to ease visualization
plt.grid(True)
# draw a colorbar for color-reference
fig.colorbar(surf, orientation="horizontal", shrink=0.5, aspect=10, pad=0.05)
# set the plot view
ax.view_init(view[0], view[1])
# rotate view and invert y axis in case of dates
# for better perspective
if is_date(times):
ax.view_init(ax.elev, ax.azim + 180)
ax.invert_xaxis()
# show the plot
fig.tight_layout()
plt.show()
def plot_multi_time(self, x_axis_dict, times, time_labels, plot_metrics):
"""
Plot FinancialInstrument values against underlying value(s), possibly at multiple dates.
Usage examples:
- options_plot.py
- options_plot_other_params.py
"""
# identifier of the x-axis
x_id = x_axis_dict.pop('x_axis', 'x-id not found')
# other x-axis parameters
sigma_axis = x_axis_dict.pop('sigma_axis', False)
r_axis = x_axis_dict.pop('r_axis', False)
# x-axis values
x = x_axis_dict[x_id]
# number of times-to-maturity considered
n_times = len(times)
plt.rcParams["axes.prop_cycle"] = plt.cycler("color", plt.cm.Blues(np.linspace(0, 1, n_times)))
# define the figure
fig, ax = plt.subplots(figsize=(10, 6))
# precompute surface
# volttron/platform/vip/agent/subsystems/configstore.py
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import logging
import traceback
import os
import weakref
import fnmatch
import greenlet
import inspect
from .base import SubsystemBase
from volttron.platform.storeutils import list_unique_links, check_for_config_link
from volttron.platform.vip.agent import errors
from volttron.platform.agent.known_identities import CONFIGURATION_STORE
from collections import defaultdict
from copy import deepcopy
"""The configstore subsystem manages the agent side of the configuration store.
It is responsible for processing change notifications from the platform
and triggering the correct callbacks with the contents of a configuration.
"""
__docformat__ = 'reStructuredText'
__version__ = '1.0'
_log = logging.getLogger(__name__)
VALID_ACTIONS = set(["NEW", "UPDATE", "DELETE"])
class ConfigStore(SubsystemBase):
def __init__(self, owner, core, rpc):
self._core = weakref.ref(core)
self._rpc = weakref.ref(rpc)
self._ref_map = {} #For triggering callbacks.
self._reverse_ref_map = defaultdict(set) # For triggering callbacks.
self._store = {}
self._default_store = {}
self._callbacks = {}
self._name_map = {}
self._default_name_map = {}
self._initialized = False
self._initial_callbacks_called = False
self._process_callbacks_code_object = self._process_callbacks.__code__
def sub_factory():
return defaultdict(set)
self._subscriptions = defaultdict(sub_factory)
def onsetup(sender, **kwargs):
rpc.export(self._update_config, 'config.update')
rpc.export(self._initial_update, 'config.initial_update')
core.onsetup.connect(onsetup, self)
core.configuration.connect(self._onconfig, self)
def _onconfig(self, sender, **kwargs):
if not self._initialized:
try:
self._rpc().call(CONFIGURATION_STORE, "get_configs").get()
except errors.Unreachable as e:
_log.error("Connected platform does not support the Configuration Store feature.")
return
except errors.VIPError as e:
_log.error("Error retrieving agent configurations: {}".format(e))
return
affected_configs = {}
for config_name in self._store:
affected_configs[config_name] = "NEW"
for config_name in self._default_store:
affected_configs[config_name] = "NEW"
self._process_callbacks(affected_configs)
self._initial_callbacks_called = True
def _add_refs(self, config_name, contents):
refs = list_unique_links(contents)
self._ref_map[config_name] = refs
for ref in refs:
self._reverse_ref_map[ref].add(config_name)
def _update_refs(self, config_name, contents):
self._delete_refs(config_name)
self._add_refs(config_name, contents)
def _delete_refs(self, config_name):
#Delete refs if they exist.
old_refs = self._ref_map.pop(config_name, set())
for ref in old_refs:
reverse_ref_set = self._reverse_ref_map[ref]
reverse_ref_set.remove(config_name)
if not reverse_ref_set:
del self._reverse_ref_map[ref]
def _initial_update(self, configs, reset_name_map=True):
self._initialized = True
self._store = {key.lower(): value for (key,value) in configs.iteritems()}
if reset_name_map:
self._name_map = {key.lower(): key for key in configs.iterkeys()}
for config_name, config_contents in self._store.iteritems():
self._add_refs(config_name, config_contents)
for config_name, config_contents in self._default_store.iteritems():
if config_name not in self._store:
self._add_refs(config_name, config_contents)
def _process_links(self, config_contents, already_gathered):
if isinstance(config_contents,dict ):
for key in config_contents.keys():
value = config_contents[key]
if isinstance(value, (dict,list)):
self._process_links(value, already_gathered)
elif isinstance(value, str):
config_name = check_for_config_link(value)
if config_name is not None:
config_contents[key] = self._gather_child_configs(config_name, already_gathered)
if isinstance(config_contents,list):
for i in xrange(len(config_contents)):
value = config_contents[i]
if isinstance(value, (dict, list)):
self._process_links(value, already_gathered)
elif isinstance(value, str):
config_name = check_for_config_link(value)
if config_name is not None:
config_contents[i] = self._gather_child_configs(config_name, already_gathered)
def _gather_child_configs(self, config_name, already_gathered):
if config_name in already_gathered:
return already_gathered[config_name]
config_contents = self._store.get(config_name)
if config_contents is None:
config_contents = self._default_store.get(config_name)
config_contents = deepcopy(config_contents)
already_gathered[config_name] = config_contents
self._process_links(config_contents, already_gathered)
return config_contents
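# For reference, a stored configuration can point at another configuration via a
# "config://" link, which _gather_child_configs() inlines recursively. A sketch of
# such a parent configuration (keys and file names are assumptions):
#
#   {
#       "campus": "campus1",
#       "registry_config": "config://registry_configs/device1.csv"
#   }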
def _gather_config(self, config_name):
config_contents = self._store.get(config_name)
if config_contents is None:
config_contents = self._default_store.get(config_name)
if config_contents is None:
raise KeyError("{} not in store".format(config_name))
already_configured = {}
return self._gather_child_configs(config_name, already_configured)
def _gather_affected(self, config_name, seen_dict):
reverse_refs = self._reverse_ref_map[config_name]
for ref in reverse_refs:
if ref not in seen_dict:
seen_dict[ref] = "UPDATE"
self._gather_affected(ref, seen_dict)
def _update_config(self, action, config_name, contents=None, trigger_callback=False):
"""Called by the platform to push out configuration changes."""
#If we haven't yet grabbed the initial callback state we just bail.
if not self._initialized:
return
affected_configs = {}
#Update local store.
if action == "DELETE":
config_name_lower = config_name.lower()
if config_name_lower in self._store:
del self._store[config_name_lower]
if config_name_lower not in self._default_store:
affected_configs[config_name_lower] = "DELETE"
self._gather_affected(config_name_lower, affected_configs)
self._delete_refs(config_name_lower)
else:
affected_configs[config_name_lower] = "UPDATE"
self._gather_affected(config_name_lower, affected_configs)
self._update_refs(config_name_lower, self._default_store[config_name_lower])
if action == "DELETE_ALL":
for name in self._store:
affected_configs[name] = "DELETE"
#Just assume all default stores updated.
for name in self._default_store:
affected_configs[name] = "UPDATE"
self._ref_map = {}
self._reverse_ref_map = defaultdict(set)
self._initial_update({}, False)
if action in ("NEW", "UPDATE"):
config_name_lower = config_name.lower()
self._store[config_name_lower] = contents
self._name_map[config_name_lower] = config_name
if config_name_lower in self._default_store:
action = "UPDATE"
affected_configs[config_name_lower] = action
self._update_refs(config_name_lower, self._store[config_name_lower])
self._gather_affected(config_name_lower, affected_configs)
if trigger_callback and self._initial_callbacks_called:
self._process_callbacks(affected_configs)
if action == "DELETE":
del self._name_map[config_name_lower]
if action == "DELETE_ALL":
self._name_map.clear()
def _process_callbacks(self, affected_configs):
_log.debug("Processing callbacks for affected files: {}".format(affected_configs))
all_map = self._default_name_map.copy()
all_map.update(self._name_map)
#Always process "config" first.
if "config" in affected_configs:
self._process_callbacks_one_config("config", affected_configs["config"], all_map)
for config_name, action in affected_configs.iteritems():
if config_name == "config":
continue
self._process_callbacks_one_config(config_name, action, all_map)
def _process_callbacks_one_config(self, config_name, action, name_map):
callbacks = set()
for pattern, actions in self._subscriptions.iteritems():
if fnmatch.fnmatchcase(config_name, pattern) and action in actions:
callbacks.update(actions[action])
for callback in callbacks:
try:
if action == "DELETE":
contents = None
else:
contents = self._gather_config(config_name)
callback(name_map[config_name], action, contents)
except StandardError as e:
tb_str = traceback.format_exc()
_log.error("Problem processing callback:")
_log.error(tb_str)
def list(self):
"""Returns a list of configuration names for this agent.
:returns: Configuration names
:rtype: list
:Return Values:
A list of all the configuration names available for this agent.
"""
# Handle case where we are called during "onstart".
if not self._initialized:
try:
self._rpc().call(CONFIGURATION_STORE, "get_configs").get()
except errors.Unreachable as e:
_log.error("Connected platform does not support the Configuration Store feature.")
except errors.VIPError as e:
_log.error("Error retrieving agent configurations: {}".format(e))
all_map = self._default_name_map.copy()
all_map.update(self._name_map)
store_set = set(self._store.keys())
default_set = set(self._default_store.keys())
config_list = list(all_map[x] for x in (store_set|default_set))
config_list.sort()
return config_list
def get(self, config_name="config"):
"""Returns the contents of a configuration.
:param config_name: Name of configuration to add to store.
:type config_name: str
:returns: Configuration contents
:rtype: dict, list, or string
:Return Values:
The contents of the configuration specified.
"""
#Handle case where we are called during "onstart".
#If we fail to initialize we don't raise an exception as there still
#may be a default configuration to grab.
if not self._initialized:
try:
self._rpc().call(CONFIGURATION_STORE, "get_configs").get()
except errors.Unreachable as e:
_log.error("Connected platform does not support the Configuration Store feature.")
except errors.VIPError as e:
_log.error("Error retrieving agent configurations: {}".format(e))
config_name = config_name.lower()
return self._gather_config(config_name)
def _check_call_from_process_callbacks(self):
frame_records = inspect.stack()
try:
#Don't create any unneeded references to frame objects.
for i in xrange(1, len(frame_records)):
if self._process_callbacks_code_object is frame_records[i][0].f_code:
raise RuntimeError("Cannot request changes to the config store from a configuration callback.")
finally:
del frame_records
def set(self, config_name, contents, trigger_callback=False, send_update=True):
"""Called to set the contents of a configuration.
May not be called before the onstart phase of an agent's lifetime.
May not be called from a configuration callback; doing so will produce a runtime error.
:param config_name: Name of configuration to add to store.
:param contents: Contents of the configuration. May be a string, dictionary, or list.
:param trigger_callback: Tell the platform to trigger callbacks on the agent for this change.
:type config_name: str
:type contents: str, dict, list
:type trigger_callback: bool
"""
self._check_call_from_process_callbacks()
self._rpc().call(CONFIGURATION_STORE, "set_config", config_name, contents,
trigger_callback=trigger_callback,
send_update=send_update).get(timeout=10.0)
def set_default(self, config_name, contents):
"""Called to set the contents of a default configuration file. Default configurations are used if the
configuration store does not contain a configuration with that name.
May not be called
# common/firewall/base.py
from builtins import range
from common.neutron.base import BaseNeutronTest
from tcutils.util import get_random_name, retry
from vn_test import VNFixture
from vm_test import VMFixture
from project_test import ProjectFixture
from port_fixture import PortFixture
from firewall_rule import FirewallRuleFixture
from firewall_policy import FirewallPolicyFixture
from firewall_group import FirewallGroupFixture
from application_policy_set import ApplicationPolicySetFixture
from address_group import AddressGroupFixture
from service_group import ServiceGroupFixture
from collections import defaultdict
from collections import OrderedDict as dict
from vnc_api.vnc_api import NoIdError, BadRequest
import random
import copy
class BaseFirewallTest(BaseNeutronTest):
@classmethod
def setUpClass(cls):
cls.tags = dict(); cls.vns = dict(); cls.vms = dict()
cls.sec_groups = dict(); cls.policys = dict()
super(BaseFirewallTest, cls).setUpClass()
cls.project_name = cls.inputs.project_name
cls.domain_name = cls.inputs.domain_name
cls.vnc_h = cls.connections.orch.vnc_h
cls.api_type = 'contrail'
try:
cls.create_common_objects()
except:
cls.tearDownClass()
raise
# end setUpClass
@classmethod
def tearDownClass(cls):
cls.cleanup_common_objects()
super(BaseFirewallTest, cls).tearDownClass()
# end tearDownClass
@classmethod
def create_common_objects(cls):
''' Create tags under both global and local scope
Site: svl, blr
deployment: prod, dev
application: hr, eng
tier: web, logic, db
'''
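# Tests can then reference these fixtures as, e.g., cls.tags['global']['site']['svl']
# or cls.tags['local']['tier']['web'] (the dictionary layout follows the docstring above).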
cls.tags['global'] = defaultdict(dict)
cls.tags['local'] = defaultdict(dict)
for site in ['svl', 'blr']:
cls.tags['global']['site'][site] = cls.create_only_tag('site', site, 'global')
cls.tags['local']['site'][site] = cls.create_only_tag('site', site)
for deploy in ['prod', 'dev']:
cls.tags['global']['deployment'][deploy] = \
cls.create_only_tag('deployment', deploy, 'global')
cls.tags['local']['deployment'][deploy] = cls.create_only_tag('deployment', deploy)
for app in ['hr', 'eng']:
cls.tags['global']['application'][app] = \
cls.create_only_tag('application', app, 'global')
cls.tags['local']['application'][app] = cls.create_only_tag('application', app)
for tier in ['web', 'logic', 'db']:
cls.tags['global']['tier'][tier] = cls.create_only_tag('tier', tier, 'global')
cls.tags['local']['tier'][tier] = cls.create_only_tag('tier', tier)
@classmethod
def cleanup_common_objects(cls):
if getattr(cls, 'vms', None):
for obj in cls.vms.values():
obj.cleanUp()
if getattr(cls, 'vns', None):
for obj in cls.vns.values():
obj.cleanUp()
for scopes in cls.tags.values():
for tag_types in scopes.values():
for obj in tag_types.values():
cls.vnc_h.delete_tag(id=obj.uuid)
if getattr(cls, 'save_af', None):
cls.inputs.set_af(cls.save_af)
@classmethod
def create_only_tag(cls, tag_type, tag_value, scope='local', **kwargs):
connections = kwargs.pop('connections', None) or cls.connections
project_name = connections.project_name
domain_name = connections.domain_name
project_fqname = [domain_name, project_name]
name = get_random_name(project_name)
if scope == 'local':
parent_type = 'project'; fqname = list(project_fqname)
else:
parent_type = None; fqname = []
fqname.append(name)
vnc_h = connections.orch.vnc_h
uuid = vnc_h.check_and_create_tag(fqname, tag_type, tag_value, parent_type)
cls.logger.info('Created Tag %s - %s %s=%s'%(uuid, project_fqname
if scope == 'local' else 'global', tag_type, tag_value))
return vnc_h.read_tag(id=uuid)
def create_tag(self, *args, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
obj = self.create_only_tag(*args, **kwargs)
if kwargs.pop('cleanup', True):
self.addCleanup(self.delete_tag, obj.uuid, connections=connections)
return obj
def delete_tag(self, uuid, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
self.logger.info('Deleting Tag %s'%(uuid))
return vnc_h.delete_tag(id=uuid)
def enable_security_draft_mode(self, SCOPE1=None, SCOPE2=None,
project_fqname=None, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
if SCOPE1 == 'global':
project_fqname = None
elif SCOPE1 == 'local':
project_fqname = self.project.project_fq_name
if SCOPE2 == 'global':
self.logger.info('Enable security draft mode on global')
vnc_h.enable_security_draft_mode()
self.logger.info('Enable security draft mode on %s'%(
project_fqname if project_fqname else 'global'))
vnc_h.enable_security_draft_mode(project_fqname)
def disable_security_draft_mode(self, SCOPE1=None, SCOPE2=None,
project_fqname=None, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
retry = kwargs.get('retry', 1)
vnc_h = connections.orch.vnc_h
while retry:
try:
if SCOPE1 == 'global':
project_fqname = None
elif SCOPE1 == 'local':
project_fqname = self.project.project_fq_name
if SCOPE2 == 'global':
self.logger.info('Disable security draft mode on global')
vnc_h.disable_security_draft_mode()
self.logger.info('Disable security draft mode on %s'%(
project_fqname if project_fqname else 'global'))
vnc_h.disable_security_draft_mode(project_fqname)
break
except BadRequest as e:
retry = retry - 1
if not retry:
raise
self.sleep(5)
def discard(self, SCOPE1=None, SCOPE2=None, project_fqname=None, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
if SCOPE1 == 'global':
project_fqname = None
elif SCOPE1 == 'local':
project_fqname = self.project.project_fq_name
self.logger.info('discard security drafts on %s'%(
project_fqname if project_fqname else 'global'))
vnc_h.discard_security_draft(project_fqname)
if SCOPE1 == 'local' and SCOPE2 == 'global':
self.logger.info('discard security drafts on global')
self.sleep(kwargs.get('interval') or 2)
vnc_h.discard_security_draft()
def commit(self, SCOPE1=None, SCOPE2=None, project_fqname=None, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
if SCOPE1 == 'global':
project_fqname = None
elif SCOPE1 == 'local':
project_fqname = self.project.project_fq_name
self.logger.info('commit security drafts on %s'%(
project_fqname if project_fqname else 'global'))
vnc_h.commit_security_draft(project_fqname)
if SCOPE1 == 'local' and SCOPE2 == 'global':
self.logger.info('commit security drafts on global')
self.sleep(kwargs.get('interval') or 2)
vnc_h.commit_security_draft()
def list_security_drafts(self, SCOPE1=None, SCOPE2=None, project_fqname=None, **kwargs):
drafts = defaultdict(list)
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
objs = list()
if SCOPE1 == 'global':
project_fqname = None
elif SCOPE1 == 'local':
project_fqname = self.project.project_fq_name
if SCOPE2 == 'global':
try:
objs.append(vnc_h.list_security_drafts())
except NoIdError:
pass
try:
objs.append(vnc_h.list_security_drafts(project_fqname))
except NoIdError:
pass
for obj in objs:
for ag in obj.get_address_groups() or []:
drafts['address-group'].append(ag['to'])
for sg in obj.get_service_groups() or []:
drafts['service-group'].append(sg['to'])
for fwr in obj.get_firewall_rules() or []:
drafts['firewall-rule'].append(fwr['to'])
for fwp in obj.get_firewall_policys() or []:
drafts['firewall-policy'].append(fwp['to'])
for aps in obj.get_application_policy_sets() or []:
drafts['application-policy-set'].append(aps['to'])
return drafts
def validate_draft(self, fixtures_draft_states, SCOPE1=None,
SCOPE2=None, project_fqname=None, **kwargs):
self.logger.info('Validating drafts on SCOPE1: %s, SCOPE2: %s,'
' project: %s'%(SCOPE1, SCOPE2, project_fqname))
drafts = self.list_security_drafts(SCOPE1, SCOPE2,
project_fqname, **kwargs)
copy_of_drafts = copy.deepcopy(drafts)
if (drafts and not fixtures_draft_states) or \
(fixtures_draft_states and not drafts):
assert False, "exp %s and got %s"%(fixtures_draft_states, drafts)
# Compare fqname against states created, updated, deleted
for state, fixtures in fixtures_draft_states.items():
for fixture in fixtures:
fqname = list(fixture.fq_name)
if len(fqname) == 2:
fqname[0] = 'draft-policy-management'
else:
fqname.insert(-1, 'draft-policy-management')
d1, obj_type, d3 = self._get_obj_from_fixture(fixture)
assert fqname in drafts[obj_type]
draft_obj = fixture.get_draft()
assert draft_obj.draft_mode_state == state
drafts[obj_type].remove(fqname)
for obj_type, objs in drafts.items():
assert not objs, "Unexpected drafts %s"%drafts
self.logger.debug('Validated drafts %s'%copy_of_drafts)
def _get_port(self, uuid, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
return self.useFixture(PortFixture(uuid=uuid, connections=connections))
def _get_obj_from_fixture(self, fixture):
obj = None; object_type = None; uuid = None
if type(fixture) == VNFixture:
obj = fixture.api_vn_obj
elif type(fixture) == VMFixture:
uuid = fixture.vm_id
object_type = 'virtual-machine'
elif type(fixture) == ProjectFixture:
obj = fixture.project_obj
elif type(fixture) == PortFixture:
obj = fixture.vmi_obj
elif type(fixture) == AddressGroupFixture:
uuid = fixture.uuid
object_type = 'address-group'
elif type(fixture) == ServiceGroupFixture:
uuid = fixture.uuid
object_type = 'service-group'
elif type(fixture) == ApplicationPolicySetFixture:
uuid = fixture.uuid
object_type = 'application-policy-set'
elif type(fixture) == FirewallPolicyFixture:
uuid = fixture.uuid
object_type = 'firewall-policy'
elif type(fixture) == FirewallRuleFixture:
uuid = fixture.uuid
object_type = 'firewall-rule'
return (obj, object_type, uuid)
def add_labels(self, fixture, labels, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
obj, object_type, uuid = self._get_obj_from_fixture(fixture)
is_global = False if getattr(labels[0], 'parent_type', None) == 'project' else True
tags = [label.tag_value for label in labels]
vnc_h.add_labels(tags, is_global, obj, object_type, uuid)
self.logger.info('Add %s labels %s to %s'%('global' if is_global
else '', tags, obj.uuid if obj else uuid))
self.addCleanup(self.delete_labels, fixture, labels, **kwargs)
def delete_labels(self, fixture, labels, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
obj, object_type, uuid = self._get_obj_from_fixture(fixture)
is_global = False if getattr(labels[0], 'parent_type', None) == 'project' else True
labels = [label.tag_value for label in labels]
self.logger.info('Delete %s labels %s to %s'%('global' if is_global
else '', labels, obj.uuid if obj else uuid))
vnc_h.delete_labels(labels, is_global, obj, object_type, uuid)
def set_tag(self, fixture, tag, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
obj, object_type, uuid = self._get_obj_from_fixture(fixture)
is_global = False if getattr(tag, 'parent_type', None) == 'project' else True
vnc_h.set_tag(tag.tag_type_name, tag.tag_value, is_global,
obj, object_type, uuid)
self.logger.info('Set %s tag %s=%s to %s'%('global' if is_global
else '', tag.tag_type_name, tag.tag_value,
obj.uuid if obj else uuid))
self.addCleanup(self.unset_tag, fixture, tag, **kwargs)
def unset_tag(self, fixture, tag, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
vnc_h = connections.orch.vnc_h
obj, object_type, uuid = self._get_obj_from_fixture(fixture)
vnc_h.unset_tag(tag.tag_type_name, obj, object_type, uuid)
self.logger.info('Unset tag type %s from %s'%(tag.tag_type_name,
obj.uuid if obj else uuid))
def create_fw_policy(self, scope=None, rules=None, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
api_type = kwargs.pop('api_type', self.api_type)
return self.useFixture(FirewallPolicyFixture(scope=scope,
rules=rules, connections=connections, api_type=api_type,
**kwargs))
def add_fw_rule(self, fwp_fixture, rule_uuid, seq_no):
return fwp_fixture.add_firewall_rules([{'uuid': rule_uuid,
'seq_no': seq_no}])
def remove_fw_rule(self, fwp_fixture, rule_uuid):
return fwp_fixture.remove_firewall_rule(rule_uuid)
def create_fw_rule(self, scope=None, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
api_type = kwargs.pop('api_type', self.api_type)
return self.useFixture(FirewallRuleFixture(scope=scope,
connections=connections, api_type=api_type, **kwargs))
def _get_vmi_uuid(self, fixture):
if type(fixture) == VMFixture:
return list(fixture.get_vmi_ids().values())[0]
elif type(fixture) == PortFixture:
return fixture.uuid
def get_ip_address(self, fixture):
if type(fixture) == VMFixture:
return fixture.vm_ip
elif type(fixture) == PortFixture:
return fixture.get_ip_addresses()[0]
@property
def default_fwg(self):
if not getattr(self, '_default_fwg', None):
self._default_fwg = self.create_fw_group(name='default')
return self._default_fwg
def create_fw_group(self, vm_fixtures=None, port_fixtures=None,
ingress_policy=None, egress_policy=None,
verify=True, **kwargs):
connections = kwargs.pop('connections', None) or self.connections
ingress_policy_id = ingress_policy.uuid if ingress_policy else None
egress_policy_id = egress_policy.uuid if egress_policy else None
ports = [self._get_vmi_uuid(fixture) for fixture in
(vm_fixtures or list()) + (port_fixtures or list())]
# A port can only be associated to only one FW-Group
        # By default, the default FWG will have all ports associated,
# so disassociate from default FWG before associating to new FWG
        if ports and kwargs.get('name')
import socket
import sys
import threading
import time
import uuid
import unittest
from mock import patch
from nose import SkipTest
from nose.tools import eq_
from nose.tools import raises
from kazoo.testing import KazooTestCase
from kazoo.exceptions import (
AuthFailedError,
BadArgumentsError,
ConfigurationError,
ConnectionClosedError,
ConnectionLoss,
InvalidACLError,
NoAuthError,
NoNodeError,
NodeExistsError,
SessionExpiredError,
)
from kazoo.protocol.connection import _CONNECTION_DROP
from kazoo.protocol.states import KeeperState, KazooState
from kazoo.tests.util import TRAVIS_ZK_VERSION
if sys.version_info > (3, ): # pragma: nocover
def u(s):
return s
else: # pragma: nocover
def u(s):
return unicode(s, "unicode_escape")
class TestClientTransitions(KazooTestCase):
def test_connection_and_disconnection(self):
states = []
rc = threading.Event()
@self.client.add_listener
def listener(state):
states.append(state)
if state == KazooState.CONNECTED:
rc.set()
self.client.stop()
eq_(states, [KazooState.LOST])
states.pop()
self.client.start()
rc.wait(2)
eq_(states, [KazooState.CONNECTED])
rc.clear()
states.pop()
self.expire_session()
rc.wait(2)
req_states = [KazooState.LOST, KazooState.CONNECTED]
eq_(states, req_states)
class TestClientConstructor(unittest.TestCase):
def _makeOne(self, *args, **kw):
from kazoo.client import KazooClient
return KazooClient(*args, **kw)
def test_invalid_handler(self):
from kazoo.handlers.threading import SequentialThreadingHandler
self.assertRaises(ConfigurationError,
self._makeOne, handler=SequentialThreadingHandler)
def test_chroot(self):
self.assertEqual(self._makeOne(hosts='127.0.0.1:2181/').chroot, '')
self.assertEqual(self._makeOne(hosts='127.0.0.1:2181/a').chroot, '/a')
self.assertEqual(self._makeOne(hosts='127.0.0.1/a').chroot, '/a')
self.assertEqual(self._makeOne(hosts='127.0.0.1/a/b').chroot, '/a/b')
self.assertEqual(self._makeOne(
hosts='127.0.0.1:2181,127.0.0.1:2182/a/b').chroot, '/a/b')
def test_connection_timeout(self):
from kazoo.handlers.threading import TimeoutError
client = self._makeOne(hosts='127.0.0.1:9')
self.assertTrue(client.handler.timeout_exception is TimeoutError)
self.assertRaises(TimeoutError, client.start, 0.1)
def test_ordered_host_selection(self):
client = self._makeOne(hosts='127.0.0.1:9,127.0.0.2:9/a',
randomize_hosts=False)
hosts = [h for h in client.hosts]
eq_(hosts, [('127.0.0.1', 9), ('127.0.0.2', 9)])
def test_invalid_hostname(self):
client = self._makeOne(hosts='nosuchhost/a')
timeout = client.handler.timeout_exception
self.assertRaises(timeout, client.start, 0.1)
def test_retry_options_dict(self):
from kazoo.retry import KazooRetry
client = self._makeOne(command_retry=dict(max_tries=99),
connection_retry=dict(delay=99))
self.assertTrue(type(client._conn_retry) is KazooRetry)
self.assertTrue(type(client._retry) is KazooRetry)
eq_(client._retry.max_tries, 99)
eq_(client._conn_retry.delay, 99)
class TestAuthentication(KazooTestCase):
def _makeAuth(self, *args, **kwargs):
from kazoo.security import make_digest_acl
return make_digest_acl(*args, **kwargs)
def test_auth(self):
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client()
client.start()
client.add_auth("digest", digest_auth)
client.default_acl = (acl,)
try:
client.create("/1")
client.create("/1/2")
client.ensure_path("/1/2/3")
eve = self._get_client()
eve.start()
self.assertRaises(NoAuthError, eve.get, "/1/2")
# try again with the wrong auth token
eve.add_auth("digest", "badbad:bad")
self.assertRaises(NoAuthError, eve.get, "/1/2")
finally:
# Ensure we remove the ACL protected nodes
client.delete("/1", recursive=True)
eve.stop()
eve.close()
def test_connect_auth(self):
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client(auth_data=[('digest', digest_auth)])
client.start()
try:
client.create('/1', acl=(acl,))
# give ZK a chance to copy data to other node
time.sleep(0.1)
self.assertRaises(NoAuthError, self.client.get, "/1")
finally:
client.delete('/1')
client.stop()
client.close()
def test_unicode_auth(self):
username = u("xe4/\hm")
password = u("/\<PASSWORD>")
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client()
client.start()
client.add_auth("digest", digest_auth)
client.default_acl = (acl,)
try:
client.create("/1")
client.ensure_path("/1/2/3")
eve = self._get_client()
eve.start()
self.assertRaises(NoAuthError, eve.get, "/1/2")
# try again with the wrong auth token
eve.add_auth("digest", "badbad:bad")
self.assertRaises(NoAuthError, eve.get, "/1/2")
finally:
# Ensure we remove the ACL protected nodes
client.delete("/1", recursive=True)
eve.stop()
eve.close()
def test_invalid_auth(self):
client = self._get_client()
client.start()
self.assertRaises(TypeError, client.add_auth,
'digest', ('user', 'pass'))
self.assertRaises(TypeError, client.add_auth,
None, ('user', 'pass'))
def test_async_auth(self):
client = self._get_client()
client.start()
username = uuid.uuid4().hex
password = <PASSWORD>
digest_auth = "%s:%s" % (username, password)
result = client.add_auth_async("digest", digest_auth)
self.assertTrue(result.get())
def test_async_auth_failure(self):
client = self._get_client()
client.start()
username = uuid.uuid4().hex
password = <PASSWORD>
digest_auth = "%s:%s" % (username, password)
self.assertRaises(AuthFailedError, client.add_auth,
"unknown-scheme", digest_auth)
def test_add_auth_on_reconnect(self):
client = self._get_client()
client.start()
client.add_auth("digest", "jsmith:jsmith")
client._connection._socket.shutdown(socket.SHUT_RDWR)
while not client.connected:
time.sleep(0.1)
self.assertTrue(("digest", "jsmith:jsmith") in client.auth_data)
class TestConnection(KazooTestCase):
def test_chroot_warning(self):
k = self._get_nonchroot_client()
k.chroot = 'abba'
try:
with patch('warnings.warn') as mock_func:
k.start()
assert mock_func.called
finally:
k.stop()
def test_session_expire(self):
from kazoo.protocol.states import KazooState
cv = threading.Event()
def watch_events(event):
if event == KazooState.LOST:
cv.set()
self.client.add_listener(watch_events)
self.expire_session()
cv.wait(3)
assert cv.is_set()
def test_bad_session_expire(self):
from kazoo.protocol.states import KazooState
cv = threading.Event()
ab = threading.Event()
def watch_events(event):
if event == KazooState.LOST:
ab.set()
raise Exception("oops")
cv.set()
self.client.add_listener(watch_events)
self.expire_session()
ab.wait(0.5)
assert ab.is_set()
cv.wait(0.5)
assert not cv.is_set()
def test_state_listener(self):
from kazoo.protocol.states import KazooState
states = []
condition = threading.Condition()
def listener(state):
with condition:
states.append(state)
condition.notify_all()
self.client.stop()
eq_(self.client.state, KazooState.LOST)
self.client.add_listener(listener)
self.client.start(5)
with condition:
if not states:
condition.wait(5)
eq_(len(states), 1)
eq_(states[0], KazooState.CONNECTED)
def test_invalid_listener(self):
self.assertRaises(ConfigurationError, self.client.add_listener, 15)
def test_listener_only_called_on_real_state_change(self):
from kazoo.protocol.states import KazooState
        eq_(self.client.state, KazooState.CONNECTED)
called = [False]
condition = threading.Event()
def listener(state):
called[0] = True
condition.set()
self.client.add_listener(listener)
self.client._make_state_change(KazooState.CONNECTED)
condition.wait(3)
self.assertFalse(called[0])
def test_no_connection(self):
client = self.client
client.stop()
self.assertFalse(client.connected)
self.assertTrue(client.client_id is None)
self.assertRaises(ConnectionClosedError, client.exists, '/')
def test_close_connecting_connection(self):
client = self.client
client.stop()
ev = threading.Event()
def close_on_connecting(state):
if state in (KazooState.CONNECTED, KazooState.LOST):
ev.set()
client.add_listener(close_on_connecting)
client.start()
# Wait until we connect
ev.wait(5)
ev.clear()
self.client._call(_CONNECTION_DROP, client.handler.async_result())
client.stop()
# ...and then wait until the connection is lost
ev.wait(5)
self.assertRaises(ConnectionClosedError,
self.client.create, '/foobar')
def test_double_start(self):
self.assertTrue(self.client.connected)
self.client.start()
self.assertTrue(self.client.connected)
def test_double_stop(self):
self.client.stop()
self.assertFalse(self.client.connected)
self.client.stop()
self.assertFalse(self.client.connected)
def test_restart(self):
self.assertTrue(self.client.connected)
self.client.restart()
self.assertTrue(self.client.connected)
def test_closed(self):
client = self.client
client.stop()
write_pipe = client._connection._write_pipe
# close the connection to free the pipe
client.close()
eq_(client._connection._write_pipe, None)
# sneak in and patch client to simulate race between a thread
# calling stop(); close() and one running a command
oldstate = client._state
client._state = KeeperState.CONNECTED
client._connection._write_pipe = write_pipe
try:
# simulate call made after write pipe is closed
self.assertRaises(ConnectionClosedError, client.exists, '/')
            # simulate call made after write pipe is set to None
client._connection._write_pipe = None
self.assertRaises(ConnectionClosedError, client.exists, '/')
finally:
# reset for teardown
client._state = oldstate
client._connection._write_pipe = None
class TestClient(KazooTestCase):
def _getKazooState(self):
from kazoo.protocol.states import KazooState
return KazooState
def test_client_id(self):
client_id = self.client.client_id
self.assertEqual(type(client_id), tuple)
# make sure password is of correct length
self.assertEqual(len(client_id[1]), 16)
def test_connected(self):
client = self.client
self.assertTrue(client.connected)
def test_create(self):
client = self.client
path = client.create("/1")
eq_(path, "/1")
self.assertTrue(client.exists("/1"))
def test_create_on_broken_connection(self):
client = self.client
client.start()
client._state = KeeperState.EXPIRED_SESSION
self.assertRaises(SessionExpiredError, client.create,
'/closedpath', b'bar')
client._state = KeeperState.AUTH_FAILED
self.assertRaises(AuthFailedError, client.create,
'/closedpath', b'bar')
client._state = KeeperState.CONNECTING
self.assertRaises(SessionExpiredError, client.create,
'/closedpath', b'bar')
client.stop()
client.close()
self.assertRaises(ConnectionClosedError, client.create,
'/closedpath', b'bar')
def test_create_null_data(self):
client = self.client
client.create("/nulldata", None)
value, _ = client.get("/nulldata")
self.assertEqual(value, None)
def test_create_empty_string(self):
client = self.client
client.create("/empty", b"")
value, _ = client.get("/empty")
eq_(value, b"")
def test_create_unicode_path(self):
client = self.client
path = client.create(u("/ascii"))
eq_(path, u("/ascii"))
path = client.create(u("/\xe4hm"))
eq_(path, u("/\xe4hm"))
def test_create_async_returns_unchrooted_path(self):
client = self.client
path = client.create_async('/1').get()
eq_(path, "/1")
def test_create_invalid_path(self):
client = self.client
self.assertRaises(TypeError, client.create, ('a', ))
self.assertRaises(ValueError, client.create, ".")
self.assertRaises(ValueError, client.create, "/a/../b")
self.assertRaises(BadArgumentsError, client.create, "/b\x00")
self.assertRaises(BadArgumentsError, client.create, "/b\x1e")
def test_create_invalid_arguments(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
self.assertRaises(TypeError, client.create, 'a', acl='all')
self.assertRaises(TypeError, client.create, 'a', acl=single_acl)
self.assertRaises(TypeError, client.create, 'a', value=['a'])
self.assertRaises(TypeError, client.create, 'a', ephemeral='yes')
self.assertRaises(TypeError, client.create, 'a', sequence='yes')
self.assertRaises(TypeError, client.create, 'a', makepath='yes')
def test_create_value(self):
client = self.client
client.create("/1", b"bytes")
data, stat = client.get("/1")
eq_(data, b"bytes")
def test_create_unicode_value(self):
client = self.client
self.assertRaises(TypeError, client.create, "/1", u("\xe4hm"))
def test_create_large_value(self):
client = self.client
kb_512 = b"a" * (512 * 1024)
client.create("/1", kb_512)
self.assertTrue(client.exists("/1"))
mb_2 = b"a" * (2 * 1024 * 1024)
self.assertRaises(ConnectionLoss, client.create, "/2", mb_2)
def test_create_acl_duplicate(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
client.create("/1", acl=[single_acl, single_acl])
acls, stat = client.get_acls("/1")
# ZK >3.4 removes duplicate ACL entries
if TRAVIS_ZK_VERSION:
version = TRAVIS_ZK_VERSION
else:
version = client.server_version()
self.assertEqual(len(acls), 1 if version > (3, 4) else 2)
def test_create_acl_empty_list(self):
from kazoo.security import OPEN_ACL_UNSAFE
client = self.client
client.create("/1", acl=[])
acls, stat = client.get_acls("/1")
self.assertEqual(acls, OPEN_ACL_UNSAFE)
def test_version_no_connection(self):
@raises(ConnectionLoss)
def testit():
self.client.server_version()
self.client.stop()
testit()
def test_create_ephemeral(self):
client = self.client
client.create("/1", b"ephemeral", ephemeral=True)
data, stat = client.get("/1")
eq_(data, b"ephemeral")
eq_(stat.ephemeralOwner, client.client_id[0])
def test_create_no_ephemeral(self):
client = self.client
client.create("/1", b"val1")
data, stat = client.get("/1")
self.assertFalse(stat.ephemeralOwner)
def test_create_ephemeral_no_children(self):
from kazoo.exceptions import NoChildrenForEphemeralsError
client = self.client
client.create("/1", b"ephemeral", ephemeral=True)
self.assertRaises(NoChildrenForEphemeralsError,
client.create, "/1/2", b"val1")
self.assertRaises(NoChildrenForEphemeralsError,
client.create, "/1/2", b"val1", ephemeral=True)
def test_create_sequence(self):
client = self.client
client.create("/folder")
path = client.create("/folder/a", b"sequence", sequence=True)
eq_(path, "/folder/a0000000000")
path2 = client.create("/folder/a", b"sequence", sequence=True)
eq_(path2, "/folder/a0000000001")
path3 = client.create("/folder/", b"sequence", sequence=True)
eq_(path3, "/folder/0000000002")
def test_create_ephemeral_sequence(self):
basepath = "/" + uuid.uuid4().hex
realpath = self.client.create(basepath, b"sandwich", sequence=True,
ephemeral=True)
self.assertTrue(basepath != realpath and realpath.startswith(basepath))
data, stat = self.client.get(realpath)
eq_(data, b"sandwich")
def test_create_makepath(self):
self.client.create("/1/2", b"val1", makepath=True)
data, stat = self.client.get("/1/2")
eq_(data, b"val1")
self.client.create("/1/2/3/4/5", b"val2", makepath=True)
data, stat = self.client.get("/1/2/3/4/5")
eq_(data, b"val2")
self.assertRaises(NodeExistsError, self.client.create, "/1/2/3/4/5",
b"val2", makepath=True)
def test_create_makepath_incompatible_acls(self):
from kazoo.client import KazooClient
from kazoo.security import make_digest_acl_credential, CREATOR_ALL_ACL
credential = make_digest_acl_credential("username", "password")
alt_client = KazooClient(self.cluster[0].address + self.client.chroot,
max_retries=5, auth_data=[("digest", credential)])
alt_client.start()
alt_client.create("/1/2", b"val2", makepath=True, acl=CREATOR_ALL_ACL)
try:
self.assertRaises(NoAuthError, self.client.create, "/1/2/3/4/5",
b"val2", makepath=True)
finally:
alt_client.delete('/', recursive=True)
alt_client.stop()
def test_create_no_makepath(self):
self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1")
self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1",
makepath=False)
self.client.create("/1/2", b"val1", makepath=True)
self.assertRaises(NoNodeError, self.client.create, "/1/2/3/4", b"val1",
makepath=False)
def test_create_exists(self):
from kazoo.exceptions import NodeExistsError
client = self.client
path = client.create("/1")
self.assertRaises(NodeExistsError, client.create, path)
def test_create_get_set(self):
nodepath = "/" + uuid.uuid4().hex
self.client.create(nodepath, b"sandwich", ephemeral=True)
data, stat = self.client.get(nodepath)
eq_(data, b"sandwich")
newstat = self.client.set(nodepath, b"hats", stat.version)
self.assertTrue(newstat)
from inspect import isfunction, isclass
import warnings
import pytest
try: # python 3.5+
from typing import Set, Any, List, Iterable
except ImportError:
pass
from _pytest.mark import MarkDecorator
from .pytest_compat import itermarkers, apply_mark_to, PytestUnknownMarkWarning
info_mode = False
debug_mode = False
def set_verbosity_level(pytest_config_verbositylevel):
global info_mode, debug_mode
info_mode = pytest_config_verbositylevel >= 3 # -vv
debug_mode = pytest_config_verbositylevel >= 4 # -vvv
class EasyMarkerDecorator(MarkDecorator):
"""
A mark decorator that in addition provides a .param(*values) convenience method
"""
@classmethod
def create_with_name(cls, name):
# create a pytest.mark.<name>, and copy its internal mark
_md = getattr(pytest.mark, name)
try:
mark = _md.mark
except AttributeError:
# happens in pytest 2, to maybe move in compat in the future
mark = _md.markname
return cls(mark)
def param(self, *values):
""" Convenience shortcut for `pytest.param(*values, marks=self)` """
return pytest.param(*values, marks=self)
class _Agnostic:
"""A special symbol used internally"""
def __repr__(self):
return "agnostic"
class EasyMarker(MarkDecorator):
"""
A pair of marker + commandline option for pytest. See constructor for details
"""
__slots__ = 'marker_id', 'full_name', \
'has_arg', 'allowed_values', 'used_values', \
'cmdoption_short', 'cmdoption_long', \
'not_filtering_skips_marked', 'filtering_skips_unmarked', \
'cmdhelp', 'markhelp'
_all_markers = []
def __init__(self,
marker_id, # type: str
mode, # type: str
full_name=None, # type: str
has_arg=True, # type: bool
allowed_values=None, # type: Iterable[Any]
cmdoption_short=None, # type: str
cmdoption_long=None, # type: str
cmdhelp=None, # type: str
markhelp=None, # type: str
):
"""
Creates a pair of marker + commandline option for pytest. Marker instances can be used
        - to decorate test classes or test functions: @marker or @marker(arg) depending on whether you set has_arg=False/True
- in parametrization values with `pytest.param(*<argvalues>, marks=<self>)` or
`pytest.param(*<argvalues>, marks=<self>(arg))` (for this, we inherit from MarkDecorator and override <self>.mark)
In addition, `<self>.param(*<argvalues>)` or `<self>(arg).param(*<argvalues>)` is a convenience method provided to
        do the same as `pytest.param(*<argvalues>, marks=<self>)` or `pytest.param(*<argvalues>, marks=<self>(arg))`.
A special decorator `@<marker>.agnostic` can be used to decorate tests that should always run, whatever the
configuration. This is only relevant for `mode='silos'` or `mode='hard_filter'`, see below.
:param marker_id: the name of the pytest mark. Applying this marker with `@marker(arg)` will be equivalent to
applying @pytest.mark.<marker_id>(arg)
:param mode: a mandatory string indicating the working mode of this mark and the associated filter option. Four
modes are supported:
- 'silos': When the option is inactive, only non-marked tests are run. When the option is active, only
relevant marked tests run. There is no test in common between these "silos"
- 'extender': When the option is inactive, only non-marked tests are run, this is the "base" set
of tests. When the option is active, it adds the relevant marked tests to the base set.
- 'hard_filter': When the option is inactive, all tests run. When the option is active, only the relevant
marked tests run.
- 'soft_filter': When the option is inactive, all tests run. When the option is active, all non-marked
tests continue to run, but among marked tests only the relevant ones run.
:param full_name: the full name of the marker, to be used in help texts. If `None` (default), it defaults to
`marker_id`.
:param has_arg: if this is `True` (default), the marker has a single argument and the filtering commandline
option accepts an argument too. For example a `colormarker` with id `color` will accept an argument
describing which color: `@colormarker('yellow')`. If this is `False`, the marker has no argument and the
filtering commandline option is a flag with no arguments too. For example a `smokemarker` with id `smoke`:
`@smokemarker`.
:param allowed_values: a predefined set of values that can be used for this marker. Applying the mark with another
        value as the argument will result in a `ValueError` being raised. `None` (default) will allow users to apply
        this mark with any value. Note that this can only be set if `has_arg` is `True`
:param cmdoption_short: the id to use for the "short" command option (for example providing `'E'` or `'-E'`
will result in the option `'-E'`). `None` (default) will *not* create a "short" command option, to avoid
name collisions.
:param cmdoption_long: the id to use for the "long" command option (for example providing `'env'` or `'--env'`
will result in the option `'--env'`). `None` (default) will use `marker_id` for the long command option.
:param cmdhelp: the help message displayed when `pytest --help` is called
:param markhelp: the help message displayed when `pytest --markers` is called
"""
# mode validation
if mode == "silos":
# When the option is inactive, only non-marked tests are run.
not_filtering_skips_marked = True
# When the option is active, only relevant marked tests run. There is no test in common between these silos
filtering_skips_unmarked = True
elif mode == "extender":
# When the option is inactive, only non-marked tests are run, this is the "base" set of tests.
not_filtering_skips_marked = True
# When the option is active, it adds the relevant marked tests to the base set.
filtering_skips_unmarked = False
elif mode == "hard_filter":
# When the option is inactive, all tests run.
not_filtering_skips_marked = False
# When the option is active, only the relevant marked tests run.
filtering_skips_unmarked = True
elif mode == "soft_filter":
# When the option is inactive, all tests run.
not_filtering_skips_marked = False
# When the option is active, all non-marked tests continue to run, but among marked tests only
# the relevant ones run.
filtering_skips_unmarked = False
if not has_arg:
raise ValueError("It does not make sense to set `mode` to `'soft_filter'` when the marker has "
"no arguments.")
else:
raise ValueError("Invalid 'mode' %r. Only 'silos', 'extender', 'hard_filter' or 'soft_filter' are "
"supported." % mode)
# identifiers
if marker_id is None:
raise ValueError("a non-None `marker_id` is mandatory")
self.marker_id = marker_id
# no need to call super constructor, we will never use it directly
# indeed we override the .mark attribute with a property, see below
# super(EasyMarker, self).__init__(Mark(marker_id, (), {}))
self.full_name = full_name if full_name is not None else marker_id # (default)
# arguments
self.has_arg = has_arg
# note: we do not use a set to store the allowed values because we want to preserve the order
self.allowed_values = tuple(allowed_values) if allowed_values is not None else None
if not self.has_arg and self.allowed_values is not None:
raise ValueError("`allowed_values` should not be provided if `has_arg` is `False`, as the marker does not "
"accept any arguments")
# cmdoption short
if cmdoption_short is not None:
if cmdoption_short.startswith('--'):
raise ValueError("Short command option should only have a single leading dash `-` symbol or zero, not "
"two. Found %s" % cmdoption_short)
else:
cmdoption_short = "-%s" % cmdoption_short.strip('-')
self.cmdoption_short = cmdoption_short
# cmdoption long
if cmdoption_long is None:
cmdoption_long = self.marker_id
if cmdoption_long.startswith('-') and cmdoption_long[1] != '-':
raise ValueError("Long command option should have two leading dash `-` symbols or zero, not one. "
"Found %s" % cmdoption_long)
else:
self.cmdoption_long = "--%s" % cmdoption_long.strip('-')
# query filters
self.not_filtering_skips_marked = not_filtering_skips_marked
self.filtering_skips_unmarked = filtering_skips_unmarked
# help messages
self.cmdhelp = cmdhelp if cmdhelp is not None else self._get_default_cmdhelp()
self.markhelp = markhelp if markhelp is not None else self._get_default_markhelp()
# register the marker so that we can list them all in `list_all()`
EasyMarker._all_markers.append(self)
# prepare to collect the list of values actually used
self.used_values = set()
@property
def mark(self):
# called by pytest when pytest.param(<argvalue>, marks=<self>)
if self.has_arg:
raise ValueError("This marker '%s' has a mandatory argument" % self.marker_id)
return self.get_mark_decorator().mark
def param(self, *values):
""" Convenience shortcut for `pytest.param(*values, marks=self)` """
return pytest.param(*values, marks=self)
def __str__(self):
return "Pytest marker '%s' with CLI option '%s' and decorator '@pytest.mark.%s(<%s>)'" \
% (self.full_name, self.cmdoption_both, self.marker_id, self.marker_id)
def __repr__(self):
return str(self)
def _get_default_cmdhelp(self):
if self.has_arg:
if self.filtering_skips_unmarked:
first_part = "only run tests marked as requiring %s NAME (marked with @%s(NAME))." \
% (self.full_name, self.marker_id)
else:
first_part = "run tests marked as requiring %s NAME (marked with @%s(NAME)), as well as tests not " \
"marked with @%s." % (self.full_name, self.marker_id, self.marker_id)
else:
first_part = "only run tests marked as %s (marked with @%s)." % (self.full_name, self.marker_id)
if self.not_filtering_skips_marked:
            return first_part + " Important: if you call `pytest` without
None
return
""" Next some funcs that just say stuff"""
def repeat_me(text=""):
to_repeat = text.replace('repeat after me', '', 1)
say(to_repeat)
return
def say_time(text=""):
now = datetime.now()
dt_string = now.strftime("Your space time co ordinates are %I %M %p on %A %B %d, %Y.")
say(dt_string)
return
"""
Use the openweathermap API to get weather info. right now have hardcoded a bunch
of stuff, such as the API key, the lat and long for Portland, etc. The API returns info in
JSON format, which is a nested series of dicts and things.
"""
def say_weather(text=""):
api_key = "5295f26a45340d6e3fbf3e63fb069a79"
base_url = "http://api.openweathermap.org/data/2.5/onecall?"
full_url = (base_url
+ "lat=45.5051&lon=-122.6750"
+ "&exclude=hourly,minutely"
+ "&appid="
+ api_key
+ "&units=imperial"
)
say("Let me check.")
try:
response = requests.get(full_url)
data = json.loads(response.text)
except:
say("Sorry, I'm not sure. Try looking out the window.")
return
current = data["current"]
current_temp = current["temp"]
current_weather = current["weather"]
current_description = current_weather[0]["description"]
daily = data["daily"]
daily_temp = daily[0]["temp"]
max_temp = daily_temp["max"]
min_temp = daily_temp["min"]
say('Right now in Portland its "%d" degrees Fahrenheit.' % current_temp)
say('The sky looks "%s" to me.' % current_description)
say('Forecast calls for a high of "%d", and a low of "%d".' % (max_temp, min_temp))
return
def tell_joke(text=""):
url = "https://official-joke-api.appspot.com/random_joke"
try:
data = requests.get(url)
joke = json.loads(data.text)
except:
say("Sorry, I can't think of one right now.")
return
say(joke["setup"]+"..")
say(joke["punchline"])
return
def trivia(text=""):
url = "https://opentdb.com/api.php?amount=1&category=23&difficulty=medium&type=multiple"
try:
data = requests.get(url)
trivia = json.loads(data.text)
except:
say("argh.")
return
    logging.info(trivia)
return
def say_fav_show(text=""):
say("Doctor Who, obviously!")
return
def say_creator(text=""):
say("Some maniac from Reed College. Between you and me, I think he's got a screw loose.")
return
def say_name(text=""):
say("My name is <NAME>. I'm an experimental robot created by <NAME>. I can respond to simple commands. I can also tell jokes.")
return
def say_goodbye(text=""):
global user_name
say("Goodbye, " + user_name + ". Talk to you later.")
exit()
return
def say_what(text=""): #this is our generic "i didn't understand" situation
if not text:
return
else:
say("Sorry, I don't understand.")
return
"""
Next, the "tell me about x" function. User asks for info on a topic and we
look it up in Wikipedia. I don't think the wikipedia module is super reliable
and wish we could do better error-checking. There's long latency while we get info.
"""
def tell_me_about(text):
if not text:
return -1
# so text will be something like "tell me about Delaware."
# first we have to strip out the 'tell me about' preamble
topic = text.replace('tell me about', '', 1)
if not topic:
say("Sorry, I didn't catch that.")
return -1
say("OK, Hang on a sec while I look up" + topic)
try:
wikipedia_entry = wikipedia.summary(topic, sentences=wiki_sentences)
except wikipedia.exceptions.PageError as e:
logging.info(e.options)
say("Page error.")
return -1
except wikipedia.exceptions.DisambiguationError as e:
logging.info(e.options)
say("Sorry, that was too ambiguous.")
return -1
except :
say("Sorry, something went wrong.")
return -1
say("Here's what I found:")
say(wikipedia_entry)
return 1
def more_detail(text=""):
global wiki_sentences
wiki_sentences = wiki_sentences + 2
say("Affirmative")
return
def less_detail(text=""):
global wiki_sentences
wiki_sentences = wiki_sentences - 2
if wiki_sentences <= 1 :
wiki_sentences = 1
say("Affirmative")
return
"""
next answer any questions that have stumped us so far. That means the question was not in our
phraselist. This is explicitly for questions. so
first let's make sure there is a question word. Let's also kick out
questions about you and me since those are not things the internet is likely to answer well.
at first, tried to post a query to duckduckgo but i don't understand their format
so right now am just asking Alexa!
"""
def answer_question(question=""):
global k9_volume
if not question :
return 0
qlist = [ 'who','whos','what','whats','which','why','where','wheres','when','whens','how','hows']
first_word = question.split()[0]
if first_word not in qlist :
logging.info('"%s" is not a question.' % question)
return 0
shitlist = ['you','your','me','my','us','we']
s = set(question.split()).intersection(shitlist)
if len(s) != 0 :
say("That is a personal question.")
return 0
say("I have no idea. But I know someone who might know.")
k9_volume = k9_volume + 20 # make it loud!
say("Alexa! " + question)
k9_volume = k9_volume - 20 # back to normal volume
return 1
"""
url = 'https://api.duckduckgo.com/?q="%s"&format=json' % question
logging.info(url)
data = requests.get(url)
answer = json.loads(data.text)
logging.info(answer["AbstractText"])
say(answer["AbstractText"])
return 1
"""
"""
Next a func to do some clever responses for users we expect to encounter
"""
def set_user_name(text):
global user_name
global user_name_tries
if text is None:
user_name_tries+=1
if user_name_tries <= 3 :
say("Sorry, I didn't hear that. What is your name?")
return None
else :
say("Still didn't hear it. I'll call you Ishmael.")
user_name = "Ishmael"
user_name_tries = 0
return user_name
else :
user_name = text.lower()
if "chris" in user_name:
say("I thought it might be you, " + user_name + ". What an unexpected pleasure.")
return user_name
else :
say("Greetings, " + user_name + ". It's a pleasure to meet you.")
return user_name
return None
"""
Next set up a dictionary of known phrases the user might say and functions K9 might execute in
response. In Python, dictionaries have a key:value structure. The phrases are keys and
the functions are values. The phrase is a string that we can pass to some of these
functions. The keys can be used as hints for voice recognition
"""
phrase_bank = {
'K9' : wake_up,
'turn the light on' : light_on,
'turn the light off': light_off,
'blink the light' : light_blink,
'pulse the light' : light_pulse,
'turn left' : turn_left,
'turn right' : turn_right,
'spin' : spin,
'go forward' : go_forward,
'go back' : go_back,
'back up' : go_back,
'speed up' : speed_up,
'slow down' : slow_down,
'attack' : attack,
'stop' : halt,
'halt' : halt,
'woe' : halt,
'explore' : explore,
'get louder' : get_louder,
'speak up' : get_louder,
'pipe down' : get_quieter,
'too loud' : get_quieter,
'more detail' : more_detail,
'less detail' : less_detail,
'broad strokes' : less_detail,
'repeat after me' : repeat_me,
'what time is it' : say_time,
'what\'s the time' : say_time,
'what is the weather' : say_weather,
'what\'s the weather' : say_weather,
'what is your favorite show' : say_fav_show,
'what\'s your favorite show' : say_fav_show,
'what\'s your name' : say_name,
'who are you' : say_name,
'identify yourself' : say_name,
'who built you' : say_creator,
'who made you' : say_creator,
'who created you' : say_creator,
'tell me about' : tell_me_about,
'tell me a joke' : tell_joke,
'tell me another joke' : tell_joke,
'tell me a better joke' : tell_joke,
'say something funny' : tell_joke,
'make me laugh' : tell_joke,
'see you' : say_goodbye,
'chow' : say_goodbye,
'goodbye' : say_goodbye
}
"""
Next a function that takes a phrase, looks at phrase_bank to see if we know how to handle
and returns appropriate function. Otherwise return the say_what function. Note the phrase
may actually *contain* the key phrase. So if user asks "I say, what time is it, eh?" We
look to see if the phrase_bank contains "what time is it". We need to figure out how to
strip extraneous stuff from the beginning of phrase, too
"""
def respond_to_phrase(phrase):
if not phrase:
return
for key in phrase_bank :
if key in phrase :
func = phrase_bank[key]
return func(phrase)
# if we're here, we didn't find phrase in the phrase_bank
logging.info('not finding "%s"' % phrase)
first_word = phrase.split()[0]
qlist = ['who','what','which','why','where','when','how']
if first_word in qlist :
return answer_question(phrase)
else :
return(say_what(phrase))
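# A couple of illustrative calls (a sketch; the phrases are hypothetical user input):
#   respond_to_phrase("hey, what time is it please")   # contains 'what time is it' -> say_time
#   respond_to_phrase("who invented the telephone")    # no key match, starts with 'who' -> answer_question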
def get_hints(language_code):
if language_code.startswith('en_'):
return (phrase_bank.keys())
return None
"""
Next define a subclass of the cloudspeech client. mostly because I want k9 to show user that
it is "listening" by pulsing an LED.
"""
class K9Client (CloudSpeechClient):
def start_listening(self):
k9_board.led.state = Led.PULSE_SLOW
        # Calling the parent's class method, which basically just logs to the logfile.
super().start_listening()
return
def stop_listening(self):
k9_board.led.state = Led.OFF
super().stop_listening()
return
"""
Finally, define the main() loop
"""
def main():
global awake_flag
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Assistant service example.')
parser.add_argument('--language', default=locale_language())
args = parser.parse_args()
logging.info('Initializing for language %s...', args.language)
else:
t = type(val)
cls = t.__module__+"."+t.__qualname__
add = id(val)
return MPackage.F(MPackage.PackageContext+"PyObject", self.ref, cls, add)
def __add__(self, amt):
from operator import add
return self.handler.ref(self.handler._op(add, self.ref, amt))
def __sub__(self, amt):
from operator import sub
return self.handler.ref(self.handler._op(sub, self.ref, amt))
def __mul__(self, amt):
from operator import mul
return self.handler.ref(self.handler._op(mul, self.ref, amt))
def __call__(self, *args, **kwargs):
return self.get()(*args, **kwargs)
def __iadd__(self, amt):
return self.handler._iop(self.ref, amt)
def __imul__(self, amt):
return self.handler.imul(self.ref, amt)
def __getattr__(self, item):
return self.handler.get()
###############################################################################################
# #
# Expr #
# #
###############################################################################################
class Expr:
"""The Expr class is a representation of arbitrary Mathematica expressions in Java.
Exprs are created by reading an expression from a link (using the getExpr() method),
they can be decomposed into component Exprs with methods like head() and part(), and
their structure can be queried with methods like length(), numberQ(), and matrixQ().
All these methods will be familiar to Mathematica programmers, and their Expr
counterparts work similarly. Like Mathematica expressions, Exprs are immutable, meaning
they can never be changed once they are created. Operations that might appear to modify
an Expr (like delete()) return new modified Exprs without changing the original.
<p>
Exprs are stored initially in a very efficient way, and they can be created and written
to links very quickly. When you call operations that inspect their structure or that
extract component parts, however, it is likely that they must be unpacked into a more
Java-native form that requires more memory.
<p>
In its present state, Expr has four main uses:
<p>
(1) Storing expressions read from a link so that they can be later written to another
link. This use replaces functionality that C-language programmers would use a loopback
link for. (J/Link has a LoopbackLink interface as well, but Expr affords an even easier
method.)
<pre>
Expr e = ml.getExpr();
// ... Later, write it to a different MathLink:
otherML.put(e);
e.dispose();</pre>
Note that if you just want to move an expression immediately from one link to another, you
can use the MathLink method transferExpression() and avoid creating an Expr to store it.
<p>
(2) Many of the KernelLink methods take either a string or an Expr. If it is not convenient
to build a string of Mathematica input, you can use an Expr. There are two ways to build an
Expr: you can use a constructor, or you can create a loopback link as a scratchpad,
build the expression on this link with a series of MathLink put calls, then read
the expression off the loopback link using getExpr(). Here is an example that creates an Expr
that represents 2+2 and computes it in Mathematica using these two techniques:
<pre>
// First method: Build it using Expr constructors:
Expr e1 = new Expr(new Expr(Expr.SYMBOL, "Plus"), new Expr[]{new Expr(2), new Expr(2)});
// ml is a KernelLink
String result = ml.evaluateToOutputForm(e1, 72);
// Second method: Build it on a LoopbackLink with MathLink calls:
LoopbackLink loop = MathLinkFactory.createLoopbackLink();
loop.putFunction("Plus", 2);
loop.put(2);
loop.put(2);
Expr e2 = loop.getExpr();
loop.close();
result = ml.evaluateToOutputForm(e2, 72);
e2.dispose();</pre>
(3) Getting a string representation of an expression. Sometimes you want to be able to
produce a readable string form of an entire expression, particularly for debugging. The
toString() method will do this for you:
<pre>
// This code will print out the next expression waiting on the link without
// consuming it, so that the state of the link is unchanged:
System.out.println("Next expression is: " + ml.peekExpr().toString());</pre>
(4) Examining the structure or properties of an expression. Although it is possible to
do this sort of thing with MathLink calls, it is very difficult in general. Expr lets
you read an entire expression from a link and then examine it using a very high-level
interface and without having to worry about managing your current position in an
incoming stream of data.
<p>
Expr is a work in progress. It will be expanded in the future.
"""
__EXPR_TABLE = {}
__ALLOW_EMPTY = False
def __init__(self, *args, loopback = None):
self.__type = None
self.__itype = None
self.__dims = None
self.__head = None
self.__args = None
self.__link = loopback
self.__hash = None
if len(args)>0:
head = args[0]
args = args[1:]
if len(args) == 0:
self.__init_from_val(head)
elif isinstance(head, (int, str)) and len(args) == 1:
self.__init_from_val_and_hint(head, args[0])
elif isinstance(head, Expr):
if head.data_type == "Symbol":
self.__init_from_head_and_args(head, args)
else:
raise ValueError(
"{}: head must be of type 'Symbol' not '{}'".format(type(self).__name__, head.data_type)
)
else:
raise ValueError(
"Unable to construct {} from head {} and args {}".format(type(self).__name__, head, args)
)
elif not self.__ALLOW_EMPTY and not loopback:
raise TypeError("__init__() missing 1 required positional argument: 'val'")
else:
self.__type = Env.getExprTypeInt("Unknown")
@classmethod
def _get_cached_expr(cls, name, *args):
try:
expr = cls.__EXPR_TABLE[name]
except KeyError:
expr = cls.__EXPR_TABLE[name] = cls(*args)
return expr
@classmethod
def _get_head(cls, sym):
return cls._get_cached_expr(sym, "Symbol", sym)
def __init_from_val(self, val):
"""Create an Expr from the value val
:param val:
:return:
"""
from decimal import Decimal as decimal
from fractions import Fraction as fraction
from collections import OrderedDict as Association
from array import array
converter_map = {
int : { "type" : Env.getExprTypeInt("Integer"), "head" : "Integer" },
float : { "type" : Env.getExprTypeInt("Real"), "head" : "Real" },
str : { "type" : Env.getExprTypeInt("String"), "head" : "String" },
decimal : { "type" : Env.getExprTypeInt("Decimal"), "head" : "Real" },
fraction : { "type" : Env.getExprTypeInt("Rational"), "head" : "Rational" },
complex : { "type" : Env.getExprTypeInt("Complex"), "head" : "Complex" },
}
otype = None
ohead = None
oitype = None
odims = None
for key, item in converter_map.items():
if isinstance(val, key):
otype = item["type"]
ohead = self._get_head(item["head"])
odims = (0,)
break
if otype is None:
if isinstance(val, BufferedNDArray):
ohead = self._get_head("List")
otype = Env.getExprTypeInt("BufferedNDArray")
oitype = val.typecode
odims = val.shape
if isinstance(val, array):
ohead = self._get_head("List")
otype = Env.getExprTypeInt("Array")
oitype = val.typecode
odims = (len(val), )
elif isinstance(val, Association):
ohead = self._get_head("Association")
otype = Env.getExprTypeInt("Association")
odims = (len(val), )
elif isinstance(val, (list, tuple)):
ohead = self._get_head("List")
otype = Env.getExprTypeInt("List")
odims = ArrayUtils.get_array_dims(val, False)
elif Env.HAS_NUMPY:
import numpy as np
if isinstance(val, np.ndarray):
ohead = self._get_head("List")
otype = Env.getExprTypeInt("NumPyArray")
oitype = val.dtype.type
odims = val.shape
if otype is None:
ohead = self._get_head(type(val).__name__)
try:
iter(val) # iterable anything is a list ?
except:
otype = self._get_head("Object")
odims = (0, )
else:
otype = Env.getExprTypeInt("Function")
odims = ArrayUtils.get_array_dims(val, False)
self.__head = ohead
self.__args = (val, )
self.__type = otype
self.__itype = oitype
self.__dims = odims
def __init_from_val_and_hint(self, typename, val):
"""Creates an Expr representing a Mathematica Integer, Real, String, or Symbol whose value is
given by the supplied string (for example "2", "3.14", or "Plus").
:param typename: the type of the Expr; must be one of "Integer", "Real", "Decimal", "Fraction", or "Symbol"
:param val: the value of the Expr, interpreted according to the type argument
:return:
"""
if isinstance(typename, int):
typename = Env.getExprTypeName(typename)
if typename == "Integer":
self.__head = self._get_head("Integer")
self.__args = (int(val), )
self.__type = Env.getExprTypeInt(typename)
self.__dims = ()
elif typename == "Real":
self.__head = self._get_head("Real")
self.__args = (float(val), )
self.__type = Env.getExprTypeInt(typename)
self.__dims = ()
elif typename == "String":
self.__head = self._get_head("String")
            self.__args = (str(val), )
self.__type = Env.getExprTypeInt(typename)
self.__dims = ()
elif typename == "Symbol":
import re
if not isinstance(val, str):
raise TypeError("{} with head Symbol can't have value of type {}. Only str is allowed".format(type(self).__name__, type(val).__name__))
val = val.strip()
sym_re = "($|[^\W\d_])+"
sym_re = re.compile(sym_re, re.U)
if not re.match(sym_re, val):
raise ValueError("Symbol must match regex {}".format(sym_re))
if val == "Symbol":
self.__head = self
else:
self.__head = self._get_head("Symbol")
self.__args = (val, )
self.__type = Env.getExprTypeInt(typename)
self.__dims = ()
elif typename == "Rational":
from fractions import Fraction as fraction
self.__head = self._get_head("Rational")
            self.__args = (fraction(val), )
self.__type = Env.getExprTypeInt(typename)
self.__dims = ()
elif typename == "Decimal":
from decimal import Decimal as decimal
self.__head = self._get_head("Real")
            self.__args = (decimal(val), )
self.__type = Env.getExprTypeInt(typename)
self.__dims = ()
elif typename == "Complex":
self.__head = self._get_head("Complex")
            self.__args = (complex(val), )
self.__type = Env.getExprTypeInt(typename)
            self.__dims = ()
of tuples,with the
# strategies you are looking to apply
# to each type.
[
(list, ["append"]),
(dict, ["merge"])
],
# next, choose the fallback strategies,
# applied to all other types:
["override"],
# finally, choose the strategies in
# the case where the types conflict:
["override"]
)
return my_merger.merge(dct1, dct2)
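# Expected behaviour of merge_deep above, per the deepmerge strategies configured
# (lists append, dicts merge, everything else overrides). A hedged sketch:
#   merge_deep({'a': [1], 'b': {'x': 1}, 'c': 1}, {'a': [2], 'b': {'y': 2}, 'c': 2})
#   -> {'a': [1, 2], 'b': {'x': 1, 'y': 2}, 'c': 2}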
def merge_all(dcts):
"""
Shallow merge all the dcts
:param dcts:
:return:
"""
return reduce(
lambda accum, dct: merge(accum, dct),
dict(),
dcts
)
def merge_deep_all(dcts):
"""
Merge deep all dicts using merge_deep
:param dcts:
:return:
"""""
return reduce(
lambda accum, dct: merge_deep(accum, dct),
dict(),
dcts
)
@curry
def merge(dct1, dct2):
"""
    Ramda implementation of merge
:param dct1:
:param dct2:
:return:
"""
return merge_dicts(dct1, dct2)
def compact(lst):
"""
    Ramda implementation of compact. Removes Nones from lst (not 0, etc)
:param lst:
:return:
"""
return filter(lambda x: x is not None, lst)
def compact_empty(lst):
"""
    Ramda implementation of compact_empty. Removes empty strings
:param lst:
:return:
"""
return filter(lambda x: x != '', lst)
def from_pairs(pairs):
"""
    Implementation of Ramda from_pairs. Converts a list of pairs or tuples of pairs to a dict
:param pairs:
:return:
"""
return {k: v for k, v in pairs}
def to_pairs(dct):
"""
    Implementation of Ramda to_pairs. Converts a dict to a list of pairs
:param dct:
:return:
"""
return dct.items()
def flatten(lst):
"""
    Implementation of Ramda flatten
:param lst:
:return:
"""
return list(itertools.chain.from_iterable(lst))
@curry
def concat(lst1, lst2):
"""
    Implementation of Ramda concat
:param lst1:
:param lst2:
:return:
"""
return lst1 + lst2
def from_pairs_to_array_values(pairs):
"""
    Like from_pairs but combines duplicate key values into arrays
:param pairs:
:return:
"""
result = {}
for pair in pairs:
result[pair[0]] = concat(prop_or([], pair[0], result), [pair[1]])
return result
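# Example of the helper above: duplicate keys accumulate their values into lists:
#   from_pairs_to_array_values([('a', 1), ('a', 2), ('b', 3)]) -> {'a': [1, 2], 'b': [3]}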
def fullname(o):
"""
https://stackoverflow.com/questions/2020014/get-fully-qualified-class-name-of-an-object-in-python
Return the full name of a class
:param o:
:return:
"""
return o.__module__ + "." + o.__class__.__name__
def length(lst):
"""
Implementation of Ramda length
:param lst:
:return:
"""
return len(lst)
def isalambda(v):
"""
Detects if something is a lambda
:param v:
:return:
"""
return isfunction(v) and v.__name__ == '<lambda>'
@curry
def map_prop_value_as_index(prp, lst):
"""
    Returns a dict keyed by the given prop of each item in the list, mapping to the item itself
:param prp:
:param lst:
:return:
"""
return from_pairs(map(lambda item: (prop(prp, item), item), lst))
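# Example (sketch): index a list of dicts by their 'id' prop:
#   map_prop_value_as_index('id', [{'id': 1}, {'id': 2}]) -> {1: {'id': 1}, 2: {'id': 2}}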
def to_dict_deep(obj, classkey=None):
"""
Converts an object to a dict deeply
:param obj:
:param classkey:
:return:
"""
if isinstance(dict, obj):
data = {}
for (k, v) in obj.items():
data[k] = to_dict_deep(v, classkey)
return data
elif hasattr(obj, "_ast"):
return to_dict_deep(obj._ast())
elif hasattr(obj, "__iter__") and not isinstance(str, obj):
return [to_dict_deep(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, to_dict_deep(value, classkey))
for key, value in obj.__dict__.items()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
@curry
def flatten_dct_until(obj, until_func, separator):
"""
    Flattens an object so deep keys and array indices become concatenated strings
    E.g. {'a': {'b': [1, 2]}} => {'a.b.0': 1, 'a.b.1': 2}
    @param {Object} obj The object to be flattened
    @param {Function} until_func stop flattening a branch if this function returns false for the current key.
    Takes 2 args, key and value
@param {Object} separator Key segment separator, probably either '.' or '__'
@returns {Object} The 1-D version of the object
:param obj:
:return:
"""
return from_pairs(_flatten_dct(obj, until_func, separator))
@curry
def flatten_dct(obj, separator):
"""
    Flattens an object so deep keys and array indices become concatenated strings
    E.g. {'a': {'b': [1, 2]}} => {'a.b.0': 1, 'a.b.1': 2}
    @param {Object} obj The object to be flattened
@param {Object} separator Key segment separator, probably either '.' or '__'
@returns {Object} The 1-D version of the object
:param obj:
:return:
"""
return from_pairs(_flatten_dct(obj, always(True), separator))
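# Sketch with a non-dot separator, per the separator parameter documented above:
#   flatten_dct({'a': {'b': [1, 2]}}, '__') -> {'a__b__0': 1, 'a__b__1': 2}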
def _flatten_dct(obj, until_func, separator, recurse_keys=[]):
"""
:param obj:
    :param until_func: stops recursion into a branch if this function returns False for the
    current key and value; the remaining value is then returned with the accumulated key
:param recurse_keys:
:return:
"""
return if_else(
        # If we have something iterable besides a string that is truthy
both(isinstance((dict, list, tuple)), identity),
# Then recurse on each object or array value. If o is not truthy, meaning {} or [], return
# a single item dict with the keys and o as the value
lambda o: compose(
flatten,
map_with_obj_to_values(
lambda k, oo: _flatten_dct(oo, until_func, separator, concat(recurse_keys, [k])) if
until_func(k, oo) else
[[join(separator, concat(recurse_keys, [k])), oo]]
),
# Convert lists and tuples to dict where indexes become keys
if_else(isinstance(dict), identity, list_to_dict)
)(o),
# If not an object return a single pair
lambda o: [[join(separator, recurse_keys), o]]
)(obj)
def key_string_to_lens_path(key_string):
"""
Converts a key string like 'foo.bar.0.wopper' to ['foo', 'bar', 0, 'wopper']
:param {String} keyString The dot-separated key string
:return {[String]} The lens array containing string or integers
"""
return map(
if_else(
isinstance(int),
# convert to int
lambda s: int(s),
# Leave the string alone
identity
),
key_string.split('.')
)
@curry
def fake_lens_path_view(lens_path, obj):
"""
Simulates R.view with a lens_path since we don't have lens functions
:param lens_path: Array of string paths
:param obj: Object containing the given path
:return: The value at the path or None
"""
if equals(0, length(lens_path)):
return obj
segment = head(lens_path)
return if_else(
both(lambda _: identity(segment), has(segment)),
# Recurse on the rest of the path
compose(fake_lens_path_view(tail(lens_path)), getitem(segment)),
# Give up
lambda _: None
)(obj)
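# Expected use (sketch): fake_lens_path_view(['a', 'b'], {'a': {'b': 3}}) -> 3;
# a missing segment yields None rather than raising.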
@curry
def fake_lens_path_set(lens_path, value, obj):
"""
Simulates R.set with a lens_path since we don't have lens functions. obj can be a dict or instance.
:param lens_path: Array of string paths
:param value: The value to set at the lens path
:param obj: Object containing the given path
:return: The value at the path or None
"""
segment = head(lens_path)
obj_copy = copy.copy(obj)
def set_array_index(i, v, l):
# Fill the array with None up to the given index and set the index to v
try:
l[i] = v
except IndexError:
for _ in range(i - len(l) + 1):
l.append(None)
l[i] = v
if not (length(lens_path) - 1):
# Done
new_value = value
else:
# Find the value at the path or create a {} or [] at obj[segment]
found_or_created = item_path_or(
if_else(
lambda segment: isint(segment) or segment.isnumeric(),
always([]),
always({})
)(head(tail(lens_path))),
int(segment) if isint(segment) else segment,
obj
)
# Recurse on the rest of the path
new_value = fake_lens_path_set(tail(lens_path), value, found_or_created)
# Set or replace
if isint(segment) or segment.isnumeric():
set_array_index(int(segment), new_value, obj_copy)
else:
if isinstance(dict, obj_copy):
obj_copy[segment] = new_value
elif isinstance(object, obj_copy):
setattr(obj_copy, segment, new_value)
return obj_copy
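# Expected use (sketch): returns a modified copy and leaves the input object alone:
#   fake_lens_path_set(['a', 'b'], 9, {'a': {'b': 1}}) -> {'a': {'b': 9}}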
def unflatten_dct(obj):
"""
    Undoes the work of flatten_dct
    @param {Object} obj 1-D object in the form returned by flatten_dct
@returns {Object} The original
:param obj:
:return:
"""
def reduce_func(accum, key_string_and_value):
key_string = key_string_and_value[0]
value = key_string_and_value[1]
item_key_path = key_string_to_lens_path(key_string)
# All but the last segment gives us the item container len
container_key_path = init(item_key_path)
container = unless(
# If the path has any length (not []) and the value is set, don't do anything
both(always(length(container_key_path)), fake_lens_path_view(container_key_path)),
# Else we are at the top level, so use the existing accum or create a [] or {}
# depending on if our item key is a number or not
lambda x: default_to(
if_else(
lambda segment: segment.isnumeric(),
always([]),
always({})
)(head(item_key_path))
)(x)
)(accum)
# Finally set the container at the itemLensPath
return fake_lens_path_set(
item_key_path,
value,
container
)
return compose(
reduce(
reduce_func,
# null initial value
None
),
to_pairs
)(obj)
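# Hedged round-trip sketch: unflatten_dct is meant to invert flatten_dct, e.g.
#   unflatten_dct({'a.b.0': 1, 'a.b.1': 2}) -> {'a': {'b': [1, 2]}}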
def list_to_dict(lst):
return dict(zip(range(len(lst)), lst))
@curry
def when(if_pred, when_true, obj):
"""
Ramda when implementation
:param if_pred:
:param when_true:
:param obj:
:return:
"""
return if_else(if_pred, when_true, identity, obj)
@curry
def unless(unless_pred, when_not_true, obj):
"""
Ramda unless implementation
:param unless_pred:
:param when_not_true:
:param obj:
:return:
"""
return if_else(unless_pred, identity, when_not_true, obj)
@curry
def props(props, obj_or_dict):
"""
    Ramda implementation of props, which fetches each specified prop in a dict or object using
prop() on each of props. Props must all be defined
:param props: List of simple props
:param obj_or_dict: And object or dict
:return: A list of the resolved prop values
"""
return map(
lambda p: prop(p, obj_or_dict),
props
)
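# Example (sketch): props(['a', 'b'], {'a': 1, 'b': 2, 'c': 3}) resolves each prop in order,
# i.e. the values 1 and 2 (wrapped in whatever sequence this library's map returns).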
@curry
def props_or(undefined_value, props, obj_or_dict):
"""
Ramda implmentation of props, which fetches each specified prop in a dict or object using
prop() on each of props.
    :param
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders()
return self.renew_application_authorization_service_order_with_options(request, headers, runtime)
async def renew_application_authorization_service_order_async(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders()
return await self.renew_application_authorization_service_order_with_options_async(request, headers, runtime)
def renew_application_authorization_service_order_with_options(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse(),
self.do_roarequest('RenewApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/orders/renew', 'json', req, runtime)
)
async def renew_application_authorization_service_order_with_options_async(
self,
request: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.instance_id):
body['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.end_time_gmt):
body['endTimeGMT'] = request.end_time_gmt
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenewApplicationAuthorizationServiceOrderResponse(),
await self.do_roarequest_async('RenewApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/applicationAuthorizations/orders/renew', 'json', req, runtime)
)
def get_process_definition(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetProcessDefinitionHeaders()
return self.get_process_definition_with_options(process_instance_id, request, headers, runtime)
async def get_process_definition_async(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetProcessDefinitionHeaders()
return await self.get_process_definition_with_options_async(process_instance_id, request, headers, runtime)
def get_process_definition_with_options(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
headers: dingtalkyida__1__0_models.GetProcessDefinitionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.group_id):
query['groupId'] = request.group_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.order_number):
query['orderNumber'] = request.order_number
if not UtilClient.is_unset(request.system_type):
query['systemType'] = request.system_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.name_space):
query['nameSpace'] = request.name_space
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetProcessDefinitionResponse(),
self.do_roarequest('GetProcessDefinition', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/definitions/{process_instance_id}', 'json', req, runtime)
)
async def get_process_definition_with_options_async(
self,
process_instance_id: str,
request: dingtalkyida__1__0_models.GetProcessDefinitionRequest,
headers: dingtalkyida__1__0_models.GetProcessDefinitionHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetProcessDefinitionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.group_id):
query['groupId'] = request.group_id
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.order_number):
query['orderNumber'] = request.order_number
if not UtilClient.is_unset(request.system_type):
query['systemType'] = request.system_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.name_space):
query['nameSpace'] = request.name_space
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetProcessDefinitionResponse(),
await self.do_roarequest_async('GetProcessDefinition', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/definitions/{process_instance_id}', 'json', req, runtime)
)
def upgrade_tenant_information(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpgradeTenantInformationHeaders()
return self.upgrade_tenant_information_with_options(request, headers, runtime)
async def upgrade_tenant_information_async(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.UpgradeTenantInformationHeaders()
return await self.upgrade_tenant_information_with_options_async(request, headers, runtime)
def upgrade_tenant_information_with_options(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
headers: dingtalkyida__1__0_models.UpgradeTenantInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpgradeTenantInformationResponse(),
self.do_roarequest('UpgradeTenantInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/tenantInfos', 'json', req, runtime)
)
async def upgrade_tenant_information_with_options_async(
self,
request: dingtalkyida__1__0_models.UpgradeTenantInformationRequest,
headers: dingtalkyida__1__0_models.UpgradeTenantInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.UpgradeTenantInformationResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.access_key):
body['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
body['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.account_number):
body['accountNumber'] = request.account_number
if not UtilClient.is_unset(request.commodity_type):
body['commodityType'] = request.commodity_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.UpgradeTenantInformationResponse(),
await self.do_roarequest_async('UpgradeTenantInformation', 'yida_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/yida/apps/tenantInfos', 'json', req, runtime)
)
def get_application_authorization_service_platform_resource(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders()
return self.get_application_authorization_service_platform_resource_with_options(request, headers, runtime)
async def get_application_authorization_service_platform_resource_async(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders()
return await self.get_application_authorization_service_platform_resource_with_options_async(request, headers, runtime)
def get_application_authorization_service_platform_resource_with_options(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse(),
self.do_roarequest('GetApplicationAuthorizationServicePlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorization/platformResources', 'json', req, runtime)
)
async def get_application_authorization_service_platform_resource_with_options_async(
self,
request: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceRequest,
headers: dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['instanceId'] = request.instance_id
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_uid):
query['callerUid'] = request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetApplicationAuthorizationServicePlatformResourceResponse(),
await self.do_roarequest_async('GetApplicationAuthorizationServicePlatformResource', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorization/platformResources', 'json', req, runtime)
)
def list_application_authorization_service_application_information(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders()
return self.list_application_authorization_service_application_information_with_options(instance_id, request, headers, runtime)
async def list_application_authorization_service_application_information_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders()
return await self.list_application_authorization_service_application_information_with_options_async(instance_id, request, headers, runtime)
def list_application_authorization_service_application_information_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse(),
self.do_roarequest('ListApplicationAuthorizationServiceApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorizations/applicationInfos/{instance_id}', 'json', req, runtime)
)
async def list_application_authorization_service_application_information_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationRequest,
headers: dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.page_size):
query['pageSize'] = request.page_size
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
if not UtilClient.is_unset(request.page_number):
query['pageNumber'] = request.page_number
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ListApplicationAuthorizationServiceApplicationInformationResponse(),
await self.do_roarequest_async('ListApplicationAuthorizationServiceApplicationInformation', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/authorizations/applicationInfos/{instance_id}', 'json', req, runtime)
)
def validate_application_authorization_service_order(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders()
return self.validate_application_authorization_service_order_with_options(caller_uid, request, headers, runtime)
async def validate_application_authorization_service_order_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders()
return await self.validate_application_authorization_service_order_with_options_async(caller_uid, request, headers, runtime)
def validate_application_authorization_service_order_with_options(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse(),
self.do_roarequest('ValidateApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appsAuthorizations/freshOrderInfoReviews/{caller_uid}', 'json', req, runtime)
)
async def validate_application_authorization_service_order_with_options_async(
self,
caller_uid: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationServiceOrderResponse(),
await self.do_roarequest_async('ValidateApplicationAuthorizationServiceOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/appsAuthorizations/freshOrderInfoReviews/{caller_uid}', 'json', req, runtime)
)
def get_activity_list(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityListHeaders()
return self.get_activity_list_with_options(request, headers, runtime)
async def get_activity_list_async(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetActivityListHeaders()
return await self.get_activity_list_with_options_async(request, headers, runtime)
def get_activity_list_with_options(
self,
request: dingtalkyida__1__0_models.GetActivityListRequest,
headers: dingtalkyida__1__0_models.GetActivityListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetActivityListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.process_code):
query['processCode'] = request.process_code
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetActivityListResponse(),
            self.do_roarequest('GetActivityList', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/processes/activities', 'json', req, runtime)
        )
Header "Accept" is "application/json"
When I make a "POST" request with the repository id
Then I should receive a "200" response code
And response should have key "status" of 200
And response header "Content-Type" should be "application/json; charset=UTF-8"
And response should not have key "errors"
""")
@given(u'the "set asset" endpoint for the "{obj}" "{attr}" with the asset "{asset_no}"')
def given_a_set(context, obj, attr, asset_no):
repository_id = get_repository_id(context)
context.execute_steps(u"""
Given the "set asset" endpoint
""")
resource_id = quote_plus(getattr(context, obj).get(attr))
context.endpoint = context.endpoint.format(repository_id, resource_id, context.set_asset_ids[int(asset_no)])
@given(u'a set with {nbassets} assets')
def given_a_set_with_assets(context, nbassets):
context.execute_steps(u"""
Given a set
And a group of {} asset ids
And the "set assets" endpoint of the repository for the "set" "id"
            And parameter "assets" is the set_asset_ids
When I make a "POST" request
Then I should receive a "200" response code
And response should have key "status" of 200
""".format(nbassets))
context.params = {}
@given(u'an array of 1 assets as the body')
def use_premade_asset(context):
body = [
{
"source_id_type": context.id_map['source_id_type'],
"source_id": context.id_map['source_id']
}
]
context.body = body
def generate_query(number, valid_id=True):
result = []
for i in range(number):
result.append(generate_query_object(valid_id))
return result
def generate_random_id(keytype="testcoid"):
uuidv = str(uuid.uuid4()).replace('-', '')
if keytype == "hub_keyS0":
return "https://openpermissions.org/s0/hub1/asset/testco/testcoid/%s"%(uuidv)
else:
return uuidv
def generate_query_object(valid_id):
source_id_type = COMMON_ASSET_DETAILS['source_id_type'] if valid_id else u"InvalidIdType"
return {"source_id_type": source_id_type,
"source_id": generate_random_id()}
@given(u'an invalid Query objects as the body')
def invalid_objects_as_body(context):
context.body = "gibberish"
@given(u'"{no_of_lic_offs}" offers have already been onboarded')
def add_offers(context, no_of_lic_offs):
try:
no_of_lic_offs = int(no_of_lic_offs)
except ValueError:
raise ValueError("Number of offers must be a number")
context.offer_ids = []
for _ in range(no_of_lic_offs):
add_an_offer(context, "valid")
if context.offer['id'] not in context.offer_ids:
context.offer_ids.append(context.offer['id'])
@given(u'"{no_of_lic_offs}" offers with sets have already been onboarded')
def add_offer_sets(context, no_of_lic_offs):
try:
no_of_lic_offs = int(no_of_lic_offs)
except ValueError:
raise ValueError("Number of offers must be a number")
context.offer_ids = []
for _ in range(no_of_lic_offs):
add_offer_and_set(context, "valid")
if context.offer['id'] not in context.offer_ids:
context.offer_ids.append(context.offer['id'])
@given(u'body is a "valid" {content_type} asset')
def inject_valid_asset_into_context_body(context, content_type):
source_id = generate_random_id()
entity_ids, asset_ttl = format_common_asset(source_id)
offer_id = getattr(context, 'offer', {}).get('id')
offer_ids = [offer_id] if offer_id else []
context.body = generate_asset(
offer_ids,
asset_ttl,
entity_ids[0],
content_type
)
@given(u'body is an "invalid" xml asset')
def inject_invalid_asset_into_context_body(context):
context.body = """
<?xml version="1.0" encoding="UTF-8"?>
<note>
<p>
badly formed xml
</note>
"""
@given(u'body is "not" an xml asset')
def inject_not_an_asset_into_context_body(context):
context.body = "not xml data"
date_format = "%Y-%m-%dT%H:%M:%SZ"
def isoformat(d):
"""
:param d: a date
:return: Returns valid iso8601 with timezone dateformat for linked data
"""
return d.strftime(date_format)
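# Usage sketch:
#   isoformat(datetime(2016, 1, 2, 3, 4, 5))  # -> '2016-01-02T03:04:05Z'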
def generate_asset(offer_ids, asset_ttl, entity_id, content_type='xml'):
graph = Graph()
graph.parse(data=asset_ttl, format='turtle')
for offer_id in offer_ids:
asset_offer_ttl = ASSET_DIRECT_OFFER_TEMPLATE.format(
prefixes=COMMON_PREFIXES,
id=entity_id,
offer_id=offer_id,
modified=isoformat(datetime.utcnow())
)
graph.parse(data=asset_offer_ttl, format='turtle')
asset = graph.serialize(format=content_type)
return asset
def generate_indirect_asset(asset_ttl, entity_id, offer_ids=[], set_ids=[], content_type='xml'):
graph = Graph()
graph.parse(data=asset_ttl, format='turtle')
for offer_id in offer_ids:
asset_offer_ttl = ASSET_INDIRECT_OFFER_TEMPLATE.format(
prefixes=COMMON_PREFIXES,
id=entity_id,
offer_id=offer_id,
set_id=offer_id+"5e4",
modified=isoformat(datetime.utcnow())
)
graph.parse(data=asset_offer_ttl, format='turtle')
for set_id in set_ids:
asset_offer_ttl = ASSET_INDIRECT_SET_TEMPLATE.format(
prefixes=COMMON_PREFIXES,
id=entity_id,
set_id=set_id,
modified=isoformat(datetime.utcnow())
)
graph.parse(data=asset_offer_ttl, format='turtle')
asset = graph.serialize(format=content_type)
return asset
def format_common_asset(source_id):
bnode_id = generate_random_id()
if COMMON_ASSET_DETAILS['source_id_type'] != 'hub_key':
entity_ids = [generate_random_id()]
else:
entity_ids = [source_id]
asset = ASSET_TEMPLATE.format(
prefixes=COMMON_PREFIXES,
id=entity_ids[0],
source_id_type=COMMON_ASSET_DETAILS['source_id_type'],
source_id=source_id,
bnode_id=bnode_id,
timestamp=datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
)
return (entity_ids, asset)
@given(u'an {asset} has been added for the given offer')
@clean_step
def add_asset_for_offers(context, asset):
assert hasattr(context, 'repository'), 'no repository set in the context'
if hasattr(context, 'offer_ids'):
offer_ids = context.offer_ids
elif hasattr(context, 'offer'):
offer_ids = [context.offer['id']]
else:
raise KeyError("Missing offer ID(s) for asset")
source_id = generate_random_id()
entity_ids, asset_ttl = format_common_asset(source_id)
if asset == "asset":
asset_xml = generate_asset(offer_ids, asset_ttl, entity_ids[0])
if asset == "indirect asset":
asset_xml = generate_indirect_asset(asset_ttl, entity_ids[0], offer_ids=offer_ids)
context.body = asset_xml
hub_key = TEMPLATE_HUBKEY.format(
repo_id=context.repository['id'],
id_type=asset,
id=unquote_plus(entity_ids[0].encode())
)
hub_key0 = TEMPLATE_HUBKEY_V0.format(
        entity_type='asset',
        org_id=COMMON_ASSET_DETAILS["organisation_id"],
        source_id_type=COMMON_ASSET_DETAILS['source_id_type'],
source_id=source_id
)
context.id_map = {
'source_id_type': COMMON_ASSET_DETAILS['source_id_type'],
'source_id': source_id,
'entity_id': unquote_plus(entity_ids[0].encode()),
'hub_key': hub_key,
'hub_key1': '/'.join(hub_key.split('/')[3:]),
'hub_key0': '/'.join(hub_key0.split('/')[3:])
}
context.asset = {'id': entity_ids[0]}
context.execute_steps(u"""
Given the "repository" service
And the repository "testco repo" belonging to "testco"
And the client ID is the "testco" "external" service ID
And the client has an access token granting "write" access to the repository
And the "assets" endpoint
And Header "Content-Type" is "application/xml"
And Header "Accept" is "application/json"
When I make a "POST" request with the repository id
""")
check_success(context)
def check_success(context):
if context.response.status_code != 200:
status_code = context.response.status_code
error = context.response_object.get("errors", [{}])[0]
source = error.get('source', 'not set').strip()
error_msg = error.get('message', 'not set').strip()
msg = '\n\n========================== CAPTURED ERROR ========================='
msg += "\nStatus code: {}\nSource: {}\nError message: {}\n".format(
status_code,
source,
error_msg
)
raise AssertionError(msg)
@given(u'an asset not in the repository')
def a_new_asset_not_in_repo(context):
source_id = generate_random_id()
context.id_map = {
'source_id_type': COMMON_ASSET_DETAILS['source_id_type'],
'source_id': source_id
}
@given(u'a body of {count} generated valid ids')
def add_valid_ids_to_body(context, count):
context.body = {
'ids': [{
'source_id_type': COMMON_ASSET_DETAILS['source_id_type'],
'source_id': generate_random_id()}
for _ in range(int(count))
]
}
@given(u'a body of {count} generated invalid ids')
def add_invalid_ids_to_body(context, count):
ids = []
for index in range(int(count)):
if index % 2:
ids.append({
'source_id_type': COMMON_ASSET_DETAILS['source_id_type']
})
else:
ids.append({'source_id': generate_random_id()})
context.body = {'ids': ids}
@given(u'a body of {count} generated invalid id types')
def add_invalid_types_to_body(context, count):
context.body = {'ids': [{'source_id_type': 'InvalidPictureIDType',
'source_id': generate_random_id()}
for _ in range(int(count))]}
@given(u'the "{resource}" endpoint of the repository for the "{obj}" "{attr}"')
def endpoint_of_the_repository(context, resource, obj, attr):
repository_id = get_repository_id(context)
context.execute_steps(u"""
Given the "{}" endpoint
""".format(resource))
resource_id = quote_plus(getattr(context, obj).get(attr))
context.endpoint = context.endpoint.format(repository_id, resource_id)
@given(u'the "{resource}" endpoint of the repository for an invalid {entity_type}')
def repository_endpoint_invalid_entity(context, resource, entity_type):
repository_id = get_repository_id(context)
context.execute_steps(u"""
Given the "{}" endpoint
""".format(resource))
context.endpoint = context.endpoint.format(repository_id, 'a' * 32)
@given(u'the additional IDs endpoint for the new asset')
def endpoint_for_asset(context):
repository_id = get_repository_id(context)
context.execute_steps(u"""
Given the "asset ids" endpoint
""")
entity_id = context.id_map['entity_id']
context.endpoint = context.endpoint.format(quote_plus(repository_id), quote_plus(entity_id))
@given(u'the additional IDs endpoint for an illegal asset')
def endpoint_for_illegal_asset(context):
repository_id = get_repository_id(context)
context.execute_steps(u"""
Given the "asset ids" endpoint
""")
entity_id = str(uuid.uuid4()).replace('-', '')
context.endpoint = context.endpoint.format(quote_plus(repository_id), quote_plus(entity_id))
@when(u'I query the "{service}" service for the asset')
def query_for_asset(context, service):
assert service == context.service_name, (
'expected context.service_name = {} got {}'.format(
service, context.service_name)
)
id_map = context.id_map
query_an_asset(context, id_map['source_id_type'], id_map['source_id'])
@when(u'I query the "{service}" service for the asset using a schema 0 hub key')
def query_for_asset_schema_0(context, service):
assert service == context.service_name, (
'expected context.service_name = {} got {}'.format(
service, context.service_name)
)
id_map = context.id_map
hub_key = 'https://openpermissions.org/s0/hub1/asset/maryevans/{}/{}'.format(
id_map['source_id_type'],
id_map['source_id'])
query_an_asset(context, 'hub_key', hub_key)
@when(u'I bulk query the "{service}" service for the asset')
def bulk_query_for_asset(context, service):
assert service == context.service_name, (
'expected context.service_name = {} got {}'.format(
service, context.service_name)
)
id_map = context.id_map
body = [
{
'source_id_type': id_map['source_id_type'],
'source_id': id_map['source_id']
}
]
query_by_source_id_and_type(context, body)
@when(u'I bulk query the "{service}" service for the asset using a schema 0 hub key')
def bulk_query_for_asset_schema_0(context, service):
assert service == context.service_name, (
'expected context.service_name = {} got {}'.format(
service, context.service_name)
)
id_map = context.id_map
hub_key = 'https://openpermissions.org/s0/hub1/asset/maryevans/{}/{}'.format(
id_map['source_id_type'],
id_map['source_id'])
body = [
{
'source_id_type': 'hub_key',
'source_id': hub_key
}
]
query_by_source_id_and_type(context, body)
@when(u'I query the "{service}" service for the asset together with another asset')
def query_for_multi_assets(context, service):
assert service == context.service_name, (
'expected context.service_name = {} got {}'.format(
service, context.service_name)
)
id_map = context.id_map
body = [
{
'source_id_type': id_map['source_id_type'],
'source_id': id_map['source_id']
}
]
body.append(generate_query_object(True))
query_by_source_id_and_type(context, body)
@clean_step
def query_an_asset(context, source_id_type, source_id):
context.execute_steps(u"""
Given Header "Content-Type" is "application/json"
And Header "Accept" is "application/json"
And parameter "source_id_type" is "{}"
And parameter "source_id" is "{}"
When I make a "GET" request
""".format(source_id_type, source_id))
@clean_step
def query_by_source_id_and_type(context, body):
context.body = body
context.execute_steps(u"""
Given Header "Content-Type" is "application/json"
And Header "Accept" is "application/json"
When I make a "POST" request
""")
@given(u'an array of "{number}" "{query_type}" Query objects as the body')
def step_impl(context, number, query_type):
num = int(number)
if query_type == "no result":
        context.body = generate_query(num)
elif query_type == "resulting":
context.body = get_query(context, num)
elif query_type == "mixed":
assert num >= 2
div, mod = divmod(num, 2)
context.body = generate_query(div + mod)
context.body += get_query(context, div)
elif query_type == "invalid id type":
context.body = generate_query(num, False)
else:
assert False
@given(u'the additional id \"{source_id_type}\" \"{source_id}\" has been attached to the asset')
def added_ids(context, source_id_type, source_id):
context.id_to_be_attached = {
'ids': [
{
'source_id_type': source_id_type,
'source_id': source_id
}
]
}
context.clean_execute_steps(u"""
Given the "repository" service
And the repository "testco repo" belonging to "testco"
And the client ID is the "testco" "external" service ID
And the client has an access token granting "write" access to the repository
And the request body is the "id_to_be_attached"
And Header "Content-Type" is "application/json"
And Header "Accept" | |
import os
from datetime import date as datetime
from flask import Flask, Markup, redirect, render_template, request, session
from flask_session import Session
from sqlalchemy import (
Column,
ForeignKey,
Integer,
Sequence,
String,
create_engine,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
import config
app = Flask(__name__)
# Check for environment variable
if not os.getenv("DATABASE_URL"):
raise RuntimeError("DATABASE_URL is not set")
# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
class User(Base):
__tablename__ = "users"
id = Column(Integer, Sequence("users_sequence"), primary_key=True)
username = Column(String, nullable=False, unique=True)
password = Column(String, nullable=False)
name = Column(String, nullable=False)
elevation = Column(Integer, nullable=False, default=0)
posts = relationship("Post", back_populates="user")
comments = relationship("Comment", back_populates="user")
def __repr__(self):
return str(
[
self.id,
self.username,
self.password,
self.name,
self.elevation,
self.posts,
self.comments,
]
)
class Post(Base):
__tablename__ = "posts"
id = Column(Integer, Sequence("posts_sequence"), primary_key=True)
title = Column(String, nullable=False, unique=True)
slug = Column(String, nullable=False, unique=True)
user_id = Column(Integer, ForeignKey("users.id"))
year = Column(String, nullable=False)
month = Column(String, nullable=False)
date = Column(String, nullable=False)
content = Column(String, nullable=False)
user = relationship("User", back_populates="posts")
comments = relationship("Comment", back_populates="post")
def __repr__(self):
return str(
[
self.id,
self.title,
self.slug,
"{d}-{m}-{y}".format(d=self.year, m=self.month, y=self.date),
self.user.name,
self.comments,
]
)
class Page(Base):
__tablename__ = "pages"
id = Column(Integer, Sequence("pages_sequence"), primary_key=True)
title = Column(String, nullable=False, unique=True)
slug = Column(String, nullable=False, unique=True)
precedence = Column(Integer, nullable=False)
content = Column(String, nullable=False)
def __repr__(self):
return str([self.id, self.title, self.slug, self.precedence])
class Comment(Base):
__tablename__ = "comments"
id = Column(Integer, Sequence("comments_sequence"), primary_key=True)
content = Column(String, nullable=False)
user_id = Column(Integer, ForeignKey("users.id"))
post_id = Column(Integer, ForeignKey("posts.id"))
user = relationship("User", back_populates="comments")
post = relationship("Post", back_populates="comments")
def __repr__(self):
return str([self.id, self.user.name, self.post.slug])
Base.metadata.create_all(engine)
if (
db.query(User).all() == []
and db.query(Post).all() == []
and db.query(Page).all() == []
and db.query(Comment).all() == []
):
db.add(
User(
username="admin", password="password", name="Konikal", elevation=4
)
)
db.add(
Post(
title="Hello world!",
slug="hello-world",
user_id=1,
year=datetime.today().strftime("%Y"),
month=datetime.today().strftime("%m"),
date=datetime.today().strftime("%d"),
content="""{"ops":[{"insert":"Welcome to Konikal. This is your first post. Edit or delete it, then start posting!"}]}""",
)
)
db.add(
Page(
title="Home",
slug="home",
precedence="1",
content="""{"ops":[{"insert":"Welcome to Konikal. This is your home page. Edit or delete it, then start posting!"}]}""",
)
)
db.add(
Comment(
content="""{"ops":[{"insert":"This is your first comment. Edit or delete it, then start posting!"}]}""",
user_id=1,
post_id=1,
)
)
db.commit()
###############################################################################
# Main page
@app.route("/")
def root():
session["route"] = "/"
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if home is not None:
return render_template(
"page.html",
pagebar=Markup(config.pagebar["home"].format(pagebar=pagebar)),
body=Markup(
config.page["page"].format(
title=home.title, content=home.content
)
),
custom=config.custom,
)
else:
body = ""
posts = (
db.query(Post)
.order_by(
Post.year.desc(),
Post.month.desc(),
Post.date.desc(),
Post.id.desc(),
)
.all()
)
if posts != []:
for i in posts:
body += """
<tr>
<td><a href="/posts/{year}/{month}/{date}/{slug}">{title}</a></td>
<td>By {author}</td>
<td><a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a></td>
</tr>
""".format(
title=i.title,
slug=i.slug,
author=i.user.name,
year=i.year,
month=i.month,
date=i.date,
)
else:
body = "No posts"
return render_template(
"page.html",
pagebar=Markup(config.pagebar["no_home"].format(pagebar=pagebar)),
body=Markup(
config.page["posts"].format(page="All Posts", body=body)
),
custom=config.custom,
)
# Redirect route
@app.route("/route")
def route():
if "route" in session:
if session["route"].find("/user/") != -1:
return redirect("/")
else:
return redirect(session["route"])
else:
return redirect("/")
# Search page
@app.route("/search", methods=["GET"])
def search():
    search = request.args.get("search", "").lower()
session["search"] = search
body = ""
posts = (
db.query(Post)
.order_by(
Post.year.desc(),
Post.month.desc(),
Post.date.desc(),
Post.id.desc(),
)
.all()
)
results = []
for i in posts:
        if i.title.lower().find(search) != -1 or i.content.lower().find(search) != -1:
results.append(i)
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
    if results != []:
for i in results:
body += """
<tr>
<td><a href="/posts/{year}/{month}/{date}/{slug}">{title}</a></td>
<td>By {author}</td>
<td><a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a></td>
</tr>
""".format(
title=i.title,
slug=i.slug,
author=i.user.name,
year=i.year,
month=i.month,
date=i.date,
)
else:
body = "No posts"
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
session["route"] = "/posts"
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(config.page["posts"].format(page="Search: " + search, body=body)),
custom=config.custom,
)
# All posts page
@app.route("/posts")
def posts():
body = ""
posts = (
db.query(Post)
.order_by(
Post.year.desc(),
Post.month.desc(),
Post.date.desc(),
Post.id.desc(),
)
.all()
)
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if posts != []:
for i in posts:
body += """
<tr>
<td><a href="/posts/{year}/{month}/{date}/{slug}">{title}</a></td>
<td>By {author}</td>
<td><a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a></td>
</tr>
""".format(
title=i.title,
slug=i.slug,
author=i.user.name,
year=i.year,
month=i.month,
date=i.date,
)
else:
body = "No posts"
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
session["route"] = "/posts"
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(config.page["posts"].format(page="All Posts", body=body)),
custom=config.custom,
)
# Year posts page
@app.route("/posts/<year>")
def posts_year(year):
body = ""
posts = (
db.query(Post)
.filter_by(year=year)
.order_by(Post.month.desc(), Post.date.desc(), Post.id.desc())
.all()
)
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if posts != []:
for i in posts:
body += """
<tr>
<td><a href="/posts/{year}/{month}/{date}/{slug}">{title}</a></td>
<td>By {author}</td>
<td><a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a></td>
</tr>
""".format(
title=i.title,
slug=i.slug,
author=i.user.name,
year=year,
month=i.month,
date=i.date,
)
else:
body = "No posts"
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
session["route"] = "/posts/{year}".format(year=year)
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(config.page["posts"].format(page=year, body=body)),
custom=config.custom,
)
# Month posts page
@app.route("/posts/<year>/<month>")
def posts_year_month(year, month):
body = ""
posts = (
db.query(Post)
.filter_by(year=year, month=month)
.order_by(Post.date.desc(), Post.id.desc())
.all()
)
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if posts != []:
for i in posts:
body += """
<tr>
<td><a href="/posts/{year}/{month}/{date}/{slug}">{title}</a></td>
<td>By {author}</td>
<td><a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a></td>
</tr>
""".format(
title=i.title,
slug=i.slug,
author=i.user.name,
year=year,
month=month,
date=i.date,
)
else:
body = "No posts"
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
session["route"] = "/posts/{year}/{month}".format(year=year, month=month)
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(
config.page["posts"].format(
page="{month}-{year}".format(month=month, year=year), body=body
)
),
custom=config.custom,
)
# Date posts page
@app.route("/posts/<year>/<month>/<date>")
def posts_year_month_date(year, month, date):
body = ""
posts = (
db.query(Post)
.filter_by(year=year, month=month, date=date)
.order_by(Post.id.desc())
.all()
)
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if posts != []:
for i in posts:
body += """
<tr>
<td><a href="/posts/{year}/{month}/{date}/{slug}">{title}</a></td>
<td>By {author}</td>
<td><a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a></td>
</tr>
""".format(
title=i.title,
slug=i.slug,
author=i.user.name,
year=year,
month=month,
date=date,
)
else:
body = "No posts"
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
session["route"] = "/posts/{year}/{month}/{date}".format(
year=year, month=month, date=date
)
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(
config.page["posts"].format(
page="{date}-{month}-{year}".format(
date=date, month=month, year=year
),
body=body,
)
),
custom=config.custom,
)
# Post page
@app.route("/posts/<year>/<month>/<date>/<slug>")
def posts_year_month_date_slug(year, month, date, slug):
post = (
db.query(Post)
.filter_by(year=year, month=month, date=date, slug=slug)
.first()
)
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if post is not None:
session["route"] = "/posts/{year}/{month}/{date}/{slug}".format(
year=year, month=month, date=date, slug=slug
)
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(
config.page["post"].format(
title=post.title,
date="""<a href="/posts/{year}/{month}/{date}">{date}</a>-<a href="/posts/{year}/{month}">{month}</a>-<a href="/posts/{year}">{year}</a>""".format(
date=date, month=month, year=year
),
author=post.user.name,
content=post.content,
)
),
custom=config.custom,
)
else:
session["error"] = "alert('Invalid route: Post does not exist');"
return redirect(session["route"])
# Page page
@app.route("/<slug>")
def slug(slug):
page = db.query(Page).filter_by(slug=slug).first()
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if page is not None:
if slug == "home":
return redirect("/")
else:
session["route"] = "/{slug}".format(slug=slug)
if home is not None:
pagebar = Markup(
config.pagebar["home"].format(pagebar=pagebar)
)
else:
pagebar = Markup(
config.pagebar["no_home"].format(pagebar=pagebar)
)
session["route"] = "/{slug}".format(slug=slug)
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(
config.page["page"].format(
title=page.title, content=page.content
)
),
custom=config.custom,
)
else:
session["error"] = "alert('Invalid route: Page does not exist');"
return redirect(session["route"])
# Login page
@app.route("/login")
def login():
if "user" not in session:
pagebar = ""
pages = db.query(Page).order_by(Page.precedence.desc()).all()
for i in pages:
pagebar += """
<li class="nav-item active">
<a class="nav-link" href="/{slug}">{title}</a>
</li>
""".format(
title=i.title, slug=i.slug
)
home = db.query(Page).filter_by(slug="home").first()
if home is not None:
pagebar = Markup(config.pagebar["home"].format(pagebar=pagebar))
else:
pagebar = Markup(config.pagebar["no_home"].format(pagebar=pagebar))
return render_template(
"page.html",
pagebar=pagebar,
body=Markup(config.page["login"]),
custom=config.custom,
)
else:
session["error"] = "alert('Invalid login: Already logged in');"
return redirect("/route")
# Login processing
@app.route("/login/done", methods=["POST"])
def login_done():
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import conary_test
from conary_test import rephelp
from conary.deps import deps
from conary import versions
from conary.build.policy import PolicyError
from conary.build.errors import CookError
from conary import rpmhelper
from conary_test import resources
pythonVer = "%s.%s" % sys.version_info[:2]
class RpmCapsuleTest(rephelp.RepositoryHelper):
@conary_test.rpm
def testRPMCapsuleEpoch(self):
recipestr1 = r"""
class TestEpoch(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('epoch-1.0-1.i386.rpm')
"""
built, d = self.buildRecipe(recipestr1, "TestEpoch")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
self.assertEquals(trv.troveInfo.capsule.rpm.epoch(), 17)
@conary_test.rpm
def testScriptHasLdSoConf(self):
'test warning on capsule scripts containing "ld.so.conf" (CNP-185)'
recipestr = """
class TestLdSoConf(CapsuleRecipe):
name = 'scripts'
version = '1.0_1'
clearBuildReqs()
def setup(r):
r.addCapsule('scripts-1.0-1.x86_64.rpm')
"""
# note that cookrpmcapsuletest:testCookWithScripts tests success case
self.assertRaises(PolicyError, self.buildRecipe, recipestr,
'TestLdSoConf')
@conary_test.rpm
def testRPMCapsuleDepPolicy(self):
""" Make sure that RPMProvide and RPMProvide work"""
recipestr1 = r"""
class TestEpoch(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('epoch-1.0-1.i386.rpm')
r.RPMProvides('rpm: nonsenseProvision(FOO BAR)', 'epoch:rpm' )
r.RPMRequires('rpm: nonsenseRequirement(BAZ QUX)', 'epoch' )
"""
self.cfg.enableRPMVersionDeps = False
built, d = self.buildRecipe(recipestr1, "TestEpoch")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
self.assertEquals(str(trv.provides()),
'\n'.join(('trove: epoch:rpm',
'rpm: epoch',
'rpm: epoch[x86-32]',
'rpm: nonsenseProvision(BAR FOO)')))
self.assertEquals(str(trv.requires),
'\n'.join(('rpm: nonsenseRequirement(BAZ QUX)',
'rpmlib: CompressedFileNames',
'rpmlib: PayloadFilesHavePrefix')))
self.cfg.enableRPMVersionDeps = True
built, d = self.buildRecipe(recipestr1, "TestEpoch")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
self.assertEquals(str(trv.provides()),
'\n'.join(('trove: epoch:rpm',
'rpm: epoch',
'rpm: epoch-17:1.0',
'rpm: epoch-17:1.0-1',
'rpm: epoch[x86-32]',
'rpm: epoch[x86-32]-17:1.0',
'rpm: epoch[x86-32]-17:1.0-1',
'rpm: nonsenseProvision(BAR FOO)')))
self.assertEquals(str(trv.requires),
'\n'.join(('rpm: nonsenseRequirement(BAZ QUX)',
'rpmlib: CompressedFileNames',
'rpmlib: PayloadFilesHavePrefix')))
@conary_test.rpm
def testRPMCapsuleDepPolicy2(self):
"""Make sure that we can't specify non rpm and rpmlib deps using
RPMProvides"""
recipestr1 = r"""
class TestEpoch(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('epoch-1.0-1.i386.rpm')
r.RPMProvides('soname: nonsenseProvision(FOO BAR)', 'epoch' )
"""
try:
self.buildRecipe(recipestr1, "TestEpoch")
except CookError, e:
err = str(e).split('\n')[1]
self.assertEqual(
str(err),
" PolicyError: RPMProvides cannot "
"be used to provide the non-rpm dependency: 'soname: "
"nonsenseProvision(FOO BAR)'")
@conary_test.rpm
def testRPMCapsuleDepPolicy3(self):
"""Make sure that we can't specify non rpm and rpmlib deps using
RPMRequires"""
recipestr1 = r"""
class TestEpoch(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('epoch-1.0-1.i386.rpm')
r.RPMRequires('soname: nonsenseProvision(FOO BAR)', 'epoch' )
"""
try:
self.buildRecipe(recipestr1, "TestEpoch")
except CookError, e:
err = str(e).split('\n')[1]
self.assertEqual(
str(err),
" PolicyError: RPMRequires cannot "
"be used to provide the non-rpm dependency: 'soname: "
"nonsenseProvision(FOO BAR)'")
@conary_test.rpm
def testRPMProvidesExceptions(self):
"""
Make sure you can add exceptions for rpm dependencies. You need to be
able to do this when an RPM that you are installing incorrectly provides
something that the system provides to avoid installing all RPMs as one
large job.
It only makes sense to do exceptDeps for RPMProvides since rpm
provisions aren't actually attached to files.
"""
recipe1 = """
class TestRecipe(CapsuleRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
r.addCapsule('perl-Archive-Tar-1.46-68.fc11.x86_64.rpm')
"""
recipe2 = recipe1 + "\n r.RPMProvides(exceptDeps='rpm: perl.*')"
def getPerlProvides(trv):
return [ x for x in str(trv.provides()).split('\n')
if x.startswith('rpm: perl') ]
self.cfg.enableRPMVersionDeps = False
r1trv = self.build(recipe1, 'TestRecipe')
r1provides = getPerlProvides(r1trv)
self.assertEqual(len(r1provides), 5)
r2trv = self.build(recipe2, 'TestRecipe')
r2provides = getPerlProvides(r2trv)
self.assertEqual(len(r2provides), 0)
self.assertTrue([ x for x in str(r2trv.provides()) ])
self.cfg.enableRPMVersionDeps = True
r1trv = self.build(recipe1, 'TestRecipe')
r1provides = getPerlProvides(r1trv)
self.assertEqual(len(r1provides), 19)
r2trv = self.build(recipe2, 'TestRecipe')
r2provides = getPerlProvides(r2trv)
self.assertEqual(len(r2provides), 0)
self.assertTrue([ x for x in str(r2trv.provides()) ])
@conary_test.rpm
def testRPMCapsuleKernelModMerging(self):
'''
Make sure that RPMRequires passes through mergeKmodSymbols correctly
'''
def checkDeps(built, reqExpected, provExpected):
nvf = built
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmDependencies))
provGot = list(trv.provides().iterDepsByClass(deps.RpmDependencies))
self.assertEquals(str(reqGot), reqExpected)
self.assertEquals(str(provGot), provExpected)
recipestr1 = r"""
class TestKernel(CapsuleRecipe):
name = 'kernelish'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('kernelish-1.0-1.noarch.rpm')
r.RPMRequires(mergeKmodSymbols=True)
"""
self.cfg.enableRPMVersionDeps = False
built, d = self.buildRecipe(recipestr1, "TestKernel")
req = "[Dependency('ksym', flags={'bar:123456789abcdef': 1, 'foo:123456789abcdef': 1})]"
prov = "[Dependency('kernel', flags={'bar:123456789abcdef': 1, 'foo:123456789abcdef': 1}), Dependency('kernelish')]"
checkDeps(built[0], req, prov)
self.cfg.enableRPMVersionDeps = True
built, d = self.buildRecipe(recipestr1, "TestKernel")
req = "[Dependency('ksym', flags={'bar:123456789abcdef': 1, 'foo:123456789abcdef': 1})]"
prov = "[Dependency('kernel', flags={'bar:123456789abcdef': 1, 'foo:123456789abcdef': 1}), Dependency('kernelish-1.0'), Dependency('kernelish-0:1.0'), Dependency('kernelish-0:1.0-1'), Dependency('kernelish'), Dependency('kernelish-1.0-1')]"
checkDeps(built[0], req, prov)
recipestr2 = r"""
class TestKernel(CapsuleRecipe):
name = 'kernelish'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('kernelish-1.0-1.noarch.rpm')
r.RPMRequires(mergeKmodSymbols=False)
"""
self.cfg.enableRPMVersionDeps = False
built, d = self.buildRecipe(recipestr2, "TestKernel")
req = "[Dependency('ksym[bar:123456789abcdef]'), Dependency('ksym[foo:123456789abcdef]')]"
prov = "[Dependency('kernel[foo:123456789abcdef]'), Dependency('kernelish'), Dependency('kernel[bar:123456789abcdef]')]"
checkDeps(built[0], req, prov)
self.cfg.enableRPMVersionDeps = True
built, d = self.buildRecipe(recipestr2, "TestKernel")
req = "[Dependency('ksym[bar:123456789abcdef]'), Dependency('ksym[foo:123456789abcdef]')]"
prov = '[%s]' % ', '.join([
"Dependency('kernelish-1.0')",
"Dependency('kernel[bar:123456789abcdef]')",
"Dependency('kernelish-0:1.0')",
"Dependency('kernelish-0:1.0-1')",
"Dependency('kernel[foo:123456789abcdef]')",
"Dependency('kernelish')",
"Dependency('kernelish-1.0-1')",
])
checkDeps(built[0], req, prov)
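# Illustrative note: with mergeKmodSymbols=True the kernel-module symbol
# deps collapse into a single 'ksym'/'kernel' dependency whose flags carry
# the per-symbol checksums, while mergeKmodSymbols=False keeps one
# dependency per symbol. A recipe picks the behaviour explicitly, e.g.:
#
#     r.addCapsule('kernelish-1.0-1.noarch.rpm')
#     r.RPMRequires(mergeKmodSymbols=False)   # one ksym dep per symbol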
@conary_test.rpm
def testRPMCapsuleDepCulling(self):
""" Make sure that RPMRequires redundent rpm requires are culled"""
recipestr1 = r"""
class TestDepCulling(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('gnome-main-menu-0.9.10-26.x86_64.rpm')
"""
self.cfg.enableRPMVersionDeps = False
self.overrideBuildFlavor('is: x86_64')
built, d = self.buildRecipe(recipestr1, "TestDepCulling")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmDependencies))
reqExpected = "[Dependency('hal'), Dependency('gnome-main-menu-lang'), Dependency('gnome-panel'), Dependency('tango-icon-theme'), Dependency('coreutils'), Dependency('dbus-1-glib'), Dependency('libssui'), Dependency('eel'), Dependency('wireless-tools')]"
self.assertEquals(str(reqGot), reqExpected)
self.cfg.enableRPMVersionDeps = True
built, d = self.buildRecipe(recipestr1, "TestDepCulling")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmDependencies))
reqExpected = "[Dependency('hal'), Dependency('gnome-main-menu-lang'), Dependency('gnome-main-menu-lang-0.9.10'), Dependency('gnome-panel'), Dependency('tango-icon-theme'), Dependency('coreutils'), Dependency('dbus-1-glib'), Dependency('libssui'), Dependency('eel'), Dependency('wireless-tools')]"
self.assertEquals(str(reqGot), reqExpected)
@conary_test.rpm
def testRPMRequiresExceptions(self):
""" Make sure that RPMRequires's exceptions argument works"""
recipestr1 = r"""
class TestRPMRequiresExceptions(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('gnome-main-menu-0.9.10-26.x86_64.rpm')
r.RPMRequires(exceptions='gnome-main-menu.*rpm')
"""
self.overrideBuildFlavor('is: x86_64')
built, d = self.buildRecipe(recipestr1, "TestRPMRequiresExceptions")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmDependencies))
reqExpected = "[]"
self.assertEquals(str(reqGot), reqExpected)
@conary_test.rpm
def testRPMRequiresExceptDeps1(self):
""" Make sure that RPMRequires's exceptDeps argument works"""
recipestr1 = r"""
class TestRPMRequiresExceptDeps(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('gnome-main-menu-0.9.10-26.x86_64.rpm')
r.RPMRequires(exceptDeps='rpmlib: .*')
"""
self.overrideBuildFlavor('is: x86_64')
built, d = self.buildRecipe(recipestr1, "TestRPMRequiresExceptDeps")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmLibDependencies))
reqExpected = "[]"
self.assertEquals(str(reqGot), reqExpected)
@conary_test.rpm
def testRPMRequiresExceptDeps2(self):
""" Make sure that RPMRequires's exceptDeps argument works"""
recipestr1 = r"""
class TestRPMRequiresExceptDeps(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('gnome-main-menu-0.9.10-26.x86_64.rpm')
r.RPMRequires(exceptDeps=('gnome-main-menu.*','rpmlib: .*'))
"""
self.overrideBuildFlavor('is: x86_64')
built, d = self.buildRecipe(recipestr1, "TestRPMRequiresExceptDeps")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmLibDependencies))
reqExpected = "[]"
self.assertEquals(str(reqGot), reqExpected)
@conary_test.rpm
def testRPMRequiresExceptDeps3(self):
""" Make sure that RPMRequires's exceptDeps argument works"""
recipestr1 = r"""
class TestRPMRequiresExceptDeps(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('gnome-main-menu-0.9.10-26.x86_64.rpm')
r.RPMRequires(exceptDeps=(('gnome-main-menu.*','rpm: .*'),) )
"""
self.overrideBuildFlavor('is: x86_64')
built, d = self.buildRecipe(recipestr1, "TestRPMRequiresExceptDeps")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
reqGot = list(trv.requires().iterDepsByClass(deps.RpmDependencies))
reqExpected = "[]"
self.assertEquals(str(reqGot), reqExpected)
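# Illustrative summary of the three exceptDeps forms exercised above: a
# single dependency regexp, a tuple of dependency regexps, and a tuple of
# (troveFilter, depRegexp) pairs:
#
#     r.RPMRequires(exceptDeps='rpmlib: .*')
#     r.RPMRequires(exceptDeps=('gnome-main-menu.*', 'rpmlib: .*'))
#     r.RPMRequires(exceptDeps=(('gnome-main-menu.*', 'rpm: .*'),))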
@conary_test.rpm
def testRPMCapsuleUserGroup(self):
recipestr1 = r"""
class TestGroup(CapsuleRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addCapsule('ownerships-1.0-1.i386.rpm')
"""
built, d = self.buildRecipe(recipestr1, "TestGroup")
nvf = built[0]
nvf = nvf[0], versions.VersionFromString(nvf[1]), nvf[2]
repos = self.openRepository()
trv = repos.getTrove(*nvf)
self.assertEquals(trv.requires(), deps.ThawDependencySet(
'17#CompressedFileNames|17#PayloadFilesHavePrefix|'
'17#PayloadIsBzip2'))
@conary_test.rpm
def testRPMCapsuleGhost(self):
recipestr1 = r"""
class TestGhost(CapsuleRecipe):
name = 'ghost'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addCapsule('ghost-1.0-1.i386.rpm')
# ensure that initialContents overrides transient
r.Transient('/foo/ghost')
"""
built, d = self.buildRecipe(recipestr1, "TestGhost")
client = self.getConaryClient()
repos = client.getRepos()
nvf = repos.findTrove(None, built[0])
trv = repos.getTrove(*nvf[0])
fileList = list(trv.iterFileList())
fileObjs = repos.getFileVersions([(x[0], x[2], x[3]) for x in fileList
if x[1] == '/foo/ghost'])
for fileInfo, fileObj in zip(fileList, fileObjs):
self.assertFalse(fileObj.flags.isConfig(),
"Expected config to be unset for %s" % fileInfo[1])
self.assertFalse(fileObj.flags.isTransient(),
"Expected transient to be unset for %s" % fileInfo[1])
self.assertTrue(fileObj.flags.isInitialContents(),
"Expected initialContents for %s" % fileInfo[1])
@conary_test.rpm
def testRPMCapsuleDeps(self):
'make sure that rpm capsule deps are correct'
recipestr1 = r"""
class TestProvides(CapsuleRecipe):
name = 'depstest'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addCapsule('depstest-0.1-1.x86_64.rpm')
"""
self.cfg.enableRPMVersionDeps = False
built, d = self.buildRecipe(recipestr1, "TestProvides")
client = self.getConaryClient()
repos = client.getRepos()
nvf = repos.findTrove(None, built[0])
trv = repos.getTrove(*nvf[0])
reqExpected = '\n'.join((
'abi: ELF32(SysV x86)',
'file: /bin/sh',
'soname: ELF32/ld-linux.so.2(GLIBC_PRIVATE SysV | |
, rloc, port):
    self.rloc.copy_address(rloc)
    self.translated_rloc.copy_address(rloc)
    self.translated_port = port

def is_rloc_translated(self):
    return(self.translated_rloc.is_null() == False)

def rloc_exists(self):
    if (self.rloc.is_null() == False): return(True)
    if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
        return(False)
    return(True)

def is_rtr(self):
    return((self.priority == 254 and self.mpriority == 255 and
        self.weight == 0 and self.mweight == 0))

def print_state_change(self, new_state):
    old_state = self.print_state()
    state_str = "{} -> {}".format(old_state, new_state)
    if (new_state == "up" and self.unreach_state()):
        state_str = bold(state_str, False)
    return(state_str)

def print_rloc_probe_rtt(self):
    if (self.rloc_probe_rtt == -1): return("none")
    return(self.rloc_probe_rtt)

def print_recent_rloc_probe_rtts(self):
    rtts = str(self.recent_rloc_probe_rtts)
    rtts = rtts.replace("-1", "?")
    return(rtts)

def compute_rloc_probe_rtt(self):
    #
    # Compute RTT from the last probe/reply timestamps and keep a short
    # history of recent RTTs.
    #
    last_rtt = self.rloc_probe_rtt
    self.rloc_probe_rtt = -1
    if (self.last_rloc_probe_reply == None): return
    if (self.last_rloc_probe == None): return
    self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
    self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
    recent = self.recent_rloc_probe_rtts
    self.recent_rloc_probe_rtts = [last_rtt] + recent[0:-1]

def print_rloc_probe_hops(self):
    return(self.rloc_probe_hops)

def print_recent_rloc_probe_hops(self):
    return(str(self.recent_rloc_probe_hops))

def store_rloc_probe_hops(self, to_hops, from_ttl):
    #
    # Store hop counts as a "to/from" string and keep a short history.
    #
    if (to_hops == 0):
        to_hops = "?"
    elif (to_hops < LISP_RLOC_PROBE_TTL / 2):
        to_hops = "!"
    else:
        to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)

    if (from_ttl < LISP_RLOC_PROBE_TTL / 2):
        from_hops = "!"
    else:
        from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)

    last_hops = self.rloc_probe_hops
    self.rloc_probe_hops = to_hops + "/" + from_hops
    recent = self.recent_rloc_probe_hops
    self.recent_rloc_probe_hops = [last_hops] + recent[0:-1]

def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
    #
    # Find the RLOC entry whose outstanding probe nonce matches this reply,
    # mark it up, record RTT and hop counts, and log the reply.
    #
    rloc_entry = self
    while (True):
        if (rloc_entry.last_rloc_probe_nonce == nonce): break
        rloc_entry = rloc_entry.next_rloc
        if (rloc_entry == None):
            lprint(" No matching nonce state found for nonce 0x{}".format(
                lisp_hex_string(nonce)))
            return

    rloc_entry.last_rloc_probe_reply = lisp_get_timestamp()
    rloc_entry.compute_rloc_probe_rtt()
    state_change = rloc_entry.print_state_change("up")
    if (rloc_entry.state != LISP_RLOC_UP_STATE):
        lisp_update_rtr_updown(rloc_entry.rloc, True)
        rloc_entry.state = LISP_RLOC_UP_STATE
        rloc_entry.last_state_change = lisp_get_timestamp()
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)

    rloc_entry.store_rloc_probe_hops(hop_count, ttl)

    probe_str = bold("RLOC-probe reply", False)
    addr_str = rloc_entry.rloc.print_address_no_iid()
    rtt_str = bold(str(rloc_entry.print_rloc_probe_rtt()), False)
    port_str = ":{}".format(self.translated_port) if \
        self.translated_port != 0 else ""

    nh_str = ""
    if (rloc_entry.rloc_next_hop != None):
        nh_device, nh_address = rloc_entry.rloc_next_hop
        nh_str = ", nh {}({})".format(nh_address, nh_device)

    eid_str = green(lisp_print_eid_tuple(eid, group), False)
    lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " +
        "to-ttl/from-ttl {}").format(probe_str, red(addr_str, False),
        port_str, eid_str, state_change, rtt_str, nh_str,
        str(hop_count) + "/" + str(ttl)))

    if (rloc_entry.rloc_next_hop == None): return

    #
    # Walk the RLOC chain to find the up entry with the lowest measured RTT.
    #
    rloc_entry = None
    best = None
    while (True):
        rloc_entry = self if rloc_entry == None else rloc_entry.next_rloc
        if (rloc_entry == None): break
        if (rloc_entry.up_state() == False): continue
        if (rloc_entry.rloc_probe_rtt == -1): continue
        if (best == None): best = rloc_entry
        if (rloc_entry.rloc_probe_rtt < best.rloc_probe_rtt): best = rloc_entry
# Repo: arccode/factory
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import errno
import json
import os
from unittest import mock
import xmlrpc.client
import rest_framework.status
import rest_framework.test
from backend import models
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
@contextlib.contextmanager
def TestData(file_name, deserialize=True):
"""Load a JSON file under the testdata folder using the with statement."""
with open(os.path.join(SCRIPT_DIR, 'testdata', file_name)) as f:
if deserialize:
yield json.load(f)
else:
yield f.read()
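# Illustrative usage inside a test method (file name and keys are
# hypothetical):
#
#     with TestData('some_config.json') as config:
#         self.assertIn('projects', config)       # parsed JSON (dict/list)
#     with TestData('some_config.json', deserialize=False) as raw:
#         self.assertTrue(raw.startswith('{'))    # raw file contents (str)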
class UploadedFileTest(rest_framework.test.APITestCase):
def setUp(self):
with open(__file__) as f:
response = self.client.post('/files/', data={'file': f})
self.uploaded_file_id = response.json()['id']
def testWithUploadedFile(self):
"""The normal use case of UploadedFile."""
with models.UploadedFile(self.uploaded_file_id) as path:
self.assertTrue(os.path.isfile(path))
self.assertFalse(os.path.exists(path))
@mock.patch('os.unlink')
def testWithUploadedFileNoSuchFile(self, unlink):
"""The uploaded file will be removed after used, but it doesn't matter if it
has already been removed."""
unlink.side_effect = OSError(errno.ENOENT, 'No such file')
with models.UploadedFile(self.uploaded_file_id) as path:
unlink.assert_not_called()
unlink.assert_called_once_with(path)
@mock.patch('os.unlink')
def testWithUploadedFileUnlinkRaisesErrorOtherThanENOENT(self, unlink):
"""Test if os.unlink() raises error other than ENOENT."""
unlink.side_effect = OSError(errno.EACCES, 'Permission denied')
# This case should never happen actually, but if it happened, we'll just
# raise.
with self.assertRaises(OSError):
with models.UploadedFile(self.uploaded_file_id):
pass
@mock.patch('os.rmdir')
def testWithUploadedFileDirectoryNotEmpty(self, rmdir):
"""The code will try to remove the parent directory of the uploaded file,
but will fail if it's not empty, which we don't care."""
rmdir.side_effect = OSError(errno.ENOTEMPTY, 'Directory not empty')
with models.UploadedFile(self.uploaded_file_id) as path:
rmdir.assert_not_called()
rmdir.assert_called_once_with(os.path.dirname(path))
@mock.patch('os.rmdir')
def testWithUploadedFileRmdirRaisesErrorOtherThanENOTEMPTY(self, rmdir):
"""Test if os.rmdir() raises error other than ENOTEMPTY."""
rmdir.side_effect = OSError(errno.EACCES, 'Permission denied')
# This case should never happen actually, but if it happened, we'll just
# raise.
with self.assertRaises(OSError):
with models.UploadedFile(self.uploaded_file_id):
pass
# TODO(pihsun): Check if testdata still makes sense after there's no match, and
# there's only one active bundle.
class DomeAPITest(rest_framework.test.APITestCase):
"""Test Dome APIs.
This class is somewhere between unit test and integration test. All layers
below Dome back-end are mocked (such as docker commands, Umpire, etc.), but
models, serializers, views, and urls modules are not tested separately.
TODO(littlecvr): we probably need real unit tests and integration tests.
Project APIs:
- GET projects/
List projects.
- POST /projects/
Create a new project.
- DELETE /projects/${PROJECT_NAME}/
Delete a specific project.
- PUT /projects/${PROJECT_NAME}/
Add/create/delete Umpire container of the project.
Bundle APIs:
- GET /projects/${PROJECT_NAME}/bundles/
List bundles.
- POST /projects/${PROJECT_NAME/bundles/
Upload a new bundle.
- PUT /projects/${PROJECT_NAME}/bundles/
Reorder the bundles.
- DELETE /projects/${PROJECT_NAME}/bundles/${BUNDLE_NAME}/
Delete bundle.
- PUT /projects/${PROJECT_NAME/bundles/${BUNDLE_NAME}/
Update bundle resources
Resource APIs:
- POST /projects/${PROJECT_NAME}/resources/
Add a resource to Umpire.
"""
# TODO(littlecvr): separate tests into different groups (project, bundle,
# resource).
@classmethod
def setUpClass(cls):
super(DomeAPITest, cls).setUpClass()
cls.PROJECT_WITHOUT_UMPIRE_NAME = 'project_without_umpire'
cls.PROJECT_WITH_UMPIRE_NAME = 'project_with_umpire'
cls.PROJECT_WITH_UMPIRE_PORT = 8080
cls.MOCK_UMPIRE_VERSION = 5
models.Project.objects.create(name=cls.PROJECT_WITHOUT_UMPIRE_NAME)
models.Project.objects.create(name=cls.PROJECT_WITH_UMPIRE_NAME,
umpire_enabled=True,
umpire_port=cls.PROJECT_WITH_UMPIRE_PORT)
os.makedirs(os.path.join(
models.UMPIRE_BASE_DIR, cls.PROJECT_WITH_UMPIRE_NAME))
def setUp(self):
self.maxDiff = None # developer friendly setting
ENTITIES_TO_MOCK = ['subprocess.call',
'subprocess.check_call',
'subprocess.check_output',
'shutil.copy',
'shutil.rmtree',
'os.chmod',
'xmlrpc.client.ServerProxy']
self.patchers = []
self.mocks = {}
for entity in ENTITIES_TO_MOCK:
self.patchers.append(mock.patch(entity))
self.mocks[entity] = self.patchers[-1].start()
self.patchers.append(mock.patch.object(
models.Project, 'GetExistingUmpirePort'))
self.mocks['GetExistingUmpirePort'] = self.patchers[-1].start()
self.mocks['GetExistingUmpirePort'].return_value = None
def MockUmpireGetActiveConfig():
"""Mock the GetActiveConfig() call because it's used so often."""
add_config_from_blob_mock = (
self.mocks['xmlrpc.client.ServerProxy']().AddConfigFromBlob)
# Emulate Umpire to some extend: if new config has been uploaded, return
# it; otherwise, return the default config.
if add_config_from_blob_mock.called:
args, unused_kwargs = add_config_from_blob_mock.call_args
return args[0]
with TestData('umpire_config.json', deserialize=False) as config_str:
return config_str
def MockUmpireGetPayloadsDict(file_name):
"""Mock the GetPayloadsDict() RPC call in Umpire."""
with TestData(file_name) as c:
return c
self.mocks['xmlrpc.client.ServerProxy']().GetActiveConfig = (
mock.MagicMock(side_effect=MockUmpireGetActiveConfig))
self.mocks['xmlrpc.client.ServerProxy']().GetPayloadsDict = (
mock.MagicMock(side_effect=MockUmpireGetPayloadsDict))
self.mocks['xmlrpc.client.ServerProxy']().GetVersion = (
mock.MagicMock(return_value=self.MOCK_UMPIRE_VERSION))
def tearDown(self):
for patcher in self.patchers:
patcher.stop()
def testAddExistingUmpire(self):
UMPIRE_PORT = 8090
# pretend we have the container
self.mocks['subprocess.check_output'].return_value = (
models.Project.GetUmpireContainerName(self.PROJECT_WITHOUT_UMPIRE_NAME))
self.mocks['GetExistingUmpirePort'].return_value = UMPIRE_PORT
response = self._AddExistingUmpire(self.PROJECT_WITHOUT_UMPIRE_NAME)
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
self.assertTrue(
response.content, {
'name': self.PROJECT_WITHOUT_UMPIRE_NAME,
'umpireEnabled': True,
'umpirePort': UMPIRE_PORT,
'hasExistingUmpire': True
})
# no docker commands should be called
self.mocks['subprocess.call'].assert_not_called()
self.mocks['subprocess.check_call'].assert_not_called()
def testCreateProject(self):
PROJECT_NAME = 'testing_project'
response = self._CreateProject(PROJECT_NAME)
self.assertEqual(response.status_code,
rest_framework.status.HTTP_201_CREATED)
self.assertJSONEqual(
response.content, {
'name': PROJECT_NAME,
'umpireEnabled': False,
'umpirePort': None,
'netbootBundle': None,
'hasExistingUmpire': False
})
# no docker commands should be called
self.mocks['subprocess.call'].assert_not_called()
self.mocks['subprocess.check_call'].assert_not_called()
self.mocks['subprocess.check_output'].assert_not_called()
def testCreateProjectThatAlreadyExists(self):
response = self._CreateProject(self.PROJECT_WITH_UMPIRE_NAME)
# TODO(littlecvr): should expect HTTP_409_CONFLICT
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
# TODO(littlecvr): should expect message like "Project OOO already exists"
def testCreateProjectWithEmptyName(self):
response = self._CreateProject('')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
def testCreateProjectWithSlashesInName(self):
response = self._CreateProject('a/b')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
def testCreateProjectWithoutName(self):
response = self.client.post('/projects/', data={}, format='json')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
self.assertTrue('is required' in response.json()['name'])
def testDeleteAllProjects(self):
response = self.client.delete('/projects/')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_405_METHOD_NOT_ALLOWED)
def testDeleteProject(self):
response = self._DeleteProject(self.PROJECT_WITH_UMPIRE_NAME)
self.assertEqual(response.status_code,
rest_framework.status.HTTP_204_NO_CONTENT)
# make sure the container has also been removed
self.mocks['subprocess.call'].assert_called_with([
'docker', 'rm',
models.Project.GetUmpireContainerName(self.PROJECT_WITH_UMPIRE_NAME)])
def testDeleteNonExistingProject(self):
response = self._DeleteProject('non_existing_project')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_404_NOT_FOUND)
def testDisableUmpire(self):
response = self._DisableUmpire(self.PROJECT_WITH_UMPIRE_NAME)
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
self.assertJSONEqual(
response.content, {
'name': self.PROJECT_WITH_UMPIRE_NAME,
'umpireEnabled': False,
'umpirePort': 8080,
'netbootBundle': None,
'hasExistingUmpire': False
})
# make sure the container has also been removed
self.mocks['subprocess.call'].assert_called_with([
'docker', 'rm',
models.Project.GetUmpireContainerName(self.PROJECT_WITH_UMPIRE_NAME)])
def testDisableUmpireOnProjectWithoutUmpire(self):
response = self._DisableUmpire(self.PROJECT_WITHOUT_UMPIRE_NAME)
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
self.assertJSONEqual(
response.content, {
'name': self.PROJECT_WITHOUT_UMPIRE_NAME,
'umpireEnabled': False,
'umpirePort': None,
'netbootBundle': None,
'hasExistingUmpire': False
})
# nothing should be changed and nothing should be called
self.mocks['subprocess.call'].assert_not_called()
self.mocks['subprocess.check_call'].assert_not_called()
self.mocks['subprocess.check_output'].assert_not_called()
def testEnableUmpire(self):
UMPIRE_PORT = 8090
# pretend there are no containers
self.mocks['subprocess.check_output'].side_effect = [
'',
models.Project.GetUmpireContainerName(self.PROJECT_WITHOUT_UMPIRE_NAME)]
self.mocks['GetExistingUmpirePort'].return_value = UMPIRE_PORT
response = self._EnableUmpire(self.PROJECT_WITHOUT_UMPIRE_NAME, UMPIRE_PORT)
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
self.assertJSONEqual(
response.content, {
'name': self.PROJECT_WITHOUT_UMPIRE_NAME,
'umpireEnabled': True,
'umpirePort': UMPIRE_PORT,
'netbootBundle': None,
'hasExistingUmpire': True
})
# make sure docker run has been called
container_name = models.Project.GetUmpireContainerName(
self.PROJECT_WITHOUT_UMPIRE_NAME)
docker_run_called = False
for call in self.mocks['subprocess.check_call'].call_args_list:
args, unused_kwargs = call
if 'run' in args[0] and container_name in args[0]:
docker_run_called = True
break
self.assertTrue(docker_run_called)
def testEnableUmpireButUmpireAlreadyEnabled(self):
"""Test enabling Umpire on a project with Umpire already enabled (and the
Umpire container exists).
Nothing should be changed, and no Docker commands except querying for
container name should be called.
"""
UMPIRE_PORT = 8090
# pretend there is no container
self.mocks['subprocess.check_output'].return_value = ''
self._EnableUmpire(self.PROJECT_WITH_UMPIRE_NAME, UMPIRE_PORT)
# make sure no docker commands (except querying for container name) are
# called
self.mocks['subprocess.call'].assert_not_called()
self.mocks['subprocess.check_call'].assert_not_called()
def testUploadResource(self):
RESOURCE_TYPE = 'toolkit'
RESOURCE_VERSION = '1234.5678'
EXPECTED_RETURN_VALUE = {'type': RESOURCE_TYPE,
'version': RESOURCE_VERSION}
# mock Umpire AddResource() call
self.mocks['xmlrpc.client.ServerProxy']().AddPayload = mock.MagicMock(
return_value={RESOURCE_TYPE: EXPECTED_RETURN_VALUE})
response = self._CreateResource(self.PROJECT_WITH_UMPIRE_NAME,
RESOURCE_TYPE)
self.assertEqual(response.status_code,
rest_framework.status.HTTP_201_CREATED)
self.assertJSONEqual(response.content, EXPECTED_RETURN_VALUE)
# make sure AddResource() is called
self.mocks['xmlrpc.client.ServerProxy']().AddPayload.assert_called_with(
mock.ANY, RESOURCE_TYPE)
def testUploadResourceToNonExistingProject(self):
RESOURCE_TYPE = 'device_factory_toolkit'
response = self._CreateResource('non_existing_project', RESOURCE_TYPE)
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
def testActivateBundle(self):
response = self._ActivateBundle(self.PROJECT_WITH_UMPIRE_NAME,
'testing_bundle_02')
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
with TestData('umpire_config-activated.json') as c:
self.assertEqual(c, self._GetLastestUploadedConfig())
with TestData('expected_response-activated_bundle.json') as r:
self.assertEqual(r, response.json())
def testActivateNonExistingBundle(self):
response = self._ActivateBundle(self.PROJECT_WITH_UMPIRE_NAME,
'non_existing_bundle')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
self.assertIn('does not exist', response.json()['detail'])
def testActivateBundleUnicode(self):
response = self._ActivateBundle(self.PROJECT_WITH_UMPIRE_NAME,
u'testing_bundle_04_with_\u4e2d\u6587')
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
with TestData('umpire_config-activated_unicode.json') as c:
self.assertEqual(c, self._GetLastestUploadedConfig())
with TestData('expected_response-activated_bundle_unicode.json') as r:
self.assertEqual(r, response.json(encoding='UTF-8'))
def testDeleteBundle(self):
response = self.client.delete(
'/projects/%s/bundles/%s/' % (self.PROJECT_WITH_UMPIRE_NAME,
'testing_bundle_02'),
format='json')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_204_NO_CONTENT)
with TestData('umpire_config-deleted.json') as c:
self.assertEqual(c, self._GetLastestUploadedConfig())
def testDeleteActiveBundle(self):
response = self.client.delete(
'/projects/%s/bundles/%s/' % (self.PROJECT_WITH_UMPIRE_NAME,
'testing_bundle_01'),
format='json')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_422_UNPROCESSABLE_ENTITY)
def testDeleteNonExistingBundle(self):
response = self.client.delete(
'/projects/%s/bundles/%s/' % (self.PROJECT_WITH_UMPIRE_NAME,
'non_existing_bundle'),
format='json')
self.assertEqual(response.status_code,
rest_framework.status.HTTP_404_NOT_FOUND)
self.assertIn('not found', response.json()['detail'])
def testListBundles(self):
response = self.client.get(
'/projects/%s/bundles/' % self.PROJECT_WITH_UMPIRE_NAME,
format='json')
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
bundle_list = response.json()
with TestData('expected_response-get_bundle_list.json') as r:
self.assertEqual(r, bundle_list)
def testReorderBundles(self):
response = self._ReorderBundles(self.PROJECT_WITH_UMPIRE_NAME,
['testing_bundle_02',
'testing_bundle_01',
'testing_bundle_03',
'empty_init_bundle',
u'testing_bundle_04_with_\u4e2d\u6587'])
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
with TestData('umpire_config-reordered.json') as c:
self.assertEqual(c, self._GetLastestUploadedConfig())
with TestData('expected_response-reorder_bundles.json') as r:
self.assertEqual(r, response.json())
def testReorderBundlesWithoutListingAllBundleNames(self):
response = self._ReorderBundles(self.PROJECT_WITH_UMPIRE_NAME,
['testing_bundle_02',
'testing_bundle_01',
'testing_bundle_03',
'empty_init_bundle'])
self.assertEqual(response.status_code,
rest_framework.status.HTTP_400_BAD_REQUEST)
self.assertTrue('All bundles must be listed' in response.json()['detail'])
def testUploadBundle(self):
with TestData(
'umpire_config-uploaded.json', deserialize=False) as config_str:
self.mocks['xmlrpc.client.ServerProxy']().GetActiveConfig =\
mock.MagicMock(return_value=config_str)
with TestData('new_bundle.json') as b:
bundle = b
response = self._UploadNewBundle(self.PROJECT_WITH_UMPIRE_NAME,
bundle['id'], bundle['note'])
self.assertEqual(response.status_code,
rest_framework.status.HTTP_201_CREATED)
self.mocks['xmlrpc.client.ServerProxy']().ImportBundle.assert_called_once()
def testUploadBundleThatAlreadyExists(self):
BUNDLE_NAME = 'existing_bundle'
BUNDLE_NOTE = 'existing_bundle_note'
self.mocks['xmlrpc.client.ServerProxy']().ImportBundle = mock.MagicMock(
side_effect=xmlrpc.client.Fault(
-32500, # application error, doesn't matter actually
"UmpireError: bundle_id: '%s' already in use" % BUNDLE_NAME))
response = self._UploadNewBundle(self.PROJECT_WITH_UMPIRE_NAME,
BUNDLE_NAME, BUNDLE_NOTE)
self.assertEqual(response.status_code,
rest_framework.status.HTTP_409_CONFLICT)
self.assertTrue('already exists' in response.json()['detail'])
def testUploadBundleUnknownUmpireError(self):
BUNDLE_NAME = 'doomed_bundle'
BUNDLE_NOTE = 'doomed bundle'
self.mocks['xmlrpc.client.ServerProxy']().ImportBundle = mock.MagicMock(
side_effect=xmlrpc.client.Fault(
-32500, # application error, doesn't matter actually
'UmpireError: Unknown error'))
response = self._UploadNewBundle(self.PROJECT_WITH_UMPIRE_NAME,
BUNDLE_NAME, BUNDLE_NOTE)
self.assertEqual(response.status_code,
rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR)
self.assertIn('Unknown error', response.json()['detail'])
def testUpdateBundleResource(self):
response = self.client.put(
'/projects/%s/bundles/%s/' % (self.PROJECT_WITH_UMPIRE_NAME,
'testing_bundle_01'),
data={
'newName': 'testing_bundle_01_new',
'note': 'climbing like a monkey',
'resources': {
'device_factory_toolkit': {
'type': 'device_factory_toolkit',
'file_id': self._UploadFile()['id']
}
}
},
format='json'
)
self.assertEqual(response.status_code, rest_framework.status.HTTP_200_OK)
# the first call to UploadConfig() should duplicate the source bundle with
# the new name
with TestData('umpire_config-resource_updated.json') as c:
self.assertEqual(c, self._GetUploadedConfig(0))
# just make sure Update() | |
# File: tests/test_preview_item.py
# -*- coding: utf-8 -*-
"""Unit tests for PreviewItem."""
from __future__ import unicode_literals
import unittest
from mock import mock
from collections import OrderedDict
import pywikibot
from wikidatastuff.preview_item import PreviewItem
from wikidatastuff.qualifier import Qualifier # replace with mocks
from wikidatastuff.statement import Statement # replace with mocks
from wikidatastuff.reference import Reference # replace with mocks
class BasicFormatMocker(unittest.TestCase):
"""Patch some basic formatters and provide a repo."""
def setUp(self):
self.repo = pywikibot.Site('test', 'wikidata')
# patch bold
def bold_side_effect(val):
return 'bold_{}'.format(val)
bold_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.make_text_bold')
self.mock_bold = bold_patcher.start()
self.mock_bold.side_effect = bold_side_effect
# patch italics
def italics_side_effect(val):
return 'italics_{}'.format(val)
italics_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.make_text_italics')
self.mock_italics = italics_patcher.start()
self.mock_italics.side_effect = italics_side_effect
self.addCleanup(bold_patcher.stop)
self.addCleanup(italics_patcher.stop)
# patch wikidata_template
wd_template_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.make_wikidata_template')
self.mock_wd_template = wd_template_patcher.start()
self.mock_wd_template.side_effect = ['wd_template_{}'.format(i)
for i in range(1, 5)]
self.addCleanup(wd_template_patcher.stop)
class TestPreviewItemBase(BasicFormatMocker):
"""Shared setup for all instance method tests."""
def setUp(self):
super(TestPreviewItemBase, self).setUp()
self.preview_item = PreviewItem(
labels={}, descriptions={}, protoclaims={}, item=None, ref=None)
class TestMakeWikidataTemplate(unittest.TestCase):
"""Test the make_wikidata_template method."""
def setUp(self):
self.repo = pywikibot.Site('test', 'wikidata')
def test_make_wikidata_template_empty(self):
with self.assertRaises(ValueError) as cm:
PreviewItem.make_wikidata_template('')
self.assertEqual(
str(cm.exception),
'Sorry only items and properties are supported, not whatever '
'"" is.'
)
def test_make_wikidata_template_none(self):
with self.assertRaises(ValueError) as cm:
PreviewItem.make_wikidata_template(None)
self.assertEqual(
str(cm.exception),
'Sorry only items and properties are supported, not whatever '
'"None" is.'
)
def test_make_wikidata_template_qid(self):
expected = '{{Q|Q123}}'
self.assertEqual(
PreviewItem.make_wikidata_template('Q123'),
expected
)
def test_make_wikidata_template_pid(self):
expected = '{{P|P123}}'
self.assertEqual(
PreviewItem.make_wikidata_template('P123'),
expected
)
def test_make_wikidata_template_item_page(self):
expected = '{{Q|Q321}}'
item = pywikibot.ItemPage(self.repo, 'Q321')
self.assertEqual(
PreviewItem.make_wikidata_template(item),
expected
)
def test_make_wikidata_template_property_page(self):
expected = '{{P|P321}}'
prop = pywikibot.PropertyPage(self.repo, 'P321')
self.assertEqual(
PreviewItem.make_wikidata_template(prop),
expected
)
def test_make_wikidata_template_bad_id_fail(self):
with self.assertRaises(ValueError) as cm:
PreviewItem.make_wikidata_template('dummy')
self.assertEqual(
str(cm.exception),
'Sorry only items and properties are supported, not whatever '
'"dummy" is.'
)
def test_make_wikidata_template_special_novalue(self):
expected = "{{Q'|no value}}"
self.assertEqual(
PreviewItem.make_wikidata_template('novalue', special=True),
expected
)
def test_make_wikidata_template_special_somevalue(self):
expected = "{{Q'|some value}}"
self.assertEqual(
PreviewItem.make_wikidata_template('somevalue', special=True),
expected
)
def test_make_wikidata_template_special_fail(self):
with self.assertRaises(ValueError) as cm:
PreviewItem.make_wikidata_template('dummy', special=True)
self.assertEqual(
str(cm.exception),
'Sorry but "dummy" is not a recognized special value/snaktype.'
)
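# Quick reference derived from the tests above:
#   make_wikidata_template('Q123') / ItemPage          -> '{{Q|Q123}}'
#   make_wikidata_template('P123') / PropertyPage      -> '{{P|P123}}'
#   make_wikidata_template('novalue', special=True)    -> "{{Q'|no value}}"
#   make_wikidata_template('somevalue', special=True)  -> "{{Q'|some value}}"
#   anything else raises ValueError.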
class TestFormatItem(TestPreviewItemBase):
"""Test the format_item method."""
def test_format_item_none(self):
self.preview_item.item = None
self.assertEqual(
self.preview_item.format_item(),
'–'
)
def test_format_item_with_item(self):
self.preview_item.item = 'anything'
self.preview_item.format_item()
self.mock_wd_template.assert_called_once_with('anything')
class TestFormatDescriptions(TestPreviewItemBase):
"""Test the format_descriptions method."""
def test_format_descriptions_empty(self):
self.preview_item.desc_dict = {}
self.assertEqual(self.preview_item.format_descriptions(), '')
def test_format_descriptions_with_data(self):
descriptions = {
'en': 'en_desc',
'sv': 'sv_desc'
}
self.preview_item.desc_dict = OrderedDict(
sorted(descriptions.items(), key=lambda t: t[0]))
expected = (
'* bold_en: en_desc\n'
'* bold_sv: sv_desc\n'
)
self.assertEqual(self.preview_item.format_descriptions(), expected)
self.mock_bold.assert_has_calls([mock.call('en'), mock.call('sv')])
class TestFormatLabels(TestPreviewItemBase):
"""Test the format_labels method."""
def test_format_labels_empty(self):
self.preview_item.labels_dict = {}
self.assertEqual(self.preview_item.format_labels(), '')
def test_format_labels_with_multiple_langs(self):
labels = {
'en': ['en_label'],
'sv': ['sv_label']
}
self.preview_item.labels_dict = OrderedDict(
sorted(labels.items(), key=lambda t: t[0]))
expected = (
'* bold_en: italics_en_label\n'
'* bold_sv: italics_sv_label\n'
)
self.assertEqual(self.preview_item.format_labels(), expected)
self.mock_bold.assert_has_calls([mock.call('en'), mock.call('sv')])
def test_format_labels_with_multiple_names(self):
self.preview_item.labels_dict = {
'en': ['en_label', 'en_alias_1', 'en_alias_2']
}
expected = (
'* bold_en: italics_en_label | en_alias_1 | en_alias_2\n'
)
self.assertEqual(self.preview_item.format_labels(), expected)
self.mock_bold.assert_called_once_with('en')
self.mock_italics.assert_called_once_with('en_label')
class TestFormatItis(BasicFormatMocker):
"""Test the format_itis method."""
def setUp(self):
super(TestFormatItis, self).setUp()
timestring_patcher = mock.patch(
'wikidatastuff.preview_item.pywikibot.WbTime.toTimestr')
self.mock_format_timestring = timestring_patcher.start()
self.mock_format_timestring.return_value = 'formatted_WbTime'
self.addCleanup(timestring_patcher.stop)
def test_format_itis_none(self):
itis = None
expected = 'None'
self.assertEqual(PreviewItem.format_itis(itis), expected)
self.mock_wd_template.assert_not_called()
self.mock_format_timestring.assert_not_called()
def test_format_itis_item_page(self):
itis = pywikibot.ItemPage(self.repo, 'Q123')
expected = 'wd_template_1'
self.assertEqual(PreviewItem.format_itis(itis), expected)
self.mock_wd_template.assert_called_once_with(itis)
self.mock_format_timestring.assert_not_called()
def test_format_itis_quantity(self):
itis = pywikibot.WbQuantity(123, site=self.repo)
expected = '123'
self.assertEqual(PreviewItem.format_itis(itis), expected)
self.mock_wd_template.assert_not_called()
self.mock_format_timestring.assert_not_called()
def test_format_itis_quantity_unit(self):
unit = pywikibot.ItemPage(self.repo, 'Q123')
itis = pywikibot.WbQuantity(123, unit=unit, site=self.repo)
expected = '123 wd_template_1'
self.assertEqual(PreviewItem.format_itis(itis), expected)
self.mock_wd_template.assert_called_once_with(unit)
self.mock_format_timestring.assert_not_called()
def test_format_itis_time(self):
itis = pywikibot.WbTime(year=1999)
expected = 'formatted_WbTime'
self.assertEqual(PreviewItem.format_itis(itis), expected)
self.mock_wd_template.assert_not_called()
self.mock_format_timestring.assert_called_once()
def test_format_itis_other(self):
itis = [1, 2, 3]
expected = '[1, 2, 3]'
self.assertEqual(PreviewItem.format_itis(itis), expected)
self.mock_wd_template.assert_not_called()
self.mock_format_timestring.assert_not_called()
def test_format_itis_special(self):
itis = 'dummy'
expected = 'wd_template_1'
self.assertEqual(
PreviewItem.format_itis(itis, special=True),
expected
)
self.mock_wd_template.assert_called_once_with(itis, special=True)
self.mock_format_timestring.assert_not_called()
def test_format_itis_statement_item(self):
item = pywikibot.ItemPage(self.repo, 'Q123')
itis = Statement(item)
expected = 'wd_template_1'
self.assertEqual(
PreviewItem.format_itis(itis),
expected
)
self.mock_wd_template.assert_called_once_with(item)
self.mock_format_timestring.assert_not_called()
def test_format_itis_statement_other(self):
itis = Statement('dummy')
expected = 'dummy'
self.assertEqual(
PreviewItem.format_itis(itis),
expected
)
self.mock_wd_template.assert_not_called()
self.mock_format_timestring.assert_not_called()
def test_format_itis_statement_detect_special(self):
itis = Statement('novalue', special=True)
expected = 'wd_template_1'
self.assertEqual(
PreviewItem.format_itis(itis),
expected
)
self.mock_wd_template.assert_called_once_with('novalue', special=True)
self.mock_format_timestring.assert_not_called()
class TestFormatClaim(BasicFormatMocker):
"""Test the format_claim method."""
def setUp(self):
super(TestFormatClaim, self).setUp()
itis_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.format_itis')
self.mock_format_itis = itis_patcher.start()
self.mock_format_itis.return_value = 'formatted_itis'
self.addCleanup(itis_patcher.stop)
def test_format_claim_basic(self):
claim = pywikibot.Claim(self.repo, 'P123')
claim.setTarget('1')
expected = 'wd_template_1: formatted_itis'
self.assertEqual(PreviewItem.format_claim(claim), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_called_once_with('1', False)
def test_format_claim_special(self):
claim = pywikibot.Claim(self.repo, 'P123')
claim.setSnakType('novalue')
expected = 'wd_template_1: formatted_itis'
self.assertEqual(PreviewItem.format_claim(claim), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_called_once_with('novalue', True)
class TestFormatReference(BasicFormatMocker):
"""Test the format_reference method."""
def setUp(self):
super(TestFormatReference, self).setUp()
claim_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.format_claim')
self.mock_format_claim = claim_patcher.start()
self.mock_format_claim.side_effect = ['formatted_claim_{}'.format(i)
for i in range(1, 5)]
self.addCleanup(claim_patcher.stop)
self.claim_1 = pywikibot.Claim(self.repo, 'P123')
self.claim_1.setTarget('1')
self.claim_2 = pywikibot.Claim(self.repo, 'P123')
self.claim_2.setTarget('2')
self.claim_3 = pywikibot.Claim(self.repo, 'P123')
self.claim_3.setTarget('3')
self.claim_4 = pywikibot.Claim(self.repo, 'P123')
self.claim_4.setTarget('4')
def test_format_reference_basic(self):
ref = Reference(
source_test=[self.claim_1, self.claim_2],
source_notest=[self.claim_3, self.claim_4]
)
expected = (
':italics_tested:\n'
':*formatted_claim_1\n'
':*formatted_claim_2\n'
':italics_not tested:\n'
':*formatted_claim_3\n'
':*formatted_claim_4\n'
)
self.assertEqual(PreviewItem.format_reference(ref), expected)
self.mock_format_claim.assert_has_calls([
mock.call(self.claim_1),
mock.call(self.claim_2),
mock.call(self.claim_3),
mock.call(self.claim_4)
])
self.mock_italics.assert_has_calls([
mock.call('tested'),
mock.call('not tested')
])
def test_format_reference_no_test(self):
ref = Reference(source_notest=self.claim_1)
expected = (
':italics_not tested:\n'
':*formatted_claim_1\n'
)
self.assertEqual(PreviewItem.format_reference(ref), expected)
self.mock_format_claim.assert_called_once_with(self.claim_1)
self.mock_italics.assert_called_once_with('not tested')
def test_format_reference_no_notest(self):
ref = Reference(source_test=self.claim_1)
expected = (
':italics_tested:\n'
':*formatted_claim_1\n'
)
self.assertEqual(PreviewItem.format_reference(ref), expected)
self.mock_format_claim.assert_called_once_with(self.claim_1)
self.mock_italics.assert_called_once_with('tested')
class TestFormatQual(BasicFormatMocker):
"""Test the format_qual method."""
def setUp(self):
super(TestFormatQual, self).setUp()
itis_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.format_itis')
self.mock_format_itis = itis_patcher.start()
self.mock_format_itis.return_value = 'formatted_itis'
self.addCleanup(itis_patcher.stop)
def test_format_qual_basic(self):
qual = Qualifier('P123', 'val')
self.assertEqual(
PreviewItem.format_qual(qual), 'wd_template_1: formatted_itis')
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_called_once_with('val')
class TestFormatProtoclaims(TestPreviewItemBase):
"""Test the format_protoclaims method."""
def setUp(self):
super(TestFormatProtoclaims, self).setUp()
itis_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.format_itis')
self.mock_format_itis = itis_patcher.start()
self.mock_format_itis.side_effect = ['formatted_itis_{}'.format(i)
for i in range(1, 5)]
self.addCleanup(itis_patcher.stop)
qual_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.format_qual')
self.mock_format_qual = qual_patcher.start()
self.mock_format_qual.side_effect = ['formatted_qual_{}'.format(i)
for i in range(1, 5)]
self.addCleanup(qual_patcher.stop)
ref_patcher = mock.patch(
'wikidatastuff.preview_item.PreviewItem.format_reference')
self.mock_format_ref = ref_patcher.start()
self.mock_format_ref.side_effect = ['formatted_reference_{}'.format(i)
for i in range(1, 5)]
self.addCleanup(ref_patcher.stop)
def test_format_protoclaims_no_protoclaims(self):
self.preview_item.protoclaims = {}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_not_called()
self.mock_format_itis.assert_not_called()
self.mock_format_qual.assert_not_called()
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_no_single_none_claim(self):
self.preview_item.protoclaims = {'P123': None}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_not_called()
self.mock_format_itis.assert_not_called()
self.mock_format_qual.assert_not_called()
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_single(self):
itis = Statement('dummy')
self.preview_item.protoclaims = {'P123': itis}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_1 \n'
'| \n'
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_called_once_with(itis)
self.mock_format_qual.assert_not_called()
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_single_with_qual(self):
itis = Statement('dummy')
qual = Qualifier('P321', 'qual_dummy')
itis._quals.add(qual)
self.preview_item.protoclaims = {'P123': itis}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_1 \n'
'| formatted_qual_1 \n'
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_called_once_with(itis)
self.mock_format_qual.assert_called_once_with(qual)
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_single_with_multiple_qual(self):
itis = Statement('dummy')
qual_1 = Qualifier('P321', 'qual_dummy')
qual_2 = Qualifier('P213', 'qual_dummy')
itis._quals.add(qual_1)
itis._quals.add(qual_2)
self.preview_item.protoclaims = {'P123': itis}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_1 \n'
'| * formatted_qual_1 \n'
'* formatted_qual_2 \n'
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_called_once_with(itis)
self.mock_format_qual.assert_has_calls([
mock.call(qual_1),
mock.call(qual_2)],
any_order=True
)
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_multiple_same_prop(self):
itis_1 = Statement('foo')
itis_2 = Statement('bar')
self.preview_item.protoclaims = {'P123': [itis_1, itis_2]}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_1 \n'
'| \n'
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_2 \n'
'| \n'
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_has_calls([
mock.call(itis_1),
mock.call(itis_2)
])
self.mock_format_qual.assert_not_called()
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_multiple_different_prop(self):
itis_1 = Statement('foo')
itis_2 = Statement('bar')
protoclaims = {'P123': itis_1, 'P321': itis_2}
self.preview_item.protoclaims = OrderedDict(
sorted(protoclaims.items(), key=lambda t: int(t[0][1:])))
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_1 \n'
'| \n'
'|-\n'
'| wd_template_2 \n'
'| formatted_itis_2 \n'
'| \n'
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_has_calls([
mock.call('P123'),
mock.call('P321')],
any_order=True
)
self.mock_format_itis.assert_has_calls([
mock.call(itis_1),
mock.call(itis_2)],
any_order=True
)
self.mock_format_qual.assert_not_called()
self.mock_format_ref.assert_not_called()
def test_format_protoclaims_ref_adds_column(self):
claim_1 = pywikibot.Claim(self.repo, 'P123')
claim_1.setTarget('1')
ref_1 = Reference(claim_1)
itis_1 = Statement('foo')
itis_2 = Statement('bar').add_reference(ref_1)
self.preview_item.protoclaims = {'P123': [itis_1, itis_2]}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
"! References\n"
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_1 \n'
'| \n'
'| \n'
'|-\n'
'| wd_template_1 \n'
'| formatted_itis_2 \n'
'| \n'
'| \nformatted_reference_1 \n'
"|}"
)
self.assertEqual(self.preview_item.format_protoclaims(), expected)
self.mock_wd_template.assert_called_once_with('P123')
self.mock_format_itis.assert_has_calls([
mock.call(itis_1),
mock.call(itis_2)
])
self.mock_format_qual.assert_not_called()
self.mock_format_ref.assert_called_once_with(ref_1)
def test_format_protoclaims_ref_adds_column_set_default(self):
claim_1 = pywikibot.Claim(self.repo, 'P123')
claim_1.setTarget('1')
ref_1 = Reference(claim_1)
claim_2 = pywikibot.Claim(self.repo, 'P123')
claim_2.setTarget('2')
ref_2 = Reference(claim_2)
itis_1 = Statement('foo')
itis_2 = Statement('bar').add_reference(ref_1)
self.preview_item.ref = ref_2
self.preview_item.protoclaims = {'P123': [itis_1, itis_2]}
expected = (
"{| class='wikitable'\n"
"|-\n"
"! Property\n"
"! Value\n"
"! Qualifiers\n"
"! References\n"
'|-\n'
'| wd_template_1 \n'
'| | |
if lib_conf == None or not os.path.exists(lib_conf):
return
with open(lib_conf) as lib_conf_handle:
for line in lib_conf_handle:
# remove comments
match = re.search('^[^#]*', line)
line = match.group()
# skip empty lines
if re.search('^\s*$', line):
continue
# parse (allowing spaces)
# <confLibName>:<confLibPath>
match = re.search('^\s*(?P<name>[a-zA-Z0-9_]+)\s*:' +
'\s*(?P<path>[^\s]*)\s*$', line)
if match == None or (not match.group('name')) or (not match.group('path')):
error_exit("libConf file syntax error: '" + line + "'");
lcname = match.group('name')
lcpath = match.group('path')
conf_lib_list.append({ 'name': lcname, 'path': lcpath })
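# Illustrative libConf file accepted by the parser above ('#' starts a
# comment, each entry is <confLibName>:<confLibPath>; names and paths are
# hypothetical):
#
#     # site-specific configuration libraries
#     coreLib: conf/libs/core
#     themeLib: conf/libs/theme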
def gen_conf_lib_preamble():
for conf_lib in conf_lib_list:
add_conf_lib_sticky_path(conf_lib['path'])
for idx, conf_lib in reversed(list(enumerate(conf_lib_list))):
clpriority = len(conf_lib_list) - idx
add_conf_lib(clpriority, conf_lib['name'], conf_lib['path'])
add_conf_lib(0, None, None)
preamble = get_output_str()
return preamble
def process_directive(line, directive_fn, linenr, basedir):
global make_target, used_resources
filename = None
filenames = None
relative_dir = None
match = re.search('^([^a-z]*)%%([a-z]+)%%:\s*([^\s]*)\s*$', line)
if match == None or len(match.groups()) != 3:
error_exit(directive_fn + ':' + str(linenr) + ': directive has invalid syntax: ' + line)
directive_prefix = match.group(1)
directive = match.group(2)
basename = match.group(3)
stdmatch = re.search('^<(.*)>$', basename)
quotematch = re.search('^"(.*)"$', basename)
# tildematch = re.search('^~/(.*)$', basename)
if stdmatch != None:
filename = find_file_in_path(directive, stdmatch.group(1))
elif quotematch != None:
filename = os.path.join(basedir, quotematch.group(1))
if not os.path.isfile(filename):
filename = find_file_in_path(directive, quotematch.group(1))
elif basename == 'source':
filename = get_cdl_source()
if source_dir is not None:
relative_dir = source_dir
elif basename == 'foreign':
filenames = []
for fn in used_resources.get("foreign"):
filenames.append(find_file_in_path('foreign', fn))
elif basename == 'fonturls':
write_font_urls(directive_prefix)
return
# elif tildematch != None:
# filename = os.path.join(get_root_dir(), tildematch.group(1))
else:
print('basename="' + basename + '"')
if get_mode() == 'incl':
print(directive, filename)
if filename is not None:
if relative_dir is None:
relative_dir = os.path.dirname(filename)
process_file(directive, filename, relative_dir)
elif filenames is not None:
for filename in filenames:
process_file(directive, filename, os.path.dirname(filename))
else:
error_exit('invalid directive: ' + line)
# Only compress svg images
def use_compression_for_image(filename):
return filename.endswith(".svg")
# Compress all data files
def use_compression_for_data(filename):
return True
# Stores which resource has been copied to which path; avoids duplicate copies
# and resolves faster
copied_resources = {}
# Stores which path is the target for which resource; avoids duplicate naming
resource_targets = {}
def add_copied_resource(resource_hash, path):
global copied_resources
if path in resource_targets and resource_targets[path] != resource_hash:
error_exit("{} is the target for both {} and {}".format(
path, resource_targets[path], resource_hash
))
copied_resources[resource_hash] = path
resource_targets[path] = resource_hash
# Returns the path to the file from the macro. When common_dir has been set,
# copies the file to that directory, compressing it when the extension allows
# it, but only when the source file is newer.
def copy_and_compress(type, macro_arg, use_compression_fun, common_dir):
global copied_resources
resource_hash = type + ':' + macro_arg
if resource_hash in copied_resources:
return copied_resources[resource_hash]
src_path = find_file_in_path(type, macro_arg)
if common_dir == None:
add_copied_resource(resource_hash, src_path)
return src_path
out_path = os.path.join(common_dir, os.path.basename(macro_arg))
if not os.path.exists(src_path):
print("{0} does not exist: {1}".format(type, src_path), file=sys.stderr)
add_copied_resource(resource_hash, out_path)
return out_path
use_compression = use_compression_fun(macro_arg)
if out_path == src_path:
add_copied_resource(resource_hash, src_path)
return out_path # In case someone puts the images in the common_dir
target_path = out_path
if use_compression:
target_path += '.gz'
if not os.path.exists(target_path) or os.path.getmtime(target_path) < os.path.getmtime(src_path):
if use_compression:
with open(src_path, 'rb') as f_in, gzip.open(target_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
with open(src_path, 'rb') as f_in, open(target_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
add_copied_resource(resource_hash, out_path)
return out_path
# format: %%image:(url)%%
# Calls copy_and_compress for an image
def process_image_macro(macro_name, macro_args):
global common_image_dir
return copy_and_compress('image', macro_args[0], use_compression_for_image, common_image_dir)
# format: %%font:(fontFamily,url)%%, no comma in the font name, no superfluous spaces
def process_font_macro(macro_name, macro_args):
if len(macro_args) < 2:
error_exit('font macro should have two arguments')
url = ",".join(macro_args[1:]) # in case the URL constains commas
add_resource_usage('font', url)
return macro_args[0]
# format: %%data:(url)%%. Behaves like process_image_macro.
# Calls copy_and_compress for a data file
def process_data_macro(macro_name, macro_args):
global common_data_dir
return copy_and_compress('data', macro_args[0], use_compression_for_data, common_data_dir)
def process_buildinfo_macro(macro_name, macro_args):
global build_info_file
return build_info_file
def process_conf_lib_preamble_macro(macro_name, macro_args):
push_include_file('template', '--conf-lib-include--')
preamble_str = '\n' + gen_conf_lib_preamble()
pop_include_file('template', '--conf-lib-include--')
return preamble_str
def process_title_macro(macro_name, macro_args):
global title
return title
def process_splash_screen_url_macro(macro_name, macro_args):
global splash_screen_url
return normalize_path(find_file_in_path('url', splash_screen_url))
def process_classes_macro(macro_name, macro_args):
global conf_lib_by_priority
return "\n" + \
",\n".join(
map(
lambda x: "\t{\n\t\tname: '" +
("" if x['name'] == None else x['name']) +
"',\n\t\tclasses: [\n\t\t\t" +
",\n\t\t\t".join(x['class_list']) +
"\n\t\t]\n\t}",
conf_lib_by_priority
)
) + "\n"
def process_textfile_macro(macro_name, macro_args):
if len(macro_args) != 1:
error_exit('textfile macro should have one argument')
src_path = find_file_in_path('text', macro_args[0])
if get_mode() == 'incl':
print('textfile', src_path)
return ""
str = ""
with open(src_path) as input_handle:
for line in input_handle:
str += "\\n" + line[:-1].replace('\\', '\\\\').replace('"', '\\"')
return str[2:]
def process_url_macro(macro_name, macro_args):
if len(macro_args) != 1:
error_exit('url macro should have one argument')
return find_file_in_path('url', macro_args[0])
def process_macro(dtype, line, fn, linenr, match):
macro_name = match.group(1)
macro_arg_str = match.group(2)
# extract arguments
macro_args = re.findall('[^,]+', macro_arg_str)
if macro_name == 'image':
macro_subst = process_image_macro(macro_name, macro_args)
elif macro_name == 'data':
macro_subst = process_data_macro(macro_name, macro_args)
elif macro_name == 'font':
macro_subst = process_font_macro(macro_name, macro_args)
elif macro_name == 'buildinfo':
macro_subst = process_buildinfo_macro(macro_name, macro_args)
elif macro_name == 'conflibPreamble':
macro_subst = process_conf_lib_preamble_macro(macro_name, macro_args)
elif macro_name == 'title':
macro_subst = process_title_macro(macro_name, macro_args)
elif macro_name == 'splashScreenUrl':
macro_subst = process_splash_screen_url_macro(macro_name, macro_args)
elif macro_name == 'classes':
macro_subst = process_classes_macro(macro_name, macro_args)
elif macro_name == 'textfile':
macro_subst = process_textfile_macro(macro_name, macro_args)
elif macro_name == 'url':
macro_subst = process_url_macro(macro_name, macro_args)
else:
error_exit(fn + ':' + str(linenr) + ": don't know (yet) how to handle macro '" + macro_name +
"' in '" + line + "'")
if macro_subst is None:
error_exit(fn + ':' + str(linenr) + ': empty subst')
return macro_subst
def get_current_conf_lib_name():
global current_conf_lib
if current_conf_lib is None or current_conf_lib['name'] is None:
conf_lib_name = ""
else:
conf_lib_name = current_conf_lib['name']
return conf_lib_name
def verify_current_conf_lib(conf_lib_name):
cblp_name = conf_lib_by_priority[0]['name']
if cblp_name is None:
cblp_name = ""
if cblp_name != conf_lib_name:
error_exit('confLib names do not match')
def process_class_def(dtype, line, fn):
"""replace 'var classes =' with 'var <CL>__<fn>__classes ='
where <CL> is the current confLib (may be empty) and <fn> is the current
source file name"""
global conf_lib_by_priority
conf_lib_name = get_current_conf_lib_name()
verify_current_conf_lib(conf_lib_name)
mclass_name = conf_lib_name + '__' + stemname(fn, conf_lib_name) + '__classes'
mclass_def = 'var ' + mclass_name + ' ='
match = re.search(r'^\s*var[^=]*=(.*)$', line)
mclass_def = mclass_def + match.group(1) + "\n"
section_print(dtype, mclass_def)
conf_lib_by_priority[0]['class_list'].append(mclass_name)
def process_constant_def(dtype, line, fn):
"""
replace
'var xxxConstants = { ... };'
with
'var <confLib1>__xxxConstants = { ... };'
and then, at the end of the 'constantfile' section append
'var xxxConstants = mergeCdlConstants(
<confLib1>__xxxConstants,
<confLib2>__xxxConstants,
...
);'
(ordered by confLib priority) to allow higher priority confLibs to
overwrite constants defined in lower priority confLibs, such that the
effect reaches back into the lower priority confLib. For example, if Core has
CellConstants = {
width: 5
}
Cell: { position: { width: CellConstants.width } }
and Mon1 has
CellConstants = {
width: 2
}
then setting CellConstants.width to 2 must occur before including Core::Cell
a constant definition is also identified as
var xxx = { // %%constantdef%%
"""
conf_lib_name = get_current_conf_lib_name()
verify_current_conf_lib(conf_lib_name)
# neutralize processed %%constantdef%% by converting %% to %-
constdef_match = re.search('^(.*//.*)%%constantdef%%(.*)$', line)
if constdef_match:
line = constdef_match.group(1) + '%-constantdef-%' + \
constdef_match.group(2)
match = re.search(r'^\s*var\s+([a-zA-Z0-9_]+)\s*=(.*)$', line)
if (not match) or (not match.group(1)) or (not match.group(2)):
error_exit('constant_def: parse failure (' + line + ')')
const_name = match.group(1)
mconst_name = conf_lib_name + '__' + const_name
mconst_def = 'var ' + mconst_name + ' =' + match.group(2) + "\n"
section_print(dtype, mconst_def)
conf_lib_by_priority[0]['constant'].append({
'name': const_name,
'element': mconst_name
})
# The pattern for macros
macro_re = re.compile(r'%%([a-zA-Z0-9_]*):\(([^%()]*)\)%%')
# The pattern for includes
include_re = re.compile('^[^a-z]*%%[a-z]+%%:')
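# Illustration (the directive name and file names below are made-up examples of the syntax, not
# values from a real project): a line such as
#   var logo = "%%image:(logo.svg)%%";
# is handled by macro_re, which captures ('image', 'logo.svg') so process_line can substitute the
# macro in place, while a line such as
#   // %%include%%: <screen.js>
# matches include_re instead and is routed to process_directive.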
# Returns a string indicating the line type
# - 'class' when the line is var classes/stemname = ...
# - 'constant' when the line is a constant definition (var xxxConstants = ... or tagged %%constantdef%%)
# - 'screen' when the line is var screenArea = ...
# - 'test' when the line is var test = ...
# - '' otherwise
def process_line(dtype, line, fn, linenr, basedir):
line = line.rstrip('\n')
line += '\n'
mode = get_mode()
line = macro_re.sub(lambda match_group: process_macro(dtype, line, fn, linenr, match_group), line)
if include_re.search(line):
process_directive(line, fn, linenr, basedir)
elif dtype == 'classfile' and (re.search(r'^\s*var\s+classes\s*=', line) or \
re.search(r'^\s*var\s*' + stemname(fn, None) + r'\s*=', line)):
if mode == 'js':
process_class_def(dtype, line, fn)
return 'class'
elif (dtype == 'constantfile' and \
re.search(r'^\s*var\s+[a-zA-Z0-9_]+[cC]onstants\s*=', line)) \
or \
re.search(r'\s*var\s+[a-zA-Z0-9_]+\s*=.*//.*%%constantdef%%', line):
if mode == 'js':
process_constant_def(dtype, line, fn)
return 'constant'
else:
if dtype == 'template' or get_mode() == 'js':
section_print(dtype, line)
if re.search(r'^\s*var\s+screenArea\s*=', line):
return 'screen'
if re.search(r'^\s*var\s+test\s*=', line):
return 'test'
return ''
def process_file(dtype, filename, basedir):
global processed_files
global
None),
('CRC', 'i'),
('SwHolding', '16d'),
('UserParamEx','8d'),
]
size_check = 352
class V9_SweepRecord(TreeNode):
field_info = [
('Mark', 'i'),
('Label', '32s', cstr),
('AuxDataFileOffset', 'i'),
('StimCount', 'i'),
('SweepCount', 'i'),
('Time', 'd'),
('Timer', 'd'),
('SwUserParams', '4d'),
('Temperature', 'd'),
('OldIntSol', 'i'),
('OldExtSol', 'i'),
('DigitalIn', 'h'),
('SweepKind', 'h'),
('DigitalOut','h'),
('Filler1', 'i', None),
('Markers', '4d'),
('Filler2', 'i', None),
('CRC', 'i'),
('SwHolding', '16d'),
]
## according to the MATLAB HEKA importer, but it could be 290 or 294, depending on the version of HEKA
## TODO: this still needs to be clarified
# size_check = 288
class UserParamDescrType(Struct):
field_info = [
('Name', '32s', cstr),
('Unit', '8s', cstr),
]
size_check = 40
class AmplifierState(Struct):
field_info = [
('StateVersion', '8s', cstr),
('RealCurrentGain', 'd'),
('RealF2Bandwidth', 'd'),
('F2Frequency', 'd'),
('RsValue', 'd'),
('RsFraction', 'd'),
('GLeak', 'd'),
('CFastAmp1', 'd'),
('CFastAmp2', 'd'),
('CFastTau', 'd'),
('CSlow', 'd'),
('GSeries', 'd'),
('StimDacScale', 'd'),
('CCStimScale', 'd'),
('VHold', 'd'),
('LastVHold', 'd'),
('VpOffset', 'd'),
('VLiquidJunction', 'd'),
('CCIHold', 'd'),
('CSlowStimVolts', 'd'),
('CCTrackVHold', 'd'),
('TimeoutLength', 'd'),
('SearchDelay', 'd'),
('MConductance', 'd'),
('MCapacitance', 'd'),
('SerialNumber', '8s', cstr),
('E9Boards', 'h'),
('CSlowCycles', 'h'),
('IMonAdc', 'h'),
('VMonAdc', 'h'),
('MuxAdc', 'h'),
('TstDac', 'h'),
('StimDac', 'h'),
('StimDacOffset', 'h'),
('MaxDigitalBit', 'h'),
('SpareInt1', 'h', None),
('SpareInt2', 'h', None),
('SpareInt3', 'h', None),
('AmplKind', 'c'),
('IsEpc9N', 'c'),
('ADBoard', 'c'),
('BoardVersion', 'c'),
('ActiveE9Board', 'c'),
('Mode', 'c'),
('Range', 'c'),
('F2Response', 'c'),
('RsOn', 'c'),
('CSlowRange', 'c'),
('CCRange', 'c'),
('CCGain', 'c'),
('CSlowToTstDac', 'c'),
('StimPath', 'c'),
('CCTrackTau', 'c'),
('WasClipping', 'c'),
('RepetitiveCSlow', 'c'),
('LastCSlowRange', 'c'),
('Locked', 'c'),
('CanCCFast', 'c'),
('CanLowCCRange', 'c'),
('CanHighCCRange', 'c'),
('CanCCTracking', 'c'),
('HasVmonPath', 'c'),
('HasNewCCMode', 'c'),
('Selector', 'c'),
('HoldInverted', 'c'),
('AutoCFast', 'c'),
('AutoCSlow', 'c'),
('HasVmonX100', 'c'),
('TestDacOn', 'c'),
('QMuxAdcOn', 'c'),
('RealImon1Bandwidth', 'd'),
('StimScale', 'd'),
('Gain', 'c'),
('Filter1', 'c'),
('StimFilterOn', 'c'),
('RsSlow', 'c'),
('Old1', 'c'),
('CCCFastOn', 'c'),
('CCFastSpeed', 'c'),
('F2Source', 'c'),
('TestRange', 'c'),
('TestDacPath', 'c'),
('MuxChannel', 'c'),
('MuxGain64', 'c'),
('VmonX100', 'c'),
('IsQuadro', 'c'),
('SpareBool4', 'c', None),
('SpareBool5', 'c', None),
('StimFilterHz', 'd'),
('RsTau', 'd'),
('FilterOffsetDac', 'h'),
('ReferenceDac', 'h'),
('SpareInt6', 'h', None),
('SpareInt7', 'h', None),
('Spares1', '24s', None),
('CalibDate', '16s'),
('SelHold', 'd'),
('Spares2', '32s', None),
]
size_check = 400
class LockInParams(Struct):
field_info = [
('ExtCalPhase', 'd'),
('ExtCalAtten', 'd'),
('PLPhase', 'd'),
('PLPhaseY1', 'd'),
('PLPhaseY2', 'd'),
('UsedPhaseShift', 'd'),
('UsedAttenuation', 'd'),
('Spares2', '8s', None),
('ExtCalValid', '?'),
('PLPhaseValid', '?'),
('LockInMode', 'c'),
('CalMode', 'c'),
('Spares', '28s', None),
]
size_check = 96
class V9_SeriesRecord(TreeNode): ## Done!
field_info = [
('Mark', 'i'),
('Label', '32s', cstr),
('Comment', '80s', cstr),
('SeriesCount', 'i'),
('NumberSweeps', 'i'),
('AmplStateOffset', 'i'),
('AmplStateSeries', 'i'),
('MethodTag', 'i'),
('Time', 'd'),
('PageWidth', 'd'),
('SwUserParamDescr', UserParamDescrType.array(4)),
('MethodName','32s', cstr),
('SeUserParams', '4d'),
('LockInParams', LockInParams),
('AmplifierState', AmplifierState),
('Username', '80s', cstr),
('UserParamDescr', UserParamDescrType.array(4)),
('Filler1', 'i', None),
('CRC', 'i'),
('UserParams2', '4d'),
('UserParamDescr2',UserParamDescrType.array(4)),
('ScanParams', '96c'), ## 96 uint8
]
size_check = 1408
class SeriesRecord(TreeNode): ## Done! validated with Matlab HEKA importer!
field_info = [
('Mark', 'i'),
('Label', '32s', cstr),
('Comment', '80s', cstr),
('SeriesCount', 'i'),
('NumberSweeps', 'i'),
('AmplStateFlag', 'i'),
('AmplStateRef', 'i'),
('MethodTag', 'i'),
('Time', 'd'),
('PageWidth', 'd'),
('SwUserParamDescr', UserParamDescrType.array(2)),
('Filler1', '80s', None),
('MethodName','32s', cstr),
('PhotoParams1', '4d'),
('OldLockInParams', LockInParams),
('OldAmpState', AmplifierState),
('Username', '80s', cstr),
('PhotoParams2', UserParamDescrType.array(4)),
('Filler1', 'i', None),
('CRC', 'i'),
('UserParams2', '4d'),
('UserParamDescr2',UserParamDescrType.array(4)),
('ScanParams', '96c'), ## 96 uint8
('UserDescr2', UserParamDescrType.array(8) )
]
size_check = 1728
class GroupRecord(TreeNode):
field_info = [
('Mark', 'i'),
('Label', '32s', cstr),
('Text', '80s', cstr),
('ExperimentNumber', 'i'),
('GroupCount', 'i'),
('CRC', 'i'),
('MatrixWidth', 'd'),
('MatrixHeight', 'd'),
]
size_check = 144
class Pulsed(TreeNode):
field_info = [
('Version', 'i'),
('Mark', 'i'),
('VersionName', '32s', cstr),
('AuxFileName', '80s', cstr),
('RootText', '400s', cstr),
('StartTime', 'd'),
('MaxSamples', 'i'),
('CRC', 'i'),
('Features', 'h'),
('Filler1', 'h', None),
('Filler2', 'i', None),
('TcEnumerator','32h'),
('TcKind', '32c')
]
size_check = 640
rectypes = [
None,
GroupRecord,
SeriesRecord,
SweepRecord,
TraceRecord
]
def __init__(self, bundle, offset=0, size=None):
fh = open(bundle.file_name, 'rb')
#pdb.set_trace()
fh.seek(offset)
# read .pul header
magic = fh.read(4)
if magic == b'eerT':
self.endian = '<'
elif magic == b'Tree':
self.endian = '>'
elif magic == b'DAT1':
self.endian = '>'
else:
raise RuntimeError('Bad file magic: %s' % magic)
levels = struct.unpack(self.endian + 'i', fh.read(4))[0]
# read size of each level (one int per level)
self.level_sizes = []
for i in range(levels):
size = struct.unpack(self.endian + 'i', fh.read(4))[0]
self.level_sizes.append(size)
TreeNode.__init__(self, fh, self)
class Pulsed9(TreeNode):
field_info = [
('Version', 'i'),
('Mark', 'i'),
('VersionName', '32s', cstr),
('AuxFileName', '80s', cstr),
('RootText', '400s', cstr),
('StartTime', 'd'),
('MaxSamples', 'i'),
('CRC', 'i'),
('Features', 'h'),
('Filler1', 'h', None),
('Filler2', 'i', None),
]
size_check = 544
rectypes = [
None,
GroupRecord, ## no changes in group record between version 9 and version 1000
V9_SeriesRecord,
V9_SweepRecord,
TraceRecord ## no changes in tracerecord between version 9 and version 1000
]
def __init__(self, bundle, offset=0, size=None):
fh = open(bundle.file_name, 'rb')
fh.seek(offset)
# read .pul header
magic = fh.read(4)
if magic == b'eerT':
self.endian = '<'
elif magic == b'Tree':
self.endian = '>'
else:
raise RuntimeError('Bad file magic: %s' % magic)
levels = struct.unpack(self.endian + 'i', fh.read(4))[0]
# read size of each level (one int per level)
self.level_sizes = []
for i in range(levels):
size = struct.unpack(self.endian + 'i', fh.read(4))[0]
self.level_sizes.append(size)
TreeNode.__init__(self, fh, self)
class Data(object):
def __init__(self, bundle, offset=0, size=None):
self.bundle = bundle
self.offset = offset
def __getitem__(self, *args):
index = args[0]
assert len(index) == 4
pul = self.bundle.pul
trace = pul[index[0]][index[1]][index[2]][index[3]]
fh = open(self.bundle.file_name, 'rb')
fh.seek(trace.Data)
fmt = bytearray(trace.DataFormat)[0]
dtype = [np.int16, np.int32, np.float32, np.float64][fmt]
dByte = [2, 4, 4, 8][fmt]
nItemsPerBlock = int(trace.InterleaveSizeS / dByte)
TotalBytes = trace.DataPoints * dByte
#print('{:f}, {:f}'.format(trace.DataPoints, trace.InterleaveSizeS))
if trace.DataPoints >= trace.InterleaveSizeS and trace.InterleaveSizeS !=0:
print('long block')
### there is a mixture of data points (count) and bytes!
data = np.fromfile(fh, count=nItemsPerBlock, dtype=dtype)
dataRead = trace.InterleaveSizeS
data2Read = TotalBytes- dataRead ## in bytes
c= 0
while data2Read > 0 :
fh.seek(trace.InterleaveSkip - trace.InterleaveSizeS, os.SEEK_CUR) ## skip the skip-block
c = c+1
# print(c)
if data2Read < trace.InterleaveSizeS: ## less than a block size
data0 = np.fromfile(fh, count=int(data2Read / dByte), dtype=dtype)
data = np.concatenate((data, data0))
break
else: ## larger than a block size
data0 = np.fromfile(fh, count=nItemsPerBlock, dtype=dtype)
data = np.concatenate((data, data0))
dataRead = trace.InterleaveSizeS + dataRead
data2Read = TotalBytes - dataRead
else:
data = np.fromfile(fh, count=trace.DataPoints, dtype=dtype)
return data * trace.DataScaler + trace.ZeroData
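# Usage sketch (an assumption about how the surrounding Bundle code wires this class up, not
# something defined in this file): a Data instance is indexed with the same 4-level
# (group, series, sweep, trace) path as the .pul tree, e.g.
#   trace_values = data_section[2, 0, 5, 1]
# and the returned numpy array is already converted to physical units via
# raw * trace.DataScaler + trace.ZeroData.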
class StimulationRecord(TreeNode):
'''
(* StimulationRecord = RECORD *)
'''
### Long real: d
### Byte: c
field_info = [
('Mark', 'i'), # = 0; (* INT32 *)
('EntryName', '32s',cstr), # = 4; (* String32Type *)
('FileName', '32s',cstr), # = 36; (* String32Type *)
('AnalName', '32s',cstr), # = 68; (* String32Type *)
('DataStartSegment', 'i'), # = 100; (* INT32 *)
('DataStartTime', 'd'), # = 104; (* LONGREAL *)
('SampleInterval', 'd'), # = 112; (* LONGREAL *)
('SweepInterval', 'd'), # = 120; (* LONGREAL *)
('LeakDelay', 'd'), # = 128; (* LONGREAL *)
('FilterFactor', 'd'), # = 136; (* LONGREAL *)
('NumberSweeps', 'i'), # = 144; (* INT32 *)
('NumberLeaks', 'i'), # = 148; (* INT32 *)
('NumberAverages', 'i'), # = 152; (* INT32 *)
('ActualAdcChannels', 'i'), # = 156; (* INT32 *)
('ActualDacChannels', 'i'), # = 160; (* INT32 *)
('ExtTrigger', 'c'), # = 164; (* BYTE *)
('NoStartWait', 'h'), # = 165; (* BOOLEAN *)
('UseScanRates', 'h'), # = 166; (* BOOLEAN *)
('NoContAq', 'h'), # = 167; (* BOOLEAN *)
('HasLockIn', 'h'), # = 168; (* BOOLEAN *)
('OldStartMacKind', 'c'), # = 169; (* CHAR *)
('OldEndMacKind', 'h'), # = 170; (* BOOLEAN *)
('AutoRange', 'c'), # = 171; (* BYTE *)
('BreakNext', 'h'), # = 172; (* BOOLEAN *)
('IsExpanded', 'h'), # = 173; (* BOOLEAN *)
('LeakCompMode', 'h'), # = 174; (* BOOLEAN *)
('HasChirp', 'h'), # = 175; (* BOOLEAN *)
('OldStartMacro', '32s',cstr), # = 176; (* String32Type *)
('OldEndMacro', '32s',cstr), # = 208; (* String32Type *)
('IsGapFree', 'h'), # = 240; (* BOOLEAN *)
('HandledExternally', 'h'), # = 241; (* BOOLEAN *)
('Filler1', 'i'), # = 242; (* BOOLEAN *)
('Filler2', 'i'), # = 243; (* BOOLEAN *)
('CRC', 'i'), # = 244; (* CARD32 *)
]
# size_check = 248
class ChannelRecord(TreeNode):
'''
Set fields of the Channel record.
'''
field_info = [
('Mark','i'),# = fread(fh, 1, 'int32=>int32');% = 0; (* INT32 *)
('LinkedChannel','i'),# =
"""
detector
Copyright (c) 2020 <NAME>
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import os
import sys
import json
# import datetime # not really useful so remove soon pls
import numpy as np
import skimage.draw
import skimage.io  # imread/imsave are used below; import the submodule explicitly
import imgaug # should augment this import as well haha
# from PIL import Image
# Root directory of project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# sys.path.insert(1, 'samples/hentai/')
# from hentai import HentaiConfig
from cv2 import VideoCapture, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH, CAP_PROP_FPS, VideoWriter, VideoWriter_fourcc
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# Path to trained weights
WEIGHTS_PATH = os.path.join(ROOT_DIR, "weights.h5")
# taking this from hentai to avoid import
class HentaiConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "hentai"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 + 1 # Background + censor bar + mosaic
# Number of training steps per epoch, equal to dataset train size
STEPS_PER_EPOCH = 297
# Skip detections with < 55% confidence. NOTE: lowered from 65% because it's better for false positives
DETECTION_MIN_CONFIDENCE = 0.55
class Detector():
# at startup, don't create model yet
def __init__(self, weights_path):
class InferenceConfig(HentaiConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
self.config = InferenceConfig()
self.weights_path = weights_path
# counts how many non-png images, if >1 then warn user
self.dcp_compat = 0
# keep model loading to be done later, not now
# Make sure this is called before using model weights
def load_weights(self):
self.model = modellib.MaskRCNN(mode="inference", config=self.config,
model_dir=DEFAULT_LOGS_DIR)
self.model.load_weights(self.weights_path, by_name=True)
def apply_cover(self, image, mask):
"""Apply cover over image. Based off of Mask-RCNN Balloon color splash function
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result covered image.
"""
# Copy color pixels from the original color image where mask is set
# green = np.array([[[0, 255, 0]]], dtype=np.uint8)
# print('apply_cover: shape of image is',image.shape)
green = np.zeros([image.shape[0], image.shape[1], image.shape[2]], dtype=np.uint8)
green[:,:] = [0, 255, 0]
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) < 1)
cover = np.where(mask, image, green).astype(np.uint8)
else:
# error case, return image
cover = image
return cover, mask
def get_non_png(self):
return self.dcp_compat
def video_create(self, image_path=None, dcp_path=''):
assert image_path
# Video capture to get shapes and stats
# Only supports 1 video at a time, but this can still get mp4 only
vid_list = []
for file in os.listdir(image_path):
if file.endswith('mp4') or file.endswith('MP4'):
vid_list.append(image_path + '/' + file)
video_path = vid_list[0] # ONLY works with 1 video for now
vcapture = VideoCapture(video_path)
width = int(vcapture.get(CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT))
fps = vcapture.get(CAP_PROP_FPS)
# Define codec and create video writer, video output is purely for debugging and educational purpose. Not used in decensoring.
file_name = "uncensored_video.avi"
vwriter = VideoWriter(file_name,
VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
print("Beginning build. Do ensure only relevant images are in source directory")
input_path = dcp_path + '/decensor_output/'
img_list = []
# output of the video detection should be in order anyway
# os.chdir(input_path)
# files = filter(os.path.isfile, os.listdir(input_path))
# files = [os.path.join( f) for f in files]
# files.sort(key=lambda x: os.path.getmtime(x))
# for file in files:
for file in os.listdir(input_path):
# TODO: check what other filetypes are supported
if file.endswith('.png') or file.endswith('.PNG'):
img_list.append(input_path + file)
print('adding image ', input_path + file)
for img in img_list:
print("frame: ", count)
# Read next image
image = skimage.io.imread(img) # Should be no alpha channel in created image
# Add image to video writer, after flipping R and B value
image = image[..., ::-1]
vwriter.write(image)
count += 1
vwriter.release()
print('video complete')
# save path and orig video folder are both paths, but orig video folder is for original mosaics to be saved.
# fname = filename.
# image_path = path of input file, image or video
def detect_and_cover(self, image_path=None, fname=None, save_path='', is_video=False, orig_video_folder=None, save_mask=False):
assert image_path
assert fname # replace these with something better?
if is_video: # TODO: video capabilities will finalize later
# from cv2 import VideoCapture, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH, CAP_PROP_FPS, VideoWriter, VideoWriter_fourcc
# Video capture
video_path = image_path
vcapture = VideoCapture(video_path)
width = int(vcapture.get(CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT))
fps = vcapture.get(CAP_PROP_FPS)
# Define codec and create video writer, video output is purely for debugging and educational purpose. Not used in decensoring.
file_name = fname + "_with_censor_masks.avi"
vwriter = VideoWriter(file_name,
VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
success = True
while success:
print("frame: ", count)
# Read next image
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# save frame into decensor input original. Need to keep names persistent.
im_name = fname[:-4] # if we get this far, we definitely have a .mp4. Remove that, add count and .png ending
file_name = orig_video_folder + im_name + str(count).zfill(6) + '.png' # NOTE: should be adequate for up to 10^6 frames, which is more than enough for even 30 minutes total.
# print('saving frame as ', file_name)
skimage.io.imsave(file_name, image)
# Detect objects
r = self.model.detect([image], verbose=0)[0]
# Apply cover
cov, mask = self.apply_cover(image, r['masks'])
# save covered frame into input for decensoring path
file_name = save_path + im_name + str(count).zfill(6) + '.png'
# print('saving covered frame as ', file_name)
skimage.io.imsave(file_name, cov)
# RGB -> BGR to save image to video
cov = cov[..., ::-1]
# Add image to video writer
vwriter.write(cov)
count += 1
vwriter.release()
print('video complete')
else:
# print("Running on ", end='')
# print(image_path)
# Read image
image = skimage.io.imread(image_path) # problems with strange shapes
if image.shape[-1] == 4:
image = image[..., :3] # strip alpha channel
# Detect objects
r = self.model.detect([image], verbose=0)[0]
cov, mask = self.apply_cover(image, r['masks'])
# Save output
file_name = save_path + fname
skimage.io.imsave(file_name, cov)
# print("Saved to ", file_name)
# Option to save mask separately not working rn
# if(save_mask==True):
# skimage.io.imsave(file_name+'_mask', skimage.img_as_uint(mask)) # save to default input dir for now
def run_on_folder(self, input_folder, output_folder, is_video=False, orig_video_folder=None, save_mask=False):
assert input_folder
assert output_folder # replace with catches and popups
file_counter = 0
if(is_video == True):
# support for multiple videos if your computer can even handle that
vid_list = []
for file in os.listdir(input_folder):
if file.endswith('mp4') or file.endswith('MP4'):
vid_list.append((input_folder + '/' + file, file))
for vid_path, vid_name in vid_list:
# video will not support separate mask saves
self.detect_and_cover(vid_path, vid_name, output_folder, is_video=True, orig_video_folder=orig_video_folder)
print('detection on video', file_counter, 'is complete')
file_counter += 1
else:
# obtain inputs from the input folder
img_list = []
for file in os.listdir(input_folder):
# TODO: check what other filetypes are supported
if file.endswith('.png') or file.endswith('.PNG'):
img_list.append((input_folder + '/' + file, file))
elif file.endswith(".jpg") or file.endswith(".JPG") or file.endswith(".jpeg"):
# img_list.append((input_folder + '/' + file, file)) # Do not add jpgs. Conversion to png must happen first
self.dcp_compat += 1
# save run detection with outputs to output folder
for img_path, img_name in img_list:
self.detect_and_cover(img_path, img_name, output_folder, save_mask=save_mask)
print('detection on image', file_counter, 'is complete')
file_counter += 1
# return 0
# main only used for debugging here. Comment out pls
'''if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Utilize Mask R-CNN to detect censor bars.')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights.h5")
parser.add_argument('--imagedir', required=True,
metavar="path to image folder",
help='Folder of images to apply mask coverage on')
# parser.add_argument('--video', required=False,
# metavar="path or URL to video",
# help='Video to apply effect on')
args = parser.parse_args()
weights_path = args.weights
images_path = args.imagedir
output_dir = "temp_out/"
print('Initializing Detector class')
detect_instance = Detector(weights_path=args.weights)
print('loading
"RBS Status", "Source Storage", "Archival Storage"]
page_cursor = response.get('data', {}).get('vSphereVmNewConnection', {}).get('pageInfo', {})
next_page_context = {
"next_page_token": page_cursor.get('endCursor', ''),
"name": "rubrik-polaris-vm-objects-list",
"has_next_page": page_cursor.get('hasNextPage', '')
}
if next_page_context.get('has_next_page'):
readable_output = "{}\n {} {}".format(tableToMarkdown(table_name, hr, header, removeNull=True),
MESSAGES['NEXT_RECORD'], page_cursor.get('endCursor'))
else:
readable_output = tableToMarkdown(table_name, hr, header, removeNull=True)
outputs = {
f"{OUTPUT_PREFIX['VM_OBJECT']}(val.id == obj.id)": context,
f"{OUTPUT_PREFIX['PAGE_TOKEN_VM_OBJECT']}(val.name == obj.name)": remove_empty_elements(next_page_context)
}
return CommandResults(
outputs=outputs,
raw_response=response,
readable_output=readable_output
)
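# Pagination note (illustrative; this assumes the command exposes a next_page_token argument the
# same way the other list commands in this file do): when has_next_page is true, the readable
# output ends with the endCursor token and the analyst re-runs the command with it, e.g.
#   !rubrik-polaris-vm-objects-list next_page_token=<endCursor from the previous run>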
def rubrik_polaris_vm_object_snapshot_list_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Search for a Rubrik snapshot of an object based on the arguments.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
object_id = validate_required_arg("object_id", args.get('object_id'))
start_date = end_date = ""
start_date_ob = arg_to_datetime(validate_required_arg("start_date", args.get("start_date")))
if start_date_ob:
start_date = start_date_ob.strftime(DATE_TIME_FORMAT)
end_date_ob = arg_to_datetime(validate_required_arg("end_date", args.get("end_date")))
if end_date_ob:
end_date = end_date_ob.strftime(DATE_TIME_FORMAT)
timezone_offset = validate_required_arg("timezone_offset", args.get("timezone_offset"))
cluster_connected = args.get("cluster_connected", DEFAULT_CLUSTER_CONNECTED)
if cluster_connected:
cluster_connected = validate_boolean_argument(cluster_connected, 'cluster_connected')
snapshot_group_by = args.get('snapshot_group_by', DEFAULT_SNAPSHOT_GROUP_BY)
missed_snapshot_by = args.get('missed_snapshot_group_by', DEFAULT_SNAPSHOT_GROUP_BY)
time_range = {
"start": start_date,
"end": end_date
}
response = client.get_object_snapshot(snapshot_group_by=snapshot_group_by,
missed_snapshot_group_by=missed_snapshot_by,
object_id=object_id, time_range=time_range,
timezone_offset=timezone_offset,
cluster_connected=cluster_connected)
data = response.get('data', {}).get('snappable', {})
if not data.get('snapshotGroupByConnection', {}).get('nodes'):
return CommandResults(readable_output=MESSAGES["NO_RECORDS_FOUND"].format("vm object snapshots"))
context, hr = prepare_context_hr_vm_object_snapshot(data)
table_name = "VM Object Snapshots"
header = ["Snapshot Details", SNAPSHOT_IDS]
readable_output = tableToMarkdown(table_name, hr, header, removeNull=True)
return CommandResults(
outputs_prefix=OUTPUT_PREFIX['VM_OBJECT'],
outputs_key_field="id",
outputs=context,
raw_response=response,
readable_output=readable_output
)
def rubrik_sonar_ondemand_scan_status_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve the status of a scanned system in Polaris Sonar.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
crawl_id = args.get("crawl_id")
if not crawl_id:
raise ValueError(ERROR_MESSAGES['MISSING_REQUIRED_FIELD'].format("crawl_id"))
raw_response = client.get_on_demand_scan_status(crawl_id)
nodes = raw_response.get("data", {}).get("crawl", {}).get("crawlObjConnection", {}).get("nodes", [])
response_crawl_id = raw_response.get("data", {}).get("crawl", {}).get("id", "")
if not nodes:
return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])
context, hr = prepare_context_hr_sonar_ondemand_scan_status(nodes, response_crawl_id)
return CommandResults(outputs_prefix=OUTPUT_PREFIX['SONAR_ON_DEMAND_SCAN'],
outputs_key_field="crawlId",
readable_output=hr,
outputs=context,
raw_response=raw_response)
def rubrik_sonar_ondemand_scan_result_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve the download link for an on-demand scan of a system in Rubrik Polaris - Sonar.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
crawl_id = validate_required_arg("crawl_id", args.get("crawl_id", ""))
file_type = validate_required_arg("file_type", args.get("file_type"))
raw_response = client.get_on_demand_scan_result(crawl_id, {"fileType": file_type})
outputs = raw_response.get("data", {}).get("downloadResultsCsv", {})
if not outputs or not outputs.get("downloadLink"):
return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])
hr_content = {
"Scan result CSV Download Link": f"Download the [CSV]({outputs.get('downloadLink')}) file to see the result."
}
hr = tableToMarkdown("Sonar On-Demand Scan Result", hr_content, headers="Scan result CSV Download Link",
removeNull=True)
context = {
"crawlId": crawl_id.lower(),
"Result": outputs
}
return CommandResults(outputs_prefix=OUTPUT_PREFIX['SONAR_ON_DEMAND_SCAN'],
outputs_key_field="crawlId",
readable_output=hr,
outputs=context,
raw_response=raw_response)
def rubrik_radar_anomaly_csv_analysis_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Request the analysis and retrieve the download link for the Radar CSV analyzed file.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
cluster_id = validate_required_arg("cluster_id", args.get('cluster_id'))
snapshot_id = validate_required_arg("snapshot_id", args.get("snapshot_id"))
object_id = validate_required_arg("object_id", args.get("object_id"))
response = client.get_csv_result(cluster_id=cluster_id, snappable_id=object_id, snapshot_id=snapshot_id)
data = response.get("data", {})
download_data = data.get('investigationCsvDownloadLink', {})
if not download_data:
return CommandResults(readable_output=MESSAGES["NO_RESPONSE"])
context = {
"clusterId": cluster_id,
"snapshotId": snapshot_id,
"objectId": object_id
}
context.update(data)
table_name = "Radar Anomaly CSV Analysis"
hr = [f"Download the analyzed [CSV]({download_data.get('downloadLink')}) file."]
readable_output = tableToMarkdown(table_name, hr, ["CSV Download Link"], removeNull=True)
return CommandResults(
outputs_prefix=OUTPUT_PREFIX["RADAR_ANOMALY_CSV_ANALYSIS"],
outputs_key_field=["clusterId", "snapshotId", "objectId"],
outputs=context,
raw_response=response,
readable_output=readable_output
)
def rubrik_sonar_csv_download_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Request the download of the Sonar CSV snapshot results for the given object and snapshot.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
snapshot_id = validate_required_arg("snapshot_id", args.get("snapshot_id"))
object_id = validate_required_arg("object_id", args.get("object_id"))
file_type = args.get('file_type')
filters = None
if file_type:
filters = {
"fileType": file_type
}
response = client.get_csv_download(snappable_id=object_id, snapshot_id=snapshot_id, filters=filters)
data = response.get("data", {})
if not data:
return CommandResults(readable_output=MESSAGES["NO_RESPONSE"])
context = {
"snapshotId": snapshot_id,
"objectId": object_id
}
context.update(data)
table_name = "Sonar CSV Download"
if data.get('downloadSnapshotResultsCsv', {}).get('isSuccessful'):
hr = ["Success"]
else:
hr = ["Failed"]
readable_output = tableToMarkdown(table_name, hr, ["Download Status"], removeNull=True)
return CommandResults(
outputs_prefix=OUTPUT_PREFIX["SONAR_CSV_DOWNLOAD"],
outputs_key_field=["snapshotId", "objectId"],
outputs=context,
raw_response=response,
readable_output=readable_output
)
def rubrik_gps_snapshot_files_list_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve the list of the available files that can be downloaded.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
snapshot_id = validate_required_arg("snapshot_id", args.get("snapshot_id", ""))
search_prefix = args.get("search_prefix", "")
path = args.get("path", "")
limit = arg_to_number(args.get('limit', DEFAULT_LIMIT))
if not limit or limit <= 0 or limit > 1000:
raise ValueError(ERROR_MESSAGES["INVALID_LIMIT"].format(limit))
next_page_token = args.get('next_page_token')
raw_response = client.get_snapshot_files(snapshot_id=snapshot_id, search_prefix=search_prefix, path=path,
first=limit, after=next_page_token)
outputs = raw_response.get("data", {}).get("browseSnapshotFileConnection", {}).get("edges", [])
page_cursor = raw_response.get("data", {}).get("browseSnapshotFileConnection", {}).get("pageInfo", {})
if not outputs:
return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format("files"))
context, hr = prepare_context_hr_gps_snapshot_files(outputs, snapshot_id)
next_page_context = {
"next_page_token": page_cursor.get('endCursor', ''),
"name": "rubrik-gps-snapshot-files-list",
"has_next_page": page_cursor.get('hasNextPage', '')
}
outputs = {
f"{OUTPUT_PREFIX['GPS_SNAPSHOT_FILES']}(val.snapshotId == obj.snapshotId)": context,
f"{OUTPUT_PREFIX['PAGE_TOKEN_GPS_SNAPSHOT_FILES']}(val.name == obj.name)": remove_empty_elements(
next_page_context)
}
if page_cursor.get("hasNextPage"):
hr += "{} {}".format(MESSAGES['NEXT_RECORD'], page_cursor.get("endCursor"))
return CommandResults(readable_output=hr,
outputs=outputs,
raw_response=raw_response)
def rubrik_gps_vm_export_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Request to initiate an export of a snapshot of a virtual machine.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
config, object_id = validate_vm_export_args(args)
raw_response = client.export_vm_snapshot(config, object_id)
outputs = raw_response.get("data", {})
if not outputs:
return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('vm export'))
snapshot_export_request_id = outputs.get('vSphereVMExportSnapshotV2', {}).get('id', '')
hr_content = {"Snapshot Export Request ID": snapshot_export_request_id}
hr = tableToMarkdown("GPS VM Export", hr_content, headers="Snapshot Export Request ID", removeNull=True)
context = {
"id": snapshot_export_request_id
}
return CommandResults(outputs_prefix=OUTPUT_PREFIX['GPS_VM_EXPORT'],
outputs_key_field="id",
readable_output=hr,
outputs=context,
raw_response=raw_response)
def rubrik_user_downloads_list_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve the user downloads. This would return the current and past download history.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
response = client.get_user_downloads()
data = response.get("data", {})
if not data:
return CommandResults(readable_output=MESSAGES["NO_RECORDS_FOUND"].format("user downloads"))
context, hr = prepare_context_hr_user_downloads(data.get('getUserDownloads', []))
table_name = "User Downloads"
headers = ["Download ID", "Name", "Status", "Identifier", "Creation Time", "Completion Time"]
readable_output = tableToMarkdown(table_name, hr, headers, removeNull=True)
return CommandResults(
outputs_prefix=OUTPUT_PREFIX["USER_DOWNLOADS"],
outputs_key_field="id",
outputs=context,
raw_response=response,
readable_output=readable_output
)
def rubrik_sonar_csv_result_download_command(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Retrieve the download link for the requested Sonar CSV Snapshot file.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
download_id = arg_to_number(validate_required_arg("download_id", args.get("download_id")))
response = client.get_csv_result_download(download_id=download_id)
data = response.get("data", {})
if not data:
return CommandResults(readable_output=MESSAGES["NO_RESPONSE"])
context = {
"downloadId": download_id
}
context.update(data)
table_name = "Sonar CSV Result"
url_ = data.get('getDownloadUrl', {}).get('url')
hr = [f"Download the [CSV]({url_}) file to see the result."]
readable_output = tableToMarkdown(table_name, hr, ["Download URL"], removeNull=True)
return CommandResults(
outputs_prefix=OUTPUT_PREFIX["SONAR_CSV_DOWNLOAD"],
outputs_key_field="downloadId",
outputs=context,
raw_response=response,
readable_output=readable_output
)
def rubrik_gps_sla_domain_list(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
List available SLA Domains Rubrik Polaris - GPS.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
name = args.get("name", "")
cluster_uuid = args.get("cluster_id", "")
object_type = argToList(args.get("object_type"))
show_cluster_slas_only = args.get("show_cluster_slas_only", DEFAULT_SHOW_CLUSTER_SLA_ONLY)
limit = arg_to_number(args.get('limit', DEFAULT_LIMIT))
next_page_token = args.get('next_page_token')
sort_order = args.get('sort_order', DEFAULT_SORT_ORDER)
sort_by = args.get('sort_by', DEFAULT_SORT_BY_SLA_DOMAIN)
filters = []
if name:
filters.append({
"field": "NAME",
"text": name
})
if cluster_uuid:
filters.append({
"field": "CLUSTER_UUID",
"text": cluster_uuid
})
if object_type:
filters.append({
"field": "OBJECT_TYPE",
"objectTypeList": object_type
})
if show_cluster_slas_only:
show_cluster_slas_only = validate_boolean_argument(show_cluster_slas_only, "show_cluster_slas_only")
filters.append({
"field": "SHOW_CLUSTER_SLAS_ONLY",
"text": str(show_cluster_slas_only).lower()
})
if not limit or limit <= 0 or limit > 1000:
raise ValueError(ERROR_MESSAGES['INVALID_LIMIT'].format(limit))
response = client.list_sla_domains(after=next_page_token, first=limit,
filters=filters, sort_order=sort_order,
sort_by=sort_by, show_protected_object_count=True)
edges = response.get('data', {}).get('globalSlaConnection', {}).get('edges', [])
if not edges:
return CommandResults(readable_output=MESSAGES["NO_RECORDS_FOUND"].format("sla domains"))
context, hr = prepare_context_hr_sla_domains_list(edges)
page_cursor = response.get('data', {}).get('globalSlaConnection', {}).get('pageInfo', {})
next_page_context = {
"next_page_token": page_cursor.get('endCursor', ''),
"name": "rubrik-gps-sla-domain-list",
"has_next_page": page_cursor.get('hasNextPage', '')
}
if next_page_context.get('has_next_page'):
hr += f"\n {MESSAGES['NEXT_RECORD']} {page_cursor.get('endCursor')}\n"
outputs = {
f"{OUTPUT_PREFIX['GPS_SLA_DOMAIN']}(val.id == obj.id)": context,
f"{OUTPUT_PREFIX['PAGE_TOKEN_SLA_DOMAIN']}(val.name == obj.name)": remove_empty_elements(next_page_context)
}
return CommandResults(
outputs=outputs,
raw_response=response,
readable_output=hr
)
def rubrik_gps_vm_snapshot_create(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Trigger an on-demand vm snapshot.
:type client: ``PolarisClient``
:param
New `forcefield` keyword to switch between different values of
DEFAULT_DONORS/ACCEPTORS to accomodate different force fields.
Also has an option "other" for no default values.
.. versionchanged:: 0.8
The new default for `update_selection1` and `update_selection2` is now
``True`` (see `Issue 138`_). Set to ``False`` if your selections only
need to be determined once (will increase performance).
.. versionchanged:: 0.9.0
New keyword `distance_type` to select between calculation between
heavy atoms or hydrogen-acceptor. It defaults to the previous
behavior (i.e. "hydrogen").
.. versionchanged:: 0.11.0
Initial checks for selections that potentially raise :exc:`SelectionError`.
.. versionchanged:: 0.17.0
use 0-based indexing
.. deprecated:: 0.16
The previous `verbose` keyword argument was replaced by
`debug`. Note that the `verbose` keyword argument is now
consistently used to toggle progress meters throughout the library.
.. _`Issue 138`: https://github.com/MDAnalysis/mdanalysis/issues/138
"""
super(HydrogenBondAnalysis, self).__init__(universe.trajectory, **kwargs)
warnings.warn(
"This class is deprecated as of MDAnalysis version 1.0 and will "
"be removed in version 2.0."
"Please use MDAnalysis.analysis.hydrogenbonds.hbond_analysis.HydrogenBondAnalysis instead.",
category=DeprecationWarning
)
# per-frame debugging output?
self.debug = debug
self._get_bonded_hydrogens_algorithms = {
"distance": self._get_bonded_hydrogens_dist, # 0.7.6 default
"heuristic": self._get_bonded_hydrogens_list, # pre 0.7.6
}
if detect_hydrogens not in self._get_bonded_hydrogens_algorithms:
raise ValueError("detect_hydrogens must be one of {0!r}".format(
self._get_bonded_hydrogens_algorithms.keys()))
self.detect_hydrogens = detect_hydrogens
self.u = universe
self.selection1 = selection1
self.selection2 = selection2
self.selection1_type = selection1_type
self.update_selection1 = update_selection1
self.update_selection2 = update_selection2
self.filter_first = filter_first
self.distance = distance
self.distance_type = distance_type # note: everything except 'heavy' will give the default behavior
self.angle = angle
self.pbc = pbc and all(self.u.dimensions[:3])
# set up the donors/acceptors lists
if donors is None:
donors = []
if acceptors is None:
acceptors = []
self.forcefield = forcefield
self.donors = tuple(set(self.DEFAULT_DONORS[forcefield]).union(donors))
self.acceptors = tuple(set(self.DEFAULT_ACCEPTORS[forcefield]).union(acceptors))
if not (self.selection1 and self.selection2):
raise ValueError('HydrogenBondAnalysis: invalid selections')
elif self.selection1_type not in ('both', 'donor', 'acceptor'):
raise ValueError('HydrogenBondAnalysis: Invalid selection type {0!s}'.format(self.selection1_type))
self._timeseries = None # final result accessed as self.timeseries
self.timesteps = None # time for each frame
self.table = None # placeholder for output table
self._update_selection_1()
self._update_selection_2()
self._log_parameters()
if self.selection1_type == 'donor':
self._sanity_check(1, 'donors')
self._sanity_check(2, 'acceptors')
elif self.selection1_type == 'acceptor':
self._sanity_check(1, 'acceptors')
self._sanity_check(2, 'donors')
else: # both
self._sanity_check(1, 'donors')
self._sanity_check(1, 'acceptors')
self._sanity_check(2, 'acceptors')
self._sanity_check(2, 'donors')
logger.info("HBond analysis: initial checks passed.")
def _sanity_check(self, selection, htype):
"""sanity check the selections 1 and 2
*selection* is 1 or 2, *htype* is "donors" or "acceptors"
If selections do not update and the required donor and acceptor
selections are empty then a :exc:`SelectionError` is immediately
raised.
If selections update dynamically then it is possible that the selection
will yield donors/acceptors at a later step and we only issue a
warning.
.. versionadded:: 0.11.0
"""
assert selection in (1, 2)
assert htype in ("donors", "acceptors")
# horrible data organization: _s1_donors, _s2_acceptors, etc, update_selection1, ...
atoms = getattr(self, "_s{0}_{1}".format(selection, htype))
update = getattr(self, "update_selection{0}".format(selection))
if not atoms:
errmsg = "No {1} found in selection {0}. " \
"You might have to specify a custom '{1}' keyword.".format(
selection, htype)
if not update:
logger.error(errmsg)
raise SelectionError(errmsg)
else:
errmsg += " Selection will update so continuing with fingers crossed."
warnings.warn(errmsg, category=SelectionWarning)
logger.warning(errmsg)
def _log_parameters(self):
"""Log important parameters to the logfile."""
logger.info("HBond analysis: selection1 = %r (update: %r)", self.selection1, self.update_selection1)
logger.info("HBond analysis: selection2 = %r (update: %r)", self.selection2, self.update_selection2)
logger.info("HBond analysis: criterion: donor %s atom and acceptor atom distance <= %.3f A", self.distance_type,
self.distance)
logger.info("HBond analysis: criterion: angle D-H-A >= %.3f degrees", self.angle)
logger.info("HBond analysis: force field %s to guess donor and acceptor names", self.forcefield)
logger.info("HBond analysis: bonded hydrogen detection algorithm: %r", self.detect_hydrogens)
def _get_bonded_hydrogens(self, atom, **kwargs):
"""Find hydrogens bonded to `atom`.
This method is typically not called by a user but it is documented to
facilitate understanding of the internals of
:class:`HydrogenBondAnalysis`.
Parameters
----------
atom : groups.Atom
heavy atom
**kwargs
passed through to the calculation method that was selected with
the `detect_hydrogens` kwarg of :class:`HydrogenBondAnalysis`.
Returns
-------
hydrogen_atoms : AtomGroup or []
list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`)
or empty list ``[]`` if none were found.
See Also
--------
:meth:`_get_bonded_hydrogens_dist`
:meth:`_get_bonded_hydrogens_list`
.. versionchanged:: 0.7.6
Can switch algorithm by using the `detect_hydrogens` keyword to the
constructor. *kwargs* can be used to supply arguments for algorithm.
"""
return self._get_bonded_hydrogens_algorithms[self.detect_hydrogens](atom, **kwargs)
def _get_bonded_hydrogens_dist(self, atom):
"""Find hydrogens bonded within cutoff to `atom`.
Hydrogens are detected by either name ("H*", "[123]H*") or type ("H");
this is not fool-proof as the atom type is not always a character but
the name pattern should catch most typical occurrences.
The distance from `atom` is calculated for all hydrogens in the residue
and only those within a cutoff are kept. The cutoff depends on the
heavy atom (more precisely, on its element, which is taken as the first
letter of its name ``atom.name[0]``) and is parameterized in
:attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the
default of 1.5 Å is used.
Parameters
----------
atom : groups.Atom
heavy atom
Returns
-------
hydrogen_atoms : AtomGroup or []
list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`)
or empty list ``[]`` if none were found.
Notes
-----
The performance of this implementation could be improved once the
topology always contains bonded information; it currently uses the
selection parser with an "around" selection.
.. versionadded:: 0.7.6
"""
try:
return atom.residue.atoms.select_atoms(
"(name H* 1H* 2H* 3H* or type H) and around {0:f} name {1!s}"
"".format(self.r_cov[atom.name[0]], atom.name))
except NoDataError:
return []
def _get_bonded_hydrogens_list(self, atom, **kwargs):
"""Find "bonded" hydrogens to the donor *atom*.
At the moment this relies on the **assumption** that the
hydrogens are listed directly after the heavy atom in the
topology. If this is not the case then this function will
fail.
Hydrogens are detected by name ``H*``, ``[123]H*`` and they have to be
within a maximum distance from the heavy atom. The cutoff distance
depends on the heavy atom and is parameterized in
:attr:`HydrogenBondAnalysis.r_cov`.
Parameters
----------
atom : groups.Atom
heavy atom
**kwargs
ignored
Returns
-------
hydrogen_atoms : AtomGroup or []
list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`)
or empty list ``[]`` if none were found.
.. versionchanged:: 0.7.6
Added detection of ``[123]H`` and additional check that a
selected hydrogen is bonded to the donor atom (i.e. its
distance to the donor is less than the covalent radius
stored in :attr:`HydrogenBondAnalysis.r_cov` or the default
1.5 Å).
Changed name to
:meth:`~HydrogenBondAnalysis._get_bonded_hydrogens_list`
and added *kwargs* so that it can be used instead of
:meth:`~HydrogenBondAnalysis._get_bonded_hydrogens_dist`.
"""
warnings.warn("_get_bonded_hydrogens_list() (heuristic detection) does "
"not always find "
"all hydrogens; Using detect_hydrogens='distance', when "
"constructing the HydrogenBondAnalysis class is safer. "
"Removal of this feature is targeted for 1.0",
category=DeprecationWarning)
box = self.u.dimensions if self.pbc else None
try:
hydrogens = [
a for a in self.u.atoms[atom.index + 1:atom.index + 4]
if (a.name.startswith(('H', '1H', '2H', '3H')) and
distances.calc_bonds(atom.position, a.position, box=box) < self.r_cov[atom.name[0]])
]
except IndexError:
hydrogens = [] # weird corner case that atom is the last one in universe
return hydrogens
def _update_selection_1(self):
self._s1 = self.u.select_atoms(self.selection1)
self.logger_debug("Size of selection 1: {0} atoms".format(len(self._s1)))
if not self._s1:
logger.warning("Selection 1 '{0}' did not select any atoms."
.format(str(self.selection1)[:80]))
self._s1_donors = {}
self._s1_donors_h = {}
self._s1_acceptors = {}
if self.selection1_type in ('donor', 'both'):
self._s1_donors = self._s1.select_atoms(
'name {0}'.format(' '.join(self.donors)))
self._s1_donors_h = {}
for i, d in enumerate(self._s1_donors):
tmp = self._get_bonded_hydrogens(d)
if tmp:
self._s1_donors_h[i] = tmp
self.logger_debug("Selection 1 donors: {0}".format(len(self._s1_donors)))
self.logger_debug("Selection 1 donor hydrogens: {0}".format(len(self._s1_donors_h)))
if self.selection1_type in ('acceptor', 'both'):
self._s1_acceptors = self._s1.select_atoms(
'name {0}'.format(' '.join(self.acceptors)))
self.logger_debug("Selection 1 acceptors: {0}".format(len(self._s1_acceptors)))
def _update_selection_2(self):
box = self.u.dimensions if self.pbc else None
self._s2 = self.u.select_atoms(self.selection2)
if self.filter_first and self._s2:
self.logger_debug('Size of selection 2 before filtering:'
' {} atoms'.format(len(self._s2)))
ns_selection_2 = AtomNeighborSearch(self._s2, box)
self._s2 = ns_selection_2.search(self._s1, 3. * self.distance)
self.logger_debug('Size of selection 2: {0} atoms'.format(len(self._s2)))
if not self._s2:
logger.warning('Selection 2 "{0}" did not select any atoms.'
.format(str(self.selection2)[:80]))
self._s2_donors = {}
self._s2_donors_h = {}
self._s2_acceptors = {}
if not self._s2:
return None
if self.selection1_type in ('donor', 'both'):
self._s2_acceptors = self._s2.select_atoms(
'name {0}'.format(' '.join(self.acceptors)))
self.logger_debug("Selection 2 acceptors: {0:d}".format(len(self._s2_acceptors)))
if self.selection1_type in ('acceptor', 'both'):
self._s2_donors = self._s2.select_atoms(
'name {0}'.format(' '.join(self.donors)))
self._s2_donors_h = {}
for
#
# The next three lines are self-descriptive. But you will need the *\maketitle* **after** the \begin{document} for those items to appear.
#
# ### The Body
#
# The body is similar to what you would find in a normal word processor. The first element to add to the body is the *abstract*. An abstract informs the reader what will follow in the rest of the document. For your class projects, this will be a <250 word summary of your work. In your class assignments, you can write a summary of what you learned so that when *future* you comes back to it, it will hopefully make sense. To create an abstract:
#
# >\begin{abstract}
#
# >This is a simple paragraph at the beginning of the document. A brief introduction to the main subject.
#
# >\end{abstract}
#
# you must create an abstract environment. Environments (e.g., abstract, figure, table, equation) always have a \begin and an \end statement to tell the LaTeX compiler that it needs to do something different here and for how long.
#
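# As one more example of the begin/end pattern (a small sketch, not one of the assignment's own examples), a displayed, numbered equation uses the *equation* environment in exactly the same way:
#
# >\begin{equation}
#
# >F = ma \label{eq:Newton2}
#
# >\end{equation}
#
# The \label lets you refer back to the equation number later in the text with \ref{eq:Newton2}.
#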
# There are other elements that in the body that **don't** need an environment because they are self explanatory to the compiler when to stop. For example, you can organize the body using sections, subsections, subsubsections, etc. Although an environment is not required, you do need a `\` to tell the compiler that it isn't really text either.
#
# >\section{Introduction}
#
# >\section{Methods}
#
# >\subsection{Newton's 1st Law}
#
# >\subsubsection{Einstein's Theory of General Relativity}
#
# >\section{Results}
#
# In the above examples:
#
# - The first section is the Introduction and it will be enumerated starting from 1. The LaTeX compiler will know when the Introduction ends when it encounters the next \section command.
# - The second section called Methods (enumerated with 2) has a subsection called Newton's 1st Law. Subsections are then enumerated with a ".#", where the above subsection is 2.1. Subsubsections will gain an additional ".#" so that it will numbered 2.1.1.
# - The third section (enumerated with 3) tells the compiler to go back to the previous level in the tree.
#
# Between section commands is where the main text will appear. In contrast to a word processor (like Word), LaTeX allows for inline commands; a short example combining them follows the list below. The most common inline commands are:
#
# - Enter math mode with \$ signs. Suppose you need the greek letter $\alpha$; you can easily add it to your text by placing \alpha between \$ \$. This is less cumbersome than having to define a macro in Word. Anything that you could do in an equation can be done in math mode (e.g., \frac{1}{2}x^2 in between \$ signs appears as $\frac{1}{2}x^2$).
# - Cite a reference. This will be explained more later.
# - Add a comment to the writer using \%. Everything on a line that comes after \% will not appear in the pdf document, but can serve as a note for later.
# - You can add text formatting for **bold**, *italics*, or $\texttt{texttype}$ using: \textbf, \textit, or \texttt.
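#
# As a short, illustrative example combining these commands (the citation key `Berman1983` assumes a matching entry in your reference database, like the one shown later in this guide):
#
# >The kinetic energy is $\frac{1}{2}mv^2$, where $m$ is the \textbf{mass} \cite{Berman1983}. % TODO: double-check this derivation
#
# Everything before the \% appears in the pdf (with the fraction typeset, "mass" in bold, and a citation marker whose format depends on the bibliography style), while the TODO note after \% is only visible in the source file.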
#
# Figures and tables are created using an environment (recall that this means begin and end statements). For many of the extra features for figures, you will need to add `\usepackage{graphicx}` to the front matter. Figures and tables have similar structures as you can see in these basic examples:
#
# >\begin{figure}[!h]
#
# >\centering
#
# >\includegraphics[width=\linewidth]{filename.png}
#
# >\caption{This is the figure caption, which describes basic aspects of the figure to the reader. \label{fig:Fig1}}
#
# >\end{figure}
#
# and
#
# >\begin{table}[!h]
#
# >\centering
#
# >\begin{tabular}{c|c|c}
#
# >\hline
#
# >cell11 & cell12 & cell13 \\
#
# >cell21 & cell22 & cell23 \\
#
# >cell31 & cell32 & cell33
#
# >\end{tabular}
#
# >\caption{This is a table caption, which describes the basic aspects of the table or gives the table a title. \label{tab:Tab1}}
#
# >\end{table}
#
# The figure and table environments have a [] after the begin statement, where positioning arguments are placed (e.g., !=override default, h=here, t=top of page, b=bottom of page). This is followed by \centering, which tells the LaTeX compiler to place the figure/table in the center of the page (<------center------->). The figure environment relies on the `\includegraphics` command from the graphicx package, which has a [] for arguments that tell the LaTeX compiler how to scale the figure. In the above example, the figure is scaled so that its width spans an entire line. The {} after \includegraphics holds the filename of the image (e.g., `filename.png`), where LaTeX can handle many filetypes (e.g., png, jpg, and pdf are the most common). The table environment is different in that it holds a *tabular* environment within the *table* environment. The tabular environment has arguments {} that tell the LaTeX compiler:
#
# - the number of columns (implicitly),
# - the alignment within columns (explicitly), and
# - the borders between columns.
#
# The columns can be left (l), center (c), or right (r) aligned, where the total number of these characters indicates the number of columns (3 l's = 3 columns, left aligned). The \hline command draws a horizontal line that spans the width of the table. The data within a row are separated using the `&` symbol and a row is terminated with `\\`. The last row **doesn't** need to be terminated with `\\`, and the `\end{tabular}` must follow on the next line.
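#
# For instance (an illustrative column specification, not tied to the table above), `\begin{tabular}{l|c|r}` declares three columns that are left, center, and right aligned, separated by vertical rules, while `\begin{tabular}{ccc}` declares three centered columns with no rules.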
#
# Both figures and tables use the *caption* command to hold the description and a *label* so that the figure/table can be dynamically referenced in the text (using `\ref{fig:Fig1}` or `\ref{tab:Tab1}`). The beauty of LaTeX is that the referencing system keeps track of the figure and table numbering, so if the order of tables is switched, the numbering is updated on the next compilation. Finally, both figures and tables **require** an \end statement.
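#
# For example (using the figure labeled above), writing `Figure~\ref{fig:Fig1} shows the raw data.` in the body compiles to "Figure 1 shows the raw data.", and the number updates automatically if the figure is moved or reordered.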
#
# The LaTeX compiler will abort or crash if a given environment does not have matching {} or begin/end statements. This is usually indicated in the compilation log (upper right button **View Logs**).
#
# ### The Back Matter
#
# The back matter contains supplementary information to the body (e.g., acknowledgments, references, appendices). The acknowledgments section (**note the spelling**) is an environment, so it needs a \begin{acknowledgments} and an \end{acknowledgments}; this is where you would thank particular individuals/institutions that aided in the completion of the project (e.g., conversations, resources, proofing).
#
# An appendix is started with the `\appendix` command, which behaves much like the body but includes supplementary material (e.g., a derivation of an equation, how a new method was verified), and it is labeled with letters (A, B, C, ...). For you, this is where you can put the code that you generate, using the *verbatim* environment. Additional guides on how to include code in LaTeX can be found [here](https://www.overleaf.com/learn/latex/Code_listing).
#
# In addition to the ease of generating equations, LaTeX is preferred because it makes referencing easier too, with BibTeX. At the end of your document, references are included by telling LaTeX the referencing style (e.g., apsrev4-2.bst for *Physical Review*) and a database of references (e.g., references.bib) through supplemental files. You must include the following for the references:
#
# >\bibliographystyle{style_filename.bst}
#
# >\bibliography{reference_filename.bib}
#
# The reference database (*.bib file) will contain entries like the following:
#
# ```
# @ARTICLE{Berman1983,
# author = "<NAME>., <NAME> <NAME>., <NAME>.",
# title = "Stability of nonlinear modes",
# journal = "Physica D",
# volume = "88",
# pages = "445",
# year = "1983",
# }
# ```
# where `Berman1983` is the label used for the inline citation command within the body (e.g., \cite{Berman1983}). The quotation marks for each field tell BibTeX not to change the formatting (i.e., capitalization). There are different entry types that correspond to different kinds of references (e.g., ARTICLE, BOOK, INPROCEEDINGS, etc.). Remember that each entry requires an opening { and closing }.
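#
# Putting the pieces together, a minimal compilable skeleton might look like the following. This is only a sketch: it uses the generic *article* class and the *plain* bibliography style, so journal-specific pieces (such as the acknowledgments environment and the apsrev4-2.bst style, which come from journal templates like REVTeX) are left out, and `filename.png` and `references.bib` are placeholder files you must supply (the latter containing the `Berman1983` entry above).
#
# ```
# \documentclass{article}
# \usepackage{graphicx}
#
# \title{My Project}
# \author{A. Student}
# \date{\today}
#
# \begin{document}
# \maketitle
#
# \begin{abstract}
# A short summary of the work.
# \end{abstract}
#
# \section{Introduction}
# Some text with inline math, $\frac{1}{2}x^2$, and a citation \cite{Berman1983}.
#
# \section{Methods}
# See Figure~\ref{fig:Fig1} for the setup.
#
# \begin{figure}[!h]
# \centering
# \includegraphics[width=\linewidth]{filename.png}
# \caption{A placeholder figure. \label{fig:Fig1}}
# \end{figure}
#
# \appendix
# \section{Code}
#
# \bibliographystyle{plain}
# \bibliography{references}
# \end{document}
# ```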
# | |
(len(raw) < 3): continue
self.ShowProgress( iline* 100.0 /(maxlines+1))
if mode == '2d':
self.dimension = 2
sx = raw.split()
yval = float(sx[2])
tmp_y.append(yval)
self.yaddr = sx[1].strip()
if self.yaddr.endswith(':'): self.yaddr = self.yaddr[:-1]
mode = None
if len(tmp_dat)>0:
ntotal_at_2d.append(len(tmp_dat))
elif mode == 'epics scan': # real numeric column data
print( 'Warning: file appears to have a second scan appended!')
break
elif mode == 'data': # real numeric column data
tmp_dat.append(numpy.array([float(i) for i in raw.split()]))
elif mode == '-----':
if col_legend is None:
col_legend = lines.pop()[1:].strip().split()
elif mode in ( '=====', 'n_points'):
pass
elif mode == 'user titles':
self.user_titles.append(raw[1:].strip())
elif mode == 'pv list':
str = raw[1:].strip().replace('not connected',' = not connected')
if str.lower().startswith(mode): continue
desc = str
addr = ''
val = 'unknown'
try:
x = str.split('=')
desc = x[0].replace('\t','').strip()
val = x[1].strip()
if '(' in desc and desc.endswith(')'):
n = desc.rfind('(')
addr = desc[n+1:-1]
desc = desc[:n].rstrip()
except:
pass
self.env_addr.append(addr)
self.env_desc.append(desc)
self.env_val.append(val)
elif mode == 'scan regions':
self.scan_regions.append(raw[1:].strip())
elif mode == 'scan ended at':
self.stop_time = raw[20:].strip()
elif mode == 'scan began at':
self.start_time = raw[20:].strip()
elif mode == 'column labels':
col_details.append(raw[1:].strip())
elif mode is None:
sx = [i.strip() for i in raw[1:].split('=')]
if len(sx)>1:
if sx[0] == 'scan prefix':
self.scan_prefix = sx[1]
if sx[0] == 'scan dimension':
self.dimension = int(float(sx[1]))
else:
                print( 'UNKNOWN MODE = ', mode, raw[:20])
del lines
try:
col_details.pop(0)
except IndexError:
print( 'Empty Scan File')
return -2
if len(self.user_titles) > 1: self.user_titles.pop(0)
if len(self.scan_regions) > 1: self.scan_regions.pop(0)
# check that 2d maps are of consistent size
if self.dimension == 2:
ntotal_at_2d.append(len(tmp_dat))
np_row0 = ntotal_at_2d[0]
nrows = len(ntotal_at_2d)
npts = len(tmp_dat)
if npts != np_row0 * nrows:
for i,n in enumerate(ntotal_at_2d):
if n == np_row0*(i+1):
nrows,npts_total = i+1,n
if len(tmp_y) > nrows or len(tmp_dat)> npts_total:
print( 'Warning: Some trailing data may be lost!')
tmp_y = tmp_y[:nrows]
tmp_dat = tmp_dat[:npts_total]
#
self.y = numpy.array(tmp_y)
# done reading file
self._make_arrays(tmp_dat,col_legend,col_details)
tmp_dat = None
self.xaddr = self.pos_addr[0].strip()
for addr,desc in zip(self.env_addr,self.env_desc):
if self.xaddr == addr: self.xdesc = desc
if self.yaddr == addr: self.ydesc = desc
self.has_fullxrf = False
if os.path.exists("%s.fullxrf" %fname):
self.read_fullxrf("%s.fullxrf" %fname, len(self.x), len(self.y))
def read_fullxrf(self,xrfname, n_xin, n_yin):
inpf = open(xrfname,'r')
atime = os.stat(xrfname)[8]
prefix = os.path.splitext(xrfname)[0]
print('Reading Full XRF spectra from %s' % xrfname)
first_line = inpf.readline()
if not first_line.startswith('; MCA Spectra'):
            print('Warning: %s is not a QuadXRF File' % xrfname)
inpf.close()
return
self.has_fullxrf = True
isHeader= True
nheader = 0
header = {'CAL_OFFSET':None,'CAL_SLOPE':None,'CAL_QUAD':None}
rois = []
n_energies = 2048
while isHeader:
line = inpf.readline()
nheader = nheader + 1
isHeader = line.startswith(';') and not line.startswith(';----')
words = line[2:-1].split(':')
if words[0] in header.keys():
header[words[0]] = [float(i) for i in words[1].split()]
elif words[0].startswith('ROI'):
roinum = int(words[0][3:])
rois.append((words[1].strip(),int(words[2]),int(words[3])))
# end of header: read one last line
line = inpf.readline()
nelem = self.nelem = len(header['CAL_OFFSET'])
nheader = nheader + 1
# print('==rois==' , len(rois), len(rois)/nelem, nelem)
allrois = []
        nrois = len(rois) // nelem  # integer division: the ROI list repeats once per detector element
for i in range(nrois):
tmp = [rois[i+j*nrois] for j in range(nelem)]
allrois.append( tuple(tmp) )
for i in range(nrois):
nam = []
lo = []
hi = []
for j in range(nelem):
r = rois[i+j*nrois]
nam.append(r[0])
lo.append(r[1])
hi.append(r[2])
self.roi_names.append(nam)
self.roi_llim.append(lo)
self.roi_hlim.append(hi)
roi_template ="""ROI_%i_LEFT: %i %i %i %i
ROI_%i_RIGHT: %i %i %i %i
ROI_%i_LABEL: %s & %s & %s & %s & """
rout = []
for i in range(nrois):
vals = [i] + self.roi_llim[i] + [i] + self.roi_hlim[i] + [i] + self.roi_names[i]
rout.append(roi_template % tuple(vals))
xrf_header= """VERSION: 3.1
ELEMENTS: %i
DATE: %s
CHANNELS: %i
ROIS: %i %i %i %i
REAL_TIME: 1.0 1.0 1.0 1.0
LIVE_TIME: 1.0 1.0 1.0 1.0
CAL_OFFSET: %15.8e %15.8e %15.8e %15.8e
CAL_SLOPE: %15.8e %15.8e %15.8e %15.8e
CAL_QUAD: %15.8e %15.8e %15.8e %15.8e
TWO_THETA: 10.0000000 10.0000000 10.0000000 10.0000000"""
hout = [nelem, time.ctime(atime),n_energies, nrois, nrois, nrois, nrois]
hout.extend( header['CAL_OFFSET'])
hout.extend( header['CAL_SLOPE'])
hout.extend( header['CAL_QUAD'])
obuff ="%s\n%s" % (xrf_header % tuple(hout), '\n'.join(rout))
rois = []
allrois = []
self.xrf_header = obuff
# dir = prefix
self.xrf_energies = []
x_en = numpy.arange(n_energies)*1.0
for i in range(nelem):
off = header['CAL_OFFSET'][i]
slope = header['CAL_SLOPE'][i]
quad = header['CAL_QUAD'][i]
self.xrf_energies.append(off + x_en * (slope + x_en * quad))
self.xrf_energies = numpy.array(self.xrf_energies)
self.xrf_dict = {}
processing = True
iyold = 1
ix = 0
iy = 0
# lines = inpf.readlines()
progress_save = self.progress
self.progress = self.my_progress
# slow part: ascii text to numpy array
for line in inpf:# enumerate(lines):
raw = numpy.fromstring(line[:-1],sep=' ')
ix = raw[0]
iy = raw[1]
dat = raw[2:]
if iy != iyold:
iyold = iy
if iy>1: self.PrintMessage('. ')
self.xrf_dict['%i/%i' % (ix,iy)] = dat
inpf.close()
xrf_shape = (n_xin, nelem, n_energies)
if self.dimension == 2:
xrf_shape = (n_yin, n_xin, nelem, n_energies)
# print( 'xrf_shape ', xrf_shape)
self.xrf_data = -1*numpy.ones(xrf_shape)
xrf_dt_factor = self.dt_factor * 1.0
if self.dimension == 2:
xrf_dt_factor = xrf_dt_factor.transpose((1,2,0))[:,:,:,numpy.newaxis]
for iy in range(n_yin):
for ix in range(n_xin):
                    key = '%i/%i' % (ix+1, iy+1)  # matches the '%i/%i' keys stored when reading the file
if key in self.xrf_dict:
d = numpy.array(self.xrf_dict[key])
d.shape = (nelem,n_energies)
self.xrf_data[iy,ix,:,:] = d
else:
xrf_dt_factor = xrf_dt_factor.transpose((1,0))[:,:,numpy.newaxis]
for ix in range(n_xin):
                key = '%i/%i' % (ix+1, iy)  # matches the '%i/%i' keys stored when reading the file
d = numpy.array(self.xrf_dict[key])
d.shape = (nelem, n_energies)
self.xrf_data[ix,:,:] = d
self.xrf_corr = self.xrf_data * xrf_dt_factor
# merge XRF data
en_merge = self.xrf_energies[0]
if self.dimension == 2:
self.xrf_merge = self.xrf_data[:,:,0,:]*1.0
self.xrf_merge_corr = self.xrf_corr[:,:,0,:]*1.0
self.PrintMessage('\n')
for iy in range(n_yin):
self.PrintMessage('. ')
for ix in range(n_xin):
sum_r = self.xrf_merge[iy,ix,:]*1.0
sum_c = self.xrf_merge_corr[iy,ix,:]*1.0
for idet in range(1,nelem):
en = self.xrf_energies[idet]
dat_r = self.xrf_data[iy,ix,idet,:]
dat_c = self.xrf_corr[iy,ix,idet,:]
sum_r += numpy.interp(en_merge, en, dat_r)
sum_c += numpy.interp(en_merge, en, dat_c)
self.xrf_merge[iy,ix,:] = sum_r
self.xrf_merge_corr[iy,ix,:] = sum_c
else:
self.xrf_merge = self.xrf_data[:,0,:]*1.0
self.xrf_merge_corr = self.xrf_corr[:,0,:]*1.0
for ix in range(n_xin):
sum_r = self.xrf_merge[ix,:]*1.0
sum_c = self.xrf_merge_corr[ix,:]*1.0
for idet in range(1,nelem):
en = self.xrf_energies[idet]
dat_r = self.xrf_data[ix,idet,:]
dat_c = self.xrf_corr[ix,idet,:]
sum_r += numpy.interp(en_merge, en, dat_r)
sum_c += numpy.interp(en_merge, en, dat_c)
self.xrf_merge[ix,:] = sum_r
self.xrf_merge_corr[ix,:] = sum_c
self.progress = progress_save
inpf.close()
self.xrf_dict = None
def save_sums_ascii(self,fname=None, correct=True,extension='dat'):
if fname is None:
fname = self.path
map = None
correct = correct and hasattr(self,'det_corr')
outf = _cleanfile(fname)
fout = open("%s.%s" % (outf,extension),'w')
fout.write("# ASCII data from %s\n" % self.filename)
fout.write("# x positioner %s = %s\n" % (self.xaddr,self.xdesc))
if self.dimension==2:
fout.write("# y positioner %s = %s\n" % (self.yaddr,self.ydesc))
fout.write("# Dead Time Correction applied: %s\n" % correct)
fout.write("#-----------------------------------------\n")
labels = [self.xdesc]
if self.dimension == 2:
ydesc = self.ydesc
if ydesc in ('',None): ydesc = 'Y'
labels.append(ydesc)
labels.extend(self.sums_names)
labels = ["%5s" % _cleanfile(l) for l in labels]
olabel = ' '.join(labels)
fout.write("# %s\n" % olabel)
sums = self.sums
if correct: sums = self.sums_corr
if self.dimension ==1:
for i,x in enumerate(self.x):
o = ["%10.5f" % x]
o.extend(["%12g" % s for s in sums[:,i]])
fout.write(" %s\n" % " ".join(o) )
else:
for i,x in enumerate(self.x):
for j,y in enumerate(self.y):
o = [" %10.5f" % x, " %10.5f" % y]
o.extend(["%12g" % s for s in sums[:,j,i]])
fout.write(" %s\n" % " ".join(o))
fout.close()
def gsescan_group(fname, _larch=None, bad=None, **kws):
"""simple mapping of EscanData file to larch groups"""
escan = EscanData(fname, bad=bad)
if escan.status is not None:
raise ValueError('Not a valid Escan Data file')
group = Group()
group.__name__ ='GSE Escan Data file %s' % fname
for key, val in escan.__dict__.items():
if not key.startswith('_'):
setattr(group, key, val)
group.array_labels = group.pos_desc + group.sums_names
group.get_data = escan.get_data
return group
GSE_header_IDE= ['# XDI/1.0 GSE/1.0',
'# Beamline.name: 13-ID-E, GSECARS',
'# Monochromator.name: Si 111, LN2 Cooled',
'# Monochromator.dspacing: 3.13477',
'# Facility.name: APS',
'# Facility.xray_source: 3.6 cm undulator',
'# Detectors.i0: 20cm ion chamber, He',
'# Detectors.ifluor: Si SDD Vortex ME-4, XIA xMAP, 4 elements',
'# Column.1: energy eV',
'# Column.2: mufluor',
'# Column.3: i0',
'# Column.4: ifluor (corrected for deadtime)',
'# Column.5: ifluor_raw (not corrected) ' ]
GSE_header_BMD = ['# XDI/1.0 GSE/1.0',
'# Beamline.name: 13-BM-D, GSECARS',
'# Monochromator.name: Si 111, water cooled ',
'# Monochromator.dspacing: 3.13477',
'# Facility.name: APS',
'# Facility.xray_source: bending magnet',
'# Detectors.i0: 10cm ion chamber, N2',
'# Detectors.ifluor: Ge SSD detector, XIA xMAP, 12 elements',
'# Column.1: energy eV',
'# Column.2: mufluor',
'# Column.3: i0',
'# Column.4: ifluor (corrected for deadtime)',
'# Column.5: ifluor_raw (not corrected) ' ]
def gsescan_deadtime_correct(fname, channelname, subdir='DT_Corrected',
bad=None, _larch=None):
"""convert GSE ESCAN fluorescence XAFS scans to dead time | |
# ball_catching/strategies/soc_solvers.py
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 09:29:13 2016
@author: Hoefer
"""
import argparse
import os
import sys
import yaml
import numpy as np
import matplotlib.pylab as plt
from ball_catching.config import settings_root
from ball_catching.dynamics.world import DynamicsModel
def dot3(A,B,C):
""" Returns the matrix multiplication of A*B*C"""
return np.dot(A, np.dot(B,C))
# ====================================================================
class LQR:
def __init__(self):
pass
@property
def name(self):
return "LQR"
# def solve(self, N, A=None, B=None):
# """Solve the LQR problem, iterating over N steps"""
#
# Q, R, P0 = self.Q, self.R, self.H
#
# if A is None:
# A = dynamics.Adt
# if B is None:
# B = dynamics.Bdt
#
# P = P0[:,:]
# Plog = [P0]
# Flog = []
#
# for i in range(N):
# try:
# F = - np.dot ( np.linalg.inv(R + np.dot(np.dot(B.T, P), B)),
# np.dot( np.dot(B.T, P), A ) )
# except np.linalg.linalg.LinAlgError as e:
# print "warn: %s" % str(e)
# F = np.zeros(B.T.shape)
#
# Flog.append(F)
# P = np.dot ( np.dot( (A + np.dot(B, F)).T, P ),
# (A + np.dot(B, F))) + np.dot( np.dot(F.T, R), F) + Q
# Plog.append(P)
#
# self.Plog = Plog
# self.Flog = Flog
#
# return Plog, Flog
def solve(self, N, dynamics, A=None, B=None, c=None):
"""Solve the LQR problem, iterating over N steps"""
self.dynamics_local = dynamics
dt = dynamics.DT
Q, R, S = self.Q, self.R, self.H
D_s, D_a = self.Q.shape[0], self.R.shape[0]
#P = np.zeros( (D_a, D_s))
s = np.zeros( (D_s,) )
if A is None:
A = dynamics.Adt
if B is None:
B = dynamics.Bdt
if c is None:
c = dynamics.cdt
#c = np.zeros( (D_s,) )
F = np.zeros( (N, D_a, D_s) )
f = np.zeros( (N, D_a) )
inv = np.linalg.inv
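        # Backward Riccati-style recursion: starting from the terminal cost S = H,
        # build time-varying affine feedback gains F, f, stored by time-to-impact
        # index (idx = N - t - 1), for a control of the form u = F[idx].dot(x) + f[idx].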
for t in reversed(range(N)):
C = dot3(B.T, S, A) #+ P
D = dot3(A.T, S, A) + Q
E = dot3(B.T, S, B) + R
d = np.dot(A.T, s+S.dot(c)) #+ q
e = np.dot(B.T, s+S.dot(c)) #+ r
#F[t] = - inv(E).dot(C)
#f[t] = - inv(E).dot(e)
#S = D + C.T.dot(F[t])
#s = d + C.T.dot(f[t])
idx = N-t-1
F[idx] = - inv(E).dot(C)
f[idx] = - inv(E).dot(e)
S = D + C.T.dot(F[idx])
s = d + C.T.dot(f[idx])
self.F = F
self.f = f
self.tti = [ i*dt for i in range(N) ]
return self.tti, self.F, self.f
def cost_t(self, x, u):
Q, R, = self.Q, self.R
x = x.reshape( (-1,1) )
u = u.reshape( (-1,1) )
sx = np.dot(x.T, np.dot(Q, x))
su = np.dot(u.T, np.dot(R, u))
return (sx + su)[0,0]
def cost_final(self, x):
x = x.reshape( (-1,1) )
return np.dot(x.T, np.dot(self.H, x))[0,0]
def J(self, x, u, N):
"""Compute the total cost of a trajectory
x & u for N steps"""
Q, R, H = self.Q, self.R, self.H
sum = 0
for i in range(N-1):
#FIXME use cost_t
xx = x[i,:].T
uu = u[i,:].T
sx = np.dot(xx.T, np.dot(Q, xx))
su = np.dot(uu.T, np.dot(R, uu))
sum += sx
sum += su
# last step:
if x.shape[0] == N:
#FIXME use cost_final
sum += np.dot(x[-1,:].T, np.dot(H, (x[-1,:])))
return 0.5 * sum
# ====================================================================
class iLQR(LQR):
@property
def name(self):
return "iLQR"
def solve(self, N, dynamics, x0=None, u0=None, max_iter=1000, A=None, B=None, c=None, verbose=True):
"""Solve the iLQR problem, iterating over N steps"""
inv = np.linalg.inv
self.dynamics_local = dynamics
dt = dynamics.DT
# cost matrices
Q, R, S = self.Q, self.R, self.H
D_s, D_a = self.Q.shape[0], self.R.shape[0]
#S = np.zeros( (D_a, D_s))
s = np.zeros( (D_s,) )
if A is None:
A = dynamics.Adt
if B is None:
B = dynamics.Bdt
if c is None:
c = dynamics.cdt
#c = np.zeros( (D_s,) )
g = lambda x,u: dynamics.step(x,u,noise=False)
if x0 is None:
x0 = np.zeros( (D_s,) )
if u0 is None:
u0 = np.zeros( (D_a,) )
tf, N, _, _ = dynamics.get_time_to_impact(x0)
# initialize state and action matrices
F = np.zeros( (N, D_a, D_s) )
f = np.zeros( (N, D_a) )
# initialize state and action matrices
x_hat = np.zeros((N+1, D_s))
x_hat_new = np.zeros((N+1, D_s))
u_hat = np.zeros((N, D_a))
u_hat_new = np.zeros((N, D_a))
old_cost = np.inf
new_cost = 0.
for opt_iter in range(max_iter):
alpha = 1. # line search parameter
# ------------
# Forward pass
# line search
first_round = True
while first_round or (new_cost >= old_cost and np.abs((old_cost - new_cost) / new_cost) >= 1e-4):
first_round = False
new_cost = 0.
# initialize trajectory
x_hat_new[0,:] = x0
for t in range(N):
idx = N-t-1
# line search for choosing optimal combination of old and new action
u_hat_new[t,:] = (1.0 - alpha)*u_hat[t,:] \
+ F[idx].dot(x_hat_new[t,:] - (1.0 - alpha)*x_hat[t,:]) + alpha*f[idx]
# next time-step
x_hat_new[t+1,:] = g(x_hat_new[t,:], u_hat_new[t,:])
new_cost += self.cost_t(x_hat_new[t,:], u_hat_new[t,:])
new_cost += self.cost_final(x_hat_new[t,:])
alpha *= 0.5
x_hat[:] = x_hat_new[:]
u_hat[:] = u_hat_new[:]
if verbose:
print ("Iter: %d, Alpha: %f, Rel. progress: %f, Cost: %f" % \
(opt_iter, (2*alpha), ((old_cost-new_cost)/new_cost), new_cost,))
if np.abs((old_cost - new_cost) / new_cost) < 1e-4:
break
old_cost = new_cost
# ------------
# backward pass
# for quadratizing final cost (not implemented)
#S = np.zeros( (D_s, D_s) )
#s = np.zeros( (D_s, ) )
S = self.H
s = np.zeros( (D_s, ) )
#for (size_t t = ell-1; t != -1; --t) {
for t in reversed(range(N)):
# jacobian
A = dynamics.compute_J(x_hat[t], u_hat[t])
B = dynamics.Bdt # FIXME nonlinear motion model support
c = x_hat[t+1] - (A.dot(x_hat[t]) - B.dot(u_hat[t])).flatten()
C = dot3(B.T, S, A) #+ P
D = dot3(A.T, S, A) + Q
E = dot3(B.T, S, B) + R
d = np.dot(A.T, s+S.dot(c)) #+ q
e = np.dot(B.T, s+S.dot(c)) #+ r
# F[t] = - inv(E).dot(C)
# f[t] = - inv(E).dot(e)
# S = D + C.T.dot(F[t])
# s = d + C.T.dot(f[t])
idx = N-t-1
F[idx] = - inv(E).dot(C)
f[idx] = - inv(E).dot(e)
S = D + C.T.dot(F[idx])
s = d + C.T.dot(f[idx])
self.F = F
self.f = f
self.tti = [ i*dt for i in range(N) ]
# old style
#self.Flog = [ F[t] for t in range(F.shape[0]) ]
return self.tti, self.F, self.f
# ====================================================================
class SOCBallCatching:
def __init__(self, solver, dynamics_local,
terminal_distance, terminal_velocity, control_effort):
"""
Generates cost matrices Q, H, R and
assigns them to the solver (LQR or iLQR)
"""
self.terminal_distance = terminal_distance
self.terminal_velocity = terminal_velocity
self.control_effort = control_effort
D_s = dynamics_local.state_dim
D_a = dynamics_local.action_dim
Q = np.zeros((D_s,D_s))
R = np.identity(D_a)*control_effort
H = np.zeros((D_s,D_s))
# agent terminal_distance to ball (x dimension)
H[0,0] = terminal_distance
H[9,9] = terminal_distance
H[0,9] = H[9,0] = -terminal_distance
# agent terminal_distance to ball (z dimension)
H[6,6] = terminal_distance
H[12,12] = terminal_distance
H[6,12] = H[12,6] = -terminal_distance
# agent velocity at contact
H[10,10] = terminal_velocity
H[13,13] = terminal_velocity
# init solver cost
solver.Q = Q
solver.R = R
solver.H = H
self.dynamics_global = DynamicsModel()
self.solver = solver
self.solver.dynamics_local = dynamics_local
def solve(self, N=None):
fr, dt, dim = self.dynamics_global.FRAMERATE,\
self.dynamics_global.DT,\
self.dynamics_global.dim
if N is None:
N = int(10*fr) # 10 seconds at current framerate
# --------
# Use LQR
if self.solver.name == "LQR":
#dynamics_local = DynamicsModel(dt=dt, dim=dim,
# drag=False, copy=True)
ret = self.solver.solve(N, self.solver.dynamics_local)
# --------
# Use iLQR
elif self.solver.name == "iLQR":
#dynamics_local = DynamicsModel(dt=dt, dim=dim,
# drag=True, copy=True)
D_s = self.dynamics_global.state_dim
D_a = self.dynamics_global.action_dim
# we need to set x0 and u0
x0 = np.zeros( (D_s,) )
# using 'far' setting
x0[1] = 150. # ball velocity x
#x0[4] = 15.556 # ball velocity z
x0[4] = 150. # ball velocity z
#if dim==3:
# x0[7] = x0[1] # ball velocity x, y and z
x0[5] = -self.dynamics_global.GRAVITY
x0[9] = 300 # agent x-position
if dim==3:
#x0[12] = x0[9] # agent z-position
x0[12] = 30. # agent z-position
u0 = np.zeros( (D_a,) )
ret = | |
# colour/notation/munsell.py
"""
Munsell Renotation System
=========================
Defines various objects for *Munsell Renotation System* computations:
- :func:`colour.notation.munsell_value_Priest1920`: *Munsell* value :math:`V`
computation of given *luminance* :math:`Y` using
*Priest, Gibson and MacNicholas (1920)* method.
- :func:`colour.notation.munsell_value_Munsell1933`: *Munsell* value
:math:`V` computation of given *luminance* :math:`Y` using
*Munsell, Sloan and Godlove (1933)* method.
- :func:`colour.notation.munsell_value_Moon1943`: *Munsell* value :math:`V`
computation of given *luminance* :math:`Y` using
*Moon and Spencer (1943)* method.
- :func:`colour.notation.munsell_value_Saunderson1944`: *Munsell* value
:math:`V` computation of given *luminance* :math:`Y` using
*Saunderson and Milner (1944)* method.
- :func:`colour.notation.munsell_value_Ladd1955`: *Munsell* value :math:`V`
computation of given *luminance* :math:`Y` using *Ladd and Pinney (1955)*
method.
- :func:`colour.notation.munsell_value_McCamy1987`: *Munsell* value :math:`V`
computation of given *luminance* :math:`Y` using *McCamy (1987)* method.
- :func:`colour.notation.munsell_value_ASTMD1535`: *Munsell* value
:math:`V` computation of given *luminance* :math:`Y` using
*ASTM D1535-08e1* method.
- :attr:`colour.MUNSELL_VALUE_METHODS`: Supported *Munsell* value
computation methods.
- :func:`colour.munsell_value`: *Munsell* value :math:`V` computation of
given *luminance* :math:`Y` using given method.
- :func:`colour.munsell_colour_to_xyY`
- :func:`colour.xyY_to_munsell_colour`
Notes
-----
- The Munsell Renotation data commonly available within the *all.dat*,
*experimental.dat* and *real.dat* files features *CIE xyY* colourspace values
that are scaled by a :math:`1 / 0.975 \\simeq 1.02568` factor. If you are
performing conversions using *Munsell* *Colorlab* specification,
e.g. *2.5R 9/2*, according to *ASTM D1535-08e1* method, you should not
scale the output :math:`Y` Luminance. However, if you use directly the
*CIE xyY* colourspace values from the Munsell Renotation data data, you
should scale the :math:`Y` Luminance before conversions by a :math:`0.975`
factor.
*ASTM D1535-08e1* states that::
The coefficients of this equation are obtained from the 1943 equation
by multiplying each coefficient by 0.975, the reflectance factor of
magnesium oxide with respect to the perfect reflecting diffuser, and
        rounding to five digits of precision.
References
----------
- :cite:`ASTMInternational1989a` : ASTM International. (1989). ASTM D1535-89
- Standard Practice for Specifying Color by the Munsell System (pp. 1-29).
Retrieved September 25, 2014, from
http://www.astm.org/DATABASE.CART/HISTORICAL/D1535-89.htm
- :cite:`Centore2012a` : Centore, P. (2012). An open-source inversion
algorithm for the Munsell renotation. Color Research & Application, 37(6),
455-464. doi:10.1002/col.20715
- :cite:`Centore2014k` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/MunsellHueToASTMHue.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014l` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellSystemRoutines/LinearVsRadialInterpOnRenotationOvoid.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014m` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/MunsellToxyY.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014n` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/FindHueOnRenotationOvoid.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014o` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellSystemRoutines/BoundingRenotationHues.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014p` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/xyYtoMunsell.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014q` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/MunsellToxyForIntegerMunsellValue.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014r` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/MaxChromaForExtrapolatedRenotation.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014s` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/MunsellHueToChromDiagHueAngle.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014t` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
MunsellRenotationRoutines/ChromDiagHueAngleToMunsellHue.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centore2014u` : Centore, P. (2014).
MunsellAndKubelkaMunkToolboxApr2014 -
GeneralRoutines/CIELABtoApproxMunsellSpec.m.
https://github.com/colour-science/MunsellAndKubelkaMunkToolbox
- :cite:`Centorea` : Centore, P. (n.d.). The Munsell and Kubelka-Munk
Toolbox. Retrieved January 23, 2018, from
http://www.munsellcolourscienceforpainters.com/\
MunsellAndKubelkaMunkToolbox/MunsellAndKubelkaMunkToolbox.html
- :cite:`Wikipedia2007c` : <NAME>., <NAME>., & <NAME>.
(1995). Lightness dependency of chroma scales of a nonlinear
color-appearance model and its latest formulation. Color Research &
Application, 20(3), 156-167. doi:10.1002/col.5080200305
"""
from __future__ import annotations
import numpy as np
import re
from colour.algebra import (
Extrapolator,
LinearInterpolator,
cartesian_to_cylindrical,
euclidean_distance,
polar_to_cartesian,
spow,
)
from colour.colorimetry import CCS_ILLUMINANTS, luminance_ASTMD1535
from colour.constants import (
INTEGER_THRESHOLD,
FLOATING_POINT_NUMBER_PATTERN,
)
from colour.hints import (
ArrayLike,
Boolean,
Dict,
Floating,
FloatingOrArrayLike,
FloatingOrNDArray,
Integer,
Literal,
NDArray,
Optional,
StrOrArrayLike,
StrOrNDArray,
Tuple,
Union,
)
from colour.models import Lab_to_LCHab, XYZ_to_Lab, XYZ_to_xy, xyY_to_XYZ
from colour.volume import is_within_macadam_limits
from colour.notation import MUNSELL_COLOURS_ALL
from colour.utilities import (
CACHE_REGISTRY,
CaseInsensitiveMapping,
Lookup,
as_float,
as_float_array,
as_float_scalar,
as_int_scalar,
attest,
domain_range_scale,
from_range_1,
from_range_10,
get_domain_range_scale,
to_domain_1,
to_domain_10,
to_domain_100,
is_integer,
is_numeric,
tsplit,
tstack,
usage_warning,
validate_method,
)
__author__ = "Colour Developers, <NAME>"
__copyright__ = "Copyright 2013 Colour Developers"
__copyright__ += ", "
__copyright__ += (
"The Munsell and Kubelka-Munk Toolbox: Copyright 2010-2018 <NAME> "
"(Gales Ferry, CT 06335, USA); used by permission."
)
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"MUNSELL_GRAY_PATTERN",
"MUNSELL_COLOUR_PATTERN",
"MUNSELL_GRAY_FORMAT",
"MUNSELL_COLOUR_FORMAT",
"MUNSELL_GRAY_EXTENDED_FORMAT",
"MUNSELL_COLOUR_EXTENDED_FORMAT",
"MUNSELL_HUE_LETTER_CODES",
"ILLUMINANT_NAME_MUNSELL",
"CCS_ILLUMINANT_MUNSELL",
"munsell_value_Priest1920",
"munsell_value_Munsell1933",
"munsell_value_Moon1943",
"munsell_value_Saunderson1944",
"munsell_value_Ladd1955",
"munsell_value_McCamy1987",
"munsell_value_ASTMD1535",
"MUNSELL_VALUE_METHODS",
"munsell_value",
"munsell_specification_to_xyY",
"munsell_colour_to_xyY",
"xyY_to_munsell_specification",
"xyY_to_munsell_colour",
"parse_munsell_colour",
"is_grey_munsell_colour",
"normalise_munsell_specification",
"munsell_colour_to_munsell_specification",
"munsell_specification_to_munsell_colour",
"xyY_from_renotation",
"is_specification_in_renotation",
"bounding_hues_from_renotation",
"hue_to_hue_angle",
"hue_angle_to_hue",
"hue_to_ASTM_hue",
"interpolation_method_from_renotation_ovoid",
"xy_from_renotation_ovoid",
"LCHab_to_munsell_specification",
"maximum_chroma_from_renotation",
"munsell_specification_to_xy",
]
MUNSELL_GRAY_PATTERN: str = f"N(?P<value>{FLOATING_POINT_NUMBER_PATTERN})"
MUNSELL_COLOUR_PATTERN: str = (
f"(?P<hue>{FLOATING_POINT_NUMBER_PATTERN})\\s*"
f"(?P<letter>BG|GY|YR|RP|PB|B|G|Y|R|P)\\s*"
f"(?P<value>{FLOATING_POINT_NUMBER_PATTERN})\\s*\\/\\s*"
f"(?P<chroma>[-+]?{FLOATING_POINT_NUMBER_PATTERN})"
)
MUNSELL_GRAY_FORMAT: str = "N{0}"
MUNSELL_COLOUR_FORMAT: str = "{0} {1}/{2}"
MUNSELL_GRAY_EXTENDED_FORMAT: str = "N{0:.{1}f}"
MUNSELL_COLOUR_EXTENDED_FORMAT: str = "{0:.{1}f}{2} {3:.{4}f}/{5:.{6}f}"
MUNSELL_HUE_LETTER_CODES: Lookup = Lookup(
{
"BG": 2,
"GY": 4,
"YR": 6,
"RP": 8,
"PB": 10,
"B": 1,
"G": 3,
"Y": 5,
"R": 7,
"P": 9,
}
)
ILLUMINANT_NAME_MUNSELL: str = "C"
CCS_ILLUMINANT_MUNSELL: NDArray = CCS_ILLUMINANTS[
"CIE 1931 2 Degree Standard Observer"
][ILLUMINANT_NAME_MUNSELL]
_MUNSELL_SPECIFICATIONS_CACHE: Dict = CACHE_REGISTRY.register_cache(
f"{__name__}._MUNSELL_SPECIFICATIONS_CACHE"
)
_MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE: Dict = (
CACHE_REGISTRY.register_cache(
f"{__name__}._MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE"
)
)
_MUNSELL_MAXIMUM_CHROMAS_FROM_RENOTATION_CACHE: Dict = (
CACHE_REGISTRY.register_cache(
f"{__name__}._MUNSELL_MAXIMUM_CHROMAS_FROM_RENOTATION_CACHE"
)
)
def _munsell_specifications() -> NDArray:
"""
Return the *Munsell Renotation System* specifications and caches them if
not existing.
The *Munsell Renotation System* data is stored in
:attr:`colour.notation.MUNSELL_COLOURS` attribute in a 2 columns form::
(
(('2.5GY', 0.2, 2.0), (0.713, 1.414, 0.237)),
(('5GY', 0.2, 2.0), (0.449, 1.145, 0.237)),
...,
(('7.5GY', 0.2, 2.0), (0.262, 0.837, 0.237)),
)
The first column is converted from *Munsell* colour to specification using
:func:`colour.notation.munsell.munsell_colour_to_munsell_specification`
definition:
('2.5GY', 0.2, 2.0) --> (2.5, 0.2, 2.0, 4)
Returns
-------
:class:`numpy.ndarray`
*Munsell Renotation System* specifications.
"""
global _MUNSELL_SPECIFICATIONS_CACHE
if "All" in _MUNSELL_SPECIFICATIONS_CACHE:
return _MUNSELL_SPECIFICATIONS_CACHE["All"]
munsell_specifications = np.array(
[
munsell_colour_to_munsell_specification(
MUNSELL_COLOUR_FORMAT.format(*colour[0])
)
for colour in MUNSELL_COLOURS_ALL
]
)
_MUNSELL_SPECIFICATIONS_CACHE["All"] = munsell_specifications
return munsell_specifications
def _munsell_value_ASTMD1535_interpolator() -> Extrapolator:
"""
Return the *Munsell* value interpolator for *ASTM D1535-08e1* method and
caches it if not existing.
Returns
-------
:class:`colour.Extrapolator`
*Munsell* value interpolator for *ASTM D1535-08e1* method.
"""
global _MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE
if "ASTM D1535-08 Interpolator" in (
_MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE
):
return _MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE[
"ASTM D1535-08 Interpolator"
]
munsell_values = np.arange(0, 10, 0.001)
interpolator = LinearInterpolator(
luminance_ASTMD1535(munsell_values), munsell_values
)
extrapolator = Extrapolator(interpolator)
_MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE[
"ASTM D1535-08 Interpolator"
] = extrapolator
return extrapolator
def _munsell_maximum_chromas_from_renotation() -> Tuple[
Tuple[Tuple[Floating, Floating, Floating], Floating], ...
]:
"""
Return the maximum *Munsell* chromas from *Munsell Renotation System* data
and caches them if not existing.
Returns
-------
:class:`tuple`
Maximum *Munsell* chromas.
"""
global _MUNSELL_MAXIMUM_CHROMAS_FROM_RENOTATION_CACHE
if "Maximum Chromas From Renotation" in (
_MUNSELL_MAXIMUM_CHROMAS_FROM_RENOTATION_CACHE
):
return _MUNSELL_MAXIMUM_CHROMAS_FROM_RENOTATION_CACHE[
"Maximum Chromas From Renotation"
]
chromas: Dict[Tuple[Floating, Floating, Floating], Floating] = {}
for munsell_colour in MUNSELL_COLOURS_ALL:
hue, value, chroma, code = tsplit(
munsell_colour_to_munsell_specification(
MUNSELL_COLOUR_FORMAT.format(*munsell_colour[0])
)
)
index = (hue, value, code)
if index in chromas:
chroma = max([chromas[index], chroma])
chromas[index] = chroma
maximum_chromas_from_renotation = tuple(
zip(chromas.keys(), chromas.values())
)
_MUNSELL_MAXIMUM_CHROMAS_FROM_RENOTATION_CACHE[
"Maximum Chromas From Renotation"
] = maximum_chromas_from_renotation
return maximum_chromas_from_renotation
def munsell_value_Priest1920(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*Priest et al. (1920)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value_Priest1920(12.23634268) # doctest: +ELLIPSIS
3.4980484...
"""
Y = to_domain_100(Y)
V = 10 * np.sqrt(Y / 100)
return as_float(from_range_10(V))
def munsell_value_Munsell1933(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*Munsell et al. (1933)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value_Munsell1933(12.23634268) # doctest: +ELLIPSIS
4.1627702...
"""
Y = to_domain_100(Y)
V = np.sqrt(1.4742 * Y - 0.004743 * (Y * Y))
return as_float(from_range_10(V))
def munsell_value_Moon1943(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*Moon and Spencer (1943)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value_Moon1943(12.23634268) # doctest: +ELLIPSIS
4.0688120...
"""
Y = to_domain_100(Y)
V = 1.4 * spow(Y, 0.426)
return as_float(from_range_10(V))
def | |
model_base.predict_proba(X_sample)
time_predict_frac = time.time() - time_start_predict
time_predict_estimate = time_predict_frac / frac
logger.log(15, f'\t{round(time_predict_estimate, 2)}s\t= Estimated out-of-fold prediction time...')
if time_predict_estimate > time_left:
logger.warning(f'\tNot enough time to generate out-of-fold predictions for model. Estimated time required was {round(time_predict_estimate, 2)}s compared to {round(time_left, 2)}s of available time.')
raise TimeLimitExceeded
if use_child_oof:
logger.log(15, '\t`use_child_oof` was specified for this model. It will function similarly to a bagged model, but will only fit one child model.')
time_start_predict = time.time()
if model_base._get_tags().get('valid_oof', False):
self._oof_pred_proba = model_base.get_oof_pred_proba(X=X, y=y)
else:
logger.warning('\tWARNING: `use_child_oof` was specified but child model does not have a dedicated `get_oof_pred_proba` method. This model may have heavily overfit validation scores.')
self._oof_pred_proba = model_base.predict_proba(X=X)
self._child_oof = True
model_base.predict_time = time.time() - time_start_predict
model_base.val_score = model_base.score_with_y_pred_proba(y=y, y_pred_proba=self._oof_pred_proba)
else:
self._oof_pred_proba = model_base.predict_proba(X=X) # TODO: Cheater value, will be overfit to valid set
self._oof_pred_model_repeats = np.ones(shape=len(X), dtype=np.uint8)
self._n_repeats = 1
self._n_repeats_finished = 1
self._k_per_n_repeat = [1]
self._bagged_mode = False
model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)
if not self.params.get('save_bag_folds', True):
model_base.model = None
if self.low_memory:
self.save_child(model_base, verbose=False)
self.models = [model_base.name]
else:
self.models = [model_base]
self._add_child_times_to_bag(model=model_base)
def _fit_folds(self,
X,
y,
model_base,
k_fold=None,
k_fold_start=0,
k_fold_end=None,
n_repeats=1,
n_repeat_start=0,
time_limit=None,
sample_weight=None,
save_folds=True,
groups=None,
**kwargs):
fold_fitting_strategy = self.params.get('fold_fitting_strategy', SequentialLocalFoldFittingStrategy)
# TODO: Preprocess data here instead of repeatedly
# FIXME: Raise exception if multiclass/binary and a single val fold contains all instances of a class. (Can happen if custom groups is specified)
time_start = time.time()
if k_fold_start != 0:
cv_splitter = self._cv_splitters[n_repeat_start]
else:
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups)
if k_fold != cv_splitter.n_splits:
k_fold = cv_splitter.n_splits
if k_fold_end is None:
k_fold_end = k_fold
kfolds = cv_splitter.split(X=X, y=y)
oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
models = []
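        # Folds are indexed globally across repeats: global fold index i belongs to
        # repeat i // k_fold and is fold i % k_fold within that repeat.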
fold_start = n_repeat_start * k_fold + k_fold_start
fold_end = (n_repeats - 1) * k_fold + k_fold_end
folds_to_fit = fold_end - fold_start
# noinspection PyCallingNonCallable
fold_fitting_strategy: AbstractFoldFittingStrategy = fold_fitting_strategy(
self, X, y, sample_weight, time_limit, time_start, models, oof_pred_proba, oof_pred_model_repeats, save_folds=save_folds)
for j in range(n_repeat_start, n_repeats): # For each n_repeat
if j != n_repeat_start or k_fold_start == 0:
self._cv_splitters.append(cv_splitter)
cur_repeat_count = j - n_repeat_start
fold_start_n_repeat = fold_start + cur_repeat_count * k_fold
fold_end_n_repeat = min(fold_start_n_repeat + k_fold, fold_end)
for i in range(fold_start_n_repeat, fold_end_n_repeat): # For each fold
fold_num_in_repeat = i - (j * k_fold) # The fold in the current repeat set (first fold in set = 0)
fold_ctx = dict(
model_name_suffix=f'S{j + 1}F{fold_num_in_repeat + 1}', # S5F3 = 3rd fold of the 5th repeat set
fold=kfolds[i],
is_last_fold=i != (fold_end - 1),
folds_to_fit=folds_to_fit,
folds_finished=i - fold_start,
folds_left=fold_end - i,
)
fold_fitting_strategy.schedule_fold_model_fit(model_base, fold_ctx, kwargs)
if (fold_end_n_repeat != fold_end) or (k_fold == k_fold_end):
self._k_per_n_repeat.append(k_fold)
fold_fitting_strategy.after_all_folds_scheduled()
self.models += models
self._bagged_mode = True
if self._oof_pred_proba is None:
self._oof_pred_proba = oof_pred_proba
self._oof_pred_model_repeats = oof_pred_model_repeats
else:
self._oof_pred_proba += oof_pred_proba
self._oof_pred_model_repeats += oof_pred_model_repeats
self._n_repeats = n_repeats
if k_fold == k_fold_end:
self._k = None
self._k_fold_end = 0
self._n_repeats_finished = self._n_repeats
else:
self._k = k_fold
self._k_fold_end = k_fold_end
self._n_repeats_finished = self._n_repeats - 1
# TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way.
# TODO: Reduce logging clutter during OOF importance calculation (Currently logs separately for each child)
# Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y)
def compute_feature_importance(self,
X,
y,
features=None,
silent=False,
time_limit=None,
is_oof=False,
**kwargs) -> pd.DataFrame:
if features is None:
# FIXME: use FULL features (children can have different features)
features = self.load_child(model=self.models[0]).features
if not is_oof:
return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs)
fi_fold_list = []
model_index = 0
num_children = len(self.models)
if time_limit is not None:
time_limit_per_child = time_limit / num_children
else:
time_limit_per_child = None
if not silent:
logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...'
if time_limit is not None:
logging_message = f'{logging_message} Time limit: {time_limit}s...'
logger.log(20, logging_message)
time_start = time.time()
early_stop = False
children_completed = 0
log_final_suffix = ''
for n_repeat, k in enumerate(self._k_per_n_repeat):
if is_oof:
if self._child_oof or not self._bagged_mode:
raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name)
kfolds = self._cv_splitters[n_repeat].split(X=X, y=y)
cur_kfolds = kfolds[n_repeat * k:(n_repeat + 1) * k]
else:
cur_kfolds = [(None, list(range(len(X))))] * k
for i, fold in enumerate(cur_kfolds):
_, test_index = fold
model = self.load_child(self.models[model_index + i])
fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child,
silent=silent, log_prefix='\t', importance_as_list=True, **kwargs)
fi_fold_list.append(fi_fold)
children_completed += 1
if time_limit is not None and children_completed != num_children:
time_now = time.time()
time_left = time_limit - (time_now - time_start)
time_child_average = (time_now - time_start) / children_completed
if time_left < (time_child_average * 1.1):
log_final_suffix = f' (Early stopping due to lack of time...)'
early_stop = True
break
if early_stop:
break
model_index += k
# TODO: DON'T THROW AWAY SAMPLES! USE LARGER N
fi_list_dict = dict()
for val in fi_fold_list:
val = val['importance'].to_dict() # TODO: Don't throw away stddev information of children
for key in val:
if key not in fi_list_dict:
fi_list_dict[key] = []
fi_list_dict[key] += val[key]
fi_df = _compute_fi_with_stddev(fi_list_dict)
if not silent:
logger.log(20, f'\t{round(time.time() - time_start, 2)}s\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}')
return fi_df
def load_child(self, model, verbose=False) -> AbstractModel:
if isinstance(model, str):
child_path = self.create_contexts(self.path + model + os.path.sep)
return self._child_type.load(path=child_path, verbose=verbose)
else:
return model
def save_child(self, model, verbose=False):
child = self.load_child(model)
child.set_contexts(self.path + child.name + os.path.sep)
child.save(verbose=verbose)
# TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models
def convert_to_refit_full_template(self):
init_args = self._get_init_args()
init_args['hyperparameters']['save_bag_folds'] = True # refit full models must save folds
init_args['model_base'] = self.convert_to_refitfull_template_child()
init_args['name'] = init_args['name'] + REFIT_FULL_SUFFIX
model_full_template = self.__class__(**init_args)
return model_full_template
def convert_to_refitfull_template_child(self):
compressed_params = self._get_compressed_params()
child_compressed = copy.deepcopy(self._get_model_base())
child_compressed.feature_metadata = self.feature_metadata # TODO: Don't pass this here
child_compressed.params = compressed_params
return child_compressed
def _get_init_args(self):
init_args = dict(
model_base=self._get_model_base(),
random_state=self._random_state,
)
init_args.update(super()._get_init_args())
init_args.pop('problem_type')
return init_args
def _get_compressed_params(self, model_params_list=None):
if model_params_list is None:
model_params_list = [
self.load_child(child).get_trained_params()
for child in self.models
]
model_params_compressed = dict()
for param in model_params_list[0].keys():
model_param_vals = [model_params[param] for model_params in model_params_list]
if all(isinstance(val, bool) for val in model_param_vals):
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
elif all(isinstance(val, int) for val in model_param_vals):
compressed_val = round(mean(model_param_vals))
elif all(isinstance(val, float) for val in model_param_vals):
compressed_val = mean(model_param_vals)
else:
try:
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
except TypeError:
compressed_val = model_param_vals[0]
model_params_compressed[param] = compressed_val
return model_params_compressed
def _get_compressed_params_trained(self):
model_params_list = [
self.load_child(child).params_trained
for child in self.models
]
return self._get_compressed_params(model_params_list=model_params_list)
def _get_model_base(self):
if self.model_base is None:
return self.load_model_base()
else:
return self.model_base
def _add_child_times_to_bag(self, model):
if self.fit_time is None:
self.fit_time = model.fit_time
else:
self.fit_time += model.fit_time
if self.predict_time is None:
self.predict_time = model.predict_time
else:
self.predict_time += model.predict_time
@classmethod
def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True):
model = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
if not low_memory:
model.persist_child_models(reset_paths=reset_paths)
if load_oof:
model._load_oof()
return model
@classmethod
def load_oof(cls, path, verbose=True):
try:
oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose)
oof_pred_proba = oof['_oof_pred_proba']
oof_pred_model_repeats = oof['_oof_pred_model_repeats']
except FileNotFoundError:
model = cls.load(path=path, reset_paths=True, verbose=verbose)
model._load_oof()
oof_pred_proba = model._oof_pred_proba
oof_pred_model_repeats = model._oof_pred_model_repeats
return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats)
def _load_oof(self):
if self._oof_pred_proba is not None:
pass
else:
oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename)
self._oof_pred_proba = oof['_oof_pred_proba']
self._oof_pred_model_repeats = oof['_oof_pred_model_repeats']
def persist_child_models(self, reset_paths=True):
for i, model_name in enumerate(self.models):
if isinstance(model_name, str):
child_path = self.create_contexts(self.path + model_name + os.path.sep)
child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True)
self.models[i] = child_model
def load_model_base(self):
return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl')
def save_model_base(self, model_base):
save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base)
def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str:
if path is None:
path = self.path
if save_children:
model_names = []
for child in self.models:
child = self.load_child(child)
child.set_contexts(path + child.name + os.path.sep)
child.save(verbose=False)
model_names.append(child.name)
self.models = model_names
if save_oof and self._oof_pred_proba is not None:
save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={
'_oof_pred_proba': self._oof_pred_proba,
'_oof_pred_model_repeats': self._oof_pred_model_repeats,
})
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
return super().save(path=path, verbose=verbose)
# If `remove_fit_stack=True`, variables | |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `neural_tangents/stax.py`."""
import functools
import itertools
import random as prandom
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
from jax import default_backend
from jax import jit
from jax import test_util as jtu
from jax.config import config
from jax.example_libraries import stax as ostax
import jax.numpy as np
import jax.random as random
import neural_tangents as nt
from neural_tangents import stax
from tests import test_utils
import numpy as onp
config.parse_flags_with_absl()
config.update('jax_numpy_rank_promotion', 'raise')
MODELS = [
'fc',
'conv'
]
BATCH_SIZE = 4
INPUT_SHAPE = (BATCH_SIZE, 8, 6, 2)
WIDTHS = [2**10]
N_SAMPLES = 100
RTOL = 0.041
ATOL = 0.1
FILTER_SHAPES = [
(2, 1),
(3, 2)
]
PADDINGS = [
'SAME',
'VALID',
'CIRCULAR'
]
STRIDES = [
(1, 2),
(2, 1),
]
ACTIVATIONS = {
stax.Relu(): 'Relu',
}
PROJECTIONS = [
'FLAT',
'POOL',
'ATTN',
]
LAYER_NORM = [
'C',
'HC',
'CHW',
'NC',
'NWC',
'NCHW'
]
POOL_TYPES = [
'SUM',
'AVG'
]
PARAMETERIZATIONS = [
'NTK',
'STANDARD'
]
test_utils.update_test_tolerance()
prandom.seed(1)
def _get_inputs(
key,
same_inputs,
shape,
fn=np.cos
) -> Tuple[np.ndarray, np.ndarray]:
key, split = random.split(key)
x1 = fn(random.normal(key, shape))
batch_axis = shape.index(BATCH_SIZE)
shape = shape[:batch_axis] + (2 * BATCH_SIZE,) + shape[batch_axis + 1:]
x2 = None if same_inputs else fn(random.normal(split, shape)) * 2
return x1, x2
def _get_net(W_std, b_std, filter_shape, is_conv, use_pooling, is_res, padding,
phi, strides, width, is_ntk, proj_into_2d, pool_type, layer_norm,
parameterization, use_dropout):
if is_conv:
# Select a random filter order.
default_filter_spec = 'HW'
filter_specs = [''.join(p) for p in itertools.permutations('HWIO')]
filter_spec = prandom.choice(filter_specs)
filter_shape = tuple(filter_shape[default_filter_spec.index(c)]
for c in filter_spec if c in default_filter_spec)
strides = tuple(strides[default_filter_spec.index(c)]
for c in filter_spec if c in default_filter_spec)
# Select the activation order.
default_spec = 'NHWC'
if default_backend() == 'tpu':
# Keep batch dimension leading for TPU for batching to work.
specs = ['N' + ''.join(p) for p in itertools.permutations('CHW')]
else:
specs = [''.join(p) for p in itertools.permutations('NCHW')]
spec = prandom.choice(specs)
input_shape = tuple(INPUT_SHAPE[default_spec.index(c)] for c in spec)
else:
input_shape = (INPUT_SHAPE[0], onp.prod(INPUT_SHAPE[1:]))
if default_backend() == 'tpu':
spec = 'NC'
else:
spec = prandom.choice(['NC', 'CN'])
if spec.index('N') == 1:
input_shape = input_shape[::-1]
filter_spec = None
dimension_numbers = (spec, filter_spec, spec)
batch_axis, channel_axis = spec.index('N'), spec.index('C')
spec_fc = ''.join(c for c in spec if c in ('N', 'C'))
batch_axis_fc, channel_axis_fc = spec_fc.index('N'), spec_fc.index('C')
if not is_conv:
batch_axis = batch_axis_fc
channel_axis = channel_axis_fc
if layer_norm:
layer_norm = tuple(spec.index(c) for c in layer_norm)
def fc(out_dim):
return stax.Dense(
out_dim=out_dim,
W_std=W_std,
b_std=b_std,
parameterization=parameterization,
batch_axis=batch_axis_fc,
channel_axis=channel_axis_fc
)
def conv(out_chan):
return stax.Conv(
out_chan=out_chan,
filter_shape=filter_shape,
strides=strides,
padding=padding,
W_std=W_std,
b_std=b_std,
dimension_numbers=dimension_numbers,
parameterization=parameterization
)
affine = conv(width) if is_conv else fc(width)
rate = onp.random.uniform(0.5, 0.9)
dropout = stax.Dropout(rate, mode='train')
if pool_type == 'AVG':
pool_fn = stax.AvgPool
global_pool_fn = stax.GlobalAvgPool
elif pool_type == 'SUM':
pool_fn = stax.SumPool
global_pool_fn = stax.GlobalSumPool
else:
raise ValueError(pool_type)
if use_pooling:
pool_or_identity = pool_fn((2, 3),
None,
'SAME' if padding == 'SAME' else 'CIRCULAR',
batch_axis=batch_axis,
channel_axis=channel_axis)
else:
pool_or_identity = stax.Identity()
dropout_or_identity = dropout if use_dropout else stax.Identity()
layer_norm_or_identity = (stax.Identity() if layer_norm is None else
stax.LayerNorm(axis=layer_norm,
batch_axis=batch_axis,
channel_axis=channel_axis))
res_unit = stax.serial(dropout_or_identity, affine, pool_or_identity)
if is_res:
block = stax.serial(
affine,
stax.FanOut(2),
stax.parallel(stax.Identity(),
res_unit),
stax.FanInSum(),
layer_norm_or_identity,
phi)
else:
block = stax.serial(
affine,
res_unit,
layer_norm_or_identity,
phi)
if proj_into_2d == 'FLAT':
proj_layer = stax.Flatten(batch_axis, batch_axis_fc)
elif proj_into_2d == 'POOL':
proj_layer = global_pool_fn(batch_axis, channel_axis)
elif proj_into_2d.startswith('ATTN'):
n_heads = int(np.sqrt(width))
n_chan_val = int(np.round(float(width) / n_heads))
proj_layer = stax.serial(
stax.GlobalSelfAttention(
n_chan_out=width,
n_chan_key=width,
n_chan_val=n_chan_val,
n_heads=n_heads,
linear_scaling=True,
W_key_std=W_std,
W_value_std=W_std,
W_query_std=W_std,
W_out_std=1.0,
b_std=b_std,
batch_axis=batch_axis,
channel_axis=channel_axis),
stax.Flatten(batch_axis, batch_axis_fc))
else:
raise ValueError(proj_into_2d)
readout = stax.serial(proj_layer, fc(1 if is_ntk else width))
device_count = -1 if spec.index('N') == 0 else 0
return stax.serial(block, readout), input_shape, device_count, channel_axis_fc
def _get_net_pool(width, is_ntk, pool_type, padding,
filter_shape, strides, normalize_edges):
W_std, b_std = 2.**0.5, 0.5**0.5
phi = stax.Relu()
parameterization = 'ntk'
fc = functools.partial(
stax.Dense,
W_std=W_std / width if pool_type == 'SUM' else W_std,
b_std=b_std,
parameterization=parameterization)
conv = functools.partial(
stax.Conv,
filter_shape=filter_shape,
strides=None,
padding='SAME',
W_std=W_std / onp.prod(filter_shape) if pool_type == 'SUM' else W_std,
b_std=b_std,
parameterization=parameterization)
if pool_type == 'AVG':
pool_fn = functools.partial(stax.AvgPool, normalize_edges=normalize_edges)
global_pool_fn = stax.GlobalAvgPool
elif pool_type == 'SUM':
pool_fn = stax.SumPool
global_pool_fn = stax.GlobalSumPool
else:
raise ValueError(pool_type)
pool = pool_fn(filter_shape, strides, padding)
device_count = -1
return stax.serial(
conv(width),
phi,
pool,
conv(width),
phi,
global_pool_fn(),
fc(1 if is_ntk else width)
), INPUT_SHAPE, device_count, -1
class StaxTest(test_utils.NeuralTangentsTestCase):
def _skip_test(self, filter_shape, is_conv, is_res, padding, proj_into_2d,
strides, use_pooling):
if is_conv:
test_utils.skip_test(self)
if (is_res and is_conv and ((strides is not None and strides != (1, 1)) or
(padding == 'VALID' and filter_shape !=
(1, 1)))):
      raise absltest.SkipTest('Different paths in a residual model need to '
'return outputs of the same shape.')
elif (filter_shape != FILTER_SHAPES[0] or padding != PADDINGS[0] or
strides != STRIDES[0] or proj_into_2d != PROJECTIONS[0] or
use_pooling):
raise absltest.SkipTest('FC models do not have these parameters.')
@parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
model, phi_name, width, 'same_inputs'
if same_inputs else 'different_inputs', 'filter_shape=%s' %
str(filter_shape), 'padding=%s' % padding, 'strides=%s' %
str(strides), 'pool' if use_pooling else 'flatten',
'NTK' if is_ntk else 'NNGP', 'RESNET' if is_res else 'serial',
proj_into_2d),
'model':
model,
'width':
width,
'strides':
strides,
'padding':
padding,
'phi':
phi,
'same_inputs':
same_inputs,
'filter_shape':
filter_shape,
'use_pooling':
use_pooling,
'is_ntk':
is_ntk,
'is_res':
is_res,
'proj_into_2d':
proj_into_2d
}
for model in MODELS
for width in WIDTHS
for phi, phi_name in ACTIVATIONS.items()
for same_inputs in [False]
for padding in PADDINGS for strides in STRIDES
for filter_shape in FILTER_SHAPES
for use_pooling in [False, True]
for is_ntk in [False, True]
for is_res in [False, True]
for proj_into_2d in PROJECTIONS))
def test_exact(self, model, width, strides, padding, phi, same_inputs,
filter_shape, use_pooling, is_ntk, is_res, proj_into_2d):
is_conv = 'conv' in model
# Check for duplicate / incorrectly-shaped NN configs / wrong backend.
self._skip_test(filter_shape, is_conv, is_res, padding, proj_into_2d,
strides, use_pooling)
pool_type = 'AVG'
W_std, b_std = 2.**0.5, 0.5**0.5
layer_norm = None
parameterization = 'ntk'
use_dropout = False
net = _get_net(W_std, b_std, filter_shape, is_conv, use_pooling, is_res,
padding, phi, strides, width, is_ntk, proj_into_2d,
pool_type, layer_norm, parameterization, use_dropout)
self._check_agreement_with_empirical(
net, same_inputs, use_dropout, is_ntk, RTOL, 1.05)
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
f'_model={model}'
f'_width={width}'
f'_same_inputs={same_inputs}'
f'_filter_shape={filter_shape}'
f'_proj={proj_into_2d}_'
f'_is_ntk={is_ntk}_'
f'_b_std={b_std}_'
f'_param={parameterization}',
'model':
model,
'width':
width,
'same_inputs':
same_inputs,
'filter_shape':
filter_shape,
'proj_into_2d':
proj_into_2d,
'is_ntk':
is_ntk,
'b_std':
b_std,
'parameterization':
parameterization
}
for model in MODELS
for width in WIDTHS
for same_inputs in [False]
for is_ntk in [False, True]
for filter_shape in FILTER_SHAPES
for proj_into_2d in PROJECTIONS[:2]
for b_std in [None, 0., 0.5**0.5]
for parameterization in PARAMETERIZATIONS))
def test_parameterizations(
self,
model,
width,
same_inputs,
is_ntk,
filter_shape,
proj_into_2d,
b_std,
parameterization
):
is_conv = 'conv' in model
W_std = 2.**0.5
if parameterization == 'STANDARD':
W_std /= width**0.5
if b_std is not None:
b_std /= width**0.5
padding = PADDINGS[0]
strides = STRIDES[0]
phi = stax.Relu()
use_pooling, is_res = False, False
layer_norm = None
pool_type = 'AVG'
use_dropout = False
# Check for duplicate / incorrectly-shaped NN configs / wrong backend.
if is_conv:
test_utils.skip_test(self)
elif proj_into_2d != PROJECTIONS[0] or filter_shape != FILTER_SHAPES[0]:
raise absltest.SkipTest('FC models do not have these parameters.')
net = _get_net(W_std=W_std,
b_std=b_std,
filter_shape=filter_shape,
is_conv=is_conv,
use_pooling=use_pooling,
is_res=is_res,
padding=padding,
phi=phi,
strides=strides,
width=width,
is_ntk=is_ntk,
proj_into_2d=proj_into_2d,
pool_type=pool_type,
layer_norm=layer_norm,
parameterization=parameterization,
use_dropout=use_dropout)
self._check_agreement_with_empirical(net=net,
same_inputs=same_inputs,
use_dropout=use_dropout,
is_ntk=is_ntk,
rtol=0.021,
atol=0.2)
@parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_{}_{}_{}_{}_{}_{}'.format(
model,
width,
'same_inputs' if same_inputs else 'different_inputs',
'NTK' if is_ntk else 'NNGP',
proj_into_2d,
'layer_norm=%s' % str(layer_norm)),
'model':
model,
'width':
width,
'same_inputs':
same_inputs,
'is_ntk':
is_ntk,
'proj_into_2d':
proj_into_2d,
'layer_norm':
layer_norm
}
for model in MODELS
for width in WIDTHS
for same_inputs in [False]
for is_ntk in [False, True]
for proj_into_2d in PROJECTIONS[:2]
for layer_norm in LAYER_NORM))
def test_layernorm(self,
model,
width,
same_inputs,
is_ntk,
proj_into_2d,
layer_norm):
is_conv = 'conv' in model
# Check for duplicate / incorrectly-shaped NN configs / wrong backend.
if is_conv:
test_utils.skip_test(self)
elif proj_into_2d != PROJECTIONS[0] or layer_norm not in ('C', 'NC'):
raise absltest.SkipTest('FC models do not have these parameters.')
W_std, b_std = 2.**0.5, 0.5**0.5
filter_shape = FILTER_SHAPES[0]
padding = PADDINGS[0]
strides = STRIDES[0]
phi = stax.Relu()
use_pooling, | |
code was not 201')
self.assertEqual(response_data['job_type'], input_job_type, 'job_type was not set properly')
self.assertEqual(response_data['artifact_id'], input_artifact_id, 'artifact_id was not set properly')
self.assertRegex(response_data['id'],
r'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}\Z')
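        # The regex above checks that the returned id is a version-4 UUID: the
        # third group starts with '4' and the fourth group starts with 8, 9, a,
        # or b (the RFC 4122 variant bits).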
self.assertIsNone(response_data['ssh_containers'], 'ssh_containers not null')
self.assertIsNotNone(response_data['created'], 'job creation date/time was not set properly')
self.assertItemsEqual(response_data.keys(),
['created', 'job_type', 'artifact_id', 'build_env_size', 'id', 'enable_debug',
'public_key_id', 'kubernetes_job', 'kubernetes_service', 'kubernetes_configmap',
'ssh_containers', 'status', 'image_root_archive_name', 'initrd_file_name',
'kernel_file_name', 'resultant_image_id', 'kubernetes_namespace',
'kernel_parameters_file_name'],
'returned keys not the same')
@mock.patch("src.server.v2.resources.jobs.open", new_callable=mock.mock_open,
read_data='{"metadata":{"name":"foo"}}')
@mock.patch("src.server.app.app.s3.generate_presigned_url")
def test_post_enable_debug_true(self, s3_mock, mock_open, utils_mock, config_mock, client_mock):
input_job_type = "create"
input_artifact_id = self.test_recipe_id
public_key_id = self.test_public_key_id
debug_ssh_container_name = 'debug'
debug_ssh_container_jail = False
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'enable_debug': True,
'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
}
s3url = S3Url(self.recipe_data['link']['path'])
expected_params = {'Bucket': s3url.bucket, 'Key': s3url.key}
self.stubber.add_response('head_object', {"ETag": self.recipe_data['link']["etag"]}, expected_params)
s3_mock.return_value = "http://localhost/path/to/file_abc.tgz"
self.stubber.activate()
response = self.app.post('/jobs', content_type='application/json', data=json.dumps(input_data))
self.stubber.deactivate()
response_data = json.loads(response.data)
self.assertEqual(response.status_code, 201, 'status code was not 201')
self.assertEqual(response_data['job_type'], input_job_type, 'job_type was not set properly')
self.assertEqual(response_data['artifact_id'], input_artifact_id, 'artifact_id was not set properly')
self.assertRegex(response_data['id'],
r'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}\Z')
self.assertIsNotNone(response_data['ssh_containers'], 'ssh_containers not null')
external_host_name = "{}.ims.cmn.shasta.local".format(response_data['id'])
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['customer_access']['host'],
external_host_name, 'SSH Container host value did not match expected value')
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['customer_access']['port'], 22,
'SSH Container host value did not match expected value')
cluster_local_host_name = \
"{r[kubernetes_service]}.{r[kubernetes_namespace]}.svc.cluster.local".format(r=response_data)
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['cluster.local']['host'],
cluster_local_host_name, 'SSH Container host value did not match expected value')
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['cluster.local']['port'], 22,
'SSH Container host value did not match expected value')
self.assertEqual(response_data['ssh_containers'][0]['name'], debug_ssh_container_name,
'SSH Container name value did not match')
self.assertEqual(response_data['ssh_containers'][0]['jail'], debug_ssh_container_jail,
'SSH Container jail value did not match')
self.assertIsNotNone(response_data['created'], 'job creation date/time was not set properly')
self.assertItemsEqual(response_data.keys(),
['created', 'job_type', 'artifact_id', 'build_env_size', 'id', 'enable_debug',
'public_key_id', 'kubernetes_job', 'kubernetes_service', 'kubernetes_configmap',
'ssh_containers', 'status', 'image_root_archive_name', 'initrd_file_name',
'kernel_file_name', 'resultant_image_id', 'kubernetes_namespace',
'kernel_parameters_file_name'],
'returned keys not the same')
@mock.patch("src.server.v2.resources.jobs.open", new_callable=mock.mock_open,
read_data='{"metadata":{"name":"foo"}}')
@mock.patch("src.server.app.app.s3.generate_presigned_url")
def test_post_ims_job_namespace(self, s3_mock, mock_open, utils_mock, config_mock, client_mock):
""" Test happy path POST """
input_job_type = "create"
input_artifact_id = self.test_recipe_id
public_key_id = self.test_public_key_id
job_namespace = self.getUniqueString()
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'enable_debug': False,
'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
}
s3url = S3Url(self.recipe_data['link']['path'])
expected_params = {'Bucket': s3url.bucket, 'Key': s3url.key}
self.stubber.add_response('head_object', {"ETag": self.recipe_data['link']["etag"]}, expected_params)
s3_mock.return_value = "http://localhost/path/to/file_abc.tgz"
with mock.patch.dict('os.environ', {'DEFAULT_IMS_JOB_NAMESPACE': job_namespace}):
self.stubber.activate()
response = self.app.post('/jobs', content_type='application/json', data=json.dumps(input_data))
self.stubber.deactivate()
response_data = json.loads(response.data)
self.assertEqual(response.status_code, 201, 'status code was not 201')
self.assertEqual(response_data['job_type'], input_job_type, 'job_type was not set properly')
self.assertEqual(response_data['artifact_id'], input_artifact_id, 'artifact_id was not set properly')
self.assertRegex(response_data['id'],
r'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}\Z')
self.assertIsNone(response_data['ssh_containers'], 'ssh_containers not null')
self.assertIsNotNone(response_data['created'], 'job creation date/time was not set properly')
self.assertEqual(response_data['kubernetes_namespace'], job_namespace,
"kubernetes_namespace was not set properly")
self.assertItemsEqual(response_data.keys(),
['created', 'job_type', 'artifact_id', 'build_env_size', 'id', 'enable_debug',
'public_key_id', 'kubernetes_job', 'kubernetes_service', 'kubernetes_configmap',
'ssh_containers', 'status', 'image_root_archive_name', 'initrd_file_name',
'kernel_file_name', 'resultant_image_id', 'kubernetes_namespace',
'kernel_parameters_file_name'],
'returned keys not the same')
def test_post_create_with_ssh_container(self, utils_mock, config_mock, client_mock):
""" Test create with ssh_container """
input_job_type = "create"
input_artifact_id = self.test_recipe_id
public_key_id = self.test_public_key_id
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
'ssh_containers': [
{'name': 'post-build', 'jail': False}
]
}
response = self.app.post('/jobs', content_type='application/json', data=json.dumps(input_data))
self.assertEqual(response.status_code, 400, 'status code was not 400')
@responses.activate
@mock.patch("src.server.v2.resources.jobs.open", new_callable=mock.mock_open,
read_data='{"metadata":{"name":"foo"}}')
@mock.patch("src.server.app.app.s3.generate_presigned_url")
def test_post_customize_with_out_ssh_container(self, s3_mock, mock_open, utils_mock, config_mock, client_mock):
""" Test happy path POST without a ssh_container """
input_job_type = "customize"
input_artifact_id = self.test_image_id
public_key_id = self.test_public_key_id
default_ssh_container_name = "customize"
default_ssh_container_jail = False
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
}
manifest_s3_info = S3Url(self.image_data["link"]["path"])
manifest_expected_params = {'Bucket': manifest_s3_info.bucket, 'Key': manifest_s3_info.key}
self.stubber.add_response(
'head_object',
{"ETag": self.image_data["link"]["etag"]},
manifest_expected_params
)
s3_manifest_json = json.dumps(self.s3_manifest_data).encode()
self.stubber.add_response(
'get_object',
{
'Body': StreamingBody(io.BytesIO(s3_manifest_json), len(s3_manifest_json)),
'ContentLength': len(s3_manifest_json)
},
manifest_expected_params
)
rootfs_manifest_info = [artifact for artifact in self.s3_manifest_data["artifacts"]
if artifact["type"].startswith(self.manifest_rootfs_mime_type)]
self.assertEqual(len(rootfs_manifest_info), 1)
rootfs_s3_info = S3Url(rootfs_manifest_info[0]["link"]["path"])
self.stubber.add_response(
'head_object',
{"ETag": rootfs_manifest_info[0]["link"]["etag"]},
{'Bucket': rootfs_s3_info.bucket, 'Key': rootfs_s3_info.key}
)
s3_mock.return_value = "http://localhost/path/to/file_abc.tgz"
self.stubber.activate()
response = self.app.post('/jobs', content_type='application/json', data=json.dumps(input_data))
self.stubber.deactivate()
response_data = json.loads(response.data)
self.assertEqual(response.status_code, 201, 'status code was not 201')
self.assertEqual(response_data['job_type'], input_job_type, 'job_type was not set properly')
self.assertEqual(response_data['artifact_id'], input_artifact_id, 'artifact_id was not set properly')
self.assertIsNotNone(response_data['ssh_containers'], 'ssh_containers not null')
self.assertEqual(response_data['ssh_containers'][0]['name'], default_ssh_container_name,
'SSH Container name value did not match')
self.assertEqual(response_data['ssh_containers'][0]['jail'], default_ssh_container_jail,
'SSH Container jail value did not match')
self.assertRegex(response_data['id'],
r'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}\Z')
self.assertIsNotNone(response_data['created'], 'job creation date/time was not set properly')
self.assertItemsEqual(response_data.keys(),
['created', 'job_type', 'artifact_id', 'build_env_size', 'id', 'enable_debug',
'public_key_id', 'kubernetes_job', 'kubernetes_service', 'kubernetes_configmap',
'ssh_containers', 'status', 'image_root_archive_name', 'initrd_file_name',
'kernel_file_name', 'resultant_image_id', 'kubernetes_namespace',
'kernel_parameters_file_name'],
'returned keys not the same')
@responses.activate
@mock.patch("src.server.v2.resources.jobs.open", new_callable=mock.mock_open,
read_data='{"metadata":{"name":"foo"}}')
@mock.patch("src.server.app.app.s3.generate_presigned_url")
def test_post_customize_with_ssh_container(self, s3_mock, mock_open, utils_mock, config_mock, client_mock):
""" Test happy path POST with one ssh_container """
input_job_type = "customize"
input_artifact_id = self.test_image_id
public_key_id = self.test_public_key_id
ssh_container_name = "my-ssh-jail"
ssh_container_jail = True
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
'ssh_containers': [
{'name': ssh_container_name, 'jail': ssh_container_jail}
]
}
s3_manifest_json = json.dumps(self.s3_manifest_data).encode()
raw_stream = StreamingBody(
io.BytesIO(s3_manifest_json),
len(s3_manifest_json)
)
manifest_s3_info = S3Url(self.image_data["link"]["path"])
manifest_expected_params = {'Bucket': manifest_s3_info.bucket, 'Key': manifest_s3_info.key}
self.stubber.add_response(
'head_object',
{"ETag": self.image_data["link"]["etag"]},
manifest_expected_params
)
s3_manifest_json = json.dumps(self.s3_manifest_data).encode()
self.stubber.add_response(
'get_object',
{
'Body': StreamingBody(io.BytesIO(s3_manifest_json), len(s3_manifest_json)),
'ContentLength': len(s3_manifest_json)
},
manifest_expected_params
)
rootfs_manifest_info = [artifact for artifact in self.s3_manifest_data["artifacts"]
if artifact["type"].startswith(self.manifest_rootfs_mime_type)]
self.assertEqual(len(rootfs_manifest_info), 1)
rootfs_s3_info = S3Url(rootfs_manifest_info[0]["link"]["path"])
self.stubber.add_response(
'head_object',
{
"ETag": rootfs_manifest_info[0]["link"]["etag"],
"Metadata": {
"md5sum": rootfs_manifest_info[0]["md5"]
}
},
{'Bucket': rootfs_s3_info.bucket, 'Key': rootfs_s3_info.key}
)
s3_mock.return_value = "http://localhost/path/to/file_abc.tgz"
self.stubber.activate()
response = self.app.post('/jobs', content_type='application/json', data=json.dumps(input_data))
self.stubber.deactivate()
response_data = json.loads(response.data)
self.assertEqual(response.status_code, 201, 'status code was not 201')
self.assertEqual(response_data['job_type'], input_job_type, 'job_type was not set properly')
self.assertEqual(response_data['artifact_id'], input_artifact_id, 'artifact_id was not set properly')
self.assertEqual(response_data['ssh_containers'][0]['name'], ssh_container_name,
'SSH Container name value did not match')
self.assertEqual(response_data['ssh_containers'][0]['jail'], ssh_container_jail,
'SSH Container jail value did not match')
external_host_name = "{}.ims.cmn.shasta.local".format(response_data['id'])
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['customer_access']['host'],
external_host_name, 'SSH Container host value did not match expected value')
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['customer_access']['port'], 22,
'SSH Container host value did not match expected value')
cluster_local_host_name = \
"{r[kubernetes_service]}.{r[kubernetes_namespace]}.svc.cluster.local".format(r=response_data)
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['cluster.local']['host'],
cluster_local_host_name, 'SSH Container host value did not match expected value')
self.assertEqual(response_data['ssh_containers'][0]['connection_info']['cluster.local']['port'], 22,
'SSH Container host value did not match expected value')
self.assertRegex(response_data['id'],
r'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}\Z')
self.assertIsNotNone(response_data['created'], 'job creation date/time was not set properly')
self.assertItemsEqual(response_data.keys(),
['created', 'job_type', 'artifact_id', 'build_env_size', 'id', 'enable_debug',
'public_key_id', 'kubernetes_job', 'kubernetes_service', 'kubernetes_configmap',
'ssh_containers', 'status', 'image_root_archive_name', 'initrd_file_name',
'kernel_file_name', 'resultant_image_id', 'kubernetes_namespace',
'kernel_parameters_file_name'],
'returned keys not the same')
def test_post_create_with_multiple_ssh_containers(self, utils_mock, config_mock, client_mock):
""" Post Job Create with multiple ssh_containers requested """
input_job_type = "create"
input_artifact_id = self.test_recipe_id
public_key_id = self.test_public_key_id
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
'ssh_containers': [
{'name': 'pre-cfs', 'jail': False},
{'name': 'cfs', 'jail': True},
{'name': 'post-cfs', 'jail': False},
]
}
response = self.app.post('/jobs', content_type='application/json', data=json.dumps(input_data))
response_data = json.loads(response.data)
self.assertEqual(response.status_code, 400, 'status code was not 400')
def test_post_400_no_input(self, utils_mock, config_mock, client_mock):
""" Test a POST request with no input provided by the client """
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps({}))
check_error_responses(self, response, 400, ['status', 'title', 'detail'])
def test_post_422_missing_inputs(self, utils_mock, config_mock, client_mock):
""" Test a POST request with missing data provided by the client """
input_data = {'job_type': self.getUniqueString()}
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps(input_data))
check_error_responses(self, response, 422, ['status', 'title', 'detail', 'errors'])
input_data = {'artifact_id': str(uuid.uuid4())}
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps(input_data))
check_error_responses(self, response, 422, ['status', 'title', 'detail', 'errors'])
def test_post_422_improper_type_inputs(self, utils_mock, config_mock, client_mock):
""" Test a POST request with invalid data provided by the client (bad types) """
input_data = {'job_type': self.getUniqueInteger(), 'artifact_id': str(uuid.uuid4())}
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps(input_data))
check_error_responses(self, response, 422, ['status', 'title', 'detail', 'errors'])
input_data = {'job_type': self.getUniqueString(), 'artifact_id': self.getUniqueInteger()}
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps(input_data))
check_error_responses(self, response, 422, ['status', 'title', 'detail', 'errors'])
def test_post_422_unknown_field(self, utils_mock, config_mock, client_mock):
""" Test a POST request with a field that is not valid for the request """
input_job_type = self.getUniqueString()
input_artifact_id = str(uuid.uuid4())
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'invalid_field': str(uuid.uuid4()) # invalid
}
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps(input_data))
check_error_responses(self, response, 422, ['status', 'title', 'detail', 'errors'])
@mock.patch("src.server.v2.resources.jobs.open", new_callable=mock.mock_open,
read_data='{"metadata":{"name":"foo"}}')
def test_post_422_missing_image_root_archive_name(self, mock_open, utils_mock, config_mock, client_mock):
""" Test case where image_root_archive_name is missing """
input_job_type = "create"
input_artifact_id = self.test_recipe_id
public_key_id = self.test_public_key_id
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'enable_debug': False,
# 'image_root_archive_name': self.getUniqueString(),
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
}
response = self.app.post(self.test_uri, content_type='application/json', data=json.dumps(input_data))
check_error_responses(self, response, 422, ['status', 'title', 'detail', 'errors'])
self.assertIn("image_root_archive_name", response.json["errors"],
"Expected image_root_archive_name to be listed in error detail")
@mock.patch("src.server.v2.resources.jobs.open", new_callable=mock.mock_open,
read_data='{"metadata":{"name":"foo"}}')
def test_post_422_image_root_archive_name_is_blank(self, mock_open, utils_mock, config_mock, client_mock):
""" Test case where image_root_archive_name is blank """
input_job_type = "create"
input_artifact_id = self.test_recipe_id
public_key_id = self.test_public_key_id
input_data = {
'job_type': input_job_type,
'artifact_id': input_artifact_id,
'public_key_id': public_key_id,
'enable_debug': False,
'image_root_archive_name': "",
'kernel_file_name': self.getUniqueString(),
'initrd_file_name': self.getUniqueString(),
}
response = self.app.post(self.test_uri, content_type='application/json', | |
"""Mutation.
Usage:
mutation play [--verbose] [--exclude=<globs>] [--only-deadcode-detection] [--include=<globs>] [--sampling=<s>] [--randomly-seed=<n>] [--max-workers=<n>] [<file-or-directory> ...] [-- TEST-COMMAND ...]
mutation replay [--verbose] [--max-workers=<n>]
mutation list
mutation show MUTATION
mutation apply MUTATION
mutation (-h | --help)
mutation --version
Options:
--verbose Show more information.
-h --help Show this screen.
--version Show version.
"""
import asyncio
import fnmatch
import functools
import itertools
import os
import random
import re
import shlex
import sys
import time
from ast import Constant
from concurrent import futures
from contextlib import contextmanager
from copy import deepcopy
from datetime import timedelta
from difflib import unified_diff
from uuid import UUID
import lexode
import parso
import pygments
import pygments.formatters
import pygments.lexers
import zstandard as zstd
from aiostream import pipe, stream
from astunparse import unparse
from coverage import Coverage
from docopt import docopt
from humanize import precisedelta
from loguru import logger as log
from lsm import LSM
from pathlib3x import Path
from termcolor import colored
from tqdm import tqdm
from ulid import ULID
__version__ = (0, 4, 4)
MINUTE = 60 # seconds
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 31 * DAY
def humanize(seconds):
    if seconds < 1:
        precision = "seconds"
    elif seconds // DAY != 0:
        precision = "days"
    elif seconds // HOUR != 0:
        precision = "hours"
    elif seconds // MINUTE != 0:
        precision = "minutes"
    else:
        precision = "seconds"
    return precisedelta(timedelta(seconds=seconds), minimum_unit=precision)
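# Rough usage sketch (the wording is whatever humanize.precisedelta produces, so
# the outputs shown are only indicative):
#   humanize(42)        # -> "42 seconds"
#   humanize(3 * HOUR)  # -> reported with hour precision, e.g. "3 hours"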
PRONOTION = "https://youtu.be/ihZEaj9ml4w?list=PLOSNaPJYYhrtliZqyEWDWL0oqeH0hOHnj"
log.remove()
if os.environ.get("DEBUG", False):
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="TRACE",
colorize=True,
enqueue=True,
)
else:
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="INFO",
colorize=True,
enqueue=True,
)
# The function patch was taken somewhere over the rainbow...
_hdr_pat = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@$")
def patch(diff, source):
"""Apply unified diff patch to string s to recover newer string. If
revert is True, treat s as the newer string, recover older string.
"""
s = source.splitlines(True)
p = diff.splitlines(True)
t = ""
i = sl = 0
(midx, sign) = (1, "+")
while i < len(p) and p[i].startswith(("---", "+++")):
i += 1 # skip header lines
while i < len(p):
m = _hdr_pat.match(p[i])
if not m:
raise Exception("Cannot process diff")
i += 1
l = int(m.group(midx)) - 1 + (m.group(midx + 1) == "0")
t += "".join(s[sl:l])
sl = l
while i < len(p) and p[i][0] != "@":
if i + 1 < len(p) and p[i + 1][0] == "\\":
line = p[i][:-1]
i += 2
else:
line = p[i]
i += 1
if len(line) > 0:
if line[0] == sign or line[0] == " ":
t += line[1:]
sl += line[0] != sign
t += "\n" + "".join(s[sl:])
return t
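# How patch() is used elsewhere in this tool: deltas_compute() stores
# diff(source, mutated_source, path) for every mutation, and
# install_module_loader() later calls patch(stored_diff, source) to rebuild the
# mutated module text before executing it.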
def glob2predicate(patterns):
def regex_join(regexes):
"""Combine a list of regexes into one that matches any of them."""
return "|".join("(?:%s)" % r for r in regexes)
regexes = (fnmatch.translate(pattern) for pattern in patterns)
regex = re.compile(regex_join(regexes))
def predicate(path):
return regex.match(path) is not None
return predicate
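# Illustrative use (patterns are hypothetical): the returned predicate matches a
# path against any of the given globs.
#   only = glob2predicate(("*.py", "src/*"))
#   only("setup.py")    # True
#   only("README.md")   # False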
def node_iter(node, level=1):
yield node
for child in node.children:
if not getattr(child, "children", False):
yield child
continue
yield from node_iter(child, level + 1)
def node_copy_tree(node, index):
root = node.get_root_node()
root = deepcopy(root)
iterator = itertools.dropwhile(
lambda x: x[0] != index, zip(itertools.count(0), node_iter(root))
)
index, node = next(iterator)
return root, node
@contextmanager
def timeit():
start = time.perf_counter()
yield lambda: time.perf_counter() - start
class Mutation(type):
ALL = set()
DEADCODE = set()
deadcode_detection = False
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
obj = cls()
type(cls).ALL.add(obj)
if cls.deadcode_detection:
type(cls).DEADCODE.add(obj)
class StatementDrop(metaclass=Mutation):
deadcode_detection = True
NEWLINE = "a = 42\n"
def predicate(self, node):
return "stmt" in node.type and node.type != "expr_stmt"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
index = new.parent.children.index(new)
passi = parso.parse("pass").children[0]
passi.prefix = new.get_first_leaf().prefix
new.parent.children[index] = passi
newline = parso.parse(type(self).NEWLINE).children[0].children[1]
new.parent.children.insert(index + 1, newline)
yield root, new
class DefinitionDrop(metaclass=Mutation):
deadcode_detection = True
def predicate(self, node):
        # There is also node.type == 'lambdadef', but lambda definitions are
        # always part of an assignment statement, so that case is handled
        # in StatementDrop.
return node.type in ("classdef", "funcdef")
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
new.parent.children.remove(new)
yield root, new
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable."""
it = iter(iterable)
while chunk := tuple(itertools.islice(it, n)):
yield chunk
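# Quick illustration of chunks():
#   list(chunks(range(5), 2)) == [(0, 1), (2, 3), (4,)]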
class MutateNumber(metaclass=Mutation):
COUNT = 5
def predicate(self, node):
return node.type == "number"
def mutate(self, node, index):
value = eval(node.value)
if isinstance(value, int):
def randomize(x):
return random.randint(0, x)
else:
def randomize(x):
return random.random() * x
for size in range(8, 32):
if value < 2 ** size:
break
count = 0
while count != self.COUNT:
count += 1
root, new = node_copy_tree(node, index)
new.value = str(randomize(2 ** size))
if new.value == node.value:
continue
yield root, new
class MutateString(metaclass=Mutation):
def predicate(self, node):
# str or bytes.
return node.type == "string"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
value = eval(new.value)
if isinstance(value, bytes):
value = b"coffeebad" + value
else:
value = "mutated string " + value
value = Constant(value=value, kind="")
value = unparse(value).strip()
new.value = value
yield root, new
class MutateKeyword(metaclass=Mutation):
KEYWORDS = set(["continue", "break", "pass"])
SINGLETON = set(["True", "False", "None"])
# Support xor operator ^
BOOLEAN = set(["and", "or"])
TARGETS = KEYWORDS | SINGLETON | BOOLEAN
def predicate(self, node):
return node.type == "keyword" and node.value in type(self).TARGETS
def mutate(self, node, index):
value = node.value
for targets in [self.KEYWORDS, self.SINGLETON, self.BOOLEAN]:
if value in targets:
break
else:
raise NotImplementedError
for target in targets:
if target == value:
continue
root, new = node_copy_tree(node, index)
new.value = target
yield root, new
class Comparison(metaclass=Mutation):
    def predicate(self, node):
        return node.type == "comparison"
    def mutate(self, node, index):
        root, new = node_copy_tree(node, index)
        not_test = parso.parse("not ({})".format(new.get_code()))
        index = new.parent.children.index(new)
        new.parent.children[index] = not_test
        yield root, new
class MutateOperator(metaclass=Mutation):
BINARY = ["+", "-", "%", "|", "&", "//", "/", "*", "^", "**", "@"]
BITWISE = ["<<", ">>"]
COMPARISON = ["<", "<=", "==", "!=", ">=", ">"]
ASSIGNEMENT = ["="] + [x + "=" for x in BINARY + BITWISE]
# TODO support OPERATORS_CONTAINS = ["in", "not in"]
    OPERATORS = [
        BINARY,
        BITWISE,
        COMPARISON,
        ASSIGNEMENT,
    ]
def predicate(self, node):
return node.type == "operator"
def mutate(self, node, index):
for operators in type(self).OPERATORS:
if node.value not in operators:
continue
for new_operator in operators:
if node.value == new_operator:
continue
root, new = node_copy_tree(node, index)
new.value = new_operator
yield root, new
def diff(source, target, filename=""):
lines = unified_diff(
source.split("\n"), target.split("\n"), filename, filename, lineterm=""
)
out = "\n".join(lines)
return out
def mutate(node, index, mutations):
for mutation in mutations:
if not mutation.predicate(node):
continue
yield from mutation.mutate(node, index)
def interesting(new_node, coverage):
if getattr(new_node, "line", False):
return new_node.line in coverage
return new_node.get_first_leaf().line in coverage
def deltas_compute(source, path, coverage, mutations):
ast = parso.parse(source)
ignored = 0
for (index, node) in zip(itertools.count(0), node_iter(ast)):
for root, new_node in mutate(node, index, mutations):
if not interesting(new_node, coverage):
ignored += 1
continue
target = root.get_code()
delta = diff(source, target, path)
yield delta
if ignored > 1:
msg = "Ignored {} mutations from file at {}"
msg += " because there is no associated coverage."
log.trace(msg, ignored, path)
async def pool_for_each_par_map(loop, pool, f, p, iterator):
zx = stream.iterate(iterator)
zx = zx | pipe.map(lambda x: loop.run_in_executor(pool, p, x))
async with zx.stream() as streamer:
limit = pool._max_workers
unfinished = []
while True:
tasks = []
for i in range(limit):
try:
task = await streamer.__anext__()
except StopAsyncIteration:
limit = 0
else:
tasks.append(task)
tasks = tasks + list(unfinished)
if not tasks:
break
finished, unfinished = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
for finish in finished:
out = finish.result()
f(out)
limit = pool._max_workers - len(unfinished)
def mutation_create(item):
path, source, coverage, mutation_predicate = item
if not coverage:
msg = "Ignoring file {} because there is no associated coverage."
log.trace(msg, path)
return []
log.trace("Mutating file: {}...", path)
mutations = [m for m in Mutation.ALL if mutation_predicate(m)]
deltas = deltas_compute(source, path, coverage, mutations)
# return the compressed deltas to save some time in the
# mainthread.
out = [(path, zstd.compress(x.encode("utf8"))) for x in deltas]
log.trace("There is {} mutations for the file `{}`", len(out), path)
return out
def install_module_loader(uid):
db = LSM(".mutation.okvslite")
mutation_show(uid.hex)
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
with open(path) as f:
source = f.read()
patched = patch(diff, source)
import imp
components = path[:-3].split("/")
    module_path = None
    while components:
for pythonpath in sys.path:
filepath = os.path.join(pythonpath, "/".join(components))
filepath += ".py"
ok = os.path.exists(filepath)
if ok:
module_path = ".".join(components)
break
else:
components.pop()
continue
break
if module_path is None:
raise Exception("sys.path oops!")
patched_module = imp.new_module(module_path)
try:
exec(patched, | |
# Licensed under the Apache License Version 2.0: http://www.apache.org/licenses/LICENSE-2.0.txt
"""Celery tasks relating to Twitter."""
__author__ = '<NAME>'
from datetime import datetime
from celery import chain, group
from celery.task.control import revoke
from celery.utils.log import get_task_logger
from itertools import groupby
from app import app
from db_settings import get_neo_driver, cache
from solr_tools import tweets2Solr
from twitter_settings import *
from twitter_tools.neo import connections2Neo, tweetDump2Neo, users2Neo, setUserDefunct, multiUserTweetDump2Neo
from twitter_tools.rated_twitter import RatedTwitter
from twitter_tools.streaming_twitter import StreamingTwitter
from twitter_tools.tools import renderTwitterUser, decomposeTweets
from crawl.crawl_cypher import nextNearest, whoNext, start_user_crawl, update_crawl
logger = get_task_logger(__name__)
@app.task(name='twitter_tasks.twitterCall', bind=True)
def twitterCall(self, method_name, credentials=False, **kwargs):
"""Attempt a given Twitter API call, retry if rate-limited. Returns the result of the call.
Positional arguments:
methodName -- the Twitter API method to call
args -- a dicionary of keyword arguments
"""
api = RatedTwitter(credentials=credentials)
limit = api.can_we_do_that(method_name)
if limit:
logger.info('*** TWITTER RATE-LIMITED: %s ***' % method_name)
raise twitterCall.retry(exc=Exception('Twitter rate-limited', method_name), countdown=limit)
else:
okay, result = api.method_call(method_name, **kwargs)
if okay:
            logger.info('*** TWITTER CALL: %s succeeded ***' % method_name)
return result
else:
assert False
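# Illustrative dispatch (the method name and arguments mirror getTwitterUsers
# below; `credentials` is whatever RatedTwitter expects):
#   twitterCall.delay('lookup_user', credentials, screen_name='alice,bob')
# When RatedTwitter reports a rate limit, the task re-queues itself with the
# returned countdown instead of failing.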
@app.task(name='twitter_tasks.pushRenderedTwits2Neo', bind=True)
def pushRenderedTwits2Neo(self, twits):
db = get_neo_driver()
users2Neo(db, twits)
db.close()
@app.task(name='twitter_tasks.pushTwitterUsers', bind=True)
def pushTwitterUsers(self, twits):
"""Store Twitter users returned by a Twitter API call in Neo4J.
Positional arguments:
twits -- a list of Twitter users as returned by Twython
"""
logger.info('***Push twitter users to neo ***')
rightNow = datetime.now().isoformat()
for twit in twits:
twit['last_scraped'] = rightNow
logger.info('***Push twitter user: ' + twit['screen_name'] + ' ***')
renderedTwits = [renderTwitterUser(twit) for twit in twits]
pushRenderedTwits2Neo.delay(renderedTwits)
@app.task(name='twitter_tasks.getTwitterUsers', bind=True)
def getTwitterUsers(self, users, credentials=False):
"""Look-up a set of Twitter users by screen_name and store them in Neo4J.
Positional arguments:
users -- a list of screen_names
"""
userList = ','.join(users)
logger.info('***Getting twitter users: ' + userList + ' ***')
chain(twitterCall.s('lookup_user', credentials, **{'screen_name': userList}), pushTwitterUsers.s())()
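# Sketch (screen names are placeholders): getTwitterUsers.delay(['alice', 'bob'])
# chains a single lookup_user call with pushTwitterUsers, so the rendered users
# are written to Neo4J in a separate task.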
@app.task(name='twitter_tasks.start_stream', bind=True)
def start_stream(self, track=None, follow=False, credentials=False):
logger.info('***Starting twitter filter stream***')
    # TODO: one of track or follow is required; return an error if neither is present
# start the stream
stream_task = stream_filter.delay(credentials=credentials, track=track)
cache.set("stream_id_" + self.request.id, stream_task.id.encode('utf-8'))
@app.task(name='twitter_tasks.stop_stream', bind=True)
def stop_stream(self, task_id):
# stop the stream running in the stream started by the given task id
logger.info('***Stopping twitter filter streamer ***')
stream_id = cache.get("stream_id_" + task_id).decode('utf-8')
revoke(stream_id, terminate=True)
# clean up the cache
@app.task(name='twitter_tasks.stream_filter', bind=True)
def stream_filter(self, credentials=False, retry_count=None, track=None):
logger.info('***Creating twitter filter streamer in task id: ' + self.request.id + ' ***')
streamer = StreamingTwitter(credentials=credentials, retry_count=retry_count, stream_id=self.request.id)
streamer.statuses.filter(track=track)
@app.task(name='twitter_tasks.push_stream_results', bind=True)
def push_stream_results(self, statuses):
logger.info('***Push twitter filter stream results***')
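    # groupby() only merges adjacent items, so the statuses are sorted by
    # screen_name first; taking the first status of each group below yields one
    # user record per distinct author.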
sorted_statuses = sorted(statuses, key=lambda status: status['user']['screen_name'])
decomposed_tweets = decomposeTweets(statuses)
users = [list(group)[0]['user'] for key, group in groupby(sorted_statuses,
lambda status: status['user']['screen_name'])]
pushTwitterUsers.delay(users)
pushRenderedMultiUserTweets2Neo.delay(decomposed_tweets)
@app.task(name='twitter_tasks.search', bind=True)
def search(self, query_terms, result_type='mixed', page_size=100, lang='en', tweet_id=0, maxTweets=False, count=0, credentials=False):
logger.info('***Starting TWITTER search ***')
api = RatedTwitter(credentials=credentials)
limit = api.search_wait()
if limit:
        logger.info('*** TWITTER RATE-LIMITED: search starts with: %s:%s ***' % (query_terms[0], str(count)))
raise search.retry(countdown=limit)
else:
query = {'q': query_terms,
'result_type': result_type,
'count': page_size,
'lang': lang,
}
if tweet_id:
query['max_id'] = tweet_id
okay, result = api.search(**query)
if okay:
logger.info('*** TWITTER search starts with: %s:%s ***' % (query_terms[0], str(tweet_id)))
if result:
push_search_results.delay(result)
newCount = count + len(result['statuses'])
if maxTweets:
if newCount > maxTweets: # No need for the task to call itself again.
return
try:
# Parse the data returned to get max_id to be passed in consequent call.
next_results_url_params = result['search_metadata']['next_results']
next_max_id = next_results_url_params.split('max_id=')[1].split('&')[0]
# Not done yet, the task calls itself with an updated count and tweetId.
search.delay(query_terms, maxTweets=maxTweets, count=newCount, tweet_id=next_max_id,
result_type=result_type, page_size=page_size, lang=lang, credentials=credentials)
except: #do we have anything we want in the except clause if there is not a next batch of tweets?
return
else:
if result == 'limited':
raise search.retry(countdown=api.search_wait())
@app.task(name='twitter_tasks.push_search_results', bind=True)
def push_search_results(self, search_results, cacheKey=False):
statuses = search_results['statuses']
sorted_statuses = sorted(statuses, key=lambda status: status['user']['screen_name'])
decomposed_tweets = decomposeTweets(statuses)
users = [list(group)[0]['user'] for key, group in groupby(sorted_statuses,
lambda status: status['user']['screen_name'])]
pushTwitterUsers.delay(users)
pushRenderedMultiUserTweets2Neo.delay(decomposed_tweets)
@app.task(name='twitter_tasks.pushRenderedMultiUserTweets2Neo', bind=True)
def pushRenderedMultiUserTweets2Neo(self, all_tweets_dump):
db = get_neo_driver()
multiUserTweetDump2Neo(db, all_tweets_dump)
db.close()
@app.task(name='twitter_tasks.pushRenderedTweets2Neo', bind=True)
def pushRenderedTweets2Neo(self, user, tweetDump):
db = get_neo_driver()
tweetDump2Neo(db, user, tweetDump)
db.close()
@app.task(name='twitter_tasks.pushRenderedTweets2Solr', bind=True)
def pushRenderedTweets2Solr(self, tweets):
tweets2Solr(tweets)
@app.task(name='twitter_tasks.pushTweets', bind=True)
def pushTweets(self, tweets, user, cacheKey=False):
""" Dump a set of tweets from a given user's timeline to Neo4J/Solr.
Positional arguments:
tweets -- a list of tweets as returned by Twython.
user -- screen_name of the user
Keyword arguments:
cacheKey -- a Redis key that identifies an on-going task to grab a user's timeline
"""
logger.info('Executing pushTweets task id {0.id}, task parent id {0.parent_id}, root id {0.root_id}'.format(self.request))
tweetDump = decomposeTweets(tweets) # Extract mentions, URLs, replies hashtags etc...
pushRenderedTweets2Neo.delay(user, tweetDump)
for label in ['tweet', 'retweet', 'quotetweet']:
pushRenderedTweets2Solr.delay([t[0] for t in tweetDump[label]])
if cacheKey: # These are the last Tweets, tell the scraper we're done.
cache.set(cacheKey, 'done')
logger.info('*** %s: DONE WITH TWEETS ***' % user)
@app.task(name='twitter_tasks.getTweets', bind=True)
def getTweets(self, user, maxTweets=3000, count=0, tweetId=0, cacheKey=False, credentials=False):
logger.info('Executing getTweets task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format(self.request))
logger.info('task parent id {0.parent_id}, root id {0.root_id}'.format(self.request))
"""Get tweets from the timeline of the given user, push them to Neo4J.
Positional arguments:
user -- The screen_name of the user
Keyword arguments:
maxTweets -- The maximum number of tweets to retrieve
cacheKey -- a Redis key that identifies an on-going task to grab a user's timeline
count -- The number of tweets already retrieved, set when the task calls itself
tweetId -- The maximum tweet ID to retrieve, set when the task calls itself
"""
api = RatedTwitter(credentials=credentials)
limit = api.get_user_timeline_wait()
if limit:
        logger.info('*** TWITTER RATE-LIMITED: statuses.user_timeline: %s:%s ***' % (user, str(count)))
raise getTweets.retry(countdown=limit)
else:
args = {'screen_name': user, 'exclude_replies': False, 'include_rts': True, 'trim_user': False, 'count': 200}
if tweetId:
args['max_id'] = tweetId
okay, result = api.get_user_timeline(**args)
if okay:
logger.info('*** TWITTER USER_TIMELINE: %s:%s ***' % (user, str(tweetId)))
if result:
newCount = count + len(result)
if maxTweets:
if newCount > maxTweets: # No need for the task to call itself again.
pushTweets.delay(result, user, cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.
return
else:
pushTweets.delay(result, user)
newTweetId = min([t['id'] for t in result]) - 1
# Not done yet, the task calls itself with an updated count and tweetId.
getTweets.delay(user, maxTweets=maxTweets, count=newCount, tweetId=newTweetId, cacheKey=cacheKey, credentials=credentials)
else:
pushTweets.delay([], user, cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.
else:
if result == '404':
db = get_neo_driver()
setUserDefunct(db, user)
db.close()
cache.set('scrape_tweets_' + self.request.root_id, 'done')
if result == 'limited':
raise getTweets.retry(countdown=api.get_user_timeline_wait())
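# Illustrative kick-off (the screen name is a placeholder):
#   getTweets.delay('alice', maxTweets=500)
# The task pages backwards through the timeline via max_id, hands each page to
# pushTweets, and re-schedules itself until maxTweets is reached or the
# timeline is exhausted.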
@app.task(name='twitter_tasks.pushRenderedConnections2Neo', bind=True)
def pushRenderedConnections2Neo(self, user, renderedTwits, friends=True):
db = get_neo_driver()
connections2Neo(db, user,renderedTwits,friends=friends)
db.close()
@app.task(name='twitter_tasks.pushTwitterConnections', bind=True)
def pushTwitterConnections(self, twits, user, friends=True, cacheKey=False):
"""Push the Twitter connections of a given user to Neo4J.
Positional arguments:
twits -- a list of Twitter users as returned by Twython
user -- The screen_name of the user
Keyword arguments:
friends -- "twits" are the user's friends if True, (default) else they're followers
cacheKey -- a Redis key that identifies an on-going task to grab a user's friends or followers
"""
if friends:
job = ' FRIENDS'
else:
job = ' FOLLOWERS'
if twits:
rendered_twits = [renderTwitterUser(twit) for twit in twits]
pushRenderedConnections2Neo.delay(user, rendered_twits, friends=friends)
if cacheKey: # These are the last connections, tell the scraper we're done.
cache.set(cacheKey, 'done')
logger.info('*** %s: DONE WITH %s ***' % (user, job))
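# Sketch (values are placeholders): pushTwitterConnections.delay(twits, 'alice', friends=False)
# stores the followers of 'alice'; when cacheKey is given, the final page of a
# paged crawl marks that Redis key as 'done'.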
@app.task(name='twitter_tasks.getTwitterConnections', bind=True)
def getTwitterConnections(self, user, friends=True, cursor=-1, credentials=False, cacheKey=False):
"""Get the connections of the given user, push them to Neo4J.
Positional arguments:
user -- The screen_name of the user
Keyword arguments:
friends -- "twits" are the user's friends if True, (default) else they're followers
cacheKey -- a Redis key that identifies an on-going task to grab a user's friends or followers
cursor -- Id of the next block of connections to retrieve, set when the task calls itself
"""
api = RatedTwitter(credentials=credentials)
if friends:
method = api.get_friends_list
limit = api.get_friends_list_wait()
method_name = 'get_friends_list'
else:
method = api.get_followers_list
limit = api.get_followers_list_wait()
method_name = 'get_followers_list'
if limit:
logger.info('*** TWITTER RATE-LIMITED: %s:%s ***' % (method_name, str(cursor)))
raise getTwitterConnections.retry(countdown=limit)
else:
okay, result = method(screen_name=user, cursor=cursor, count=200) # We can get a maximum of 200 connections at once.
if okay:
logger.info('*** TWITTER CURSOR: %s:%s:%s ***' % (method_name, user, str(cursor)))
twits = result['users']
next_cursor = result.get('next_cursor', False)
if next_cursor: # Unless the next cursor is 0, we're not done yet.
getTwitterConnections.delay(user, friends=friends, cursor=next_cursor, cacheKey=cacheKey, credentials=credentials)
pushTwitterConnections.delay(twits, user, | |
after the Remote Passive
Rendezvous completes. If the assisting device does not observe traffic across the tunnel from both
        sides within a period of time equal to or greater than this timeout, it will close the tunnel.
"""
args = shlex.split(line)
optParser = OptionParser(usage=optparse.SUPPRESS_USAGE, option_class=ExtendedOption)
optParser.add_option("-p", "--pairing-code", action="store", dest="pairingCode", type="string")
optParser.add_option("-t", "--access-token", action="store", dest="accessToken", type="base64")
optParser.add_option("-d", "--use-dummy-access-token", action="store_true", dest="useDummyAccessToken")
optParser.add_option("-j", "--joiner-address", action="store", dest="joinerAddr", type="string")
optParser.add_option("-r", "--rendezvous-timeout", action="store", dest="rendezvousTimeout", type="int")
optParser.add_option("-i", "--inactivity-timeout", action="store", dest="inactivityTimeout", type="int")
try:
(options, remainingArgs) = optParser.parse_args(args)
except SystemExit:
return
if (len(remainingArgs) > 1):
print "Unexpected argument: " + remainingArgs[1]
return
if (len(remainingArgs) == 1):
options.pairingCode = remainingArgs[0]
if (options.useDummyAccessToken and not options.accessToken):
options.accessToken = base64.standard_b64decode(dummyAccessToken)
if (options.pairingCode and options.accessToken):
print "Cannot specify both pairing code and access token"
return
try:
self.devMgr.RemotePassiveRendezvous(rendezvousDeviceAddr=options.joinerAddr,
pairingCode=options.pairingCode, accessToken=options.accessToken,
rendezvousTimeout=options.rendezvousTimeout, inactivityTimeout=options.inactivityTimeout)
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
print "Successfully connected to remote device %X" % (self.devMgr.DeviceId())
def do_reconnect(self, line):
"""
reconnect
Reconnect to the device using the previously supplied connect arguments.
"""
args = shlex.split(line)
if (len(args) != 0):
print "Usage:"
self.do_help('reconnect')
return
try:
self.devMgr.ReconnectDevice()
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
def do_close(self, line):
"""
close
Close the connection to the device.
"""
args = shlex.split(line)
if (len(args) != 0):
print "Usage:"
self.do_help('close')
return
try:
self.devMgr.Close()
self.devMgr.CloseEndpoints()
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
def do_enableconnectionmonitor(self, line):
"""
enable-connection-monitor [ <interval> <timeout> ]
Instruct the device to enable Weave connection monitoring.
<interval>
-- Interval at which to send EchoRequest messages (in ms).
Defaults to 500. Max is 65535 ms.
<timeout>
-- Amount of time after which the lack of a response to an
EchoRequest will cause the device to terminate the
connection (in ms). Defaults to 2000. Max is 65535 ms.
"""
args = shlex.split(line)
if (len(args) > 2):
print "Unexpected argument: " + args[2]
return
if (len(args) == 0):
interval = 500
timeout = 2000
elif (len(args) == 2):
interval = int(args[0])
if (interval < 0 or interval > 65535):
print "Invalid value specified for interval: " + args[0]
return
timeout = int(args[1])
if (timeout < 0 or timeout > 65535):
print "Invalid value specified for interval: " + args[1]
return
else:
print "Usage:"
            self.do_help('enable-connection-monitor')
return
try:
self.devMgr.EnableConnectionMonitor(interval, timeout)
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
print "Connection monitor enabled"
def do_disableconnectionmonitor(self, line):
"""
disable-connection-monitor
Instruct the device to disable Weave connection monitoring.
"""
args = shlex.split(line)
if (len(args) > 0):
print "Unexpected argument: " + args[2]
return
try:
self.devMgr.DisableConnectionMonitor()
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
print "Connection monitor disabled"
def do_scannetworks(self, line):
"""
scan-networks
Scan for remote WiFi networks.
"""
args = shlex.split(line)
networkType = WeaveDeviceMgr.NetworkType_WiFi
if (len(args) > 1):
print "Unexpected argument: " + args[1]
return
if (len(args) == 1):
try:
networkType = WeaveDeviceMgr.ParseNetworkType(args[0])
except Exception, ex:
print "Invalid network type: " + args[0]
return
try:
scanResult = self.devMgr.ScanNetworks(networkType)
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
print "ScanNetworks complete, %d network(s) found" % (len(scanResult))
i = 1
for net in scanResult:
print " Network %d" % (i)
net.Print(" ")
i = i + 1
def do_addnetwork(self, line):
self.do_addwifinetwork(line)
def do_addwifinetwork(self, line):
"""
add-wifi-network <ssid> <security-type> [ <key> ]
Provision a new WiFi network.
<security-type>:
none
wep
wpa
wpa2
wpa2-mixed-personal
wpa-enterprise
wpa2-enterprise
wpa2-mixed-enterprise
"""
args = shlex.split(line)
if (len(args) == 0):
print "Usage:"
self.do_help('add-wifi-network')
return
if (len(args) < 2):
print "Please specify WiFI security type"
return
securityType = WeaveDeviceMgr.ParseSecurityType(args[1])
        if (securityType is None):
print "Unrecognized security type: " + args[1]
return
networkInfo = WeaveDeviceMgr.NetworkInfo(
networkType = WeaveDeviceMgr.NetworkType_WiFi,
wifiSSID = args[0],
wifiMode = WeaveDeviceMgr.WiFiMode_Managed,
wifiRole = WeaveDeviceMgr.WiFiRole_Station,
wifiSecurityType = securityType)
if (securityType != WeaveDeviceMgr.WiFiSecurityType_None):
if (len(args) < 3):
print "Must supply WiFi key"
return
if (len(args) > 3):
print "Unexpected argument: " + args[3]
return
networkInfo.WiFiKey = args[2]
elif (len(args) > 2):
print "Unexpected argument: " + args[2]
return
try:
addResult = self.devMgr.AddNetwork(networkInfo)
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
self.lastNetworkId = addResult
print "Add wifi network complete (network id = " + str(addResult) + ")"
def do_addthreadnetwork(self, line):
"""
add-thread-network <name> <extended-pan-id> [ <key> ] [ <field>=<value>... ]
Provision a new Thread network.
<name>: string name of network
<extended-pan-id>: hex string (8 bytes)
<key>: hex string (any length)
<field>:
thread-key or key
thread-pan-id or pan-id
thread-channel or channel
...
"""
args = shlex.split(line)
if (len(args) == 0):
print "Usage:"
self.do_help('add-thread-network')
return
if (len(args) < 2):
print "Please specify the Network Name and Extended PAN Identifier"
return
networkInfo = WeaveDeviceMgr.NetworkInfo()
networkInfo.NetworkType = WeaveDeviceMgr.NetworkType_Thread
networkInfo.ThreadNetworkName = args[0]
try:
networkInfo.ThreadExtendedPANId = bytearray(binascii.unhexlify(args[1]))
if len(networkInfo.ThreadExtendedPANId) != 8:
print "Thread extended PAN id must be 8 bytes in hex"
return
except ValueError:
print "Invalid value specified for thread extended PAN id: " + args[1]
return
kvstart = 3 if (len(args) > 2 and len(args[2].split('=', 1)) == 1) else 2
if (kvstart > 2):
try:
networkInfo.ThreadNetworkKey = bytearray(binascii.unhexlify(args[2]))
except ValueError:
print "Invalid value for Thread Network Key"
return
for addedVal in args[kvstart:]:
pair = addedVal.split('=', 1)
if (len(pair) < 2):
print "Invalid argument: must be key=value format <" + addedVal + ">"
return
name = pair[0]
val = pair[1]
if name == 'key':
name = 'threadnetworkkey'
elif name == 'channel':
name = 'threadchannel'
elif name == 'extended-pan-id':
name = 'threadextendedpanid'
try:
if (name == 'threadchannel' or name == 'thread-channel'):
val = int(val, 10)
elif (name == 'threadnetworkkey' or name == 'thread-network-key' or name == 'thread-key'):
val = bytearray(binascii.unhexlify(val))
elif (name == 'threadextendedpanid' or name == 'thread-extended-pan-id'):
val = bytearray(binascii.unhexlify(val))
elif (name == 'threadpanid' or name == 'thread-pan-id' or name == 'pan-id'):
val = int(val, 16)
except ValueError:
print "Invalid value specified for <" + name + "> field"
return
try:
networkInfo.SetField(name, val)
except Exception, ex:
print str(ex)
return
if networkInfo.ThreadPANId != None:
panId=networkInfo.ThreadPANId
if panId < 1 or panId > 0xffff:
print "Thread PAN Id must be non-zero and 2 bytes in hex"
return
try:
addResult = self.devMgr.AddNetwork(networkInfo)
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
self.lastNetworkId = addResult
print "Add thread network complete (network id = " + str(addResult) + ")"
def do_createthreadnetwork(self, line):
"""
create-thread-network [ <options> ]
Send a request to device to create a new Thread network and wait for a reply.
Options:
--name <name>
Thread network name (string).
--key <key>
Thread network key (hex string of any length).
--panid <panid>
Thread network PAN id (16-bit hex int).
--channel <channel>
Thread network channel number (int). Valid supported range is [11 - 26].
All above parameters are optional and if not specified the value will be created by device.
"""
args = shlex.split(line)
optParser = OptionParser(usage=optparse.SUPPRESS_USAGE, option_class=ExtendedOption)
optParser.add_option("-n", "--name", action="store", dest="threadNetworkName", type="string")
optParser.add_option("-k", "--key", action="store", dest="threadNetworkKey", type="string")
optParser.add_option("-p", "--panid", action="store", dest="threadPANId", type="hexint")
optParser.add_option("-c", "--channel", action="store", dest="threadChannel", type="int")
try:
(options, remainingArgs) = optParser.parse_args(args)
except SystemExit:
return
if (len(remainingArgs) > 0):
print "Unexpected argument: " + remainingArgs[0]
return
networkInfo = WeaveDeviceMgr.NetworkInfo()
networkInfo.NetworkType = WeaveDeviceMgr.NetworkType_Thread
if (options.threadNetworkName):
networkInfo.ThreadNetworkName = options.threadNetworkName
if (options.threadNetworkKey):
networkInfo.ThreadNetworkKey = bytearray(binascii.unhexlify(options.threadNetworkKey))
if (options.threadPANId):
networkInfo.ThreadPANId = options.threadPANId
if (networkInfo.ThreadPANId > 0xffff):
print "Thread PAN Id must be 16-bit hex value."
return
if (options.threadChannel):
networkInfo.ThreadChannel = options.threadChannel
if (networkInfo.ThreadChannel < 11 or networkInfo.ThreadChannel > 26):
print "Thread Channel value must be in a range [11 - 26]."
return
try:
addResult = self.devMgr.AddNetwork(networkInfo)
except WeaveDeviceMgr.DeviceManagerException, ex:
print str(ex)
return
self.lastNetworkId = addResult
print "Create Thread network complete (network id = " + str(addResult) + ")"
def do_updatenetwork(self, line):
"""
update-network <network-id> [ <field>=<value>... ]
Update an existing provisioned network.
<field>:
wifi-ssid or ssid
wifi-mode
wifi-role
wifi-security or security
wifi-key or key
thread-network-name or thread-name
thread-extended-pan-id or pan-id
network-key or thread-key
"""
args = shlex.split(line)
print args
if (len(args) == 0):
print "Usage:"
self.do_help('update-network')
return
if (len(args) < 1):
print "Please specify the network id"
return
networkId = self.parseNetworkId(args[0])
if (networkId == None):
return
self.lastNetworkId = networkId
networkInfo = WeaveDeviceMgr.NetworkInfo(networkId=networkId)
for updatedVal in args[1:]:
nameVal = updatedVal.split('=', 1)
if (len(nameVal) < 2):
print "Invalid argument: updatedVal"
return
try:
networkInfo.SetField(nameVal[0], nameVal[1])
except Exception, ex:
print str(ex)
return
try:
self.devMgr.UpdateNetwork(networkInfo)
| |
if settings['setKey']['status']:
if xmobz.startswith(settings['setKey']['key']):
cmd = xmobz.replace(settings['setKey']['key'],'')
else:
cmd = 'Undefined command'
else:
cmd = text.lower()
return cmd
def removeCmd(text, key=''):
if key == '':
setKey = '' if not settings['setKey']['status'] else settings['setKey']['key']
else:
setKey = key
text_ = text[len(setKey):]
sep = text_.split(' ')
return text_[len(sep[0] + ' '):]
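# Example of the two helpers above, assuming settings['setKey'] is
# {'status': True, 'key': '!'} (hypothetical prefix):
#   removeCmd('!kick @user')  ->  '@user'
# i.e. the configured prefix is stripped first, then the command word itself.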
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
async def mobanzu(op):
try:
if settings["restartPoint"] is not None:
a001.sendMessage(settings["restartPoint"],"[ Bots Operated Again... ]")
settings["restartPoint"] = None
if op.type == 0:
# print ("[ 0 ] END OF OPERATION")
return
if op.type == 11 or op.type == 122:
if op.type == 11: print ("[ 11 ] NOTIFIED UPDATE GROUP")
else: print ("[ 122 ] NOTIFIED UPDATE CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck1 = threading.Thread(target=lockqr, args=(op.param1,)).start()
fck2 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_1 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_2 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_3 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_4 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_5 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_6 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_7 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 == '4':
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_8 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
groupqr = a001.getGroup(op.param1)
if groupqr.preventedJoinByTicket == False:
d23X_9 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_10 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
groupqr = a002.getGroup(op.param1)
if groupqr.preventedJoinByTicket == False:
d23X_11 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_12 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 == '1':
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_13 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
groupn = a001.getGroup(op.param1).name
if groupn not in settings["changeGroupName"][op.param1]:
progn = a001.getGroup(op.param1)
progn.name = settings["changeGroupName"][op.param1]
a001.updateGroup(progn)
d23X_14 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progn = a001.getGroup(op.param1).name
settings["changeGroupName"][op.param1] = progn
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
groupp = a001.getGroup(op.param1).pictureStatus
if groupp not in settings["changeGroupPicture"][op.param1]:
progp = a001.getGroup(op.param1)
progp.pictureStatus = settings["changeGroupPicture"]
a001.updateGroupPicture(progp)
d23X_15 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progp = a001.getGroup(op.param1).pictureStatus
settings["changeGroupPicture"][op.param1] = progp
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
try:
groupn = a002.getGroup(op.param1).name
if groupn not in settings["changeGroupName"][op.param1]:
progn = a002.getGroup(op.param1)
progn.name = settings["changeGroupName"][op.param1]
a002.updateGroup(progn)
d23X_16 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progn = a002.getGroup(op.param1).name
settings["changeGroupName"][op.param1] = progn
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
groupp = a002.getGroup(op.param1).pictureStatus
if groupp not in settings["changeGroupPicture"][op.param1]:
progp = a002.getGroup(op.param1)
progp.pictureStatus = settings["changeGroupPicture"]
a002.updateGroupPicture(progp)
d23X_17 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progp = a002.getGroup(op.param1).pictureStatus
settings["changeGroupPicture"][op.param1] = progp
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
pass
if op.type == 13 or op.type == 124:
if op.type == 13: print ("[ 13 ] NOTIFIED INVITE INTO GROUP")
else: print ("[ 124 ] NOTIFIED INVITE INTO CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck3 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
fck4 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_18 = threading.Thread(target=blacklist, args=(op.param2,)).start()
d23X_19 = threading.Thread(target=blacklist, args=(op.param3,)).start()
try:
d23X_20 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_21 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_22 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_23 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_24 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_25 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_26 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_27 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_28 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_29 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_30 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_31 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_32 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_33 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_34 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_35 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_36 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_37 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if M001D23 in op.param3:
if settings["autoJoin"] == True:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
try:
d23X_38 = threading.Thread(target=join, args=(op.param1,)).start()
except:
pass
else:
try:
d23X_39 = threading.Thread(target=reject, args=(op.param1,)).start()
except:
pass
if M002D23 in op.param3:
if settings["autoJoin"] == True:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
try:
d23X_40 = threading.Thread(target=join, args=(op.param1,)).start()
except:
pass
else:
try:
d23X_41 = threading.Thread(target=reject, args=(op.param1,)).start()
except:
pass
if op.type == 17 or op.type == 130:
if op.type == 17: print ("[ 17 ] NOTIFIED ACCEPT GROUP INVITATION")
else: print ("[ 130 ] NOTIFIED ACCEPT CHAT INVITATION")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck5 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_42 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 19 or op.type == 133:
if op.type == 19: print ("[ 19 ] NOTIFIED KICKOUT FROM GROUP")
else: print ("[ 133 ] NOTIFIED DELETE OTHER FROM CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck6 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_43 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_44 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_45 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 in M001D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_46 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_47 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_48 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
d23X_49 = threading.Thread(target=antijs, args=(op.param1, op.param2)).start()
d23X_50 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in M002D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_51 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_52 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_53 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
d23X_54 = threading.Thread(target=antijs, args=(op.param1, op.param2)).start()
d23X_55 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in M003D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_56 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_57 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_58 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
d23X_59 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 32 or op.type == 126:
if op.type | |
# gs_manager/servers/generic/steam.py
import os
import re
import time
from queue import Empty, Queue
from threading import Thread
from typing import List, Optional, Type, Dict
from subprocess import CalledProcessError # nosec
import click
import click_spinner
import requests
from steamfiles import acf
from gs_manager.command import Config, ServerCommandClass
from gs_manager.command.validators import GenericConfigType, ListFlatten
from gs_manager.decorators import multi_instance, require, single_instance
from gs_manager.servers.base import (
STATUS_FAILED,
STATUS_PARTIAL_FAIL,
STATUS_SUCCESS,
BaseServer,
BaseServerConfig,
)
from gs_manager.utils import get_server_path
from valve.source import NoResponseError
from valve.source.a2s import ServerQuerier
__all__ = ["SteamServer", "SteamServerConfig"]
STEAM_PUBLISHED_FILES_API = "https://api.steampowered.com/ISteamRemoteStorage/GetPublishedFileDetails/v1" # noqa
def _enqueue_output(out, queue):
for line in iter(out.readline, b""):
queue.put(line)
out.close()
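# _enqueue_output implements the usual non-blocking subprocess reader: a daemon
# thread copies stdout lines into a Queue so the caller can poll with
# get_nowait() instead of blocking on readline(). Sketch of the intended usage
# (assumed, mirroring _wait_until_validated below):
#
#   proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)   # hypothetical cmd
#   buffer = Queue()
#   Thread(target=_enqueue_output, args=(proc.stdout, buffer), daemon=True).start()
#   while proc.poll() is None or not buffer.empty():
#       try:
#           line = buffer.get_nowait().decode("utf-8").strip()
#       except Empty:
#           time.sleep(0.1)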
class SteamServerConfig(BaseServerConfig):
steamcmd_path: str = "steamcmd"
steam_query_ip: str = "127.0.0.1"
steam_query_port: Optional[int] = None
workshop_id: int = None
workshop_items: List[str] = []
steam_username: str = None
steam_password: str = None
steam_requires_login: bool = False
app_id: int = None
_validators: Dict[str, List[GenericConfigType]] = {
**BaseServerConfig._validators,
**{
"workshop_items": [ListFlatten],
},
}
@property
def global_options(self):
global_options = super().global_options.copy()
all_options = [
{
"param_decls": ("--steamcmd-path",),
"type": click.Path(),
"help": "Path to steamcmd executable",
},
{
"param_decls": ("--app-id",),
"type": int,
"help": "app ID for Steam game to update from",
},
{
"param_decls": ("--steam-query-port",),
"type": int,
"help": "Port to query to check if server is accessible",
},
{
"param_decls": ("--steam-query-ip",),
"type": int,
"help": "IP to query to check if server is accessible",
},
{
"param_decls": ("--steam-username",),
"type": str,
"help": "Steam username to use instead of anonymous",
},
{
"param_decls": ("--steam-password",),
"type": str,
"help": "Steam password to use instead of anonymous",
},
]
global_options["all"] += all_options
return global_options
class SteamServer(BaseServer):
name: str = "steam"
config_class: Optional[Type[Config]] = SteamServerConfig
_config: SteamServerConfig
_servers: Dict[str, ServerQuerier] = {}
@property
def config(self) -> SteamServerConfig:
return super().config
@property
def server(self) -> Optional[ServerQuerier]:
if self.is_query_enabled():
if self._servers.get(self.server_name) is None:
self._servers[self.server_name] = ServerQuerier(
(
self.config.steam_query_ip,
int(self.config.steam_query_port),
),
)
return self._servers[self.server_name]
return None
def is_accessible(self) -> bool:
if self.is_query_enabled():
try:
self.server.ping()
except NoResponseError:
return False
return True
def is_query_enabled(self) -> bool:
return self.config.steam_query_port is not None
def _parse_line(self, bar, line):
if line is None:
# ignore steamcmd output lines that do not match the progress regex
return bar
step_name = line.group("step_name")
current = int(line.group("current"))
total = int(line.group("total"))
self.logger.debug(
"processed: {}: {} / {}".format(step_name, current, total)
)
if bar is None and current < total:
bar = click.progressbar(
length=total,
show_eta=False,
show_percent=True,
label=step_name,
)
if bar is not None:
bar.update(current)
return bar
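# For reference, the progress regex in _wait_until_validated expects steamcmd
# lines shaped like the following (illustrative example, not captured output):
#   Update state (0x61) downloading, progress: 42.42 (424242 / 1000000)
# from which _parse_line extracts step_name / current / total to drive the
# click progress bar.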
def _wait_until_validated(
self, app_id, process, detailed_status=False, force=False
):
update_verb = "updating"
if force:
update_verb = "valdiating"
if detailed_status:
# this does not work as expected because of a steamcmd bug
# https://github.com/ValveSoftware/Source-1-Games/issues/1684
# https://github.com/ValveSoftware/Source-1-Games/issues/1929
buffer = Queue()
thread = Thread(
target=_enqueue_output,
args=(process.stdout, buffer),
daemon=True,
)
thread.start()
bar = None
line_re = re.compile(
r"Update state \(0x\d+\) (?P<step_name>\w+), progress: "
r"\d+\.\d+ \((?P<current>\d+) \/ (?P<total>\d+)\)"
)
self.logger.debug("start processing output...")
while True:
try:
line = buffer.get_nowait().decode("utf-8").strip()
except Empty:
time.sleep(0.1)
else:
self.logger.debug("line: {}".format(line))
bar = self._parse_line(bar, line_re.match(line))
if process.poll() is not None and buffer.empty():
break
else:
self.logger.info(
f"{update_verb} {app_id}...", nl=False,
)
with click_spinner.spinner():
while process.poll() is None:
time.sleep(1)
def _check_steam_for_update(self, app_id: str, branch: str):
manifest_file = get_server_path(
["steamapps", f"appmanifest_{app_id}.acf"]
)
if not os.path.isfile(manifest_file):
self.logger.debug("No local manifet")
return True
manifest = None
with open(manifest_file, "r") as f:
manifest = acf.load(f)
stdout = self.run_command(
(
f"{self.config.steamcmd_path} +app_info_update 1 "
f"+app_info_print {app_id} +quit"
),
redirect_output=True,
)
index = stdout.find(f'"{app_id}"')
app_info = acf.loads(stdout[index:])
try:
current_buildid = app_info[app_id]["depots"]["branches"][branch][
"buildid"
]
except KeyError:
self.logger.debug("Failed to parse remote manifest")
return True
self.logger.debug(f"current: {manifest['AppState']['buildid']}")
self.logger.debug(f"latest: {current_buildid}")
return manifest["AppState"]["buildid"] != current_buildid
def _get_published_file(self, file_id):
s = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=5)
s.mount("http://", adapter)
r = s.post(
STEAM_PUBLISHED_FILES_API,
{"itemcount": 1, "publishedfileids[0]": file_id},
)
r.raise_for_status()
return r.json()
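# The GetPublishedFileDetails response is consumed in workshop_download below as
#   result["response"]["publishedfiledetails"][0]["time_updated"]
# so a schema change on Valve's side would surface there first. ("result" is an
# illustrative name for the dict returned by this helper.)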
def _stop_servers(self, was_running, reason: Optional[str] = None):
current_instance = self.config.instance_name
multi_instance = self.config.multi_instance
if reason is None:
reason = "Updates found"
if self._command_exists("say_command"):
self.logger.info("notifying users...")
self.set_instance(None, False)
self.invoke(
self.say,
command_string=f"{reason}. Server restarting in 5 minutes",
do_print=False,
parallel=True,
current_instances=f"@each:{','.join(was_running)}",
)
self._wait(300 - self.config.pre_stop)
if self._command_exists("save_command"):
self.logger.info("saving servers...")
self.set_instance(None, False)
self.invoke(
self.command,
command_string=self.config.save_command,
do_print=False,
parallel=True,
current_instances=f"@each:{','.join(was_running)}",
)
self.set_instance(None, False)
self.invoke(
self.stop,
force=False,
reason="New updates found.",
verb="restarting",
parallel=True,
current_instances=f"@each:{','.join(was_running)}",
)
self.set_instance(current_instance, multi_instance)
with open(get_server_path(".start_servers"), "w") as f:
if isinstance(was_running, bool):
f.write("default")
else:
f.write(",".join(was_running))
def _start_servers(self, restart, was_running):
if not restart:
return
if not was_running:
was_running = self._was_running_from_disk()
if not was_running:
return
current_instance = self.config.instance_name
multi_instance = self.config.multi_instance
self.set_instance(None, False)
if len(was_running) == 1 and was_running[0] == "default":
self.invoke(self.start, no_verify=False, foreground=False)
else:
self.invoke(
self.start,
no_verify=False,
foreground=False,
parallel=True,
current_instances=f"@each:{','.join(was_running)}",
)
self.set_instance(current_instance, multi_instance)
def _was_running_from_disk(self):
was_running = False
start_servers = get_server_path(".start_servers")
if os.path.exists(start_servers):
with open(start_servers, "r") as f:
was_running = f.read().strip().split(",")
os.remove(start_servers)
return was_running
def _steam_login(self) -> str:
if self.config.steam_username and self.config.steam_password:
return (
f"+login {self.config.steam_username} "
f"{self.config.steam_password}"
)
elif self.config.steam_requires_login:
raise click.BadParameter(
(
"this server requires a valid Steam login. Provide "
"a --steam-username and --steam-password"
),
self.context,
)
return "+login anonymous"
def str_mods(self, mods):
mods = [str(mod) for mod in mods]
return mods
@multi_instance
@click.command(cls=ServerCommandClass)
@click.pass_obj
def status(self, *args, **kwargs):
""" checks if Steam server is running or not """
if not self.is_running():
self._find_pid(False)
if self.is_running():
try:
if self.is_query_enabled():
server_info = self.server.info()
self.logger.success(f"{self.server_name} is running")
self.logger.info(
f"server name: {server_info['server_name']}"
)
self.logger.info(f"map: {server_info['map']}")
self.logger.info(f"game: {server_info['game']}")
self.logger.info(
f"players: {server_info['player_count']}/"
f"{server_info['max_players']} "
f"({server_info['bot_count']} bots)"
)
self.logger.info(
f"server type: {server_info['server_type']}"
)
self.logger.info(
"password protected: "
f"{server_info['password_protected']}"
)
self.logger.info(f"VAC: {server_info['vac_enabled']}")
self.logger.info(f"version: {server_info['version']}")
else:
self.logger.success(f"{self.server_name} is running")
return STATUS_SUCCESS
except NoResponseError:
self.logger.error(
f"{self.server_name} is running but not accesible"
)
return STATUS_PARTIAL_FAIL
self.logger.warning(f"{self.server_name} is not running")
return STATUS_FAILED
@require("app_id")
@require("steamcmd_path")
@single_instance
@click.option(
"--allow-run", is_flag=True, help="Allow running instances",
)
@click.option(
"-f",
"--force",
is_flag=True,
help="Force a full validate of all mod files",
)
@click.option(
"-s",
"--stop",
is_flag=True,
help="Do a shutdown if instances are running",
)
@click.option(
"-r",
"--restart",
is_flag=True,
help="Do a restart if instances are running",
)
@click.command(cls=ServerCommandClass)
@click.pass_obj
def install(
self,
allow_run: bool,
force: bool,
stop: bool,
restart: bool,
app_id: Optional[int] = None,
*args,
**kwargs,
) -> int:
""" installs/validates/updates the gameserver """
app_id = app_id or self.config.app_id
if not force:
self.logger.info(f"checking for update for {app_id}...")
needs_update = self._check_steam_for_update(
str(app_id), "public"
)
if not needs_update:
self.logger.success(
f"{self.config.app_id} is already on latest version"
)
return STATUS_SUCCESS
was_running = False
if not allow_run:
was_running = self.is_running(check_all=True)
if was_running:
if not (restart or stop):
self.logger.warning(
f"at least once instance of {app_id} "
"is still running"
)
return STATUS_PARTIAL_FAIL
self._stop_servers(
was_running, reason="Updates found for game"
)
process = self.run_command(
(
f"{self.config.steamcmd_path} {self._steam_login()} "
f"+force_install_dir {self.config.server_path} +app_update "
f"{app_id} validate +quit"
),
redirect_output=True,
return_process=True,
)
self._wait_until_validated(app_id, process, force=force)
if process.returncode == 0:
self.logger.success("\nvalidated {}".format(app_id))
self._start_servers(restart, was_running)
return STATUS_SUCCESS
else:
self.logger.error(
"\nfailed to validate {}".format(self.server_name)
)
return STATUS_FAILED
@require("app_id")
@require("workshop_id")
@single_instance
@click.command(cls=ServerCommandClass)
@click.option(
"-w",
"--workshop-id",
type=int,
help="Workshop ID to use for downloading workshop items from",
)
@click.option(
"-i",
"--workshop-items",
type=int,
multiple=True,
help="List of comma seperated IDs for workshop items to download",
)
@click.option(
"--allow-run", is_flag=True, help="Allow running instances",
)
@click.option(
"-f",
"--force",
is_flag=True,
help="Force a full validate of all mod files",
)
@click.option(
"-s",
"--stop",
is_flag=True,
help="Do a shutdown if instances are running",
)
@click.option(
"-r",
"--restart",
is_flag=True,
help="Do a restart if instances are running",
)
@click.pass_obj
def workshop_download(
self,
allow_run: bool,
force: bool,
stop: bool,
restart: bool,
*args,
**kwargs,
) -> int:
""" downloads Steam workshop items """
was_running = False
if not force:
needs_update = self._check_steam_for_update(
str(self.config.workshop_id), "public"
)
if not needs_update:
self.logger.success(
f"{self.config.workshop_id} is already on latest version"
)
self._start_servers(restart, was_running)
return STATUS_SUCCESS
if not allow_run:
was_running = self.is_running(check_all=True)
if was_running:
if not (restart or stop):
self.logger.warning(
f"at least once instance of {self.config.app_id} "
"is still running"
)
return STATUS_PARTIAL_FAIL
self._stop_servers(
was_running, reason="Updates found for workshop app"
)
status = self.invoke(
self.install,
app_id=self.config.workshop_id,
allow_run=True,
force=force,
)
if not status == STATUS_SUCCESS:
return status
if len(self.config.workshop_items) == 0:
self.logger.warning("\nno workshop items selected for install")
return STATUS_PARTIAL_FAIL
mods_to_update = []
manifest_file = get_server_path(
[
"steamapps",
"workshop",
f"appworkshop_{self.config.workshop_id}.acf",
],
)
if not force and os.path.isfile(manifest_file):
manifest = None
with open(manifest_file, "r") as f:
manifest = acf.load(f)
self.logger.info("checking for updates for workshop items...")
with click.progressbar(self.config.workshop_items) as bar:
for workshop_item in bar:
workshop_item = str(workshop_item)
if (
workshop_item
not in manifest["AppWorkshop"][
"WorkshopItemsInstalled"
]
):
mods_to_update.append(workshop_item)
continue
last_update_time = int(
manifest["AppWorkshop"]["WorkshopItemsInstalled"][
workshop_item
]["timeupdated"]
)
try:
latest_metadata = self._get_published_file(
workshop_item
)
except requests.HTTPError:
self.logger.error(
"\ncould not query Steam for updates"
)
return STATUS_FAILED
newest_update_time = int(
latest_metadata["response"]["publishedfiledetails"][0][
"time_updated"
| |
from __future__ import print_function
from __future__ import unicode_literals
from future.builtins import str
#coding=utf8
import sys
sys.path.insert(0, '..')
from par import SimpleVisitor
from par.md import MarkdownGrammar, MarkdownHtmlVisitor
template="""<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" type="text/css" href="bootstrap.min.css"/>
<link rel="stylesheet" type="text/css" href="example.css"/>
<title>%(title)s</title>
</head>
<body>
<div class="container">
%(body)s
</div>
</body>
</html>
"""
tag_class = {
'table':'',
}
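# The template above is a plain %-style format string; a minimal sketch of how it
# gets filled (title/body values are hypothetical, independent of how the par
# visitor renders the Markdown below):
#   page = template % {'title': 'Markdown: Syntax', 'body': '<p>rendered body…</p>'}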
text = u"""
Markdown: Syntax
================
* [Overview](#overview)
* [Philosophy](#philosophy)
* [Inline HTML](#html)
* [Automatic Escaping for Special Characters](#autoescape)
* [Block Elements](#block)
* [Paragraphs and Line Breaks](#p)
* [Headers](#header)
* [Blockquotes](#blockquote)
* [Lists](#list)
* [Code Blocks](#precode)
* [Horizontal Rules](#hr)
* [Span Elements](#span)
* [Links](#link)
* [Emphasis](#em)
* [Code](#code)
* [Images](#img)
* [Miscellaneous](#misc)
* [Backslash Escapes](#backslash)
* [Automatic Links](#autolink)
**Note:** This document is itself written using Markdown; you
can [see the source for it by adding '.text' to the URL][src].
[src]: /projects/markdown/syntax.text
* * *
<h2 id="overview">Overview</h2>
<h3 id="philosophy">Philosophy</h3>
Markdown is intended to be as easy-to-read and easy-to-write as is feasible.
Readability, however, is emphasized above all else. A Markdown-formatted
document should be publishable as-is, as plain text, without looking
like it's been marked up with tags or formatting instructions. While
Markdown's syntax has been influenced by several existing text-to-HTML
filters -- including [Setext] [1], [atx] [2], [Textile] [3], [reStructuredText] [4],
[Grutatext] [5], and [EtText] [6] -- the single biggest source of
inspiration for Markdown's syntax is the format of plain text email.
[1]: http://docutils.sourceforge.net/mirror/setext.html
[2]: http://www.aaronsw.com/2002/atx/
[3]: http://textism.com/tools/textile/
[4]: http://docutils.sourceforge.net/rst.html
[5]: http://www.triptico.com/software/grutatxt.html
[6]: http://ettext.taint.org/doc/
To this end, Markdown's syntax is comprised entirely of punctuation
characters, which punctuation characters have been carefully chosen so
as to look like what they mean. E.g., asterisks around a word actually
look like \*emphasis\*. Markdown lists look like, well, lists. Even
blockquotes look like quoted passages of text, assuming you've ever
used email.
<h3 id="html">Inline HTML</h3>
Markdown's syntax is intended for one purpose: to be used as a
format for *writing* for the web.
Markdown is not a replacement for HTML, or even close to it. Its
syntax is very small, corresponding only to a very small subset of
HTML tags. The idea is *not* to create a syntax that makes it easier
to insert HTML tags. In my opinion, HTML tags are already easy to
insert. The idea for Markdown is to make it easy to read, write, and
edit prose. HTML is a *publishing* format; Markdown is a *writing*
format. Thus, Markdown's formatting syntax only addresses issues that
can be conveyed in plain text.
For any markup that is not covered by Markdown's syntax, you simply
use HTML itself. There's no need to preface it or delimit it to
indicate that you're switching from Markdown to HTML; you just use
the tags.
The only restrictions are that block-level HTML elements -- e.g. `<div>`,
`<table>`, `<pre>`, `<p>`, etc. -- must be separated from surrounding
content by blank lines, and the start and end tags of the block should
not be indented with tabs or spaces. Markdown is smart enough not
to add extra (unwanted) `<p>` tags around HTML block-level tags.
For example, to add an HTML table to a Markdown article:
This is a regular paragraph.
<table>
<tr>
<td>Foo</td>
</tr>
</table>
This is another regular paragraph.
Note that Markdown formatting syntax is not processed within block-level
HTML tags. E.g., you can't use Markdown-style `*emphasis*` inside an
HTML block.
Span-level HTML tags -- e.g. `<span>`, `<cite>`, or `<del>` -- can be
used anywhere in a Markdown paragraph, list item, or header. If you
want, you can even use HTML tags instead of Markdown formatting; e.g. if
you'd prefer to use HTML `<a>` or `<img>` tags instead of Markdown's
link or image syntax, go right ahead.
Unlike block-level HTML tags, Markdown syntax *is* processed within
span-level tags.
<h3 id="autoescape">Automatic Escaping for Special Characters</h3>
In HTML, there are two characters that demand special treatment: `<`
and `&`. Left angle brackets are used to start tags; ampersands are
used to denote HTML entities. If you want to use them as literal
characters, you must escape them as entities, e.g. `&lt;`, and
`&amp;`.
Ampersands in particular are bedeviling for web writers. If you want to
write about 'AT&T', you need to write '`AT&amp;T`'. You even need to
escape ampersands within URLs. Thus, if you want to link to:
http://images.google.com/images?num=30&q=larry+bird
you need to encode the URL as:
http://images.google.com/images?num=30&amp;q=larry+bird
in your anchor tag `href` attribute. Needless to say, this is easy to
forget, and is probably the single most common source of HTML validation
errors in otherwise well-marked-up web sites.
Markdown allows you to use these characters naturally, taking care of
all the necessary escaping for you. If you use an ampersand as part of
an HTML entity, it remains unchanged; otherwise it will be translated
into `&amp;`.
So, if you want to include a copyright symbol in your article, you can write:
&copy;
and Markdown will leave it alone. But if you write:
AT&T
Markdown will translate it to:
AT&amp;T
Similarly, because Markdown supports [inline HTML](#html), if you use
angle brackets as delimiters for HTML tags, Markdown will treat them as
such. But if you write:
4 < 5
Markdown will translate it to:
4 &lt; 5
However, inside Markdown code spans and blocks, angle brackets and
ampersands are *always* encoded automatically. This makes it easy to use
Markdown to write about HTML code. (As opposed to raw HTML, which is a
terrible format for writing about HTML syntax, because every single `<`
and `&` in your example code needs to be escaped.)
* * *
<h2 id="block">Block Elements</h2>
<h3 id="p">Paragraphs and Line Breaks</h3>
A paragraph is simply one or more consecutive lines of text, separated
by one or more blank lines. (A blank line is any line that looks like a
blank line -- a line containing nothing but spaces or tabs is considered
blank.) Normal paragraphs should not be indented with spaces or tabs.
The implication of the "one or more consecutive lines of text" rule is
that Markdown supports "hard-wrapped" text paragraphs. This differs
significantly from most other text-to-HTML formatters (including Movable
Type's "Convert Line Breaks" option) which translate every line break
character in a paragraph into a `<br />` tag.
When you *do* want to insert a `<br />` break tag using Markdown, you
end a line with two or more spaces, then type return.
Yes, this takes a tad more effort to create a `<br />`, but a simplistic
"every line break is a `<br />`" rule wouldn't work for Markdown.
Markdown's email-style [blockquoting][bq] and multi-paragraph [list items][l]
work best -- and look better -- when you format them with hard breaks.
[bq]: #blockquote
[l]: #list
<h3 id="header">Headers</h3>
Markdown supports two styles of headers, [Setext] [1] and [atx] [2].
Setext-style headers are "underlined" using equal signs (for first-level
headers) and dashes (for second-level headers). For example:
This is an H1
=============
This is an H2
-------------
Any number of underlining `=`'s or `-`'s will work.
Atx-style headers use 1-6 hash characters at the start of the line,
corresponding to header levels 1-6. For example:
# This is an H1
## This is an H2
###### This is an H6
Optionally, you may "close" atx-style headers. This is purely
cosmetic -- you can use this if you think it looks better. The
closing hashes don't even need to match the number of hashes
used to open the header. (The number of opening hashes
determines the header level.) :
# This is an H1 #
## This is an H2 ##
### This is an H3 ######
<h3 id="blockquote">Blockquotes</h3>
Markdown uses email-style `>` characters for blockquoting. If you're
familiar with quoting passages of text in an email message, then you
know how to create a blockquote in Markdown. It looks best if you hard
wrap the text and put a `>` before every line:
> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet,
> consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus.
> Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.
>
> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse
> id sem consectetuer libero luctus adipiscing.
Markdown allows you to be lazy and only put the `>` before the first
line of a hard-wrapped paragraph:
> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet,
consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus.
Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.
> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse
id sem consectetuer libero luctus adipiscing.
Blockquotes can be nested (i.e. a blockquote-in-a-blockquote) by
adding additional levels of `>`:
| |
# ojhall94/halletal2019
# !/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
sns.set_palette('colorblind')
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
matplotlib.rc('axes',labelsize=15)
import pandas as pd
import pystan
import corner
import pickle
import glob
import argparse
parser = argparse.ArgumentParser(description='Run our PyStan model on some data')
parser.add_argument('type', type=str, choices=['astero', 'gaia'], help='Choice of PyStan model.')
parser.add_argument('iters', type=int, help='Number of MCMC iterations in PyStan.')
parser.add_argument('corrections', type=str, choices=['None', 'RC'], help='Choice of corrections to the seismic scaling relations.')
parser.add_argument('band', type=str, choices=['K','J','H','GAIA'], help='Choice of photometric passband.')
parser.add_argument('tempdiff', type=float, help='Perturbation to the temperature values in K')
# parser.add_argument('bclabel', type=str, choices=['nn','lt','nt'], help='Temp arg: nn: no prop; lt prop logg and teff; nt prop t only.')
parser.add_argument('-t', '--testing', action='store_const', const=True, default=False, help='Turn on to output results to a test_build folder')
parser.add_argument('-u','--update', action='store_const', const=True, default=False, help='Turn on to update the PyStan model you choose to run')
parser.add_argument('-a','--apokasc', action='store_const', const=True, default=False, help='Turn on to run on the APOKASC subsample')
parser.add_argument('-af', '--apofull', action='store_const', const=True, default=False, help='Turn on to propagate full APOKASC data')
parser.add_argument('-v', '--visual', action='store_const', const=True, default=False, help='Turn on to include cornerplots')
args = parser.parse_args()
import os
import sys
sys.path.append(os.path.expanduser('~')+'/PhD/Hacks_and_Mocks/asfgrid/')
import asfgrid
from omnitool.literature_values import Av_coeffs, hawkvals
from omnitool import scalings
from omnitool.literature_values import Rsol
# __outdir__ = os.path.expanduser('~')+'/Projects/Oli/Output/'
# __datdir__ = os.path.expanduser('~')+'/Projects/Oli/Data/'
__outdir__ = os.path.expanduser('~')+'/PhD/Gaia_Project/Output/'
__datdir__ = os.path.expanduser('~')+'/PhD/Gaia_Project/data/KepxDR2/'
__iter__ = args.iters
def create_astrostan(overwrite=True):
astrostan = '''
functions {
real bailerjones_lpdf(real r, real L){
return log((1/(2*L^3)) * (r*r) * exp(-r/L));
}
real precalc_multinormal_lpdf(vector oo, vector oo_true, real logdetc, matrix invc, int N, real Nfloat){
vector[N] r;
r = oo - oo_true;
return -0.5 * ((r' * invc * r) + logdetc + Nfloat * log(2*pi()));
}
}
data {
int<lower = 0> N;
real<lower= 0> Nfloat;
vector[N] m;
vector<lower=0>[N] m_err;
vector[N] oo;
vector<lower=0>[N] RlEbv;
matrix[N, N] invc;
real logdetc;
real mu_init;
real mu_spread;
real sig_init;
real sig_spread;
}
parameters {
//Hyperparameters
real mu;
real<lower=0.> sigma;
real<lower=1.> sigo;
real<lower=0.5,upper=1.> Q;
real<lower=.1, upper=4000.> L;
real oo_zp;
//Latent parameters
vector[N] M_infd_std;
vector[N] Ai;
vector<lower = 1.>[N] r_infd;
}
transformed parameters{
//Inferred and transformed parameters
vector[N] M_infd;
//Operations
for (n in 1:N){
M_infd[n] = mu + sigma * M_infd_std[n]; //Rescale the M fit
}
}
model {
//Define calculable properties
vector[N] m_true;
vector[N] oo_true;
//Hyperparameters [p(theta_rc, L)]
mu ~ normal(mu_init, mu_spread); // Prior from seismo
sigma ~ normal(sig_init, sig_spread);
Q ~ normal(1., .25);
sigo ~ normal(3.0, 1.0);
L ~ uniform(0.1, 4000.); // Prior on the length scale
oo_zp ~ normal(0.0, 500.); // Prior on the offset (in mu as)
//Latent parameters [p(alpha_i | theta_rc, L)]
Ai ~ normal(RlEbv, 0.05);
for (n in 1:N){
r_infd[n] ~ bailerjones(L);
target += log_mix(Q,
normal_lpdf(M_infd_std[n] | 0., 1.),
normal_lpdf(M_infd_std[n] | 0., sigo));
}
//Calculable properties
for (n in 1:N){
m_true[n] = M_infd[n] + 5*log10(r_infd[n]) - 5 + Ai[n];
oo_true[n] = (1000./r_infd[n]) + (oo_zp/1000.);
}
//Observables [p(D | theta_rc, L, alpha)]
oo ~ precalc_multinormal(oo_true, logdetc, invc, N, Nfloat);
m ~ normal(m_true, m_err); //Measurement uncertainty on magnitude
}
'''
model_path = 'astrostan.pkl'
if overwrite:
print('Updating Stan model')
sm = pystan.StanModel(model_code = astrostan, model_name='astrostan')
with open(model_path, 'wb') as f:
pickle.dump(sm, f)
if not os.path.isfile(model_path):
print('Saving Stan Model')
sm = pystan.StanModel(model_code = astrostan, model_name='astrostan')
with open(model_path, 'wb') as f:
pickle.dump(sm, f)
def create_asterostan(overwrite=True):
asterostan = '''
data {
int<lower = 0> N;
vector[N] Mobs;
vector[N] Munc;
real muH;
}
parameters {
//Hyperparameters
real mu;
real <lower=0.> sigma;
real <lower=0.5,upper=1.> Q;
real <lower=1.> sigo;
//Latent Parameters
vector[N] Mtrue_std;
}
transformed parameters{
vector[N] Mtrue;
for (n in 1:N){
Mtrue[n] = mu + sigma * Mtrue_std[n];
}
}
model {
mu ~ normal(muH, 1.0); //p(theta)
sigma ~ normal(0.0, 1.0); //''
sigo ~ normal(3.0, 2.0); //''
Q ~ normal(1., 0.1); //''
Mobs ~ normal(Mtrue, Munc); //p(D | theta, alpha)
//p(alpha | theta)
for (n in 1:N)
target += log_mix(Q,
normal_lpdf(Mtrue_std[n] | 0., 1.),
normal_lpdf(Mtrue_std[n] | 0., sigo));
}
'''
model_path = 'asterostan.pkl'
if overwrite:
print('Updating Stan model')
sm = pystan.StanModel(model_code = asterostan, model_name='astrostan')
pkl_file = open(model_path, 'wb')
pickle.dump(sm, pkl_file)
pkl_file.close()
if not os.path.isfile(model_path):
print('Saving Stan Model')
sm = pystan.StanModel(model_code = asterostan, model_name='astrostan')
pkl_file = open(model_path, 'wb')
pickle.dump(sm, pkl_file)
pkl_file.close()
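# Note on the asteroseismic model above: Mtrue = mu + sigma * Mtrue_std, with
# Mtrue_std drawn from a two-component mixture, so the implied distribution of
# absolute magnitudes is
#   p(M) = Q * N(M | mu, sigma^2) + (1 - Q) * N(M | mu, (sigma * sigo)^2)
# i.e. a narrow red-clump population plus a broad outlier component of weight (1 - Q).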
def update_stan(model='gaia'):
if model == 'gaia':
create_astrostan(overwrite=True)
if model == 'astero':
create_asterostan(overwrite=True)
if model == 'both':
create_astrostan(overwrite=True)
create_asterostan(overwrite=True)
class run_stan:
def __init__(self, _dat, _init=0., _majorlabel='', _minorlabel='', _stantype='astero'):
'''Core PyStan class.
Input __init__:
_dat (dict): Dictionary of the data in pystan format.
_init (dict): Dictionary of initial guesses in pystan format.
_majorlabel (str): Name of the run set. This will be the name of the local
directory the results are stored in.
_minorlabel (str): Name of the individual run (i.e. a numeric value). This
will be included in the title of all output files.
_stantype (str): Stanmodel to be used, either 'astero' or 'gaia'.
Input __call__:
verbose (bool): If True: saves chains, median and standard deviations on
parameter posteriors, and the rhat values (as well as plot of rhats)
visual (bool): If True: saves cornerplot and the pystan chain plot.
'''
self.dat = _dat
self.init = _init
self.data = _stantype #Either astero or gaia
self.runlabel = __outdir__+_majorlabel+'/'+_stantype+'_'+_minorlabel
#Check folder exists, if not, overwrite
if not os.path.exists(__outdir__+_majorlabel):
os.makedirs(__outdir__+_majorlabel)
def build_metadata(self):
'''Builds label metadata for the run'''
if self.data == 'astero':
self.pars = ['mu', 'sigma', 'Q', 'sigo']
self.verbose = [r'$\mu_{RC} (mag)$',r'$\sigma_{RC} (mag)$',r'$Q$', r'$\sigma_o (mag)$']
if self.data =='gaia':
self.pars = ['mu', 'sigma', 'Q', 'sigo', 'L', 'oo_zp']
self.verbose = [r'$\mu_{RC} (mag)$',r'$\sigma_{RC} (mag)$',r'$Q$', r'$\sigma_o (mag)$', r'$L (pc)$', r'$\varpi_{zp} (\mu as)$']
def read_stan(self):
'''Reads the existing stanmodels'''
if self.data == 'astero':
model_path = 'asterostan.pkl'
if os.path.isfile(model_path):
sm = pickle.load(open(model_path, 'rb'))
else:
print('No stan model found')
create_asterostan(overwrite=True)
sys.exit()
if self.data == 'gaia':
model_path = 'astrostan.pkl'
if os.path.isfile(model_path):
sm = pickle.load(open(model_path, 'rb'))
else:
print('No stan model found')
create_astrostan(overwrite=True)
sys.exit()
return sm
def run_stan(self):
'''Runs PyStan'''
sm = self.read_stan()
if self.init != 0.:
fit = sm.sampling(data = self.dat,
iter= __iter__, chains=4, seed=24601,
init = [self.init, self.init, self.init, self.init])
else:
fit = sm.sampling(data = self.dat, seed=24601,
iter= __iter__, chains=4)
return fit
def out_corner(self, fit):
chain = np.array([fit[label] for label in self.pars])
corner.corner(chain.T,labels=self.verbose,\
quantiles=[0.16, 0.5, 0.84],\
show_titles=True, title_kwargs={"fontsize": 12})
plt.savefig(self.runlabel+'_corner.png')
plt.close('all')
def out_stanplot(self, fit):
fit.plot()
plt.savefig(self.runlabel+'_stanplot.png')
plt.close('all')
def run_output(self, fit):
#Save the chains
chain = np.array([fit[label] for label in self.pars])
np.savetxt(self.runlabel+'_chains.txt',chain)
#Save the full fit extract
outlabel = self.runlabel+'_fullchain_dict.pkl'
output = open(outlabel, 'wb')
pickle.dump(fit.extract(), output)
output.close()
#Save the parameters
pardict = {label:np.median(fit[label]) for label in self.pars}
pardict.update({label+'_std':np.std(fit[label]) for label in self.pars})
pardict = pd.DataFrame.from_dict(pardict,orient='index').T
pardict.to_csv(self.runlabel+'_pars.csv')
#Save the Rhat values
s = fit.summary()
rhat = s['summary'][:,-1]
np.savetxt(self.runlabel+'_rhats.txt', rhat)
def __call__(self, verbose=True, visual=True):
self.build_metadata()
fit = self.run_stan()
if visual:
self.out_corner(fit)
# self.out_stanplot(fit)
if verbose:
self.run_output(fit)
print('Run to '+self.runlabel+' complete!')
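# Minimal usage sketch for run_stan (labels and the muH value are hypothetical;
# the dict keys follow the 'astero' Stan data block above):
#   dat = {'N': len(Mobs), 'Mobs': Mobs, 'Munc': Munc, 'muH': -1.6}
#   runner = run_stan(dat, _init=get_basic_init('astero'),
#                     _majorlabel='example_run', _minorlabel='0.0',
#                     _stantype='astero')
#   runner(verbose=True, visual=False)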
def read_data():
'''Reads in the Yu et al. 2018 data'''
if args.type == 'gaia':
sfile = __datdir__+'rcxyu18.csv'
else:
if args.apokasc:
sfile = __datdir__+'rcxyuxapokasc2.csv'
else:
sfile = __datdir__+'rcxyu18.csv'
df = pd.read_csv(sfile)
return df
def read_paramdict(majorlabel, minorlabel='', sort='astero'):
'''Reads in results for either:
-A full run series (majorlabel) where the minorlabel is included as a
column in the output.
-A single run (majorlabel and minorlabel).
Returns a pandas dataframe.
'''
loc = __outdir__+majorlabel+'/'
if minorlabel != '':
globlist = glob.glob(loc+sort+'_'+str(float(minorlabel))+'_*pars*.csv')
else:
globlist = glob.glob(loc+sort+'*_*pars*.csv')
minorlabels = [os.path.basename(globloc).split('_')[1] for globloc in globlist]
df = pd.DataFrame()
for n, globloc in enumerate(globlist):
sdf = pd.read_csv(globloc, index_col = 0)
if minorlabels[n] != 'pars.csv':
sdf[majorlabel] = minorlabels[n]
df = df.append(sdf)
return df.sort_values(by=majorlabel)
def read_astero_output(majorlabel, minorlabel, sort):
loc = __outdir__+majorlabel+'/'+sort+'_'+str(float(minorlabel))+'_fullchain_dict.pkl'
pkl_file = open(loc, 'rb')
fit = pickle.load(pkl_file)
pkl_file.close()
M_infd = np.median(fit['Mtrue'],axis=0)
M_infd_std = np.median(fit['Mtrue_std'], axis=0)
return M_infd, M_infd_std
def get_basic_init(type='gaia'):
'''Returns a basic series of initial guesses in PyStan format.'''
init = {'mu':-1.7,
'sigma':0.1,
'Q':0.95,
'sigo':4.}
if type == 'gaia':
init['L'] = 1000
return init
def get_fdnu(df):
asf = asfgrid.Seism()
evstate = np.ones(len(df))*2
logz = np.log10(df.Z.values)
teff = df.Teff.values + args.tempdiff
dnu = df.dnu.values
numax = df.numax.values
mass, radius = asf.get_mass_radius(evstate, logz, teff, dnu, numax)
logg = asf.mr2logg(mass, radius)
fdnu = asf._get_fdnu(evstate, logz, teff, mass, logg, fill_value='nearest')
return fdnu
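# get_fdnu queries the asfgrid model grid for the correction factor to the
# Delta-nu scaling relation. Hedged usage sketch (the column name is
# illustrative and the 'RC' condition is an assumption about this script):
#   if args.corrections == 'RC':
#       df['fdnu'] = get_fdnu(df)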
def kernel(ra, dec, sigma, p):
'''
p[0] : Offset
p[1] : Exponential decay scale
'''
dr = np.deg2rad(dec)
thetaij = np.sqrt((np.subtract.outer(ra, ra)*np.cos(0.5*np.add.outer(dr, dr)))**2 + np.subtract.outer(dec, dec)**2)
cov = p[0] * np.exp(-thetaij / p[1])
np.fill_diagonal(cov, np.diag(cov) + sigma**2)
if not np.all(np.linalg.eigvals(cov) > 0):
raise ValueError("The matrix isn't positive-definite | |
# bilgelm/NiMARE
"""
Topic modeling with generalized correspondence latent Dirichlet allocation.
"""
import logging
import os.path as op
import numpy as np
import pandas as pd
import nibabel as nib
from scipy.stats import multivariate_normal
from ...base import AnnotationModel
from ...due import due, Doi
from ...utils import get_template
LGR = logging.getLogger(__name__)
@due.dcite(Doi('10.1371/journal.pcbi.1005649'),
description='Introduces GC-LDA decoding.')
class GCLDAModel(AnnotationModel):
"""
Generate a GCLDA topic model.
Parameters
----------
count_df : :obj:`pandas.DataFrame`
A DataFrame with feature counts for the model. The index is 'id',
used for identifying studies. Other columns are features (e.g.,
unigrams and bigrams from Neurosynth), where each value is the number
of times the feature is found in a given article.
coordinates_df : :obj:`pandas.DataFrame`
A DataFrame with a list of foci in the dataset. The index is 'id',
used for identifying studies. Additional columns include 'x', 'y' and
'z' (the coordinates of the foci in standard space).
n_topics : :obj:`int`, optional
Number of topics to generate in model. The default is 100.
n_regions : :obj:`int`, optional
Number of subregions per topic (>=1). The default is 2.
alpha : :obj:`float`, optional
Prior count on topics for each document. The default is 0.1.
beta : :obj:`float`, optional
Prior count on word-types for each topic. The default is 0.01.
gamma : :obj:`float`, optional
Prior count added to y-counts when sampling z assignments. The
default is 0.01.
delta : :obj:`float`, optional
Prior count on subregions for each topic. The default is 1.0.
dobs : :obj:`int`, optional
Spatial region 'default observations' (# observations weighting
Sigma estimates in direction of default 'roi_size' value). The
default is 25.
roi_size : :obj:`float`, optional
Default spatial 'region of interest' size (default value of
diagonals in covariance matrix for spatial distribution, which the
distributions are biased towards). The default is 50.0.
symmetric : :obj:`bool`, optional
Whether or not to use symmetry constraint on subregions. Symmetry
requires n_regions = 2. The default is False.
seed_init : :obj:`int`, optional
Initial value of random seed. The default is 1.
name : :obj:`str`, optional
Name of model.
"""
def __init__(self, count_df, coordinates_df, mask='Mni152_2mm',
n_topics=100, n_regions=2, symmetric=True, alpha=.1,
beta=.01, gamma=.01, delta=1.0, dobs=25, roi_size=50.0,
seed_init=1, name='gclda'):
LGR.info('Constructing/Initializing GCLDA Model')
# --- Checking to make sure parameters are valid
if (symmetric is True) and (n_regions != 2):
# symmetric model only valid if R = 2
raise ValueError('Cannot run a symmetric model unless #Subregions '
'(n_regions) == 2 !')
# Initialize sampling parameters
self.iter = 0 # Tracks the global sampling iteration of the model
self.seed = 0 # Tracks current random seed to use (gets incremented
# after initialization and each sampling update)
# Set up model hyperparameters
# Pseudo-count hyperparams need to be floats so that when sampling
# distributions are computed the count matrices/vectors are converted
# to floats
self.params = {
'n_topics': n_topics, # Number of topics (T)
'n_regions': n_regions, # Number of subregions (R)
'alpha': alpha, # Prior count on topics for each doc
'beta': beta, # Prior count on word-types for each topic
'gamma': gamma, # Prior count added to y-counts when sampling z assignments
'delta': delta, # Prior count on subregions for each topic
'roi_size': roi_size, # Default ROI (default covariance spatial
# region we regularize towards) (not in paper)
'dobs': dobs, # Sample constant (# observations weighting
# sigma in direction of default covariance)
# (not in paper)
'symmetric': symmetric, # Use constrained symmetry on subregions?
# (only for n_regions = 2)
'seed_init': seed_init, # Random seed for initializing model
}
self.model_name = ('{0}_{1}T_{2}R_alpha{3:.3f}_beta{4:.3f}_'
'gamma{5:.3f}_delta{6:.3f}_{7}dobs_{8:.1f}roi_{9}'
'symmetric_{10}').format(
name, self.params['n_topics'], self.params['n_regions'],
self.params['alpha'], self.params['beta'],
self.params['gamma'], self.params['delta'],
self.params['dobs'], self.params['roi_size'],
self.params['symmetric'], self.params['seed_init'])
# Prepare data
if isinstance(mask, str) and not op.isfile(mask):
self.mask = get_template(mask, mask='brain')
elif isinstance(mask, str) and op.isfile(mask):
self.mask = nib.load(mask)
elif isinstance(mask, nib.Nifti1Image):
self.mask = mask
else:
raise Exception('Input "mask" could not be figured out.')
# Import all word-labels into a list
# List of word-strings (wtoken_word_idx values are indices into this list)
self.vocabulary = count_df.columns.tolist()
# Extract document and word indices from count_df
count_df.index = count_df.index.astype(str)
ids = count_df.index.tolist()
docidx_mapper = {id_: i for (i, id_) in enumerate(ids)}
self.ids = ids
# Create docidx column
count_df['id'] = count_df.index
count_df['docidx'] = count_df['id'].map(docidx_mapper)
count_df = count_df.dropna(subset=['docidx'])
count_df = count_df.drop('id', 1)
# Remove words not found anywhere in the corpus
count_df = count_df.loc[:, (count_df != 0).any(axis=0)]
# Get updated vocabulary
word_labels = count_df.columns.tolist()
word_labels.remove('docidx')
self.word_labels = word_labels
widx_mapper = {word: i for (i, word) in enumerate(self.word_labels)}
# Melt dataframe and create widx column
widx_df = pd.melt(count_df, id_vars=['docidx'], var_name='word',
value_name='count')
widx_df['widx'] = widx_df['word'].map(widx_mapper)
# Replicate rows based on count
widx_df = widx_df.loc[np.repeat(widx_df.index.values, widx_df['count'])]
widx_df = widx_df[['docidx', 'widx']].astype(int)
widx_df.sort_values(by=['docidx', 'widx'], inplace=True)
# List of document-indices for word-tokens
self.wtoken_doc_idx = widx_df['docidx'].tolist()
# List of word-indices for word-tokens
self.wtoken_word_idx = widx_df['widx'].tolist()
# Import all peak-indices into lists
if 'id' not in coordinates_df.columns:
coordinates_df['id'] = coordinates_df.index
coordinates_df['docidx'] = coordinates_df['id'].astype(str).map(docidx_mapper)
coordinates_df = coordinates_df.dropna(subset=['docidx'])
coordinates_df = coordinates_df[['docidx', 'x', 'y', 'z']]
coordinates_df['docidx'] = coordinates_df['docidx'].astype(int)
# List of document-indices for peak-tokens x
self.ptoken_doc_idx = coordinates_df['docidx'].tolist()
self.peak_vals = coordinates_df[['x', 'y', 'z']].values
# Seed random number generator
np.random.seed(self.params['seed_init']) # pylint: disable=no-member
# Preallocate vectors of assignment indices
self.wtoken_topic_idx = np.zeros(
len(self.wtoken_word_idx), dtype=int) # word->topic assignments
# Randomly initialize peak->topic assignments (y) ~ unif(1...n_topics)
self.peak_topic_idx = np.random.randint(
self.params['n_topics'], # pylint: disable=no-member
size=(len(self.ptoken_doc_idx)))
self.peak_region_idx = np.zeros(
len(self.ptoken_doc_idx), dtype=int) # peak->region assignments
# Preallocate count matrices
# Peaks: D x T: Number of peak-tokens assigned to each topic per document
self.n_peak_tokens_doc_by_topic = np.zeros(
(len(self.ids), self.params['n_topics']), dtype=int)
# Peaks: R x T: Number of peak-tokens assigned to each subregion per topic
self.n_peak_tokens_region_by_topic = np.zeros(
(self.params['n_regions'], self.params['n_topics']), dtype=int)
# Words: W x T: Number of word-tokens assigned to each topic per word-type
self.n_word_tokens_word_by_topic = np.zeros(
(len(self.word_labels), self.params['n_topics']), dtype=int)
# Words: D x T: Number of word-tokens assigned to each topic per document
self.n_word_tokens_doc_by_topic = np.zeros(
(len(self.ids), self.params['n_topics']), dtype=int)
# Words: 1 x T: Total number of word-tokens assigned to each topic (across all docs)
self.total_n_word_tokens_by_topic = np.zeros(
(1, self.params['n_topics']), dtype=int)
# Preallocate Gaussians for all subregions
# Regions_Mu & Regions_Sigma: Gaussian mean and covariance for all
# subregions of all topics
# Formed using lists (over topics) of lists (over subregions) of numpy
# arrays
# regions_mu = (n_topics, n_regions, 1, n_peak_dims)
# regions_sigma = (n_topics, n_regions, n_peak_dims, n_peak_dims)
self.regions_mu = []
self.regions_sigma = []
for i_topic in range(self.params['n_topics']):
topic_mu = []
topic_sigma = []
for j_region in range(self.params['n_regions']):
topic_mu.append(np.zeros((1, self.peak_vals.shape[1])))
topic_sigma.append(np.zeros(
(self.peak_vals.shape[1], self.peak_vals.shape[1])))
self.regions_mu.append(topic_mu) # (\mu^{(t)}_r)
self.regions_sigma.append(topic_sigma) # (\sigma^{(t)}_r)
# Initialize lists for tracking log-likelihood of data over sampling iterations
self.loglikely_iter = [] # Tracks iteration we compute each loglikelihood at
self.loglikely_x = [] # Tracks log-likelihood of peak tokens
self.loglikely_w = [] # Tracks log-likelihood of word tokens
self.loglikely_tot = [] # Tracks log-likelihood of peak + word tokens
# Initialize peak->subregion assignments (r)
if not self.params['symmetric']:
            # if asymmetric model, randomly sample r ~ unif(1...n_regions)
self.peak_region_idx[:] = np.random.randint(
self.params['n_regions'], # pylint: disable=no-member
size=(len(self.ptoken_doc_idx)))
else:
            # if symmetric model, use deterministic assignment:
            # if peak_val[0] > 0, r = 1, else r = 0
self.peak_region_idx[:] = (self.peak_vals[:, 0] > 0).astype(int)
# Update model vectors and count matrices to reflect y and r assignments
for i_ptoken in range(len(self.ptoken_doc_idx)):
            # document index (d)
doc = self.ptoken_doc_idx[i_ptoken]
topic = self.peak_topic_idx[i_ptoken] # peak-token -> topic assignment (y_i)
region = self.peak_region_idx[i_ptoken] # peak-token -> subregion assignment (c_i)
self.n_peak_tokens_doc_by_topic[doc, topic] += 1 # Increment document-by-topic counts
self.n_peak_tokens_region_by_topic[region, topic] += 1 # Increment region-by-topic
# Randomly Initialize Word->Topic Assignments (z) for each word
# token w_i: sample z_i proportional to p(topic|doc_i)
for i_wtoken in range(len(self.wtoken_word_idx)):
# w_i word-type
word = self.wtoken_word_idx[i_wtoken]
# w_i doc-index
doc = self.wtoken_doc_idx[i_wtoken]
# Estimate p(t|d) for current doc
p_topic_g_doc = self.n_peak_tokens_doc_by_topic[doc] + self.params['gamma']
# Sample a topic from p(t|d) for the z-assignment
probs = np.cumsum(p_topic_g_doc) # Compute a cdf of the sampling
# distribution for z
# Which elements of cdf are less than random sample?
sample_locs = probs < np.random.rand() * probs[-1] # pylint: | |
import csv
import logging
import os
from dataset_manager.enums import FeatureType, FeatureFunctionType
logger = logging.getLogger(__name__)
class OpensmileExtractor():
CONFIG_ENERGY = os.path.join(os.path.dirname(__file__), "configurations/energy.conf")
CONFIG_EMOTION_FEATURES = os.path.join(os.path.dirname(__file__), "configurations/emotion-features.conf")
CONFIG_MFCC = os.path.join(os.path.dirname(__file__), "configurations/mfcc.conf")
CONFIG_PITCH = os.path.join(os.path.dirname(__file__), "configurations/pitch.conf")
CONFIG_LSP = os.path.join(os.path.dirname(__file__), "configurations/lsp.conf")
CONFIG_INTENSITY = os.path.join(os.path.dirname(__file__), "configurations/intensity.conf")
CONFIG_MZCR = os.path.join(os.path.dirname(__file__), "configurations/mzcr.conf")
CONFIG_SPECTRAL = os.path.join(os.path.dirname(__file__), "configurations/spectral.conf")
configurations = {
FeatureType.ENERGY: CONFIG_ENERGY,
FeatureType.MFCC_1: CONFIG_MFCC,
FeatureType.MFCC_2: CONFIG_MFCC,
FeatureType.MFCC_3: CONFIG_MFCC,
FeatureType.MFCC_4: CONFIG_MFCC,
FeatureType.MFCC_5: CONFIG_MFCC,
FeatureType.MFCC_6: CONFIG_MFCC,
FeatureType.MFCC_7: CONFIG_MFCC,
FeatureType.MFCC_8: CONFIG_MFCC,
FeatureType.MFCC_9: CONFIG_MFCC,
FeatureType.MFCC_10: CONFIG_MFCC,
FeatureType.MFCC_11: CONFIG_MFCC,
FeatureType.MFCC_12: CONFIG_MFCC,
FeatureType.PITCH_VOICE_PROB: CONFIG_PITCH,
FeatureType.PITCH_F0: CONFIG_PITCH,
FeatureType.LSP_1: CONFIG_LSP,
FeatureType.LSP_2: CONFIG_LSP,
FeatureType.LSP_3: CONFIG_LSP,
FeatureType.LSP_4: CONFIG_LSP,
FeatureType.LSP_5: CONFIG_LSP,
FeatureType.LSP_6: CONFIG_LSP,
FeatureType.LSP_7: CONFIG_LSP,
FeatureType.INTENSITY: CONFIG_INTENSITY,
FeatureType.LOUDNESS: CONFIG_INTENSITY,
FeatureType.MZCR: CONFIG_MZCR,
FeatureType.SPECTRAL_1: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_2: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_3: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_4: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_5: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_ROLLOFF_1: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_ROLLOFF_2: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_ROLLOFF_3: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_ROLLOFF_4: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_FLUX: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_CENTROID: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_MAX_POS: CONFIG_SPECTRAL,
FeatureType.SPECTRAL_MIN_POS: CONFIG_SPECTRAL
}
def _extract(self, config, params=""):
"""
Opensmile extraction by calling the command-line extraction command.
"""
logger.info("OpenSmile extraction with config: %s", config)
smile_cmd = os.popen("which SMILExtract").read().replace('\n', '')
cmd = '{} -C "{}" {}'.format(smile_cmd, config, params)
os.system(cmd)
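    # Illustration of the command assembled by `_extract` (the SMILExtract path,
    # input/output paths, and framesize below are hypothetical, not from a real run):
    #   /usr/local/bin/SMILExtract -C ".../configurations/energy.conf" \
    #       -I "/data/video.wav" -O "/data/video.energy.csv" -F 0.04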
# def compute_energy(self, video):
# """
# Energy extraction for a given video. Returns a list containing the energy rms for each frame.
#
# :param video:
# :return:
# """
#
# logger.info("Computing sound energy")
#
# audio_path = video.audio_path
# output_file = os.path.join(video.dataset.audio_folder_path, "{}.energy.csv".format(video.name))
#
# # Get video frame-rate, and compute framesize (in seconds)
# fps = video.video_part.fps
# framesize = 1 / fps
#
# # Prepare params (input file, output file, framesize)
# params = '-I "{}" -O "{}" -F {}'.format(audio_path, output_file, framesize)
#
# # Do extraction
# self._extract(self.CONFIG_ENERGY, params)
#
# energy = list()
#
# # Read the csv and add them to the multimedia element
# with open(output_file, 'rb') as csvfile:
# csv_reader = csv.DictReader(csvfile, delimiter=';')
# for i, row in enumerate(csv_reader):
# energy.append([i, float(row['pcm_RMSenergy'])])
#
# # Cleanup
# os.remove(output_file)
#
# return energy
def compute(self, features):
video = features.multimedia_part.video
audio_path = video.audio_path
output_file = os.path.join(video.dataset.audio_path, "{}.output.csv".format(video.name))
# Get video frame-rate, and compute framesize (in seconds)
fps = video.video_part.fps
framesize = 1 / fps
        # First pass: a single value per video frame (FeatureFunctionType.VALUE), used for arousal
# Prepare params (input file, output file, framesize)
params = '-I "{}" -O "{}" -F {}'.format(audio_path, output_file, framesize)
# Do extraction
self._extract(self.configurations[features.type], params)
result = dict()
function = FeatureFunctionType.VALUE
# Read the csv and add them to the multimedia element
crt_nb_frame = 0
with open(output_file, 'rb') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=';')
for i, row in enumerate(csv_reader):
crt_nb_frame += 1
                if function not in result:
result[function] = list()
function_field = FeatureFunctionType.fields[function]
                if features.type in FeatureType.fields:
field = FeatureType.fields[features.type] + "_" + function_field
else:
field = function_field
result[function].append([i, float(row[field])])
os.remove(output_file)
# Add 0 values to the end as OpenSmile does not process the last second!
for i in range(crt_nb_frame, video.nb_frames):
result[function].append([i, 0.])
# Prepare params (input file, output file, framesize)
params = '-I "{}" -O "{}" -F {}'.format(audio_path, output_file, framesize)
# Do extraction
self._extract(self.configurations[features.type] + '.1s', params)
# Read the csv and add them to the multimedia element
crt_nb_frame = 0
with open(output_file, 'rb') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=';')
for i, row in enumerate(csv_reader):
crt_nb_frame += 1
frame_idx = int(i*fps)
for function in FeatureType.functions[features.type]:
                    if function not in result:
result[function] = list()
function_field = FeatureFunctionType.fields[function]
                    if features.type in FeatureType.fields:
field = FeatureType.fields[features.type] + "_" + function_field
else:
field = function_field
result[function].append([frame_idx, float(row[field])])
# Cleanup
os.remove(output_file)
return result
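    # Sketch of the dictionary returned by `compute` (values illustrative; functionals
    # other than VALUE depend on FeatureType.functions[features.type]):
    #   {
    #       FeatureFunctionType.VALUE: [[0, 0.12], [1, 0.11], ..., [nb_frames - 1, 0.0]],
    #       <functional>: [[0, 0.10], [25, 0.09], ...],  # one entry per second, keyed by frame index
    #   }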
# def compute_mfcc_with_functionals(self, video):
# """
# MFCC features extraction for a given video. Returns a dictionary containing the list of mfcc[1-12] functionals
#
# Returned dictionary looks like:
# { "frameTime": 0.0,
# "mfcc_sma[7]_skewness": 0.1635602,
# "mfcc_sma[8]_amean": -3.041601,
# "mfcc_sma[5]_range": 5.615002, ...
# """
# logger.info("Computing mfcc")
#
# audio_path = video.audio_path
# output_file = os.path.join(video.dataset.audio_folder_path, "{}.mfcc.csv".format(video.name))
#
# # Get video frame-rate, and compute framesize (in seconds)
# fps = video.video_part.fps
# framesize = 1 / fps
#
# # Prepare params (input file, output file, framesize)
# params = '-I "{}" -O "{}" -F {}'.format(audio_path, output_file, framesize)
#
# # Do extraction
# self._extract(self.CONFIG_MFCC, params)
#
#
# mfcc = list()
#
# # Read the csv and add them to the multimedia element
# with open(output_file, 'rb') as csvfile:
# csv_reader = csv.DictReader(csvfile, delimiter=';')
# for i, row in enumerate(csv_reader):
# frameFeatures = {}
# frameFeatures['frameTime'] = float(row['frameTime'])
# for j in range(1,13) :
# frameFeatures['mfcc_sma['+str(j)+']_max'] = float(row['mfcc_sma['+str(j)+']_max'])
# frameFeatures['mfcc_sma['+str(j)+']_min'] = float(row['mfcc_sma['+str(j)+']_min'])
# frameFeatures['mfcc_sma['+str(j)+']_range'] = float(row['mfcc_sma['+str(j)+']_range'])
# frameFeatures['mfcc_sma['+str(j)+']_maxPos'] = float(row['mfcc_sma['+str(j)+']_maxPos'])
# frameFeatures['mfcc_sma['+str(j)+']_minPos'] = float(row['mfcc_sma['+str(j)+']_minPos'])
# frameFeatures['mfcc_sma['+str(j)+']_amean'] = float(row['mfcc_sma['+str(j)+']_amean'])
# frameFeatures['mfcc_sma['+str(j)+']_linregc1'] = float(row['mfcc_sma['+str(j)+']_linregc1'])
# frameFeatures['mfcc_sma['+str(j)+']_linregc2'] = float(row['mfcc_sma['+str(j)+']_linregc2'])
# frameFeatures['mfcc_sma['+str(j)+']_linregerrQ'] = float(row['mfcc_sma['+str(j)+']_linregerrQ'])
# frameFeatures['mfcc_sma['+str(j)+']_skewness'] = float(row['mfcc_sma['+str(j)+']_skewness'])
# frameFeatures['mfcc_sma['+str(j)+']_kurtosis'] = float(row['mfcc_sma['+str(j)+']_kurtosis'])
# frameFeatures['mfcc_sma_de['+str(j)+']_max'] = float(row['mfcc_sma_de['+str(j)+']_max'])
# frameFeatures['mfcc_sma_de['+str(j)+']_min'] = float(row['mfcc_sma_de['+str(j)+']_min'])
# frameFeatures['mfcc_sma_de['+str(j)+']_range'] = float(row['mfcc_sma_de['+str(j)+']_range'])
# frameFeatures['mfcc_sma_de['+str(j)+']_maxPos'] = float(row['mfcc_sma_de['+str(j)+']_maxPos'])
# frameFeatures['mfcc_sma_de['+str(j)+']_minPos'] = float(row['mfcc_sma_de['+str(j)+']_minPos'])
# frameFeatures['mfcc_sma_de['+str(j)+']_amean'] = float(row['mfcc_sma_de['+str(j)+']_amean'])
# frameFeatures['mfcc_sma_de['+str(j)+']_linregc1'] = float(row['mfcc_sma_de['+str(j)+']_linregc1'])
# frameFeatures['mfcc_sma_de['+str(j)+']_linregc2'] = float(row['mfcc_sma_de['+str(j)+']_linregc2'])
# frameFeatures['mfcc_sma_de['+str(j)+']_linregerrQ'] = float(row['mfcc_sma_de['+str(j)+']_linregerrQ'])
# frameFeatures['mfcc_sma_de['+str(j)+']_skewness'] = float(row['mfcc_sma_de['+str(j)+']_skewness'])
# frameFeatures['mfcc_sma_de['+str(j)+']_kurtosis'] = float(row['mfcc_sma_de['+str(j)+']_kurtosis'])
# mfcc.append(frameFeatures)
# # Cleanup
# os.remove(output_file)
#
# return mfcc
#
# def compute_mfcc_with_functionals_on_relevant_parts(self, video):
# """
# MFCC features extraction for a given video. Returns a dictionary containing the list of mfcc[1-12] functionals
#
# Returned dictionary looks like:
# { "frameTime": 0.0,
# "mfcc_sma[7]_skewness": 0.1635602,
# "mfcc_sma[8]_amean": -3.041601,
# "mfcc_sma[5]_range": 5.615002, ...
# """
# logger.info("Computing mfcc")
#
# audio_path = video.audio_path
# output_file = os.path.join(video.dataset.audio_folder_path, "{}.mfcc.csv".format(video.name))
#
# # Get video frame-rate, and compute framesize (in seconds)
# fps = video.video_part.fps
# framesize = 1 / fps
#
#
# mfcc = list()
#
# for partition in video.arousal.arousal_partitions:
#
# # Prepare params (input file, output file, framesize)
# start_second = partition[0][0] / fps
# end_second = partition[1][0] /fps
# output_file = os.path.join(video.dataset.audio_folder_path, "{}.mfcc.csv".format(video.name))
# params = '-I "{}" -O "{}" -F {} -S {} -E {}'.format(audio_path, output_file, framesize, start_second, end_second)
#
# # Do extraction
# self._extract(self.CONFIG_MFCC, params)
#
# # Read the csv and add them to the multimedia element
# with open(output_file, 'rb') as csvfile:
# csv_reader = csv.DictReader(csvfile, delimiter=';')
# for i, row in enumerate(csv_reader):
# frameFeatures = {}
# frameFeatures['frameTime'] = float(row['frameTime']) + int(start_second)
# for j in range(1,13) :
# frameFeatures['mfcc_sma['+str(j)+']_max'] = float(row['mfcc_sma['+str(j)+']_max'])
# frameFeatures['mfcc_sma['+str(j)+']_min'] = float(row['mfcc_sma['+str(j)+']_min'])
# frameFeatures['mfcc_sma['+str(j)+']_range'] = float(row['mfcc_sma['+str(j)+']_range'])
# frameFeatures['mfcc_sma['+str(j)+']_maxPos'] = float(row['mfcc_sma['+str(j)+']_maxPos'])
# frameFeatures['mfcc_sma['+str(j)+']_minPos'] = float(row['mfcc_sma['+str(j)+']_minPos'])
# frameFeatures['mfcc_sma['+str(j)+']_amean'] = float(row['mfcc_sma['+str(j)+']_amean'])
# frameFeatures['mfcc_sma['+str(j)+']_linregc1'] = float(row['mfcc_sma['+str(j)+']_linregc1'])
# frameFeatures['mfcc_sma['+str(j)+']_linregc2'] = float(row['mfcc_sma['+str(j)+']_linregc2'])
# frameFeatures['mfcc_sma['+str(j)+']_linregerrQ'] = float(row['mfcc_sma['+str(j)+']_linregerrQ'])
# frameFeatures['mfcc_sma['+str(j)+']_skewness'] = float(row['mfcc_sma['+str(j)+']_skewness'])
# frameFeatures['mfcc_sma['+str(j)+']_kurtosis'] = float(row['mfcc_sma['+str(j)+']_kurtosis'])
# frameFeatures['mfcc_sma_de['+str(j)+']_max'] = float(row['mfcc_sma_de['+str(j)+']_max'])
# frameFeatures['mfcc_sma_de['+str(j)+']_min'] = float(row['mfcc_sma_de['+str(j)+']_min'])
# frameFeatures['mfcc_sma_de['+str(j)+']_range'] = float(row['mfcc_sma_de['+str(j)+']_range'])
# frameFeatures['mfcc_sma_de['+str(j)+']_maxPos'] = float(row['mfcc_sma_de['+str(j)+']_maxPos'])
# frameFeatures['mfcc_sma_de['+str(j)+']_minPos'] = float(row['mfcc_sma_de['+str(j)+']_minPos'])
# frameFeatures['mfcc_sma_de['+str(j)+']_amean'] = float(row['mfcc_sma_de['+str(j)+']_amean'])
# frameFeatures['mfcc_sma_de['+str(j)+']_linregc1'] = float(row['mfcc_sma_de['+str(j)+']_linregc1'])
# frameFeatures['mfcc_sma_de['+str(j)+']_linregc2'] = float(row['mfcc_sma_de['+str(j)+']_linregc2'])
# frameFeatures['mfcc_sma_de['+str(j)+']_linregerrQ'] = float(row['mfcc_sma_de['+str(j)+']_linregerrQ'])
# frameFeatures['mfcc_sma_de['+str(j)+']_skewness'] = float(row['mfcc_sma_de['+str(j)+']_skewness'])
# frameFeatures['mfcc_sma_de['+str(j)+']_kurtosis'] = float(row['mfcc_sma_de['+str(j)+']_kurtosis'])
# mfcc.append(frameFeatures)
# # Cleanup
# os.remove(output_file)
#
# return mfcc
#
#
# def compute_pitch_with_functionals(self, video):
# """
# Pitch feature extraction for a given video. Returns a dictionary containing the list of pitch functionals
#
# Returned dictionary looks like:
# { "frameTime": 0.0,
# ""F0_sma_amean": 0.0,
# "F0_sma_minPos": 0.0,
# "F0_sma_linregc2": 0.0, ...
# """
# logger.info("Computing pitch")
#
# audio_path = video.audio_path
# output_file = os.path.join(video.dataset.audio_folder_path, "{}.pitch.csv".format(video.name))
#
# # Get video frame-rate, and compute framesize (in seconds)
# fps = video.video_part.fps
# framesize = 1 / fps
#
# # Prepare params (input file, output file, framesize)
# params = '-I "{}" -O "{}" -F {}'.format(audio_path, output_file, framesize)
#
# # Do extraction
# self._extract(self.CONFIG_PITCH, params)
#
#
# pitch = list()
#
# #Read the csv and add them to the multimedia element
# with open(output_file, 'rb') as csvfile:
# csv_reader = csv.DictReader(csvfile, delimiter=';')
# for i, row in enumerate(csv_reader):
# frameFeatures = {}
# frameFeatures['frameTime'] = float(row['frameTime'])
# frameFeatures['voiceProb_sma_max'] = float(row['voiceProb_sma_max'])
# frameFeatures['voiceProb_sma_min'] = float(row['voiceProb_sma_min'])
# frameFeatures['voiceProb_sma_range'] = float(row['voiceProb_sma_range'])
# frameFeatures['voiceProb_sma_maxPos'] = float(row['voiceProb_sma_maxPos'])
# frameFeatures['voiceProb_sma_minPos'] = float(row['voiceProb_sma_minPos'])
# frameFeatures['voiceProb_sma_amean'] = float(row['voiceProb_sma_amean'])
# frameFeatures['voiceProb_sma_linregc1'] = float(row['voiceProb_sma_linregc1'])
# frameFeatures['voiceProb_sma_linregc2'] = float(row['voiceProb_sma_linregc2'])
# frameFeatures['voiceProb_sma_linregerrQ'] = float(row['voiceProb_sma_linregerrQ'])
# frameFeatures['voiceProb_sma_stddev'] = | |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Plivo Team. See LICENSE.txt for details.
import os
import uuid
import time
import math
import unittest
import msgpack
from sharq import SharQ
from sharq.utils import generate_epoch
class SharQTestCase(unittest.TestCase):
"""
`SharQTestCase` contains the functional test cases
that validate the correctness of all the APIs exposed
by SharQ.
"""
def setUp(self):
cwd = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(cwd, 'sharq.test.conf') # test config
self.queue = SharQ(config_path)
# flush all the keys in the test db before starting test
self.queue._r.flushdb()
# test specific values
self._test_queue_id = 'johndoe'
self._test_queue_type = 'sms'
self._test_payload_1 = {
'to': '1000000000',
'message': 'Hello, world'
}
self._test_payload_2 = {
'to': '1000000001',
'message': 'Hello, SharQ'
}
self._test_requeue_limit_5 = 5
self._test_requeue_limit_neg_1 = -1
self._test_requeue_limit_0 = 0
self._test2_queue_id = 'thetourist'
self._test2_queue_type = 'package'
def _get_job_id(self):
"""Generates a uuid4 and returns the string
representation of it.
"""
return str(uuid.uuid4())
def test_enqueue_response_status(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
self.assertEqual(response['status'], 'queued')
def test_enqueue_job_queue_existence(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if the job queue exists
queue_name = '%s:%s:%s' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id
)
self.assertTrue(self.queue._r.exists(queue_name))
def test_enqueue_job_existence_in_job_queue(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if the queue contains the job we just pushed (by peeking)
queue_name = '%s:%s:%s' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id
)
latest_job_id = self.queue._r.lrange(queue_name, -1, -1)
self.assertEqual(latest_job_id, [job_id])
def test_enqueue_job_queue_length(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if the queue length is one
queue_name = '%s:%s:%s' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id
)
queue_length = self.queue._r.llen(queue_name)
self.assertEqual(queue_length, 1)
def test_enqueue_payload_dump(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if the payload is saved in the appropriate structure
payload_map_name = '%s:payload' % (self.queue._key_prefix)
# check if the payload map exists
self.assertTrue(self.queue._r.exists(payload_map_name))
def test_enqueue_payload_encode_decode(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
payload_map_name = '%s:payload' % (self.queue._key_prefix)
payload_map_key = '%s:%s:%s' % (
self._test_queue_type, self._test_queue_id, job_id)
raw_payload = self.queue._r.hget(payload_map_name, payload_map_key)
# decode the payload from msgpack to dictionary
payload = msgpack.unpackb(raw_payload[1:-1])
self.assertEqual(payload, self._test_payload_1)
def test_enqueue_interval_map_existence(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if interval is saved in the appropriate structure
interval_map_name = '%s:interval' % (self.queue._key_prefix)
# check if interval map exists
self.assertTrue(self.queue._r.exists(interval_map_name))
def test_enqueue_interval_value(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check if interval is saved in the appropriate structure
interval_map_name = '%s:interval' % (self.queue._key_prefix)
interval_map_key = '%s:%s' % (
self._test_queue_type, self._test_queue_id)
interval = self.queue._r.hget(
interval_map_name, interval_map_key)
self.assertEqual(interval, '10000') # 10s (10000ms)
def test_enqueue_requeue_limit_map_existence(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type
# without a requeue limit parameter
)
# check if requeue limit is saved in the appropriate structure
requeue_limit_map_name = '%s:%s:%s:requeues_remaining' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id,
)
# check if requeue limit map exists
self.assertTrue(self.queue._r.exists(requeue_limit_map_name))
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
requeue_limit=self._test_requeue_limit_5
)
# check if requeue limit is saved in the appropriate structure
requeue_limit_map_name = '%s:%s:%s:requeues_remaining' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id,
)
# check if requeue limit map exists
self.assertTrue(self.queue._r.exists(requeue_limit_map_name))
def test_enqueue_requeue_limit_value(self):
# without requeue limit (but reading from the config)
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type
# without requeue limit.
)
# check if requeue limit is saved in the appropriate structure
requeue_limit_map_name = '%s:%s:%s:requeues_remaining' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id,
)
requeues_remaining = self.queue._r.hget(
requeue_limit_map_name, job_id)
self.assertEqual(requeues_remaining, '-1') # from the config file.
# with requeue limit in the enqueue function.
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
requeue_limit=self._test_requeue_limit_5
)
# check if requeue limit is saved in the appropriate structure
requeue_limit_map_name = '%s:%s:%s:requeues_remaining' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id,
)
requeues_remaining = self.queue._r.hget(
requeue_limit_map_name, job_id)
self.assertEqual(requeues_remaining, '5') # 5 retries remaining.
def test_enqueue_ready_set(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
sorted_set_name = '%s:%s' % (
self.queue._key_prefix, self._test_queue_type)
self.assertTrue(self.queue._r.exists(sorted_set_name))
def test_enqueue_ready_set_contents(self):
job_id = self._get_job_id()
start_time = str(generate_epoch())
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
end_time = str(generate_epoch())
sorted_set_name = '%s:%s' % (
self.queue._key_prefix, self._test_queue_type)
queue_id_list = self.queue._r.zrangebyscore(
sorted_set_name,
start_time,
end_time)
# check if exactly one item in the list
self.assertEqual(len(queue_id_list), 1)
# check the value to match the queue_id
self.assertEqual(queue_id_list[0], self._test_queue_id)
def test_enqueue_queue_type_ready_set(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# check the queue type ready set.
queue_type_ready_set = self.queue._r.smembers(
'%s:ready:queue_type' % self.queue._key_prefix)
self.assertEqual(len(queue_type_ready_set), 1)
self.assertEqual(queue_type_ready_set.pop(), self._test_queue_type)
def test_enqueue_queue_type_active_set(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
        queue_type_active_set = self.queue._r.smembers(
            '%s:active:queue_type' % self.queue._key_prefix)
        self.assertEqual(len(queue_type_active_set), 0)
def test_enqueue_metrics_global_enqueue_counter(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
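        # Worked example of the minute-bucket rounding used in the counter key:
        #   timestamp = 1400000123456 ms -> floor(1400000123456 / 60000) * 60000
        #             = 23333335 * 60000 = 1400000100000 (start of that minute, in ms)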
counter_value = self.queue._r.get('%s:enqueue_counter:%s' % (
self.queue._key_prefix, timestamp_minute))
self.assertEqual(counter_value, '1')
def test_enqueue_metrics_per_queue_enqueue_counter(self):
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
response = self.queue.dequeue(
queue_type=self._test_queue_type
)
timestamp = int(generate_epoch())
# epoch for the minute.
timestamp_minute = str(int(math.floor(timestamp / 60000.0) * 60000))
counter_value = self.queue._r.get('%s:%s:%s:enqueue_counter:%s' % (
self.queue._key_prefix,
self._test_queue_type,
self._test_queue_id,
timestamp_minute))
self.assertEqual(counter_value, '1')
def test_enqueue_second_job_status(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
self.assertEqual(response['status'], 'queued')
def test_enqueue_second_job_queue_existence(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
queue_name = '%s:%s:%s' % (
self.queue._key_prefix, self._test_queue_type, self._test_queue_id)
self.assertTrue(self.queue._r.exists(queue_name))
def test_enqueue_second_job_existence_in_job_queue(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
queue_name = '%s:%s:%s' % (
self.queue._key_prefix, self._test_queue_type, self._test_queue_id)
latest_job_id = self.queue._r.lrange(queue_name, -1, -1)
self.assertEqual(latest_job_id, [job_id])
def test_enqueue_second_job_queue_length(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
queue_name = '%s:%s:%s' % (
self.queue._key_prefix, self._test_queue_type, self._test_queue_id)
# check if the queue length is two
queue_length = self.queue._r.llen(queue_name)
self.assertEqual(queue_length, 2)
def test_enqueue_second_job_payload_dump(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
payload_map_name = '%s:payload' % (self.queue._key_prefix)
# check if the payload map exists
self.assertTrue(self.queue._r.exists(payload_map_name))
def test_enqueue_second_job_payload_encode_decode(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
payload_map_name = '%s:payload' % (self.queue._key_prefix)
payload_map_key = '%s:%s:%s' % (
self._test_queue_type, self._test_queue_id, job_id)
raw_payload = self.queue._r.hget(payload_map_name, payload_map_key)
# decode the payload from msgpack to dictionary
payload = msgpack.unpackb(raw_payload[1:-1])
self.assertEqual(payload, self._test_payload_2)
def test_enqueue_second_job_interval_map_existence(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_2,
interval=20000, # 20s (20000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
interval_map_name = '%s:interval' % (self.queue._key_prefix)
# check if interval map exists
self.assertTrue(self.queue._r.exists(interval_map_name))
def test_enqueue_second_job_interval_value(self):
# job 1
job_id = self._get_job_id()
response = self.queue.enqueue(
payload=self._test_payload_1,
interval=10000, # 10s (10000ms)
job_id=job_id,
queue_id=self._test_queue_id,
queue_type=self._test_queue_type,
)
# job 2
job_id | |
1, self.heads, C // self.heads).permute(2, 0, 3, 1, 4)
kv = self.to_kv(attn_kv).reshape(B_, N, 2, self.heads, C // self.heads).permute(2, 0, 3, 1, 4)
q = q[0]
k, v = kv[0], kv[1]
return q,k,v
def flops(self, H, W):
flops = H*W*self.dim*self.inner_dim*3
return flops
class LinearProjection_Concat_kv(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., bias=True):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = bias)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = bias)
self.dim = dim
self.inner_dim = inner_dim
def forward(self, x, attn_kv=None):
B_, N, C = x.shape
attn_kv = x if attn_kv is None else attn_kv
qkv_dec = self.to_qkv(x).reshape(B_, N, 3, self.heads, C // self.heads).permute(2, 0, 3, 1, 4)
kv_enc = self.to_kv(attn_kv).reshape(B_, N, 2, self.heads, C // self.heads).permute(2, 0, 3, 1, 4)
q, k_d, v_d = qkv_dec[0], qkv_dec[1], qkv_dec[2] # make torchscript happy (cannot use tensor as tuple)
k_e, v_e = kv_enc[0], kv_enc[1]
k = torch.cat((k_d,k_e),dim=2)
v = torch.cat((v_d,v_e),dim=2)
return q,k,v
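    # Note: the k and v returned above have sequence length 2*N (decoder keys/values
    # followed by encoder keys/values); WindowAttention compensates for this by tiling
    # the relative-position bias and the attention mask by `ratio`.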
def flops(self, H, W):
flops = H*W*self.dim*self.inner_dim*5
return flops
#########################################
########### window-based self-attention #############
class WindowAttention(nn.Module):
def __init__(self, dim, win_size,num_heads, token_projection='linear', qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.,se_layer=False):
super().__init__()
self.dim = dim
self.win_size = win_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * win_size[0] - 1) * (2 * win_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.win_size[0]) # [0,...,Wh-1]
coords_w = torch.arange(self.win_size[1]) # [0,...,Ww-1]
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.win_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.win_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.win_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
# self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
if token_projection =='conv':
self.qkv = ConvProjection(dim,num_heads,dim//num_heads,bias=qkv_bias)
elif token_projection =='linear_concat':
self.qkv = LinearProjection_Concat_kv(dim,num_heads,dim//num_heads,bias=qkv_bias)
else:
self.qkv = LinearProjection(dim,num_heads,dim//num_heads,bias=qkv_bias)
self.token_projection = token_projection
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.se_layer = SELayer(dim) if se_layer else nn.Identity()
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, attn_kv=None, mask=None):
B_, N, C = x.shape
q, k, v = self.qkv(x,attn_kv)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.win_size[0] * self.win_size[1], self.win_size[0] * self.win_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
ratio = attn.size(-1)//relative_position_bias.size(-1)
relative_position_bias = repeat(relative_position_bias, 'nH l c -> nH l (c d)', d = ratio)
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
mask = repeat(mask, 'nW m n -> nW m (n d)',d = ratio)
attn = attn.view(B_ // nW, nW, self.num_heads, N, N*ratio) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N*ratio)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.se_layer(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, win_size={self.win_size}, num_heads={self.num_heads}'
def flops(self, H, W):
# calculate flops for 1 window with token length of N
# print(N, self.dim)
flops = 0
N = self.win_size[0]*self.win_size[1]
nW = H*W/N
# qkv = self.qkv(x)
# flops += N * self.dim * 3 * self.dim
flops += self.qkv.flops(H, W)
# attn = (q @ k.transpose(-2, -1))
if self.token_projection !='linear_concat':
flops += nW * self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += nW * self.num_heads * N * N * (self.dim // self.num_heads)
else:
flops += nW * self.num_heads * N * (self.dim // self.num_heads) * N*2
# x = (attn @ v)
flops += nW * self.num_heads * N * N*2 * (self.dim // self.num_heads)
# x = self.proj(x)
flops += nW * N * self.dim * self.dim
print("W-MSA:{%.2f}"%(flops/1e9))
return flops
#########################################
########### feed-forward network #############
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.in_features = in_features
self.hidden_features = hidden_features
self.out_features = out_features
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def flops(self, H, W):
flops = 0
# fc1
flops += H*W*self.in_features*self.hidden_features
# fc2
flops += H*W*self.hidden_features*self.out_features
print("MLP:{%.2f}"%(flops/1e9))
return flops
class LeFF(nn.Module):
def __init__(self, dim=32, hidden_dim=128, act_layer=nn.GELU,drop = 0.):
super().__init__()
self.linear1 = nn.Sequential(nn.Linear(dim, hidden_dim),
act_layer())
self.dwconv = nn.Sequential(nn.Conv2d(hidden_dim,hidden_dim,groups=hidden_dim,kernel_size=3,stride=1,padding=1),
act_layer())
self.linear2 = nn.Sequential(nn.Linear(hidden_dim, dim))
self.dim = dim
self.hidden_dim = hidden_dim
def forward(self, x):
# bs x hw x c
bs, hw, c = x.size()
hh = int(math.sqrt(hw))
x = self.linear1(x)
# spatial restore
x = rearrange(x, ' b (h w) (c) -> b c h w ', h = hh, w = hh)
# bs,hidden_dim,32x32
x = self.dwconv(x)
# flaten
x = rearrange(x, ' b c h w -> b (h w) c', h = hh, w = hh)
x = self.linear2(x)
return x
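    # Shape trace for forward (assumes a square token grid, i.e. hw == hh * hh):
    #   (bs, hw, dim) --linear1--> (bs, hw, hidden_dim) --reshape--> (bs, hidden_dim, hh, hh)
    #   --depthwise 3x3--> same shape --flatten--> (bs, hw, hidden_dim) --linear2--> (bs, hw, dim)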
def flops(self, H, W):
flops = 0
# fc1
flops += H*W*self.dim*self.hidden_dim
# dwconv
flops += H*W*self.hidden_dim*3*3
# fc2
flops += H*W*self.hidden_dim*self.dim
print("LeFF:{%.2f}"%(flops/1e9))
return flops
#########################################
########### window operation#############
def window_partition(x, win_size, dilation_rate=1):
B, H, W, C = x.shape
if dilation_rate !=1:
x = x.permute(0,3,1,2) # B, C, H, W
        assert type(dilation_rate) is int, 'dilation_rate should be an int'
x = F.unfold(x, kernel_size=win_size,dilation=dilation_rate,padding=4*(dilation_rate-1),stride=win_size) # B, C*Wh*Ww, H/Wh*W/Ww
windows = x.permute(0,2,1).contiguous().view(-1, C, win_size, win_size) # B' ,C ,Wh ,Ww
windows = windows.permute(0,2,3,1).contiguous() # B' ,Wh ,Ww ,C
else:
x = x.view(B, H // win_size, win_size, W // win_size, win_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, win_size, win_size, C) # B' ,Wh ,Ww ,C
return windows
def window_reverse(windows, win_size, H, W, dilation_rate=1):
# B' ,Wh ,Ww ,C
B = int(windows.shape[0] / (H * W / win_size / win_size))
x = windows.view(B, H // win_size, W // win_size, win_size, win_size, -1)
if dilation_rate !=1:
x = windows.permute(0,5,3,4,1,2).contiguous() # B, C*Wh*Ww, H/Wh*W/Ww
x = F.fold(x, (H, W), kernel_size=win_size, dilation=dilation_rate, padding=4*(dilation_rate-1),stride=win_size)
else:
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
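# Shape sketch for the two helpers above (dilation_rate == 1, sizes illustrative):
#   window_partition: (B, H, W, C) -> (B * H//win_size * W//win_size, win_size, win_size, C)
#   window_reverse:   (B * H//win_size * W//win_size, win_size, win_size, C) -> (B, H, W, C)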
#########################################
# Downsample Block
class Downsample(nn.Module):
def __init__(self, in_channel, out_channel):
super(Downsample, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=4, stride=2, padding=1),
)
self.in_channel = in_channel
self.out_channel = out_channel
def forward(self, x):
B, L, C = x.shape
# import pdb;pdb.set_trace()
H = int(math.sqrt(L))
W = int(math.sqrt(L))
x = x.transpose(1, 2).contiguous().view(B, C, H, W)
out = self.conv(x).flatten(2).transpose(1,2).contiguous() # B H*W C
return out
def flops(self, H, W):
flops = 0
# conv
flops += H/2*W/2*self.in_channel*self.out_channel*4*4
print("Downsample:{%.2f}"%(flops/1e9))
return flops
class NearestUpsample(nn.Module):
def __init__(self, in_channel, out_channel):
super(NearestUpsample, self).__init__()
self.deconv = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(inplace=True)
)
self.in_channel = in_channel
self.out_channel = out_channel
def forward(self, x):
B, L, C = x.shape
H = int(math.sqrt(L))
W = int(math.sqrt(L))
x = x.transpose(1, 2).contiguous().view(B, C, H, W)
x = F.interpolate(x, scale_factor=2, mode='nearest')
out = self.deconv(x).flatten(2).transpose(1,2).contiguous() # B H*W C
return out
def flops(self, H, W):
flops = 0
return flops
class MyUpsample(nn.Module):
def __init__(self, in_channel, out_channel):
super(MyUpsample, self).__init__()
self.deconv = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(inplace=True)
)
self.in_channel = in_channel
self.out_channel = out_channel
def forward(self, x):
B, L, C = x.shape
H = int(math.sqrt(L))
W = int(math.sqrt(L))
x = x.transpose(1, 2).contiguous().view(B, C, H, W)
x = F.interpolate(x, scale_factor=2, mode='nearest')
out = self.deconv(x).flatten(2).transpose(1,2).contiguous() # B H*W C
return out
def flops(self, H, W):
flops = 0
        # 3x3 conv applied at the upsampled (H*2 x W*2) resolution
        flops += H*2*W*2*self.in_channel*self.out_channel*3*3
print("Upsample:{%.2f}"%(flops/1e9))
return flops
# Upsample Block
class Upsample(nn.Module):
def __init__(self, in_channel, out_channel):
super(Upsample, self).__init__()
self.deconv = nn.Sequential(
nn.ConvTranspose2d(in_channel, out_channel, kernel_size=2, stride=2),
)
self.in_channel = in_channel
self.out_channel = out_channel
def forward(self, x):
| |
'''
This module contains the ConfigForm class (a popup dialog).
@author: <NAME>
'''
import clr
import log
from cvform import CVForm
from configuration import Configuration
import i18n
import System
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, CheckBox, ContextMenu, \
CheckedListBox, DialogResult, FlatStyle, Label, MenuItem, \
RichTextBox, SelectionMode, TabControl, TabPage, TextBox, LinkLabel
clr.AddReference('System.Drawing')
from System.Drawing import Point, Size, ContentAlignment
# =============================================================================
class ConfigForm(CVForm):
'''
This class is a popup, modal dialog that displays all of the configurable
options available to the user. The user can change any of the options,
   and then click OK or Cancel to quit the dialog and continue the normal
execution of the program. Clicking Cancel will discard any configuration
changes that were made; clicking OK will save them permanently.
'''
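   # Usage sketch (assumes CVForm ultimately derives from System.Windows.Forms.Form,
   # so ShowDialog() and DialogResult are available to callers):
   #   form = ConfigForm(owner)
   #   if form.ShowDialog() == DialogResult.OK:
   #      pass  # the user accepted and the configuration changes were kept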
# ==========================================================================
def __init__(self, owner):
'''
Initializes this form.
owner -> this form's owner window/dialog
'''
# these are the strings that the user sees for each checkbox; they can
# also be used to reference each checkbox inside the checkboxlist
ConfigForm.__SERIES_CB = i18n.get("ConfigFormSeriesCB")
ConfigForm.__NUMBER_CB = i18n.get("ConfigFormNumberCB")
ConfigForm.__PUBLISHED_CB = i18n.get("ConfigFormPublishedCB")
ConfigForm.__RELEASED_CB = i18n.get("ConfigFormReleasedCB")
ConfigForm.__TITLE_CB = i18n.get("ConfigFormTitleCB")
ConfigForm.__CROSSOVERS_CB = i18n.get("ConfigFormCrossoversCB")
ConfigForm.__WRITER_CB = i18n.get("ConfigFormWriterCB")
ConfigForm.__PENCILLER_CB = i18n.get("ConfigFormPencillerCB")
ConfigForm.__INKER_CB = i18n.get("ConfigFormInkerCB")
ConfigForm.__COVER_ARTIST_CB = i18n.get("ConfigFormCoverCB")
ConfigForm.__COLORIST_CB = i18n.get("ConfigFormColoristCB")
ConfigForm.__LETTERER_CB = i18n.get("ConfigFormLettererCB")
ConfigForm.__EDITOR_CB = i18n.get("ConfigFormEditorCB")
ConfigForm.__SUMMARY_CB = i18n.get("ConfigFormSummaryCB")
ConfigForm.__IMPRINT_CB = i18n.get("ConfigFormImprintCB")
ConfigForm.__PUBLISHER_CB = i18n.get("ConfigFormPublisherCB")
ConfigForm.__VOLUME_CB = i18n.get("ConfigFormVolumeCB")
ConfigForm.__CHARACTERS_CB = i18n.get("ConfigFormCharactersCB")
ConfigForm.__TEAMS_CB = i18n.get("ConfigFormTeamsCB")
ConfigForm.__LOCATIONS_CB = i18n.get("ConfigFormLocationsCB")
ConfigForm.__WEBPAGE_CB = i18n.get("ConfigFormWebCB")
# the TabControl that contains all our TabPages
self.__tabcontrol = None
# the ok button for this dialog
self.__ok_button = None
# the cancel button for this dialog
self.__cancel_button = None
# the restore defaults button for this dialog
self.__restore_button = None
# "options" checkboxes
self.__ow_existing_cb = None
self.__ignore_blanks_cb = None
self.__autochoose_series_cb = None
self.__confirm_issue_cb = None
self.__convert_imprints_cb = None
self.__summary_dialog_cb = None
self.__download_thumbs_cb = None
self.__preserve_thumbs_cb = None
self.__fast_rescrape_cb = None
self.__rescrape_tags_cb = None
self.__rescrape_notes_cb = None
# "api key" textbox
self.__api_key_tbox = None
# "advanced settings" textbox
self.__advanced_tbox = None
# "data" checkbox list
self.__update_checklist = None
CVForm.__init__(self, owner, "configformLocation")
self.__build_gui()
# ==========================================================================
def __build_gui(self):
''' Constructs and initializes the gui for this form. '''
# 1. --- build each gui component
self.__ok_button = self.__build_okbutton()
self.__cancel_button = self.__build_cancel_button()
self.__restore_button = self.__build_restore_button()
self.__tabcontrol = self.__build_tabcontrol()
# 2. -- configure this form, and add all the gui components to it
self.AutoScaleMode = AutoScaleMode.Font
self.ClientSize = Size(416, 375)
self.Text = i18n.get("ConfigFormTitle")
self.Controls.Add(self.__ok_button)
self.Controls.Add(self.__cancel_button)
self.Controls.Add(self.__restore_button)
self.Controls.Add(self.__tabcontrol)
# 3. -- define the keyboard focus tab traversal ordering
self.__ok_button.TabIndex = 0
self.__cancel_button.TabIndex = 1
self.__restore_button.TabIndex = 2
self.__tabcontrol.TabIndex = 3
self.__fired_update_gui()
# ==========================================================================
def __build_okbutton(self):
''' builds and returns the ok button for this form '''
button = Button()
button.DialogResult = DialogResult.OK
button.Location = Point(228, 343)
button.Size = Size(80, 23)
button.Text = i18n.get("ConfigFormOK")
return button
# ==========================================================================
def __build_restore_button(self):
''' builds and returns the restore button for this form '''
button = Button()
button.Click += self.__fired_restore_defaults
button.Location = Point(10, 343)
button.Size = Size(170, 23)
button.Text = i18n.get("ConfigFormRestore")
return button
# ==========================================================================
def __build_cancel_button(self):
''' builds and returns the cancel button for this form '''
button = Button()
button.DialogResult = DialogResult.Cancel
button.Location = Point(315, 343)
button.Size = Size(90, 23)
button.Text = i18n.get("ConfigFormCancel")
return button
# ==========================================================================
def __build_tabcontrol(self):
''' builds and returns the TabControl for this dialog '''
tabcontrol = TabControl()
tabcontrol.Location = Point(10, 15)
tabcontrol.Size = Size(395, 302)
tabcontrol.Controls.Add( self.__build_comicvinetab() )
tabcontrol.Controls.Add( self.__build_detailstab() )
tabcontrol.Controls.Add( self.__build_behaviourtab() )
tabcontrol.Controls.Add( self.__build_datatab() )
tabcontrol.Controls.Add( self.__build_advancedtab() )
return tabcontrol
# ==========================================================================
def __build_comicvinetab(self):
''' builds and returns the "ComicVine" Tab for the TabControl '''
tabpage = TabPage()
tabpage.Text = i18n.get("ConfigFormComicVineTab")
tabpage.Name = "comicvine"
# 1. --- a description label for this tabpage
label = Label()
label.UseMnemonic = False
label.AutoSize = False
label.Location = Point(34, 80)
label.Size = Size(315, 54)
label.Text = i18n.get("ConfigFormComicVineText")
# 2. --- the API key text box
fired_update_gui = self.__fired_update_gui
class ApiKeyTextBox(TextBox):
def OnTextChanged(self, args):
fired_update_gui()
self.__api_key_tbox = ApiKeyTextBox()
tbox = self.__api_key_tbox
tbox.Location = Point(34, 135)
tbox.Size = Size(315, 1)
menu = ContextMenu()
items = menu.MenuItems
items.Add( MenuItem(i18n.get("TextCut"), lambda s, ea : tbox.Cut() ) )
items.Add( MenuItem(i18n.get("TextCopy"), lambda s, ea : tbox.Copy() ) )
items.Add( MenuItem(i18n.get("TextPaste"), lambda s, ea : tbox.Paste() ) )
tbox.ContextMenu = menu
# 3. --- add a clickable link to send the user to ComicVine
linklabel = LinkLabel()
linklabel.UseMnemonic = False
linklabel.AutoSize = False
linklabel.Location = Point(34, 170)
linklabel.Size = Size(315, 34)
linklabel.Text = i18n.get("ConfigFormComicVineClickHere")
linklabel.LinkClicked += self.__fired_linkclicked
# 4. --- add 'em all to this tabpage
tabpage.Controls.Add(label)
tabpage.Controls.Add(tbox)
tabpage.Controls.Add(linklabel)
return tabpage
# ==========================================================================
def __build_detailstab(self):
''' builds and returns the "Details" Tab for the TabControl '''
tabpage = TabPage()
tabpage.Text = i18n.get("ConfigFormDetailsTab")
tabpage.Name = "details"
# 1. --- a description label for this tabpage
label = Label()
label.UseMnemonic = False
label.AutoSize = True
label.Location = Point(14, 35)
label.Size = Size(299, 17)
label.Text = i18n.get("ConfigFormDetailsText")
# 2. --- the 'select all' button
checkall_button = Button()
checkall_button.Click += self.__fired_checkall
checkall_button.Location = Point(280, 107)
checkall_button.Size = Size(100, 23)
checkall_button.Text = i18n.get("ConfigFormDetailsAll")
# 3. --- the 'deselect all' button
uncheckall_button = Button()
uncheckall_button.Click += self.__fired_uncheckall
uncheckall_button.Location = Point(280, 138)
uncheckall_button.Size = Size(100, 23)
uncheckall_button.Text = i18n.get("ConfigFormDetailsNone")
# 4. --- build the update checklist (contains all the 'data' checkboxes)
self.__update_checklist = CheckedListBox()
self.__update_checklist.CheckOnClick = True
self.__update_checklist.ColumnWidth = 125
self.__update_checklist.ThreeDCheckBoxes = True
self.__update_checklist.Location = Point(15, 65)
self.__update_checklist.MultiColumn = True
self.__update_checklist.SelectionMode = SelectionMode.One
self.__update_checklist.Size = Size(260, 170)
self.__update_checklist.ItemCheck += self.__fired_update_gui
self.__update_checklist.Items.Add(ConfigForm.__SERIES_CB)
self.__update_checklist.Items.Add(ConfigForm.__VOLUME_CB)
self.__update_checklist.Items.Add(ConfigForm.__NUMBER_CB)
self.__update_checklist.Items.Add(ConfigForm.__TITLE_CB)
self.__update_checklist.Items.Add(ConfigForm.__PUBLISHED_CB)
self.__update_checklist.Items.Add(ConfigForm.__RELEASED_CB)
self.__update_checklist.Items.Add(ConfigForm.__CROSSOVERS_CB)
self.__update_checklist.Items.Add(ConfigForm.__PUBLISHER_CB)
self.__update_checklist.Items.Add(ConfigForm.__IMPRINT_CB)
self.__update_checklist.Items.Add(ConfigForm.__WRITER_CB)
self.__update_checklist.Items.Add(ConfigForm.__PENCILLER_CB)
self.__update_checklist.Items.Add(ConfigForm.__INKER_CB)
self.__update_checklist.Items.Add(ConfigForm.__COLORIST_CB)
self.__update_checklist.Items.Add(ConfigForm.__LETTERER_CB)
self.__update_checklist.Items.Add(ConfigForm.__COVER_ARTIST_CB)
self.__update_checklist.Items.Add(ConfigForm.__EDITOR_CB)
self.__update_checklist.Items.Add(ConfigForm.__SUMMARY_CB)
self.__update_checklist.Items.Add(ConfigForm.__CHARACTERS_CB)
self.__update_checklist.Items.Add(ConfigForm.__TEAMS_CB)
self.__update_checklist.Items.Add(ConfigForm.__LOCATIONS_CB)
self.__update_checklist.Items.Add(ConfigForm.__WEBPAGE_CB)
# 5. --- add 'em all to this tabpage
tabpage.Controls.Add(label)
tabpage.Controls.Add(checkall_button)
tabpage.Controls.Add(uncheckall_button)
tabpage.Controls.Add(self.__update_checklist)
return tabpage
# ==========================================================================
def __build_behaviourtab(self):
''' builds and returns the "Behaviour" Tab for the TabControl '''
tabpage = TabPage()
tabpage.Text = i18n.get("ConfigFormBehaviourTab")
# 1. --- build the 'When scraping for the first time' label
first_scrape_label = Label()
first_scrape_label.AutoSize = False
first_scrape_label.FlatStyle = FlatStyle.System
first_scrape_label.Location = Point(52, 27)
first_scrape_label.Text = i18n.get("ConfigFormFirstScrapeLabel")
first_scrape_label.Size = Size(300, 17)
# 1. --- build the 'autochoose series' checkbox
self.__autochoose_series_cb = CheckBox()
self.__autochoose_series_cb.AutoSize = False
self.__autochoose_series_cb.FlatStyle = FlatStyle.System
self.__autochoose_series_cb.Location = Point(82, 45)
self.__autochoose_series_cb.Size = Size(300, 34)
self.__autochoose_series_cb.Text = i18n.get("ConfigFormAutochooseSeriesCB")
self.__autochoose_series_cb.CheckedChanged += self.__fired_update_gui
# 3. --- build the 'confirm issue' checkbox
self.__confirm_issue_cb = CheckBox()
self.__confirm_issue_cb.AutoSize = False
self.__confirm_issue_cb.FlatStyle = FlatStyle.System
self.__confirm_issue_cb.Location = Point(82, 75)
self.__confirm_issue_cb.Size = Size(300, 34)
self.__confirm_issue_cb.Text = i18n.get("ConfigFormConfirmIssueCB")
self.__confirm_issue_cb.CheckedChanged += self.__fired_update_gui
# 4. --- build the 'use fast rescrape' checkbox
self.__fast_rescrape_cb = CheckBox()
self.__fast_rescrape_cb.AutoSize = False
self.__fast_rescrape_cb.FlatStyle = FlatStyle.System
self.__fast_rescrape_cb.Location = Point(52, 116)
self.__fast_rescrape_cb.Size = Size(300, 34)
self.__fast_rescrape_cb.Text = i18n.get("ConfigFormRescrapeCB")
self.__fast_rescrape_cb.CheckedChanged += self.__fired_update_gui
# 5. --- build the 'add rescrape hints to notes' checkbox
self.__rescrape_notes_cb = CheckBox()
self.__rescrape_notes_cb.AutoSize = False
self.__rescrape_notes_cb.FlatStyle = FlatStyle.System
self.__rescrape_notes_cb.Location = Point(82, 151)
self.__rescrape_notes_cb.Size = Size(270, 17)
self.__rescrape_notes_cb.Text = i18n.get("ConfigFormRescrapeNotesCB")
self.__rescrape_notes_cb.CheckedChanged += self.__fired_update_gui
# 6. --- build the 'add rescrape hints to tags' checkbox
self.__rescrape_tags_cb = CheckBox()
self.__rescrape_tags_cb.AutoSize = False
self.__rescrape_tags_cb.FlatStyle = FlatStyle.System
self.__rescrape_tags_cb.Location = Point(82, 181)
self.__rescrape_tags_cb.Size = Size(270, 17)
self.__rescrape_tags_cb.Text = i18n.get("ConfigFormRescrapeTagsCB")
self.__rescrape_tags_cb.CheckedChanged += self.__fired_update_gui
# 7. --- build the 'show summary dialog' checkbox
self.__summary_dialog_cb = CheckBox()
self.__summary_dialog_cb.AutoSize = False
self.__summary_dialog_cb.FlatStyle = FlatStyle.System
self.__summary_dialog_cb.Location = Point(52, 214)
self.__summary_dialog_cb.Size = Size(300, 34)
self.__summary_dialog_cb.Text = i18n.get("ConfigFormShowSummaryCB")
self.__summary_dialog_cb.CheckedChanged += self.__fired_update_gui
# 8. --- add 'em all to the tabpage
tabpage.Controls.Add(first_scrape_label)
tabpage.Controls.Add(self.__autochoose_series_cb)
tabpage.Controls.Add(self.__confirm_issue_cb)
tabpage.Controls.Add(self.__fast_rescrape_cb)
tabpage.Controls.Add(self.__rescrape_tags_cb)
tabpage.Controls.Add(self.__rescrape_notes_cb)
tabpage.Controls.Add(self.__summary_dialog_cb)
return tabpage
# ==========================================================================
def __build_datatab(self):
''' builds and returns the "Data" Tab for the TabControl '''
tabpage = TabPage()
tabpage.Text = i18n.get("ConfigFormDataTab")
# 1. --- build the 'convert imprints' checkbox
self.__convert_imprints_cb = CheckBox()
self.__convert_imprints_cb.AutoSize = False
self.__convert_imprints_cb.FlatStyle = FlatStyle.System
import os
import sys
import time
import random
from collections import OrderedDict
import boto3
import pandas as pd
import numpy as np
from six import raise_from
from PIL import Image, ImageFile, ImageDraw
from botocore.exceptions import ClientError
from keras_retinanet.preprocessing.csv_generator import Generator
from keras_retinanet.utils.image import read_image_bgr, resize_image
from utils.query import pd_query, cursor
from config import config
# Without this the program will crash
ImageFile.LOAD_TRUNCATED_IMAGES = True
def error_print(message):
print(f'[Warning] {message}', file=sys.stderr)
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _atomic_file_exists(file_path):
"""
Atomically check if a file exists
Returns a boolean representing if the file exists
"""
try:
# This file open is atomic. This avoids race conditions when multiple processes are running.
# This race condition only happens when workers > 1 and multiprocessing = True in fit_generator
fd = os.open(file_path, os.O_CREAT | os.O_EXCL)
os.close(fd)
return False
except FileExistsError:
return True
def _get_labelmap(classes):
"""
Builds a labelmap from each class's database ID to its concept name
"""
# Keras requires that the mapping IDs correspond to the index number of the class.
# So we create that mapping (dictionary)
class_id_name = pd_query(
f"select id, name from concepts where id = ANY(ARRAY{classes})")
labelmap = pd.Series(class_id_name.name.values,
index=class_id_name.id).to_dict()
return labelmap
def get_classmap(classes):
"""
Initializes the classmap of each class's database IDs to training IDs
"""
# Keras requires that the mapping IDs correspond to the index number of the class.
# So we create that mapping (dictionary)
classmap = {class_: index for index, class_ in enumerate(classes)}
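# Illustrative example (concept ids are hypothetical): get_classmap([101, 205, 307])
# returns {101: 0, 205: 1, 307: 2}, whereas _get_labelmap maps those same database
# ids to their concept names.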
return classmap
def _bound_coordinates(curr):
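# Scale the annotation coordinates from the original video resolution down to the
# configured resized resolution, then clamp each value into the frame bounds so
# that boxes never extend outside the resized image.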
x_ratio = (curr['videowidth'] / config.RESIZED_WIDTH)
y_ratio = (curr['videoheight'] / config.RESIZED_HEIGHT)
x1 = min(max(int(curr['x1'] / x_ratio), 0), config.RESIZED_WIDTH)
x2 = min(max(int(curr['x2'] / x_ratio), 0), config.RESIZED_WIDTH)
y1 = min(max(int(curr['y1'] / y_ratio), 0), config.RESIZED_HEIGHT)
y2 = min(max(int(curr['y2'] / y_ratio), 0), config.RESIZED_HEIGHT)
return x1, x2, y1, y2
class AnnotationGenerator(object):
def __init__(self,
collection_ids,
verified_only,
include_tracking,
verify_videos,
classes,
min_examples,
validation_split=0.8):
print("Grabbing annotations....")
# Start with a list of all possible annotations, grouped by frame in video
selected_frames, concept_counts = self._select_annotations(collection_ids,
verified_only,
include_tracking,
verify_videos,
min_examples,
classes)
print(f"Found {sum(concept_counts.values())} annotations.")
# Creating a counter for each user: the concepts & whether they were verified.
userDict = {}
for user_df in selected_frames:
userId = str(user_df['userid'].iloc[0])
if userId not in userDict:
userDict[userId] = {}
for index, row in user_df.iterrows():  # each unique concept
conceptId = str(row['conceptid'])
if conceptId not in userDict[userId]:
userDict[userId][conceptId] = {'0': 0, '1': 0}  # '0' counts unverified, '1' counts verified
if pd.notnull(row['verifiedby']):  # it has been verified, increment the verified count
userDict[userId][conceptId]['1'] += 1
else:
userDict[userId][conceptId]['0'] += 1
self.userDict = userDict
self.selected_frames = selected_frames
self.classmap = get_classmap(classes)
# Shuffle selected frames so that training/testing set are different each run
random.shuffle(self.selected_frames)
num_frames = len(self.selected_frames)
split_index = int(num_frames * validation_split)
# Split our data into training and testing sets, based on validation_split
self.training_set = self.selected_frames[0:split_index]
self.testing_set = self.selected_frames[split_index:]
def flow_from_s3(self,
image_folder='',
image_extension='.png',
subset='training',
**kwargs):
# Depending on subset, return either a training or testing generator
if subset == 'training':
return S3Generator(
selected_frames=self.training_set,
image_folder=image_folder,
image_extension=image_extension,
classes=self.classmap,
**kwargs
)
elif subset in ['validation', 'testing']:
return S3Generator(
selected_frames=self.testing_set,
image_folder=image_folder,
image_extension=image_extension,
classes=self.classmap,
**kwargs
)
else:
raise ValueError(
'subset parameter must be either "training" or "validation"/"testing"')
@staticmethod
def _select_annotations(collection_ids, verified_only, include_tracking, verify_videos, min_examples, concepts):
selected = []
concept_count = {}
annotations = AnnotationGenerator._get_annotations(
collection_ids, verified_only, include_tracking,
verify_videos, concepts, min_examples)
for concept in concepts:
concept_count[concept] = 0
# This grouping ensures that we can view all annotations for a single image
frames_group = annotations.groupby(
['videoid', 'frame_num'], sort=False)
frames_group = [df for _, df in frames_group]
ai_id = pd_query(
"SELECT id FROM users WHERE username='tracking'").id[0]
# Give priority to frames with highest verification priority
# And with least amount of tracking annotations
# And lower speed
frames_group.sort(key=lambda df: (
-df.priority.max(), list(df['userid']).count(ai_id), df.speed.mean()))
# Selects images that we'll use (each group has annotations for an image)
for annotation_group in frames_group:
# Stop once every concept already has at least min_examples annotations
if not any(v < min_examples for v in concept_count.values()):
break
in_annot = []
for i, row in annotation_group.iterrows():
if row['conceptid'] not in concept_count:
continue
concept_count[row['conceptid']] += 1
in_annot.append(row['conceptid'])
x1, x2, y1, y2 = _bound_coordinates(row)
annotation_group.at[i, 'x1'] = x1
annotation_group.at[i, 'x2'] = x2
annotation_group.at[i, 'y1'] = y1
annotation_group.at[i, 'y2'] = y2
annotation_group.at[i, 'videowidth'] = config.RESIZED_WIDTH
annotation_group.at[i, 'videoheight'] = config.RESIZED_HEIGHT
# Check whether this frame only contains concepts we already have enough of
if any(v > min_examples for v in concept_count.values()):
# Gets all concepts that we have too many of
excess = list(
{key: value for (key, value) in concept_count.items() if value > min_examples})
# If the frame doesn't include any concept that we still need more of,
# don't save the annotation
if set(excess) >= set(in_annot):
for a in in_annot:
concept_count[a] -= 1
continue
selected.append(annotation_group)
return selected, concept_count
@staticmethod
def _get_annotations(collection_ids, verified_only, include_tracking,
verify_videos, concepts, min_examples):
# Query that gets all annotations for given concepts
# making sure that any tracking annotations originated from good users
tracking_user = cursor.execute(
"""SELECT id FROM users WHERE username = 'tracking'""")
tracking_uid = cursor.fetchone()[0]
annotations_query = r'''
WITH collection AS (SELECT
A.id,
image,
userid,
videoid,
videowidth,
videoheight,
conceptid,
x1, x2, y1, y2,
speed,
ROUND(fps * timeinvideo) as frame_num,
verifiedby
FROM
annotation_intermediate inter
LEFT JOIN
annotations a ON a.id=inter.annotationid
LEFT JOIN
videos ON videos.id=videoid
WHERE inter.id = ANY(%s) AND a.videoid <> ANY(%s)
'''
if verified_only:
annotations_query += """ AND a.verifiedby IS NOT NULL"""
# Filter collection so each concept has min_example annotations
annotations_query += r'''
),
filteredCollection AS (
SELECT
*
FROM (
SELECT
ROW_NUMBER() OVER (
PARTITION BY
conceptid
ORDER BY userid=32, verifiedby IS NULL) AS r,
c.*
FROM
collection c) t
WHERE
t.r <= (%s)
)
'''
# Add annotations that exist in the same frame
annotations_query += r'''
SELECT
A.id,
image,
userid,
videoid,
videowidth,
videoheight,
conceptid,
x1, x2, y1, y2,
speed,
priority,
ROUND(fps * timeinvideo) as frame_num,
verifiedby
FROM
annotations a
LEFT JOIN
videos ON videos.id=videoid
WHERE
ABS(x2-x1)>25 AND ABS(y2-y1)>25 AND
x1>=0 AND x1<videowidth AND
x2>0 AND x2<=videowidth AND
y1>=0 AND y1<videoheight AND
y2>0 AND y2<=videoheight AND
EXISTS (
SELECT
1
FROM
filteredCollection c
WHERE
c.videoid=a.videoid
AND c.frame_num=ROUND(fps * timeinvideo))
'''
if not include_tracking:
annotations_query += f''' AND a.userid <> {tracking_uid}'''
return pd_query(
annotations_query,
(collection_ids, verify_videos, min_examples))
class S3Generator(Generator):
def __init__(self, classes, selected_frames, image_folder, image_extension='.png', **kwargs):
self.image_folder = image_folder
# We initialize selected_annotations to hold all possible annotation images.
# Then, downloaded_images will hold those that have already been downloaded
self.selected_annotations = pd.concat(selected_frames).reset_index()
# Go ahead and add a column with the file name we'll save the images as
# We use videoid + frame_num as this ensures that we never download
# the same frame in a video twice, even if it has multiple annotations
self.selected_annotations['save_name'] = self.selected_annotations.apply(
lambda row: f'{row["videoid"]}_{int(row["frame_num"])}',
axis=1
)
# Make a set of all images that've already been downloaded
self.downloaded_images = set(os.listdir(image_folder))
self.image_extension = image_extension
self.classes = classes
self.labelmap = _get_labelmap(list(classes))
self.failed_downloads = set()
# Make a reverse dictionary so that we can lookup the other way
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
self._connect_s3()
self.image_data = self._read_annotations()
super(S3Generator, self).__init__(**kwargs)
def size(self):
""" Size of the dataset.
"""
return len(self.selected_annotations.index)
def num_classes(self):
""" Number of classes in the dataset.
"""
return len(self.classes)
def has_label(self, label):
""" Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
""" Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labelmap[self.labels[label]]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
image = self.selected_annotations.iloc[image_index]
image_width = image['videowidth']
image_height = image['videoheight']
return float(image_width) / float(image_height)
def load_image(self, image_index):
""" Load an image at the image_index.
"""
if self._download_image(image_index):
return read_image_bgr(self.image_path(image_index))
# If the download failed, fall back to the next image (wrapping around)
return self.load_image((image_index + 1) % self.size())
# tests/test_data/netsim_data.py
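# Expected statistics fixtures for the netsim test scenarios. In each SCENARIO_*
# dict the samples are keyed first by node name and then by a (window_start,
# window_end) timestamp tuple; "S" and "D" appear to be the source and destination
# nodes of the simulated flow, so "S" only accumulates send counters and "D" only
# receive counters.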
SCENARIO_1_STAT = {
"stat_samples": {
"D": {
(0, 9): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 8888.888888888889,
"avg_receive_rate_pps": 1.1111111111111112,
"avg_send_rate_bps": 0.0,
"avg_send_rate_pps": 0.0,
"duration": 9,
"last_state_change_timestamp": 9,
"timestamp": 9,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 10000,
"total_received_pkts": 10,
"total_sent_bytes": 0,
"total_sent_pkts": 0,
}
},
"S": {
(0, 9): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 8888.888888888889,
"avg_send_rate_pps": 1.1111111111111112,
"duration": 9,
"last_state_change_timestamp": 9,
"timestamp": 9,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 10000,
"total_sent_pkts": 10,
}
},
}
}
SCENARIO_2_3_STAT = {
"stat_samples": {
"D": {
(0, 10): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 8000.0,
"avg_receive_rate_pps": 1.0,
"avg_send_rate_bps": 0.0,
"avg_send_rate_pps": 0.0,
"duration": 10,
"last_state_change_timestamp": 9,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 10000,
"total_received_pkts": 10,
"total_sent_bytes": 0,
"total_sent_pkts": 0,
}
},
"S": {
(0, 10): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 8000.0,
"avg_send_rate_pps": 1.0,
"duration": 10,
"last_state_change_timestamp": 9,
"timestamp": 10,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 10000,
"total_sent_pkts": 10,
}
},
}
}
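# SCENARIO_6_STAT additionally covers the switches "SW1" and "SW2" sitting between
# the source and destination; for each switch the sample breaks the counters down
# per packet processor, per RX/TX interface and per interface queue.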
SCENARIO_6_STAT = {
"stat_samples": {
"D": {
(0, 2): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 1.2,
"avg_latency_at_departure": 0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 8000.0,
"avg_receive_rate_pps": 1.0,
"avg_send_rate_bps": 0.0,
"avg_send_rate_pps": 0.0,
"duration": 2.0,
"last_state_change_timestamp": 1.6,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 2000,
"total_received_pkts": 2,
"total_sent_bytes": 0,
"total_sent_pkts": 0,
},
(2, 4): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 1.4749999999999999,
"avg_latency_at_departure": 0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 16000.0,
"avg_receive_rate_pps": 2.0,
"avg_send_rate_bps": 0.0,
"avg_send_rate_pps": 0.0,
"duration": 2.0,
"last_state_change_timestamp": 3.6,
"timestamp": 4,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 4000,
"total_received_pkts": 4,
"total_sent_bytes": 0,
"total_sent_pkts": 0,
},
},
"S": {
(0, 2): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 76000.0,
"avg_send_rate_pps": 9.5,
"duration": 2.0,
"last_state_change_timestamp": 1.9000000000000006,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 19000,
"total_sent_pkts": 19,
},
(2, 4): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_latency_at_arrival": 0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_receive_rate_bps": 0.0,
"avg_receive_rate_pps": 0.0,
"avg_send_rate_bps": 80000.0,
"avg_send_rate_pps": 10.0,
"duration": 2.0,
"last_state_change_timestamp": 3.900000000000002,
"timestamp": 4,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 0,
"total_received_pkts": 0,
"total_sent_bytes": 20000,
"total_sent_pkts": 20,
},
},
"SW1": {
(0, 2): {
"avg_drop_rate_bps": 56000.0,
"avg_drop_rate_pps": 7.0,
"avg_receive_rate_bps": 76000.0,
"avg_receive_rate_pps": 9.5,
"avg_send_rate_bps": 12000.0,
"avg_send_rate_pps": 1.5,
"duration": 2.0,
"last_state_change_timestamp": 1.9000000000000006,
"packet_processors": {
"PacketProcessor": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 9.5,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 9.5,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 76000.0,
"avg_receive_rate_pps": 9.5,
"avg_send_rate_bps": 76000.0,
"avg_send_rate_pps": 9.5,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 2.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 1.9000000000000006,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 19000,
"total_get_pkts": 19,
"total_put_bytes": 19000,
"total_put_pkts": 19,
"total_received_bytes": 19000,
"total_received_pkts": 19,
"total_sent_bytes": 19000,
"total_sent_pkts": 19,
}
},
"rx_interface_queues": {
"S:SW1:1": {
"avg_get_processed_rate": 0,
"avg_get_requested_rate": 0,
"avg_put_processed_rate": 0,
"avg_put_requested_rate": 0,
"avg_queue_len": 0,
"cur_queue_len": 0,
"duration": 0,
"integral_queue_sum": 0,
"last_state_change_timestamp": 0,
"max_queue_len": 0,
"timestamp": 0,
"total_get_processed_count": 19,
"total_get_requested_count": 20,
"total_put_processed_count": 19,
"total_put_requested_count": 19,
}
},
"rx_interfaces": {
"S:SW1:1": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 9.5,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 9.5,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 76000.0,
"avg_receive_rate_pps": 9.5,
"avg_send_rate_bps": 76000.0,
"avg_send_rate_pps": 9.5,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 2.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 1.9000000000000006,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 19000,
"total_get_pkts": 19,
"total_put_bytes": 19000,
"total_put_pkts": 19,
"total_received_bytes": 19000,
"total_received_pkts": 19,
"total_sent_bytes": 19000,
"total_sent_pkts": 19,
}
},
"timestamp": 2,
"total_dropped_bytes": 14000,
"total_dropped_pkts": 14,
"total_received_bytes": 19000,
"total_received_pkts": 19,
"total_sent_bytes": 3000,
"total_sent_pkts": 3,
"tx_interface_queues": {
"SW1:SW2:2": {
"avg_get_processed_rate": 0,
"avg_get_requested_rate": 0,
"avg_put_processed_rate": 0,
"avg_put_requested_rate": 0,
"avg_queue_len": 0,
"cur_queue_len": 1,
"duration": 0,
"integral_queue_sum": 0,
"last_state_change_timestamp": 0,
"max_queue_len": 0,
"timestamp": 0,
"total_get_processed_count": 4,
"total_get_requested_count": 4,
"total_put_processed_count": 5,
"total_put_requested_count": 5,
}
},
"tx_interfaces": {
"SW1:SW2:2": {
"avg_drop_rate_bps": 56000.0,
"avg_drop_rate_pps": 7.0,
"avg_get_rate_pps": 2.0,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.7999999999999999,
"avg_latency_at_drop": 0.0,
"avg_put_rate_pps": 2.5,
"avg_queue_len": 0.8499999999999999,
"avg_receive_rate_bps": 76000.0,
"avg_receive_rate_pps": 9.5,
"avg_send_rate_bps": 12000.0,
"avg_send_rate_pps": 1.5,
"avg_wait_time": 0.32500000000000007,
"cur_queue_len": 1,
"duration": 2.0,
"integral_queue_sum": 1.6999999999999997,
"integral_wait_time_sum": 1.3000000000000003,
"last_state_change_timestamp": 1.9000000000000006,
"max_queue_len": 1,
"max_wait_time": 0.5000000000000001,
"timestamp": 2,
"total_dropped_bytes": 14000,
"total_dropped_pkts": 14,
"total_get_bytes": 4000,
"total_get_pkts": 4,
"total_put_bytes": 5000,
"total_put_pkts": 5,
"total_received_bytes": 19000,
"total_received_pkts": 19,
"total_sent_bytes": 3000,
"total_sent_pkts": 3,
}
},
},
(2, 4): {
"avg_drop_rate_bps": 120000.0,
"avg_drop_rate_pps": 15.0,
"avg_receive_rate_bps": 156000.0,
"avg_receive_rate_pps": 19.5,
"avg_send_rate_bps": 28000.0,
"avg_send_rate_pps": 3.5,
"duration": 2.0,
"last_state_change_timestamp": 3.900000000000002,
"packet_processors": {
"PacketProcessor": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 9.75,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 9.75,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 78000.0,
"avg_receive_rate_pps": 9.75,
"avg_send_rate_bps": 78000.0,
"avg_send_rate_pps": 9.75,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 4.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 3.900000000000002,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 4,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 39000,
"total_get_pkts": 39,
"total_put_bytes": 39000,
"total_put_pkts": 39,
"total_received_bytes": 39000,
"total_received_pkts": 39,
"total_sent_bytes": 39000,
"total_sent_pkts": 39,
}
},
"rx_interface_queues": {
"S:SW1:1": {
"avg_get_processed_rate": 0,
"avg_get_requested_rate": 0,
"avg_put_processed_rate": 0,
"avg_put_requested_rate": 0,
"avg_queue_len": 0,
"cur_queue_len": 0,
"duration": 0,
"integral_queue_sum": 0,
"last_state_change_timestamp": 0,
"max_queue_len": 0,
"timestamp": 0,
"total_get_processed_count": 39,
"total_get_requested_count": 40,
"total_put_processed_count": 39,
"total_put_requested_count": 39,
}
},
"rx_interfaces": {
"S:SW1:1": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 9.75,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.0,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 9.75,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 78000.0,
"avg_receive_rate_pps": 9.75,
"avg_send_rate_bps": 78000.0,
"avg_send_rate_pps": 9.75,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 4.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 3.900000000000002,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 4,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 39000,
"total_get_pkts": 39,
"total_put_bytes": 39000,
"total_put_pkts": 39,
"total_received_bytes": 39000,
"total_received_pkts": 39,
"total_sent_bytes": 39000,
"total_sent_pkts": 39,
}
},
"timestamp": 4,
"total_dropped_bytes": 30000,
"total_dropped_pkts": 30,
"total_received_bytes": 39000,
"total_received_pkts": 39,
"total_sent_bytes": 7000,
"total_sent_pkts": 7,
"tx_interface_queues": {
"SW1:SW2:2": {
"avg_get_processed_rate": 0,
"avg_get_requested_rate": 0,
"avg_put_processed_rate": 0,
"avg_put_requested_rate": 0,
"avg_queue_len": 0,
"cur_queue_len": 1,
"duration": 0,
"integral_queue_sum": 0,
"last_state_change_timestamp": 0,
"max_queue_len": 0,
"timestamp": 0,
"total_get_processed_count": 8,
"total_get_requested_count": 8,
"total_put_processed_count": 9,
"total_put_requested_count": 9,
}
},
"tx_interfaces": {
"SW1:SW2:2": {
"avg_drop_rate_bps": 60000.0,
"avg_drop_rate_pps": 7.5,
"avg_get_rate_pps": 2.0,
"avg_latency_at_arrival": 0.0,
"avg_latency_at_departure": 0.8999999999999998,
"avg_latency_at_drop": 0.0,
"avg_put_rate_pps": 2.25,
"avg_queue_len": 0.9249999999999988,
"avg_receive_rate_bps": 78000.0,
"avg_receive_rate_pps": 9.75,
"avg_send_rate_bps": 14000.0,
"avg_send_rate_pps": 1.75,
"avg_wait_time": 0.4124999999999997,
"cur_queue_len": 1,
"duration": 4.0,
"integral_queue_sum": 3.6999999999999953,
"integral_wait_time_sum": 3.2999999999999976,
"last_state_change_timestamp": 3.900000000000002,
"max_queue_len": 1,
"max_wait_time": 0.5000000000000001,
"timestamp": 4,
"total_dropped_bytes": 30000,
"total_dropped_pkts": 30,
"total_get_bytes": 8000,
"total_get_pkts": 8,
"total_put_bytes": 9000,
"total_put_pkts": 9,
"total_received_bytes": 39000,
"total_received_pkts": 39,
"total_sent_bytes": 7000,
"total_sent_pkts": 7,
}
},
},
},
"SW2": {
(0, 2): {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_receive_rate_bps": 12000.0,
"avg_receive_rate_pps": 1.5,
"avg_send_rate_bps": 8000.0,
"avg_send_rate_pps": 1.0,
"duration": 2.0,
"last_state_change_timestamp": 1.6,
"packet_processors": {
"PacketProcessor": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 1.5,
"avg_latency_at_arrival": 0.7999999999999999,
"avg_latency_at_departure": 0.7999999999999999,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 1.5,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 12000.0,
"avg_receive_rate_pps": 1.5,
"avg_send_rate_bps": 12000.0,
"avg_send_rate_pps": 1.5,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 2.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 1.6,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 3000,
"total_get_pkts": 3,
"total_put_bytes": 3000,
"total_put_pkts": 3,
"total_received_bytes": 3000,
"total_received_pkts": 3,
"total_sent_bytes": 3000,
"total_sent_pkts": 3,
}
},
"rx_interface_queues": {
"SW1:SW2:2": {
"avg_get_processed_rate": 0,
"avg_get_requested_rate": 0,
"avg_put_processed_rate": 0,
"avg_put_requested_rate": 0,
"avg_queue_len": 0,
"cur_queue_len": 0,
"duration": 0,
"integral_queue_sum": 0,
"last_state_change_timestamp": 0,
"max_queue_len": 0,
"timestamp": 0,
"total_get_processed_count": 3,
"total_get_requested_count": 4,
"total_put_processed_count": 3,
"total_put_requested_count": 3,
}
},
"rx_interfaces": {
"SW1:SW2:2": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 1.5,
"avg_latency_at_arrival": 0.7999999999999999,
"avg_latency_at_departure": 0.7999999999999999,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 1.5,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 12000.0,
"avg_receive_rate_pps": 1.5,
"avg_send_rate_bps": 12000.0,
"avg_send_rate_pps": 1.5,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 2.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 1.6,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 3000,
"total_get_pkts": 3,
"total_put_bytes": 3000,
"total_put_pkts": 3,
"total_received_bytes": 3000,
"total_received_pkts": 3,
"total_sent_bytes": 3000,
"total_sent_pkts": 3,
}
},
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_received_bytes": 3000,
"total_received_pkts": 3,
"total_sent_bytes": 2000,
"total_sent_pkts": 2,
"tx_interface_queues": {
"SW2:D:3": {
"avg_get_processed_rate": 0,
"avg_get_requested_rate": 0,
"avg_put_processed_rate": 0,
"avg_put_requested_rate": 0,
"avg_queue_len": 0,
"cur_queue_len": 0,
"duration": 0,
"integral_queue_sum": 0,
"last_state_change_timestamp": 0,
"max_queue_len": 0,
"timestamp": 0,
"total_get_processed_count": 3,
"total_get_requested_count": 3,
"total_put_processed_count": 3,
"total_put_requested_count": 3,
}
},
"tx_interfaces": {
"SW2:D:3": {
"avg_drop_rate_bps": 0.0,
"avg_drop_rate_pps": 0.0,
"avg_get_rate_pps": 1.5,
"avg_latency_at_arrival": 0.7999999999999999,
"avg_latency_at_departure": 1.2,
"avg_latency_at_drop": 0,
"avg_put_rate_pps": 1.5,
"avg_queue_len": 0.0,
"avg_receive_rate_bps": 12000.0,
"avg_receive_rate_pps": 1.5,
"avg_send_rate_bps": 8000.0,
"avg_send_rate_pps": 1.0,
"avg_wait_time": 0.0,
"cur_queue_len": 0,
"duration": 2.0,
"integral_queue_sum": 0.0,
"integral_wait_time_sum": 0.0,
"last_state_change_timestamp": 1.6,
"max_queue_len": 0,
"max_wait_time": 0,
"timestamp": 2,
"total_dropped_bytes": 0,
"total_dropped_pkts": 0,
"total_get_bytes": 3000,
"total_get_pkts": 3,
"total_put_bytes": 3000,
"total_put_pkts": 3,
"total_received_bytes": 3000,
"total_received_pkts": | |
import numpy as np
from scipy.optimize import minimize
from scipy.io import loadmat
from math import sqrt,exp
import pickle
import sys
from time import time
def initializeWeights(n_in, n_out):
"""
# initializeWeights returns random weights for the Neural Network given the
# number of nodes in the input layer and output layer
# Input:
# n_in: number of nodes of the input layer
# n_out: number of nodes of the output layer
# Output:
# W: matrix of random initial weights with size (n_out x (n_in + 1))"""
epsilon = sqrt(6) / sqrt(n_in + n_out + 1)
W = (np.random.rand(n_out, n_in + 1) * 2 * epsilon) - epsilon
return W
def sigmoid(z):
"""# Notice that z can be a scalar, a vector or a matrix
# return the sigmoid of input z"""
return 1.0 / (1.0 + np.exp(-1.0 * z))
def preprocess():
""" Input:
Although this function doesn't have any input, you are required to load
the MNIST data set from file 'mnist_all.mat'.
Output:
train_data: matrix of the training set. Each row of train_data contains the
feature vector of an image
train_label: vector of labels corresponding to each image in the training
set
validation_data: matrix of the validation set. Each row of validation_data
contains the feature vector of an image
validation_label: vector of labels corresponding to each image in the
validation set
test_data: matrix of the test set. Each row of test_data contains the
feature vector of an image
test_label: vector of labels corresponding to each image in the test
set
Some suggestions for preprocessing step:
- feature selection"""
mat = loadmat('mnist_all.mat') # loads the MAT object as a Dictionary
# Pick a reasonable size for validation data
# ------------Initialize preprocess arrays----------------------#
train_preprocess = np.zeros(shape=(50000, 784))
validation_preprocess = np.zeros(shape=(10000, 784))
test_preprocess = np.zeros(shape=(10000, 784))
train_label_preprocess = np.zeros(shape=(50000,))
validation_label_preprocess = np.zeros(shape=(10000,))
test_label_preprocess = np.zeros(shape=(10000,))
# ------------Initialize flag variables----------------------#
train_len = 0
validation_len = 0
test_len = 0
train_label_len = 0
validation_label_len = 0
# ------------Start to split the data set into 6 arrays-----------#
for key in mat:
# -----------when the set is training set--------------------#
if "train" in key:
label = key[-1] # record the corresponding label
tup = mat.get(key)
sap = range(tup.shape[0])
tup_perm = np.random.permutation(sap)
tup_len = len(tup) # get the length of current training set
tag_len = tup_len - 1000 # defines the number of examples which will be added into the training set
# ---------------------adding data to training set-------------------------#
train_preprocess[train_len:train_len + tag_len] = tup[tup_perm[1000:], :]
train_len += tag_len
train_label_preprocess[train_label_len:train_label_len + tag_len] = label
train_label_len += tag_len
# ---------------------adding data to validation set-------------------------#
validation_preprocess[validation_len:validation_len + 1000] = tup[tup_perm[0:1000], :]
validation_len += 1000
validation_label_preprocess[validation_label_len:validation_label_len + 1000] = label
validation_label_len += 1000
# ---------------------adding data to test set-------------------------#
elif "test" in key:
label = key[-1]
tup = mat.get(key)
sap = range(tup.shape[0])
tup_perm = np.random.permutation(sap)
tup_len = len(tup)
test_label_preprocess[test_len:test_len + tup_len] = label
test_preprocess[test_len:test_len + tup_len] = tup[tup_perm]
test_len += tup_len
# ---------------------Shuffle, cast to double and normalize-------------------------#
train_size = range(train_preprocess.shape[0])
train_perm = np.random.permutation(train_size)
train_data = train_preprocess[train_perm]
train_data = np.double(train_data)
train_data = train_data / 255.0
train_label = train_label_preprocess[train_perm]
validation_size = range(validation_preprocess.shape[0])
vali_perm = np.random.permutation(validation_size)
validation_data = validation_preprocess[vali_perm]
validation_data = np.double(validation_data)
validation_data = validation_data / 255.0
validation_label = validation_label_preprocess[vali_perm]
test_size = range(test_preprocess.shape[0])
test_perm = np.random.permutation(test_size)
test_data = test_preprocess[test_perm]
test_data = np.double(test_data)
test_data = test_data / 255.0
test_label = test_label_preprocess[test_perm]
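# Feature selection: drop any pixel columns whose value is identical across every
# training example (they carry no information for classification), and remove the
# same columns from the validation and test sets so the feature dimensions match.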
same_value_cols = np.all(train_data == train_data[0,:], axis = 0)
common = np.where(same_value_cols == True)[0]
train_data = np.delete(train_data,common,axis=1)
validation_data = np.delete(validation_data,common,axis=1)
test_data = np.delete(test_data,common,axis=1)
print('preprocess done')
return train_data, train_label, validation_data, validation_label, test_data, test_label
def feedForward(inputs, weight):
net = np.dot(inputs, weight.T)
out = sigmoid(net)
return out
def computeGradient(training_data, out_hidden, w2, out_output, train_label):
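# Standard backpropagation for a sigmoid output layer with cross-entropy error:
# deltaL = output - target at the output layer, grad_w2 = (1/N) * deltaL^T . z_hidden,
# and grad_w1 = (1/N) * [ (deltaL . w2) * z * (1 - z) ]^T . x with the bias column
# dropped from the hidden-layer term (N = number of training examples).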
deltaL = out_output - train_label
gradient_out = np.dot(deltaL.T, out_hidden)
gradient_out *= (training_data.shape[0] ** -1)
gradient_hidden = np.dot(training_data.T, np.dot(deltaL, w2) * out_hidden * ( 1 - out_hidden))
gradient_hidden = gradient_hidden[:,:-1]
gradient_hidden = gradient_hidden.T
gradient_hidden *= (training_data.shape[0] ** -1)
return gradient_hidden,gradient_out
def addRegularization(training_data, w1, w2, obj_val, gradient_hidden, gradient_out, lambdaval):
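# L2 regularization: add lambda/(2N) * (||w1||^2 + ||w2||^2) to the objective and
# the corresponding (lambda/N) * w terms to each gradient (N = number of training
# examples).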
obj_val += (lambdaval/(2*training_data.shape[0])) * (np.sum(w1 * w1) + np.sum(w2 * w2))
gradient_out += (training_data.shape[0] ** -1) * (lambdaval * w2)
gradient_hidden += (training_data.shape[0] ** -1) * (lambdaval * w1)
return obj_val,gradient_hidden,gradient_out
def nnObjFunction(params, *args):
"""% nnObjFunction computes the value of objective function (negative log
% likelihood error function with regularization) given the parameters
% of Neural Networks, the training data, their corresponding training
% labels and lambda - regularization hyper-parameter.
% Input:
% params: vector of weights of 2 matrices w1 (weights of connections from
% input layer to hidden layer) and w2 (weights of connections from
% hidden layer to output layer) where all of the weights are contained
% in a single vector.
% n_input: number of nodes in the input layer (not including the bias node)
% n_hidden: number of nodes in the hidden layer (not including the bias node)
% n_class: number of nodes in the output layer (number of classes in the
% classification problem)
% training_data: matrix of training data. Each row of this matrix
% represents the feature vector of a particular image
% training_label: the vector of truth label of training images. Each entry
% in the vector represents the truth label of its corresponding image.
% lambda: regularization hyper-parameter. This value is used for fixing the
% overfitting problem.
% Output:
% obj_val: a scalar value representing value of error function
% obj_grad: a SINGLE vector of gradient value of error function
% NOTE: how to compute obj_grad
% Use backpropagation algorithm to compute the gradient of error function
% for each weights in weight matrices.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% reshape 'params' vector into 2 matrices of weight w1 and w2
% w1: matrix of weights of connections from input layer to hidden layers.
% w1(i, j) represents the weight of connection from unit j in input
% layer to unit i in hidden layer.
% w2: matrix of weights of connections from hidden layer to output layers.
% w2(i, j) represents the weight of connection from unit j in hidden
% layer to unit i in output layer."""
n_input, n_hidden, n_class, training_data, training_label, lambdaval = args
w1 = params[0:n_hidden * (n_input + 1)].reshape((n_hidden, (n_input + 1)))
w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))
obj_val = 0
train_label = np.zeros((training_label.shape[0],n_class))
train_label[np.arange(training_label.shape[0]),training_label.astype(int)] = 1
training_data = np.hstack((training_data, np.ones((training_data.shape[0],1))))
out_hidden = feedForward(training_data, w1)
out_hidden = np.hstack((out_hidden, np.ones((out_hidden.shape[0],1))))
out_output = feedForward(out_hidden, w2)
obj_val = (-1.0/training_data.shape[0]) * (np.sum( np.sum( ( train_label * np.log(out_output) ) + ( (1 - train_label) * np.log(1 - out_output) ) ) ) )
gradient_hidden, gradient_out = computeGradient(training_data, out_hidden, w2, out_output, train_label)
obj_val, gradient_hidden, gradient_out = addRegularization(training_data, w1, w2, obj_val, gradient_hidden, gradient_out, lambdaval)
# Make sure you reshape the gradient matrices to a 1D array. for instance if your gradient matrices are grad_w1 and grad_w2
# you would use code similar to the one below to create a flat array
# obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)
# obj_grad = np.array([])
obj_grad = np.concatenate((gradient_hidden.flatten(), gradient_out.flatten()),0)
return (obj_val, obj_grad)
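# Minimal usage sketch (hyper-parameter values are hypothetical, not taken from this
# script): because nnObjFunction returns (objective, flattened gradient), it can be
# handed directly to scipy.optimize.minimize with jac=True, e.g.
#   args = (n_input, n_hidden, n_class, train_data, train_label, lambdaval)
#   initialWeights = np.concatenate((initializeWeights(n_input, n_hidden).flatten(),
#                                    initializeWeights(n_hidden, n_class).flatten()), 0)
#   result = minimize(nnObjFunction, initialWeights, jac=True, args=args,
#                     method='CG', options={'maxiter': 50})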
def nnPredict(w1, w2, data):
"""% nnPredict predicts the label of data given the parameter w1, w2 of Neural
% Network.
% Input:
% w1: matrix of weights of connections from input layer to hidden layers.
% w1(i, j) represents the weight of connection from unit j in input
% layer to unit i in hidden layer.
% w2: matrix of weights of connections from hidden layer to output layers.
% w2(i, j) represents the weight of connection from unit j in hidden
% layer to unit i in output layer.
% data: matrix of data. Each row of this matrix represents the feature
% vector of a particular image
% Output:
% label: a column vector of predicted labels"""
data = np.hstack((data, np.ones((data.shape[0],1))))
out_hidden = feedForward(data, w1)
out_hidden = np.hstack((out_hidden, np.ones((out_hidden.shape[0],1))))
out_output = feedForward(out_hidden, w2)
labels = np.argmax(out_output, axis = 1)
return labels
"""**************Neural Network Script Starts here********************************"""
# n_input =
-m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
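# Each entry below follows the same pattern: (routine name, a rocsolver-bench style
# argument string, and the default values expected for the parameters the argument
# string leaves unspecified -- e.g. for '-m 10', n and lda are expected to default
# to the given m).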
(
'getf2_npvt_batched',
'-f getf2_npvt_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'batch_c': '1',
}
),
(
'getf2_npvt_strided_batched',
'-f getf2_npvt_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'batch_c': '1',
}
),
(
'getrf_npvt',
'-f getrf_npvt -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'getrf_npvt_batched',
'-f getrf_npvt_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'batch_c': '1',
}
),
(
'getrf_npvt_strided_batched',
'-f getrf_npvt_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'batch_c': '1',
}
),
(
'getrf',
'-f getrf -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'getrf_batched',
'-f getrf_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'getrf_strided_batched',
'-f getrf_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'getf2',
'-f getf2 -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'getf2_batched',
'-f getf2_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'getf2_strided_batched',
'-f getf2_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqr2',
'-f geqr2 -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
}
),
(
'geqr2_batched',
'-f geqr2_batched -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqr2_strided_batched',
'-f geqr2_strided_batched -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
'strideA': '150',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqrf',
'-f geqrf -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
}
),
(
'geqrf_batched',
'-f geqrf_batched -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqrf_strided_batched',
'-f geqrf_strided_batched -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
'strideA': '150',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqrf_ptr_batched',
'-f geqrf_ptr_batched -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
'strideP': '10',
'batch_c': '1',
}
),
(
'gerq2',
'-f gerq2 -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'gerq2_batched',
'-f gerq2_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'gerq2_strided_batched',
'-f gerq2_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'gerqf',
'-f gerqf -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'gerqf_batched',
'-f gerqf_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'gerqf_strided_batched',
'-f gerqf_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'geql2',
'-f geql2 -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'geql2_batched',
'-f geql2_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'geql2_strided_batched',
'-f geql2_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqlf',
'-f geqlf -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'geqlf_batched',
'-f geqlf_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'geqlf_strided_batched',
'-f geqlf_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'gelq2',
'-f gelq2 -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'gelq2_batched',
'-f gelq2_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'gelq2_strided_batched',
'-f gelq2_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'gelqf',
'-f gelqf -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
}
),
(
'gelqf_batched',
'-f gelqf_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'gelqf_strided_batched',
'-f gelqf_strided_batched -m 10',
{
'm': '10',
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'getrs',
'-f getrs -n 10',
{
'trans': 'N',
'n': '10',
'nrhs': '10',
'lda': '10',
'ldb': '10',
}
),
(
'getrs_batched',
'-f getrs_batched -n 10',
{
'trans': 'N',
'n': '10',
'nrhs': '10',
'lda': '10',
'ldb': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'getrs_strided_batched',
'-f getrs_strided_batched -n 10',
{
'trans': 'N',
'n': '10',
'nrhs': '10',
'lda': '10',
'ldb': '10',
'strideA': '100',
'strideP': '10',
'strideB': '100',
'batch_c': '1',
}
),
(
'gesv',
'-f gesv -n 10',
{
'n': '10',
'nrhs': '10',
'lda': '10',
'ldb': '10',
}
),
(
'gesv_batched',
'-f gesv_batched -n 10',
{
'n': '10',
'nrhs': '10',
'lda': '10',
'ldb': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'gesv_strided_batched',
'-f gesv_strided_batched -n 10',
{
'n': '10',
'nrhs': '10',
'lda': '10',
'ldb': '10',
'strideA': '100',
'strideP': '10',
'strideB': '100',
'batch_c': '1',
}
),
(
'gesvd',
'-f gesvd -n 10 -m 15',
{
'left_svect': 'N',
'right_svect': 'N',
'm': '15',
'n': '10',
'lda': '15',
'ldu': '15',
'ldv': '10',
}
),
(
'gesvd_batched',
'-f gesvd_batched -n 10 -m 15',
{
'left_svect': 'N',
'right_svect': 'N',
'm': '15',
'n': '10',
'lda': '15',
'strideS': '10',
'ldu': '15',
'strideU': '225',
'ldv': '10',
'strideV': '100',
'strideE': '9',
'batch_c': '1',
}
),
(
'gesvd_strided_batched',
'-f gesvd_strided_batched -n 10 -m 15',
{
'left_svect': 'N',
'right_svect': 'N',
'm': '15',
'n': '10',
'lda': '15',
'strideA': '150',
'strideS': '10',
'ldu': '15',
'strideU': '225',
'ldv': '10',
'strideV': '100',
'strideE': '9',
'batch_c': '1',
}
),
(
'trtri',
'-f trtri -n 10',
{
'uplo': 'U',
'diag': 'N',
'n': '10',
'lda': '10',
}
),
(
'trtri_batched',
'-f trtri_batched -n 10',
{
'uplo': 'U',
'diag': 'N',
'n': '10',
'lda': '10',
'batch_c': '1',
}
),
(
'trtri_strided_batched',
'-f trtri_strided_batched -n 10',
{
'uplo': 'U',
'diag': 'N',
'n': '10',
'lda': '10',
'strideA': '100',
'batch_c': '1',
}
),
(
'getri',
'-f getri -n 10',
{
'n': '10',
'lda': '10',
}
),
(
'getri_batched',
'-f getri_batched -n 10',
{
'n': '10',
'lda': '10',
'strideP': '10',
'batch_c': '1',
}
),
(
'getri_strided_batched',
'-f getri_strided_batched -n 10',
{
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'batch_c': '1',
}
),
(
'getri_npvt',
'-f getri_npvt -n 10',
{
'n': '10',
'lda': '10',
}
),
(
'getri_npvt_batched',
'-f getri_npvt_batched -n 10',
{
'n': '10',
'lda': '10',
'batch_c': '1',
}
),
(
'getri_npvt_strided_batched',
'-f getri_npvt_strided_batched -n 10',
{
'n': '10',
'lda': '10',
'strideA': '100',
'batch_c': '1',
}
),
(
'getri_outofplace',
'-f getri_outofplace -n 10',
{
'n': '10',
'lda': '10',
'ldc': '10',
}
),
(
'getri_outofplace_batched',
'-f getri_outofplace_batched -n 10',
{
'n': '10',
'lda': '10',
'strideP': '10',
'ldc': '10',
'batch_c': '1',
}
),
(
'getri_outofplace_strided_batched',
'-f getri_outofplace_strided_batched -n 10',
{
'n': '10',
'lda': '10',
'strideA': '100',
'strideP': '10',
'ldc': '10',
'strideC': '100',
'batch_c': '1',
}
),
(
'getri_npvt_outofplace',
'-f getri_npvt_outofplace -n 10',
{
'n': '10',
'lda': '10',
'ldc': '10',
}
),
(
'getri_npvt_outofplace_batched',
'-f getri_npvt_outofplace_batched -n 10',
{
'n': '10',
'lda': '10',
'ldc': '10',
'batch_c': '1',
}
),
(
'getri_npvt_outofplace_strided_batched',
'-f getri_npvt_outofplace_strided_batched -n 10',
{
'n': '10',
'lda': '10',
'strideA': '100',
'ldc': '10',
'strideC': '100',
'batch_c': '1',
}
),
(
'gels',
'-f gels -n 10 -m 15',
{
'trans': 'N',
'm': '15',
'n': '10',
'nrhs': '10',
'lda': '15',
'ldb': '15',
}
),
(
'gels_batched',
'-f gels_batched -n 10 -m 15',
{
'trans': 'N',
'm': '15',
'n': '10',
'nrhs': '10',
'lda': '15',
'ldb': '15',
'batch_c': '1',
}
),
(
'gels_strided_batched',
'-f gels_strided_batched -n 10 -m 15',
{
'trans': 'N',
'm': '15',
'n': '10',
'nrhs': '10',
'lda': '15',
'ldb': '15',
'strideA': '150',
'strideB': '150',
'batch_c': '1',
}
),
(
'gebd2',
'-f gebd2 -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
}
),
(
'gebd2_batched',
'-f gebd2_batched -n 10 -m 15',
{
'm': '15',
'n': '10',
'lda': '15',
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import random
from datetime import date, timedelta
import mock
from dateutil.relativedelta import relativedelta
from django.core.management import call_command
from django.test import override_settings
from django.urls import reverse
from django.conf import settings
from django.core import mail
from rest_framework import status
from account.models import User
from agency.agencies import UNICEF, WFP
from agency.models import Agency
from agency.roles import VALID_FOCAL_POINT_ROLE_NAMES, AgencyRole
from common.headers import CustomHeader
from notification.consts import NotificationType, NOTIFICATION_DATA
from partner.roles import PartnerRole
from partner.serializers import PartnerShortSerializer
from project.models import Assessment, Application, EOI, Pin
from partner.models import Partner
from common.tests.base import BaseAPITestCase
from common.factories import (
OpenEOIFactory,
AgencyMemberFactory,
PartnerSimpleFactory,
PartnerMemberFactory,
AgencyOfficeFactory,
AgencyFactory,
PartnerVerificationFactory,
UserFactory,
PartnerFactory,
get_new_common_file,
DirectEOIFactory, FinalizedEOIFactory)
from common.models import Specialization, CommonFile
from common.consts import (
SELECTION_CRITERIA_CHOICES,
JUSTIFICATION_FOR_DIRECT_SELECTION,
APPLICATION_STATUSES,
COMPLETED_REASON,
CFEI_TYPES,
CFEI_STATUSES,
EXTENDED_APPLICATION_STATUSES,
)
from project.views import PinProjectAPIView
from project.serializers import ConvertUnsolicitedSerializer
filename = os.path.join(settings.PROJECT_ROOT, 'apps', 'common', 'tests', 'test.doc')
class TestPinUnpinWrongEOIAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
def test_pin_unpin_project_wrong_eois(self):
url = reverse('projects:pins')
response = self.client.patch(url, data={"eoi_ids": [1, 2, 3], "pin": True})
self.assertResponseStatusIs(response, status_code=status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data['non_field_errors'], PinProjectAPIView.ERROR_MSG_WRONG_EOI_PKS)
self.assertEquals(Pin.objects.count(), 0)
class TestPinUnpinEOIAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
quantity = 2
url = reverse('projects:pins')
def setUp(self):
super(TestPinUnpinEOIAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity, is_published=True)
def test_pin_unpin_project_wrong_params(self):
eoi_ids = EOI.objects.all().values_list('id', flat=True)
response = self.client.patch(self.url, data={"eoi_ids": eoi_ids, "pin": None})
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data['non_field_errors'], PinProjectAPIView.ERROR_MSG_WRONG_PARAMS)
self.assertEquals(Pin.objects.count(), 0)
def test_pin_unpin_project(self):
# add pins
eoi_ids = EOI.objects.all().values_list('id', flat=True)
response = self.client.patch(self.url, data={"eoi_ids": eoi_ids, "pin": True})
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
self.assertEquals(Pin.objects.count(), self.quantity)
self.assertEquals(response.data["eoi_ids"], list(eoi_ids))
# read pins
response = self.client.get(self.url)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['count'], self.quantity)
# delete pins
response = self.client.patch(self.url, data={"eoi_ids": eoi_ids, "pin": False})
self.assertResponseStatusIs(response, status_code=status.HTTP_204_NO_CONTENT)
self.assertEquals(Pin.objects.count(), 0)
class TestOpenProjectsAPITestCase(BaseAPITestCase):
quantity = 2
url = reverse('projects:open')
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestOpenProjectsAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
PartnerMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity, agency=self.user.agency)
def test_open_project(self):
# read open projects
response = self.client.get(self.url)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['count'], self.quantity)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_create_patch_project(self):
ao = self.user.agency_members.first().office
payload = {
'title': "EOI title",
'agency': ao.agency.id,
'focal_points': [
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=ao).user.id
],
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
'agency_office': ao.id,
'specializations': Specialization.objects.all().values_list('id', flat=True)[:2],
'description': 'Brief background of the project',
'other_information': 'Other information',
"clarification_request_deadline_date": date.today(),
'start_date': date.today(),
'end_date': date.today(),
'deadline_date': date.today(),
'notif_results_date': date.today(),
'has_weighting': True,
'assessments_criteria': [
{'selection_criteria': SELECTION_CRITERIA_CHOICES.sector, 'weight': 10},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.local, 'weight': 40},
],
}
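# The two assessment criteria above deliberately sum to only 50, so this first POST
# is expected to fail validation; the payload is then topped up below so the weights
# total 100 and the creation succeeds.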
response = self.client.post(self.url, data=payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['assessments_criteria'],
['The sum of all weight criteria must be equal to 100.']
)
payload['assessments_criteria'].extend([
{'selection_criteria': SELECTION_CRITERIA_CHOICES.cost, 'weight': 20},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.innovative, 'weight': 30},
])
response = self.client.post(self.url, data=payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_201_CREATED)
eoi = EOI.objects.order_by('id').last()
self.assertEquals(response.data['title'], payload['title'])
self.assertEquals(eoi.created_by.id, self.user.id)
self.assertEquals(response.data['id'], eoi.id)
self.assertTrue(eoi.is_weight_adjustments_ok, 'The sum of all weight criteria must be equal to 100.')
# invite partners
url = reverse('projects:eoi-detail', kwargs={"pk": eoi.id})
payload = {
"invited_partners": PartnerShortSerializer([
Partner.objects.first(), Partner.objects.last()
], many=True).data
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['id'], eoi.id)
self.assertTrue(Partner.objects.first().id in [p['id'] for p in response.data['invited_partners']])
self.assertTrue(Partner.objects.count(), len(response.data['invited_partners']))
call_command('send_daily_notifications')
notification_emails = list(filter(
lambda msg: f'/cfei/open/{eoi.id}/overview' in msg.body,
mail.outbox
))
self.assertTrue(len(notification_emails) >= 1)
payload = {
"invited_partners": PartnerShortSerializer([Partner.objects.last()], many=True).data
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['id'], eoi.id)
self.assertTrue(Partner.objects.last().id in [p['id'] for p in response.data['invited_partners']])
self.assertTrue(Partner.objects.count(), 1)
self.assertTrue(len(response.data['invited_partners']), 1)
self.assertTrue(len(mail.outbox) > 0)  # mail.outbox is a shared resource and may also contain other emails
mail.outbox = []
# edit EOI - dates & focal point(s)
payload = {
"start_date": date.today() - timedelta(days=10),
"end_date": date.today() + timedelta(days=20),
"deadline_date": date.today() + timedelta(days=10),
"notif_results_date": date.today() + timedelta(days=15),
"focal_points": [
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=ao).user.id,
]
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['notif_results_date'], str(date.today() + timedelta(days=15)))
# complete this CFEI
justification = "mission completed"
payload = {
"justification": justification,
"completed_reason": COMPLETED_REASON.cancelled,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['completed_reason'], COMPLETED_REASON.cancelled)
self.assertTrue(response.data['completed_date'])
self.assertTrue(response.data['is_completed'])
self.assertEquals(response.data['justification'], justification)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_locations_for_project(self):
cfei = OpenEOIFactory(created_by=self.user)
details_url = reverse('projects:eoi-detail', kwargs={'pk': cfei.id})
details_response = self.client.get(details_url)
self.assertResponseStatusIs(details_response)
initial_locations = details_response.data['locations']
new_locations_payload = {
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
}
update_response = self.client.patch(details_url, data=new_locations_payload)
self.assertResponseStatusIs(update_response)
self.assertEqual(
len(new_locations_payload['locations']),
len(update_response.data['locations'])
)
second_update_payload = {
'locations': [
{
"admin_level_1": {"name": "Poland", "country_code": 'PL'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
] + initial_locations,
}
second_update_response = self.client.patch(details_url, data=second_update_payload)
self.assertResponseStatusIs(second_update_response)
self.assertEqual(
len(second_update_payload['locations']),
len(second_update_response.data['locations'])
)
self.assertTrue(
{l['id'] for l in initial_locations}.issubset(
{l['id'] for l in second_update_response.data['locations']}
)
)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_specializations_for_project(self):
cfei = OpenEOIFactory(created_by=self.user)
details_url = reverse('projects:eoi-detail', kwargs={'pk': cfei.id})
details_response = self.client.get(details_url)
self.assertResponseStatusIs(details_response)
for _ in range(10):
spec_count = random.randint(2, 7)
update_payload = {
'specializations': Specialization.objects.order_by('?').values_list('id', flat=True)[:spec_count],
}
update_response = self.client.patch(details_url, data=update_payload)
self.assertResponseStatusIs(update_response)
self.assertEqual(len(update_response.data['specializations']), spec_count)
class TestDirectProjectsAPITestCase(BaseAPITestCase):
quantity = 2
url = reverse('projects:direct')
user_type = 'agency'
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestDirectProjectsAPITestCase, self).setUp()
PartnerSimpleFactory()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity)
# TODO: This test is not deterministic - randomly fails
def test_create_direct_project(self):
ao = self.user.agency_members.first().office
payload = {
'eoi': {
'title': "EOI title",
'agency': ao.agency.id,
'focal_points': [self.user.id],
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
'agency_office': ao.id,
'specializations': Specialization.objects.all().values_list('id', flat=True)[:2],
'description': 'Brief background of the project',
'other_information': 'Other information',
'start_date': date.today(),
'end_date': date.today(),
'notif_results_date': date.today(),
'has_weighting': True,
},
'applications': [
{
"partner": Partner.objects.last().id,
"ds_justification_select": [
JUSTIFICATION_FOR_DIRECT_SELECTION.known,
JUSTIFICATION_FOR_DIRECT_SELECTION.local,
],
"ds_attachment": get_new_common_file().id,
"justification_reason": "To save those we love."
},
]
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg=response.data)
self.assertEquals(response.data['eoi']['title'], payload['eoi']['title'])
self.assertEquals(response.data['eoi']['created_by'], self.user.id)
self.assertEquals(response.data['eoi']['display_type'], CFEI_TYPES.direct)
self.assertEquals(response.data['eoi']['id'], EOI.objects.order_by('id').last().id)
app = Application.objects.get(pk=response.data['applications'][0]['id'])
self.assertEquals(app.submitter, self.user)
self.assertEquals(
app.ds_justification_select,
[JUSTIFICATION_FOR_DIRECT_SELECTION.known, JUSTIFICATION_FOR_DIRECT_SELECTION.local]
)
self.assertIsNotNone(response.data['applications'][-1]['ds_attachment'])
class TestPartnerApplicationsAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
def setUp(self):
super(TestPartnerApplicationsAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity, display_type='NoN')
PartnerSimpleFactory.create_batch(self.quantity)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_create(self):
self.client.set_headers({
CustomHeader.PARTNER_ID.value: self.user.partner_members.first().partner.id
})
eoi_id = EOI.objects.first().id
url = reverse('projects:partner-applications', kwargs={"pk": eoi_id})
payload = {
"cn": get_new_common_file().id,
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
app = Application.objects.last()
self.assertEquals(response.data['id'], app.id)
self.assertEquals(app.submitter.id, self.user.id)
common_file = CommonFile.objects.create()
common_file.file_field.save('test.csv', open(filename))
payload = {
"cn": common_file.id,
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data[0], 'You already applied for this project.')
url = reverse('projects:agency-applications', kwargs={"pk": eoi_id})
payload = {
"partner": Partner.objects.exclude(applications__eoi_id=eoi_id).order_by('?').last().id,
"ds_justification_select": [JUSTIFICATION_FOR_DIRECT_SELECTION.known],
"justification_reason": "a good reason",
}
response = self.client.post(url, data=payload)
expected_msgs = 'You do not have permission to perform this action.'
self.assertEquals(response.data['detail'], expected_msgs)
class TestAgencyApplicationsAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestAgencyApplicationsAPITestCase, self).setUp()
AgencyMemberFactory.create_batch(self.quantity)
PartnerSimpleFactory.create_batch(self.quantity)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_create(self):
eoi = OpenEOIFactory(display_type='NoN', agency=self.user.agency)
eoi.focal_points.add(self.user)
url = reverse('projects:agency-applications', kwargs={"pk": eoi.id})
partner = Partner.objects.last()
PartnerVerificationFactory(partner=partner)
payload = {
"partner": partner.id,
"ds_justification_select": [JUSTIFICATION_FOR_DIRECT_SELECTION.known],
"justification_reason": "a good reason",
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
app_id = eoi.applications.last().id
self.assertEqual(response.data['id'], app_id)
eoi.display_type = CFEI_TYPES.direct
eoi.save()
url = reverse('projects:agency-applications-delete', kwargs={"pk": app_id, "eoi_id": eoi.id})
response = self.client.delete(url)
self.assertResponseStatusIs(response, status.HTTP_204_NO_CONTENT)
class TestApplicationsAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestApplicationsAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
        # make sure the creating user is not the current one
creator = UserFactory()
AgencyMemberFactory(user=creator, office=self.user.agency_members.first().office)
self.eoi = OpenEOIFactory(is_published=True, created_by=creator, agency=self.user.agency)
self.eoi.focal_points.clear()
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_read_update_application(self):
application = self.eoi.applications.first()
PartnerMemberFactory.create_batch(5, partner=application.partner)
url = reverse('projects:application', kwargs={"pk": application.id})
response = self.client.get(url)
self.assertResponseStatusIs(response)
self.assertFalse(response.data['did_win'])
self.assertEquals(response.data['ds_justification_select'], [])
payload = {
"status": APPLICATION_STATUSES.preselected,
"ds_justification_select": [JUSTIFICATION_FOR_DIRECT_SELECTION.local],
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['non_field_errors'],
['Only Focal Point/Creator is allowed to pre-select/reject an application.']
)
self.client.logout()
creator = application.eoi.created_by
self.client.force_login(creator)
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_403_FORBIDDEN)
creator.agency_members.update(role=AgencyRole.EDITOR_ADVANCED.name)
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEquals(response.data['status'], APPLICATION_STATUSES.preselected)
self.assertEquals(response.data['ds_justification_select'], [JUSTIFICATION_FOR_DIRECT_SELECTION.local])
payload = {
"did_win": True,
"status": APPLICATION_STATUSES.preselected,
"justification_reason": "good reason",
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertIn('review_summary_comment', response.data)
application.eoi.review_summary_comment = 'Test comment'
application.eoi.save()
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['non_field_errors'],
['You cannot award an application if the profile has not been verified yet.']
)
PartnerVerificationFactory(partner=application.partner, submitter=application.eoi.created_by)
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertIn('application_status', response.data)
self.assertTrue(response.data['did_win'])
self.assertEquals(response.data['status'], APPLICATION_STATUSES.preselected)
call_command('send_daily_notifications')
self.assertTrue(len(mail.outbox) > 0)
mail.outbox = []
partner_user = UserFactory()
PartnerMemberFactory(user=partner_user, partner=application.partner, role=PartnerRole.ADMIN.name)
self.client.force_login(partner_user)
# accept offer
payload = {
"did_accept": True,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertTrue(response.data['did_accept'])
self.assertEquals(response.data['decision_date'], str(date.today()))
self.client.force_login(application.eoi.created_by)
awarded_partners_response = self.client.get(
            reverse('projects:applications-awarded-partners', kwargs={"eoi_id": application.eoi_id})
)
self.assertEqual(
awarded_partners_response.status_code, status.HTTP_200_OK, msg=awarded_partners_response.content
)
if awarded_partners_response.data:
self.assertEqual(awarded_partners_response.data[0]['partner_decision_date'], str(date.today()))
self.assertEqual(awarded_partners_response.data[0]['partner_notified'].date(), date.today())
self.client.force_login(partner_user)
payload = {
"did_accept": False,
"did_decline": True,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertFalse(response.data['did_accept'])
self.assertTrue(response.data['did_decline'])
self.client.force_login(application.eoi.created_by)
reason = "They are better then You."
payload = {
"did_withdraw": True,
"withdraw_reason": reason,
"status": APPLICATION_STATUSES.rejected,
        }
        response = self.client.patch(url, data=payload)
        self.assertResponseStatusIs(response)
uuid,
):
"""Delete.
:param uuid: Id of the purge rule
"""
request_data = {
'uuid': uuid,
}
errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The rule was not found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to delete purge rules for this account')
query_data = {
'api': self._api,
'url': '/purge/delete',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def run(
self,
dry_run,
account_id=None,
object=None,
rule_id=None,
):
"""Run.
:param dry_run: Do a dry run of the rule - flag
:param account_id: account_id
:param object: Limit purging to this object only (optional)
:param rule_id: rule_id
"""
request_data = {
'account_id': account_id,
'dry_run': dry_run,
'object': object,
'rule_id': rule_id,
}
errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The rule or account was not found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to run purge rules for this account')
query_data = {
'api': self._api,
'url': '/purge/run',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
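# Illustrative sketch: driving the synchronous purge wrapper above. It assumes the
# wrapper class defined above is named Purge and that the QueryO object its methods
# return is executed by the surrounding SDK's query machinery (commonly via a get()
# call); both points are assumptions, not guarantees from this file.
def example_dry_run_purge(api, account_id, rule_id):
    """Build a dry-run query for a single purge rule; the caller executes the returned query."""
    purge = Purge(api)  # assumed class name of the synchronous wrapper above
    return purge.run(dry_run=1, account_id=account_id, rule_id=rule_id)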
class AsyncPurge:
"""AsyncPurge."""
def __init__(self, api):
self._api = api
def list(
self,
account_id,
):
"""List.
:param account_id: uuid of the account
"""
request_data = {
'account_id': account_id,
}
errors_mapping = {}
errors_mapping[('FILTER_NOT_FOUND', None)] = FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID')
        errors_mapping[('INVALID_CONDITION', None)] = InvalidCondition('The condition is not supported. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_FIELD', None)] = InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to')
errors_mapping[('INVALID_SORT_FIELD', None)] = InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to')
errors_mapping[('INVALID_SORT_ORDER', None)] = InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account can not be found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to view this list')
query_data = {
'api': self._api,
'url': '/purge/list',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
query_data['paginated_field'] = 'purges'
return AsyncQueryOPSF(**query_data)
def add(
self,
account_id,
days_old,
days_old_how,
name,
adults=None,
archive=None,
global_param=None,
max_deletes=None,
minors=None,
modalities=None,
namespaces=None,
object=None,
owned_phr=None,
shared_from_phr=None,
skinny=None,
study_status_tags=None,
suspended=None,
thin=None,
):
"""Add.
:param account_id: uuid of the account the rule is for
:param days_old: Studies greater than or equal to these days old will be purged
:param days_old_how: How should the days old value be calculated using the 'U'pdated, 'C'reated or 'S'tudy date
:param name: Name of the purge rule
:param adults: Apply this rule to adults - flag (optional)
:param archive: Archive the studies rather than deleting them - flag (optional)
:param global_param: Flag to make this a global purge rule (optional)
:param max_deletes: Maximum number of purges per run of the rule (optional)
:param minors: Apply this rule to minors - flag (optional)
:param modalities: A JSON array of modalities to limit the rule to (optional)
:param namespaces: A JSON array of namespace uuid to limit the rule to (optional)
:param object: The object to be purged, Study by default (Study|Hl7) (optional)
:param owned_phr: Apply this rule to owned PHR namespaces - flag (optional)
:param shared_from_phr: If a study was shared from a PHR namespace delete the copy in the PHR namespace as well - flag (optional)
:param skinny: Make the studies skinny rather than deleting - flag (optional)
:param study_status_tags: A comma separated list of study status tags to purge (optional)
:param suspended: This rule is suspended and not applied - flag (optional)
:param thin: Make the studies thin rather than deleting - flag (optional)
"""
request_data = {
'account_id': account_id,
'adults': adults,
'archive': archive,
'days_old': days_old,
'days_old_how': days_old_how,
'global': global_param,
'max_deletes': max_deletes,
'minors': minors,
'modalities': modalities,
'name': name,
'namespaces': namespaces,
'object': object,
'owned_phr': owned_phr,
'shared_from_phr': shared_from_phr,
'skinny': skinny,
'study_status_tags': study_status_tags,
'suspended': suspended,
'thin': thin,
}
errors_mapping = {}
        errors_mapping[('GT_ZERO', None)] = GtZero('The parameter must be greater than zero. The error_subtype holds the name of the parameter')
errors_mapping[('INVALID_FLAG', None)] = InvalidFlag('An invalid flag was passed. The error_subtype holds the name of the invalid flag')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_A_NUMBER', None)] = NotANumber('The parameter must be a valid number. The error_subtype holds the name of the parameter')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account or namespace was not found. The error_subtype holds the uuid of the not found item')
errors_mapping[('NOT_LIST', None)] = NotList('The field is not a JSON array. The error_subtype holds the name of the field')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a purge to that account')
errors_mapping[('ONLY_ONE_FLAG', None)] = OnlyOneFlag('You can set either the skinny, thin or archive flag, not multiple')
errors_mapping[('VALIDATION_FAILED', None)] = ValidationFailed('A field failed validation. The error_subtype holds the name of the invalid field')
query_data = {
'api': self._api,
'url': '/purge/add',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return AsyncQueryO(**query_data)
def set(
self,
uuid,
adults=None,
archive=None,
days_old=None,
days_old_how=None,
global_param=None,
max_deletes=None,
minors=None,
modalities=None,
name=None,
namespaces=None,
owned_phr=None,
shared_from_phr=None,
skinny=None,
study_status_tags=None,
suspended=None,
thin=None,
):
"""Set.
:param uuid: Id of the purge rule
:param adults: Apply this rule to adults - flag (optional)
:param archive: Archive the studies rather than deleting them - flag (optional)
:param days_old: Studies greater than or equal to these days old will be purged (optional)
:param days_old_how: How should the days old value be calculated using the 'U'pdated, 'C'reated or 'S'tudy date (optional)
:param global_param: Flag to make this a global purge rule (optional)
:param max_deletes: Maximum number of purges per run of the rule (optional)
:param minors: Apply this rule to minors - flag (optional)
        :param modalities: A JSON array of modalities to limit the rule to (optional)
:param name: Name of the purge rule (optional)
:param namespaces: A JSON array of namespace uuid to limit the rule to (optional)
        :param owned_phr: Apply this rule to owned PHR namespaces - flag (optional)
:param shared_from_phr: If a study was shared from a PHR namespace delete the copy in the PHR namespace as well - flag (optional)
:param skinny: Make the studies skinny rather than deleting - flag (optional)
:param study_status_tags: A comma separated list of study status tags to purge (optional)
:param suspended: This rule is suspended and not applied - flag (optional)
:param thin: Make the studies thin rather than deleting - flag (optional)
"""
request_data = {
'adults': adults,
'archive': archive,
'days_old': days_old,
'days_old_how': days_old_how,
'global': global_param,
'max_deletes': max_deletes,
'minors': minors,
'modalities': modalities,
'name': name,
'namespaces': namespaces,
'owned_phr': owned_phr,
'shared_from_phr': shared_from_phr,
'skinny': skinny,
'study_status_tags': study_status_tags,
'suspended': suspended,
'thin': thin,
'uuid': uuid,
}
errors_mapping = {}
        errors_mapping[('GT_ZERO', None)] = GtZero('The parameter must be greater than zero. The error_subtype holds the name of the parameter')
errors_mapping[('INVALID_FLAG', None)] = InvalidFlag('An invalid flag was passed. The error_subtype holds the name of the invalid flag')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The field is not in valid JSON format. The error_subtype holds the name of the field')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_A_NUMBER', None)] = NotANumber('The parameter must be a valid number. The error_subtype holds the name of the parameter')
errors_mapping[('NOT_FOUND', None)] = NotFound('The account or namespace was not found. The error_subtype holds the uuid of the not found item')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit a purge rule')
        errors_mapping[('ONLY_ONE_FLAG', None)] = OnlyOneFlag('You can set either the skinny, thin or archive flag, not multiple')
        query_data = {
            'api': self._api,
            'url': '/purge/set',
            'request_data': request_data,
            'errors_mapping': errors_mapping,
            'required_sid': True,
        }
        return AsyncQueryO(**query_data)
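# Illustrative sketch: composing an AsyncPurge.add call for a typical retention rule.
# Field semantics follow the docstrings above; the api handle, the flag value of 1,
# and how the returned AsyncQueryO is ultimately executed are assumptions about the
# surrounding SDK rather than facts established in this file.
def build_example_purge_rule(api, account_id):
    purge = AsyncPurge(api)
    return purge.add(
        account_id=account_id,
        days_old=365,             # purge studies at least a year old...
        days_old_how='S',         # ...measured from the 'S'tudy date ('U'pdated/'C'reated also allowed)
        name='archive-old-studies',
        archive=1,                # archive rather than delete; skinny/thin must then stay unset (ONLY_ONE_FLAG)
        max_deletes=100,          # cap the number of purges per run
    )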
try:
params = request._serialize()
body = self.call("DescribeDDoSEvInfo", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSEvInfoResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSEvList(self, request):
"""获取DDoS攻击事件列表
:param request: Request instance for DescribeDDoSEvList.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSEvListRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSEvListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSEvList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSEvListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSIpLog(self, request):
"""获取DDoSIP攻击日志
:param request: Request instance for DescribeDDoSIpLog.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSIpLogRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSIpLogResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSIpLog", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSIpLogResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSNetCount(self, request):
"""获取高防IP专业版资源的DDoS攻击占比分析
:param request: Request instance for DescribeDDoSNetCount.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetCountRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetCountResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSNetCount", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSNetCountResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSNetEvInfo(self, request):
"""获取高防IP专业版资源的DDoS攻击事件详情
:param request: Request instance for DescribeDDoSNetEvInfo.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetEvInfoRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetEvInfoResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSNetEvInfo", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSNetEvInfoResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSNetEvList(self, request):
"""获取高防IP专业版资源的DDoS攻击事件列表
:param request: Request instance for DescribeDDoSNetEvList.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetEvListRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetEvListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSNetEvList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSNetEvListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSNetIpLog(self, request):
"""获取高防IP专业版资源的DDoSIP攻击日志
:param request: Request instance for DescribeDDoSNetIpLog.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetIpLogRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetIpLogResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSNetIpLog", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSNetIpLogResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSNetTrend(self, request):
"""获取高防IP专业版资源的DDoS攻击指标数据
:param request: Request instance for DescribeDDoSNetTrend.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetTrendRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSNetTrendResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSNetTrend", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSNetTrendResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSPolicy(self, request):
"""获取DDoS高级策略
:param request: Request instance for DescribeDDoSPolicy.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSPolicyRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSTrend(self, request):
"""获取DDoS攻击流量带宽和攻击包速率数据
:param request: Request instance for DescribeDDoSTrend.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSTrendRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSTrendResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSTrend", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSTrendResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeDDoSUsedStatis(self, request):
"""统计用户的高防资源的使用天数和DDoS攻击防护次数
:param request: Request instance for DescribeDDoSUsedStatis.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSUsedStatisRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeDDoSUsedStatisResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeDDoSUsedStatis", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeDDoSUsedStatisResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeIPProductInfo(self, request):
"""获取独享包或共享包IP对应的云资产信息,只支持独享包和共享包的IP
:param request: Request instance for DescribeIPProductInfo.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeIPProductInfoRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeIPProductInfoResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeIPProductInfo", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeIPProductInfoResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeInsurePacks(self, request):
"""获取保险包套餐列表
:param request: Request instance for DescribeInsurePacks.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeInsurePacksRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeInsurePacksResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeInsurePacks", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeInsurePacksResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeIpBlockList(self, request):
"""获取IP封堵列表
:param request: Request instance for DescribeIpBlockList.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeIpBlockListRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeIpBlockListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeIpBlockList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeIpBlockListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeIpUnBlockList(self, request):
"""获取IP解封记录
:param request: Request instance for DescribeIpUnBlockList.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeIpUnBlockListRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeIpUnBlockListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeIpUnBlockList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeIpUnBlockListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeL4HealthConfig(self, request):
"""导出四层健康检查配置
:param request: Request instance for DescribeL4HealthConfig.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeL4HealthConfigRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeL4HealthConfigResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeL4HealthConfig", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeL4HealthConfigResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeL4RulesErrHealth(self, request):
"""获取L4转发规则健康检查异常结果
:param request: Request instance for DescribeL4RulesErrHealth.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeL4RulesErrHealthRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeL4RulesErrHealthResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeL4RulesErrHealth", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeL4RulesErrHealthResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeL7HealthConfig(self, request):
"""导出七层健康检查配置
:param request: Request instance for DescribeL7HealthConfig.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribeL7HealthConfigRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribeL7HealthConfigResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeL7HealthConfig", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeL7HealthConfigResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePackIndex(self, request):
"""获取产品总览统计,支持高防包、高防IP、高防IP专业版;
:param request: Request instance for DescribePackIndex.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribePackIndexRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribePackIndexResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePackIndex", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePackIndexResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePcap(self, request):
"""下载攻击事件的pcap包
:param request: Request instance for DescribePcap.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribePcapRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribePcapResponse`
"""
try:
params = request._serialize()
body = self.call("DescribePcap", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePcapResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePolicyCase(self, request):
"""获取策略场景
:param request: Request instance for DescribePolicyCase.
:type request: :class:`tencentcloud.dayu.v20180709.models.DescribePolicyCaseRequest`
:rtype: :class:`tencentcloud.dayu.v20180709.models.DescribePolicyCaseResponse`
"""
        try:
            params = request._serialize()
            body = self.call("DescribePolicyCase", params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = models.DescribePolicyCaseResponse()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
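# Illustrative sketch: a typical way to call one of the Describe* wrappers above.
# The credential/client construction follows the usual tencentcloud-sdk-python pattern;
# the request fields set below (Business, Id, MetricName, Period, StartTime, EndTime)
# are assumptions about DescribeDDoSTrendRequest and may need adjusting.
def example_describe_ddos_trend(secret_id, secret_key, region="ap-guangzhou"):
    from tencentcloud.common import credential
    from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
    from tencentcloud.dayu.v20180709 import dayu_client, models

    cred = credential.Credential(secret_id, secret_key)
    client = dayu_client.DayuClient(cred, region)
    req = models.DescribeDDoSTrendRequest()
    req.from_json_string(
        '{"Business": "bgpip", "Id": "bgpip-000000xx", "MetricName": "bps",'
        ' "Period": 300, "StartTime": "2020-01-01 00:00:00", "EndTime": "2020-01-01 01:00:00"}'
    )
    try:
        resp = client.DescribeDDoSTrend(req)
        return resp.to_json_string()
    except TencentCloudSDKException as err:
        # Error code, message and RequestId are populated from the response, as in the methods above.
        print(err)
        raise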