filename | text
---|---|
the-stack_0_15697 | #!/usr/bin/env python
import sys
sys.path.append('../')
from logparser import LogMine
input_dir = '../logs/HDFS/' # The input directory of log file
output_dir = 'LogMine_result/' # The output directory of parsing results
log_file = 'HDFS_2k.log' # The input log file name
log_format = '<Date> <Time> <Pid> <Level> <Component>: <Content>' # HDFS log format
levels = 2 # The levels of hierarchy of patterns
max_dist = 0.001 # The maximum distance between any log message in a cluster and the cluster representative
k = 1 # The message distance weight (default: 1)
regex = [] # Regular expression list for optional preprocessing (default: [])
parser = LogMine.LogParser(input_dir, output_dir, log_format, rex=regex, levels=levels, max_dist=max_dist, k=k)
parser.parse(log_file)
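# The call above clusters the sample logs into templates and writes structured results into
# output_dir; by the logparser toolkit's convention this is expected to produce files such as
# 'HDFS_2k.log_structured.csv' and 'HDFS_2k.log_templates.csv' (an assumption about the
# toolkit, not shown in this script).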
|
the-stack_0_15698 | import tensorflow as tf
import numpy as np
import models
import matplotlib.pyplot as plt
# Number of bootstrap heads
HEADS_N = 10
x_data = np.linspace(0,10,100)
y_data = np.sin(x_data) + np.random.normal(0, .2, x_data.shape)
x_data = x_data.reshape(-1, 1)
y_data = y_data.reshape(-1, 1)
# bootstrap mask - generate one for every element in x_data. Shape: (batch, heads_n)
x_data_mask = np.random.binomial(n=1, p=.1, size=(x_data.shape[0], HEADS_N))
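# With p=.1 each head's mask keeps roughly 10% of the training points, so every head is
# fitted on a different random subset of the data.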
print('Bootstrap mask:', x_data_mask)
tf.reset_default_graph()
# build graph
boot_dnn = models.build_bootstrapped_dnn(shape_x=list(x_data.shape[1:]), shape_y=list(y_data.shape[1:]), heads_n=HEADS_N)
print(boot_dnn)
# start session
sess = tf.InteractiveSession()
# init variables
sess.run(tf.global_variables_initializer())
# train
losses = []
for _ in range(1000):
feed_dict = {boot_dnn.X_pl: x_data, boot_dnn.y_pl: y_data, boot_dnn.bootstrap_mask_pl: x_data_mask}
_, loss = sess.run([boot_dnn.optimize, boot_dnn.loss], feed_dict)
losses.append(loss)
# predict
x_test = np.linspace(-3, 13, 300).reshape(-1, 1)
feed_dict = {boot_dnn.X_pl: x_test}
heads, heads_mean, heads_var = sess.run([boot_dnn.heads, boot_dnn.heads_mean, boot_dnn.heads_var], feed_dict)
# flatten data from shape (batch, x_dim=1) to 1D in order to plot
heads_mean = heads_mean.reshape(-1)
heads_var = heads_var.reshape(-1)
# note: heads is shape (heads, batch, x_dim)
print(heads.shape)
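# Sanity-check sketch (models.py is not shown here, so this is an assumption about what
# heads_mean/heads_var contain): the same ensemble statistics can be recovered from
# `heads` directly by reducing over the head axis.
heads_mean_check = heads.mean(axis=0).reshape(-1)  # average prediction of the 10 heads
heads_var_check = heads.var(axis=0).reshape(-1)    # disagreement between heads (epistemic uncertainty)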
# plot
fig,axes = plt.subplots(nrows=2, figsize=(8,8))
axes[0].plot(losses)
axes[0].set_xlabel('epoch')
axes[0].set_ylabel('loss')
axes[1].fill_between(x_test.reshape(-1), heads_mean+heads_var, heads_mean-heads_var, color='r', alpha=.2, label="prediction var (epis)")
axes[1].fill_between(x_test.reshape(-1), heads_mean+heads_var*2, heads_mean-heads_var*2, color='r', alpha=.2)
axes[1].plot(x_test, heads_mean, '-', color='r', lw=1, label='prediction mean')
axes[1].set_ylim((2, -2))
for head in heads:
axes[1].plot(x_test, head, color='r', lw=1, alpha=.2)
axes[1].scatter(x_data, y_data, color='b', s=1, label='training data')
plt.legend()
plt.savefig('bootstrap.png', bbox_inches='tight')
plt.show() |
the-stack_0_15699 | import logging
import os
from typing import Dict, List, Optional
from airflow.operators.bash import BashOperator
from airflow.models import DAG, DagRun
from sciencebeam_airflow.utils.container import escape_helm_set_value
from sciencebeam_airflow.utils.airflow import add_dag_macros
from sciencebeam_airflow.utils.container_operators import (
ContainerRunOperator,
HelmDeployOperator,
HelmDeleteOperator
)
from sciencebeam_airflow.dags.dag_ids import ScienceBeamDagIds
from sciencebeam_airflow.dags.utils import (
get_default_args,
create_validate_config_operation,
create_trigger_next_task_dag_operator,
get_sciencebeam_image
)
LOGGER = logging.getLogger(__name__)
class ConfigProps:
SCIENCEBEAM_RELEASE_NAME = 'sciencebeam_release_name'
MODEL = 'model'
NAMESPACE = 'namespace'
SOURCE_DATA_PATH = 'source_data_path'
SOURCE_FILE_LIST = 'source_file_list'
OUTPUT_DATA_PATH = 'output_data_path'
OUTPUT_FILE_LIST = 'output_file_list'
OUTPUT_SUFFIX = 'output_suffix'
RESUME = 'resume'
LIMIT = 'limit'
REQUIRED_PROPS = {
ConfigProps.SCIENCEBEAM_RELEASE_NAME,
ConfigProps.NAMESPACE,
ConfigProps.LIMIT
}
DEFAULT_ARGS = get_default_args()
DEFAULT_WORKER_COUNT = 10
DEFAULT_REPLICA_COUNT = 0 # don't set replica by default
DEPLOY_SCIENCEBEAM_ARGS_TEMPLATE = (
'''
--timeout 600s \
--set "fullnameOverride={{ dag_run.conf.sciencebeam_release_name }}-sb" \
{% for key, value in get_sciencebeam_deploy_args(dag_run.conf).items() %} \
--set "{{ key }}={{ escape_helm_set_value(value) }}" \
{% endfor %}
'''
)
SCIENCEBEAM_CONVERT_TEMPLATE = (
'''
python -m sciencebeam.pipeline_runners.local_pipeline_runner \
--data-path "{{ get_source_conf(dag_run.conf).data_path }}" \
--source-file-list "{{ get_source_conf(dag_run.conf).file_list }}" \
--source-file-column "{{ get_source_conf(dag_run.conf).file_column }}" \
--output-path "{{ get_output_conf(dag_run.conf).data_path }}" \
--output-suffix "{{ get_output_conf(dag_run.conf).output_suffix }}" \
--pipeline=api \
--api-url=http://{{ dag_run.conf.sciencebeam_release_name }}-sb:8075/api/convert \
{% if dag_run.conf.resume | default(false) %} \
--resume \
{% endif %} \
--limit "{{ get_limit(dag_run.conf) }}" \
--num-workers "{{ get_worker_count(dag_run.conf) }}"
'''
)
SCIENCEBEAM_GET_OUTPUT_FILE_LIST_TEMPLATE = (
'''
python -m sciencebeam_utils.tools.get_output_files \
--source-base-path "{{ get_source_conf(dag_run.conf).data_path }}" \
--source-file-list "{{ get_source_conf(dag_run.conf).file_list }}" \
--source-file-column "{{ get_source_conf(dag_run.conf).file_column }}" \
--output-file-list "{{ get_output_conf(dag_run.conf).absolute_file_list }}" \
--output-file-suffix "{{ get_output_conf(dag_run.conf).output_suffix }}" \
--output-base-path "{{ get_output_conf(dag_run.conf).data_path }}" \
--use-relative-path \
--limit "{{ get_limit(dag_run.conf) }}" \
--check
'''
)
DEFAULT_CONVERT_CONTAINER_REQUESTS = 'cpu=500m,memory=2048Mi'
def parse_image_name_tag(image):
return image.split(':')
def get_model_sciencebeam_image(model: dict) -> dict:
return get_sciencebeam_image(model)
def get_model_sciencebeam_deploy_args(model: dict) -> dict:
if 'chart_args' in model:
return model['chart_args']
sciencebeam_image_repo, sciencebeam_image_tag = parse_image_name_tag(
get_model_sciencebeam_image(model)
)
grobid_image_repo, grobid_image_tag = parse_image_name_tag(
model['grobid_image']
)
return {
'image.repository': sciencebeam_image_repo,
'image.tag': sciencebeam_image_tag,
'sciencebeam.args': model.get('sciencebeam_args', ''),
'grobid.enabled': 'true',
'grobid.image.repository': grobid_image_repo,
'grobid.image.tag': grobid_image_tag,
'grobid.warmup.enabled': 'true',
'grobid.crossref.enabled': model.get('grobid_crossref_enabled', 'false')
}
def get_sciencebeam_child_chart_names_for_helm_args(helm_args: Dict[str, str]) -> List[str]:
return [
key.split('.')[0]
for key, value in helm_args.items()
if key.endswith('.enabled') and len(key.split('.')) == 2 and value == 'true'
]
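# Illustrative usage (hypothetical helm args, not from this module):
# >>> get_sciencebeam_child_chart_names_for_helm_args(
# ...     {'grobid.enabled': 'true', 'grobid.image.tag': '0.6.1', 'image.repository': 'sb'}
# ... )
# ['grobid']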
class ScienceBeamConvertMacros:
def get_model(self, conf: dict) -> dict:
return conf['model']
def get_source_conf(self, conf: dict) -> dict:
return {
'file_column': 'source_url',
'data_path': conf['source_data_path'],
'file_list': conf['source_file_list'],
'absolute_file_list': os.path.join(conf['source_data_path'], conf['source_file_list'])
}
def get_output_conf(self, conf: dict) -> dict:
return {
'output_suffix': conf['output_suffix'],
'data_path': conf['output_data_path'],
'file_list': conf['output_file_list'],
'absolute_file_list': os.path.join(conf['output_data_path'], conf['output_file_list'])
}
def get_limit(self, conf: dict) -> str:
return conf['limit']
def get_convert_config(self, conf: dict) -> dict:
return conf.get('config', {}).get('convert', {})
def get_sciencebeam_convert_container_kwargs(self, conf: dict) -> dict:
return self.get_convert_config(conf).get('container', {})
def get_worker_count(self, conf: dict) -> int:
return int(self.get_convert_config(conf).get('worker_count', DEFAULT_WORKER_COUNT))
def get_replica_count(self, conf: dict) -> int:
return int(self.get_convert_config(conf).get('replica_count', DEFAULT_REPLICA_COUNT))
def get_base_sciencebeam_deploy_args(self, conf: dict) -> dict:
return get_model_sciencebeam_deploy_args(self.get_model(conf))
def get_sciencebeam_deploy_args(self, conf: dict) -> dict:
LOGGER.debug('conf: %s', conf)
helm_args = self.get_base_sciencebeam_deploy_args(conf)
replica_count = self.get_replica_count(conf)
if replica_count:
child_chart_names = list(get_sciencebeam_child_chart_names_for_helm_args(helm_args))
helm_args['replicaCount'] = replica_count
for child_chart_name in child_chart_names:
helm_args['%s.replicaCount' % child_chart_name] = replica_count
return helm_args
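# Illustrative example (hypothetical conf): if replica_count resolves to 2 and the model's
# chart args enable 'grobid', the returned helm args gain
# {'replicaCount': 2, 'grobid.replicaCount': 2} on top of the base deploy args.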
def escape_helm_set_value(self, helm_value: str) -> str:
return escape_helm_set_value(helm_value)
def get_sciencebeam_child_chart_names(
self, dag_run: DagRun, **_) -> List[str]:
conf: dict = dag_run.conf
helm_args = self.get_base_sciencebeam_deploy_args(conf)
return get_sciencebeam_child_chart_names_for_helm_args(helm_args)
def get_sciencebeam_image(self, conf: dict) -> str:
return get_sciencebeam_image(conf)
def is_config_valid(self, conf: dict) -> bool:
return (
self.get_model(conf)
and self.get_source_conf(conf)
and self.get_output_conf(conf)
and self.get_limit(conf)
and self.get_worker_count(conf)
and True
)
def add_sciencebeam_convert_dag_macros(
dag: DAG,
macros: Optional[ScienceBeamConvertMacros] = None
) -> ScienceBeamConvertMacros:
if macros is None:
macros = ScienceBeamConvertMacros()
add_dag_macros(dag, macros)
return macros
def create_deploy_sciencebeam_op(
dag: DAG, macros: ScienceBeamConvertMacros = None,
task_id='deploy_sciencebeam'):
if macros is None:
macros = ScienceBeamConvertMacros()
add_sciencebeam_convert_dag_macros(dag, macros)
return HelmDeployOperator(
dag=dag,
task_id=task_id,
namespace='{{ dag_run.conf.namespace }}',
release_name='{{ dag_run.conf.sciencebeam_release_name }}',
chart_name='$HELM_CHARTS_DIR/sciencebeam',
get_child_chart_names=macros.get_sciencebeam_child_chart_names,
preemptible=True,
helm_args=DEPLOY_SCIENCEBEAM_ARGS_TEMPLATE
)
def create_delete_sciencebeam_op(dag, task_id='delete_sciencebeam'):
return HelmDeleteOperator(
dag=dag,
task_id=task_id,
namespace='{{ dag_run.conf.namespace }}',
release_name='{{ dag_run.conf.sciencebeam_release_name }}',
keep_history=False,
trigger_rule='all_done'
)
def create_sciencebeam_convert_op(
dag, macros: ScienceBeamConvertMacros = None,
task_id='sciencebeam_convert') -> BashOperator:
_macros = add_sciencebeam_convert_dag_macros(dag, macros)
return ContainerRunOperator(
dag=dag,
task_id=task_id,
namespace='{{ dag_run.conf.namespace }}',
image='{{ get_sciencebeam_image(dag_run.conf) }}',
name='{{ generate_run_name(dag_run.conf.sciencebeam_release_name, "convert") }}',
preemptible=True,
requests=DEFAULT_CONVERT_CONTAINER_REQUESTS,
container_overrides_fn=_macros.get_sciencebeam_convert_container_kwargs,
command=SCIENCEBEAM_CONVERT_TEMPLATE
)
def create_get_output_file_list_op(
dag, macros: ScienceBeamConvertMacros = None, task_id='get_output_file_list'):
add_sciencebeam_convert_dag_macros(dag, macros)
return ContainerRunOperator(
dag=dag,
task_id=task_id,
namespace='{{ dag_run.conf.namespace }}',
image='{{ get_sciencebeam_image(dag_run.conf) }}',
name='{{ generate_run_name(dag_run.conf.sciencebeam_release_name, "get-output-list") }}',
preemptible=True,
requests='cpu=100m,memory=256Mi',
command=SCIENCEBEAM_GET_OUTPUT_FILE_LIST_TEMPLATE,
)
def create_dag(
dag_id: str = ScienceBeamDagIds.SCIENCEBEAM_CONVERT,
default_args: dict = None,
schedule_interval=None,
macros: ScienceBeamConvertMacros = None,
trigger_next: bool = True):
if default_args is None:
default_args = DEFAULT_ARGS
if macros is None:
macros = ScienceBeamConvertMacros()
dag = DAG(dag_id=dag_id, default_args=default_args, schedule_interval=schedule_interval)
convert_results = (
create_validate_config_operation(
dag=dag, required_props=REQUIRED_PROPS,
is_config_valid=macros.is_config_valid
) >> create_deploy_sciencebeam_op(dag=dag, macros=macros)
>> create_sciencebeam_convert_op(dag=dag, macros=macros)
)
_ = convert_results >> create_delete_sciencebeam_op(dag=dag)
get_output_file_list_results = (
convert_results
>> create_get_output_file_list_op(dag=dag, macros=macros)
)
if trigger_next:
_ = get_output_file_list_results >> create_trigger_next_task_dag_operator(dag=dag)
return dag
MAIN_DAG = create_dag()
|
the-stack_0_15700 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
from typing import Optional
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.models import Connection
# Please keep these variables in alphabetical order.
from tests.test_utils import AIRFLOW_MAIN_FOLDER
from tests.test_utils.logging_command_executor import CommandExecutor
GCP_AI_KEY = 'gcp_ai.json'
GCP_AUTOML_KEY = 'gcp_automl.json'
GCP_BIGQUERY_KEY = 'gcp_bigquery.json'
GCP_BIGTABLE_KEY = 'gcp_bigtable.json'
GCP_CLOUD_BUILD_KEY = 'gcp_cloud_build.json'
GCP_CLOUDSQL_KEY = 'gcp_cloudsql.json'
GCP_COMPUTE_KEY = 'gcp_compute.json'
GCP_COMPUTE_SSH_KEY = 'gcp_compute_ssh.json'
GCP_DATACATALOG_KEY = 'gcp_datacatalog.json'
GCP_DATAFLOW_KEY = 'gcp_dataflow.json'
GCP_DATAFUSION_KEY = 'gcp_datafusion.json'
GCP_DATAPROC_KEY = 'gcp_dataproc.json'
GCP_DATASTORE_KEY = 'gcp_datastore.json'
GCP_DLP_KEY = 'gcp_dlp.json'
GCP_FUNCTION_KEY = 'gcp_function.json'
GCP_GCS_KEY = 'gcp_gcs.json'
GCP_GCS_TRANSFER_KEY = 'gcp_gcs_transfer.json'
GCP_GKE_KEY = "gcp_gke.json"
GCP_KMS_KEY = "gcp_kms.json"
GCP_LIFE_SCIENCES_KEY = 'gcp_life_sciences.json'
GCP_MEMORYSTORE = 'gcp_memorystore.json'
GCP_PUBSUB_KEY = "gcp_pubsub.json"
GCP_SECRET_MANAGER_KEY = 'gcp_secret_manager.json'
GCP_SPANNER_KEY = 'gcp_spanner.json'
GCP_STACKDRIVER = 'gcp_stackdriver.json'
GCP_TASKS_KEY = 'gcp_tasks.json'
GCP_WORKFLOWS_KEY = "gcp_workflows.json"
GMP_KEY = 'gmp.json'
G_FIREBASE_KEY = 'g_firebase.json'
GCP_AWS_KEY = 'gcp_aws.json'
KEYPATH_EXTRA = 'extra__google_cloud_platform__key_path'
KEYFILE_DICT_EXTRA = 'extra__google_cloud_platform__keyfile_dict'
SCOPE_EXTRA = 'extra__google_cloud_platform__scope'
PROJECT_EXTRA = 'extra__google_cloud_platform__project'
class GcpAuthenticator(CommandExecutor):
"""
Initialises the authenticator.
:param gcp_key: name of the key to use for authentication (see GCP_*_KEY values)
:param project_extra: optional extra project parameter passed to google cloud
connection
"""
original_account = None # type: Optional[str]
def __init__(self, gcp_key: str, project_extra: Optional[str] = None):
super().__init__()
self.gcp_key = gcp_key
self.project_extra = project_extra
self.project_id = self.get_project_id()
self.full_key_path = None
self._set_key_path()
@staticmethod
def get_project_id():
return os.environ.get('GCP_PROJECT_ID')
def set_key_path_in_airflow_connection(self):
"""
Set key path in 'google_cloud_default' connection to point to the full
key path
:return: None
"""
session = settings.Session()
try:
conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
extras = conn.extra_dejson
extras[KEYPATH_EXTRA] = self.full_key_path
if extras.get(KEYFILE_DICT_EXTRA):
del extras[KEYFILE_DICT_EXTRA]
extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
extras[PROJECT_EXTRA] = self.project_extra if self.project_extra else self.project_id
conn.extra = json.dumps(extras)
session.commit()
except BaseException as ex:
self.log.error('Airflow DB Session error: %s', str(ex))
session.rollback()
raise
finally:
session.close()
def set_dictionary_in_airflow_connection(self):
"""
Set dictionary in 'google_cloud_default' connection to contain content
of the json service account file.
:return: None
"""
session = settings.Session()
try:
conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
extras = conn.extra_dejson
with open(self.full_key_path) as path_file:
content = json.load(path_file)
extras[KEYFILE_DICT_EXTRA] = json.dumps(content)
if extras.get(KEYPATH_EXTRA):
del extras[KEYPATH_EXTRA]
extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
extras[PROJECT_EXTRA] = self.project_extra
conn.extra = json.dumps(extras)
session.commit()
except BaseException as ex:
self.log.error('Airflow DB Session error: %s', str(ex))
session.rollback()
raise
finally:
session.close()
def _set_key_path(self):
"""
Sets the full key path. If GCP_CONFIG_DIR is set, the key is looked up in that
directory. Otherwise it assumes the configuration repository is checked out
next to the Airflow directory and looks for the key folder in that workspace's
"config" directory. The key file name is taken from self.gcp_key.
"""
if "GCP_CONFIG_DIR" in os.environ:
gcp_config_dir = os.environ["GCP_CONFIG_DIR"]
else:
gcp_config_dir = os.path.join(AIRFLOW_MAIN_FOLDER, os.pardir, "config")
if not os.path.isdir(gcp_config_dir):
self.log.info("The %s is not a directory", gcp_config_dir)
key_dir = os.path.join(gcp_config_dir, "keys")
if not os.path.isdir(key_dir):
self.log.error("The %s is not a directory", key_dir)
return
key_path = os.path.join(key_dir, self.gcp_key)
if not os.path.isfile(key_path):
self.log.error("The %s file is missing", key_path)
self.full_key_path = key_path
def _validate_key_set(self):
if self.full_key_path is None:
raise AirflowException("The gcp_key is not set!")
if not os.path.isfile(self.full_key_path):
raise AirflowException(
f"The key {self.gcp_key} could not be found. Please copy it to the {self.full_key_path} path."
)
def gcp_authenticate(self):
"""
Authenticate with service account specified via key name.
"""
self._validate_key_set()
self.log.info("Setting the Google Cloud key to %s", self.full_key_path)
# Checking if we can authenticate using service account credentials provided
self.execute_cmd(
[
'gcloud',
'auth',
'activate-service-account',
f'--key-file={self.full_key_path}',
f'--project={self.project_id}',
]
)
self.set_key_path_in_airflow_connection()
def gcp_revoke_authentication(self):
"""
Change the default authentication to 'none', which is a non-existent account.
"""
self._validate_key_set()
self.log.info("Revoking authentication - setting it to none")
self.execute_cmd(['gcloud', 'config', 'get-value', 'account', f'--project={self.project_id}'])
self.execute_cmd(['gcloud', 'config', 'set', 'account', 'none', f'--project={self.project_id}'])
def gcp_store_authentication(self):
"""
Store the current authentication so that it can be restored later, before
revoking authentication.
"""
self._validate_key_set()
if not GcpAuthenticator.original_account:
GcpAuthenticator.original_account = self.check_output(
['gcloud', 'config', 'get-value', 'account', f'--project={self.project_id}']
).decode('utf-8')
self.log.info("Storing account: to restore it later %s", GcpAuthenticator.original_account)
def gcp_restore_authentication(self):
"""
Restore authentication to the original one.
"""
self._validate_key_set()
if GcpAuthenticator.original_account:
self.log.info("Restoring original account stored: %s", GcpAuthenticator.original_account)
subprocess.call(
[
'gcloud',
'config',
'set',
'account',
GcpAuthenticator.original_account,
f'--project={self.project_id}',
]
)
else:
self.log.info("Not restoring the original Google Cloud account: it is not set")
|
the-stack_0_15701 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.tracks.controllers import (RHCreateTrack, RHCreateTrackGroup, RHDeleteTrack,
RHDeleteTrackGroup, RHDisplayTracks, RHEditProgram, RHEditTrack,
RHEditTrackGroup, RHManageTracks, RHSortTracks, RHTracksPDF)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('tracks', __name__, template_folder='templates', virtual_template_folder='events/tracks',
url_prefix='/event/<int:event_id>')
_bp.add_url_rule('/manage/tracks/', 'manage', RHManageTracks)
_bp.add_url_rule('/manage/tracks/program', 'edit_program', RHEditProgram, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/create', 'create_track', RHCreateTrack, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/sort', 'sort_tracks', RHSortTracks, methods=('POST',))
_bp.add_url_rule('/manage/tracks/<int:track_id>', 'edit_track', RHEditTrack, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/tracks/<int:track_id>', 'delete_track', RHDeleteTrack, methods=('DELETE',))
_bp.add_url_rule('/manage/track-groups/create', 'create_track_group', RHCreateTrackGroup, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/track-groups/<int:track_group_id>', 'edit_track_group', RHEditTrackGroup,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/track-groups/<int:track_group_id>', 'delete_track_group', RHDeleteTrackGroup,
methods=('DELETE',))
_bp.add_url_rule('/program', 'program', RHDisplayTracks)
_bp.add_url_rule('/program.pdf', 'program_pdf', RHTracksPDF)
_compat_bp = IndicoBlueprint('compat_tracks', __name__, url_prefix='/event/<int:event_id>')
_compat_bp.add_url_rule('/manage/program/tracks/<int:track_id>/contributions/', 'track_contribs',
make_compat_redirect_func('contributions', 'contribution_list',
view_args_conv={'track_id': None}))
|
the-stack_0_15702 | import os
import sys
import subprocess
import random
class Plopper:
def __init__(self,sourcefile,outputdir):
# Initializing instance variables
self.sourcefile = sourcefile
self.outputdir = outputdir+"/tmp_files"
if not os.path.exists(self.outputdir):
os.makedirs(self.outputdir)
#Creating a dictionary using parameter label and value
def createDict(self, x, params):
dictVal = {}
for p, v in zip(params, x):
dictVal[p] = v
return(dictVal)
#Replace the Markers in the source file with the corresponding Pragma values
def plotValues(self, dictVal, inputfile, outputfile):
with open(inputfile, "r") as f1:
buf = f1.readlines()
with open(outputfile, "w") as f2:
for line in buf:
modify_line = line
for key, value in dictVal.items():
if key in modify_line:
if value != 'None': #For empty string options
modify_line = modify_line.replace('#'+key, str(value))
if modify_line != line:
f2.write(modify_line)
else:
#To avoid writing the Marker
f2.write(line)
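# Illustrative example (hypothetical marker and value, not from the original kernels):
# with params = ['P0'] and x = ['#pragma omp parallel for'], createDict() yields
# {'P0': '#pragma omp parallel for'}, so a source line containing the marker '#P0'
# is rewritten to that pragma before the interim file is compiled and timed.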
# Function to find the execution time of the interim file, and return the execution time as cost to the search module
def findRuntime(self, x, params):
interimfile = ""
#exetime = float('inf')
#exetime = sys.maxsize
exetime = 1
counter = random.randint(1, 10001) # random suffix over a wide range to reduce filename collisions
interimfile = self.outputdir+"/"+str(counter)+".c"
# Generate intermediate file
dictVal = self.createDict(x, params)
self.plotValues(dictVal, self.sourcefile, interimfile)
#compile and find the execution time
tmpbinary = interimfile[:-2]
kernel_idx = self.sourcefile.rfind('/')
kernel_dir = self.sourcefile[:kernel_idx]
cmd1 = "clang -fno-caret-diagnostics " +interimfile +" " + kernel_dir + "/Materials.c " \
+ kernel_dir + "/XSutils.c " + " -I" + kernel_dir + \
" -std=c99 -fopenmp -DOPENMP -fno-unroll-loops -O3 -mllvm -polly -mllvm -polly-process-unprofitable -mllvm -polly-use-llvm-names -ffast-math -march=native -L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib -o "+tmpbinary
cmd2 = kernel_dir + "/exe.pl " + tmpbinary
#Find the compilation status using subprocess
compilation_status = subprocess.run(cmd1, shell=True, stderr=subprocess.PIPE)
#Find the execution time only when the compilation return code is zero, otherwise fall back to the default cost
if compilation_status.returncode == 0 :
#and len(compilation_status.stderr) == 0: #Second condition is to check for warnings
execution_status = subprocess.run(cmd2, shell=True, stdout=subprocess.PIPE)
exetime = float(execution_status.stdout.decode('utf-8'))
if exetime == 0:
exetime = 1
else:
print(compilation_status.stderr)
print("compile failed")
return exetime #return execution time as cost
|
the-stack_0_15703 | import commands
import os
import sys
class EnvFileReader:
def read_file(self, filename, env_var = os.environ):
file_lines = open(filename,'r').readlines()
line_num = 1
for line in file_lines:
# get rid of comments
line = line.split("#")[0]
# strip whitespace from ends
line = line.strip()
# check if empty line
if line == "":
line_num += 1
continue
# check for =
if line.find("=") == -1:
raise "Missing '=' on line %i of file %s" % (line_num,filename)
# split into var = val pairs
(var,val) = line.split("=",1)
# remove whitespace from vars and values
var = var.strip()
val = val.strip()
# search for variables in val
done = False
while True:
var_start_index = val.find("$(")
if var_start_index == -1:
break
var_end_index = val.find(")")
if var_end_index == -1:
raise "Variable parse error on line %i of file %s" % (line_num,filename)
# extract variable value
sub_var = val[var_start_index+2:var_end_index]
# look for variable in environment, if there rebuild val
if os.environ.has_key(sub_var):
val = val[0:var_start_index] + os.environ[sub_var] + val[var_end_index+1:]
elif env_var.has_key(sub_var):
val = val[0:var_start_index] + env_var[sub_var] + val[var_end_index+1:]
else:
raise "Variable %s not found in environment" % sub_var
# print "%s = %s" % (var, val)
env_var[var] = val
line_num += 1
if __name__ == '__main__' :
reader = EnvFileReader()
reader.read_file(sys.argv[1])
# for env in os.environ:
# print "%s = %s" % (env,os.environ[env])
|
the-stack_0_15704 | """Utilities for with-statement contexts. See PEP 343."""
import abc
import sys
import _collections_abc
from collections import deque
from functools import wraps
from types import MethodType
__all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext",
"AbstractContextManager", "AbstractAsyncContextManager",
"AsyncExitStack", "ContextDecorator", "ExitStack",
"redirect_stdout", "redirect_stderr", "suppress"]
class AbstractContextManager(abc.ABC):
"""An abstract base class for context managers."""
def __enter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractContextManager:
return _collections_abc._check_methods(C, "__enter__", "__exit__")
return NotImplemented
class AbstractAsyncContextManager(abc.ABC):
"""An abstract base class for asynchronous context managers."""
async def __aenter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractAsyncContextManager:
return _collections_abc._check_methods(C, "__aenter__",
"__aexit__")
return NotImplemented
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManagerBase:
"""Shared functionality for @contextmanager and @asynccontextmanager."""
def __init__(self, func, args, kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
class _GeneratorContextManager(_GeneratorContextManagerBase,
AbstractContextManager,
ContextDecorator):
"""Helper for @contextmanager decorator."""
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, self.args, self.kwds)
def __enter__(self):
# do not keep args and kwds alive unnecessarily
# they are only needed for recreation, which is not possible anymore
del self.args, self.kwds, self.func
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return False
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
except StopIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed.
return exc is not value
except RuntimeError as exc:
# Don't re-raise the passed in exception. (issue27122)
if exc is value:
return False
# Likewise, avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479).
if type is StopIteration and exc.__cause__ is value:
return False
raise
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
# This cannot use 'except BaseException as exc' (as in the
# async implementation) to maintain compatibility with
# Python 2, where old-style class exceptions are not caught
# by 'except BaseException'.
if sys.exc_info()[1] is value:
return False
raise
raise RuntimeError("generator didn't stop after throw()")
class _AsyncGeneratorContextManager(_GeneratorContextManagerBase,
AbstractAsyncContextManager):
"""Helper for @asynccontextmanager."""
async def __aenter__(self):
try:
return await self.gen.__anext__()
except StopAsyncIteration:
raise RuntimeError("generator didn't yield") from None
async def __aexit__(self, typ, value, traceback):
if typ is None:
try:
await self.gen.__anext__()
except StopAsyncIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = typ()
# See _GeneratorContextManager.__exit__ for comments on subtleties
# in this implementation
try:
await self.gen.athrow(typ, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopAsyncIteration as exc:
return exc is not value
except RuntimeError as exc:
if exc is value:
return False
# Avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479 for sync generators; async generators also
# have this behavior). But do this only if the exception wrapped
# by the RuntimeError is actually Stop(Async)Iteration (see
# issue29692).
if isinstance(value, (StopIteration, StopAsyncIteration)):
if exc.__cause__ is value:
return False
raise
except BaseException as exc:
if exc is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, args, kwds)
return helper
def asynccontextmanager(func):
"""@asynccontextmanager decorator.
Typical usage:
@asynccontextmanager
async def some_async_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
async with some_async_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _AsyncGeneratorContextManager(func, args, kwds)
return helper
class closing(AbstractContextManager):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class _RedirectStream(AbstractContextManager):
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
"""Context manager for temporarily redirecting stdout to another file.
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
_stream = "stdout"
class redirect_stderr(_RedirectStream):
"""Context manager for temporarily redirecting stderr to another file."""
_stream = "stderr"
class suppress(AbstractContextManager):
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
class _BaseExitStack:
"""A base class for ExitStack and AsyncExitStack."""
@staticmethod
def _create_exit_wrapper(cm, cm_exit):
return MethodType(cm_exit, cm)
@staticmethod
def _create_cb_wrapper(callback, *args, **kwds):
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
return _exit_wrapper
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance."""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature.
Can suppress exceptions the same way __exit__ method can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself).
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods.
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume it's a callable.
self._push_exit_callback(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator.
def enter_context(self, cm):
"""Enters the supplied context manager.
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with
# statement.
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper)
return callback # Allow use as a decorator
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods."""
_exit_wrapper = self._create_exit_wrapper(cm, cm_exit)
self._push_exit_callback(_exit_wrapper, True)
def _push_exit_callback(self, callback, is_sync=True):
self._exit_callbacks.append((is_sync, callback))
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(_BaseExitStack, AbstractContextManager):
"""Context manager for dynamic management of a stack of exit callbacks.
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception.
"""
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
is_sync, cb = self._exit_callbacks.pop()
assert is_sync
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
def close(self):
"""Immediately unwind the context stack."""
self.__exit__(None, None, None)
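# Illustrative pattern (mirrors the documented use of pop_all(); not part of the original
# module): acquire several resources and keep them open only if every acquisition succeeds.
#
#   def open_all(filenames):
#       with ExitStack() as stack:
#           files = [stack.enter_context(open(name)) for name in filenames]
#           # Success: transfer cleanup responsibility to the caller.
#           stack.pop_all()
#           return files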
# Inspired by discussions on https://bugs.python.org/issue29302
class AsyncExitStack(_BaseExitStack, AbstractAsyncContextManager):
"""Async context manager for dynamic management of a stack of exit
callbacks.
For example:
async with AsyncExitStack() as stack:
connections = [await stack.enter_async_context(get_connection())
for i in range(5)]
# All opened connections will automatically be released at the
# end of the async with statement, even if attempts to open a
# connection later in the list raise an exception.
"""
@staticmethod
def _create_async_exit_wrapper(cm, cm_exit):
return MethodType(cm_exit, cm)
@staticmethod
def _create_async_cb_wrapper(callback, *args, **kwds):
async def _exit_wrapper(exc_type, exc, tb):
await callback(*args, **kwds)
return _exit_wrapper
async def enter_async_context(self, cm):
"""Enters the supplied async context manager.
If successful, also pushes its __aexit__ method as a callback and
returns the result of the __aenter__ method.
"""
_cm_type = type(cm)
_exit = _cm_type.__aexit__
result = await _cm_type.__aenter__(cm)
self._push_async_cm_exit(cm, _exit)
return result
def push_async_exit(self, exit):
"""Registers a coroutine function with the standard __aexit__ method
signature.
Can suppress exceptions the same way __aexit__ method can.
Also accepts any object with an __aexit__ method (registering a call
to the method instead of the object itself).
"""
_cb_type = type(exit)
try:
exit_method = _cb_type.__aexit__
except AttributeError:
# Not an async context manager, so assume it's a coroutine function
self._push_exit_callback(exit, False)
else:
self._push_async_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def push_async_callback(self, callback, *args, **kwds):
"""Registers an arbitrary coroutine function and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper, False)
return callback # Allow use as a decorator
async def aclose(self):
"""Immediately unwind the context stack."""
await self.__aexit__(None, None, None)
def _push_async_cm_exit(self, cm, cm_exit):
"""Helper to correctly register coroutine function to __aexit__
method."""
_exit_wrapper = self._create_async_exit_wrapper(cm, cm_exit)
self._push_exit_callback(_exit_wrapper, False)
async def __aenter__(self):
return self
async def __aexit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
is_sync, cb = self._exit_callbacks.pop()
try:
if is_sync:
cb_suppress = cb(*exc_details)
else:
cb_suppress = await cb(*exc_details)
if cb_suppress:
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
class nullcontext(AbstractContextManager):
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
|
the-stack_0_15705 | from tkinter import *
root=Tk()
root.title("CAR RENTAL RECEIPT")
root.geometry('700x800')
#Labels
g1=Label(root, text="CAR RENTAL RECEIPT", font="Calibri 18 bold")
l1=Label(root, text="Date: ")
e1=Entry(root,width=30, borderwidth=2)
l2=Label(root, text="Receipt #: ")
e2=Entry(root,width=30, borderwidth=2)
l3=Label(root, text="Rental Company Info", font="Calibri 12 bold")
l3_1=Label(root, text="Company: ")
e3_1=Entry(root,width=30, borderwidth=2)
l3_2=Label(root, text="Representative: ")
e3_2=Entry(root,width=30, borderwidth=2)
l3_3=Label(root, text="Location: ")
e3_3=Entry(root,width=30, borderwidth=2)
l3_4=Label(root, text="City/State/ZIP: ")
e3_4=Entry(root,width=30, borderwidth=2)
l3_5=Label(root, text="Phone: ")
e3_5=Entry(root,width=30, borderwidth=2)
l4=Label(root, text="Lessee Info", font="Calibri 12 bold")
l4_1=Label(root, text="License: ")
e4_1=Entry(root,width=30, borderwidth=2)
l4_2=Label(root, text="Representative: ")
e4_2=Entry(root,width=30, borderwidth=2)
l4_3=Label(root, text="Address: ")
e4_3=Entry(root,width=30, borderwidth=2)
l4_4=Label(root, text="City/State/ZIP: ")
e4_4=Entry(root,width=30, borderwidth=2)
l4_5=Label(root, text="Phone: ")
e4_5=Entry(root,width=30, borderwidth=2)
g2=Label(root, text="Vehicle Information", font="Calibri 18 bold")
l5_1=Label(root, text="VIN: ")
e5_1=Entry(root,width=30, borderwidth=2)
l5_2=Label(root, text="Make: ")
e5_2=Entry(root,width=30, borderwidth=2)
l5_3=Label(root, text="Year: ")
e5_3=Entry(root,width=30, borderwidth=2)
l5_4=Label(root, text="Color: ")
e5_4=Entry(root,width=30, borderwidth=2)
l6_1=Label(root, text="Registration: ")
e6_1=Entry(root,width=30, borderwidth=2)
l6_2=Label(root, text="Model: ")
e6_2=Entry(root,width=30, borderwidth=2)
l6_3=Label(root, text="Mileage: ")
e6_3=Entry(root,width=30, borderwidth=2)
h1=Label(root, text="VIN", font="Calibri 12 bold")
h1_1=Entry(root,width=14, borderwidth=2)
h1_2=Entry(root,width=14, borderwidth=2)
h1_3=Entry(root,width=14, borderwidth=2)
h2=Label(root, text="Cost/Day", font="Calibri 12 bold")
h2_1=Entry(root,width=12, borderwidth=2)
h2_2=Entry(root,width=12, borderwidth=2)
h2_3=Entry(root,width=12, borderwidth=2)
h3=Label(root, text="# of Days", font="Calibri 12 bold")
h3_1=Entry(root,width=19, borderwidth=2)
h3_2=Entry(root,width=19, borderwidth=2)
h3_3=Entry(root,width=19, borderwidth=2)
h4=Label(root, text="Additional Costs", font="Calibri 12 bold")
h4_1=Entry(root,width=18, borderwidth=2)
h4_2=Entry(root,width=18, borderwidth=2)
h4_3=Entry(root,width=18, borderwidth=2)
h4l1=Label(root, text="Subtotal: ")
h4l2=Label(root, text="Tax (%): ")
h4l3=Label(root, text="Total: ")
h4l4=Label(root, text="Amount paid: ")
h4e1=Entry(root,width=8, borderwidth=2)
h4e2=Entry(root,width=9, borderwidth=2)
h4e3=Entry(root,width=10, borderwidth=2)
h4e4=Entry(root,width=4, borderwidth=2)
h5=Label(root, text="Line Total", font="Calibri 12 bold")
h5_1=Entry(root,width=16, borderwidth=2)
h5_2=Entry(root,width=16, borderwidth=2)
h5_3=Entry(root,width=16, borderwidth=2)
h5_4=Entry(root,width=16, borderwidth=2)
h5_5=Entry(root,width=16, borderwidth=2)
h5_6=Entry(root,width=16, borderwidth=2)
h5_7=Entry(root,width=16, borderwidth=2)
xl1=Label(root, text="Payment Method: ")
ck1=Checkbutton(root, text='Cash. ', onvalue=1, offvalue=0)
ck2=Checkbutton(root, text='Check No.: ', onvalue=1, offvalue=0)
ent1=Entry(root,width=31, borderwidth=2)
ck3=Checkbutton(root, text='Credit No.: ', onvalue=1, offvalue=0)
ent3=Entry(root,width=41, borderwidth=2)
ck4=Checkbutton(root, text='Other.: ', onvalue=1, offvalue=0)
ent4=Entry(root,width=44, borderwidth=2)
lasl1=Label(root, text="Authorized Signature: ", font="Calibri 10 bold")
lasl2=Label(root, text="Representative Name: ", font="Calibri 10")
lase1=Entry(root,width=22, borderwidth=2)
lase2=Entry(root,width=20, borderwidth=2)
#Positioning
l1.place(x=10, y=45)
e1.place(x=50, y=45)
l2.place(x=10, y=75)
e2.place(x=73, y=75)
l3.place(x=10, y=110)
l3_1.place(x=10, y=150)
l3_2.place(x=10, y=180)
l3_3.place(x=10, y=210)
l3_4.place(x=10, y=240)
l3_5.place(x=10, y=270)
e3_1.place(x=110, y=150)
e3_2.place(x=110, y=180)
e3_3.place(x=110, y=210)
e3_4.place(x=110, y=240)
e3_5.place(x=110, y=270)
l4.place(x=320, y=110)
l4_1.place(x=320, y=150)
l4_2.place(x=320, y=180)
l4_3.place(x=320, y=210)
l4_4.place(x=320, y=240)
l4_5.place(x=320, y=270)
e4_1.place(x=420, y=150)
e4_2.place(x=420, y=180)
e4_3.place(x=420, y=210)
e4_4.place(x=420, y=240)
e4_5.place(x=420, y=270)
g1.place(x=240, y=0)
g2.place(x=240, y=300)
l5_1.place(x=10, y=360)
l5_2.place(x=10, y=390)
l5_3.place(x=10, y=420)
l5_4.place(x=10, y=450)
e5_1.place(x=60, y=360)
e5_2.place(x=60, y=390)
e5_3.place(x=60, y=420)
e5_4.place(x=60, y=450)
l6_1.place(x=320, y=360)
l6_2.place(x=320, y=390)
l6_3.place(x=320, y=420)
e6_1.place(x=420, y=360)
e6_2.place(x=420, y=390)
e6_3.place(x=420, y=420)
h1.place(x=70, y=490)
h2.place(x=160, y=490)
h3.place(x=290, y=490)
h4.place(x=400, y=490)
h5.place(x=560, y=490)
h1_1.place(x=40, y=520)
h1_2.place(x=40, y=545)
h1_3.place(x=40, y=570)
h2_1.place(x=150, y=520)
h2_2.place(x=150, y=545)
h2_3.place(x=150, y=570)
h3_1.place(x=260, y=520)
h3_2.place(x=260, y=545)
h3_3.place(x=260, y=570)
h4_1.place(x=400, y=520)
h4_2.place(x=400, y=545)
h4_3.place(x=400, y=570)
h4l1.place(x=400, y=595)
h4l2.place(x=400, y=620)
h4l3.place(x=400, y=645)
h4l4.place(x=400, y=670)
h4e1.place(x=460, y=595)
h4e2.place(x=454, y=620)
h4e3.place(x=447, y=645)
h4e4.place(x=483, y=670)
h5_1.place(x=540, y=520)
h5_2.place(x=540, y=545)
h5_3.place(x=540, y=570)
h5_4.place(x=540, y=595)
h5_5.place(x=540, y=620)
h5_6.place(x=540, y=645)
h5_7.place(x=540, y=670)
xl1.place(x=40, y=595)
ck1.place(x=40, y=620)
ck2.place(x=100, y=620)
ck3.place(x=40, y=645)
ck4.place(x=40, y=670)
ent1.place(x=188, y=623)
ent3.place(x=128, y=649)
ent4.place(x=110, y=675)
lasl1.place(x=380, y=710)
lasl2.place(x=390, y=735)
lase1.place(x=505, y=710)
lase2.place(x=518, y=735)
root.mainloop() |
the-stack_0_15706 | # Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from openstack.tests.functional.cloud import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container-infra')),
('neutron', dict(env='NEUTRON', service='network')),
('octavia', dict(env='OCTAVIA', service='load-balancer')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get(
'OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.user_cloud.has_service(self.service))
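# Illustrative reading of the guard above: for the 'heat' scenario env='HEAT', so the
# 'orchestration' service check only runs when OPENSTACKSDK_HAS_HEAT=1 is exported.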
class TestKeystoneVersion(base.BaseFunctionalTestCase):
def test_keystone_version(self):
use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False)
if use_keystone_v2 and use_keystone_v2 != '0':
self.assertEqual('2.0', self.identity_version)
else:
self.assertEqual('3', self.identity_version)
|
the-stack_0_15709 | # -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
with open('.meta/packages') as reqs:
install_requires = reqs.read().split('\n')
setup(
name='rpihelper',
version='0.0.3',
author='Nikita Grishko',
author_email='[email protected]',
url='https://github.com/Gr1N/rpihelper',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
scripts=[
'bin/rqscheduletasks',
],
)
|
the-stack_0_15710 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyxmlescpos',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='0.1.0',
description='Print XML-defined Receipts on ESC/POS Receipt Printers',
long_description=long_description,
# The project's main homepage.
url='https://github.com/fvdsn/py-xml-escpos',
download_url = 'https://github.com/fvdsn/py-xml-escpos/tarball/0.1.0',
# Author details
author='Frédéric van der Essen & Manuel F Martinez',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Printing',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='printing receipt xml escpos',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['pyusb', 'qrcode'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
# extras_require = {
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
the-stack_0_15711 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_get_operations_async.py
DESCRIPTION:
This sample demonstrates how to list/get all document model operations (succeeded, in-progress, failed)
associated with the Form Recognizer resource. Kinds of operations returned are "documentModelBuild",
"documentModelCompose", and "documentModelCopyTo". Note that operation information only persists for
24 hours. If the operation was successful, the document model can be accessed using get_model or list_models APIs.
USAGE:
python sample_get_operations_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
import asyncio
async def sample_get_operations_async():
# [START list_operations_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
async with document_model_admin_client:
operations = document_model_admin_client.list_operations()
print("The following document model operations exist under my resource:")
async for operation in operations:
print("\nOperation ID: {}".format(operation.operation_id))
print("Operation kind: {}".format(operation.kind))
print("Operation status: {}".format(operation.status))
print("Operation percent completed: {}".format(operation.percent_completed))
print("Operation created on: {}".format(operation.created_on))
print("Operation last updated on: {}".format(operation.last_updated_on))
print("Resource location of successful operation: {}".format(operation.resource_location))
# [END list_operations_async]
# [START get_operation_async]
# Get an operation by ID
try:
first_operation = await operations.__anext__()
print("\nGetting operation info by ID: {}".format(first_operation.operation_id))
operation_info = await document_model_admin_client.get_operation(first_operation.operation_id)
if operation_info.status == "succeeded":
print("My {} operation is completed.".format(operation_info.kind))
result = operation_info.result
print("Model ID: {}".format(result.model_id))
elif operation_info.status == "failed":
print("My {} operation failed.".format(operation_info.kind))
error = operation_info.error
print("{}: {}".format(error.code, error.message))
else:
print("My operation status is {}".format(operation_info.status))
except StopAsyncIteration:
print("No operations found.")
# [END get_operation_async]
async def main():
await sample_get_operations_async()
if __name__ == '__main__':
asyncio.run(main())
|
the-stack_0_15714 | import numpy as np
from torch import nn
import torch
from encoder.params_model import *
from encoder.params_data import *
from encoder.data_objects.iemocap_dataset import emo_categories
class EmoEncoder(nn.Module):
def __init__(self, device):
super().__init__()
self.device = device
self.lstm = nn.LSTM(input_size=mel_n_channels,
hidden_size=model_hidden_size,
num_layers=model_num_layers,
batch_first=True).to(device)
self.linear = nn.Linear(in_features=model_hidden_size,
out_features=model_embedding_size).to(device)
self.relu = torch.nn.ReLU().to(device)
self.linear_cls = nn.Linear(in_features=model_embedding_size,
out_features=len(emo_categories)).to(device)
def forward(self, utterances, hidden_init=None):
"""
Computes the embeddings of a batch of utterance spectrograms.
:param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
(batch_size, n_frames, n_channels)
:param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
batch_size, hidden_size). Will default to a tensor of zeros if None.
:return: the embeddings as a tensor of shape (batch_size, embedding_size)
"""
# Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
# and the final cell state.
out, (hidden, cell) = self.lstm(utterances, hidden_init)
# We take only the hidden state of the last layer
embeds_raw = self.relu(self.linear(hidden[-1]))
# L2-normalize it
embeds = embeds_raw / (torch.norm(embeds_raw, dim=1, keepdim=True) + 1e-5)
pred = self.linear_cls(embeds)
return embeds, pred
class StackedBiLSTMEmoEncoder(nn.Module):
def __init__(self, device):
super(StackedBiLSTMEmoEncoder, self).__init__()
self.device = device
self.lstm1 = nn.LSTM(input_size=mel_n_channels,
hidden_size=512,
bidirectional=True,
batch_first=True).to(device)
self.lstm2 = nn.LSTM(input_size=1024,
hidden_size=256,
bidirectional=True,
batch_first=True).to(device)
self.linear = nn.Linear(in_features=512,
out_features=512).to(device)
self.tanh = nn.Tanh().to(device)
self.linear_cls = nn.Linear(in_features=512,
out_features=len(emo_categories)).to(device)
def forward(self, utterances, hidden_init=None):
o, _ = self.lstm1(utterances, hidden_init)
o, (h, c) = self.lstm2(o)
# Take the hidden state of last layers and concatenate the two directions
h = torch.transpose(h[-2:], 0, 1)
h = h.reshape([h.shape[0], -1])
embeds = self.tanh(self.linear(h))
pred = self.linear_cls(embeds)
return embeds, pred
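# Minimal usage sketch (not from the original file): runs a dummy batch through
# StackedBiLSTMEmoEncoder to show the expected shapes. The batch size and frame
# count are arbitrary; mel_n_channels comes from the params_data star import.
if __name__ == "__main__":
    _device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _encoder = StackedBiLSTMEmoEncoder(_device)
    # batch of 4 utterances, 160 frames, mel_n_channels filterbank channels
    _dummy = torch.randn(4, 160, mel_n_channels, device=_device)
    _embeds, _pred = _encoder(_dummy)
    # _embeds: (4, 512) utterance embeddings, _pred: (4, len(emo_categories)) logits
    print(_embeds.shape, _pred.shape)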
|
the-stack_0_15716 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from fcos_core.layers import smooth_l1_loss
from fcos_core.modeling.box_coder import BoxCoder
from fcos_core.modeling.matcher import Matcher
from fcos_core.structures.boxlist_ops import boxlist_iou
from fcos_core.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from fcos_core.modeling.utils import cat
class FastRCNNLossComputation(torch.nn.Module):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(
self,
proposal_matcher,
fg_bg_sampler,
box_coder,
cls_agnostic_bbox_reg=False,
classification_loss_type='CE',
num_classes=81,
attribute_on=False,
boundingbox_loss_type='SL1',
cfg=None,
):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
super().__init__()
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
self.attribute_on = attribute_on
        self.classification_loss_type = classification_loss_type
        self.num_classes = num_classes
if self.classification_loss_type == 'CE':
self._classifier_loss = F.cross_entropy
elif self.classification_loss_type == 'BCE':
from qd.qd_pytorch import BCEWithLogitsNegLoss
self._classifier_loss = BCEWithLogitsNegLoss()
elif self.classification_loss_type.startswith('IBCE'):
param = map(float, self.classification_loss_type[4:].split('_'))
from qd.qd_pytorch import IBCEWithLogitsNegLoss
self._classifier_loss = IBCEWithLogitsNegLoss(*param)
elif self.classification_loss_type == 'MCEB':
from qd.qd_pytorch import MCEBLoss
self._classifier_loss = MCEBLoss()
elif self.classification_loss_type == 'tree':
tree_file = cfg.MODEL.ROI_BOX_HEAD.TREE_0_BKG
from mtorch.softmaxtree_loss import SoftmaxTreeWithLoss
self._classifier_loss = SoftmaxTreeWithLoss(
tree_file,
ignore_label=-1, # this is dummy value since this will not happend
loss_weight=1,
valid_normalization=True,
)
self.copied_fields = ["labels"]
if self.attribute_on:
self.copied_fields.append("attributes")
assert boundingbox_loss_type == 'SL1'
def create_all_bkg_labels(self, num, device):
if self.classification_loss_type in ['CE', 'tree']:
return torch.zeros(num,
dtype=torch.float32,
device=device)
elif self.classification_loss_type in ['BCE'] or \
self.classification_loss_type.startswith('IBCE'):
return torch.zeros((num, self.num_classes),
dtype=torch.float32,
device=device)
else:
raise NotImplementedError(self.classification_loss_type)
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields(self.copied_fields)
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
if len(target) == 0:
dummy_bbox = torch.zeros((len(matched_idxs), 4),
dtype=torch.float32, device=matched_idxs.device)
            from fcos_core.structures.bounding_box import BoxList
matched_targets = BoxList(dummy_bbox, target.size, target.mode)
matched_targets.add_field('labels', self.create_all_bkg_labels(
len(matched_idxs), matched_idxs.device))
matched_targets.add_field('tightness', torch.zeros(len(matched_idxs),
device=matched_idxs.device))
matched_targets.add_field(
'attributes',
torch.zeros((len(matched_idxs), 1),
device=matched_idxs.device))
else:
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
attributes = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
if self.attribute_on:
attributes_per_image = matched_targets.get_field("attributes")
attributes_per_image = attributes_per_image.to(dtype=torch.int64)
if len(targets_per_image) > 0:
# Label background (below the low threshold)
# attribute 0 is ignored in the loss
attributes_per_image[bg_inds,:] = 0
# Label ignore proposals (between low and high thresholds)
attributes_per_image[ignore_inds,:] = 0
# return attributes
attributes.append(attributes_per_image)
else:
attributes.append([])
#return labels, regression_targets
result = {
'labels': labels,
'regression_targets': regression_targets,
}
if self.attribute_on:
result['attributes'] = attributes
return result
def subsample(self, proposals, targets):
"""
This method performs the positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
prepare_result = self.prepare_targets(proposals, targets)
labels = prepare_result['labels']
regression_targets = prepare_result['regression_targets']
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
proposals = list(proposals)
# add corresponding label and regression_targets information to the bounding boxes
for i, (labels_per_image, regression_targets_per_image,
proposals_per_image) in enumerate(zip(
labels, regression_targets, proposals
)):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field(
"regression_targets", regression_targets_per_image
)
if self.attribute_on:
# add attributes labels
attributes_per_image = prepare_result['attributes'][i]
proposals_per_image.add_field(
"attributes", attributes_per_image
)
# distributed sampled proposals, that were obtained on all feature maps
# concatenated via the fg_bg_sampler, into individual feature map levels
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
zip(sampled_pos_inds, sampled_neg_inds)
):
img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img,
as_tuple=False).squeeze(1)
proposals_per_image = proposals[img_idx][img_sampled_inds]
proposals[img_idx] = proposals_per_image
self._proposals = proposals
return proposals
def forward(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = self._classifier_loss(class_logits, labels)
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset = torch.nonzero(labels > 0, as_tuple=False).squeeze(1)
labels_pos = labels[sampled_pos_inds_subset]
if self.cls_agnostic_bbox_reg:
map_inds = torch.tensor([4, 5, 6, 7], device=device)
else:
map_inds = 4 * labels_pos[:, None] + torch.tensor(
[0, 1, 2, 3], device=device)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds_subset],
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
attribute_on = cfg.MODEL.ATTRIBUTE_ON
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
classification_loss_type = cfg.MODEL.ROI_BOX_HEAD.CLASSIFICATION_LOSS
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
loss_evaluator = FastRCNNLossComputation(
matcher,
fg_bg_sampler,
box_coder,
cls_agnostic_bbox_reg,
classification_loss_type,
num_classes,
attribute_on=attribute_on,
boundingbox_loss_type=cfg.MODEL.ROI_BOX_HEAD.BOUNDINGBOX_LOSS_TYPE,
cfg=cfg,
)
return loss_evaluator
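# Typical call order (sketch, not part of the original file): the evaluator is
# stateful, so subsample() must be called before the loss is computed.
#   loss_evaluator = make_roi_box_loss_evaluator(cfg)
#   proposals = loss_evaluator.subsample(proposals, targets)      # BoxList batches
#   class_logits, box_regression = box_head_predictor(features)   # hypothetical box head
#   loss_classifier, loss_box_reg = loss_evaluator(class_logits, box_regression)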
|
the-stack_0_15717 | import unittest
import sys
try:
import aula1_resp as aula1
except ImportError:
    print('Error: the file aula1_resp.py was not found')
sys.exit(1)
MAX_PRIMES = 10000
def primes_sieve(limit):
limitn = limit+1
not_prime = [False] * limitn
primes = []
for i in range(2, limitn):
if not_prime[i]:
continue
for f in range(i*2, limitn, i):
not_prime[f] = True
primes.append(i)
return primes
def fibonacci(n):
a, b = 0, 1
for i in range(n):
a, b = b, a+b
return a
def factorial(n):
for i in range(2, n):
n *= i
return n
class TesteAula1(unittest.TestCase):
@unittest.skipIf('is_prime' not in vars(aula1),
                     'Function "is_prime" was not found')
def test_is_prime(self):
primes = primes_sieve(MAX_PRIMES)
for i in range(1, MAX_PRIMES):
if aula1.is_prime(i):
self.assertIn(i, primes)
else:
self.assertNotIn(i, primes)
@unittest.skipIf('fibonacci' not in vars(aula1),
                     'Function "fibonacci" was not found')
def test_fibonacci(self):
for i in range(0, 30):
self.assertEqual(fibonacci(i), aula1.fibonacci(i))
@unittest.skipIf('factorial' not in vars(aula1),
                     'Function "factorial" was not found')
def test_factorial(self):
for i in range(1, 70):
self.assertEqual(factorial(i), aula1.factorial(i))
if __name__ == '__main__':
unittest.main(verbosity=2) |
the-stack_0_15718 | # encoding: utf-8
from __future__ import unicode_literals, absolute_import
import os
import sys
import locale
from itertools import chain
from six import iterkeys, iteritems
from six.moves.configparser import ConfigParser
from .autocomplete import SIMPLE as default_completion, ALL_MODES
class Struct(object):
"""Simple class for instantiating objects we can add arbitrary attributes
to and use for various arbitrary things."""
def getpreferredencoding():
"""Get the user's preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def can_encode(c):
try:
c.encode(getpreferredencoding())
return True
except UnicodeEncodeError:
return False
def supports_box_chars():
"""Check if the encoding supports Unicode box characters."""
return all(map(can_encode, "│─└┘┌┐"))
def get_config_home():
"""Returns the base directory for bpython's configuration files."""
xdg_config_home = os.environ.get("XDG_CONFIG_HOME", "~/.config")
return os.path.join(xdg_config_home, "bpython")
def default_config_path():
"""Returns bpython's default configuration file path."""
return os.path.join(get_config_home(), "config")
def fill_config_with_default_values(config, default_values):
for section in iterkeys(default_values):
if not config.has_section(section):
config.add_section(section)
for (opt, val) in iteritems(default_values[section]):
if not config.has_option(section, opt):
config.set(section, opt, "%s" % (val,))
def loadini(struct, configfile):
"""Loads .ini configuration file and stores its values in struct"""
config_path = os.path.expanduser(configfile)
config = ConfigParser()
defaults = {
"general": {
"arg_spec": True,
"auto_display_list": True,
"autocomplete_mode": default_completion,
"color_scheme": "default",
"complete_magic_methods": True,
"dedent_after": 1,
"default_autoreload": False,
"editor": os.environ.get("VISUAL", os.environ.get("EDITOR", "vi")),
"flush_output": True,
"highlight_show_source": True,
"hist_duplicates": True,
"hist_file": "~/.pythonhist",
"hist_length": 1000,
"paste_time": 0.02,
"pastebin_confirm": True,
"pastebin_expiry": "1week",
"pastebin_helper": "",
"pastebin_url": "https://bpaste.net",
"save_append_py": False,
"single_undo_time": 1.0,
"syntax": True,
"tab_length": 4,
"unicode_box": True,
},
"keyboard": {
"backspace": "C-h",
"beginning_of_line": "C-a",
"clear_line": "C-u",
"clear_screen": "C-l",
"clear_word": "C-w",
"copy_clipboard": "F10",
"cut_to_buffer": "C-k",
"delete": "C-d",
"down_one_line": "C-n",
"edit_config": "F3",
"edit_current_block": "C-x",
"end_of_line": "C-e",
"exit": "",
"external_editor": "F7",
"help": "F1",
"incremental_search": "M-s",
"last_output": "F9",
"left": "C-b",
"pastebin": "F8",
"redo": "C-g",
"reimport": "F6",
"reverse_incremental_search": "M-r",
"right": "C-f",
"save": "C-s",
"search": "C-o",
"show_source": "F2",
"suspend": "C-z",
"toggle_file_watch": "F5",
"transpose_chars": "C-t",
"undo": "C-r",
"up_one_line": "C-p",
"yank_from_buffer": "C-y",
},
"cli": {"suggestion_width": 0.8, "trim_prompts": False,},
"curtsies": {"list_above": False, "right_arrow_completion": True,},
}
default_keys_to_commands = dict(
(value, key) for (key, value) in iteritems(defaults["keyboard"])
)
fill_config_with_default_values(config, defaults)
try:
if not config.read(config_path):
# No config file. If the user has it in the old place then complain
if os.path.isfile(os.path.expanduser("~/.bpython.ini")):
sys.stderr.write(
"Error: It seems that you have a config file at "
"~/.bpython.ini. Please move your config file to "
"%s\n" % default_config_path()
)
sys.exit(1)
except UnicodeDecodeError as e:
sys.stderr.write(
"Error: Unable to parse config file at '{}' due to an "
"encoding issue. Please make sure to fix the encoding "
"of the file or remove it and then try again.\n".format(config_path)
)
sys.exit(1)
def get_key_no_doublebind(command):
default_commands_to_keys = defaults["keyboard"]
requested_key = config.get("keyboard", command)
try:
default_command = default_keys_to_commands[requested_key]
if default_commands_to_keys[default_command] == config.get(
"keyboard", default_command
):
setattr(struct, "%s_key" % default_command, "")
except KeyError:
pass
return requested_key
struct.config_path = config_path
struct.dedent_after = config.getint("general", "dedent_after")
struct.tab_length = config.getint("general", "tab_length")
struct.auto_display_list = config.getboolean("general", "auto_display_list")
struct.syntax = config.getboolean("general", "syntax")
struct.arg_spec = config.getboolean("general", "arg_spec")
struct.paste_time = config.getfloat("general", "paste_time")
struct.single_undo_time = config.getfloat("general", "single_undo_time")
struct.highlight_show_source = config.getboolean(
"general", "highlight_show_source"
)
struct.hist_file = config.get("general", "hist_file")
struct.editor = config.get("general", "editor")
struct.hist_length = config.getint("general", "hist_length")
struct.hist_duplicates = config.getboolean("general", "hist_duplicates")
struct.flush_output = config.getboolean("general", "flush_output")
struct.default_autoreload = config.getboolean(
"general", "default_autoreload"
)
struct.pastebin_key = get_key_no_doublebind("pastebin")
struct.copy_clipboard_key = get_key_no_doublebind("copy_clipboard")
struct.save_key = get_key_no_doublebind("save")
struct.search_key = get_key_no_doublebind("search")
struct.show_source_key = get_key_no_doublebind("show_source")
struct.suspend_key = get_key_no_doublebind("suspend")
struct.toggle_file_watch_key = get_key_no_doublebind("toggle_file_watch")
struct.undo_key = get_key_no_doublebind("undo")
struct.redo_key = get_key_no_doublebind("redo")
struct.reimport_key = get_key_no_doublebind("reimport")
struct.reverse_incremental_search_key = get_key_no_doublebind(
"reverse_incremental_search"
)
struct.incremental_search_key = get_key_no_doublebind("incremental_search")
struct.up_one_line_key = get_key_no_doublebind("up_one_line")
struct.down_one_line_key = get_key_no_doublebind("down_one_line")
struct.cut_to_buffer_key = get_key_no_doublebind("cut_to_buffer")
struct.yank_from_buffer_key = get_key_no_doublebind("yank_from_buffer")
struct.clear_word_key = get_key_no_doublebind("clear_word")
struct.backspace_key = get_key_no_doublebind("backspace")
struct.clear_line_key = get_key_no_doublebind("clear_line")
struct.clear_screen_key = get_key_no_doublebind("clear_screen")
struct.delete_key = get_key_no_doublebind("delete")
struct.left_key = get_key_no_doublebind("left")
struct.right_key = get_key_no_doublebind("right")
struct.end_of_line_key = get_key_no_doublebind("end_of_line")
struct.beginning_of_line_key = get_key_no_doublebind("beginning_of_line")
struct.transpose_chars_key = get_key_no_doublebind("transpose_chars")
struct.exit_key = get_key_no_doublebind("exit")
struct.last_output_key = get_key_no_doublebind("last_output")
struct.edit_config_key = get_key_no_doublebind("edit_config")
struct.edit_current_block_key = get_key_no_doublebind("edit_current_block")
struct.external_editor_key = get_key_no_doublebind("external_editor")
struct.help_key = get_key_no_doublebind("help")
struct.pastebin_confirm = config.getboolean("general", "pastebin_confirm")
struct.pastebin_url = config.get("general", "pastebin_url")
struct.pastebin_expiry = config.get("general", "pastebin_expiry")
struct.pastebin_helper = config.get("general", "pastebin_helper")
struct.cli_suggestion_width = config.getfloat("cli", "suggestion_width")
struct.cli_trim_prompts = config.getboolean("cli", "trim_prompts")
struct.complete_magic_methods = config.getboolean(
"general", "complete_magic_methods"
)
struct.autocomplete_mode = config.get("general", "autocomplete_mode")
struct.save_append_py = config.getboolean("general", "save_append_py")
struct.curtsies_list_above = config.getboolean("curtsies", "list_above")
struct.curtsies_right_arrow_completion = config.getboolean(
"curtsies", "right_arrow_completion"
)
color_scheme_name = config.get("general", "color_scheme")
default_colors = {
"keyword": "y",
"name": "c",
"comment": "b",
"string": "m",
"error": "r",
"number": "G",
"operator": "Y",
"punctuation": "y",
"token": "C",
"background": "d",
"output": "w",
"main": "c",
"paren": "R",
"prompt": "c",
"prompt_more": "g",
"right_arrow_suggestion": "K",
}
if color_scheme_name == "default":
struct.color_scheme = default_colors
else:
struct.color_scheme = dict()
theme_filename = color_scheme_name + ".theme"
path = os.path.expanduser(
os.path.join(get_config_home(), theme_filename)
)
try:
load_theme(struct, path, struct.color_scheme, default_colors)
except EnvironmentError:
sys.stderr.write(
"Could not load theme '%s'.\n" % (color_scheme_name,)
)
sys.exit(1)
# expand path of history file
struct.hist_file = os.path.expanduser(struct.hist_file)
# verify completion mode
if struct.autocomplete_mode not in ALL_MODES:
struct.autocomplete_mode = default_completion
# set box drawing characters
if config.getboolean("general", "unicode_box") and supports_box_chars():
struct.left_border = "│"
struct.right_border = "│"
struct.top_border = "─"
struct.bottom_border = "─"
struct.left_bottom_corner = "└"
struct.right_bottom_corner = "┘"
struct.left_top_corner = "┌"
struct.right_top_corner = "┐"
else:
struct.left_border = "|"
struct.right_border = "|"
struct.top_border = "-"
struct.bottom_border = "-"
struct.left_bottom_corner = "+"
struct.right_bottom_corner = "+"
struct.left_top_corner = "+"
struct.right_top_corner = "+"
def load_theme(struct, path, colors, default_colors):
theme = ConfigParser()
with open(path, "r") as f:
theme.readfp(f)
for k, v in chain(theme.items("syntax"), theme.items("interface")):
if theme.has_option("syntax", k):
colors[k] = theme.get("syntax", k)
else:
colors[k] = theme.get("interface", k)
# Check against default theme to see if all values are defined
for k, v in iteritems(default_colors):
if k not in colors:
colors[k] = v
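# Minimal illustrative use (not part of the original module): load the defaults
# into a Struct. With no config file present, loadini simply keeps the default
# values defined above.
if __name__ == "__main__":
    _config = Struct()
    loadini(_config, default_config_path())
    print(_config.autocomplete_mode, _config.color_scheme["keyword"])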
|
the-stack_0_15719 | #!/usr/bin/env python
# This will try to import setuptools. If not here, it will reach for the embedded
# ez_setup (or the ez_setup package). If none, it fails with a message
import sys
from codecs import open
try:
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
except ImportError:
try:
import ez_setup
ez_setup.use_setuptools()
except ImportError:
raise ImportError('MoviePy could not be installed, probably because'
' neither setuptools nor ez_setup are installed on this computer.'
'\nInstall ez_setup ([sudo] pip install ez_setup) and try again.')
class PyTest(TestCommand):
"""Handle test execution from setup."""
user_options = [('pytest-args=', 'a', "Arguments to pass into pytest")]
def initialize_options(self):
"""Initialize the PyTest options."""
TestCommand.initialize_options(self)
self.pytest_args = ""
def finalize_options(self):
"""Finalize the PyTest options."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Run the PyTest testing suite."""
try:
import pytest
except ImportError:
raise ImportError('Running tests requires additional dependencies.'
'\nPlease run (pip install moviepy[test])')
errno = pytest.main(self.pytest_args.split(" "))
sys.exit(errno)
cmdclass = {'test': PyTest} # Define custom commands.
if 'build_docs' in sys.argv:
try:
from sphinx.setup_command import BuildDoc
except ImportError:
raise ImportError('Running the documenation builds has additional'
' dependencies. Please run (pip install moviepy[docs])')
cmdclass['build_docs'] = BuildDoc
__version__ = None # Explicitly set version to quieten static code checkers.
exec(open('moviepy/version.py').read()) # loads __version__
# Define the requirements for specific execution needs.
requires = [
'decorator>=4.0.2,<5.0',
"imageio>=2.5,<3.0; python_version>='3.4'",
"imageio>=2.0,<2.5; python_version<'3.4'",
"imageio_ffmpeg>=0.2.0; python_version>='3.4'",
'tqdm>=4.11.2,<5.0',
'numpy',
'requests>=2.8.1,<3.0',
'proglog<=1.0.0'
]
optional_reqs = [
"opencv-python>=3.0,<4.0; python_version!='2.7'",
"scikit-image>=0.13.0,<1.0; python_version>='3.4'",
"scikit-learn; python_version>='3.4'",
"scipy>=0.19.0,<1.0; python_version!='3.3'",
"matplotlib>=2.0.0,<3.0; python_version>='3.4'",
"youtube_dl"
]
doc_reqs = [
"pygame>=1.9.3,<2.0; python_version!='3.3'",
'numpydoc>=0.6.0,<1.0',
'sphinx_rtd_theme>=0.1.10b0,<1.0',
'Sphinx>=1.5.2,<2.0',
]
test_reqs = [
'coverage<5.0',
'coveralls>=1.1,<2.0',
'pytest-cov>=2.5.1,<3.0',
'pytest>=3.0.0,<4.0',
'requests>=2.8.1,<3.0'
]
extra_reqs = {
"optional": optional_reqs,
"doc": doc_reqs,
"test": test_reqs
}
# Load the README.
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='moviepy',
version=__version__,
author='Zulko 2017',
description='Video editing with Python',
long_description=readme,
url='https://zulko.github.io/moviepy/',
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Multimedia :: Video :: Conversion',
],
keywords='video editing audio compositing ffmpeg',
packages=find_packages(exclude=['docs', 'tests']),
cmdclass=cmdclass,
command_options={
'build_docs': {
'build_dir': ('setup.py', './docs/build'),
'config_dir': ('setup.py', './docs'),
'version': ('setup.py', __version__.rsplit('.', 2)[0]),
'release': ('setup.py', __version__)}},
tests_require=test_reqs,
install_requires=requires,
extras_require=extra_reqs,
)
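# Example invocations for this setup script (illustrative sketch; the extras
# correspond to the extras_require groups defined above):
#   pip install .[optional,test]   # install MoviePy with optional and test extras
#   python setup.py test           # run the PyTest suite via the custom command
#   python setup.py build_docs     # build the Sphinx docs (needs the doc extras)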
|
the-stack_0_15720 | import os
import random
from collections import namedtuple
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
import h5py
from lilanet.datasets.transforms import Compose, RandomHorizontalFlip, Normalize
class DENSE(data.Dataset):
"""`DENSE LiDAR`_ Dataset.
Args:
root (string): Root directory of the ``lidar_2d`` and ``ImageSet`` folder.
split (string, optional): Select the split to use, ``train``, ``val`` or ``all``
transform (callable, optional): A function/transform that takes in distance, reflectivity
and target tensors and returns a transformed version.
"""
    # TODO: take a closer look at this class section
Class = namedtuple('Class', ['name', 'id', 'color'])
classes = [
Class('unknown', 0, (0, 0, 0)),
Class('car', 1, (0, 0, 142)),
Class('pedestrian', 2, (220, 20, 60)),
Class('cyclist', 3, (119, 11, 32)),
]
def __init__(self, root, split='train', transform=None):
self.root = os.path.expanduser(root)
self.lidar_path = os.path.join(self.root, 'lidar_2d')
self.split = os.path.join(self.root, '{}_01'.format(split))
self.transform = transform
self.lidar = []
if split not in ['train', 'val', 'all']:
raise ValueError('Invalid split! Use split="train", split="val" or split="all"')
self.lidar = [os.path.join(r,file) for r,d,f in os.walk(self.split) for file in f]
def __getitem__(self, index):
with h5py.File(self.lidar[index], "r", driver='core') as hdf5:
# for channel in self.channels:
distance_1 = hdf5.get('distance_m_1')[()]
reflectivity_1 = hdf5.get('intensity_1')[()]
label_1 = hdf5.get('labels_1')[()]
#Label transformation is necessary to have contiguous labeling
label_dict= {0:0, 100:1, 101:2, 102:3}
label_1 = np.vectorize(label_dict.get)(label_1)
distance = torch.as_tensor(distance_1.astype(np.float32, copy=False)).contiguous()
reflectivity = torch.as_tensor(reflectivity_1.astype(np.float32, copy=False)).contiguous()
label = torch.as_tensor(label_1.astype(np.float32, copy=False)).contiguous()
# distance = torch.as_tensor(distance_1.astype(np.float32, copy=False))
# reflectivity = torch.as_tensor(reflectivity_1.astype(np.float32, copy=False))
# label = torch.as_tensor(label_1.astype(np.float32, copy=False))
# print("label: '{}'".format(label))
if self.transform:
distance, reflectivity, label = self.transform(distance, reflectivity, label)
return distance, reflectivity, label
def __len__(self):
return len(self.lidar)
@staticmethod
def num_classes():
return len(DENSE.classes)
@staticmethod
def mean():
return [0.21, 12.12]
@staticmethod
def std():
return [0.16, 12.32]
@staticmethod
def class_weights():
return torch.tensor([1 / 15.0, 1.0, 10.0, 10.0])
@staticmethod
def get_colormap():
cmap = torch.zeros([256, 3], dtype=torch.uint8)
for cls in DENSE.classes:
cmap[cls.id, :] = torch.tensor(cls.color, dtype=torch.uint8)
return cmap
if __name__ == '__main__':
import matplotlib.pyplot as plt
joint_transforms = Compose([
RandomHorizontalFlip(),
Normalize(mean=DENSE.mean(), std=DENSE.std())
])
def _normalize(x):
return (x - x.min()) / (x.max() - x.min())
def visualize_seg(label_map, one_hot=False):
if one_hot:
label_map = np.argmax(label_map, axis=-1)
out = np.zeros((label_map.shape[0], label_map.shape[1], 3))
for l in range(1, DENSE.num_classes()):
mask = label_map == l
out[mask, 0] = np.array(DENSE.classes[l].color[1])
out[mask, 1] = np.array(DENSE.classes[l].color[0])
out[mask, 2] = np.array(DENSE.classes[l].color[2])
return out
dataset = DENSE('../../data/DENSE', transform=joint_transforms)
distance, reflectivity, label = random.choice(dataset)
print('Distance size: ', distance.size())
print('Reflectivity size: ', reflectivity.size())
print('Label size: ', label.size())
distance_map = Image.fromarray((255 * _normalize(distance.numpy())).astype(np.uint8))
reflectivity_map = Image.fromarray((255 * _normalize(reflectivity.numpy())).astype(np.uint8))
label_map = Image.fromarray((255 * visualize_seg(label.numpy())).astype(np.uint8))
blend_map = Image.blend(distance_map.convert('RGBA'), label_map.convert('RGBA'), alpha=0.4)
plt.figure(figsize=(10, 5))
plt.subplot(221)
plt.title("Distance")
plt.imshow(distance_map)
plt.subplot(222)
plt.title("Reflectivity")
plt.imshow(reflectivity_map)
plt.subplot(223)
plt.title("Label")
plt.imshow(label_map)
plt.subplot(224)
plt.title("Result")
plt.imshow(blend_map)
plt.show()
|
the-stack_0_15726 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
# pylint: disable=line-too-long, too-many-locals, too-many-statements
def load_command_table(self, _):
from ._client_factory import (
cf_alert_rules, cf_metrics, cf_metric_def, cf_alert_rule_incidents, cf_log_profiles, cf_autoscale,
cf_diagnostics, cf_activity_log, cf_action_groups, cf_activity_log_alerts, cf_event_categories)
from ._exception_handler import monitor_exception_handler, missing_resource_handler
from .transformers import (action_group_list_table)
action_group_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.action_groups_operations#ActionGroupsOperations.{}',
client_factory=cf_action_groups)
action_group_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.action_groups#{}',
client_factory=cf_action_groups)
activity_log_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.activity_log#{}',
client_factory=cf_activity_log)
activity_log_alerts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.activity_log_alerts_operations#ActivityLogAlertsOperations.{}',
client_factory=cf_activity_log_alerts)
activity_log_alerts_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.activity_log_alerts#{}',
client_factory=cf_activity_log_alerts)
alert_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.alert_rules_operations#AlertRulesOperations.{}',
client_factory=cf_alert_rules)
alert_rule_incidents_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.alert_rule_incidents_operations#AlertRuleIncidentsOperations.{}',
client_factory=cf_alert_rule_incidents)
autoscale_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.autoscale_settings_operations#AutoscaleSettingsOperations.{}',
client_factory=cf_autoscale)
autoscale_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.autoscale_settings#{}',
client_factory=cf_autoscale)
diagnostics_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.diagnostic_settings_operations#DiagnosticSettingsOperations.{}',
client_factory=cf_diagnostics)
diagnostics_categories_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.diagnostic_settings_category_operations#DiagnosticSettingsCategoryOperations.{}',
client_factory=cf_diagnostics)
diagnostics_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.diagnostics_settings#{}',
client_factory=cf_diagnostics)
log_profiles_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.log_profiles_operations#LogProfilesOperations.{}',
client_factory=cf_log_profiles)
metric_operations_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.metrics_operations#MetricsOperations.{}',
client_factory=cf_metrics)
alert_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.monitor.operations.metric_alert#{}',
client_factory=cf_alert_rules)
metric_definitions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.monitor.operations.metric_definitions_operations#MetricDefinitionsOperations.{}',
client_factory=cf_metric_def)
with self.command_group('monitor action-group', action_group_sdk, custom_command_type=action_group_custom) as g:
g.command('show', 'get', table_transformer=action_group_list_table)
g.command('create', 'create_or_update', table_transformer=action_group_list_table)
g.command('delete', 'delete')
g.command('enable-receiver', 'enable_receiver', table_transformer=action_group_list_table, exception_handler=monitor_exception_handler)
g.custom_command('list', 'list_action_groups', table_transformer=action_group_list_table)
g.generic_update_command('update', custom_func_name='update_action_groups', setter_arg_name='action_group',
table_transformer=action_group_list_table, exception_handler=monitor_exception_handler)
with self.command_group('monitor activity-log', activity_log_custom) as g:
g.command('list', 'list_activity_log')
g.command('list-categories', 'list', operations_tmpl='azure.mgmt.monitor.operations.event_categories_operations#EventCategoriesOperations.{}', client_factory=cf_event_categories)
with self.command_group('monitor activity-log alert', activity_log_alerts_sdk, custom_command_type=activity_log_alerts_custom) as g:
g.custom_command('list', 'list_activity_logs_alert')
g.custom_command('create', 'create', exception_handler=monitor_exception_handler)
g.command('show', 'get', exception_handler=missing_resource_handler)
g.command('delete', 'delete', exception_handler=missing_resource_handler)
g.generic_update_command('update', custom_func_name='update', setter_arg_name='activity_log_alert', exception_handler=monitor_exception_handler)
g.custom_command('action-group add', 'add_action_group', exception_handler=monitor_exception_handler)
g.custom_command('action-group remove', 'remove_action_group', exception_handler=monitor_exception_handler)
g.custom_command('scope add', 'add_scope', exception_handler=monitor_exception_handler)
g.custom_command('scope remove', 'remove_scope', exception_handler=monitor_exception_handler)
with self.command_group('monitor alert', alert_sdk, custom_command_type=alert_custom) as g:
g.custom_command('create', 'create_metric_rule')
g.command('delete', 'delete')
g.command('show', 'get')
g.command('list', 'list_by_resource_group')
g.command('show-incident', 'get', command_type=alert_rule_incidents_sdk)
g.command('list-incidents', 'list_by_alert_rule', command_type=alert_rule_incidents_sdk)
g.generic_update_command('update', custom_func_name='update_metric_rule', exception_handler=monitor_exception_handler)
with self.command_group('monitor autoscale-settings', autoscale_sdk, custom_command_type=autoscale_custom) as g:
g.command('create', 'create_or_update')
g.command('delete', 'delete')
g.command('show', 'get')
g.command('list', 'list_by_resource_group')
g.custom_command('get-parameters-template', 'scaffold_autoscale_settings_parameters')
g.generic_update_command('update', exception_handler=monitor_exception_handler)
with self.command_group('monitor diagnostic-settings', diagnostics_sdk, custom_command_type=diagnostics_custom) as g:
from .validators import validate_diagnostic_settings
g.custom_command('create', 'create_diagnostics_settings', validator=validate_diagnostic_settings)
g.command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
g.generic_update_command('update', exception_handler=monitor_exception_handler)
with self.command_group('monitor diagnostic-settings categories', diagnostics_categories_sdk) as g:
g.command('show', 'get')
g.command('list', 'list')
with self.command_group('monitor log-profiles', log_profiles_sdk) as g:
g.command('create', 'create_or_update')
g.command('delete', 'delete')
g.command('show', 'get')
g.command('list', 'list')
g.generic_update_command('update', exception_handler=monitor_exception_handler)
with self.command_group('monitor metrics') as g:
from .transformers import metrics_table, metrics_definitions_table
g.command('list', 'list', command_type=metric_operations_sdk, table_transformer=metrics_table)
g.command('list-definitions', 'list', command_type=metric_definitions_sdk, table_transformer=metrics_definitions_table)
|
the-stack_0_15727 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nlp.nhnet.decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.nhnet import configs
from official.nlp.nhnet import decoder
from official.nlp.nhnet import utils
def _create_cache(batch_size, init_decode_length, num_heads, head_size):
return {
"key":
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32),
"value":
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32)
}
class DecoderTest(tf.test.TestCase):
def setUp(self):
super(DecoderTest, self).setUp()
self._config = utils.get_test_params()
def test_transformer_decoder(self):
decoder_block = decoder.TransformerDecoder(
num_hidden_layers=self._config.num_hidden_layers,
hidden_size=self._config.hidden_size,
num_attention_heads=self._config.num_attention_heads,
intermediate_size=self._config.intermediate_size,
intermediate_activation=self._config.hidden_act,
hidden_dropout_prob=self._config.hidden_dropout_prob,
attention_probs_dropout_prob=self._config.attention_probs_dropout_prob,
initializer_range=self._config.initializer_range)
decoder_block.build(None)
self.assertEqual(len(decoder_block.layers), self._config.num_hidden_layers)
def test_decoder_block_with_cache(self):
decoder_block = decoder.TransformerDecoderBlock(
hidden_size=self._config.hidden_size,
num_attention_heads=self._config.num_attention_heads,
intermediate_size=self._config.intermediate_size,
intermediate_activation=self._config.hidden_act,
hidden_dropout_prob=self._config.hidden_dropout_prob,
attention_probs_dropout_prob=self._config.attention_probs_dropout_prob,
initializer_range=self._config.initializer_range)
# Forward path.
dummy_tensor = tf.zeros([2, 4, self._config.hidden_size], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask]
cache = _create_cache(
2, 0, self._config.num_attention_heads,
self._config.hidden_size // self._config.num_attention_heads)
output, cache = decoder_block(inputs, cache)
self.assertEqual(output.shape, (2, 4, self._config.hidden_size))
self.assertEqual(cache["value"].shape, (2, 4, 2, 8))
def test_bert_decoder(self):
seq_length = 10
encoder_input_ids = tf.keras.layers.Input(
shape=(seq_length,), name="encoder_input_ids", dtype=tf.int32)
target_ids = tf.keras.layers.Input(
shape=(seq_length,), name="target_ids", dtype=tf.int32)
encoder_outputs = tf.keras.layers.Input(
shape=(seq_length, self._config.hidden_size),
name="all_encoder_outputs",
dtype=tf.float32)
embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._config.vocab_size,
embedding_width=self._config.hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self._config.initializer_range),
name="word_embeddings")
cross_attention_bias = decoder.AttentionBias(bias_type="single_cross")(
encoder_input_ids)
self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
target_ids)
inputs = dict(
attention_bias=cross_attention_bias,
self_attention_bias=self_attention_bias,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs)
decoder_layer = decoder.Decoder(self._config, embedding_lookup)
outputs = decoder_layer(inputs)
model_inputs = dict(
encoder_input_ids=encoder_input_ids,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs)
model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test")
self.assertLen(decoder_layer.trainable_weights, 30)
# Forward path.
fake_inputs = {
"encoder_input_ids": np.zeros((2, 10), dtype=np.int32),
"target_ids": np.zeros((2, 10), dtype=np.int32),
"all_encoder_outputs": np.zeros((2, 10, 16), dtype=np.float32),
}
output_tensor = model(fake_inputs)
self.assertEqual(output_tensor.shape, (2, 10, 16))
def test_multi_doc_decoder(self):
self._config = utils.get_test_params(cls=configs.NHNetConfig)
seq_length = 10
num_docs = 5
encoder_input_ids = tf.keras.layers.Input(
shape=(num_docs, seq_length), name="encoder_input_ids", dtype=tf.int32)
target_ids = tf.keras.layers.Input(
shape=(seq_length,), name="target_ids", dtype=tf.int32)
encoder_outputs = tf.keras.layers.Input(
shape=(num_docs, seq_length, self._config.hidden_size),
name="all_encoder_outputs",
dtype=tf.float32)
embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._config.vocab_size,
embedding_width=self._config.hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self._config.initializer_range),
name="word_embeddings")
doc_attention_probs = tf.keras.layers.Input(
shape=(self._config.num_decoder_attn_heads, seq_length, num_docs),
name="doc_attention_probs",
dtype=tf.float32)
cross_attention_bias = decoder.AttentionBias(bias_type="multi_cross")(
encoder_input_ids)
self_attention_bias = decoder.AttentionBias(bias_type="decoder_self")(
target_ids)
inputs = dict(
attention_bias=cross_attention_bias,
self_attention_bias=self_attention_bias,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs,
doc_attention_probs=doc_attention_probs)
decoder_layer = decoder.Decoder(self._config, embedding_lookup)
outputs = decoder_layer(inputs)
model_inputs = dict(
encoder_input_ids=encoder_input_ids,
target_ids=target_ids,
all_encoder_outputs=encoder_outputs,
doc_attention_probs=doc_attention_probs)
model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name="test")
self.assertLen(decoder_layer.trainable_weights, 30)
# Forward path.
fake_inputs = {
"encoder_input_ids":
np.zeros((2, num_docs, seq_length), dtype=np.int32),
"target_ids":
np.zeros((2, seq_length), dtype=np.int32),
"all_encoder_outputs":
np.zeros((2, num_docs, seq_length, 16), dtype=np.float32),
"doc_attention_probs":
np.zeros(
(2, self._config.num_decoder_attn_heads, seq_length, num_docs),
dtype=np.float32)
}
output_tensor = model(fake_inputs)
self.assertEqual(output_tensor.shape, (2, seq_length, 16))
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_15728 | import numpy as np
import cv2
from os.path import *
import math
# trs, let's assume width is always wider than height
def video_to_npy(infile, outfile=None, width=None, height=None, squarecrop=None, fps=None, mode='rgb', maxlength=None, use_cache=False):
global vcache
if use_cache and outfile is not None and 'vcache' in globals():
if outfile in vcache: return vcache[outfile]
else:
vcache = dict()
# has this video already been saved before?
if outfile and isfile(outfile):
frames = np.load(outfile)
if use_cache: vcache[outfile] = frames
# just return this preloaded video
return frames
print('reading fresh video from %s' % infile)
vidcap = cv2.VideoCapture(infile)
success, image = vidcap.read()
frames = []
count = 0
if not success:
raise ValueError('Could not read the video file!')
while success:
frames.append( image[...,::-1] if mode == 'rgb' else image )
count += 1
success,image = vidcap.read()
if fps:
span = int(vidcap.get(cv2.CAP_PROP_FPS) / fps)
frames = frames[0::span]
if width or height:
width = width if width else int(height / frames[0].shape[0] * frames[0].shape[1])
height = height if height else int(width / frames[0].shape[1] * frames[0].shape[0])
frames = [ cv2.resize(frame, (width, height)) for frame in frames ]
if squarecrop:
tl = int((width/2)-(height/2))
# note that x,y is the wrong way around i.e. it's
# F x Y x X x C
frames = [ frame[ 0:height, tl:(tl+height)] for frame in frames ]
# trs-renamed this from "cropat" as it's a more intuative name
if maxlength:
frames = frames[0:maxlength*fps]
frames = np.array(frames)
if outfile:
np.save(outfile, frames)
return frames
def resize_video(video, video_size=(100,100)):
"""
Resize video content
"""
width, height = video_size
width = width if width else int(height / video[0].shape[0] * video[0].shape[1])
height = height if height else int(width / video[0].shape[1] * video[0].shape[0])
video = np.array([ cv2.resize(frame, (width, height)) for frame in video ])
return video
def dense_optical_flow(frame1, frame2):
f1 = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
f2 = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
return cv2.calcOpticalFlowFarneback(f1, f2, None, 0.5, 3, 15, 3, 5, 1.2, 0)
def flow_to_hsv(frame1, flow):
hsvImg = np.zeros_like(frame1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsvImg[..., 0] = 0.5 * ang * 180 / np.pi
hsvImg[..., 1] = 255
hsvImg[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
return cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)
def naive_stabilization(f):
vec = np.average(f,axis=(0,1))
mask = f==0
f = f-vec
f[mask]=0
return f
def flow_to_polar(f):
return cv2.cartToPolar(f[..., 0], f[..., 1])
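# Illustrative pipeline sketch (not in the original file): the video path and
# sizes are placeholders, shown only to illustrate how the helpers compose.
if __name__ == '__main__':
    frames = video_to_npy('example.mp4', width=320, fps=5, mode='bgr')  # keep OpenCV BGR order
    flow = dense_optical_flow(frames[0], frames[1])
    hsv = flow_to_hsv(frames[0], flow)
    mag, ang = flow_to_polar(naive_stabilization(flow))
    print(frames.shape, flow.shape, hsv.shape, mag.shape)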
|
the-stack_0_15729 | #!/usr/bin/env python
# Fit proper motion and parallax using ra/dec/mjd data
# Most of this code was taken from here:
# https://github.com/ctheissen/WISE_Parallaxes/blob/master/WISE_Parallax.py
import numpy as np
from astropy.table import Table, vstack, join
import matplotlib.pyplot as plt
from astropy import units as u
from scipy.optimize import curve_fit, minimize
from astropy.time import Time
import astropy.coordinates as coords
from dlnpyutils import utils as dln, coords as dcoords
# Set some constants
d2a = 3600.
d2ma = 3600000.
d2y = 1/365.25
def astrometryfunc(x, Delta1, Delta2, PMra, PMdec, pi):
""" Compute proper motion and parallax model for a set of ra/dec/mjd values."""
# x: input list of central RA and DEC positions and array of MJDs
# Delta1: initial dRA position
# Delta2: initial dDEC position
# PMra: proper motion in RA (arcsec/yr)
# PMdec: proper motion in DEC (arcsec/yr)
# pi: parallax (arcsec)
ra0, dec0, mjds = x
n = len(mjds)
years = (mjds - mjds[0])*d2y
ras = np.zeros(n,np.float64)+ra0
decs = np.zeros(n,np.float64)+dec0
bary = coords.get_body_barycentric('earth', Time(mjds, format='mjd'))
# Parallax factors
Fac1 = (bary.x * np.sin(ras*np.pi/180.) - bary.y * np.cos(ras*np.pi/180.) )
Fac2 = bary.x * np.cos(ras*np.pi/180.) * np.sin(decs*np.pi/180.) + \
bary.y * np.sin(ras*np.pi/180.) * np.sin(decs*np.pi/180.) - \
bary.z * np.cos(decs*np.pi/180.)
RAsend = Delta1 + PMra * years + pi * Fac1.value
DECsend = Delta2 + PMdec * years + pi * Fac2.value
return np.concatenate( [RAsend, DECsend]).flatten()
def fit(cat):
""" Fit proper motion and parallax to ra/dec/mjd data in a table."""
mjd = cat['mjd']
ra = cat['ra']
raerr = cat['raerr']
dec = cat['dec']
decerr = cat['decerr']
# Compute relative positions
cenra = np.mean(ra)
cendec = np.mean(dec)
lon,lat = dcoords.rotsphcen(ra,dec,cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Fit proper motion and parallax
pars, cov = curve_fit(astrometryfunc, [ra, dec, mjd] ,
np.concatenate( [lon,lat] ).flatten(),
sigma=np.concatenate( [ raerr, decerr ] ).flatten() )
return pars,cov
def plotfit(cat,pars,cov,savefig=None):
""" Plot a figure of the data and the proper motion/parallax fit."""
plt.rcParams.update({'font.size': 12})
# Compute relative positions
cenra = np.mean(cat['ra'])
cendec = np.mean(cat['dec'])
lon,lat = dcoords.rotsphcen(cat['ra'],cat['dec'],cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Array of MJDs for model curve
mjd = np.linspace(np.min(cat['mjd']),np.max(cat['mjd']),100)
out = astrometryfunc([cenra,cendec,mjd],pars[0],pars[1],pars[2],pars[3],pars[4])
ll = out[0:100]
bb = out[100:]
# Plot the model and data
plt.plot(ll,bb)
plt.errorbar(lon,lat,xerr=cat['raerr'],yerr=cat['decerr'],fmt='o',color='black',
markersize=5,ecolor='lightgray',elinewidth=2,linestyle='none',capsize=0)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
xr = dln.minmax(np.concatenate((lon,ll)))
xr = [xr[0]-0.05*dln.valrange(xr),xr[1]+0.05*dln.valrange(xr)]
yr = dln.minmax(np.concatenate((lat,bb)))
yr = [yr[0]-0.05*dln.valrange(yr),yr[1]+0.05*dln.valrange(yr)]
plt.xlim(xr)
plt.ylim(yr)
perr = np.sqrt(np.diag(cov))
plt.annotate(r'$\mu_\alpha$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[2]*1e3,perr[2]*1e3) + '\n' +
r'$\mu_\delta$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[3]*1e3,perr[3]*1e3) + '\n' +
r'$\pi$ = %5.3f $\pm$ %5.3f mas' % (pars[4]*1e3,perr[4]*1e3),
xy=(xr[0]+0.05*dln.valrange(xr),yr[1]-0.20*dln.valrange(yr)),ha='left')
if savefig is not None:
plt.savefig(savefig)
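# Illustrative end-to-end sketch (not part of the original module): a synthetic
# catalog using the column names fit() expects (mjd, ra, dec, raerr, decerr).
# The proper motions and uncertainties below are made-up values in arcsec.
if __name__ == '__main__':
    nobs = 25
    mjds = np.linspace(57000.0, 58500.0, nobs)
    years = (mjds - mjds[0]) * d2y
    cat = Table({'mjd': mjds,
                 'ra': 180.0 + (0.1 / d2a) * years,   # ~0.1 arcsec/yr drift in RA
                 'dec': 30.0 + (0.05 / d2a) * years,  # ~0.05 arcsec/yr drift in DEC
                 'raerr': np.full(nobs, 0.02),
                 'decerr': np.full(nobs, 0.02)})
    pars, cov = fit(cat)
    print('PMra, PMdec, parallax (arcsec):', pars[2], pars[3], pars[4])
    plotfit(cat, pars, cov, savefig='pm_parallax_fit.png')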
|
the-stack_0_15730 | # Time: O(n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
if self:
return "{}".format(self.val)
else:
return None
class Solution(object):
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
fast, slow = head, head
while fast and fast.next:
fast, slow = fast.next.next, slow.next
if fast is slow:
fast = head
while fast is not slow:
fast, slow = fast.next, slow.next
return fast
return None
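# Quick illustrative check (not part of the original solution): a three-node
# list whose tail points back to the second node; Floyd's algorithm should
# return that node as the cycle entry.
if __name__ == "__main__":
    a, b, c = ListNode(1), ListNode(2), ListNode(3)
    a.next, b.next, c.next = b, c, b       # cycle entry point is b
    assert Solution().detectCycle(a) is b
    d = ListNode(4)                        # single node, no cycle
    assert Solution().detectCycle(d) is None
    print("cycle entry node:", Solution().detectCycle(a))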
|
the-stack_0_15731 | from sqlbag import S
from schemainspect import get_inspector
CREATE = """
DROP SCHEMA IF EXISTS it CASCADE;
CREATE SCHEMA it;
CREATE FUNCTION it.key_func(jsonb) RETURNS int AS $$
SELECT jsonb_array_length($1);
$$ LANGUAGE SQL IMMUTABLE;
CREATE FUNCTION it.part_func(jsonb) RETURNS boolean AS $$
SELECT jsonb_typeof($1) = 'array';
$$ LANGUAGE SQL IMMUTABLE;
CREATE TABLE it.foo(a bigserial, b jsonb);
CREATE UNIQUE INDEX fun_partial_index ON it.foo (it.key_func(b))
WHERE it.part_func(b);
CREATE INDEX brin_index ON it.foo USING BRIN (a);
"""
def test_indexes(db):
with S(db) as s:
s.execute(CREATE)
i1 = get_inspector(s, schema="it")
# Recreate schema.
# Functions oids will be changed
s.execute(CREATE)
i2 = get_inspector(s, schema="it")
assert i1.indexes == i2.indexes
CREATE_CONST = """
create table t(id uuid primary key, x bigint);
"""
def test_constraints(db):
with S(db) as s:
s.execute(CREATE_CONST)
i = get_inspector(s)
constraints_keys = list(i.constraints.keys())
assert constraints_keys == ['"public"."t"."t_pkey"']
indexes_keys = list(i.indexes.keys())
assert indexes_keys == ['"public"."t_pkey"']
|
the-stack_0_15733 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import PALLY1TestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
assert_equal(rsp, expect)
class MiningTest(PALLY1TestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0]
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generate(1)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
|
the-stack_0_15736 | from netatmobeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Netatmobeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
netatmobeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("netatmobeat is running"))
exit_code = netatmobeat_proc.kill_and_wait()
assert exit_code == 0
|
the-stack_0_15737 | #!/usr/bin/env python3
from dataclasses import dataclass, field
from typing import List, Type
from ml.rl.models.actor import GaussianFullyConnectedActor
from ml.rl.models.base import ModelBase
from ml.rl.net_builder.continuous_actor_net_builder import ContinuousActorNetBuilder
from ml.rl.parameters import NormalizationData, param_hash
from ml.rl.preprocessing.identify_types import CONTINUOUS_ACTION
from ml.rl.preprocessing.normalization import get_num_output_features
@dataclass(frozen=True)
class GaussianFullyConnectedConfig:
__hash__ = param_hash
sizes: List[int] = field(default_factory=lambda: [128, 64])
activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
use_batch_norm: bool = False
use_layer_norm: bool = False
class GaussianFullyConnected(ContinuousActorNetBuilder):
def __init__(self, config: GaussianFullyConnectedConfig):
super().__init__()
assert len(config.sizes) == len(config.activations), (
f"Must have the same numbers of sizes and activations; got: "
f"{config.sizes}, {config.activations}"
)
self.config = config
@classmethod
def config_type(cls) -> Type:
return GaussianFullyConnectedConfig
@property
def default_action_preprocessing(self) -> str:
return CONTINUOUS_ACTION
def build_actor(
self,
state_normalization_data: NormalizationData,
action_normalization_data: NormalizationData,
) -> ModelBase:
state_dim = get_num_output_features(
state_normalization_data.dense_normalization_parameters
)
action_dim = get_num_output_features(
action_normalization_data.dense_normalization_parameters
)
return GaussianFullyConnectedActor(
state_dim=state_dim,
action_dim=action_dim,
sizes=self.config.sizes,
activations=self.config.activations,
use_batch_norm=self.config.use_batch_norm,
use_layer_norm=self.config.use_layer_norm,
)
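# Illustrative sketch (assumptions: the ml.rl package imported above is installed; the
# sizes and activations are made-up example values, not defaults from any config file).
if __name__ == "__main__":
    _example_config = GaussianFullyConnectedConfig(sizes=[64, 32], activations=["relu", "relu"])
    _example_builder = GaussianFullyConnected(_example_config)
    # Prints the preprocessing identifier declared above (CONTINUOUS_ACTION).
    print(_example_builder.default_action_preprocessing)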
|
the-stack_0_15740 | """
power_meter_hardware.py
"__"
"""
__author__ = "Prakash Manandhar, and Sophie Yang"
__copyright__ = "Copyright 2021, Hydration Team"
__credits__ = ["Prakash Manandhar, and Sophie Yang"]
__license__ = "Internal"
__version__ = "1.0.0"
__maintainer__ = "Sophie Yang"
__email__ = "[email protected]"
__status__ = "Production"
from time import sleep # this lets us have a time delay
import time
from abc import ABC, abstractmethod # https://docs.python.org/3/library/abc.html
import numpy
import threading
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
from pymodbus.client.sync import ModbusSerialClient
from pymodbus.payload import BinaryPayloadDecoder
class AbstractPowerMeter(ABC):
@abstractmethod
# returns a timestamped power reading
def get_active_power_W(self):
pass
@abstractmethod
def get_current_mA(self):
pass
class MockPowerMeterSensor(AbstractPowerMeter):
def get_active_power_W(self):
return [time.time(), -2000.0]
def get_current_mA(self):
return [time.time(), -999.0]
class PowerMeterThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.stopped = True
self.sensor_readings = {
"time_s": 0.0,
"active_power_W": 0.0,
"current_mA": 0.0,
}
self.client = ModbusSerialClient(port=config.get('PowerMeter', 'port'), method='rtu', baudrate=config.getint('PowerMeter', 'baudrate'))
def run(self):
self.stopped = False
address = config.getint("PowerMeter", "address")
count = config.getint("PowerMeter", "count")
sampling_time = config.getfloat("PowerMeter", "SamplingTime")
while not self.stopped:
loop_start = time.time()
result = self.client.read_holding_registers(address, count, unit=1)
decoder = BinaryPayloadDecoder.fromRegisters(result.registers,
wordorder = '>', byteorder = '>')
current_mA = decoder.decode_32bit_float()
power_W = decoder.decode_32bit_float()
self.sensor_readings["time_s"] = loop_start
self.sensor_readings["active_power_W"] = power_W
self.sensor_readings["current_mA"] = current_mA
loop_end = time.time()
delta_time = loop_end - loop_start
if (delta_time < sampling_time):
time.sleep(sampling_time - delta_time)
def stop(self):
self.stopped = True
class FileWriterThread(threading.Thread):
def __init__(self, power_meter_thread):
threading.Thread.__init__(self)
self.power_meter_thread = power_meter_thread
self.stopped = True
def run(self):
self.stopped = False
time_start_s = time.time()
fp = open(f"power_meter_{time_start_s}.csv", "w")
keys = self.power_meter_thread.sensor_readings.keys()
for k in keys:
fp.write(f"{k},")
fp.write("\n")
sampling_time = config.getfloat("PowerMeter", "SamplingTime")
while not self.stopped: #read sensor continuously
loop_start = time.time()
for k in keys:
fp.write(f"{self.power_meter_thread.sensor_readings[k]},")
fp.write("\n")
loop_start_int = (int(loop_start))%10
if loop_start_int == 0:
print(f"[t (s), Power (W)] = {self.power_meter_thread.sensor_readings['time_s']}, "\
f"{self.power_meter_thread.sensor_readings['active_power_W']}")
loop_end = time.time()
delta_time = loop_end - loop_start
if (delta_time < sampling_time):
time.sleep(sampling_time - delta_time)
fp.close()
def stop(self):
self.stopped = True
class PowerMeter(AbstractPowerMeter):
def __init__(self):
self.power_meter_thread = PowerMeterThread()
self.file_writer_thread = FileWriterThread(self.power_meter_thread)
self.power_meter_thread.start()
self.file_writer_thread.start()
def get_active_power_W(self):
return [self.power_meter_thread.sensor_readings["time_s"],
self.power_meter_thread.sensor_readings["active_power_W"]]
def get_current_mA(self):
return [self.power_meter_thread.sensor_readings["time_s"],
self.power_meter_thread.sensor_readings["current_mA"]]
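# Illustrative usage sketch (assumptions: pymodbus and config.ini are only needed for
# the real PowerMeter; the mock sensor below runs without any hardware attached).
if __name__ == "__main__":
    meter = MockPowerMeterSensor()       # swap for PowerMeter() on real hardware
    print(meter.get_active_power_W())    # -> [timestamp, watts]
    print(meter.get_current_mA())        # -> [timestamp, milliamps]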
|
the-stack_0_15745 | from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator, URLValidator
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import gettext
from cms.utils.page import get_all_pages_from_path
from cms.utils.urlutils import admin_reverse, relative_url_regex
def validate_relative_url(value):
RegexValidator(regex=relative_url_regex)(value)
def validate_url(value):
try:
# Validate relative urls first
validate_relative_url(value)
except ValidationError:
# Fallback to absolute urls
URLValidator()(value)
def validate_url_uniqueness(site, path, language, exclude_page=None):
""" Checks for conflicting urls
"""
if '/' in path:
validate_url(path)
path = path.strip('/')
pages = get_all_pages_from_path(site, path, language)
pages = pages.select_related('publisher_public')
if exclude_page:
pages = pages.exclude(pk=exclude_page.pk)
if exclude_page.publisher_public_id:
pages = pages.exclude(pk=exclude_page.publisher_public_id)
try:
conflict_page = pages[0]
except IndexError:
return True
if conflict_page.publisher_is_draft:
page_id = conflict_page.pk
else:
# rare case where draft points to one url
# and live points to another which conflicts.
# Use the draft ID because public page is not editable.
page_id = conflict_page.publisher_public_id
if conflict_page.is_page_type:
change_url = admin_reverse('cms_pagetype_change', args=[page_id])
else:
change_url = admin_reverse('cms_page_change', args=[page_id])
conflict_url = '<a href="%(change_url)s" target="_blank">%(page_title)s</a>' % {
'change_url': change_url,
'page_title': force_text(conflict_page),
}
if exclude_page:
message = gettext('Page %(conflict_page)s has the same url \'%(url)s\' as current page "%(instance)s".')
else:
message = gettext('Page %(conflict_page)s has the same url \'%(url)s\' as current page.')
message = message % {'conflict_page': conflict_url, 'url': path, 'instance': exclude_page}
raise ValidationError(mark_safe(message))
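# Illustrative sketch (assumptions: a configured Django project with django CMS, plus a
# `site` object and page tree to check against; the values below are placeholders):
#   validate_url(candidate_path)        # accepts relative paths, otherwise falls back
#                                       # to Django's URLValidator
#   validate_url_uniqueness(site, path="about", language="en")
#                                       # raises ValidationError when another page on
#                                       # `site` already answers to "about"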
|
the-stack_0_15746 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from ax.models.base import Model
from ax.models.model_utils import tunable_feature_indices
from ax.models.random.base import RandomModel
from ax.models.types import TConfig
from ax.utils.common.docutils import copy_doc
from ax.utils.common.typeutils import not_none
from torch.quasirandom import SobolEngine
class SobolGenerator(RandomModel):
"""This class specifies the generation algorithm for a Sobol generator.
As Sobol does not make use of a model, it does not implement
the fit or predict methods.
Attributes:
deduplicate: If true, a single instantiation of the generator will not
return the same point twice.
init_position: The initial state of the Sobol generator.
Starts at 0 by default.
scramble: If True, permutes the parameter values among
the elements of the Sobol sequence. Default is True.
seed: An optional seed value for scrambling.
"""
engine: Optional[SobolEngine] = None
def __init__(
self,
seed: Optional[int] = None,
deduplicate: bool = False,
init_position: int = 0,
scramble: bool = True,
generated_points: Optional[np.ndarray] = None,
fallback_to_sample_polytope: bool = False,
) -> None:
super().__init__(
deduplicate=deduplicate, seed=seed, generated_points=generated_points
)
self.init_position = init_position
self.scramble = scramble
# Initialize engine on gen.
self._engine = None
self.fallback_to_sample_polytope = fallback_to_sample_polytope
def init_engine(self, n_tunable_features: int) -> SobolEngine:
"""Initialize singleton SobolEngine, only on gen.
Args:
n_tunable_features: The number of features which can be
searched over.
Returns:
SobolEngine, which can generate Sobol points.
"""
if not self._engine:
self._engine = SobolEngine(
dimension=n_tunable_features, scramble=self.scramble, seed=self.seed
).fast_forward(self.init_position)
return self._engine
@property
def engine(self) -> Optional[SobolEngine]:
"""Return a singleton SobolEngine."""
return self._engine
def gen(
self,
n: int,
bounds: List[Tuple[float, float]],
linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
fixed_features: Optional[Dict[int, float]] = None,
model_gen_options: Optional[TConfig] = None,
rounding_func: Optional[Callable[[np.ndarray], np.ndarray]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate new candidates.
Args:
n: Number of candidates to generate.
bounds: A list of (lower, upper) tuples for each column of X.
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value during generation.
rounding_func: A function that rounds an optimization result
appropriately (e.g., according to `round-trip` transformations)
but *unused here*.
Returns:
2-element tuple containing
- (n x d) array of generated points.
- Uniform weights, an n-array of ones for each point.
"""
tf_indices = tunable_feature_indices(
bounds=bounds, fixed_features=fixed_features
)
if len(tf_indices) > 0:
self.init_engine(len(tf_indices))
points, weights = super().gen(
n=n,
bounds=bounds,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
model_gen_options=model_gen_options,
rounding_func=rounding_func,
)
if self.engine:
self.init_position = not_none(self.engine).num_generated
return (points, weights)
@copy_doc(Model._get_state)
def _get_state(self) -> Dict[str, Any]:
state = super()._get_state()
state.update({"init_position": self.init_position})
return state
@copy_doc(RandomModel._gen_unconstrained)
def _gen_unconstrained(
self,
n: int,
d: int,
tunable_feature_indices: np.ndarray,
fixed_features: Optional[Dict[int, float]] = None,
) -> np.ndarray:
if len(tunable_feature_indices) == 0:
# Search space is entirely fixed, should return the only avail. point.
fixed_features = fixed_features or {}
# pyre-fixme[7]: Expected `ndarray` but got `Tuple[typing.Any, typing.Any]`.
return (
np.tile(np.array([list(not_none(fixed_features).values())]), (n, 1)),
np.ones(n),
)
return super()._gen_unconstrained(
n=n,
d=d,
tunable_feature_indices=tunable_feature_indices,
fixed_features=fixed_features,
)
def _gen_samples(self, n: int, tunable_d: int) -> np.ndarray:
"""Generate n samples.
tunable_d is ignored; as it is specified at engine initialization.
Args:
bounds: A list of d (lower, upper) tuples for each column of X.
fixed_feature_indices: Indices of features which are fixed at a
particular value.
"""
if self.engine is None:
raise ValueError( # pragma: no cover
"Sobol Engine must be initialized before candidate generation."
)
return not_none(self.engine).draw(n, dtype=torch.double).numpy()
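# Illustrative sketch (assumptions: Ax and torch are installed; the bounds and draw
# count are made-up example values, and constraint handling is left to RandomModel).
if __name__ == "__main__":
    _sobol = SobolGenerator(seed=0, scramble=True)
    _points, _weights = _sobol.gen(n=4, bounds=[(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)])
    # Expect a (4 x 3) array of quasi-random points and uniform weights of 1.0.
    print(_points.shape, _weights)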
|
the-stack_0_15750 | # Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
"""
hbmqtt_pub - MQTT 3.1.1 publisher
Usage:
hbmqtt_pub --version
hbmqtt_pub (-h | --help)
hbmqtt_pub --url BROKER_URL -t TOPIC (-f FILE | -l | -m MESSAGE | -n | -s) [-c CONFIG_FILE] [-i CLIENT_ID] [-q | --qos QOS] [-d] [-k KEEP_ALIVE] [--clean-session] [--ca-file CAFILE] [--ca-path CAPATH] [--ca-data CADATA] [ --will-topic WILL_TOPIC [--will-message WILL_MESSAGE] [--will-qos WILL_QOS] [--will-retain] ] [--extra-headers HEADER] [-r]
Options:
-h --help Show this screen.
--version Show version.
    --url BROKER_URL Broker connection URL (must conform to the MQTT URI scheme, see https://github.com/mqtt/mqtt.github.io/wiki/URI-Scheme)
-c CONFIG_FILE Broker configuration file (YAML format)
-i CLIENT_ID Id to use as client ID.
-q | --qos QOS Quality of service to use for the message, from 0, 1 and 2. Defaults to 0.
-r Set retain flag on connect
-t TOPIC Message topic
-m MESSAGE Message data to send
-f FILE Read file by line and publish message for each line
-s Read from stdin and publish message for each line
    -k KEEP_ALIVE Keep alive timeout in seconds
--clean-session Clean session on connect (defaults to False)
--ca-file CAFILE] CA file
--ca-path CAPATH] CA Path
--ca-data CADATA CA data
--will-topic WILL_TOPIC
--will-message WILL_MESSAGE
--will-qos WILL_QOS
--will-retain
--extra-headers EXTRA_HEADERS JSON object with key-value pairs of additional headers for websocket connections
-d Enable debug messages
"""
import sys
import logging
import asyncio
import os
import json
import hbmqtt
from hbmqtt.client import MQTTClient, ConnectException
from docopt import docopt
from hbmqtt.utils import read_yaml_config
logger = logging.getLogger(__name__)
def _gen_client_id():
import os
import socket
pid = os.getpid()
hostname = socket.gethostname()
return "hbmqtt_pub/%d-%s" % (pid, hostname)
def _get_qos(arguments):
try:
return int(arguments["--qos"][0])
except:
return None
def _get_extra_headers(arguments):
try:
return json.loads(arguments["--extra-headers"])
except:
return {}
def _get_message(arguments):
if arguments["-n"]:
yield b""
if arguments["-m"]:
yield arguments["-m"].encode(encoding="utf-8")
if arguments["-f"]:
try:
with open(arguments["-f"], "r") as f:
for line in f:
yield line.encode(encoding="utf-8")
except:
logger.error("Failed to read file '%s'" % arguments["-f"])
if arguments["-l"]:
import sys
for line in sys.stdin:
if line:
yield line.encode(encoding="utf-8")
if arguments["-s"]:
import sys
message = bytearray()
for line in sys.stdin:
message.extend(line.encode(encoding="utf-8"))
yield message
async def do_pub(client, arguments):
running_tasks = []
try:
logger.info("%s Connecting to broker" % client.client_id)
await client.connect(
uri=arguments["--url"],
cleansession=arguments["--clean-session"],
cafile=arguments["--ca-file"],
capath=arguments["--ca-path"],
cadata=arguments["--ca-data"],
extra_headers=_get_extra_headers(arguments),
)
qos = _get_qos(arguments)
topic = arguments["-t"]
retain = arguments["-r"]
for message in _get_message(arguments):
logger.info("%s Publishing to '%s'" % (client.client_id, topic))
task = asyncio.ensure_future(client.publish(topic, message, qos, retain))
running_tasks.append(task)
if running_tasks:
await asyncio.wait(running_tasks)
await client.disconnect()
logger.info("%s Disconnected from broker" % client.client_id)
except KeyboardInterrupt:
await client.disconnect()
logger.info("%s Disconnected from broker" % client.client_id)
except ConnectException as ce:
logger.fatal("connection to '%s' failed: %r" % (arguments["--url"], ce))
except asyncio.CancelledError:
logger.fatal("Publish canceled due to previous error")
def main(*args, **kwargs):
if sys.version_info[:2] < (3, 6):
logger.fatal("Error: Python 3.6+ is required")
sys.exit(-1)
arguments = docopt(__doc__, version=hbmqtt.__version__)
# print(arguments)
formatter = "[%(asctime)s] :: %(levelname)s - %(message)s"
if arguments["-d"]:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format=formatter)
if arguments["-c"]:
config = read_yaml_config(arguments["-c"])
else:
config = read_yaml_config(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "default_client.yaml"
)
)
logger.debug("Using default configuration")
loop = asyncio.get_event_loop()
client_id = arguments.get("-i", None)
if not client_id:
client_id = _gen_client_id()
if arguments["-k"]:
config["keep_alive"] = int(arguments["-k"])
if (
arguments["--will-topic"]
and arguments["--will-message"]
and arguments["--will-qos"]
):
config["will"] = dict()
config["will"]["topic"] = arguments["--will-topic"]
config["will"]["message"] = arguments["--will-message"].encode("utf-8")
config["will"]["qos"] = int(arguments["--will-qos"])
config["will"]["retain"] = arguments["--will-retain"]
client = MQTTClient(client_id=client_id, config=config, loop=loop)
loop.run_until_complete(do_pub(client, arguments))
loop.close()
if __name__ == "__main__":
main()
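# Example invocation (illustrative; the broker URL and topic are placeholders):
#   hbmqtt_pub --url mqtt://localhost -t sensors/temp -m "hello"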
|
the-stack_0_15751 | # Caeser Encryption
import sys
if (__name__ == "__main__"):
def readFile (path):
file = open(path, "r")
lineList = []
for line in file:
lineList.append(line)
#print(lineList)
return lineList
def encrypt (lines,x):
encrypted = []
for line in lines:
for idx in range(0,len(line)-1):
num = ord(line[idx])+x
while(num < 0):
num += 256
while(num > 255):
num -= 256
char = chr(num)
encrypted.append(char)
encrypted.append("\n")
return encrypted
def writeFile(path, encrypted):
file = open(path, "w")
for char in encrypted:
file.write(char) #print(char)
        file.close()
if(len(sys.argv) < 4):
print("INPUT ERROR")
print("try:\npython caesar.py \"number\" \"path\" \"-e\\-d\"")
else:
num = int(sys.argv[1])
path = sys.argv[2]
crypt = sys.argv[3]
lines = readFile(path)
if(crypt == "-d"):
num = num * (-1)
encrypted = encrypt(lines, num)
elif(crypt == "-e"):
encrypted = encrypt(lines, num)
writeFile(path, encrypted)
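# Example invocation (illustrative; message.txt is a placeholder file name), matching
# the argument order checked above:
#   python caesar.py 3 message.txt -e    # shift every character forward by 3, in place
#   python caesar.py 3 message.txt -d    # shift back by 3 to restore the file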
|
the-stack_0_15756 | '''
@Author: hua
@Date: 2019-12-03 14:44:23
@description:
@LastEditors: hua
@LastEditTime: 2019-12-03 15:18:00
'''
from app.Models.Admin import Admin
from sqlalchemy import event
import time
@event.listens_for(Admin, "before_insert")
def admin_before_insert(mapper, connection, target):
target.add_time = int(time.time())
target.update_time = int(time.time())
@event.listens_for(Admin, "before_update")
def admin_before_update(mapper, connection, target):
target.update_time = int(time.time()) |
the-stack_0_15757 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from coredb.query_managers.manager import BaseQueryManager
from polyaxon.pql.builder import BoolCondition, SearchCondition, ValueCondition
from polyaxon.pql.parser import parse_search_operation, parse_value_operation
class ArtifactQueryManager(BaseQueryManager):
NAME = "artifact"
FIELDS_ORDERING = ("name", "kind", "path", "is_input")
FIELDS_USE_UUID = {"run"}
FIELDS_PROXY = {
"id": "name",
"name": "artifact__name",
"kind": "artifact__kind",
"path": "artifact__path",
"state": "artifact__state",
}
CHECK_ALIVE = False
PARSERS_BY_FIELD = {
# Name
"name": parse_search_operation,
# Kind
"kind": parse_value_operation,
# Path
"path": parse_value_operation,
# State
"state": parse_value_operation,
# Is input
"is_input": parse_value_operation,
# Run
"run": parse_value_operation,
}
CONDITIONS_BY_FIELD = {
# Name
"name": SearchCondition,
# Kind
"kind": ValueCondition,
# Path
"path": ValueCondition,
# State
"state": ValueCondition,
# Is input
"is_input": BoolCondition,
# Run
"run": ValueCondition,
}
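# Illustrative note (sketch): this manager backs Polyaxon's PQL filtering of run
# artifacts, so a query along the lines of "kind:model, is_input:false" would be routed
# through the parsers and conditions declared above (the exact query syntax is defined
# by polyaxon.pql, not here).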
|
the-stack_0_15763 | import dsd, os
path = 'ADItotal\\'
lista = os.listdir(path)
dsd.limpar_arquivo('ADItotal(sem_andamentos).txt')
dsd.limpar_arquivo('ADItotal(andamentos).txt')
dsd.limpar_arquivo('excluidos.txt')
partes_total = []
dados_csv = []
andamentos_csv = []
lista_excluidos = []
dsd.limpar_arquivo('ADItotalpartes.txt')
dsd.write_csv_header('ADItotalpartes.txt', 'nome, tipo, processo')
contador=0
excluidos = 0
for item in lista[0:]:
gravar_processo = True
contador = contador +1
nome_arquivo = path+item
processo = item.replace('.txt','')
    # load the data from the file
html = 'NA'
html = dsd.carregar_arquivo(nome_arquivo)
html = html.replace(',',';')
html = html.replace('\n','')
html = html.replace(' ',' ')
    # extract the parties
partes_string = dsd.extrair(html,'partes>>>>', '<div id="partes-resumidas">')
partes = dsd.extrair_partes(partes_string)
lista_das_partes = []
lista_das_partes = dsd.listar_partes(partes_string, item.replace('.txt',''))
for y in lista_das_partes:
dsd.write_csv_line('ADItotalpartes.txt', y)
    # extract the case events (andamentos)
andamentos = dsd.extrair(html,'andamentos>>>>', 'pauta>>>>')
andamentos = dsd.extrair_andamentos(andamentos)
    # extract the elements from the page source
codigofonte =dsd.extrair(html,'fonte>>>>', 'partes>>>>')
eletronico_fisico =dsd.extrair(codigofonte,'bg-primary">','</span>')
sigilo =dsd.extrair(codigofonte,'bg-success">','</span>')
nome_processo =dsd.extrair(codigofonte,'-processo" value="','">')
numerounico = dsd.extrair(codigofonte,'-rotulo">','</div>')
numerounico = dsd.extrair(numerounico,': ', '')
relator = dsd.extrair(codigofonte,'Relator:','</div>')
relator = relator.strip(' ')
relator = relator.replace('MIN. ','')
relator = dsd.remover_acentos(relator)
redator_acordao = dsd.extrair(codigofonte,'>Redator do acórdão:','</div>')
redator_acordao = dsd.remover_acentos(redator_acordao)
redator_acordao = redator_acordao.replace('MIN. ','')
redator_acordao = redator_acordao.strip(' ')
redator_acordao = redator_acordao.replace ('MINISTRO ','')
relator_ultimo_incidente = dsd.extrair(codigofonte,
'Relator do último incidente:'
,'</div>')
relator_ultimo_incidente = relator_ultimo_incidente.replace ('MIN. ','')
relator_ultimo_incidente = relator_ultimo_incidente.replace ('MINISTRO ','')
relator_ultimo_incidente = relator_ultimo_incidente.strip(' ')
relator_ultimo_incidente = dsd.remover_acentos(relator_ultimo_incidente)
ultimoincidente = dsd.extrair(relator_ultimo_incidente,"(",'')
relator_ultimo_incidente = dsd.extrair(relator_ultimo_incidente,'','(')
ultimoincidente = ultimoincidente.replace(')','')
ultimoincidente = ultimoincidente.strip(' ')
    # extract the elements from the information tab
informacoes = dsd.extrair(html,'informacoes>>>>', '>>>>')
assuntos = dsd.extrair(informacoes, '<ul style="list-style:none;">', '</ul>')
assuntos = dsd.limpar(assuntos)
assuntos = dsd.extrair(assuntos,'<li>','')
assuntos = assuntos.replace('</li>','')
assuntos = dsd.limpar(assuntos)
protocolo_data = dsd.extrair(informacoes, '<div class="col-md-5 processo-detalhes-bold m-l-0">', '</div>')
protocolo_data = protocolo_data.strip(' ')
orgaodeorigem = dsd.extrair(informacoes, '''Órgão de Origem:
</div>
<div class="col-md-5 processo-detalhes">''', '</div>')
numerodeorigem = dsd.extrair(informacoes, '''Número de Origem:
</div>
<div class="col-md-5 processo-detalhes">''', '</div>')
origem = dsd.extrair(informacoes, '''Origem:
</div>
<div class="col-md-5 processo-detalhes">''', '</div>')
procedencia = dsd.extrair(informacoes, '''<span id="descricao-procedencia">''', '</span>')
procedencia = procedencia.replace(' ','')
procedencia = dsd.extrair(procedencia, '', ' -')
cc = 'NA'
    # extract the CC (constitutionality-review) fields
if 'ADI' in nome_processo or 'ADPF' in nome_processo or 'ADC' in nome_processo or 'ADO' in nome_processo:
cc = dsd.extrair(html, 'cc>>>','')
        # extract the incident field
incidentecc = dsd.extrair (cc,
'verProcessoAndamento.asp?incidente=',
'">')
        # extract the class + injunction (liminar) + number fields
cln = 'NA'
cln = dsd.extrair(cc,
'<div><h3><strong>',
'</strong>')
dsd.limpar_cln(cln)
cln = cln.upper()
        # extract the number
numerocc = 'NA'
numerocc = dsd.extrair (cln, ' - ', '')
numerocc = dsd.limpar_numero(numerocc)
        # extract the injunction flag and the class
if 'LIMINAR' in cln:
liminarcc = 'sim'
classecc = dsd.extrair(cln, '', ' (MED')
else:
liminarcc = 'não'
classecc = dsd.extrair(cln, '', ' - ')
dsd.limpar_classe(classecc)
classecc.upper()
classecc = classecc.replace('ACAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
classecc = classecc.replace('AÇÃO DIRETA DE INCONSTITUCIONALIDADE','ADI')
classecc = classecc.replace('ARGUIÇÃO DE DESCUMPRIMENTO DE PRECEITO FUNDAMENTAL','ADPF')
        # field definition: origin
origemcc = 'NA'
origemcc = dsd.extrair(cc,'Origem:</td><td><strong>','</strong>')
procedencia = procedencia.replace('***', dsd.limpa_estado(origemcc).replace('/', ''))
        ## field definition: entry date
entradacc = dsd.extrair(cc,'Entrada no STF:</td><td><strong>','</strong>')
entradacc = dsd.substituir_data(entradacc)
        ## field definition: rapporteur (relator)
relatorcc = dsd.extrair(cc,'Relator:</td><td><strong>','</strong>')
relatorcc = relatorcc.replace('MINISTRO ','')
relatorcc = relatorcc.replace('MINISTRA ','')
relatorcc = dsd.remover_acentos(relatorcc)
        ## field definition: distribution
distribuicaocc = dsd.extrair(cc,'Distribuído:</td><td><strong>','</strong>')
distribuicaocc = dsd.substituir_data(distribuicaocc)
distribuicaocc = distribuicaocc.replace('-','/')
        ## field definition: petitioner (requerente)
requerentecc = dsd.extrair(cc,'Requerente: <strong>','</strong>')
requerentecc = requerentecc.replace(' ',' ')
requerentecc = requerentecc.replace(' ;',';')
requerentecc = requerentecc.replace('; ',';')
requerentecc = requerentecc.replace('( CF','(CF')
if '(CF' in requerentecc:
requerentesplit = requerentecc.split('(CF')
requerentecc = requerentesplit[0]
requerentecc = requerentecc.strip()
requerentetipo = requerentesplit[1]
requerentetipo = dsd.extrair(requerentetipo, ';','')
requerentetipo = requerentetipo.replace(')','')
requerentetipocc = requerentetipo.replace('0','')
requerentetipocc = requerentetipocc.replace(' 2','')
else:
requerentesplit = 'NA'
requerentetipocc = 'NA'
        ## field definition: respondent (requerido)
requeridocc = dsd.extrair(cc,
'Requerido :<strong>',
'</strong>')
        ## field definition: challenged legal provision
dispositivoquestionadocc = dsd.extrair(cc,
'Dispositivo Legal Questionado</b></strong><br /><pre>',
'</pre>')
dispositivoquestionadocc = dsd.limpar(dispositivoquestionadocc)
        ## field definition: injunction outcome
resultadoliminarcc = dsd.extrair(cc,
'Resultado da Liminar</b></strong><br /><br />',
'<br />')
        ### injunction-outcome filter
        # normalization filters ('Aguardadno' is a misspelling found in the scraped data)
        resultadoliminarcc = resultadoliminarcc.replace('Aguardadno','Aguardando')
resultadoliminarcc = resultadoliminarcc.replace('Decisão Monocrática - "Ad referendum"','Deferida')
resultadoliminarcc = resultadoliminarcc.replace('Monicrática','Monocrática')
resultadoliminarcc = resultadoliminarcc.replace('Monoacrática','Monocrática')
resultadoliminarcc = resultadoliminarcc.replace('Monocrático','Monocrática')
resultadoliminarcc = resultadoliminarcc.replace('Decisão Monocrática Deferida -','Deferida')
resultadoliminarcc = resultadoliminarcc.replace('"','')
resultadoliminarcc = resultadoliminarcc.replace('Decisão Monocrática - ','')
resultadoliminarcc = resultadoliminarcc.replace('liminar deferida','Deferida')
resultadoliminarcc = resultadoliminarcc.upper()
resultadoliminarcc = resultadoliminarcc.replace('PREJUDICADO','PREJUDICADA')
resultadoliminarcc = resultadoliminarcc.replace('PROCEDENTE','DEFERIDA')
resultadoliminarcc = resultadoliminarcc.replace('AD REFERENDUM','')
resultadoliminarcc = resultadoliminarcc.replace('PROCEDENTE','DEFERIDA')
        ## field definition: final outcome
resultadofinalcc = dsd.extrair(cc,
'Resultado Final</b></strong><br /><br />',
'<br />')
        ## field definition: final single-judge (monocratic) decision
if 'Decisão Monocrática Final</b></strong><br /><pre>' in cc:
decisaomonofinal = dsd.extrair(cc,
'Decisão Monocrática Final</b></strong><br /><pre>',
'</pre>')
decisaomonofinalcc = dsd.limpar(decisaomonofinal)
else:
decisaomonofinalcc = 'NA'
        ## field definition: constitutional grounds
if 'Fundamentação Constitucional</b></strong><br /><pre>' in cc:
fundamentocc = dsd.extrair(cc,
'Fundamentação Constitucional</b></strong><br /><pre>',
'</pre>')
fundamentocc = dsd.limpar(fundamentocc)
else:
fundamentocc = 'NA'
        ## field definition: indexing
if 'Indexação</b></strong><br /><pre>' in cc:
indexacaocc = dsd.extrair(cc,
'Indexação</b></strong><br /><pre>',
'</pre>')
indexacaocc = dsd.limpar(indexacaocc)
else:
indexacaocc = 'NA'
else:
gravar_processo = False
    # build the extracted-data variable as a list of fields
dados = [processo, incidentecc, requerentecc,
requerentetipocc, requeridocc, len(lista_das_partes), lista_das_partes ,len(andamentos),
andamentos[:9], eletronico_fisico, sigilo,
numerounico, relatorcc, relator, redator_acordao, ultimoincidente,
relator_ultimo_incidente, assuntos, procedencia, protocolo_data,
distribuicaocc, orgaodeorigem,
numerodeorigem, origem,
liminarcc, dispositivoquestionadocc, resultadoliminarcc, resultadofinalcc,
decisaomonofinalcc, fundamentocc, indexacaocc]
    # paste the content of the list above here, replacing [] with ''
campos = '''processo, incidentecc, requerentecc,
requerentetipocc, requeridocc, len(partes),partes,len(andamentos),
andamentos[:9], eletronico_fisico, sigilo,
numerounico, relatorcc, relator, redator_acordao, ultimoincidente,
relator_ultimo_incidente, assuntos, procedencia, protocolo_data,
distribuicaocc, orgaodeorigem,
numerodeorigem, origem,
liminarcc, dispositivoquestionadocc, resultadoliminarcc, resultadofinalcc,
decisaomonofinalcc, fundamentocc, indexacaocc'''
campos = campos.replace('\n','')
campos = campos.replace(' ','')
dados2 = [processo, len(andamentos), len(str(andamentos)), andamentos]
campos2 = 'processo, len(andamentos), len(str(andamentos)), andamentos'
dsd.write_csv_header('ADItotal(sem_andamentos).txt',campos)
dsd.write_csv_header('excluidos.txt','processos excluídos')
dsd.write_csv_header('ADItotal(andamentos).txt',campos2)
    # save in batches of 500
if andamentos == []:
andamentos = ['SEM ANDAMENTOS CADASTRADOS']
if (gravar_processo == False or
nome_processo == 'NA' or
len(lista_das_partes) == 0 or
'IMPOSSIBILIDADE DE PROCESSAMENTO' in andamentos[0] or
'REAUTUADO' in andamentos[0] or
'CANCELAMENTO DE AUTUACAO' in andamentos[0]):
lista_excluidos.append(processo)
excluidos = excluidos + 1
else:
dados_csv.append(dados)
andamentos_csv.append(dados2)
print(nome_processo)
dsd.write_csv_lines('ADItotal(sem_andamentos).txt',dados_csv)
dsd.write_csv_lines('ADItotal(andamentos).txt',andamentos_csv)
dsd.write_csv_lines('excluidos.txt',lista_excluidos)
print ('Gravados arquivos ADItotal(sem_andamentos).txt e ADItotal(andamentos).txt')
print (f'Excluídos {excluidos} processos')
|
the-stack_0_15764 | import pytest
from Cryptodome.PublicKey import RSA
from django.urls import reverse
from oidc_provider.models import RESPONSE_TYPE_CHOICES, RSAKey, UserConsent
from oidc_apis.factories import ApiFactory, ApiScopeFactory
from users.factories import OIDCClientFactory, UserFactory
from users.views import TunnistamoOidcAuthorizeView
@pytest.mark.parametrize('with_trailing_slash', (True, False))
@pytest.mark.django_db
def test_tunnistamo_authorize_view_is_used(client, with_trailing_slash):
response = client.get('/openid/authorize{}'.format('/' if with_trailing_slash else ''))
assert response.resolver_match.func.__name__ == TunnistamoOidcAuthorizeView.as_view().__name__
@pytest.mark.parametrize('ui_locales, expected_text', (
(None, 'Sähköposti'),
('', 'Sähköposti'),
('bogus', 'Sähköposti'),
('en', 'Email'),
('fi en', 'Sähköposti'),
('bogus en fi', 'Email'),
))
@pytest.mark.django_db
def test_tunnistamo_authorize_view_language(client, ui_locales, expected_text):
oidc_client = OIDCClientFactory(require_consent=True)
user = UserFactory()
client.force_login(user)
url = reverse('authorize')
data = {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'response_type': 'code',
'scope': 'email',
}
if ui_locales is not None:
data['ui_locales'] = ui_locales
response = client.get(url, data)
assert expected_text in response.content.decode('utf-8')
@pytest.mark.django_db
def test_api_scopes_are_shown_in_and_returned_from_consent_screen(client):
oidc_client = OIDCClientFactory(require_consent=True)
user = UserFactory()
client.force_login(user)
api = ApiFactory(required_scopes=['github_username'])
api_scope = ApiScopeFactory(api=api)
response = client.get(reverse('authorize'), {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'scope': api_scope.identifier,
'response_type': 'code',
})
assert response.status_code == 200
content = response.content.decode('utf-8')
expected_scope = '{} github_username'.format(api_scope.identifier)
assert '<input name="scope" type="hidden" value="{}" />'.format(expected_scope) in content
assert api_scope.name in content
assert api_scope.description in content
@pytest.mark.parametrize('api_scope_in_request', (False, True))
@pytest.mark.django_db
def test_api_scopes_are_added_to_user_consent_after_authorization(client, api_scope_in_request):
oidc_client = OIDCClientFactory(require_consent=True)
user = UserFactory()
client.force_login(user)
api = ApiFactory(required_scopes=['github_username'])
api_scope = ApiScopeFactory(api=api)
response = client.post(reverse('authorize'), {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'scope': '{} github_username'.format(api_scope.identifier) if api_scope_in_request else api_scope.identifier,
'response_type': 'code',
'allow': True,
})
assert response.status_code == 302
user_consent = UserConsent.objects.get(user=user, client=oidc_client)
assert 'github_username' in user_consent.scope
@pytest.mark.parametrize('create_client', (False, True))
@pytest.mark.django_db
def test_original_client_id_is_saved_to_the_session(
client,
loginmethod_factory,
oidcclient_factory,
create_client,
):
"""Test that the original client id is saved to the session
This is an implementation detail test, but we don't have a better way to test
this right now. Proper testing would need end-to-end tests with e.g. Selenium."""
oidc_client = None
if create_client:
oidc_client = oidcclient_factory(
client_id="test_client",
redirect_uris=['https://tunnistamo.test/redirect_uri'],
response_types=["id_token"]
)
url = reverse('authorize')
data = {
'client_id': 'test_client',
'response_type': 'id_token',
'redirect_uri': 'https://tunnistamo.test/redirect_uri',
'scope': 'openid',
'response_mode': 'form_post',
'nonce': 'abcdefg'
}
client.get(url, data)
if oidc_client:
session_client_id = client.session.get("oidc_authorize_original_client_id")
assert session_client_id == oidc_client.client_id
else:
assert "oidc_authorize_original_client_id" not in client.session
@pytest.mark.django_db
@pytest.mark.parametrize('with_pkce', (True, False))
@pytest.mark.parametrize('response_type', [key for key, val in RESPONSE_TYPE_CHOICES])
def test_public_clients_ability_to_skip_consent(
client,
user,
oidcclient_factory,
with_pkce,
response_type,
):
key = RSA.generate(1024)
rsakey = RSAKey(key=key.exportKey('PEM').decode('utf8'))
rsakey.save()
oidc_client = oidcclient_factory(
client_type='public',
require_consent=False,
response_types=[key for key, val in RESPONSE_TYPE_CHOICES],
redirect_uris=['https://example.com/callback'],
)
client.force_login(user)
url = reverse('authorize')
data = {
'client_id': oidc_client.client_id,
'redirect_uri': oidc_client.redirect_uris[0],
'scope': 'openid profile',
'response_type': response_type,
'nonce': 'testnonce',
}
if with_pkce:
data.update({
# The code challenge value doesn't matter as only its existence is checked
# in the authorize endpoint. The value would be verified in the token endpoint.
'code_challenge': 'abcdefg',
'code_challenge_method': 'S256'
})
response = client.get(url, data)
# Consent skip should happen when using implicit flow, or code flow with pkce.
should_redirect_to_client_map = {
('code', True): True,
('code', False): False,
('id_token', True): True,
('id_token', False): True,
('id_token token', True): True,
('id_token token', False): True,
('code token', True): True,
('code token', False): False,
('code id_token', True): True,
('code id_token', False): False,
('code id_token token', True): True,
('code id_token token', False): False,
}
if should_redirect_to_client_map[(response_type, with_pkce)]:
assert response.status_code == 302
assert response['Location'].startswith(oidc_client.redirect_uris[0])
assert 'error' not in response['Location']
else:
assert response.status_code == 200
assert 'name="allow" type="submit"' in response.content.decode('utf-8')
|
the-stack_0_15766 | import numpy as np
from pyscf import gto, scf
from kspies import wy
mol = gto.M(atom = 'N 0 0 0 ; N 1.1 0 0',
basis = 'cc-pVDZ')
mf = scf.RHF(mol).run()
dm_tar = mf.make_rdm1()
PBS = gto.expand_etbs([(0, 13, 2**-4 , 2),
(1, 3 , 2**-2 , 2)])
mw = wy.RWY(mol, dm_tar, pbas=PBS)
#Note that for this designed-to-be ill-conditioned problem,
#Hessian-based optimization algorithms are problematic.
mw.method = 'bfgs'
mw.tol = 2e-7
mw.run()
mw.info()
Ws_fin = mw.Ws
etas = [ 2.**(-a) for a in np.linspace(5., 27., 45) ]
v = np.zeros(len(etas))
W = np.zeros(len(etas))
for i, eta in enumerate(etas):
mw.reg=eta
mw.run()
v[i] = mw.Dvb()
W[i] = mw.Ws
mw.info()
import matplotlib.pyplot as plt
fig,ax = plt.subplots(2)
ax[0].scatter(np.log10(Ws_fin-W), np.log10(v))
ax[1].scatter(np.log10(etas), v*etas/(Ws_fin-W))
plt.tight_layout()
#plt.savefig('L_curves.pdf', format='pdf')
#plt.savefig('L_curves.eps', format='eps')
plt.show()
|
the-stack_0_15768 | #!/usr/bin/env python
# Construct a command that will create a texture, appending console
# output to the file "out.txt".
def omaketx_command (infile, outfile, extraargs="",
options="", output_cmd="-otex",
showinfo=True, showinfo_extra="",
silent=False, concat=True) :
command = (oiio_app("oiiotool")
+ " " + make_relpath(infile,tmpdir)
+ " " + extraargs
+ " " + output_cmd + options + " " + make_relpath(outfile,tmpdir) )
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
if showinfo:
command += info_command (outfile, extraargs=showinfo_extra, safematch=1)
return command
# location of oiio-images directory
oiio_images = OIIO_TESTSUITE_IMAGEDIR
# Just for simplicity, make a checkerboard with a solid alpha
command += oiiotool (" --pattern checker 128x128 4 --ch R,G,B,=1.0"
+ " -d uint8 -o " + make_relpath("checker.tif") )
# Basic test - recreate the grid texture
command += omaketx_command (oiio_images + "/grid.tif", "grid.tx")
# Test --resize (to power of 2) with the grid, which is 1000x1000
command += omaketx_command (oiio_images + "/grid.tif", "grid-resize.tx",
options=":resize=1")
# Test -d to set output data type
command += omaketx_command ("checker.tif", "checker-uint16.tx",
"-d uint16")
# Test --ch to restrict the number of channels
command += omaketx_command ("checker.tif", "checker-1chan.tx",
"--ch 0")
# Test --tiles to set a non-default tile size
command += omaketx_command ("checker.tif", "checker-16x32tile.tx",
"--tile 16 32")
# Test --separate and --compression
command += omaketx_command ("checker.tif", "checker-seplzw.tx",
"--planarconfig separate --compression lzw")
# Test --wrap
command += omaketx_command ("checker.tif", "checker-clamp.tx",
options=":wrap=clamp")
# Test --swrap and --twrap
command += omaketx_command ("checker.tif", "checker-permir.tx",
options=":swrap=periodic:twrap=mirror")
# Test --nomipmap
command += omaketx_command ("checker.tif", "checker-nomip.tx",
options=":nomipmap=1")
# Test setting matrices
command += omaketx_command ("checker.tif", "checker-camera.tx",
"--attrib:type=matrix worldtocamera 1,0,0,0,0,2,0,0,0,0,1,0,0,0,0,1 " +
"--attrib:type=matrix worldtoscreen 3,0,0,0,0,3,0,0,0,0,3,0,1,2,3,1")
# Test --opaque-detect (should drop the alpha channel)
command += omaketx_command ("checker.tif", "checker-opaque.tx",
options=":opaque_detect=1")
# Test --monochrome-detect (first create a monochrome image)
command += oiiotool (" --pattern constant:color=.25,.25,.25 256x256 3 "
+ " -d uint8 -o " + make_relpath("gray.tif"))
command += omaketx_command ("gray.tif", "gray-mono.tx",
options=":monochrome_detect=1")
# Test --monochrome-detect on something that is NOT monochrome
command += oiiotool (" --pattern constant:color=.25,.2,.15 256x256 3 "
+ " -d uint8 -o " + make_relpath("pink.tif"))
command += omaketx_command ("pink.tif", "pink-mono.tx",
options=":monochrome_detect=1")
# Test --prman : should save 'separate' planarconfig, and funny 64x32 tiles
# since we are specifying 16 bits, and it should save as 'int16' even though
# we asked for unsigned.
command += omaketx_command ("checker.tif", "checker-prman.tx",
"-d uint16", options=":prman=1")
# Test --fixnan : take advantage of the bad.exr images in
# testsuite/oiiotool-fixnan. (Use --nomipmap to cut down on stats output)
# FIXME: would also like to test --checknan, but the problem with that is
# that is actually FAILS if there's a nan.
command += omaketx_command (OIIO_TESTSUITE_ROOT+"/oiiotool-fixnan/src/bad.exr", "nan.exr",
"--fixnan box3", options=":nomipmap=1",
showinfo=True, showinfo_extra="--stats")
# Test that when outputting half textures, we clamp large float values
# rather than inadvertently turning into Inf in the process of output to
# half.
command += oiiotool (" --pattern constant:color=1.0e6,1.0e6,1.0e6 2x2 3 -d float -o million.tif")
command += omaketx_command ("million.tif", "bigval.exr",
"-d half", showinfo_extra="--stats")
# Test --format to force exr even though it can't be deduced from the name.
command += omaketx_command ("checker.tif", "checker-exr.pdq",
options=":fileformatname=exr")
# Test that the oiio:SHA-1 hash is stable, and that that changing filter and
# using -hicomp result in different images and different hashes.
command += omaketx_command (oiio_images + "/grid.tif", "grid-lanczos3.tx",
options = ":filter=lanczos3", showinfo=False)
command += omaketx_command (oiio_images + "/grid.tif", "grid-lanczos3-hicomp.tx",
options = ":filter=lanczos3:highlightcomp=1", showinfo=False)
command += info_command ("grid.tx",
extraargs="--metamatch oiio:SHA-1")
command += info_command ("grid-lanczos3.tx",
extraargs="--metamatch oiio:SHA-1")
command += info_command ("grid-lanczos3-hicomp.tx",
extraargs="--metamatch oiio:SHA-1")
# Test that we cleanly replace any existing SHA-1 hash and ConstantColor
# hint in the ImageDescription of the input file.
command += oiiotool (" --pattern constant:color=1,0,0 64x64 3 "
+ " --caption \"foo SHA-1=1234abcd ConstantColor=[0.0,0,-0.0] bar\""
+ " -d uint8 -o " + make_relpath("small.tif") )
command += info_command ("small.tif", safematch=1);
command += omaketx_command ("small.tif", "small.tx",
options=":oiio=1:constant_color_detect=1")
# Regression test -- at one point, we had a bug where we were botching
# the poles of OpenEXR env maps, adding energy. Check it by creating an
# all-white image, turning it into an env map, and calculating its
# statistics (should be 1.0 everywhere).
command += oiiotool (" --pattern constant:color=1,1,1 4x2 3 "
+ " -d half -o " + make_relpath("white.exr"))
command += omaketx_command ("white.exr", "whiteenv.exr",
output_cmd="-oenv", showinfo=False)
command += oiiotool ("--stats -a whiteenv.exr")
command += oiiotool (" --pattern noise 64x64 1"
+ " -d half -o " + make_relpath("bump.exr"))
command += omaketx_command ("bump.exr", "bumpslope.exr",
extraargs="-d half",
output_cmd="-obump", showinfo=False)
command += oiiotool ("--stats -a bumpslope.exr")
outputs = [ "out.txt" ]
# To do: --filter --checknan --fullpixels
# --prman-metadata --ignore-unassoc
# --mipimage
# --envlatl TIFF, --envlatl EXR
# --colorconvert --unpremult -u --fovcot
|
the-stack_0_15769 | # Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="implicit_lambda",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="0.4.0",
description="Implicit lambdas with placeholder notation and code generation",
# Fix windows newlines.
long_description=long_description.replace("\r\n", "\n"),
# The project's main homepage.
url="https://github.com/blackhc/implicit_lambda",
# Author details
author="Andreas @blackhc Kirsch",
author_email="[email protected]",
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Libraries :: Python Modules",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
],
# What does your project relate to?
keywords="tools lambda placeholder",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["implicit_lambda", "implicit_lambda.details", "implicit_lambda.tests"],
package_dir={"": "src"},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
"dev": ["check-manifest"],
"test": ["coverage", "codecov", "pytest", "pytest-benchmark", "pytest-cov", "hypothesis"],
},
setup_requires=["pytest-runner"],
)
|
the-stack_0_15770 | from cereal import car
from common.numpy_fast import mean
from selfdrive.config import Conversions as CV
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.gm.values import DBC, CAR, AccState, CanBus, \
CruiseButtons, STEER_THRESHOLD
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
self.shifter_values = can_define.dv["ECMPRDNL"]["PRNDL"]
def update(self, pt_cp):
ret = car.CarState.new_message()
self.prev_cruise_buttons = self.cruise_buttons
self.cruise_buttons = pt_cp.vl["ASCMSteeringButton"]['ACCButtons']
ret.wheelSpeeds.fl = pt_cp.vl["EBCMWheelSpdFront"]['FLWheelSpd'] * CV.KPH_TO_MS
ret.wheelSpeeds.fr = pt_cp.vl["EBCMWheelSpdFront"]['FRWheelSpd'] * CV.KPH_TO_MS
ret.wheelSpeeds.rl = pt_cp.vl["EBCMWheelSpdRear"]['RLWheelSpd'] * CV.KPH_TO_MS
ret.wheelSpeeds.rr = pt_cp.vl["EBCMWheelSpdRear"]['RRWheelSpd'] * CV.KPH_TO_MS
ret.vEgoRaw = mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr])
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.01
ret.steeringAngle = pt_cp.vl["PSCMSteeringAngle"]['SteeringWheelAngle']
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["ECMPRDNL"]['PRNDL'], None))
ret.brake = pt_cp.vl["EBCMBrakePedalPosition"]['BrakePedalPosition'] / 0xd0
# Brake pedal's potentiometer returns near-zero reading even when pedal is not pressed.
if ret.brake < 10/0xd0:
ret.brake = 0.
ret.gas = pt_cp.vl["AcceleratorPedal"]['AcceleratorPedal'] / 254.
ret.gasPressed = ret.gas > 1e-5
ret.steeringTorque = pt_cp.vl["PSCMStatus"]['LKADriverAppldTrq']
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
# 1 - open, 0 - closed
ret.doorOpen = (pt_cp.vl["BCMDoorBeltStatus"]['FrontLeftDoor'] == 1 or
pt_cp.vl["BCMDoorBeltStatus"]['FrontRightDoor'] == 1 or
pt_cp.vl["BCMDoorBeltStatus"]['RearLeftDoor'] == 1 or
pt_cp.vl["BCMDoorBeltStatus"]['RearRightDoor'] == 1)
# 1 - latched
ret.seatbeltUnlatched = pt_cp.vl["BCMDoorBeltStatus"]['LeftSeatBelt'] == 0
ret.leftBlinker = pt_cp.vl["BCMTurnSignals"]['TurnSignals'] == 1
ret.rightBlinker = pt_cp.vl["BCMTurnSignals"]['TurnSignals'] == 2
self.park_brake = pt_cp.vl["EPBStatus"]['EPBClosed']
ret.cruiseState.available = bool(pt_cp.vl["ECMEngineStatus"]['CruiseMainOn'])
ret.espDisabled = pt_cp.vl["ESPStatus"]['TractionControlOn'] != 1
self.pcm_acc_status = pt_cp.vl["AcceleratorPedal2"]['CruiseState']
ret.brakePressed = ret.brake > 1e-5
# Regen braking is braking
if self.car_fingerprint == CAR.VOLT:
ret.brakePressed = ret.brakePressed or bool(pt_cp.vl["EBCMRegenPaddle"]['RegenPaddle'])
ret.cruiseState.enabled = self.pcm_acc_status != AccState.OFF
# ret.cruiseState.standstill = self.pcm_acc_status == AccState.STANDSTILL
ret.cruiseState.standstill = False # Never be in standstill (for auto-resume to work)
# 0 - inactive, 1 - active, 2 - temporary limited, 3 - failed
self.lkas_status = pt_cp.vl["PSCMStatus"]['LKATorqueDeliveredStatus']
ret.steerWarning = self.lkas_status not in [0, 1]
return ret
@staticmethod
def get_can_parser(CP):
# this function generates lists for signal, messages and initial values
signals = [
# sig_name, sig_address, default
("BrakePedalPosition", "EBCMBrakePedalPosition", 0),
("FrontLeftDoor", "BCMDoorBeltStatus", 0),
("FrontRightDoor", "BCMDoorBeltStatus", 0),
("RearLeftDoor", "BCMDoorBeltStatus", 0),
("RearRightDoor", "BCMDoorBeltStatus", 0),
("LeftSeatBelt", "BCMDoorBeltStatus", 0),
("RightSeatBelt", "BCMDoorBeltStatus", 0),
("TurnSignals", "BCMTurnSignals", 0),
("AcceleratorPedal", "AcceleratorPedal", 0),
("CruiseState", "AcceleratorPedal2", 0),
("ACCButtons", "ASCMSteeringButton", CruiseButtons.UNPRESS),
("SteeringWheelAngle", "PSCMSteeringAngle", 0),
("FLWheelSpd", "EBCMWheelSpdFront", 0),
("FRWheelSpd", "EBCMWheelSpdFront", 0),
("RLWheelSpd", "EBCMWheelSpdRear", 0),
("RRWheelSpd", "EBCMWheelSpdRear", 0),
("PRNDL", "ECMPRDNL", 0),
("LKADriverAppldTrq", "PSCMStatus", 0),
("LKATorqueDeliveredStatus", "PSCMStatus", 0),
("TractionControlOn", "ESPStatus", 0),
("EPBClosed", "EPBStatus", 0),
("CruiseMainOn", "ECMEngineStatus", 0),
]
if CP.carFingerprint == CAR.VOLT:
signals += [
("RegenPaddle", "EBCMRegenPaddle", 0),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, [], CanBus.POWERTRAIN)
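# Illustrative usage sketch (assumptions: an openpilot runtime where `CP` is the
# fingerprinted car's CarParams and CAN data is being received):
#   CS = CarState(CP)
#   cp = CarState.get_can_parser(CP)   # powertrain-bus parser built above
#   ret = CS.update(cp)                # returns a populated cereal car.CarState message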
|
the-stack_0_15771 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis19.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43813504, 45705472]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'label_position': 'high'})
chart.set_y_axis({'label_position': 'low'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
the-stack_0_15772 | import os
import unittest
from shutil import rmtree
import numpy as np
class TestSkeletonIo(unittest.TestCase):
shape = 128
n_nodes = 100
tmp_folder = './tmp'
def setUp(self):
os.makedirs(self.tmp_folder, exist_ok=True)
def tearDown(self):
try:
rmtree(self.tmp_folder)
except OSError:
pass
def _get_skel(self):
coords = np.random.randint(0, self.shape, size=(self.n_nodes, 3))
edges = np.random.randint(0, self.n_nodes, size=(self.n_nodes, 2))
return coords, edges
def test_swc(self):
from elf.skeleton.io import read_swc, write_swc
n_skels = 5
for skel_id in range(n_skels):
path = os.path.join(self.tmp_folder, f'{skel_id}.swc')
coords, edges = self._get_skel()
write_swc(path, coords, edges)
            _, coords_read, parents_read = read_swc(path)
self.assertTrue(np.array_equal(coords, coords_read))
self.assertEqual(len(parents_read), len(coords_read))
# checking for edges is a bit more complicated ...
# self.assertTrue(np.array_equal(edges, edges_read))
def test_nml(self):
from elf.skeleton.io import read_nml, write_nml
if __name__ == '__main__':
unittest.main()
|
the-stack_0_15773 | from exception_wrappers.libraries.playhouse.apsw_ext import *
def migrate(migrator, database):
# Account
migrator.add_column('account', 'deleted', BooleanField(default=False))
#
# Schema specification (for migration verification)
#
SPEC = {
'account': {
'id': 'INTEGER PRIMARY KEY NOT NULL',
'name': 'VARCHAR(255)',
'thumb': 'TEXT',
'deleted': 'SMALLINT NOT NULL',
'refreshed_at': 'DATETIME'
},
}
|
the-stack_0_15774 | import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerBasketballEnv(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.3
goal_low = (-0.1, 0.85, 0.15)
goal_high = (0.1, 0.9+1e-7, 0.15)
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.03)
obj_high = (0.1, 0.7, 0.03)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.6, 0.03], dtype=np.float32),
'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),
}
self.goal = np.array([0, 0.9, 0.15])
self.obj_init_pos = self.init_config['obj_init_pos']
self.obj_init_angle = self.init_config['obj_init_angle']
self.hand_init_pos = self.init_config['hand_init_pos']
self.liftThresh = liftThresh
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(
np.array(goal_low) + np.array([0, -0.05001, 0.1000]),
np.array(goal_high) + np.array([0, -0.05000, 0.1001])
)
@property
def model_name(self):
return full_v1_path_for('sawyer_xyz/sawyer_basketball.xml')
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pickRew, placingDist = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {
'reachDist': reachDist,
'goalDist': placingDist,
'epRew': reward,
'pickRew': pickRew,
'success': float(placingDist <= 0.08)
}
return ob, reward, False, info
def _get_pos_objects(self):
return self.data.get_geom_xpos('objGeom')
def reset_model(self):
self._reset_hand()
basket_pos = self.goal.copy()
self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos
self._target_pos = self.data.site_xpos[self.model.site_name2id('goal')]
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = self.objHeight + self.liftThresh
if self.random_init:
goal_pos = self._get_state_rand_vec()
basket_pos = goal_pos[3:]
while np.linalg.norm(goal_pos[:2] - basket_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
basket_pos = goal_pos[3:]
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos
self._target_pos = basket_pos + np.array([0, -0.05, 0.1])
self._set_obj_xyz(self.obj_init_pos)
self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos)) + self.heightTarget
return self._get_obs()
def _reset_hand(self):
super()._reset_hand(10)
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
self.pickCompleted = False
def compute_reward(self, actions, obs):
objPos = obs[3:6]
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
heightTarget = self.heightTarget
goal = self._target_pos
reachDist = np.linalg.norm(objPos - fingerCOM)
placingDist = np.linalg.norm(objPos - goal)
assert np.all(goal == self._get_site_pos('goal'))
def reachReward():
reachRew = -reachDist
reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])
zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])
if reachDistxy < 0.05:
reachRew = -reachDist
else:
reachRew = -reachDistxy - 2*zRew
#incentive to close fingers when reachDist is small
if reachDist < 0.05:
reachRew = -reachDist + max(actions[-1],0)/50
return reachRew , reachDist
def pickCompletionCriteria():
tolerance = 0.01
if objPos[2] >= (heightTarget - tolerance):
return True
else:
return False
if pickCompletionCriteria():
self.pickCompleted = True
def objDropped():
return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02)
def orig_pickReward():
hScale = 100
if self.pickCompleted and not(objDropped()):
return hScale*heightTarget
elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :
return hScale* min(heightTarget, objPos[2])
else:
return 0
def placeReward():
c1 = 1000 ; c2 = 0.01 ; c3 = 0.001
cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped())
if cond:
placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3))
placeRew = max(placeRew,0)
return [placeRew , placingDist]
else:
return [0 , placingDist]
reachRew, reachDist = reachReward()
pickRew = orig_pickReward()
placeRew , placingDist = placeReward()
assert ((placeRew >=0) and (pickRew>=0))
reward = reachRew + pickRew + placeRew
return [reward, reachDist, pickRew, placingDist]
|
the-stack_0_15776 | from pgshovel.interfaces.replication_pb2 import (
State,
StreamState,
)
from pgshovel.replication.validation.bootstrap import validate_bootstrap_state
from pgshovel.replication.validation.consumers import validate_consumer_state
from pgshovel.replication.validation.transactions import validate_transaction_state
class MultipleStateValidator(object):
def __init__(self, message, validators):
self.message = message
self.validators = validators
def __call__(self, state, *args, **kwargs):
states = {}
for name, validator in self.validators.items():
if state is not None and state.HasField(name):
value = getattr(state, name)
else:
value = None
result = validator(value, *args, **kwargs)
if result is not None:
states[name] = result
return self.message(**states)
validate_state = MultipleStateValidator(State, {
'bootstrap_state': validate_bootstrap_state,
'stream_state': MultipleStateValidator(StreamState, {
'consumer_state': validate_consumer_state,
'transaction_state': validate_transaction_state,
})
})
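# Minimal usage sketch (hypothetical arguments, assuming the per-field
# validators follow the (current_value, *args) convention used by __call__):
#
#     # new_state = validate_state(previous_state, event)   # -> replication_pb2.State
#
# Each field validator is always invoked -- with the field's value when it is
# set on the incoming message, None otherwise -- and only non-None results are
# copied into the freshly built State/StreamState messages.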
#: The expected types of event for a stream of transactions when there is no
#: existing ``TransactionState``.
TRANSACTION_START_EVENT_TYPES = validate_state.validators['stream_state'].validators['transaction_state'].receivers[None].keys() # noqa
|
the-stack_0_15777 | """This module provides file I/O for Quake BSP2 map files.
Example:
bsp_file = bsp.Bsp.open('ad_sepulcher.bsp')
"""
import struct
from .bsp29 import Bsp as Bsp29
__all__ = ['is_bspfile', 'Bsp']
IDENTITY = b'BSP2'
def _check_bspfile(fp):
fp.seek(0)
data = fp.read(struct.calcsize('<4s'))
identity = struct.unpack('<4s', data)[0]
fp.seek(0)
return identity == IDENTITY
def is_bspfile(filename):
"""Quickly see if a file is a bsp file by checking the magic number.
The filename argument may be a file for file-like object.
Args:
filename: File to check as string or file-like object.
Returns:
True if given file's magic number is correct.
"""
try:
if hasattr(filename, 'read'):
return _check_bspfile(fp=filename)
else:
with open(filename, 'rb') as fp:
return _check_bspfile(fp)
except Exception:
return False
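# Usage sketch (hypothetical path), following the module and class docstrings:
#
#     # from vgio.quake.bsp import bsp29a
#     # if bsp29a.is_bspfile('ad_sepulcher.bsp'):
#     #     bsp_file = bsp29a.Bsp.open('ad_sepulcher.bsp')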
class Node(Bsp29.factory.Node):
format = '<i8i2I'
size = struct.calcsize(format)
class Face(Bsp29.factory.Face):
format = '<2ii2i4Bi'
size = struct.calcsize(format)
class ClipNode(Bsp29.factory.ClipNode):
format = '<i2i'
size = struct.calcsize(format)
class Leaf(Bsp29.factory.Leaf):
format = '<2i6i2I4B'
size = struct.calcsize(format)
class Edge(Bsp29.factory.Edge):
format = '<2I'
size = struct.calcsize(format)
class Bsp(Bsp29):
"""Class for working with Bsp files
Example:
Basic usage::
from vgio.quake.bsp.bsp29a import Bsp
b = Bsp.open('ad_sepulcher.bsp')
Attributes:
version: Version of the map file. Vanilla Quake is 29.
entities: A string containing the entity definitions.
planes: A sequence of Planes used by the bsp tree data structure.
miptextures: A sequence of Miptextures.
vertexes: A sequence of Vertexes.
visibilities: A sequence of ints representing visibility data.
nodes: A sequence of Nodes used by the bsp tree data structure.
texture_infos: A sequence of TextureInfo objects.
faces: A sequence of Faces.
lighting: A sequence of ints representing lighting data.
clip_nodes: A sequence of ClipNodes used by the bsp tree data structure.
leafs: A sequence of Leafs used by the bsp tree data structure.
mark_surfaces: A sequence of ints representing lists of consecutive faces
used by the Node objects.
edges: A sequence of Edges.
surf_edges: A sequence of ints representing list of consecutive edges used
by the Face objects.
models: A sequence of Models.
Note:
The first model is the entire level.
fp: The file-like object to read data from.
mode: The file mode for the file-like object.
"""
class factory(Bsp29.factory):
Node = Node
Face = Face
ClipNode = ClipNode
Leaf = Leaf
Edge = Edge
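# Design note: BSP2 reuses the Bsp29 reader/writer logic; only the nested
# ``factory`` is overridden, swapping in the struct formats defined above,
# whose index fields are widened to 32 bits.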
|
the-stack_0_15778 | import os
import setuptools
dir_repo = os.path.abspath(os.path.dirname(__file__))
# read the contents of REQUIREMENTS file
with open(os.path.join(dir_repo, "requirements.txt"), "r") as f:
requirements = f.read().splitlines()
# read the contents of README file
with open(os.path.join(dir_repo, "README.md"), encoding="utf-8") as f:
readme = f.read()
setuptools.setup(
name="neuralprophet",
version="0.2.5",
description="A simple yet customizable forecaster",
author="Oskar Triebe",
author_email='[email protected]',
url="https://github.com/ourownstory/neural_prophet",
license="MIT",
packages=setuptools.find_packages(),
python_requires=">=3.7",
install_requires=requirements,
extras_require={
"dev": ["livelossplot>=0.5.3", "black"],
"live": ["livelossplot>=0.5.3"],
},
# setup_requires=[""],
scripts=["scripts/neuralprophet_dev_setup"],
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
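# Install-time usage of the extras declared above (assuming the package is
# published under the name given to setup()):
#
#     pip install neuralprophet[live]    # adds livelossplot for live plotting
#     pip install neuralprophet[dev]     # dev extras (livelossplot + black)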
|
the-stack_0_15780 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from . import _utils, _io, _logger
from ._graph_execution_manager import GraphExecutionManager, _RunStateInfo
from ._execution_agent import InferenceAgent
from .debug_options import DebugOptions
from ._fallback import ORTModuleFallbackException, _FallbackPolicy, _FallbackManager
from onnxruntime.capi import _pybind_state as C
import onnx
import torch
import warnings
class InferenceManager(GraphExecutionManager):
"""Concrete instance of GraphExecutionManager that is able to manage the inference model
    InferenceManager is responsible for building and running the forward graph of the inference model
"""
def __init__(self, model, debug_options: DebugOptions, fallback_manager: _FallbackManager):
super().__init__(model, debug_options, fallback_manager)
self._export_mode = torch.onnx.TrainingMode.EVAL
@staticmethod
def execution_session_run_forward(execution_session, onnx_model, device, *inputs):
"""Runs the forward graph on execution_session with given model inputs and device"""
# Assert that the input and model device match
_utils._check_same_device(device, "Input argument to forward", *inputs)
# TODO: Try to reuse the output buffers as some of the output tensors are same sizes,
# especially the backward graph outputs.
# REVIEW(codemzs): Consolidate Training Agent with InferenceAgent on C++ side to not
# have the need for passing IOBinding.
io_binding = execution_session.io_binding()
run_options = C.RunOptions()
# Use IO binding
_utils._create_iobinding(io_binding, inputs, onnx_model, device)
# Run and return module outputs.
ort_output = execution_session.run_forward(io_binding, run_options)
forward_outputs, run_id = ort_output.ortvalues, ort_output.run_id
user_outputs = tuple(_utils._ortvalue_to_torch_tensor(
forward_output._ortvalue) for forward_output in forward_outputs)
state = None
# Assert that the outputs and model device match
_utils._check_same_device(
device, "Output argument from forward", *user_outputs)
output_info = [(output.shape, output.device, output.dtype)
for output in user_outputs]
run_info = _RunStateInfo(state, output_info)
# Return user outputs and forward run information
return user_outputs, run_info
def forward(self, *inputs, **kwargs):
'''Forward pass of the inference model
ONNX model is exported the first time this method is executed.
Next, we build an optimized inference graph with module_graph_builder.
Finally, we instantiate the ONNX Runtime InferenceSession through the InferenceAgent.
'''
# Fallback to PyTorch due to failures *external* to forward(),
# typically from initialization
if self._fallback_manager.is_pending():
return self._fallback_manager.fallback(self._original_module, self._debug_options.logging.log_level, *inputs, **kwargs)
try:
# Exporting module to ONNX for the first time
build_graph = self._export_model(*inputs, **kwargs)
if build_graph:
# If model was exported, then initialize the graph builder
self._initialize_graph_builder(training=False)
# Build the inference graph
if build_graph:
self._build_graph()
module_device = _utils.get_device_from_module(
self._original_module)
# The inference session should be created every time
# the graph was built or if the device changed between calls to forward
create_execution_session = build_graph or self._device != module_device
if self._device != module_device:
self._device = module_device
if create_execution_session:
# Create execution session creates the inference_session
self._create_execution_agent()
user_outputs, _ = InferenceManager.execution_session_run_forward(self._execution_agent,
self._onnx_models.optimized_model,
self._device,
*_io._combine_input_buffers_initializers(
self._graph_initializers,
self._graph_info.user_input_names,
self._input_info,
self._flattened_module.named_buffers(),
inputs,
kwargs,
self._device))
return _io.unflatten_user_output(self._module_output_schema,
user_outputs)
except ORTModuleFallbackException as e:
# Exceptions subject to fallback are handled here
self._fallback_manager.handle_exception(exception=e,
log_level=self._debug_options.logging.log_level)
except Exception as e:
# Catch-all FALLBACK_FORCE_TORCH_FORWARD fallback is handled here
self._fallback_manager.handle_exception(exception=e,
log_level=self._debug_options.logging.log_level,
override_policy=_FallbackPolicy.FALLBACK_FORCE_TORCH_FORWARD)
# Fallback to PyTorch due to failures *during* forward(),
# (e.g. export, model/input post-processing, forward, output processing, etc)
if self._fallback_manager.is_pending():
return self._fallback_manager.fallback(self._original_module, self._debug_options.logging.log_level, *inputs, **kwargs)
def _build_graph(self):
"""Build an optimized inference graph using the module_graph_builder"""
super()._build_graph()
if self._debug_options.save_onnx_models.save:
self._onnx_models.save_optimized_model(self._debug_options.save_onnx_models.path,
self._debug_options.save_onnx_models.name_prefix,
self._export_mode)
def _create_execution_agent(self):
"""Creates an InferenceAgent that can run forward graph on an inference model"""
session_options, providers, provider_options = self._get_session_config()
self._execution_agent = InferenceAgent(self._onnx_models.optimized_model.SerializeToString(),
session_options, providers, provider_options)
|
the-stack_0_15782 | # Copyright (C) 2011-2020 Airbus, [email protected]
import sys, os
import logging
log = logging.getLogger("plasmasm")
try:
# Check amoco dependency on OrderedDict
from collections import OrderedDict
del OrderedDict
except ImportError:
log.error('amoco backend needs python 2.7, with OrderedDict')
raise ImportError('amoco backend needs python 2.7, with OrderedDict')
try:
# Check amoco dependency on pyparsing
import pyparsing
del pyparsing
except ImportError:
log.error('amoco backend needs that pyparsing is installed')
raise ImportError('amoco backend needs that pyparsing is installed')
# If amoco is not installed system-wide, it is recommended to install it
# in the parent directory of plasmasm.
basedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
if basedir == '': basedir = '.'
sys.path.append(basedir+'/amoco')
sys.path.append(basedir+'/crysp')
sys.path.append(basedir+'/grandalf')
import amoco
from amoco.logger import Log
Log.progress = lambda count, total=0, pfx='': None
try:
from amoco.arch.core import type_data_processing, type_control_flow, type_other, type_cpu_state, type_undefined
except ImportError:
log.error("PATH %s", sys.path)
e = 'amoco backend not well installed: %s' % sys.exc_info()[1]
log.error(e)
raise ImportError(e)
from amoco.cas.mapper import mapper
from amoco.arch.x86 import env
from amoco.arch.x86 import cpu_x86 as cpu_amoco
try:
# Newer amoco
from amoco.arch.x86.cpu_x86 import instruction_x86 as instruction
except ImportError:
# Older amoco
from amoco.arch.core import instruction
env.internals['keep_order'] = True
cpu_addrsize = 32
from amoco.arch.x86.formats import default_mnemo_name, default_eqn_parser, \
mnemo_string_rep, \
IA32_Binutils_ATT, IA32_Binutils_Intel, IA32_MacOSX_ATT
#NON_REGRESSION_FOUND = True # Define this variable to avoid raising errors
# Encapsulation of internals
class API_AMOCO(object):
# API to access opname or prefix
def opname(self):
return default_mnemo_name(self.amoco)[-1][1]
opname = property(opname)
def prefix(self):
if self.amoco.misc.get('pfx') is None:
return []
pfx = []
if self.amoco.misc['pfx'][0] is not None:
pfx.append({
'lock': 0xf0,
'repne': 0xf2,
'rep': 0xf3,
}[self.amoco.misc['pfx'][0]])
if self.amoco.misc['pfx'][1] is not None:
assert 'segreg' == self.amoco.misc['pfx'][1]
pfx.append({
env.es: 0x26,
env.cs: 0x2e,
env.ss: 0x36,
env.ds: 0x3e,
env.fs: 0x64,
env.gs: 0x65,
}[self.amoco.misc['segreg']])
if self.amoco.misc['pfx'][2] is not None:
pfx.append({
'opdsz': 0x66,
}[self.amoco.misc['pfx'][2]])
if self.amoco.misc['pfx'][3] is not None:
pfx.append({
'adrsz': 0x67,
}[self.amoco.misc['pfx'][3]])
return pfx
prefix = property(prefix)
#
# API to access the arguments
def api_nb_arg(self):
''' How many arguments for this instruction '''
return len(self.amoco.operands)
def api_arg_txt(self, pos, asm_format=None):
''' Text representation of argument 'pos' '''
if asm_format == 'att_syntax':
from amoco.arch.x86.formats import att_opers
return list(reversed(att_opers(self.amoco)))[pos*2][1]
else:
from amoco.arch.x86.formats import intel_opers
res = intel_opers(self.amoco)[pos*2][1]
if res.startswith('DWORD PTR '): res = res[10:]
return res
def api_get_cst(self, pos):
''' If the argument 'pos' is numeric,
then get its value as an 'int' '''
arg = self.amoco.operands[pos]
if arg._is_cst:
return int(int(arg))
return None
def api_get_imm(self, pos):
''' If the argument 'pos' contains an immediate value / displacement
then get its value as an 'int' '''
arg = self.amoco.operands[pos]
if arg._is_cst:
return int(int(arg))
elif arg._is_mem and arg.a.base._is_cst:
return int(int(arg.a.base))
elif arg._is_mem and not hasattr(arg.a.disp, '_is_cst'):
return int(int(arg.a.disp))
elif arg._is_eqn and arg.op.symbol == '+' and arg.r._is_cst:
return arg.r.value
elif arg._is_eqn and arg.op.symbol == '-' and arg.r._is_cst:
return (-arg.r).value
return None
def api_get_symbol(self, pos):
''' Gets the argument 'pos' in the form of a symbol if it is a label '''
arg = self.amoco.operands[pos]
if arg._is_lab:
return arg.ref
return None
def api_get_label(self, pos):
''' Gets a label if present in the argument 'pos'.
Gets two labels if it is a label difference. '''
arg = self.amoco.operands[pos]
if arg._is_mem and not hasattr(arg.a.disp, '_is_lab'):
label, label_dif, cste = default_eqn_parser(arg.a.base)
return label, label_dif
elif arg._is_mem:
label, label_dif, cste = default_eqn_parser(arg.a.disp)
return label, label_dif
else:
label, label_dif, cste = default_eqn_parser(arg)
return label, label_dif
def api_is_address(self, pos):
''' True if the argument 'pos' is an address '''
arg = self.amoco.operands[pos]
return arg is not None and arg._is_mem
def api_is_arg_size(self, pos, size):
''' True if the argument 'pos' is a size-bit argument '''
arg = self.amoco.operands[pos]
if arg.size != size: return False
return True
def api_is_reg_size(self, pos, size=None):
''' True if the argument 'pos' is a size-bit register '''
arg = self.amoco.operands[pos]
if expr.get_reg(arg) is None: return False
if size is not None and arg.size != size: return False
return True
def api_is_reg_in_arg(self, pos, reg):
''' True if the argument 'pos' contains a reference to a given register '''
arg = self.amoco.operands[pos]
log.debug("(DEBUG:api_is_reg_in_arg) %s %s", arg, reg)
return str(reg) in str(arg)
def api_same_base_reg(self, pos, instr):
''' Checks that arguments at position 'pos' in 'self' and 'instr'
have the same base register (they may have different disp) '''
arg = expr.get_reg(self.amoco.operands[pos].a.base)
return arg is not None and arg == expr.get_reg(instr.amoco.operands[pos].a.base)
def api_set_imm_label(self, pos, value, label=None, label_dif=None):
''' If the argument 'pos' contains an immediate value / displacement
then substract 'value' and add the symbol 'label'.
If the argument is an absolute address, then 'label' should be at
address 'value'; if it is a relative address, then 'label' should
be at 'value' bytes of the current instruction.
'label_dif' is used for Mach-O binaries to represent LOC_DIF
relocations.
If 'label' is None, we only change the immediate.
If 'label' is False, we remove the label. '''
arg = self.amoco.operands[pos]
sym = 0
if label is False:
# Delete label
assert arg._is_mem
if arg.a.base._is_lab:
_, _, cste = default_eqn_parser(arg.a.base)
arg.a.base = expressions.cst(cste, size=cpu_addrsize)
elif not hasattr(arg.a.disp, '_is_lab'):
_, _, cste = default_eqn_parser(arg.a.base)
arg.a.base = expressions.cst(cste, size=cpu_addrsize)
elif arg.a.disp._is_lab:
arg.a.disp = 0
elif arg.a.disp._is_eqn:
_, _, cste = default_eqn_parser(arg.a.disp)
arg.a.disp = cste
else:
NEVER
elif label is not None:
sym = expressions.lab(label, size=cpu_addrsize)
if label_dif is not None:
sym -= expressions.lab(label_dif, size=cpu_addrsize)
if arg._is_cst:
self.amoco.operands[pos] -= value
self.amoco.operands[pos] += sym
elif arg._is_mem and arg.a.base._is_cst:
arg.a.base -= value
arg.a.base += sym
elif arg._is_mem and (arg.a.base._is_reg or arg.a.base._is_eqn):
arg.a.disp -= value
arg.a.disp += sym
if hasattr(arg.a.disp, '_is_cst') and arg.a.disp._is_cst:
arg.a.disp = arg.a.disp.value
else:
NEVER
def reg_from_name(reg):
if reg == 'eflag': reg = 'eflags'
return env.__dict__[reg]
reg_from_name = staticmethod(reg_from_name)
def api_add_reg(self, pos, reg, last=False):
arg = self.amoco.operands[pos]
reg = self.reg_from_name(reg)
assert arg._is_mem
if arg.a.base._is_cst:
arg.a.disp += arg.a.base.value
arg.a.base = reg
elif arg.a.base._is_lab:
arg.a.disp += arg.a.base
arg.a.base = reg
elif arg.a.base._is_eqn \
and not (arg.a.base.l._is_reg and not arg.a.base.l._is_lab) \
and not (arg.a.base.r._is_reg and not arg.a.base.r._is_lab):
# No register in arg.a.base => becomes a displacement
arg.a.disp += arg.a.base
arg.a.base = reg
elif arg.a.base._is_reg or arg.a.base._is_eqn:
# Replace 'reg+reg' with '2*reg'
if arg.a.base._is_eqn and arg.a.base.op.symbol == '+' \
and arg.a.base.l is arg.a.base.r:
arg.a.base = expressions.op('*',
arg.a.base.l,
expressions.cst(2,size=arg.a.base.l.size))
# Force the order of operands
if last: # reg is last
arg.a.base = expressions.op('+', arg.a.base, reg)
else: # reg is first
arg.a.base = expressions.op('+', reg, arg.a.base)
if env.internals.get('keep_order'): arg.a.base.prop |= 16
else:
NEVER
def api_replace_reg(self, src, dst):
''' In all arguments, replace register 'src' with 'dst'. '''
src = self.reg_from_name(src)
dst = self.reg_from_name(dst)
for pos, arg in enumerate(self.amoco.operands):
if arg._is_cst: pass
elif arg._is_eqn: pass
elif arg._is_reg:
if arg is src: self.amoco.operands[pos] = dst
elif arg._is_mem and arg.a.base._is_reg:
if arg.a.base is src: arg.a.base = dst
elif arg._is_mem and arg.a.base._is_eqn and \
arg.a.base.op.symbol == '*' and \
arg.a.base.l._is_reg:
if arg.a.base.l is src: arg.a.base.l = dst
elif arg._is_mem and arg.a.base._is_eqn and \
arg.a.base.op.symbol == '+' and \
arg.a.base.l._is_reg and \
arg.a.base.r._is_reg:
if arg.a.base.l is src: arg.a.base.l = dst
if arg.a.base.r is src: arg.a.base.r = dst
elif arg._is_mem and arg.a.base._is_eqn and \
arg.a.base.op.symbol == '+' and \
arg.a.base.l._is_reg and \
arg.a.base.r._is_eqn and \
arg.a.base.r.op.symbol == '*' and \
arg.a.base.r.l._is_reg:
if arg.a.base.l is src: arg.a.base.l = dst
if arg.a.base.r.l is src: arg.a.base.r.l = dst
else:
log.error("ARG=%s", arg)
NEVER
class StubNone(object):
''' When amoco fails to disassemble the data '''
def __str__(self, asm_format=None): return "NoneASM"
def __init__(self, offset, bytes):
self.length = len(bytes)
self.bytes = bytes
mnemonic = 'NoneASM'
type = None
operands = []
misc = {}
def __call__(self, m):
# Calling a mapper
pass
def clang_bug_test(self):
if self.amoco.mnemonic == 'TEST' \
and self.symbols.meta.get('compiler') == 'clang' \
and self.symbols.meta.get('os_minversion', (0,0,0))[1] < 14 \
and self.api_is_address(0) \
and self.api_is_reg_size(1) \
:
        # Clang-LLVM on MacOSX sometimes uses Intel argument order
# it is the case for
# Apple LLVM version 6.0 (clang-600.0.54)
# Apple LLVM version 7.0.2 (clang-700.1.81)
# Apple LLVM version 9.0.0 (clang-900.0.39.2)
# not for
# Apple clang version 11.0.0 (clang-1100.0.33.17)
instr = self.amoco.__class__(b"")
instr.mnemonic = self.amoco.mnemonic
instr.operands = list(reversed(self.amoco.operands))
instr.spec = self.amoco.spec
return instr
else:
return self.amoco
def att_bug_fsub_fdiv(instr):
if not instr.mnemonic[:4] in [ 'FSUB', 'FDIV' ]:
return
for _ in instr.operands:
if _._is_mem:
return
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=372528
# The binutils mix fsubp/fdivp with fsubrp/fdivrp
if instr.mnemonic[4:] == 'P':
instr.mnemonic = instr.mnemonic[:4] + 'RP'
elif instr.mnemonic[4:] == 'RP':
instr.mnemonic = instr.mnemonic[:4] + 'P'
elif len(instr.operands) == 2 and str(instr.operands[0]) != 'st0':
if instr.mnemonic[4:] == '':
instr.mnemonic = instr.mnemonic + 'R'
elif instr.mnemonic[4:] == 'R':
instr.mnemonic = instr.mnemonic[:4]
spec_table = {}
for spec in amoco.arch.x86.spec_ia32.ISPECS \
+ amoco.arch.x86.spec_fpu.ISPECS \
+ amoco.arch.x86.spec_sse.ISPECS:
mnemo = spec.iattr.get('mnemonic', None)
if not mnemo in spec_table:
spec_table[mnemo] = [spec]
elif not spec in spec_table[mnemo]:
spec_table[mnemo].append(spec)
del spec
def set_spec(i, spec_table):
log.debug("%s %s", i.mnemonic, [_.size for _ in reversed(i.operands)])
spec_collision = {
'CBW': 'CWDE',
'CWD': 'CDQ',
'IRET': 'IRETD',
'CDQE': 'CWDE',
'CQO': 'CDQ',
'LFENCE': 'XRSTOR',
'MFENCE': 'XSAVEOPT',
'SFENCE': 'CLFLUSH',
'PEXTRQ': 'PEXTRD',
'PINSRQ': 'PINSRD',
'CMPXCHG16B': 'CMPXCHG8B',
}
if i.mnemonic in spec_collision:
spec_list = spec_table[spec_collision[i.mnemonic]]
elif i.mnemonic[:-1].lower() in mnemo_string_rep:
spec_list = spec_table[i.mnemonic[:-1]+'D']
else:
spec_list = spec_table[i.mnemonic]
if len(spec_list) > 1:
log.debug("Many possible spec for %s", i.mnemonic)
for spec in spec_list:
log.debug("... %s", spec.hook)
log.debug(" misc: %s", i.misc)
ispec_idx = 0
if i.mnemonic in ('CALL','JMP'):
if i.operands[0]._is_mem:
ispec_idx = 0
elif i.operands[0]._is_reg and not i.operands[0]._is_lab:
ispec_idx = 0
else:
ispec_idx = 1
if i.mnemonic.lower()[:-1] in mnemo_string_rep:
if not len(i.operands):
ispec_idx = -1
i.spec = spec_list[ispec_idx]
if 'type' in i.spec.iattr:
i.type = i.spec.iattr['type']
else:
i.type = type_data_processing
import re
def replace_names_with_symbols(symbols, args):
for e in args:
for _ in expressions.symbols_of(e):
if _._is_lab:
symbol = _.ref
r = re.match(r'(\d+)([bf])', symbol)
if r:
symbol, direction = r.groups()
idx = symbols.meta['local_labels'][symbol]
if direction == 'f': idx += 1
symbol = '.L%s\02%d'%(symbol,idx)
_.ref = symbols.find_symbol(name = symbol)
from plasmasm.symbols import Line
from plasmasm.compilers import \
switch_detection_x86_update, \
switch_detection_gcc463m32opt, \
switch_detection_gcc346m32opt, \
gcc_label_for_inlined_memcpy
from amoco.arch.x86.parsers import att_syntax
class Instruction(Line, API_AMOCO):
__slots__ = ('section', 'offset', 'bytelen',
'amoco')
CPU = 'I386'
def from_txt(self, txt):
''' text input, in assembly format '''
log.debug("> %s", txt)
if txt.startswith('rep; ret'): txt = 'rep ret'
instr = att_syntax.instr.parseString(txt, True)[0]
att_bug_fsub_fdiv(instr)
set_spec(instr, spec_table)
replace_names_with_symbols(self.symbols, instr.operands)
self.amoco = instr
return self
def from_bin(self, in_str, section):
''' binary input, in assembly format '''
self.section = section
self.offset = in_str.offset
from plasmasm.parse_bin import endofsection_address
end_of_section = endofsection_address(self.symbols, section)
end_of_instr = in_str.offset+cpu_amoco.disassemble.maxlen
if end_of_instr > end_of_section:
end_of_instr = end_of_section
instr = cpu_amoco.disassemble(in_str[self.offset:end_of_instr])
if instr is None:
instr = StubNone(self.offset, in_str[self.offset:self.offset+1])
self.bytelen = instr.length
in_str.offset = self.offset + self.bytelen
self.amoco = instr
return self
def pack(self):
''' binary representation '''
return self.amoco.bytes # Only if unchanged
def txt(self, asm_format=None):
''' text output, to be used by an assembler '''
if asm_format is not None:
asm_format_orig = self.asm_format
self.set_asm_format(asm_format)
if self.asm_format == 'raw' and str(self.amoco) == 'nop ':
txt = 'nop [%r]' % self.amoco.bytes
elif self.asm_format == 'raw':
txt = '%s [%s]' % (self.amoco, self.amoco.spec.hook.__name__)
else:
txt = str(clang_bug_test(self))
if asm_format is not None:
self.set_asm_format(asm_format_orig)
return txt
def labels(self):
''' labels that are referenced in the line '''
res = set()
for arg in self.amoco.operands:
if arg._is_lab:
res.add(arg)
if arg._is_eqn and arg.l._is_lab:
res.add(arg.l)
if arg._is_eqn and arg.r._is_lab:
res.add(arg.r)
if arg._is_mem and hasattr(arg.a.disp, '_is_lab') and arg.a.disp._is_lab:
res.add(arg.a.disp)
if arg._is_mem and arg.a.base._is_lab:
res.add(arg.a.base)
if arg._is_mem and arg.a.base._is_eqn and arg.a.base.l._is_lab:
res.add(arg.a.base.l)
if arg._is_mem and arg.a.base._is_eqn and arg.a.base.r._is_lab:
res.add(arg.a.base.r)
return set([_.ref for _ in res if hasattr(_.ref, 'name')])
def set_asm_format(self, asm_format):
if asm_format is None or asm_format.startswith('att_syntax'):
if asm_format == 'att_syntax clang':
instruction.set_formatter(IA32_MacOSX_ATT)
else:
instruction.set_formatter(IA32_Binutils_ATT)
# AT&T syntax is buggy, and depends on whether it is used by
# binutils or clang, cf. att_bug_fsub_fdiv
elif asm_format.startswith('intel_syntax'):
instruction.set_formatter(IA32_Binutils_Intel)
# Intel syntax is ambiguous, e.g. call eax
# when there is a global variable eax
self.asm_format = asm_format
set_asm_format = classmethod(set_asm_format)
asm_format = None
def _create_reloc(self, a):
''' needed to be able to pack an instruction '''
TODO
def _extract_symbols(self, a):
# Parsing the argument 'a', find if there is a relocation
# to be made, extract the symbols
# Output: relocation type (None/False/True), label(s)
TODO
def list_relocs(self):
''' needed to create a relocatable ELF '''
TODO
# Methods for binary parser
def create_label_imm(self):
''' Replace immediate values that may be labels '''
from plasmasm.parse_bin import label_for_address
if switch_detection_x86_update(self):
return
address = switch_detection_gcc463m32opt(self)
if address is not None:
section = self.symbols.get_sectionname(address)
label = self.symbols.find_symbol(section = section, address = address)
log.debug("... TABLE(imm) %r", label)
self.api_set_imm_label(1, address, label)
return
for idx in range(self.api_nb_arg()):
value = self.api_get_imm(idx)
label = label_for_address(self.symbols, value)
if label is not None:
assert label.address == value
self.api_set_imm_label(idx, value, label)
gcc_label_for_inlined_memcpy(self)
def create_label_rel(self):
''' Replace relative addresses for call/jmp/jcc '''
if self.opname == 'call' or self.opname.startswith('j'):
idx = 0
value = self.api_get_cst(idx)
else:
return
if value is None:
return
props = { 'address': (self.offset+self.bytelen+value)%(1<<cpu_addrsize),
'section': self.section }
label_imm = self.symbols.find_symbol(**props)
if label_imm is None:
NON_REGRESSION_FOUND
return
if label_imm.is_symbol() and self.bytelen < 5:
# If the argument is not 4 bytes long, create a new label
# and keep the same stack; if we don't do this, then instead
# of generating a relative jump, the assembler will generate
# a jump with relocation; it is the same semantics, but breaks
# non-regression tests asking that the generated .o is the same
# as the original one
# Non-regression: jcmarker.o from libjpeg-6b / gcc 4.6.3
old_stack = label_imm.stack
props['name'] = self.symbols.new_name(**props)
label_imm = self.symbols.find_symbol(**props)
label_imm.stack = old_stack
self.api_set_imm_label(idx, value, label_imm)
def apply_reloc(self, pos, reloc):
''' 'reloc' is a relocation at offset 'pos'
This function modifies the argument impacted by the relocation '''
# Step 1: find which arg is impacted
pos -= self.offset
b, = struct.unpack("B", self.amoco.bytes[pos:pos+1])
b = struct.pack("B", (1+b)%256)
o = cpu_amoco.disassemble(self.amoco.bytes)
patched = self.amoco.bytes[:pos] + b + self.amoco.bytes[pos+1:]
p = cpu_amoco.disassemble(patched)
if o is None or p is None or o.mnemonic != p.mnemonic:
log.error("Relocation changes instruction! %s => %s", o, p)
log.error(" at offset %r with reloc %r", pos, reloc)
log.error(" for '%s' at %s, address=%s",
self, self.section, self.offset)
return
# To find if an argument has changed, we compute the difference
# and test if it is non-zero
argpos = None
for idx, (oa, na) in enumerate(zip(o.operands, p.operands)):
try:
d = na - oa
except ValueError:
log.error("Invalid relocation effect")
log.error(" incompatible sizes %s %s", na, oa)
log.error(" reloc %r for '%s'", reloc, self)
return
if d._is_cst and int(d) == 0:
# Not changed
continue
if argpos is not None:
log.error("Relocation touches many arguments")
log.error(" reloc %r for '%s'", reloc, self)
return
argpos = idx
if argpos is None:
log.error("Relocation touches no argument")
log.error(" reloc %r for '%s'", reloc, self)
log.error("ARGPOS %s", argpos)
return
# Step 2: modify the argument by using the reloc data
address = switch_detection_gcc463m32opt(self)
if address is None:
address = switch_detection_gcc346m32opt(self)
if self.amoco.operands[argpos]._is_cst:
offset = int(self.amoco.operands[argpos])
if offset >= (1<<(cpu_addrsize-1)):
offset -= 1<<cpu_addrsize # Signed
self.amoco.operands[argpos] -= offset
elif self.amoco.operands[argpos]._is_mem:
base = self.amoco.operands[argpos].a.base
if base._is_cst:
offset = int(base)
self.amoco.operands[argpos].a.base -= offset
else:
if base._is_eqn and base.op.symbol == '+':
pass
# We may want to extract the constant from an operation
# (reg+imm), but normally it is stored as (base+disp)
offset = self.amoco.operands[argpos].a.disp
self.amoco.operands[argpos].a.disp -= offset
else:
log.error("Arg of type %s", self.amoco.operands[argpos].__class__)
return
if address is None:
from plasmasm.get_symbols import analyze_reloc
label, label_dif, offset, size = analyze_reloc(self,
reloc, offset, pos, self.bytelen)
else:
# Special case: offset to a switch table
r_type, data = reloc
# Some coherency checks
from elfesteem import elf, pe
if r_type == ('ELF', elf.EM_386, elf.R_386_32):
assert data['section'] == '.rodata'
elif r_type == ('COFF', pe.IMAGE_FILE_MACHINE_I386,
pe.IMAGE_REL_I386_DIR32):
assert data['section'] == '.rdata'
else:
log.error("Unknown reloc type: %s", reloc)
log.error("for: %s", self)
return
label = self.symbols.find_symbol(
section=data['section'], address=address)
label_dif = None
offset -= address
size = cpu_addrsize
log.debug("... TABLE(rel) %r", label)
self.dst = [[label]]
ext_label = expressions.lab(label, size=size)
if label_dif is not None:
ext_label -= expressions.lab(label_dif, size=size)
if offset != 0:
ext_label = ext_label + offset
if self.amoco.operands[argpos]._is_cst:
self.amoco.operands[argpos] += ext_label
elif self.amoco.operands[argpos]._is_mem and self.amoco.operands[argpos].a.base._is_cst:
self.amoco.operands[argpos].a.base += ext_label
elif self.amoco.operands[argpos]._is_mem:
self.amoco.operands[argpos].a.disp += ext_label
else:
NEVER
#if self.amoco.operands[argpos]._is_lab and \
# self.opname in [ 'call', 'jmp' ]:
# self.amoco.misc['dst'] = label
class InstructionCFG(Instruction):
__slots__ = ('flow', 'dst')
def _set_flow(self):
if self.opname == 'call': self.flow = 'sub'
elif self.opname == 'ret': self.flow = 'ret'
elif self.opname == 'retn': self.flow = 'ret'
elif self.opname == 'ud2': self.flow = 'ret'
elif self.opname == 'jmp': self.flow = 'jmp'
elif self.opname.startswith('j'): self.flow = 'jcc'
elif self.opname == 'loop': TODO
elif self.opname == 'iret': TODO
elif self.opname == 'int': TODO
else: self.flow = None
def _set_dst(self):
if hasattr(self, 'dst'):
# Already set by switch detection
return
if self.flow is None:
self.dst = []
elif self.flow == 'ret':
self.dst = [None]
elif self.flow in [ 'sub', 'jmp', 'jcc' ]:
self.dst = [ self.api_get_symbol(0) ]
else:
raise ValueError("Flow %s unknown"%self.flow)
if self.flow == 'sub' and len(self.dst) == 1 \
and hasattr(self, 'offset') \
and getattr(self.dst[0], 'address', None) == self.offset+self.bytelen:
# Detection of clang or gcc 3.x computation of GOT offset
# "call Ln" and "Ln: pop reg" and "add GOT"
self.flow = 'PIC'
def evaluate_lines(self, lines, in_str):
return evaluate_lines(self, lines, in_str)
def get_touched(e, indirect=False):
# If indirect==True, registers read to determine addresses in e
# If indirect==False, other registers read/written when e is read/written
t = set()
if e._is_def == 0: # top
# some flags may have undetermined values, e.g. for sar edx, 31
# some semantics are not implemented, e.g. shld edi, ebx, cl
pass
elif e._is_slc:
t.update(get_touched(e.x, indirect))
elif e._is_cmp:
for s in e.parts:
t.update(get_touched(e.parts[s], indirect))
elif e._is_cst:
pass
elif e._is_lab:
pass
elif e._is_reg:
if not indirect:
t.update([e])
elif e._is_mem:
t.update(get_touched(e.a, indirect))
elif e._is_ptr:
if not indirect:
t.update(['MEM'])
else:
t.update(get_touched(e.base, False))
elif e._is_tst:
t.update(get_touched(e.tst, False))
t.update(get_touched(e.l, indirect))
t.update(get_touched(e.r, indirect))
elif e._is_eqn:
if e.l is not None:
t.update(get_touched(e.l, indirect))
if e.r is not None:
t.update(get_touched(e.r, indirect))
else:
raise ValueError("in get_touched %s %s"%(type(e),e))
return t
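# Example of the direct/indirect distinction above: for e = M32[ebx+4],
#   get_touched(e, indirect=False) -> {'MEM'}  (the dereferenced memory itself)
#   get_touched(e, indirect=True)  -> {ebx}    (register read to form the address)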
def get_rw(m):
r = set()
w = set()
for dst, src in m:
w.update(get_touched(dst, False))
r.update(get_touched(src, False))
r.update(get_touched(dst, True))
r.update(get_touched(src, True))
return r, w
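# Sketch of how the (read, write) sets are produced -- this mirrors
# InstructionRW._set_rw below; the byte string is a hypothetical encoding of
# 'mov eax, ebx':
#
#     # i = cpu_amoco.disassemble(b'\x89\xd8')
#     # m = mapper()
#     # i(m)                 # symbolic execution into the mapper
#     # r, w = get_rw(m)     # roughly: ebx read, eax written (plus eip bookkeeping)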
def is_mmx(line, env):
if line.opname.startswith('cvt'):
return True
for reg in env.mmregs + env.xmmregs: # Loop, and use 'is', because
for arg in line.amoco.operands: # membership test with 'in' uses
if arg is reg: return True # '==' which is redefined and buggy
return False
def add_semantics_missing(line, r, w, env, get_touched):
# Some bugs of amoco emulation; we modify r and w
reg_flags = list(get_touched(env.cf))[0] # eflags/rflags in 32/64-bit mode
# flags are not read, for these instructions
if line.opname in ('cmp', 'test', 'inc', 'dec', 'add', 'sub', 'mul', 'imul',
'neg', 'and', 'or', 'bsf', 'bsr',
'aaa', 'aad', 'aam', 'aas', 'daa', 'das',
'bt'):
r.remove(reg_flags)
if line.opname in ('rol', 'ror'):
r.discard(reg_flags)
# No semantics for div in amoco
if line.opname in ('div', 'idiv'):
r.update(get_touched(env.eax))
w.update(get_touched(env.eax))
arg = line.amoco.operands[0]
if not (arg._is_slc and arg.size == 8):
# not 8-bit
r.update(get_touched(env.edx))
w.update(get_touched(env.edx))
r.update(get_touched(arg, False))
r.update(get_touched(arg, True))
# Incomplete semantics
if line.opname == 'bt':
for arg in line.amoco.operands:
r.update(get_touched(arg, False))
r.update(get_touched(arg, True))
if line.opname in ('shld', 'shrd') and line.amoco.operands[2]._is_slc:
dst = line.amoco.operands[0]
src = line.amoco.operands[1]
w.update(get_touched(dst, False))
r.update(get_touched(dst, False))
r.update(get_touched(src, False))
r.update(get_touched(dst, True))
r.update(get_touched(src, True))
r.update(get_touched(env.ecx))
if line.opname in ('ldmxcsr', 'stmxcsr', 'xsave', 'xrstor', 'xsaveopt',
'clflush', 'lfence', 'mfence', 'sfence'):
# reads or writes registers that are not in amoco's model,
# e.g. Processor Extended States
pass
# No semantics for fpu operations in amoco
if line.opname.startswith('f'):
fpu_s = env.fpu_status
fpu_c = env.fpu_control
# NB: we don't include in the following table the modification of C1
# when there is a FPU stack overflow, because it depends on the value
# of other status flags
fpu_table = { # stack, read, written
(2,'fcomi'): (0, (env.st(0),1), (reg_flags,)),
(1,'fcomi'): (0, (env.st(0),0), (reg_flags,)),
(2,'fcomip'): (1, (env.st(0),1), (reg_flags,)),
(1,'fcomip'): (1, (env.st(0),0), (reg_flags,)),
(2,'fucomi'): (0, (env.st(0),1), (reg_flags,)),
(1,'fucomi'): (0, (env.st(0),0), (reg_flags,)),
(2,'fucomip'): (1, (env.st(0),1), (reg_flags,)),
(1,'fucomip'): (1, (env.st(0),0), (reg_flags,)),
(1,'fcom'): (0, (env.st(0),0), (fpu_s,)),
(1,'fcomp'): (0, (env.st(0),0), (fpu_s,)),
(0,'fcompp'): (2, (env.st(0),env.st(1)), (fpu_s,)),
(1,'fucom'): (0, (env.st(0),0), (fpu_s,)),
(1,'fucomp'): (1, (env.st(0),0), (fpu_s,)),
(0,'fucompp'): (2, (env.st(0),env.st(1)), (fpu_s,)),
(0,'fldz'): (0, (fpu_c,), (env.st(0),)),
(0,'fld1'): (0, (fpu_c,), (env.st(0),)),
(0,'fldl2t'): (0, (fpu_c,), (env.st(0),)),
(0,'fldl2e'): (0, (fpu_c,), (env.st(0),)),
(0,'fldpi'): (0, (fpu_c,), (env.st(0),)),
(0,'fldlg2'): (0, (fpu_c,), (env.st(0),)),
(0,'fldln2'): (0, (fpu_c,), (env.st(0),)),
(0,'fxam'): (0, (env.st(0),), (fpu_s,)),
(0,'fabs'): (0, (env.st(0),), (env.st(0),)),
(0,'frndint'): (0, (env.st(0),fpu_c), (env.st(0),)),
(0,'fsqrt'): (0, (env.st(0),), (env.st(0),)),
(0,'fchs'): (0, (env.st(0),), (env.st(0),)),
(0,'fptan'): (-1, (env.st(0),), (env.st(0),)),
(0,'fpatan'): (1, (env.st(0),), (env.st(0),)),
(0,'fprem'): (0, (env.st(0),env.st(1)), (env.st(0),fpu_s)),
(0,'fprem1'): (0, (env.st(0),env.st(1)), (env.st(0),fpu_s)),
(1,'fld'): (-1, (0,), (env.st(0),)),
(1,'fild'): (-1, (0,), (env.st(0),)),
(1,'fst'): (0, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fstp'): (1, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fist'): (0, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fistp'): (1, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fisttp'): (1, (env.st(0),fpu_c), (fpu_s,0)),
(1,'fxch'): (0, (env.st(0),0), (env.st(0),0)),
(1,'fiadd'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fisub'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fisubr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fimul'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fidiv'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fidivr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fadd'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fsub'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fsubr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fmul'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fdiv'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(1,'fdivr'): (0, (env.st(0),0), (fpu_s,env.st(0),)),
(2,'fadd'): (0, (0,1), (fpu_s,0,)),
(2,'fsub'): (0, (0,1), (fpu_s,0,)),
(2,'fsubr'): (0, (0,1), (fpu_s,0,)),
(2,'fmul'): (0, (0,1), (fpu_s,0,)),
(2,'fdiv'): (0, (0,1), (fpu_s,0,)),
(2,'fdivr'): (0, (0,1), (fpu_s,0,)),
(2,'faddp'): (1, (0,1), (fpu_s,0,)),
(2,'fsubp'): (1, (0,1), (fpu_s,0,)),
(2,'fsubrp'): (1, (0,1), (fpu_s,0,)),
(2,'fmulp'): (1, (0,1), (fpu_s,0,)),
(2,'fdivp'): (1, (0,1), (fpu_s,0,)),
(2,'fdivrp'): (1, (0,1), (fpu_s,0,)),
(1,'fbstp'): (0, (env.st(0),fpu_c), (0,)),
(1,'fldcw'): (0, (0,), (fpu_c,)),
(1,'fnstcw'): (0, (fpu_c,), (0,)),
(1,'fnstsw'): (0, (fpu_s,), (0,)),
(0,'fsave'): (0, (fpu_c,fpu_s), ()),
(0,'fnsave'): (0, (fpu_c,fpu_s), ()),
(1,'fstenv'): (0, (fpu_c,fpu_s), (0,)),
(1,'fnstenv'): (0, (fpu_c,fpu_s), (0,)),
(0,'finit'): (0, (), (fpu_c,fpu_s)),
(0,'frstor'): (0, (), (fpu_c,fpu_s)),
(0,'fnclex'): (0, (), (fpu_s,)),
(1,'fxsave'): (0, (fpu_s,), ()),
(1,'fxrstor'): (0, (), (fpu_s,)),
}
try:
key = (len(line.amoco.operands), line.opname)
stack_pop, reg_r, reg_w = fpu_table[key]
except KeyError:
if line.opname.startswith('fcmov'):
stack_pop, reg_r, reg_w = 0, (reg_flags,1), (fpu_s,env.st(0),)
else:
stack_pop, reg_r, reg_w = 0, (), ()
log.error("fpu_table: %r missing",key)
if stack_pop == -1: # push on FPU stack
r.update([env.st(_) for _ in range(7)])
w.update([env.st(1+_) for _ in range(7)])
elif stack_pop == 1: # pop on FPU stack
# bug for faddp %st(7) and similar: because of stack_pop
# the register %st(6) is written instead of %st(7)
r.update([env.st(1+_) for _ in range(7)])
w.update([env.st(_) for _ in range(7)])
elif stack_pop == 2: # pop twice on FPU stack
r.update([env.st(2+_) for _ in range(6)])
w.update([env.st(_) for _ in range(6)])
for reg in reg_r:
if isinstance(reg, int):
r.update(get_touched(line.amoco.operands[reg],False))
r.update(get_touched(line.amoco.operands[reg],True))
else: r.add(reg)
for reg in reg_w:
if isinstance(reg, int):
w.update(get_touched(line.amoco.operands[reg],False))
r.update(get_touched(line.amoco.operands[reg],True))
else: w.add(reg)
# No semantics for MMX/SSE operations in amoco
if is_mmx(line, env):
dst = line.amoco.operands[0]
src = line.amoco.operands[1]
w.update(get_touched(dst, False))
r.update(get_touched(dst, False)) # Not for all MMX operations
r.update(get_touched(src, False))
r.update(get_touched(dst, True))
r.update(get_touched(src, True))
if line.opname.startswith('ucomi'): w.add(reg_flags)
elif 0xF2 in line.prefix or (0xF3 in line.prefix and line.opname != 'ret'):
# True rep/repz/repnz
r.update(get_touched(env.ecx))
w.update(get_touched(env.ecx))
class InstructionRW(InstructionCFG):
__slots__ = ('rw',)
def _set_rw(self):
m = mapper()
self.amoco(m)
r, w = get_rw(m)
add_semantics_missing(self, r, w, env, get_touched)
self.rw = r, w
def reg_name(r):
return str(r)
reg_name = staticmethod(reg_name)
class InstructionDEAD(InstructionRW):
__slots__ = ('pic', 'stack', 'dead', 'immutable')
from amoco.cas import expressions
def evaluate_lines(instr, lines, in_str):
    # Run the emulation of the basic block
machine = mapper()
def print_machine(machine):
return sorted(str(machine).split("\n"))
for line in lines:
        # eip is the next instruction: the basic block may have been merged,
# but conditional jumps are not taken
machine[env.eip] = env.cst(line.offset,cpu_addrsize)
try:
line.amoco(machine)
except NotImplementedError:
return (('Not implemented', line, print_machine(machine)), [None])
except NameError:
return (('Cannot be emulated (name)', line, print_machine(machine)), [None])
except amoco.arch.core.InstructionError:
return (('Cannot be emulated', line, print_machine(machine)), [None])
except TypeError:
return (('Not callable', line, print_machine(machine)), [None])
if line.opname == 'call':
# amoco emulation pushes eip+i.length
            # we prefer to push the label of the next basic block
label = instr.symbols.find_symbol(
section=line.section,
address=line.offset+line.amoco.length)
machine[env.mem(env.esp,cpu_addrsize)] = expressions.lab(label, size=cpu_addrsize)
retval = machine[env.eip]
msg, val = evaluate(retval,
machine, instr.symbols.find_symbols, instr, in_str)
if val is None:
return ((str(retval.__class__), retval, print_machine(machine)), [None])
elif val == [None]:
return ((msg, retval, print_machine(machine)), [None])
else:
return (msg, val)
# Interface for expressions
class expr(object):
def get_cst(e):
NON_REGRESSION_FOUND
if e is not None and e._is_cst:
return int(e)
get_cst = staticmethod(get_cst)
def get_lab(e):
if e is not None and e._is_lab:
return e.ref
get_lab = staticmethod(get_lab)
def get_lab_imm(e):
if e is not None and e._is_cst:
return None, int(e)
if e is not None and e._is_lab:
return e.ref, 0
if e is not None and e._is_eqn and e.op.symbol == '+' \
and e.l._is_lab \
and e.r._is_cst:
return e.l.ref, int(e.r)
return None, None
get_lab_imm = staticmethod(get_lab_imm)
def get_reg(e):
if e is not None and e._is_reg and not e._is_lab:
return e.ref
get_reg = staticmethod(get_reg)
def get_mem(e):
if e is None:
return None
if not e._is_mem:
return None
return e.a.base+e.a.disp
get_mem = staticmethod(get_mem)
def get_eqn(e):
NON_REGRESSION_FOUND
if e is not None and e._is_eqn:
return True
get_eqn = staticmethod(get_eqn)
def get_tst(e):
if e is not None and e._is_tst:
return e.l, e.r
get_tst = staticmethod(get_tst)
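# Informal quick reference for the helpers above (operands are amoco expressions):
#   expr.get_lab_imm(cst(8))       -> (None, 8)
#   expr.get_lab_imm(lab_foo + 4)  -> (<label foo>, 4)
#   expr.get_reg(env.eax)          -> 'eax'      (the register's .ref)
#   expr.get_mem(M32[eax+4])       -> eax + 4    (base + disp of the access)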
def evaluate(address, machine, find, instr, in_str):
# Generates a list of labels, each label being a possible value
# for the expression 'address'
log.debug("EVALUATE %s\n\t%s", address.__class__.__name__, address)
address = remove_got(address, instr.symbols)
v = expr.get_reg(address)
if v is not None: return 'REG', [ None ]
v = expr.get_lab(address)
if v is not None: return 'ID', [ v ]
v = expr.get_mem(address)
if v is not None:
# Lookup at some address
return evaluate_mem(v, machine, find, instr, in_str)
v = test_clang_switch_array(address)
if v is not None:
L1, L2 = v
L1 = expr.get_lab(L1)
if not hasattr(L1, 'lines'):
# Switch table needs to be parsed later
# Switch table already detected by pattern matching in compilers.py
NON_REGRESSION_FOUND
log.debug("Parse switch table later %r", L1)
pic_base, ptr_size, tbl_size = L1.switch_table
assert ptr_size == 4
assert L2 == "-%s"%pic_base
return 'SWITCH', 'TABLE'
msg = 'ARRAY'
lines = [ _.value[0] for _ in L1.lines ]
table = []
for s in lines:
if not hasattr(s, 'name'):
s = False
elif not s.name.endswith(L2):
msg = 'INCOHERENT'
continue
else:
s = find(name = s.name[:-len(L2)])[0]
if not s in table: table.append(s)
if not hasattr(L1, 'size'):
# Switch table not complete
if not None in table: table.append(None)
return msg, table
v = expr.get_tst(address)
if v is not None:
msg_l, res_l = evaluate(v[0], machine, find, instr, in_str)
msg_r, res_r = evaluate(v[1], machine, find, instr, in_str)
if res_l is None or res_r is None:
return None, None
return "%s+%s"%(msg_l,msg_r), res_l+res_r
log.debug("Need better analysis of %s:%s", address.__class__.__name__, address)
return None, None
def evaluate_mem(address, machine, find, instr, in_str):
log.debug("EVALUATE_MEM %s\n\t%s", address.__class__.__name__, address)
v = expr.get_reg(address)
if v is not None:
return 'MEM_REG', [ None ]
table, offset = expr.get_lab_imm(address)
if offset is not None:
msg, val = deref_table(table, offset, instr, in_str)
if val is not None:
return msg, val
v = expr.get_mem(address)
if v is not None:
return 'MEM_MEM', [ None ]
return array_detection(address, machine, find, instr, in_str)
def array_detection(input, machine, find, instr, in_str):
log.debug("ARRAY_DETECT %s\n\t%s", input.__class__.__name__, input)
dst_lst = []
# Is it an element of an array?
# Find the multiplication, replace it by 'index_in_array'
index_var = env.ext('index_in_array',size=cpu_addrsize)
item_len = 0
if input.op.symbol == '+' and input.l._is_eqn:
if input.l.op.symbol == '+' and input.l.l._is_eqn and \
input.l.l.op.symbol == '*' and input.l.l.r._is_cst:
item_len = int(input.l.l.r)
input.l.l = index_var
elif input.l.op.symbol == '+' and input.l.r._is_eqn and \
input.l.r.op.symbol == '*' and input.l.r.r._is_cst:
item_len = int(input.l.r.r)
input.l.r = index_var
elif input.l.op.symbol == '*' and input.l.r._is_cst:
item_len = int(input.l.r)
input.l = index_var
elif input.l.op.symbol == '<<':
item_len = 1 << int(input.l.r)
input.l = index_var
elif input.op.symbol == '+' and input.r._is_eqn:
if input.r.op.symbol == '*' and input.r.r._is_cst:
item_len = int(input.r.r)
if input.r.l._is_ptr and input.r.l.disp == 0 and \
input.r.l.base._is_eqn and input.r.l.base.op.symbol == '+' and \
input.r.l.base.r._is_eqn and input.r.l.base.r.op.symbol == '*' \
and input.r.l.base.r.r._is_cst \
and input.r.l.base.l == input.r.l.base.r.l:
item_len *= 1 + int(input.r.l.base.r.r)
input.r = index_var
elif input.r.op.symbol == '<<':
NON_REGRESSION_FOUND
item_len = 1 << int(input.r.r)
input.r = index_var
if item_len == 0:
msg = 'MEM_EXP - NOT AN ARRAY'
return msg, [None]
log.debug(" ARRAY of %d-byte items", item_len)
# Usually 4-byte items
# Can be 8-byte items e.g. for ceval.o from python2.4.5 / gcc 4.6.3
# Can be 12-byte items e.g. for deflate.o from zlib 1.2.8 / gcc 4.6.3
invalid_indexes = 0
index_in_array = -item_len
while invalid_indexes < 4:
index_in_array += item_len
m2 = mapper()
m2[index_var] = env.cst(index_in_array, size=cpu_addrsize)
address_in_array = input.eval(m2)
log.debug(" x[%d] at %s:%s",
index_in_array//item_len,
address_in_array.__class__.__name__,
address_in_array)
msg, val = 'NOT FOUND', None
table, offset = expr.get_lab_imm(address_in_array)
if val is None and offset is not None:
msg, val = deref_table(table, offset, instr, in_str)
if val is None:
mapper.assume_no_aliasing = True
offset = machine.M(env.mem(address_in_array))
mapper.assume_no_aliasing = False
offset = remove_got(offset, instr.symbols)
v = expr.get_lab(offset)
if v:
msg, val = 'MEM', [ v ]
table, offset = expr.get_lab_imm(expr.get_mem(offset))
if offset is not None:
msg, val = deref_table(table, offset, instr, in_str)
if val == 'TABLE':
return msg, val
if val in (None, [None]):
log.debug(" ----> %s", msg)
invalid_indexes += 1
continue
for label in val:
if label.name.endswith('@GOTOFF'):
# to make this work also with executables, we will need to
# change our API and get the offset value that will have to
                # be subtracted; removing @GOTOFF is not enough!
label = find(name = label.name[:-7])[0]
log.debug(" => %s", label)
if not label in dst_lst:
dst_lst.append(label)
if dst_lst == []:
return 'MEM_EXP', [None]
return 'ARRAY', dst_lst
def deref_table(table, offset, instr, in_str):
pool = instr.symbols
log.debug("DEREF %s at %s", table, offset)
if table is None:
return deref_address(offset, pool, in_str)
if getattr(table, 'section', None) in ['.got.plt', '.got', '.idata']:
assert offset == 0
return 'GOT_PLT', [ table ]
if not hasattr(table, 'lines'):
# 'table' has not been parsed; will be later
return 'MEM_TABLE %s not parsed (offset %d)' % (table, offset), 'TABLE'
if offset < table.bytelen:
# Offset in a table
sz = 0
for line in table.lines:
if sz == offset: break
sz += line.bytelen
else:
line = None
if getattr(line, 'type', None) == 'long' and \
hasattr(line.value[0], 'name'):
label = line.value[0]
if label.name.startswith('_GLOBAL_OFFSET_TABLE_+[.-'):
label = label.reference
return 'MEM_ID', [ label ]
else:
return 'MEM_TABLE %s[%d]=%s' % (table, offset, line), [ None ]
if not hasattr(table, 'address'):
# Non-regression: gp.o from pari-2.5.5 / gcc 4.6.3
NON_REGRESSION_FOUND
return 'MEM_LAB_IMM %r offset=%s' % (table, offset), [ None ]
return deref_address(table.address + offset, pool, in_str)
import struct
def deref_address(offset, pool, in_str):
log.debug("DEREF_ADDRESS %#x", offset)
if offset == 0:
# Non-regression: cjpeg.o from libjpeg-6b / gcc 3.2.3
# relocated value
NON_REGRESSION_FOUND
return 'NULL', [ None ]
# Read from file (mapped in memory)
# Should not happen, the data sections should have been parsed and
# labels should have been created
# However, compilers sometimes generate (idx*4)+(label-4) rather than
# ((idx-1)*4)+label, and therefore 'label' is hidden
section = pool.get_sectionname(offset)
if section in [".data"]:
address = struct.unpack("I", in_str[offset:offset+4])[0]
a_section = pool.get_sectionname(address)
if a_section in [".text", ".plt"]:
label_list = pool.find_symbols(address = address)
if len(label_list): return 'MEM_VAL', label_list
if section in [".got"]:
label = pool.find_symbols(address = offset)
if label == []:
NON_REGRESSION_FOUND
return 'MEM_LAB_IMM %r address=%s' % (table, offset), [ None ]
if label[0].name.startswith('.rel.dyn.'):
label = pool.find_symbol(name = label[0].name[9:])
else:
label = label[0]
NON_REGRESSION_FOUND
return 'MEM_INT GOT', [ label ]
if section in [".idata"]:
NON_REGRESSION_FOUND
label = pool.find_symbol(address = offset)
if label.name.startswith('msvcrt.dll'):
return 'MSVCRT', [ label ]
return 'MEM_INT', [ label ]
return 'NOT IN TABLE [%s:%#x]' % (section, offset), None
def remove_got(address, pool):
if '@GOT' in str(address):
# When the expression contains @GOT or @GOTOFF, one should cancel
# the PIC offset
# This trick works only for relocatable objects :-(
v = remove_pic_offset(address, pool)
if v is not None:
log.debug("REMOVE GOT => %s", v)
return v
return address
def remove_pic_offset(e, pool):
log.debug("DETECT PIC FROM %s:%s", e.__class__.__name__, e)
if e._is_tst:
label_l = remove_pic_offset(e.l, pool)
label_r = remove_pic_offset(e.r, pool)
if label_l is None or label_r is None:
return None
return env.tst(e.tst, label_l, label_r)
# M32[M32[M32[PIC_OFFSET+toto@GOT]]+cte]
# => M32[M32[toto]+cte]
if e._is_mem \
and e.a.base._is_mem \
and e.a.base.a.disp == 0 \
and e.a.base.a.base._is_mem:
label = remove_pic_offset(e.a.base.a.base, pool)
if label is None:
return None
return env.mem(env.mem(label), disp=e.a.disp)
# M32[M32[PIC_OFFSET+toto@GOT]+cte]
# => M32[toto+cte]
if e._is_mem and e.a.base._is_mem:
label = remove_pic_offset(e.a.base, pool)
if label is None:
return None
return env.mem(label, disp=e.a.disp)
# M32[M32[PIC_OFFSET+toto@GOT]+formula]
# => M32[toto+formula]
if e._is_mem \
and e.a.base._is_eqn \
and e.a.base.op.symbol == '+' \
and e.a.base.l._is_mem:
label = remove_pic_offset(e.a.base.l, pool)
if label is None: return
return env.mem(label+e.a.base.r, disp=e.a.disp)
if e._is_mem and not hasattr(e.a.disp, '_is_lab'):
log.debug("BASE %s; DISP %s; TODO", e.a.base, e.a.disp)
return None
# M32[PIC_OFFSET+toto@GOT]
# => toto
if e._is_mem \
and e.a.disp._is_lab \
and e.a.disp.ref.name.endswith('@GOT'):
label_name = e.a.disp.ref.name[:-4]
pic_data = e.a.base
if not check_pic_data(pic_data):
NON_REGRESSION_FOUND
log.debug("PIC OFFSET [%s] LABEL %s", pic_data, label_name)
return None
return env.lab(pool.find_symbol(name = label_name), size=cpu_addrsize)
# M32[PIC_OFFSET+toto@GOTOFF]
# => M32[toto]
if e._is_mem \
and e.a.disp._is_lab \
and e.a.disp.ref.name.endswith('@GOTOFF'):
label = remove_pic_offset(e.a, pool)
if label is None: return
# Not sound: usually is a reference to somewhere in a data section
# that may change at runtime
return env.mem(label)
# (M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
if e._is_ptr \
and e.disp == 0:
return remove_pic_offset(e.base, pool)
# (PIC_OFFSET+toto@GOTOFF)
# => toto
if e._is_ptr \
and e.disp._is_lab \
and e.disp.ref.name.endswith('@GOTOFF'):
label_name = e.disp.ref.name[:-7]
pic_data = e.base
if not check_pic_data(pic_data):
log.debug("PIC OFFSET [%s] LABEL %s", pic_data, label_name)
return None
return env.lab(pool.find_symbol(name = label_name), size=cpu_addrsize)
# (PIC_OFFSET+M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF])
# (M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
# (-M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
# => M32[toto+INDEX_IN_TABLE]
# @GOTOFF will be removed later from the deref value
# to make this work also with executables, we will need to change
    # our API and return the offset that will have to be subtracted
if e._is_eqn and e.op.symbol == '+':
base, index, pic_data, pic_data_dup = extract_base_index(e)
if base is None:
log.error("Unknown base %s", e)
return None
if pic_data != pic_data_dup:
log.error("Inconsistent PIC %s != %s", pic_data, pic_data_dup)
return None
label_name = base.disp.ref.name[:-7]
if not check_pic_data(pic_data):
log.error("PIC OFFSET [%s] LABEL %s", pic_data, label_name)
# Don't abort, for now, improvement of pic_tracking needed
label = env.lab(pool.find_symbol(name = label_name), size=cpu_addrsize)
return env.mem(index, disp=label)
def extract_base_index(e):
# M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET
# e.l.a.base.l e.l.a.base.r e.l.a.disp + e.r
if (e.l._is_mem and
e.l.a.disp._is_lab and
e.l.a.base._is_eqn and
e.l.a.base.op.symbol == '+'):
return e.l.a, e.l.a.base.l, e.l.a.base.r, e.r
# PIC_OFFSET+M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]
# e.l + e.r.a.base.l e.r.a.base.r e.r.a.disp
if (e.r._is_mem and
e.r.a.disp._is_lab and
e.r.a.base._is_eqn and
e.r.a.base.op.symbol == '+'):
return e.r.a, e.r.a.base.l, e.r.a.base.r, e.l
# (-M32[(INDEX_IN_TABLE+PIC_OFFSET)+toto@GOTOFF]+PIC_OFFSET)
if (e.l._is_eqn and
e.l.op.symbol == '-' and
e.l.l is None and
e.l.r._is_mem and
e.l.r.a.disp._is_lab and
e.l.r.a.base._is_eqn and
e.l.r.a.base.op.symbol == '+'
):
return e.l.r.a, e.l.r.a.base.l, e.l.r.a.base.r, e.r
return None, None, None, None
def check_pic_data(pic):
pic = str(pic)
if pic == '(@_GLOBAL_OFFSET_TABLE_+M32(esp))':
# gcc 4.x PIC
# The backtracking went back to the start of the function, where
# the PIC offset is computed as @_GLOBAL_OFFSET_TABLE_+M32(esp)
# after a call to __i686.get_pc_thunk.?x
return True
if pic == '(@_GLOBAL_OFFSET_TABLE_+ebx)':
# gcc 4.x PIC
# The backtracking went after returning from __i686.get_pc_thunk.bx
return True
if pic == 'ebx':
# gcc 4.x PIC
# The backtracking went not far but ebx may contain the PIC offset
# This is a risky hypothesis, yet it seems to work
return True
if pic == 'ecx':
# gcc 4.x PIC
# The backtracking went not far but ecx may contain the PIC offset
# This is a risky hypothesis, yet it seems to work
return True
return False
def test_clang_switch_array(address):
# Expression of the form M32[L1-L2+r2+(r1*4)]+r2
# L1 is the label of the table
# r1 is the index in the table (register, sometimes shifted)
# r2 stores the address of label L2 (register, immediate, ...)
if not address._is_eqn:
return None
if not address.op.symbol == '+':
return None
if address.l._is_mem and getattr(address.l.a.disp, '_is_eqn', False):
mem_expr, r2 = address.l, address.r
elif address.r._is_mem and getattr(address.r.a.disp, '_is_eqn', False):
r2, mem_expr = address.l, address.r
else:
return None
if mem_expr.a.base._is_eqn and mem_expr.a.base.op.symbol == '+' and \
mem_expr.a.base.r == r2:
r1_4 = mem_expr.a.base.l
elif mem_expr.a.base._is_eqn and mem_expr.a.base.op.symbol == '+' and \
mem_expr.a.base.l == r2:
r1_4 = mem_expr.a.base.r
else:
return None
if not r1_4._is_eqn or not r1_4.r._is_cst or r1_4.r != 4:
return None
if mem_expr.a.disp._is_eqn and mem_expr.a.disp.op.symbol == '+' and \
mem_expr.a.disp.r._is_lab and mem_expr.a.disp.l._is_eqn and \
mem_expr.a.disp.l.op.symbol == '-' and \
mem_expr.a.disp.l.l is None and mem_expr.a.disp.l.r._is_lab:
L1 = mem_expr.a.disp.r
L2 = '-%s' % mem_expr.a.disp.l.r.ref
else:
return None
# Now that everything has been verified, we scan the array
log.debug("CLANG SWITCH %s%s %s %s", L1, L2, r1_4, r2)
return L1, L2
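# Illustrative note (added commentary, not part of the original analysis code):
# for an address expression matching the documented shape M32[L1-L2+r2+(r1*4)]+r2,
# the helper above is expected to return the pair (L1, '-L2'); any expression
# that does not match this shape yields None instead.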
|
the-stack_0_15785 | # coding: utf-8
import copy
import numpy as np
from flearn.common.distiller import DFDistiller, KDLoss
from .strategy import ParentStrategy
from .utils import convert_to_tensor
class DF(ParentStrategy):
"""
Ensemble distillation for robust model fusion in federated learning
[1] Lin T, Kong L, Stich S U, et al. Ensemble distillation for robust model fusion in federated learning[J]. arXiv preprint arXiv:2006.07242, 2020.
"""
def __init__(self, model_base, strategy):
super().__init__(strategy)
self.model_base = model_base
def server_post_processing(self, ensemble_params_lst, ensemble_params, **kwargs):
w_glob = convert_to_tensor(ensemble_params["w_glob"])
agg_weight_lst, w_local_lst = self.server_pre_processing(ensemble_params_lst)
teacher_lst = []
for w_local in w_local_lst:
self.model_base.load_state_dict(convert_to_tensor(w_local))
teacher_lst.append(copy.deepcopy(self.model_base))
self.model_base.load_state_dict(w_glob)
student = copy.deepcopy(self.model_base)
kd_loader, device = kwargs.pop("kd_loader"), kwargs.pop("device")
temperature = kwargs.pop("T")
distiller = DFDistiller(
kd_loader,
device,
kd_loss=KDLoss(temperature),
)
molecular = np.sum(agg_weight_lst)
weight_lst = [w / molecular for w in agg_weight_lst]
        # agg_weight_lst: the weights should be assigned according to each model's performance on a validation set
ensemble_params["w_glob"] = distiller.multi(
teacher_lst, student, kwargs.pop("method"), weight_lst=weight_lst, **kwargs
)
return ensemble_params
def server(self, ensemble_params_lst, round_, **kwargs):
"""
kwargs: dict
{
"lr": 学习率,
"T": 蒸馏超参,温度
"epoch": 蒸馏训练轮数
"method": 多个教师蒸馏一个学习的方法,avg_logits, avg_losses
"kd_loader": 蒸馏数据集,仅需输入,无需标签
}
"""
ensemble_params = super().server(ensemble_params_lst, round_)
return self.server_post_processing(
ensemble_params_lst, ensemble_params, **kwargs
)
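# Hedged usage sketch (added for illustration; `model_base`, `strategy`,
# `client_params` and `unlabeled_loader` are placeholder names, not part of
# the original code):
#
#   df = DF(model_base, strategy)
#   new_params = df.server(
#       client_params,               # list of ensemble_params dicts from clients
#       round_=1,
#       lr=1e-3, T=4, epoch=1,       # distillation hyper-parameters (see docstring)
#       method="avg_logits",         # or "avg_losses"
#       kd_loader=unlabeled_loader,  # unlabeled distillation data
#       device="cuda:0",
#   )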
|
the-stack_0_15787 | import webloader
from bs4 import BeautifulSoup as soup
def get_company_credentials(url):
html = webloader.load(url)
return html_to_list(html)
def html_to_list(html):
page_soup = soup(html, "html.parser")
table = page_soup.find("div", {"class": "govspeak"}).table.findAll("tr")
table_list = []
for row in table:
cells = row.findAll("td")
table_row = []
for cell in cells:
table_row.append(cell.contents[0])
table_list.append(table_row)
return table_list[1:-1]
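# Hedged usage sketch: the URL below is a placeholder for a GOV.UK publication
# page whose "govspeak" block contains a single table.
if __name__ == '__main__':
    for row in get_company_credentials('https://www.gov.uk/government/publications/example-page'):
        print(row)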
|
the-stack_0_15794 | #
# Copyright (c) 2006-2019, RT-Thread Development Team
#
# SPDX-License-Identifier: Apache-2.0
#
# Change Logs:
# Date Author Notes
# 2019-03-21 Bernard the first version
# 2019-04-15 armink fix project update error
#
import os
import sys
import glob
from utils import *
from utils import _make_path_relative
from utils import xml_indent
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from building import *
MODULE_VER_NUM = 5
source_pattern = ['*.c', '*.cpp', '*.cxx', '*.s', '*.S', '*.asm']
def OSPath(path):
import platform
if type(path) == type('str'):
if platform.system() == 'Windows':
return path.replace('/', '\\')
else:
return path.replace('\\', '/')
else:
if platform.system() == 'Windows':
return [item.replace('/', '\\') for item in path]
else:
return [item.replace('\\', '/') for item in path]
# collect the build source code path and parent path
def CollectPaths(paths):
all_paths = []
def ParentPaths(path):
ret = os.path.dirname(path)
if ret == path or ret == '':
return []
return [ret] + ParentPaths(ret)
for path in paths:
# path = os.path.abspath(path)
path = path.replace('\\', '/')
all_paths = all_paths + [path] + ParentPaths(path)
all_paths = list(set(all_paths))
return sorted(all_paths)
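# Example (illustrative): CollectPaths(['a/b/c', 'a/d']) returns
# ['a', 'a/b', 'a/b/c', 'a/d'], i.e. every build path plus all of its parent
# directories, deduplicated and sorted.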
'''
Collect all of the files under the given paths
'''
def CollectFiles(paths, pattern):
files = []
for path in paths:
if type(pattern) == type(''):
files = files + glob.glob(path + '/' + pattern)
else:
for item in pattern:
# print('--> %s' % (path + '/' + item))
files = files + glob.glob(path + '/' + item)
return sorted(files)
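# Example (illustrative): CollectFiles(['applications'], ['*.c', '*.h']) globs
# both patterns directly under the 'applications' directory and returns the
# matches as a single sorted list.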
def CollectAllFilesinPath(path, pattern):
files = []
for item in pattern:
files += glob.glob(path + '/' + item)
list = os.listdir(path)
if len(list):
for item in list:
if item.startswith('.'):
continue
if item == 'bsp':
continue
if os.path.isdir(os.path.join(path, item)):
files = files + CollectAllFilesinPath(os.path.join(path, item), pattern)
return files
'''
Exclude files from infiles
'''
def ExcludeFiles(infiles, files):
in_files = set([OSPath(file) for file in infiles])
exl_files = set([OSPath(file) for file in files])
exl_files = in_files - exl_files
return exl_files
# calculate the exclude paths for the project
def ExcludePaths(rootpath, paths):
ret = []
files = os.listdir(OSPath(rootpath))
for file in files:
if file.startswith('.'):
continue
fullname = os.path.join(OSPath(rootpath), file)
if os.path.isdir(fullname):
# print(fullname)
if not fullname in paths:
ret = ret + [fullname]
else:
ret = ret + ExcludePaths(fullname, paths)
return ret
rtt_path_prefix = '"${workspace_loc://${ProjName}//'
def ConverToRttEclipsePathFormat(path):
return rtt_path_prefix + path + '}"'
def IsRttEclipsePathFormat(path):
if path.startswith(rtt_path_prefix):
return True
else:
return False
# all libs added by scons should be ends with five whitespace as a flag
rtt_lib_flag = 5 * " "
def ConverToRttEclipseLibFormat(lib):
return str(lib) + str(rtt_lib_flag)
def IsRttEclipseLibFormat(path):
if path.endswith(rtt_lib_flag):
return True
else:
return False
def IsCppProject():
return GetDepend('RT_USING_CPLUSPLUS')
def HandleToolOption(tools, env, project, reset):
is_cpp_prj = IsCppProject()
BSP_ROOT = os.path.abspath(env['BSP_ROOT'])
CPPDEFINES = project['CPPDEFINES']
paths = [ConverToRttEclipsePathFormat(RelativeProjectPath(env, os.path.normpath(i)).replace('\\', '/')) for i in
project['CPPPATH']]
compile_include_paths_options = []
compile_include_files_options = []
compile_defs_options = []
linker_scriptfile_option = None
linker_script_option = None
linker_nostart_option = None
linker_libs_option = None
linker_paths_option = None
linker_newlib_nano_option = None
for tool in tools:
        if tool.get('id').find('compile') != -1:
options = tool.findall('option')
# find all compile options
for option in options:
if option.get('id').find('compiler.include.paths') != -1 or option.get('id').find(
'compiler.option.includepaths') != -1:
compile_include_paths_options += [option]
elif option.get('id').find('compiler.include.files') != -1 or option.get('id').find(
'compiler.option.includefiles') != -1:
compile_include_files_options += [option]
elif option.get('id').find('compiler.defs') != -1 or option.get('id').find(
'compiler.option.definedsymbols') != -1:
compile_defs_options += [option]
if tool.get('id').find('linker') != -1:
options = tool.findall('option')
# find all linker options
for option in options:
# the project type and option type must equal
if is_cpp_prj != (option.get('id').find('cpp.linker') != -1):
continue
if option.get('id').find('linker.scriptfile') != -1:
linker_scriptfile_option = option
elif option.get('id').find('linker.option.script') != -1:
linker_script_option = option
elif option.get('id').find('linker.nostart') != -1:
linker_nostart_option = option
elif option.get('id').find('linker.libs') != -1:
linker_libs_option = option
elif option.get('id').find('linker.paths') != -1 and env.has_key('LIBPATH'):
linker_paths_option = option
elif option.get('id').find('linker.usenewlibnano') != -1:
linker_newlib_nano_option = option
    # change the include paths
for option in compile_include_paths_options:
# find all of paths in this project
include_paths = option.findall('listOptionValue')
for item in include_paths:
if reset is True or IsRttEclipsePathFormat(item.get('value')):
# clean old configuration
option.remove(item)
# print('c.compiler.include.paths')
paths = sorted(paths)
for item in paths:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': item})
    # change the include files (default) or definitions
for option in compile_include_files_options:
        # add '_REENT_SMALL' to CPPDEFINES when --specs=nano.specs has been selected
if linker_newlib_nano_option is not None and linker_newlib_nano_option.get(
'value') == 'true' and '_REENT_SMALL' not in CPPDEFINES:
CPPDEFINES += ['_REENT_SMALL']
file_header = '''
#ifndef RTCONFIG_PREINC_H__
#define RTCONFIG_PREINC_H__
/* Automatically generated file; DO NOT EDIT. */
/* RT-Thread pre-include file */
'''
file_tail = '\n#endif /*RTCONFIG_PREINC_H__*/\n'
rtt_pre_inc_item = '"${workspace_loc:/${ProjName}/rtconfig_preinc.h}"'
# save the CPPDEFINES in to rtconfig_preinc.h
with open('rtconfig_preinc.h', mode='w+') as f:
f.write(file_header)
for cppdef in CPPDEFINES:
f.write("#define " + cppdef.replace('=', ' ') + '\n')
f.write(file_tail)
# change the c.compiler.include.files
files = option.findall('listOptionValue')
find_ok = False
for item in files:
if item.get('value') == rtt_pre_inc_item:
find_ok = True
break
if find_ok is False:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': rtt_pre_inc_item})
if len(compile_include_files_options) == 0:
for option in compile_defs_options:
defs = option.findall('listOptionValue')
project_defs = []
for item in defs:
if reset is True:
# clean all old configuration
option.remove(item)
else:
project_defs += [item.get('value')]
if len(project_defs) > 0:
cproject_defs = set(CPPDEFINES) - set(project_defs)
else:
cproject_defs = CPPDEFINES
# print('c.compiler.defs')
cproject_defs = sorted(cproject_defs)
for item in cproject_defs:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': item})
# update linker script config
if linker_scriptfile_option is not None:
option = linker_scriptfile_option
linker_script = 'link.lds'
items = env['LINKFLAGS'].split(' ')
if '-T' in items:
linker_script = items[items.index('-T') + 1]
linker_script = ConverToRttEclipsePathFormat(linker_script)
listOptionValue = option.find('listOptionValue')
if listOptionValue != None:
listOptionValue.set('value', linker_script)
else:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': linker_script})
# scriptfile in stm32cubeIDE
if linker_script_option is not None:
option = linker_script_option
items = env['LINKFLAGS'].split(' ')
if '-T' in items:
linker_script = ConverToRttEclipsePathFormat(items[items.index('-T') + 1]).strip('"')
option.set('value', linker_script)
# update nostartfiles config
if linker_nostart_option is not None:
option = linker_nostart_option
if env['LINKFLAGS'].find('-nostartfiles') != -1:
option.set('value', 'true')
else:
option.set('value', 'false')
# update libs
if linker_libs_option is not None:
option = linker_libs_option
# remove old libs
for item in option.findall('listOptionValue'):
if IsRttEclipseLibFormat(item.get("value")):
option.remove(item)
# add new libs
if env.has_key('LIBS'):
for lib in env['LIBS']:
formatedLib = ConverToRttEclipseLibFormat(lib)
SubElement(option, 'listOptionValue', {
'builtIn': 'false', 'value': formatedLib})
# update lib paths
if linker_paths_option is not None:
option = linker_paths_option
# remove old lib paths
for item in option.findall('listOptionValue'):
if IsRttEclipsePathFormat(item.get('value')):
# clean old configuration
option.remove(item)
        # add new lib paths
for path in env['LIBPATH']:
SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': ConverToRttEclipsePathFormat(
RelativeProjectPath(env, path).replace('\\', '/'))})
return
def UpdateProjectStructure(env, prj_name):
bsp_root = env['BSP_ROOT']
rtt_root = env['RTT_ROOT']
project = etree.parse('.project')
root = project.getroot()
if rtt_root.startswith(bsp_root):
linkedResources = root.find('linkedResources')
if linkedResources == None:
linkedResources = SubElement(root, 'linkedResources')
links = linkedResources.findall('link')
# delete all RT-Thread folder links
for link in links:
if link.find('name').text.startswith('rt-thread'):
linkedResources.remove(link)
if prj_name:
name = root.find('name')
if name == None:
name = SubElement(root, 'name')
name.text = prj_name
out = open('.project', 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
return
def GenExcluding(env, project):
rtt_root = os.path.abspath(env['RTT_ROOT'])
bsp_root = os.path.abspath(env['BSP_ROOT'])
coll_dirs = CollectPaths(project['DIRS'])
all_paths_temp = [OSPath(path) for path in coll_dirs]
all_paths = []
# add used path
for path in all_paths_temp:
if path.startswith(rtt_root) or path.startswith(bsp_root):
all_paths.append(path)
if bsp_root.startswith(rtt_root):
# bsp folder is in the RT-Thread root folder, such as the RT-Thread source code on GitHub
exclude_paths = ExcludePaths(rtt_root, all_paths)
elif rtt_root.startswith(bsp_root):
# RT-Thread root folder is in the bsp folder, such as project folder which generate by 'scons --dist' cmd
check_path = []
exclude_paths = []
        # analyze the primary folders which are relative to BSP_ROOT and present in all_paths
for path in all_paths:
if path.startswith(bsp_root):
folders = RelativeProjectPath(env, path).split('\\')
if folders[0] != '.' and '\\' + folders[0] not in check_path:
check_path += ['\\' + folders[0]]
        # exclude the folders which are already managed by scons
for path in check_path:
exclude_paths += ExcludePaths(bsp_root + path, all_paths)
else:
exclude_paths = ExcludePaths(rtt_root, all_paths)
exclude_paths += ExcludePaths(bsp_root, all_paths)
paths = exclude_paths
exclude_paths = []
    # remove the folders which contain no source code matching source_pattern
for path in paths:
        # add the bsp and libcpu folders without collecting their source files (too many files)
if path.endswith('rt-thread\\bsp') or path.endswith('rt-thread\\libcpu'):
exclude_paths += [path]
continue
set = CollectAllFilesinPath(path, source_pattern)
if len(set):
exclude_paths += [path]
exclude_paths = [RelativeProjectPath(env, path).replace('\\', '/') for path in exclude_paths]
all_files = CollectFiles(all_paths, source_pattern)
src_files = project['FILES']
exclude_files = ExcludeFiles(all_files, src_files)
exclude_files = [RelativeProjectPath(env, file).replace('\\', '/') for file in exclude_files]
env['ExPaths'] = exclude_paths
env['ExFiles'] = exclude_files
return exclude_paths + exclude_files
def RelativeProjectPath(env, path):
project_root = os.path.abspath(env['BSP_ROOT'])
rtt_root = os.path.abspath(env['RTT_ROOT'])
if path.startswith(project_root):
return _make_path_relative(project_root, path)
if path.startswith(rtt_root):
return 'rt-thread/' + _make_path_relative(rtt_root, path)
# TODO add others folder
print('ERROR: the ' + path + ' not support')
return path
def HandleExcludingOption(entry, sourceEntries, excluding):
old_excluding = []
if entry != None:
old_excluding = entry.get('excluding').split('|')
sourceEntries.remove(entry)
value = ''
for item in old_excluding:
if item.startswith('//'):
old_excluding.remove(item)
else:
if value == '':
value = item
else:
value += '|' + item
for item in excluding:
# add special excluding path prefix for RT-Thread
item = '//' + item
if value == '':
value = item
else:
value += '|' + item
SubElement(sourceEntries, 'entry',
{'excluding': value, 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', 'kind': 'sourcePath', 'name': ""})
def UpdateCproject(env, project, excluding, reset, prj_name):
excluding = sorted(excluding)
cproject = etree.parse('.cproject')
root = cproject.getroot()
cconfigurations = root.findall('storageModule/cconfiguration')
for cconfiguration in cconfigurations:
tools = cconfiguration.findall('storageModule/configuration/folderInfo/toolChain/tool')
HandleToolOption(tools, env, project, reset)
sourceEntries = cconfiguration.find('storageModule/configuration/sourceEntries')
entry = sourceEntries.find('entry')
HandleExcludingOption(entry, sourceEntries, excluding)
# update refreshScope
if prj_name:
prj_name = '/' + prj_name
configurations = root.findall('storageModule/configuration')
for configuration in configurations:
resource = configuration.find('resource')
configuration.remove(resource)
SubElement(configuration, 'resource', {'resourceType': "PROJECT", 'workspacePath': prj_name})
# write back to .cproject
out = open('.cproject', 'w')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n')
out.write('<?fileVersion 4.0.0?>')
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
def TargetEclipse(env, reset=False, prj_name=None):
global source_pattern
print('Update eclipse setting...')
if not os.path.exists('.cproject'):
print('no eclipse CDT project found!')
return
project = ProjectInfo(env)
# update the project file structure info on '.project' file
UpdateProjectStructure(env, prj_name)
# generate the exclude paths and files
excluding = GenExcluding(env, project)
# update the project configuration on '.cproject' file
UpdateCproject(env, project, excluding, reset, prj_name)
print('done!')
return |
the-stack_0_15795 | """
API operations allowing clients to determine datatype supported by Galaxy.
"""
from galaxy.web import _future_expose_api_anonymous_and_sessionless as expose_api_anonymous_and_sessionless
from galaxy import exceptions
from galaxy.web.base.controller import BaseAPIController
from galaxy.util import asbool
from galaxy.datatypes.data import Data
import logging
log = logging.getLogger( __name__ )
class DatatypesController( BaseAPIController ):
@expose_api_anonymous_and_sessionless
def index( self, trans, **kwd ):
"""
GET /api/datatypes
Return an object containing upload datatypes.
"""
datatypes_registry = self._datatypes_registry
extension_only = asbool( kwd.get( 'extension_only', True ) )
upload_only = asbool( kwd.get( 'upload_only', True ) )
try:
if extension_only:
if upload_only:
return datatypes_registry.upload_file_formats
else:
return [ ext for ext in datatypes_registry.datatypes_by_extension ]
else:
rval = []
for elem in datatypes_registry.datatype_elems:
if not asbool(elem.get('display_in_upload')) and upload_only:
continue
keys = ['extension', 'description', 'description_url']
dictionary = {}
for key in keys:
dictionary[key] = elem.get(key)
extension = elem.get('extension')
if extension in datatypes_registry.datatypes_by_extension:
composite_files = datatypes_registry.datatypes_by_extension[ extension ].composite_files
if composite_files:
dictionary['composite_files'] = [_.dict() for _ in composite_files.itervalues()]
rval.append(dictionary)
return rval
except Exception as exception:
log.error( 'could not get datatypes: %s', str( exception ), exc_info=True )
if not isinstance( exception, exceptions.MessageException ):
raise exceptions.InternalServerError( str( exception ) )
else:
raise
@expose_api_anonymous_and_sessionless
def mapping( self, trans, **kwd ):
'''
GET /api/datatypes/mapping
Return a dictionary of class to class mappings.
'''
try:
ext_to_class_name = dict()
classes = []
for k, v in self._datatypes_registry.datatypes_by_extension.iteritems():
c = v.__class__
ext_to_class_name[k] = c.__module__ + "." + c.__name__
classes.append( c )
class_to_classes = dict()
def visit_bases( types, cls ):
for base in cls.__bases__:
if issubclass( base, Data ):
types.add( base.__module__ + "." + base.__name__ )
visit_bases( types, base )
for c in classes:
n = c.__module__ + "." + c.__name__
types = set( [ n ] )
visit_bases( types, c )
class_to_classes[ n ] = dict( ( t, True ) for t in types )
return dict( ext_to_class_name=ext_to_class_name, class_to_classes=class_to_classes )
except Exception as exception:
log.error( 'could not get datatype mapping: %s', str( exception ), exc_info=True )
if not isinstance( exception, exceptions.MessageException ):
raise exceptions.InternalServerError( str( exception ) )
else:
raise
@expose_api_anonymous_and_sessionless
def sniffers( self, trans, **kwd ):
'''
GET /api/datatypes/sniffers
Return a list of sniffers.
'''
try:
rval = []
for sniffer_elem in self._datatypes_registry.sniffer_elems:
datatype = sniffer_elem.get( 'type' )
if datatype is not None:
rval.append( datatype )
return rval
except Exception as exception:
log.error( 'could not get datatypes: %s', str( exception ), exc_info=True )
if not isinstance( exception, exceptions.MessageException ):
raise exceptions.InternalServerError( str( exception ) )
else:
raise
@expose_api_anonymous_and_sessionless
def converters( self, trans, **kwd ):
converters = []
for (source_type, targets) in self._datatypes_registry.datatype_converters.iteritems():
for target_type in targets:
converters.append( {
'source': source_type,
'target': target_type,
'tool_id': targets[ target_type ].id,
} )
return converters
@expose_api_anonymous_and_sessionless
def edam_formats( self, trans, **kwds ):
return self._datatypes_registry.edam_formats
@property
def _datatypes_registry( self ):
return self.app.datatypes_registry
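# Hedged usage sketch (endpoint paths are taken from the docstrings above; the
# Galaxy host is a placeholder):
#
#   curl 'https://galaxy.example.org/api/datatypes'                       # upload extensions
#   curl 'https://galaxy.example.org/api/datatypes?extension_only=False'  # full descriptions
#   curl 'https://galaxy.example.org/api/datatypes/mapping'               # class mappings
#   curl 'https://galaxy.example.org/api/datatypes/sniffers'              # sniffer order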
|
the-stack_0_15797 | """Webroot plugin."""
import argparse
import collections
import json
import logging
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Set
from acme import challenges
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot._internal import cli
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge as AnnotatedChallenge
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
from certbot.plugins import util
from certbot.util import safe_open
logger = logging.getLogger(__name__)
_WEB_CONFIG_CONTENT = """\
<?xml version="1.0" encoding="UTF-8" ?>
<!--Generated by Certbot-->
<configuration>
<system.webServer>
<staticContent>
<mimeMap fileExtension="." mimeType="text/plain" />
</staticContent>
</system.webServer>
</configuration>
"""
# This list references the hashes of all versions of the web.config files that Certbot could
# have generated during an HTTP-01 challenge. If you modify _WEB_CONFIG_CONTENT, you MUST add
# the new hash in this list.
_WEB_CONFIG_SHA256SUMS = ["20c5ca1bd58fa8ad5f07a2f1be8b7cbb707c20fcb607a8fc8db9393952846a97"]
class Authenticator(common.Plugin, interfaces.Authenticator):
"""Webroot Authenticator."""
description = "Place files in webroot directory"
MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
def more_info(self): # pylint: disable=missing-function-docstring
return self.MORE_INFO.format(self.conf("path"))
@classmethod
def add_parser_arguments(cls, add):
add("path", "-w", default=[], action=_WebrootPathAction,
help="public_html / webroot path. This can be specified multiple "
"times to handle different domains; each domain will have "
"the webroot path that preceded it. For instance: `-w "
"/var/www/example -d example.com -d www.example.com -w "
"/var/www/thing -d thing.net -d m.thing.net` (default: Ask)")
add("map", default={}, action=_WebrootMapAction,
help="JSON dictionary mapping domains to webroot paths; this "
"implies -d for each entry. You may need to escape this from "
"your shell. E.g.: --webroot-map "
'\'{"eg1.is,m.eg1.is":"/www/eg1/", "eg2.is":"/www/eg2"}\' '
"This option is merged with, but takes precedence over, -w / "
"-d entries. At present, if you put webroot-map in a config "
"file, it needs to be on a single line, like: webroot-map = "
'{"example.com":"/var/www"}.')
def auth_hint(self, failed_achalls): # pragma: no cover
return ("The Certificate Authority failed to download the temporary challenge files "
"created by Certbot. Ensure that the listed domains serve their content from "
"the provided --webroot-path/-w and that files created there can be downloaded "
"from the internet.")
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=unused-argument,missing-function-docstring
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.full_roots: Dict[str, str] = {}
self.performed: DefaultDict[str, Set[AnnotatedChallenge]] = collections.defaultdict(set)
# stack of dirs successfully created by this authenticator
self._created_dirs: List[str] = []
def prepare(self): # pylint: disable=missing-function-docstring
pass
def perform(self, achalls): # pylint: disable=missing-function-docstring
self._set_webroots(achalls)
self._create_challenge_dirs()
return [self._perform_single(achall) for achall in achalls]
def _set_webroots(self, achalls):
if self.conf("path"):
webroot_path = self.conf("path")[-1]
logger.info("Using the webroot path %s for all unmatched domains.",
webroot_path)
for achall in achalls:
self.conf("map").setdefault(achall.domain, webroot_path)
else:
known_webroots = list(set(self.conf("map").values()))
for achall in achalls:
if achall.domain not in self.conf("map"):
new_webroot = self._prompt_for_webroot(achall.domain,
known_webroots)
# Put the most recently input
# webroot first for easy selection
try:
known_webroots.remove(new_webroot)
except ValueError:
pass
known_webroots.insert(0, new_webroot)
self.conf("map")[achall.domain] = new_webroot
def _prompt_for_webroot(self, domain, known_webroots):
webroot = None
while webroot is None:
if known_webroots:
# Only show the menu if we have options for it
webroot = self._prompt_with_webroot_list(domain, known_webroots)
if webroot is None:
webroot = self._prompt_for_new_webroot(domain)
else:
# Allow prompt to raise PluginError instead of looping forever
webroot = self._prompt_for_new_webroot(domain, True)
return webroot
def _prompt_with_webroot_list(self, domain, known_webroots):
path_flag = "--" + self.option_name("path")
while True:
code, index = display_util.menu(
"Select the webroot for {0}:".format(domain),
["Enter a new webroot"] + known_webroots,
cli_flag=path_flag, force_interactive=True)
if code == display_util.CANCEL:
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return None if index == 0 else known_webroots[index - 1] # code == display_util.OK
def _prompt_for_new_webroot(self, domain, allowraise=False):
code, webroot = ops.validated_directory(
_validate_webroot,
"Input the webroot for {0}:".format(domain),
force_interactive=True)
if code == display_util.CANCEL:
if not allowraise:
return None
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return _validate_webroot(webroot) # code == display_util.OK
def _create_challenge_dirs(self):
path_map = self.conf("map")
if not path_map:
raise errors.PluginError(
"Missing parts of webroot configuration; please set either "
"--webroot-path and --domains, or --webroot-map. Run with "
" --help webroot for examples.")
for name, path in path_map.items():
self.full_roots[name] = os.path.join(path, os.path.normcase(
challenges.HTTP01.URI_ROOT_PATH))
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
# Change the permissions to be writable (GH #1389)
# Umask is used instead of chmod to ensure the client can also
# run as non-root (GH #1795)
old_umask = filesystem.umask(0o022)
try:
# We ignore the last prefix in the next iteration,
# as it does not correspond to a folder path ('/' or 'C:')
for prefix in sorted(util.get_prefixes(self.full_roots[name])[:-1], key=len):
if os.path.isdir(prefix):
# Don't try to create directory if it already exists, as some filesystems
# won't reliably raise EEXIST or EISDIR if directory exists.
continue
try:
# Set owner as parent directory if possible, apply mode for Linux/Windows.
# For Linux, this is coupled with the "umask" call above because
# os.mkdir's "mode" parameter may not always work:
# https://docs.python.org/3/library/os.html#os.mkdir
filesystem.mkdir(prefix, 0o755)
self._created_dirs.append(prefix)
try:
filesystem.copy_ownership_and_apply_mode(
path, prefix, 0o755, copy_user=True, copy_group=True)
except (OSError, AttributeError) as exception:
logger.warning("Unable to change owner and uid of webroot directory")
logger.debug("Error was: %s", exception)
except OSError as exception:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}".format(name, exception))
finally:
filesystem.umask(old_umask)
            # On Windows, generate a local web.config file that allows IIS to expose
            # challenge files despite the fact they do not have a file extension.
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(self.full_roots[name], "web.config")
if os.path.exists(web_config_path):
logger.info("A web.config file has not been created in "
"%s because another one already exists.", self.full_roots[name])
continue
logger.info("Creating a web.config file in %s to allow IIS "
"to serve challenge files.", self.full_roots[name])
with safe_open(web_config_path, mode="w", chmod=0o644) as web_config:
web_config.write(_WEB_CONFIG_CONTENT)
def _get_validation_path(self, root_path, achall):
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
root_path = self.full_roots[achall.domain]
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = filesystem.umask(0o022)
try:
with safe_open(validation_path, mode="wb", chmod=0o644) as validation_file:
validation_file.write(validation.encode())
finally:
filesystem.umask(old_umask)
self.performed[root_path].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-function-docstring
for achall in achalls:
root_path = self.full_roots.get(achall.domain, None)
if root_path is not None:
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Removing %s", validation_path)
os.remove(validation_path)
self.performed[root_path].remove(achall)
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(root_path, "web.config")
if os.path.exists(web_config_path):
sha256sum = crypto_util.sha256sum(web_config_path)
if sha256sum in _WEB_CONFIG_SHA256SUMS:
logger.info("Cleaning web.config file generated by Certbot in %s.",
root_path)
os.remove(web_config_path)
else:
logger.info("Not cleaning up the web.config file in %s "
"because it is not generated by Certbot.", root_path)
not_removed: List[str] = []
while self._created_dirs:
path = self._created_dirs.pop()
try:
os.rmdir(path)
except OSError as exc:
not_removed.insert(0, path)
logger.info("Challenge directory %s was not empty, didn't remove", path)
logger.debug("Error was: %s", exc)
self._created_dirs = not_removed
logger.debug("All challenges cleaned up")
class _WebrootMapAction(argparse.Action):
"""Action class for parsing webroot_map."""
def __call__(self, parser, namespace, webroot_map, option_string=None):
for domains, webroot_path in json.loads(webroot_map).items():
webroot_path = _validate_webroot(webroot_path)
namespace.webroot_map.update(
(d, webroot_path) for d in cli.add_domains(namespace, domains))
class _WebrootPathAction(argparse.Action):
"""Action class for parsing webroot_path."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._domain_before_webroot = False
def __call__(self, parser, namespace, webroot_path, option_string=None):
if self._domain_before_webroot:
raise errors.PluginError(
"If you specify multiple webroot paths, "
"one of them must precede all domain flags")
if namespace.webroot_path:
# Apply previous webroot to all matched
# domains before setting the new webroot path
prev_webroot = namespace.webroot_path[-1]
for domain in namespace.domains:
namespace.webroot_map.setdefault(domain, prev_webroot)
elif namespace.domains:
self._domain_before_webroot = True
namespace.webroot_path.append(_validate_webroot(webroot_path))
def _validate_webroot(webroot_path):
"""Validates and returns the absolute path of webroot_path.
:param str webroot_path: path to the webroot directory
:returns: absolute path of webroot_path
:rtype: str
"""
if not os.path.isdir(webroot_path):
raise errors.PluginError(webroot_path + " does not exist or is not a directory")
return os.path.abspath(webroot_path)
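# Hedged usage sketch: the command lines below mirror the plugin's own help
# text; the domains and webroot paths are placeholders.
#
#   certbot certonly --webroot -w /var/www/example -d example.com -d www.example.com
#
# or, equivalently, with an explicit mapping:
#
#   certbot certonly --webroot --webroot-map '{"example.com":"/var/www/example"}'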
|
the-stack_0_15801 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=50)
evaluation = dict(interval=50, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=1,
scale_aware_sigma=False,
)
# model settings
model = dict(
type='BottomUp',
pretrained='mmcls://mobilenet_v2',
backbone=dict(type='MobileNetV2', widen_factor=1., out_indices=(7, )),
keypoint_head=dict(
type='BottomUpSimpleHead',
in_channels=1280,
num_joints=17,
tag_per_joint=True,
with_ae_loss=[True]),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True],
with_ae=[True],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0],
),
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=24,
workers_per_gpu=1,
train=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='BottomUpCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
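# Hedged usage note (not part of the original config): with the standard MMPose
# tooling, a config like this one is typically launched with something similar
# to the commands below; the config path is a placeholder.
#
#   python tools/train.py configs/bottom_up/mobilenetv2_coco_512x512.py
#   ./tools/dist_train.sh configs/bottom_up/mobilenetv2_coco_512x512.py 8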
|
the-stack_0_15803 | #!/usr/bin/env python
# coding: utf-8
# In[7]:
#!/usr/bin/env python
# coding: utf-8
# In[1]:
"""
This script checks the zip code format and whether each zip code begins with
68, as expected for the City of Omaha
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
osmfile = 'omaha_nebraska'
zip_type_re = re.compile(r'\d{5}$') #5 digit zip code, no dashes
def audit_ziptype(zip_types, zipcode):
if zipcode[0:2]!= 68:
zip_types[zipcode[0:2]].add(zipcode)
def is_zipcode(elem):
return (elem.attrib['k'] == "addr:postcode")
def audit_zip(osmfile):
osm_file = open (osmfile, "r")
zip_types = defaultdict(set)
for event, elem in ET.iterparse(osmfile, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_zipcode(tag):
audit_ziptype(zip_types,tag.attrib['v'])
osm_file.close()
return zip_types
zip_print = audit_zip(osmfile)
def test():
pprint.pprint(dict(zip_print))
if __name__ == '__main__':
test()
def update_zipcode(zipcode):
"""
    This function updates zip codes by replacing malformed zip codes with fixed five-digit ones
"""
if re.findall(r'(^\d{5})-\d{4}$', zipcode):
valid_zipcode = re.findall(r'(^\d{5})-\d{4}$',zipcode)[0]
return valid_zipcode
else:
return zipcode
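# Example (illustrative): update_zipcode('68102-1234') returns '68102', while a
# plain five-digit code such as '68102' is returned unchanged.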
def test_zip():
for zips, ways in zip_print.items():
for name in ways:
better_name = update_zipcode(name)
print (name, "=>", better_name)
if __name__ == '__main__':
test_zip()
# In[ ]:
|
the-stack_0_15806 | import logging
import examples.basic.main as basic
import sim.docker as docker
from sim.core import Environment
from sim.faas import FunctionDefinition, FunctionSimulator, FunctionReplica, FunctionRequest
from sim.faassim import Simulation
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO)
# prepare simulation with topology and benchmark from basic example
sim = Simulation(basic.example_topology(), basic.ExampleBenchmark())
# override the SimulatorFactory factory
sim.create_simulator_factory = CustomSimulatorFactory
# run the simulation
sim.run()
class CustomSimulatorFactory:
def __init__(self) -> None:
super().__init__()
def create(self, env: Environment, fn: FunctionDefinition) -> FunctionSimulator:
return MyFunctionSimulator()
class MyFunctionSimulator(FunctionSimulator):
def deploy(self, env: Environment, replica: FunctionReplica):
# simulate a docker pull command for deploying the function (also done by sim.faassim.DockerDeploySimMixin)
yield from docker.pull(env, replica.function.image, replica.node.ether_node)
def startup(self, env: Environment, replica: FunctionReplica):
logger.info('[simtime=%.2f] starting up function replica for function %s', env.now, replica.function.name)
# you could create a very fine-grained setup routines here
yield env.timeout(10) # simulate docker startup
def setup(self, env: Environment, replica: FunctionReplica):
# no setup routine
yield env.timeout(0)
def invoke(self, env: Environment, replica: FunctionReplica, request: FunctionRequest):
# you would probably either create one simulator per function, or use a generalized simulator, this is just
# to demonstrate how the simulators are used to encapsulate simulator behavior.
logger.info('[simtime=%.2f] invoking function %s on node %s', env.now, request, replica.node.name)
if replica.function.name == 'python-pi':
if replica.node.name.startswith('rpi3'): # those are nodes we created in basic.example_topology()
yield env.timeout(20) # invoking this function takes 20 seconds on a raspberry pi
else:
yield env.timeout(2) # invoking this function takes 2 seconds on all other nodes in the cluster
elif replica.function.name == 'resnet50-inference':
yield env.timeout(0.5) # invoking this function takes 500 ms
else:
yield env.timeout(0)
def teardown(self, env: Environment, replica: FunctionReplica):
yield env.timeout(0)
if __name__ == '__main__':
main()
|
the-stack_0_15807 | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class BinTests(TranspileTestCase):
def test_int_but_no_index(self):
self.assertCodeExecution("""
class IntLike:
def __init__(self, val):
self.val = val
def __int__(self):
return self.val
x = IntLike(5)
print(bin(x))
""", run_in_function=False)
class BuiltinBinFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["bin"]
not_implemented = [
'test_int',
]
|
the-stack_0_15808 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from fairseq.data import encoders
@register_criterion("wsc")
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, "w")
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
parser.add_argument(
"--wsc-cross-entropy",
action="store_true",
help="use cross entropy formulation instead of margin loss",
)
parser.add_argument(
"--save-predictions", metavar="FILE", help="file to save predictions to"
)
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
-query_lprobs
+ self.args.wsc_margin_alpha
* (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
).sum()
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0.0, 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample["labels"]):
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"][i].unsqueeze(0),
sample["query_masks"][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"][i],
sample["candidate_masks"][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample["id"][i].item()
if self.prediction_h is not None:
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
if nqueries > 0:
agg_output["accuracy"] = ncorrect / float(nqueries)
return agg_output
@register_criterion("winogrande")
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"],
sample["query_masks"],
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"],
sample["candidate_masks"],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample["query_tokens"].size(0)
ncorrect = pred.sum().item()
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": sample_size,
}
return loss, sample_size, logging_output
|
the-stack_0_15810 | # -*- coding: utf-8 -*-
"""
author: 左想
date: 2018-01-11
"""
import cv2
import random
import numpy as np
from math import fabs, sin, cos, radians
from PIL import Image, ImageDraw, ImageEnhance
def img_rotation(file_path, output, degree, is_full):
"""
    Rotate an image; the rotated image is returned (saving is left to the caller);
    :param file_path: String, path of the source image;
    :param output: String, path where the rotated image is meant to be saved;
    :param degree: rotation angle in degrees;
    :param is_full: Bool, whether to keep the whole image while rotating.
            True enlarges the canvas so the complete image is preserved after rotation;
            False keeps the original image size during rotation;
    :return: the rotated image
"""
im = cv2.imread(file_path, 1)
height, width = im.shape[:2]
matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
if is_full:
height_new = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
width_new = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
        matRotation[0, 2] += (width_new - width) / 2  # key step: shift the transform so the rotated image is centered in the enlarged canvas
        matRotation[1, 2] += (height_new - height) / 2  # key step: same adjustment for the vertical direction
imgRotation = cv2.warpAffine(im, matRotation, (width_new, height_new), borderMode=cv2.BORDER_REPLICATE)
else:
imgRotation = cv2.warpAffine(im, matRotation, (width, height), borderMode=cv2.BORDER_REPLICATE)
return imgRotation
def randomColor(image):
"""
    Apply random color jitter to an image
    :param image: a PIL image
    :return: the color-jittered PIL image
"""
    random_factor = np.random.randint(0, 21) / 10.  # random factor
    color_image = ImageEnhance.Color(image).enhance(random_factor)  # adjust the image saturation
    random_factor = np.random.randint(3, 15) / 10.  # random factor
    brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor)  # adjust the image brightness
    random_factor = np.random.randint(10, 15) / 10.  # random factor
    contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor)  # adjust the image contrast
    random_factor = np.random.randint(0, 21) / 10.  # random factor
    return ImageEnhance.Sharpness(contrast_image).enhance(random_factor)  # adjust the image sharpness
def randomGaussian(image, mean=0.5, sigma=0.3):
"""
    Add Gaussian noise to an image
    :param image: a PIL image
    :return: the noisy PIL image
"""
def gaussianNoisy(im, mean=0.5, sigma=0.3):
"""
        Add Gaussian noise to a single channel
        :param im: flattened single-channel image
        :param mean: offset (mean of the noise)
        :param sigma: standard deviation of the noise
        :return: the noisy channel
"""
for _i in range(len(im)):
im[_i] += random.gauss(mean, sigma)
return im
    # convert the image into a numpy array
img = np.asarray(image)
    img.flags.writeable = True  # make the array writable
width, height = img.shape[:2]
img_r = gaussianNoisy(img[:, :, 0].flatten(), mean, sigma)
img_g = gaussianNoisy(img[:, :, 1].flatten(), mean, sigma)
img_b = gaussianNoisy(img[:, :, 2].flatten(), mean, sigma)
img[:, :, 0] = img_r.reshape([width, height])
img[:, :, 1] = img_g.reshape([width, height])
img[:, :, 2] = img_b.reshape([width, height])
return Image.fromarray(np.uint8(img))
def translate_coord(left_top, length, width, theta, center=None):
"""
    Compute the vertex coordinates of a rectangle after rotating it by theta radians,
    given the coordinates of the un-rotated rectangle
    :param left_top: coordinate of the top-left vertex
    :param length: rectangle length
    :param width: rectangle width
    :param theta: rotation angle in radians
    :return: the four rotated vertex coordinates
"""
    # bottom-left vertex
    left_down = [left_top[0], left_top[1] + width]
    # top-right vertex
    right_top = [left_top[0] + length, left_top[1]]
    # bottom-right vertex
    right_down = [left_top[0] + length, left_top[1] + width]
    # compute the center point if it is not given
    if center is None:
        center = [(left_top[0] + right_down[0]) / 2, (left_top[1] + right_down[1]) / 2]
    # compute the rotated coordinates of the four vertices
right_down_rotation = calculate_rotation_coord(right_down, center, theta)
right_top_rotation = calculate_rotation_coord(right_top, center, theta)
left_down_rotation = calculate_rotation_coord(left_down, center, theta)
left_top_rotation = calculate_rotation_coord(left_top, center, theta)
return left_top_rotation, left_down_rotation, right_top_rotation, right_down_rotation
def calculate_rotation_coord(point, center, theta):
"""
    Compute the coordinate of a point after rotating it by theta radians around another point (the center)
    :param point: coordinate of the point before rotation
    :param center: coordinate of the rotation center
    :param theta: rotation angle in radians
    :return: coordinate of the rotated point
"""
    # compute the coordinate of the point after rotation
right_rotation_x = (point[0] - center[0]) * cos(theta) - \
(point[1] - center[1]) * sin(theta) + center[0]
right_rotation_y = (point[0] - center[0]) * sin(theta) + \
(point[1] - center[1]) * cos(theta) + center[1]
return [int(right_rotation_x), int(right_rotation_y)]
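# Worked example (illustrative): rotating the point [1, 0] by pi/2 radians
# around the center [0, 0] gives approximately [0, 1]; the int() casts above
# truncate the tiny floating point error left by cos(pi/2).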
def draw_box(img, img_save, left_top, left_down, right_top, right_down):
"""
    Draw a box on the image given the coordinates of the four rectangle vertices
    :param img: path of the source image
    :param img_save: path where the annotated image is saved
    :param left_top: top-left vertex coordinate
    :param left_down: bottom-left vertex coordinate
    :param right_top: top-right vertex coordinate
    :param right_down: bottom-right vertex coordinate
    :return: None
"""
    # open the image
    im = Image.open(img)
    draw = ImageDraw.Draw(im)
    # draw four straight lines, one per side, to outline the box
draw.line((left_top[0], left_top[1], left_down[0], left_down[1]))
draw.line((left_top[0], left_top[1], right_top[0], right_top[1]))
draw.line((right_top[0], right_top[1], right_down[0], right_down[1]))
draw.line((left_down[0], left_down[1], right_down[0], right_down[1]))
im.save(img_save)
def get_color_box(img, height_im, width_im, move_pix=10):
"""
    Given a box height/width and a step size in pixels, slide the box over the image
    starting from the top-left corner; for each position compute the variance of the
    pixel hue (H) inside the box, and pick the region with the smallest variance,
    i.e. the region with the most uniform color
    :param img: path of the sample image
    :param height_im: height of the selected region
    :param width_im: width of the selected region
    :param move_pix: step size of the sliding box, in pixels
    :return: the start/end positions of the color-uniform region and the inverse RGB color of that region
"""
im = cv2.imread(img)
    # convert the BGR values to HSV
    hsv_im = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    # RGB (BGR) matrix
    rgb_array = np.array(im)
    # HSV matrix
    hsv_array = np.array(hsv_im)
    height, width, chanel = hsv_array.shape
    # number of times the box has to be moved in each direction
    width_times = int((width - width_im) / move_pix + 1)
    height_times = int((height - height_im) / move_pix + 1)
    # array that accumulates the variance for every box position
    var_result = np.ndarray([height_times, width_times], dtype=np.float32)
    # start sliding the box
for i in range(height_times):
for j in range(width_times):
# 计算box的起始位置
begin_height = i * move_pix
end_height = begin_height + height_im + 1
begin_width = j * move_pix
end_width = begin_width + width_im + 1
# 获取到box对应的hsv数组
hsv_box = hsv_array[begin_height:end_height, begin_width:end_width, :]
# 计算box内的hsv中h的方差
box_color_count = statistic_color(hsv_box)
var_result[i, j] = box_color_count
# 找出方差最小的box所在的行和列
min_row, min_col = np.where(var_result == np.min(var_result))
# 随机从符合的位置中选取一个
rand_number = random.randint(0, len(min_row)-1)
# 计算box对应的起始位置
height_location_begin = min_row[rand_number] * move_pix
height_location_end = height_location_begin + height_im
width_location_begin = min_col[rand_number] * move_pix
width_location_end = width_location_begin + width_im
# 获取到box对应的rgb数组
rgb_box = rgb_array[height_location_begin:height_location_end, width_location_begin:width_location_end, :]
# 获取box的里的反色
diff_max_rgb = get_diff_color(rgb_box)
return [[height_location_begin, height_location_end], [width_location_begin, width_location_end]], diff_max_rgb
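# Illustrative note (example numbers only): for a 640x480 image, an 80x60
# search box and the default 10-pixel stride, get_color_box evaluates
# ((480 - 60) / 10 + 1) * ((640 - 80) / 10 + 1) = 43 * 57 = 2451 candidate
# positions and keeps the one whose hue/saturation variance is smallest.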
def statistic_color(color_array):
    """
    Compute the colour-uniformity measure of a box: the sum of the variances
    of the H and S channels of its HSV values.
    :param color_array: HSV array of the box
    :return: variance of H plus variance of S
    """
    h_value = color_array[:, :, 0]
    s_value = color_array[:, :, 1]
    variance = np.var(h_value) + np.var(s_value)
    return variance
def get_diff_color(color_array):
    """
    Compute the mean RGB value of the current box and return its inverse colour.
    :param color_array: RGB array of the current box
    :return: RGB value of the inverse of the box's mean colour
    """
    r_mean = np.mean(color_array[:, :, 0])
    g_mean = np.mean(color_array[:, :, 1])
    b_mean = np.mean(color_array[:, :, 2])
    return (int(255-r_mean), int(255-g_mean), int(255-b_mean))
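# Illustrative sketch (the patch below is a made-up example): on a uniform
# mid-grey region the "inverse colour" is simply 255 minus each channel mean.
def _inverse_colour_example():
    patch = np.full((60, 80, 3), 128, dtype=np.uint8)
    print(get_diff_color(patch))  # (127, 127, 127)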
def save_image_use_cv2(image, path):
cv2.imwrite(path, image)
def save_image_use_pil(image, path):
image.save(path)
|
the-stack_0_15811 | import numpy as np
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
@pytest.mark.parametrize(
'cls',
[
DocumentArray,
DocumentArraySqlite,
DocumentArrayAnnlite,
DocumentArrayWeaviate,
DocumentArrayQdrant,
DocumentArrayElastic,
],
)
@pytest.mark.parametrize(
'content_attr', ['texts', 'embeddings', 'tensors', 'blobs', 'contents']
)
def test_content_empty_getter_return_none(cls, content_attr, start_storage):
if cls in [
DocumentArrayAnnlite,
DocumentArrayWeaviate,
DocumentArrayQdrant,
DocumentArrayElastic,
]:
da = cls(config={'n_dim': 3})
else:
da = cls()
assert getattr(da, content_attr) is None
@pytest.mark.parametrize(
'cls',
[
DocumentArray,
DocumentArraySqlite,
DocumentArrayAnnlite,
DocumentArrayWeaviate,
DocumentArrayQdrant,
DocumentArrayElastic,
],
)
@pytest.mark.parametrize(
'content_attr',
[
('texts', ''),
('embeddings', np.array([])),
('tensors', np.array([])),
('blobs', []),
('contents', []),
],
)
def test_content_empty_setter(cls, content_attr, start_storage):
if cls in [
DocumentArrayAnnlite,
DocumentArrayWeaviate,
DocumentArrayQdrant,
DocumentArrayElastic,
]:
da = cls(config={'n_dim': 3})
else:
da = cls()
setattr(da, content_attr[0], content_attr[1])
assert getattr(da, content_attr[0]) is None
@pytest.mark.parametrize(
'cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
@pytest.mark.parametrize(
'content_attr',
[
('texts', ['s'] * 10),
('tensors', np.random.random([10, 2])),
('blobs', [b's'] * 10),
],
)
def test_content_getter_setter(cls, content_attr, config, start_storage):
if config:
da = cls.empty(10, config=config)
else:
da = cls.empty(10)
setattr(da, content_attr[0], content_attr[1])
np.testing.assert_equal(da.contents, content_attr[1])
da.contents = content_attr[1]
np.testing.assert_equal(da.contents, content_attr[1])
np.testing.assert_equal(getattr(da, content_attr[0]), content_attr[1])
da.contents = None
assert da.contents is None
@pytest.mark.parametrize('da_len', [0, 1, 2])
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_content_empty(da_len, da_cls, config, start_storage):
if config:
da = da_cls.empty(da_len, config=config)
else:
da = da_cls.empty(da_len)
assert not da.contents
assert not da.tensors
if da_len == 0:
assert not da.texts
assert not da.blobs
else:
assert da.texts == [''] * da_len
assert da.blobs == [b''] * da_len
da.texts = ['hello'] * da_len
if da_len == 0:
assert not da.contents
else:
assert da.contents == ['hello'] * da_len
assert da.texts == ['hello'] * da_len
assert not da.tensors
assert da.blobs == [b''] * da_len
@pytest.mark.parametrize('da_len', [0, 1, 2])
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
],
)
def test_embeddings_setter(da_len, da_cls, config, start_storage):
if config:
da = da_cls.empty(da_len, config=config)
else:
da = da_cls.empty(da_len)
da.embeddings = np.random.rand(da_len, 5)
for doc in da:
assert doc.embedding.shape == (5,)
|
the-stack_0_15813 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2020 Edgewall Software
# Copyright (C) 2004 Francois Harvey <[email protected]>
# Copyright (C) 2005 Matthew Good <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
#
# Author: Francois Harvey <[email protected]>
# Matthew Good <[email protected]>
import os.path
from trac.config import ConfigurationError, Option, ParsingError, \
PathOption, UnicodeConfigParser
from trac.core import Component, TracError, implements
from trac.perm import IPermissionPolicy
from trac.util import pathjoin, to_list
from trac.util.text import exception_to_unicode
from trac.versioncontrol.api import RepositoryManager
def parent_iter(path):
while 1:
yield path
if path == '/':
return
path = path[:-1]
yield path
idx = path.rfind('/')
path = path[:idx + 1]
def parse(authz_file, modules):
"""Parse a Subversion authorization file.
Return a dict of modules, each containing a dict of paths, each containing
a dict mapping users to permissions. Only modules contained in `modules`
are retained.
"""
parser = UnicodeConfigParser(ignorecase_option=False)
parser.read(authz_file)
groups = {}
aliases = {}
sections = {}
for section in parser.sections():
if section == 'groups':
for name, value in parser.items(section):
groups.setdefault(name, set()).update(to_list(value))
elif section == 'aliases':
for name, value in parser.items(section):
aliases[name] = value.strip()
else:
for name, value in parser.items(section):
parts = section.split(':', 1)
module, path = parts[0] if len(parts) > 1 else '', parts[-1]
if module in modules:
sections.setdefault((module, path), []) \
.append((name, value))
def resolve(subject, done):
if subject.startswith('@'):
done.add(subject)
for members in groups[subject[1:]] - done:
for each in resolve(members, done):
yield each
elif subject.startswith('&'):
yield aliases[subject[1:]]
else:
yield subject
authz = {}
for (module, path), items in sections.iteritems():
section = authz.setdefault(module, {}).setdefault(path, {})
for subject, perms in items:
readable = 'r' in perms
# Ordering isn't significant; any entry could grant permission
section.update((user, readable)
for user in resolve(subject, set())
if not section.get(user))
return authz
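# Illustrative example (not part of Trac; the file contents are made up, the
# resulting structure follows from the code above): given an authz file with
#
#     [groups]
#     devs = alice, bob
#
#     [module:/trunk]
#     @devs = rw
#     * =
#
# parse(authz_file, {'module', ''}) returns a structure of the form
# {'module': {'/trunk': {'alice': True, 'bob': True, '*': False}}}, i.e.
# group members are expanded and each user maps to whether the path is
# readable for them.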
class AuthzSourcePolicy(Component):
"""Permission policy for `source:` and `changeset:` resources using a
Subversion authz file.
`FILE_VIEW` and `BROWSER_VIEW` permissions are granted as specified in the
authz file.
`CHANGESET_VIEW` permission is granted for changesets where `FILE_VIEW` is
granted on at least one modified file, as well as for empty changesets.
"""
implements(IPermissionPolicy)
authz_file = PathOption('svn', 'authz_file', '',
"""The path to the Subversion
[%(svnbook)s authorization (authz) file].
To enable authz permission checking, the `AuthzSourcePolicy`
permission policy must be added to `[trac] permission_policies`.
Non-absolute paths are relative to the Environment `conf`
directory.
""",
doc_args={'svnbook': 'http://svnbook.red-bean.com/en/1.7/'
'svn.serverconfig.pathbasedauthz.html'})
authz_module_name = Option('svn', 'authz_module_name', '',
"""The module prefix used in the `authz_file` for the default
repository. If left empty, the global section is used.
""")
_handled_perms = frozenset([(None, 'BROWSER_VIEW'),
(None, 'CHANGESET_VIEW'),
(None, 'FILE_VIEW'),
(None, 'LOG_VIEW'),
('source', 'BROWSER_VIEW'),
('source', 'FILE_VIEW'),
('source', 'LOG_VIEW'),
('changeset', 'CHANGESET_VIEW')])
def __init__(self):
self._mtime = 0
self._authz = {}
self._users = set()
# IPermissionPolicy methods
def check_permission(self, action, username, resource, perm):
realm = resource.realm if resource else None
if (realm, action) in self._handled_perms:
authz, users = self._get_authz_info()
if authz is None:
return False
if username == 'anonymous':
usernames = '$anonymous', '*'
else:
usernames = username, '$authenticated', '*'
if resource is None:
return True if users & set(usernames) else None
rm = RepositoryManager(self.env)
try:
repos = rm.get_repository(resource.parent.id)
except TracError:
return True # Allow error to be displayed in the repo index
if repos is None:
return True
modules = [resource.parent.id or self.authz_module_name]
if modules[0]:
modules.append('')
def check_path_0(spath):
sections = [authz.get(module, {}).get(spath)
for module in modules]
sections = [section for section in sections if section]
denied = False
for user in usernames:
for section in sections:
if user in section:
if section[user]:
return True
denied = True
# Don't check section without module name
# because the section with module name defines
# the user's permissions.
break
if denied: # All users has no readable permission.
return False
def check_path(path):
path = '/' + pathjoin(repos.scope, path)
if path != '/':
path += '/'
# Allow access to parent directories of allowed resources
for spath in set(sum((list(authz.get(module, {}))
for module in modules), [])):
if spath.startswith(path):
result = check_path_0(spath)
if result is True:
return True
# Walk from resource up parent directories
for spath in parent_iter(path):
result = check_path_0(spath)
if result is not None:
return result
if realm == 'source':
return check_path(resource.id)
elif realm == 'changeset':
changes = list(repos.get_changeset(resource.id).get_changes())
if not changes or any(check_path(change[0])
for change in changes):
return True
def _get_authz_info(self):
if not self.authz_file:
self.log.error("The [svn] authz_file configuration option in "
"trac.ini is empty or not defined")
raise ConfigurationError()
try:
mtime = os.path.getmtime(self.authz_file)
except OSError as e:
self.log.error("Error accessing svn authz permission policy "
"file: %s", exception_to_unicode(e))
raise ConfigurationError()
if mtime != self._mtime:
self._mtime = mtime
rm = RepositoryManager(self.env)
modules = set(repos.reponame
for repos in rm.get_real_repositories())
if '' in modules and self.authz_module_name:
modules.add(self.authz_module_name)
modules.add('')
self.log.info("Parsing authz file: %s", self.authz_file)
try:
self._authz = parse(self.authz_file, modules)
except ParsingError as e:
self.log.error("Error parsing svn authz permission policy "
"file: %s", exception_to_unicode(e))
raise ConfigurationError()
else:
self._users = {user
for paths in self._authz.itervalues()
for path in paths.itervalues()
for user, result in path.iteritems()
if result}
return self._authz, self._users
|
the-stack_0_15814 | # -*- coding: utf-8 -*-
"""
pygments.lexers.sas
~~~~~~~~~~~~~~~~~~~
Lexer for SAS.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, String, Text, \
Other, Generic
__all__ = ['SASLexer']
class SASLexer(RegexLexer):
"""
For `SAS <http://www.sas.com/>`_ files.
.. versionadded:: 2.2
"""
# Syntax from syntax/sas.vim by James Kidd <[email protected]>
name = 'SAS'
aliases = ['sas']
filenames = ['*.SAS', '*.sas']
mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
flags = re.IGNORECASE | re.MULTILINE
builtins_macros = (
"bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
"display", "do", "else", "end", "eval", "global", "goto", "if",
"index", "input", "keydef", "label", "left", "length", "let",
"local", "lowcase", "macro", "mend", "nrquote",
"nrstr", "put", "qleft", "qlowcase", "qscan",
"qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
"str", "substr", "superq", "syscall", "sysevalf", "sysexec",
"sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
"then", "to", "trim", "unquote", "until", "upcase", "verify",
"while", "window"
)
builtins_conditionals = (
"do", "if", "then", "else", "end", "until", "while"
)
builtins_statements = (
"abort", "array", "attrib", "by", "call", "cards", "cards4",
"catname", "continue", "datalines", "datalines4", "delete", "delim",
"delimiter", "display", "dm", "drop", "endsas", "error", "file",
"filename", "footnote", "format", "goto", "in", "infile", "informat",
"input", "keep", "label", "leave", "length", "libname", "link",
"list", "lostcard", "merge", "missing", "modify", "options", "output",
"out", "page", "put", "redirect", "remove", "rename", "replace",
"retain", "return", "select", "set", "skip", "startsas", "stop",
"title", "update", "waitsas", "where", "window", "x", "systask"
)
builtins_sql = (
"add", "and", "alter", "as", "cascade", "check", "create",
"delete", "describe", "distinct", "drop", "foreign", "from",
"group", "having", "index", "insert", "into", "in", "key", "like",
"message", "modify", "msgtype", "not", "null", "on", "or",
"order", "primary", "references", "reset", "restrict", "select",
"set", "table", "unique", "update", "validate", "view", "where"
)
builtins_functions = (
"abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
"attrn", "band", "betainv", "blshift", "bnot", "bor",
"brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
"close", "cnonct", "collate", "compbl", "compound",
"compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
"daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
"datejul", "datepart", "datetime", "day", "dclose", "depdb",
"depdbsl", "depsl", "depsyd",
"deptab", "dequote", "dhms", "dif", "digamma",
"dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
"dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
"fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
"fexist", "fget", "fileexist", "filename", "fileref",
"finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
"fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
"fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
"fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
"hbound", "hms", "hosthelp", "hour", "ibessel", "index",
"indexc", "indexw", "input", "inputc", "inputn", "int",
"intck", "intnx", "intrr", "irr", "jbessel", "juldate",
"kurtosis", "lag", "lbound", "left", "length", "lgamma",
"libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
"logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
"mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
"normal", "note", "npv", "open", "ordinal", "pathname",
"pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
"probbeta", "probbnml", "probchi", "probf", "probgam",
"probhypr", "probit", "probnegb", "probnorm", "probt",
"put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
"ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
"rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
"rewind", "right", "round", "saving", "scan", "sdf", "second",
"sign", "sin", "sinh", "skewness", "soundex", "spedis",
"sqrt", "std", "stderr", "stfips", "stname", "stnamel",
"substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
"sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
"tnonct", "today", "translate", "tranwrd", "trigamma",
"trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
"varfmt", "varinfmt", "varlabel", "varlen", "varname",
"varnum", "varray", "varrayx", "vartype", "verify", "vformat",
"vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
"vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
"vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
"vinformatw", "vinformatwx", "vinformatx", "vlabel",
"vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
"vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
"zipnamel", "zipstate"
)
tokens = {
'root': [
include('comments'),
include('proc-data'),
include('cards-datalines'),
include('logs'),
include('general'),
(r'.', Text),
],
# SAS is multi-line regardless, but * is ended by ;
'comments': [
(r'^\s*\*.*?;', Comment),
(r'/\*.*?\*/', Comment),
(r'^\s*\*(.|\n)*?;', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
],
# Special highlight for proc, data, quit, run
'proc-data': [
(r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
Keyword.Reserved),
],
# Special highlight cards and datalines
'cards-datalines': [
(r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
],
'data': [
(r'(.|\n)*^\s*;\s*$', Other, '#pop'),
],
# Special highlight for put NOTE|ERROR|WARNING (order matters)
'logs': [
(r'\n?^\s*%?put ', Keyword, 'log-messages'),
],
'log-messages': [
(r'NOTE(:|-).*', Generic, '#pop'),
(r'WARNING(:|-).*', Generic.Emph, '#pop'),
(r'ERROR(:|-).*', Generic.Error, '#pop'),
include('general'),
],
'general': [
include('keywords'),
include('vars-strings'),
include('special'),
include('numbers'),
],
# Keywords, statements, functions, macros
'keywords': [
(words(builtins_statements,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_sql,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_conditionals,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_macros,
prefix = r'%',
suffix = r'\b'),
Name.Builtin),
(words(builtins_functions,
prefix = r'\b',
suffix = r'\('),
Name.Builtin),
],
# Strings and user-defined variables and macros (order matters)
'vars-strings': [
(r'&[a-z_]\w{0,31}\.?', Name.Variable),
(r'%[a-z_]\w{0,31}', Name.Function),
(r'\'', String, 'string_squote'),
(r'"', String, 'string_dquote'),
],
'string_squote': [
('\'', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
# AFAIK, macro variables are not evaluated in single quotes
# (r'&', Name.Variable, 'validvar'),
(r'[^$\'\\]+', String),
(r'[$\'\\]', String),
],
'string_dquote': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'&', Name.Variable, 'validvar'),
(r'[^$&"\\]+', String),
(r'[$"\\]', String),
],
'validvar': [
(r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
],
# SAS numbers and special variables
'numbers': [
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
Number),
],
'special': [
(r'(null|missing|_all_|_automatic_|_character_|_n_|'
r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
Keyword.Constant),
],
# 'operators': [
# (r'(-|=|<=|>=|<|>|<>|&|!=|'
# r'\||\*|\+|\^|/|!|~|~=)', Operator)
# ],
}
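# Illustrative usage sketch (not part of the original module): running the
# lexer through the standard Pygments API on a tiny made-up SAS snippet.
def _demo():  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'data work.demo;\n    set sashelp.class;\nrun;\n'
    print(highlight(sample, SASLexer(), TerminalFormatter()))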
|
the-stack_0_15815 | #!/usr/bin/env python
"""
"""
# ==============================================================================
# --General imports ------------------------------------------------------------
# ==============================================================================
from time import sleep
import math
import numpy as np
import sys
import copy
sys.path.append('./HiddenMarkovModel')
from HiddenMarkovModel.HMM_MODEL import *
# ==============================================================================
# -- ROS imports ---------------------------------------------------------------
# ==============================================================================
import rospy
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from perception.msg import Object
from perception.msg import ObjectsList
from perception.msg import WaypointsList
from perception.msg import Waypoint
from perception.msg import TrajectoriesList
from perception.srv import DrivingPaths, DrivingPathsRequest, DrivingPathsResponse
from prediction.msg import VehiclesCollisionEvent
from prediction.msg import VehiclesCollisionEventList
from prediction.msg import PedestrianCollisionEvent
from prediction.msg import PedestrianCollisionEventList
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# ==============================================================================
# -- Implementation ------------------------------------------------------------
# ==============================================================================
MAX_SEQ_LENGTH_MEMORY = 5
MAX_STATE_SEQ_LENGTH_MEMORY = 1
GLOBAL_DRAW = True
state_i = 0
class VehicleState:
def __init__(self, vehicle, angle):
self.vehicle = [vehicle]
self.angles = [angle]
self.vehicle_id = vehicle.object_id
self.trajectories = []
self.possible_trajectory_id = []
self.state = []
self.observations_seq = []
def v_pop(self, index=0):
if len(self.vehicle) > MAX_SEQ_LENGTH_MEMORY:
self.angles.pop(index)
self.vehicle.pop(index)
def s_pop(self, index=0):
if len(self.state) > MAX_STATE_SEQ_LENGTH_MEMORY:
self.state.pop(index)
def ob_pop(self, index=0):
if len(self.observations_seq) > MAX_STATE_SEQ_LENGTH_MEMORY:
self.observations_seq.pop(index)
class MotionPrediction:
def __init__(self):
self.vehicles_state_list = []
self.vehicles_id = []
self.trajectories_length = 40.0
self.HMM_MODEL = HMM_MODEL()
self.vehicles_collision_events_info = []
self.pedestrians_collision_events_info = []
# --- ROS ---
rospy.init_node('Prediction_node', anonymous=True)
self.ego_vehicle = Object()
self.vehicles_list = []
self.pedestrians_list = []
self.traffic_signs_loc = []
self.ego_trajectory = []
self.subscriber_ego_vehicle = rospy.Subscriber("ego_vehicle_msg", Object, self.callback_ego_vehicle,
queue_size=1)
self.subscriber_ego_trajectory = rospy.Subscriber("ego_trajectory_msg", WaypointsList, self.callback_ego_trajectory,
queue_size=1)
self.subscriber_vehicles_list = rospy.Subscriber('vehicles_list_msg', ObjectsList, self.callback_vehicles_list,
queue_size=1)
self.subscriber_pedestrians_list = rospy.Subscriber('pedestrians_list_msg', ObjectsList, self.callback_pedestrians_list,
queue_size=1)
self.subscriber_signs_location = rospy.Subscriber("signs_location_msg", WaypointsList, self.callback_signs_location,
queue_size=1)
self.pub_vehicles_collision_info = rospy.Publisher('vehicles_collision_info_msg', VehiclesCollisionEventList, queue_size=1)
self.pub_pedestrians_collision_info = rospy.Publisher('pedestrians_collision_info_msg', PedestrianCollisionEventList, queue_size=1)
# Draw trajectories
if GLOBAL_DRAW:
self.pub_prototype_trajectories_draw = rospy.Publisher('prototype_trajectories_draw_msg', TrajectoriesList, queue_size=1)
self.pub_collision_points_draw = rospy.Publisher('collision_points_draw_msg', WaypointsList, queue_size=1)
def track_vehicles_and_save_states(self, vehicles, angles):
# For vehicles found
for vehicle in vehicles:
if vehicle.object_id in self.vehicles_id:
index = self.vehicles_id.index(vehicle.object_id)
veh_state = self.vehicles_state_list[index]
veh_state.vehicle.append(vehicle)
veh_state.angles.append(angles[vehicles.index(vehicle)])
veh_state.v_pop(0)
else:
self.vehicles_state_list.append(VehicleState(vehicle, angles[vehicles.index(vehicle)]))
self.vehicles_id.append(vehicle.object_id)
# Remove vehicles from list which don't exist any more
if not vehicles:
self.vehicles_state_list = []
self.vehicles_id = []
existed_vehicles_id = [veh.object_id for veh in vehicles]
#print(existed_vehicles_id)
if len(existed_vehicles_id) != 0:
self.vehicles_state_list = [self.vehicles_state_list[i] for i in range(len(self.vehicles_state_list)) if self.vehicles_id[i] in existed_vehicles_id]
self.vehicles_id = [v_id for v_id in self.vehicles_id if v_id in existed_vehicles_id]
def get_objects_around_vehicle(self, obj_type="vehicle", min_radius=50.0):
ego_vehicle = copy.deepcopy(self.ego_vehicle)
if obj_type == "vehicle":
to_angle = 120.0 - ego_vehicle.speed * 3.6 if ego_vehicle.speed * 3.6 < 30.0 else 90.0
else:
to_angle = 80.0 - ego_vehicle.speed * 3.6 if ego_vehicle.speed * 3.6 < 30.0 else 40.0
from_angle = - to_angle
t_stop = 3.0
radius = min_radius + ego_vehicle.speed * t_stop
objects, angles = self.objects_in_angle_range_and_in_radius(obj_type, from_angle, to_angle, radius)
return objects, angles
def objects_in_angle_range_and_in_radius(self, object_type, from_angle=-90.0, to_angle=90.0, radius=20.0):
"""
        Find all the objects of a given type (vehicles, pedestrians, etc.)
        between two angles (from_angle -> to_angle) in the vehicle coordinate
        system and within a given radius of the ego vehicle (self.ego_vehicle).
        :param object_type: The object type: vehicles, pedestrians, traffic signs, etc.
        :param from_angle: Start angle relative to the vehicle coordinate system, in degrees in the interval [-180, 180)
        :param to_angle: Final angle relative to the vehicle coordinate system, in degrees in the interval [-180, 180)
        :param radius: The maximum radius within which the object needs to be
"""
ego_vehicle = copy.deepcopy(self.ego_vehicle)
if object_type == "vehicle":
objects_list = copy.deepcopy(self.vehicles_list)
elif object_type == "pedestrian":
objects_list = copy.deepcopy(self.pedestrians_list)
else:
return [], []
if len(objects_list) == 0:
return [], []
target_objects = []
angle_list = []
for an_object in objects_list:
x = an_object.x - ego_vehicle.x
y = an_object.y - ego_vehicle.y
theta = math.degrees(math.atan2(y, x)) % 360.0
theta = theta - ego_vehicle.yaw
theta = theta % 360.0
theta = theta - 360.0 if theta > 180.0 else theta
rel_dist = math.hypot(an_object.y - ego_vehicle.y, an_object.x - ego_vehicle.x)
if from_angle <= theta <= to_angle and rel_dist < radius:
target_objects.append(an_object)
# theta = theta + 360 if theta < 0 else theta
angle_list.append(theta)
return [object_i for object_i in target_objects] if len(target_objects) != 0 else [], angle_list
def get_prototype_trajectories_and_vehicles(self):
draw_points_flag = True
percentage_renew = 0.5
vehicles, angles = self.get_objects_around_vehicle(obj_type="vehicle")
self.track_vehicles_and_save_states(vehicles, angles)
veh_trajectories = self.client_driving_paths(vehicles)
if vehicles:
for v_i, vehicle in enumerate(vehicles):
index = self.vehicles_id.index(vehicle.object_id)
if len(self.vehicles_state_list[index].trajectories) == 0:
self.vehicles_state_list[index].trajectories = veh_trajectories[v_i]
else:
w_e = self.vehicles_state_list[index].trajectories[0][-1]
w_b = self.vehicles_state_list[index].trajectories[0][0]
dist = math.hypot(vehicle.x - w_e.x, vehicle.y - w_e.y)
dist_reverse = math.hypot(vehicle.x - w_b.x, vehicle.y - w_b.y)
if dist < percentage_renew*self.trajectories_length or \
dist_reverse > (percentage_renew/4)*self.trajectories_length or \
len(self.vehicles_state_list[index].trajectories) == 1:
self.vehicles_state_list[index].trajectories = veh_trajectories[v_i]
if GLOBAL_DRAW:
self.publish_prototype_trajectories_draw()
return [state.vehicle[-1] for state in self.vehicles_state_list], [state.trajectories for state in self.vehicles_state_list]
def calculate_trajectories_probability(self):
"""
For each vehicle saved in data base we calculate the most possible trajectory from the prototypes trajectories
to follow. For each vehicle track we take all the past instances of the vehicle and we calculate the minimum
distance of each from each trajectory and sum them. Finally the trajectory with the lowest sum is chosen.
The most possible trajectory and the position of the vehicle on it is saved in "possible_trajectory_id"
"""
e_threshold = 0.1
min_probability = 0.3
max_probability = 0.6
trajectories_list = []
probabilities = []
for track in self.vehicles_state_list:
x_y = [[vehicle_inst.x, vehicle_inst.y] for vehicle_inst in track.vehicle]
min_sums = []
index = []
yaw_cos = []
for trajectory in track.trajectories:
dist_min_list = []
idx = 0
for i in range(len(x_y)):
min_dist = float("inf")
for w in trajectory:
dist = math.hypot(w.x - x_y[i][0], w.y - x_y[i][1])
if min_dist > dist:
min_dist = dist
if i == len(x_y)-1:
idx = trajectory.index(w)
dist_min_list.append(min_dist)
yaw_cos.append(sum([math.cos(math.radians(tr.yaw)) for tr in trajectory[idx:]])/len(trajectory[idx:]))
index.append(idx)
min_sums.append(sum(dist_min_list))
k1 = abs(math.cos(math.radians(track.vehicle[-1].yaw)))
max_yaw = [1-abs(k1 - abs(c_yaw)) for c_yaw in yaw_cos]
max_sums = [1-m_s/(max(min_sums)+0.0000001) for m_s in min_sums]
max_value = [(max_sums[i]+max_yaw[i]**2) for i in range(len(max_sums))]
probability = [m_v/(sum(max_value)+0.000001) for m_v in max_value]
max_of_all = max(probability)
track.possible_trajectory_id = []
for i in range(len(probability)):
if abs(max_of_all-probability[i]) < e_threshold or \
probability[probability.index(max_of_all)] < min_probability or\
probability[i] > max_probability:
track.possible_trajectory_id.append([i, index[i]])
if len(track.possible_trajectory_id) == 0:
i = probability.index(max_of_all)
track.possible_trajectory_id.append([i, index[i]])
trajectories = []
for i in range(len(track.trajectories)):
pos_tr = [p_t[0] for p_t in track.possible_trajectory_id]
if i in pos_tr:
trajectories.append(track.trajectories[i][track.possible_trajectory_id[pos_tr.index(i)][1]:])
track.trajectories = trajectories
probabilities.append(probability)
trajectories_list.append(trajectories)
return trajectories_list, probabilities
def get_trajectories_with_stop_constraints(self):
min_dist = 3.0
vehicles = [track.vehicle[-1] for track in self.vehicles_state_list]
trajectories = [track.trajectories for track in self.vehicles_state_list]
traffic_signs_loc = copy.deepcopy(self.traffic_signs_loc)
traject_with_constr = []
for signs_loc in traffic_signs_loc:
for v_t in trajectories:
for trajectory in v_t:
for t_loc in trajectory:
dist = math.hypot(signs_loc.x - t_loc.x, signs_loc.y - t_loc.y)
if dist < min_dist:
vehicle = vehicles[trajectories.index(v_t)]
v_dist = math.hypot(signs_loc.x - vehicle.x, signs_loc.y - vehicle.y)
traject_with_constr.append([trajectories.index(v_t), v_t.index(trajectory), v_dist])
break
return traject_with_constr
def predict_vehicle_speed(self, traject_with_constr):
ego_vehicle = copy.deepcopy(self.ego_vehicle)
vehicles_speed = []
speed_probabilities = []
tick_time = 1.0
a_stop = 6.0
a_deceleration = 4.0
a_acceleration = 3.0
global state_i
for v_i in range(len(self.vehicles_state_list)):
vehicle = self.vehicles_state_list[v_i].vehicle[-1]
traf_info_const = [const_info for const_info in traject_with_constr if v_i == const_info[0]]
if len(traf_info_const) != 0:
observation = self.HMM_MODEL.get_observation(vehicle, traf_info_const, ego_vehicle)
#print(observation)
self.vehicles_state_list[v_i].observations_seq.append(observation)
self.vehicles_state_list[v_i].ob_pop(0)
#print(self.vehicles_state_list[v_i].observations_seq)
ln_prob, num_seq, curr_state = self.HMM_MODEL.predict_state(vehicle.speed, self.vehicles_state_list[v_i].observations_seq)
probability = math.exp(ln_prob)
states = [STATE_VECTOR[i] for i in num_seq]
#print(states)
next_state = states[-1]
state_i = OBSERVATION_VECTOR.index(observation)
else:
probability = 1
next_state = STATE_VECTOR[2] # Steady state
curr_state = next_state
if next_state == STATE_VECTOR[2]: # Steady state
predicted_speed = vehicle.speed
elif next_state == STATE_VECTOR[0]: # Stop
predicted_speed = vehicle.speed - tick_time*a_stop
predicted_speed = 0.0 if predicted_speed < 0.0 else predicted_speed
elif next_state == STATE_VECTOR[3]: # Accelerate
predicted_speed = vehicle.speed + tick_time * a_acceleration*probability
else: # Deceleration
predicted_speed = vehicle.speed - tick_time * a_deceleration*probability
predicted_speed = 0.0 if predicted_speed < 0.0 else predicted_speed
speed_probabilities.append(probability)
vehicles_speed.append(predicted_speed)
#print(STATE_VECTOR.index(next_state), state_i, round(predicted_speed, 2), round(vehicle.speed, 2), round(probability, 2), STATE_VECTOR.index(curr_state))
return vehicles_speed, speed_probabilities
def predict_vehicles_collision(self):
ego_vehicle = copy.deepcopy(self.ego_vehicle)
draw_points_flag = True
ego_vehicle_trajectory = copy.deepcopy(self.ego_trajectory)
if len(ego_vehicle_trajectory) == 0:
return True
vehicles, all_vehicles_paths = self.get_prototype_trajectories_and_vehicles()
possible_trajectories, trajectories_probabilities = self.calculate_trajectories_probability()
traject_with_constr = self.get_trajectories_with_stop_constraints()
predicted_vehicles_speed, speed_probabilities = self.predict_vehicle_speed(traject_with_constr)
# angles = [angles[i] for i in range(len(vehicles)) if vehicles[i] in c_vehicles]
min_collision_dist = 2.0
t = [0.0]
ego_vehicle_speed = ego_vehicle.speed + 0.00000001
dist = 0.0
for i_d in range(len(ego_vehicle_trajectory)-1):
dist += math.hypot(ego_vehicle_trajectory[i_d + 1].y - ego_vehicle_trajectory[i_d].y,
ego_vehicle_trajectory[i_d + 1].x - ego_vehicle_trajectory[i_d].x)
t.append(dist/ego_vehicle_speed if ego_vehicle_speed > 0.0 else 100000.0)
t_v = []
for trajectories in possible_trajectories:
t_tr = []
other_vehicles_speed = predicted_vehicles_speed[possible_trajectories.index(trajectories)] + 0.001
for trajectory in trajectories:
t_t = [0.0]
dist = 0.0
for i_d in range(len(trajectory) - 1):
dist += math.hypot(trajectory[i_d + 1].y - trajectory[i_d].y,
trajectory[i_d + 1].x - trajectory[i_d].x)
t_t.append(dist / other_vehicles_speed if other_vehicles_speed > 0.0 else 100000.0)
t_tr.append(t_t)
t_v.append(t_tr)
collision_vehicles = []
for t_tr in t_v:
# t_windows = min_collision_dist/vehicles[t_v.index(t_tr)].speed + 0.001
for t_t in t_tr:
break_flag = False
for time_i in t_t:
bigger_than = False
for time_j in t:
if time_j >= time_i:
bigger_than = True
w1 = possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][t_t.index(time_i)]
w2 = ego_vehicle_trajectory[t.index(time_j)]
dist = math.hypot(w2.y - w1.y, w2.x - w1.x)
if dist < min_collision_dist:
break_flag = True
collision_time = time_i
collision_vehicles.append([t_v.index(t_tr), t_tr.index(t_t), collision_time, w1])
break
if not bigger_than and time_i != 0.0:
for time_j in t:
w1 = possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][t_t.index(time_i)]
w2 = ego_vehicle_trajectory[t.index(time_j)]
dist = math.hypot(w2.y - w1.y, w2.x - w1.x)
if dist < min_collision_dist:
break_flag = True
collision_time = time_i
if math.hypot(w2.y - ego_vehicle_trajectory[0].y, w2.x - ego_vehicle_trajectory[0].x) > \
math.hypot(w2.y - possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][0].y,
w2.x - possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][0].x):
collision_time = time_j
collision_vehicles.append([t_v.index(t_tr), t_tr.index(t_t), collision_time, w1])
break
if break_flag:
break
collision_events_info = []
for collision in collision_vehicles:
vehicle = self.vehicles_state_list[collision[0]].vehicle[-1]
trajectory_probability = trajectories_probabilities[collision[0]][collision[1]]
speed_probability = speed_probabilities[collision[0]]
collision_time = collision[2]
collision_point = collision[3]
predict_probability = trajectory_probability*speed_probability
collision_events_info.append([vehicle, collision_point, collision_time, predict_probability])
self.vehicles_collision_events_info = collision_events_info
self.publish_vehicles_collision_events()
if GLOBAL_DRAW and len(vehicles) != 0 and len(collision_vehicles) != 0:
self.publish_collision_points_draw([collision[-1] for collision in collision_vehicles])
def predict_pedestrians_collision(self):
ego_vehicle = copy.deepcopy(self.ego_vehicle)
radius_sum = 2.0
pedestrians, angles = self.get_objects_around_vehicle(obj_type="pedestrian", min_radius=12)
t_col = []
dist_col = []
for pedestrian in pedestrians:
t = 100000.0
min_dist = 10000.0
relative_speed_x = pedestrian.vel_x - ego_vehicle.vel_x - math.cos(math.radians(ego_vehicle.yaw))*0.1
relative_speed_y = pedestrian.vel_y - ego_vehicle.vel_y - math.sin(math.radians(ego_vehicle.yaw))*0.1
relative_distance_x = pedestrian.x - ego_vehicle.x
relative_distance_y = pedestrian.y - ego_vehicle.y
a = relative_speed_x**2 + relative_speed_y**2 + 0.000001
b = 2*(relative_speed_x*relative_distance_x + relative_speed_y*relative_distance_y)
c = relative_distance_x**2 + relative_distance_y**2 - radius_sum**2
discriminant = b**2 - 4*a*c
if discriminant > 0.0: # Collision happens
t0 = (-b - math.sqrt(discriminant)) / (2.0 * a)
t1 = (-b + math.sqrt(discriminant)) / (2.0 * a)
t = min([t0, t1]) if min([t0, t1]) >= 0.0 else max([t0, t1])
min_dist = 0.0
else:
t = -b/(2*a)
min_dist = a * (t ** 2) + t * b + c
t, min_dist = (100000.0, 10000.0) if t < 0.0 else (t, min_dist) # No collision if t < 0
t_col.append(t)
dist_col.append(min_dist)
if t_col:
collision_event = [pedestrians[dist_col.index(min(dist_col))], angles[dist_col.index(min(dist_col))],
t_col[dist_col.index(min(dist_col))], min(dist_col)]
else:
collision_event = []
self.pedestrians_collision_events_info = [collision_event]
self.publish_pedestrians_collision_events()
if GLOBAL_DRAW and len(collision_event) != 0 and collision_event[-1] < 2.0:
pedestrian_object = collision_event[0]
dist_ped = math.hypot(pedestrian_object.x-ego_vehicle.x, pedestrian_object.y-ego_vehicle.y)
angle = math.radians(collision_event[1])
dist_ped = dist_ped*math.cos(angle)
waypoint = Waypoint()
theta = math.radians(ego_vehicle.yaw)
waypoint.x = ego_vehicle.x + dist_ped * math.cos(theta)
waypoint.y = ego_vehicle.y + dist_ped * math.sin(theta)
self.publish_collision_points_draw([waypoint])
# -------- ROS functions ---------
def callback_ego_vehicle(self, ros_data):
self.ego_vehicle = ros_data
def callback_vehicles_list(self, ros_data):
self.vehicles_list = ros_data.objects_list
def callback_pedestrians_list(self, ros_data):
self.pedestrians_list = ros_data.objects_list
def callback_signs_location(self, ros_data):
self.traffic_signs_loc = ros_data.waypoints_list
def callback_ego_trajectory(self, ros_data):
self.ego_trajectory = ros_data.waypoints_list
def client_driving_paths(self, vehicles):
        rospy.wait_for_service('driving_paths_srv')
x_list = [vehicle.x for vehicle in vehicles]
y_list = [vehicle.y for vehicle in vehicles]
try:
driving_paths = rospy.ServiceProxy('driving_paths_srv', DrivingPaths)
resp1 = driving_paths(x_list, y_list, self.trajectories_length)
veh_trajectories = []
for vehicle in resp1.driving_paths:
trajectories = [trajectory.waypoints_list for trajectory in vehicle.trajectories_list]
veh_trajectories.append(trajectories)
#rospy.loginfo(veh_trajectories)
return veh_trajectories
except rospy.ServiceException as e:
print("Service call failed: %s" % e)
def publish_vehicles_collision_events(self):
pub = self.pub_vehicles_collision_info
collision_event_list = []
for collision_event in self.vehicles_collision_events_info:
ros_vehicle_collision_event = VehiclesCollisionEvent()
ros_vehicle_collision_event.object = collision_event[0]
ros_vehicle_collision_event.collision_point = collision_event[1]
ros_vehicle_collision_event.collision_time = collision_event[2]
ros_vehicle_collision_event.prediction_probability = collision_event[3]
collision_event_list.append(ros_vehicle_collision_event)
ros_collision_event_list = VehiclesCollisionEventList()
ros_collision_event_list.collision_event_list = collision_event_list
#rospy.loginfo(ros_collision_event_list)
pub.publish(ros_collision_event_list)
def publish_pedestrians_collision_events(self):
pub = self.pub_pedestrians_collision_info
collision_event_list = []
if self.pedestrians_collision_events_info[0]:
for collision_event in self.pedestrians_collision_events_info:
ros_collision_event = PedestrianCollisionEvent()
ros_collision_event.object = collision_event[0]
ros_collision_event.angle = collision_event[1]
ros_collision_event.collision_time = collision_event[2]
ros_collision_event.collision_distance = collision_event[3]
collision_event_list.append(ros_collision_event)
ros_collision_event_list = PedestrianCollisionEventList()
ros_collision_event_list.collision_event_list = collision_event_list
# rospy.loginfo(ros_collision_event)
pub.publish(ros_collision_event_list)
def publish_prototype_trajectories_draw(self):
pub = self.pub_prototype_trajectories_draw
trajectories_list = []
for prototype_trajectories in [veh.trajectories for veh in self.vehicles_state_list]:
for trajectory in prototype_trajectories:
ros_trajectory = WaypointsList()
ros_trajectory.waypoints_list = trajectory
trajectories_list.append(ros_trajectory)
prototype_trajectories = TrajectoriesList()
prototype_trajectories.trajectories_list = trajectories_list
#rospy.loginfo(prototype_trajectories)
pub.publish(prototype_trajectories)
def publish_collision_points_draw(self, collision_points):
pub = self.pub_collision_points_draw
ros_points_list = WaypointsList()
ros_points_list.waypoints_list = collision_points
#rospy.loginfo(prototype_trajectories)
pub.publish(ros_points_list)
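# Illustrative sketch (not part of the original node, made-up numbers): the
# closed-form time-to-collision solved in predict_pedestrians_collision. A
# pedestrian 10 m ahead, closing head-on at 5 m/s with a combined collision
# radius of 2 m, is reached after (10 - 2) / 5 = 1.6 s.
def _time_to_collision_example():
    radius_sum = 2.0
    rel_vx, rel_vy = -5.0, 0.0   # closing head-on at 5 m/s
    rel_dx, rel_dy = 10.0, 0.0   # 10 m ahead of the ego vehicle
    a = rel_vx ** 2 + rel_vy ** 2
    b = 2 * (rel_vx * rel_dx + rel_vy * rel_dy)
    c = rel_dx ** 2 + rel_dy ** 2 - radius_sum ** 2
    discriminant = b ** 2 - 4 * a * c  # > 0 here, so a collision occurs
    t0 = (-b - math.sqrt(discriminant)) / (2.0 * a)
    t1 = (-b + math.sqrt(discriminant)) / (2.0 * a)
    print(min(t0, t1))  # 1.6 seconds until the 2 m radius is breached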
def main():
motion_prediction = MotionPrediction()
try:
while not rospy.is_shutdown():
motion_prediction.predict_vehicles_collision()
motion_prediction.predict_pedestrians_collision()
except rospy.ROSInterruptException:
print("Local path planner node failed")
pass
if __name__ == '__main__':
main()
|
the-stack_0_15816 | """Data types for agent-based learning."""
import collections
import enum
import akro
import numpy as np
from garage.np import concat_tensor_dict_list, slice_nested_dict
# pylint: disable=too-many-lines
class EpisodeBatch(
collections.namedtuple('EpisodeBatch', [
'env_spec',
'observations',
'last_observations',
'actions',
'rewards',
'env_infos',
'agent_infos',
'step_types',
'lengths',
])):
# pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501
r"""A tuple representing a batch of whole episodes.
Data type for on-policy algorithms.
A :class:`~EpisodeBatch` represents a batch of whole episodes, produced
    when one or more agents interact with one or more environments.
+-----------------------+-------------------------------------------------+
| Symbol | Description |
+=======================+=================================================+
| :math:`N` | Episode batch dimension |
+-----------------------+-------------------------------------------------+
| :math:`[T]` | Variable-length time dimension of each |
| | episode |
+-----------------------+-------------------------------------------------+
| :math:`S^*` | Single-step shape of a time-series tensor |
+-----------------------+-------------------------------------------------+
| :math:`N \bullet [T]` | A dimension computed by flattening a |
| | variable-length time dimension :math:`[T]` into |
| | a single batch dimension with length |
| | :math:`sum_{i \in N} [T]_i` |
+-----------------------+-------------------------------------------------+
Attributes:
env_spec (EnvSpec): Specification for the environment from
which this data was sampled.
observations (numpy.ndarray): A numpy array of shape
:math:`(N \bullet [T], O^*)` containing the (possibly
multi-dimensional) observations for all time steps in this batch.
These must conform to :obj:`EnvStep.observation_space`.
last_observations (numpy.ndarray): A numpy array of shape
:math:`(N, O^*)` containing the last observation of each episode.
This is necessary since there are one more observations than
actions every episode.
actions (numpy.ndarray): A numpy array of shape
:math:`(N \bullet [T], A^*)` containing the (possibly
multi-dimensional) actions for all time steps in this batch. These
must conform to :obj:`EnvStep.action_space`.
rewards (numpy.ndarray): A numpy array of shape
:math:`(N \bullet [T])` containing the rewards for all time steps
in this batch.
        env_infos (dict): A dict of numpy arrays containing arbitrary
            environment state information. Each value of this dict should be
            a numpy array of shape :math:`(N \bullet [T])` or
            :math:`(N \bullet [T], S^*)`.
        agent_infos (dict): A dict of numpy arrays containing arbitrary agent
            state information. Each value of this dict should be a numpy array
            of shape :math:`(N \bullet [T])` or :math:`(N \bullet [T], S^*)`.
            For example, this may contain the hidden states from an RNN policy.
        step_types (numpy.ndarray): A numpy array of `StepType` with shape
            :math:`(N \bullet [T])` containing the time step types for all
            transitions in this batch.
lengths (numpy.ndarray): An integer numpy array of shape :math:`(N,)`
containing the length of each episode in this batch. This may be
used to reconstruct the individual episodes.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
__slots__ = ()
def __new__(cls, env_spec, observations, last_observations, actions,
rewards, env_infos, agent_infos, step_types,
lengths): # noqa: D102
# pylint: disable=too-many-branches
first_observation = observations[0]
first_action = actions[0]
inferred_batch_size = lengths.sum()
# lengths
if len(lengths.shape) != 1:
raise ValueError(
'Lengths tensor must be a tensor of shape (N,), but got a '
'tensor of shape {} instead'.format(lengths.shape))
if not (lengths.dtype.kind == 'u' or lengths.dtype.kind == 'i'):
raise ValueError(
'Lengths tensor must have an integer dtype, but got dtype {} '
'instead.'.format(lengths.dtype))
# observations
if not env_spec.observation_space.contains(first_observation):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
first_observation.shape):
raise ValueError('observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
first_observation.shape))
else:
raise ValueError(
'observations must conform to observation_space {}, but '
'got data with shape {} instead.'.format(
env_spec.observation_space, first_observation))
if observations.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of observations to be length {}, '
'but got length {} instead.'.format(inferred_batch_size,
observations.shape[0]))
        # last_observations
if not env_spec.observation_space.contains(last_observations[0]):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
last_observations[0].shape):
raise ValueError('last_observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
last_observations[0].shape))
else:
raise ValueError(
'last_observations must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space, last_observations[0]))
if last_observations.shape[0] != len(lengths):
raise ValueError(
'Expected batch dimension of last_observations to be length '
'{}, but got length {} instead.'.format(
len(lengths), last_observations.shape[0]))
# actions
if not env_spec.action_space.contains(first_action):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if isinstance(env_spec.action_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.action_space.flat_dim != np.prod(
first_action.shape):
raise ValueError('actions should have the same '
'dimensionality as the action_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.action_space.flat_dim,
first_action.shape))
else:
raise ValueError(
'actions must conform to action_space {}, but got data '
'with shape {} instead.'.format(env_spec.action_space,
first_action))
if actions.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of actions to be length {}, but got '
'length {} instead.'.format(inferred_batch_size,
actions.shape[0]))
# rewards
if rewards.shape != (inferred_batch_size, ):
raise ValueError(
'Rewards tensor must have shape {}, but got shape {} '
'instead.'.format(inferred_batch_size, rewards.shape))
# env_infos
for key, val in env_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
'Each entry in env_infos must be a numpy array or '
'dictionary, but got key {} with value type {} instead.'.
format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in env_infos must have a batch dimension of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
# agent_infos
for key, val in agent_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
'Each entry in agent_infos must be a numpy array or '
                    'dictionary, but got key {} with value type {} '
                    'instead.'.format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in agent_infos must have a batch dimension of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
# step_types
if step_types.shape != (inferred_batch_size, ):
raise ValueError(
'step_types tensor must have shape {}, but got shape {} '
'instead.'.format(inferred_batch_size, step_types.shape))
if step_types.dtype != StepType:
raise ValueError(
'step_types tensor must be dtype `StepType`, but got tensor '
'of dtype {} instead.'.format(step_types.dtype))
return super().__new__(EpisodeBatch, env_spec, observations,
last_observations, actions, rewards, env_infos,
agent_infos, step_types, lengths)
@classmethod
def concatenate(cls, *batches):
"""Create a EpisodeBatch by concatenating EpisodeBatches.
Args:
batches (list[EpisodeBatch]): Batches to concatenate.
Returns:
EpisodeBatch: The concatenation of the batches.
"""
if __debug__:
for b in batches:
assert (set(b.env_infos.keys()) == set(
batches[0].env_infos.keys()))
assert (set(b.agent_infos.keys()) == set(
batches[0].agent_infos.keys()))
env_infos = {
k: np.concatenate([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
return cls(
env_spec=batches[0].env_spec,
observations=np.concatenate(
[batch.observations for batch in batches]),
last_observations=np.concatenate(
[batch.last_observations for batch in batches]),
actions=np.concatenate([batch.actions for batch in batches]),
rewards=np.concatenate([batch.rewards for batch in batches]),
env_infos=env_infos,
agent_infos=agent_infos,
step_types=np.concatenate([batch.step_types for batch in batches]),
lengths=np.concatenate([batch.lengths for batch in batches]))
def split(self):
"""Split an EpisodeBatch into a list of EpisodeBatches.
The opposite of concatenate.
Returns:
list[EpisodeBatch]: A list of EpisodeBatches, with one
episode per batch.
"""
episodes = []
start = 0
for i, length in enumerate(self.lengths):
stop = start + length
eps = EpisodeBatch(
env_spec=self.env_spec,
observations=self.observations[start:stop],
last_observations=np.asarray([self.last_observations[i]]),
actions=self.actions[start:stop],
rewards=self.rewards[start:stop],
env_infos=slice_nested_dict(self.env_infos, start, stop),
agent_infos=slice_nested_dict(self.agent_infos, start, stop),
step_types=self.step_types[start:stop],
lengths=np.asarray([length]))
episodes.append(eps)
start = stop
return episodes
def to_list(self):
"""Convert the batch into a list of dictionaries.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
* observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*) (the unflattened state
space of the current environment). observations[i] was
used by the agent to choose actions[i].
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i].
* actions (np.ndarray): Non-flattened array of actions. Should
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
* step_types (numpy.ndarray): A numpy array of `StepType with
shape (T,) containing the time step types for all
transitions in this batch.
"""
start = 0
episodes = []
for i, length in enumerate(self.lengths):
stop = start + length
episodes.append({
'observations':
self.observations[start:stop],
'next_observations':
np.concatenate((self.observations[1 + start:stop],
[self.last_observations[i]])),
'actions':
self.actions[start:stop],
'rewards':
self.rewards[start:stop],
'env_infos':
{k: v[start:stop]
for (k, v) in self.env_infos.items()},
'agent_infos':
{k: v[start:stop]
for (k, v) in self.agent_infos.items()},
'step_types':
self.step_types[start:stop]
})
start = stop
return episodes
@classmethod
def from_list(cls, env_spec, paths):
"""Create a EpisodeBatch from a list of episodes.
Args:
env_spec (EnvSpec): Specification for the environment from which
this data was sampled.
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]): Keys:
* observations (np.ndarray): Non-flattened array of
observations. Typically has shape (T, S^*) (the unflattened
state space of the current environment). observations[i]
was used by the agent to choose actions[i]. observations
may instead have shape (T + 1, S^*).
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i]. Optional.
Note that to ensure all information from the environment
was preserved, observations[i] should have shape (T + 1,
S^*), or this key should be set. However, this method is
lenient and will "duplicate" the last observation if the
original last observation has been lost.
* actions (np.ndarray): Non-flattened array of actions. Should
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
* step_types (numpy.ndarray): A numpy array of `StepType with
shape (T,) containing the time step types for all
transitions in this batch.
"""
lengths = np.asarray([len(p['rewards']) for p in paths])
if all(
len(path['observations']) == length + 1
for (path, length) in zip(paths, lengths)):
last_observations = np.asarray(
[p['observations'][-1] for p in paths])
observations = np.concatenate(
[p['observations'][:-1] for p in paths])
else:
# The number of observations and timesteps must match.
observations = np.concatenate([p['observations'] for p in paths])
if paths[0].get('next_observations') is not None:
last_observations = np.asarray(
[p['next_observations'][-1] for p in paths])
else:
last_observations = np.asarray(
[p['observations'][-1] for p in paths])
stacked_paths = concat_tensor_dict_list(paths)
# Temporary solution. This logic is not needed if algorithms process
# step_types instead of dones directly.
if 'dones' in stacked_paths and 'step_types' not in stacked_paths:
step_types = np.array([
StepType.TERMINAL if done else StepType.MID
for done in stacked_paths['dones']
],
dtype=StepType)
stacked_paths['step_types'] = step_types
del stacked_paths['dones']
return cls(env_spec=env_spec,
observations=observations,
last_observations=last_observations,
actions=stacked_paths['actions'],
rewards=stacked_paths['rewards'],
env_infos=stacked_paths['env_infos'],
agent_infos=stacked_paths['agent_infos'],
step_types=stacked_paths['step_types'],
lengths=lengths)
@property
def next_observations(self):
"""Get the observations seen after actions are performed.
Usually, in an :class:`~EpisodeBatch`, next_observations don't need to
be stored explicitly, since the next observation is already stored in
the batch.
Returns:
np.ndarray: The "next_observations".
"""
return np.concatenate(
tuple([
np.concatenate((eps.observations[1:], eps.last_observations))
for eps in self.split()
]))
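# Illustrative sketch (not part of the original API): reconstructing the
# per-episode next-observation arrays from the flattened `observations`,
# `last_observations` and `lengths` fields, mirroring the slicing used in
# `to_list` and `next_observations` above. The helper name is invented.
def _example_reconstruct_next_observations(observations, last_observations,
                                           lengths):
    """Return a list with one next-observation array per episode."""
    episodes = []
    start = 0
    for i, length in enumerate(lengths):
        stop = start + length
        # Within an episode, the next observation of transition t is
        # observations[t + 1]; the final entry comes from last_observations.
        episodes.append(
            np.concatenate((observations[1 + start:stop],
                            [last_observations[i]])))
        start = stop
    return episodes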
class StepType(enum.IntEnum):
"""Defines the status of a :class:`~TimeStep` within a sequence.
Note that the last :class:`~TimeStep` in a sequence can either be
:attribute:`StepType.TERMINAL` or :attribute:`StepType.TIMEOUT`.
Suppose max_episode_length = 5:
* A success sequence terminated at step 4 will look like:
FIRST, MID, MID, TERMINAL
* A success sequence terminated at step 5 will look like:
FIRST, MID, MID, MID, TERMINAL
* An unsuccessful sequence truncated by time limit will look like:
FIRST, MID, MID, MID, TIMEOUT
"""
# Denotes the first :class:`~TimeStep` in a sequence.
FIRST = 0
# Denotes any :class:`~TimeStep` in the middle of a sequence (i.e. not the
# first or last one).
MID = 1
# Denotes the last :class:`~TimeStep` in a sequence that terminates
# successfully.
TERMINAL = 2
# Denotes the last :class:`~TimeStep` in a sequence truncated by time
# limit.
TIMEOUT = 3
@classmethod
def get_step_type(cls, step_cnt, max_episode_length, done):
"""Determines the step type based on step cnt and done signal.
Args:
step_cnt (int): current step cnt of the environment.
max_episode_length (int): maximum episode length.
done (bool): the done signal returned by Environment.
Returns:
StepType: the step type.
Raises:
            ValueError: if step_cnt is < 1. In this case the environment's
                `reset()` has likely not been called yet and step_cnt is None.
"""
if max_episode_length is not None and step_cnt >= max_episode_length:
return StepType.TIMEOUT
elif done:
return StepType.TERMINAL
elif step_cnt == 1:
return StepType.FIRST
elif step_cnt < 1:
raise ValueError('Expect step_cnt to be >= 1, but got {} '
'instead. Did you forget to call `reset('
')`?'.format(step_cnt))
else:
return StepType.MID
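# Minimal sketch (not part of the original module) of how
# `StepType.get_step_type` labels the steps of an episode; the
# max_episode_length of 3 is an arbitrary value chosen for illustration.
def _example_step_type_labels():
    labels = [
        StepType.get_step_type(step_cnt=cnt, max_episode_length=3, done=done)
        for cnt, done in ((1, False), (2, False), (3, False))
    ]
    # The last step hits the episode length limit, so it is a TIMEOUT.
    assert labels == [StepType.FIRST, StepType.MID, StepType.TIMEOUT]
    return labels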
class TimeStep(
collections.namedtuple('TimeStep', [
'env_spec', 'observation', 'action', 'reward', 'next_observation',
'env_info', 'agent_info', 'step_type'
])):
# pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501
r"""A tuple representing a single TimeStep.
A :class:`~TimeStep` represents a single sample when an agent interacts
    with an environment. It describes a SARS (state-action-reward-state)
    tuple that characterizes the evolution of an MDP.
Attributes:
env_spec (EnvSpec): Specification for the environment from which this
data was sampled.
        observation (numpy.ndarray): A numpy array of shape :math:`(O^*)`
            containing the observation for this time step in the
            environment. These must conform to
            :obj:`EnvStep.observation_space`.
            The observation before applying the action.
            `None` if `step_type` is `StepType.FIRST`, i.e. at the start of a
            sequence.
        action (numpy.ndarray): A numpy array of shape :math:`(A^*)`
            containing the action for this time step. These must conform
            to :obj:`EnvStep.action_space`.
            `None` if `step_type` is `StepType.FIRST`, i.e. at the start of a
            sequence.
        reward (float): A float representing the reward for taking the action
            given the observation, at this time step.
            `None` if `step_type` is `StepType.FIRST`, i.e. at the start of a
            sequence.
        next_observation (numpy.ndarray): A numpy array of shape :math:`(O^*)`
            containing the observation for this time step in the
            environment. These must conform to
            :obj:`EnvStep.observation_space`.
            The observation after applying the action.
        env_info (dict): A dict of arbitrary environment state information.
agent_info (dict): A dict of arbitrary agent
state information. For example, this may contain the hidden states
from an RNN policy.
step_type (StepType): a :class:`~StepType` enum value. Can be one of
:attribute:`~StepType.FIRST`, :attribute:`~StepType.MID`,
:attribute:`~StepType.TERMINAL`, or :attribute:`~StepType.TIMEOUT`.
"""
@property
def first(self):
"""bool: Whether this step is the first of its episode."""
return self.step_type is StepType.FIRST
@property
def mid(self):
"""bool: Whether this step is in the middle of its episode."""
return self.step_type is StepType.MID
@property
def terminal(self):
"""bool: Whether this step records a termination condition."""
return self.step_type is StepType.TERMINAL
@property
def timeout(self):
"""bool: Whether this step records a timeout condition."""
return self.step_type is StepType.TIMEOUT
@property
def last(self):
"""bool: Whether this step is the last of its episode."""
return self.step_type is StepType.TERMINAL or self.step_type \
is StepType.TIMEOUT
@classmethod
def from_env_step(cls, env_step, last_observation, agent_info):
"""Create a TimeStep from a EnvStep.
Args:
env_step (EnvStep): the env step returned by the environment.
last_observation (numpy.ndarray): A numpy array of shape
                :math:`(O^*)` containing the observation for this time
step in the environment. These must conform to
:obj:`EnvStep.observation_space`.
The observation before applying the action.
agent_info (dict): A dict of arbitrary agent state information.
Returns:
TimeStep: The TimeStep with all information of EnvStep plus the
agent info.
"""
return cls(env_spec=env_step.env_spec,
observation=last_observation,
action=env_step.action,
reward=env_step.reward,
next_observation=env_step.observation,
env_info=env_step.env_info,
agent_info=agent_info,
step_type=env_step.step_type)
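# Sketch (not part of the original module): the `first`/`mid`/`terminal`/
# `timeout`/`last` properties depend only on `step_type`, so they can be
# demonstrated with placeholder values for the remaining fields.
def _example_time_step_flags():
    step = TimeStep(env_spec=None, observation=None, action=None, reward=None,
                    next_observation=None, env_info={}, agent_info={},
                    step_type=StepType.TERMINAL)
    assert step.terminal and step.last and not step.mid
    return step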
class InOutSpec:
"""Describes the input and output spaces of a primitive or module.
Args:
input_space (akro.Space): Input space of a module.
output_space (akro.Space): Output space of a module.
"""
def __init__(self, input_space, output_space):
self._input_space = input_space
self._output_space = output_space
@property
def input_space(self):
"""Get input space of the module.
Returns:
akro.Space: Input space of the module.
"""
return self._input_space
@property
def output_space(self):
"""Get output space of the module.
Returns:
akro.Space: Output space of the module.
"""
return self._output_space
class TimeStepBatch(
collections.namedtuple('TimeStepBatch', [
'env_spec', 'observations', 'actions', 'rewards',
'next_observations', 'env_infos', 'agent_infos', 'step_types'
])):
# pylint: disable=missing-param-doc, missing-type-doc
"""A tuple representing a batch of TimeSteps.
Data type for off-policy algorithms, imitation learning and batch-RL.
Attributes:
env_spec (EnvSpec): Specification for the environment from
which this data was sampled.
observations (numpy.ndarray): Non-flattened array of observations.
Typically has shape (batch_size, S^*) (the unflattened state space
of the current environment).
actions (numpy.ndarray): Non-flattened array of actions. Should
have shape (batch_size, S^*) (the unflattened action space of the
current environment).
rewards (numpy.ndarray): Array of rewards of shape (batch_size, 1).
        next_observations (numpy.ndarray): Non-flattened array of next
            observations. Has shape (batch_size, S^*). next_observations[i]
            was observed by the agent after taking actions[i].
        env_infos (dict): A dict of arbitrary environment state
            information.
        agent_infos (dict): A dict of arbitrary agent state information. For
            example, this may contain the hidden states from an RNN policy.
        step_types (numpy.ndarray): A numpy array of `StepType` with shape
            (batch_size,) containing the time step types for all transitions
            in this batch.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
__slots__ = ()
def __new__(cls, env_spec, observations, actions, rewards,
next_observations, env_infos, agent_infos,
step_types): # noqa: D102
# pylint: disable=missing-return-doc, missing-return-type-doc,
# pylint: disable=too-many-branches
inferred_batch_size = len(rewards)
if inferred_batch_size < 1:
raise ValueError(
                'Expected batch dimension of rewards to be at least 1, '
'but got length {} instead.'.format(inferred_batch_size))
first_observation = observations[0]
first_action = actions[0]
# observation
if not env_spec.observation_space.contains(first_observation):
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
first_observation.shape):
raise ValueError('observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
first_observation.shape))
else:
raise ValueError(
'observations must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space, first_observation.shape))
if observations.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of observations to be length {}, '
'but got length {} instead.'.format(inferred_batch_size,
observations.shape[0]))
# next_observation
if not env_spec.observation_space.contains(next_observations[0]):
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
next_observations[0].shape):
raise ValueError('next_observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
next_observations[0].shape))
else:
raise ValueError(
'next_observations must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space,
                        next_observations[0].shape))
if next_observations.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of next_observations to be length {'
'}, but got length {} instead.'.format(
                    inferred_batch_size, next_observations.shape[0]))
# action
if not env_spec.action_space.contains(first_action):
if isinstance(env_spec.action_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.action_space.flat_dim != np.prod(
first_action.shape):
raise ValueError('actions should have the same '
'dimensionality as the action_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.action_space.flat_dim,
first_action.shape))
else:
raise ValueError('actions must conform to action_space {}, '
'but got data with shape {} instead.'.format(
env_spec.action_space,
first_action.shape))
if actions.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of actions to be length {}, but got '
'length {} instead.'.format(inferred_batch_size,
actions.shape[0]))
# rewards
if rewards.shape != (inferred_batch_size, 1):
raise ValueError(
'Rewards tensor must have shape {}, but got shape {} '
'instead.'.format((inferred_batch_size, 1), rewards.shape))
# step_types
if step_types.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of step_types to be length {}, '
'but got '
'length {} instead.'.format(inferred_batch_size,
                                            step_types.shape[0]))
for step_type in step_types:
if not isinstance(step_type, StepType):
raise ValueError(
'Each entry in step_types must be a StepType, but got'
' value type {} instead.'.format(type(step_type)))
# env_infos
for key, val in env_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
'Each entry in env_infos must be a numpy array or '
'dictionary, but got key {} with value type {} '
'instead.'.format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in env_infos must have a batch dimension '
'of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
# agent_infos
for key, val in agent_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
'Each entry in agent_infos must be a numpy array or '
                    'dictionary, but got key {} with value type {} '
                    'instead.'.format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in agent_infos must have a batch '
'dimension of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
return super().__new__(TimeStepBatch, env_spec, observations, actions,
rewards, next_observations, env_infos,
agent_infos, step_types)
@classmethod
def concatenate(cls, *batches):
"""Concatenate two or more :class:`TimeStepBatch`s.
Args:
batches (list[TimeStepBatch]): Batches to concatenate.
Returns:
TimeStepBatch: The concatenation of the batches.
Raises:
ValueError: If no TimeStepBatches are provided.
"""
if len(batches) < 1:
raise ValueError('Please provide at least one TimeStepBatch to '
'concatenate')
env_infos = {
k: np.concatenate([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
return cls(
env_spec=batches[0].env_spec,
observations=np.concatenate(
[batch.observations for batch in batches]),
actions=np.concatenate([batch.actions for batch in batches]),
rewards=np.concatenate([batch.rewards for batch in batches]),
next_observations=np.concatenate(
[batch.next_observations for batch in batches]),
env_infos=env_infos,
agent_infos=agent_infos,
step_types=np.concatenate([batch.step_types for batch in batches]))
def split(self):
"""Split a :class:`~TimeStepBatch` into a list of :class:`~TimeStepBatch`s.
The opposite of concatenate.
Returns:
list[TimeStepBatch]: A list of :class:`TimeStepBatch`s, with one
:class:`~TimeStep` per :class:`~TimeStepBatch`.
"""
time_steps = []
for i in range(len(self.rewards)):
time_step = TimeStepBatch(
env_spec=self.env_spec,
observations=np.asarray([self.observations[i]]),
actions=np.asarray([self.actions[i]]),
rewards=np.asarray([self.rewards[i]]),
next_observations=np.asarray([self.next_observations[i]]),
env_infos={
k: np.asarray([v[i]])
for (k, v) in self.env_infos.items()
},
agent_infos={
k: np.asarray([v[i]])
for (k, v) in self.agent_infos.items()
},
step_types=np.asarray([self.step_types[i]], dtype=StepType))
time_steps.append(time_step)
return time_steps
def to_time_step_list(self):
"""Convert the batch into a list of dictionaries.
        Breaks the :class:`~TimeStepBatch` into a list of single time step
        sample dictionaries. len(rewards) (i.e. the number of discrete time
        steps) dictionaries are returned.
        Returns:
            list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
                observations (numpy.ndarray): Non-flattened array of
                    observations. Typically has shape (batch_size, S^*) (the
                    unflattened state space of the current environment).
                actions (numpy.ndarray): Non-flattened array of actions.
                    Should have shape (batch_size, S^*) (the unflattened
                    action space of the current environment).
                rewards (numpy.ndarray): Array of rewards of shape
                    (batch_size,) (1D array of length batch_size).
                next_observations (numpy.ndarray): Non-flattened array of next
                    observations. Has shape (batch_size, S^*).
                    next_observations[i] was observed by the agent after
                    taking actions[i].
                env_infos (dict): A dict of arbitrary environment state
                    information.
                agent_infos (dict): A dict of arbitrary agent state
                    information. For example, this may contain the hidden
                    states from an RNN policy.
                step_types (numpy.ndarray): A numpy array of `StepType` with
                    shape (batch_size,) containing the time step types for
                    all transitions in this batch.
"""
samples = []
for i in range(len(self.rewards)):
samples.append({
'observations':
np.asarray([self.observations[i]]),
'actions':
np.asarray([self.actions[i]]),
'rewards':
np.asarray([self.rewards[i]]),
'next_observations':
np.asarray([self.next_observations[i]]),
'env_infos':
{k: np.asarray([v[i]])
for (k, v) in self.env_infos.items()},
'agent_infos':
{k: np.asarray([v[i]])
for (k, v) in self.agent_infos.items()},
'step_types':
np.asarray([self.step_types[i]]),
})
return samples
@property
def terminals(self):
"""Get an array of boolean indicating ternianal information.
Returns:
numpy.ndarray: An array of boolean of shape (batch_size, 1)
indicating whether the `StepType is `TERMINAL
"""
return np.array([[s == StepType.TERMINAL] for s in self.step_types])
@classmethod
def from_time_step_list(cls, env_spec, ts_samples):
"""Create a :class:`~TimeStepBatch` from a list of time step dictionaries.
Args:
env_spec (EnvSpec): Specification for the environment from which
this data was sampled.
ts_samples (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
keys:
* observations (numpy.ndarray): Non-flattened array of
observations.
Typically has shape (batch_size, S^*) (the unflattened
state space of the current environment).
* actions (numpy.ndarray): Non-flattened array of actions.
Should have shape (batch_size, S^*) (the unflattened action
space of the current environment).
                * rewards (numpy.ndarray): Array of rewards of shape
                    (batch_size,) (1D array of length batch_size).
                * next_observations (numpy.ndarray): Non-flattened array of
                    next observations. Has shape (batch_size, S^*).
                    next_observations[i] was observed by the agent after
                    taking actions[i].
                * env_infos (dict): A dict of arbitrary environment state
                    information.
                * agent_infos (dict): A dict of arbitrary agent
                    state information. For example, this may contain the
                    hidden states from an RNN policy.
                * step_types (numpy.ndarray): A numpy array of `StepType` with
                    shape (batch_size,) containing the time step types for all
                    transitions in this batch.
Returns:
TimeStepBatch: The concatenation of samples.
Raises:
ValueError: If no dicts are provided.
"""
if len(ts_samples) < 1:
raise ValueError('Please provide at least one dict')
ts_batches = [
TimeStepBatch(env_spec=env_spec,
observations=sample['observations'],
actions=sample['actions'],
rewards=sample['rewards'],
next_observations=sample['next_observations'],
env_infos=sample['env_infos'],
agent_infos=sample['agent_infos'],
step_types=sample['step_types'])
for sample in ts_samples
]
return TimeStepBatch.concatenate(*ts_batches)
@classmethod
def from_episode_batch(cls, batch):
"""Construct a :class:`~TimeStepBatch` from an :class:`~EpisodeBatch`.
Args:
batch (EpisodeBatch): Episode batch to convert.
Returns:
TimeStepBatch: The converted batch.
"""
next_observations = np.concatenate(
tuple([
np.concatenate((eps.observations[1:], eps.last_observations))
for eps in batch.split()
]))
return cls(env_spec=batch.env_spec,
observations=batch.observations,
actions=batch.actions,
rewards=batch.rewards.reshape(-1, 1),
next_observations=next_observations,
env_infos=batch.env_infos,
agent_infos=batch.agent_infos,
step_types=batch.step_types)
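# Sketch (not part of the original module): round-tripping an existing
# EpisodeBatch through `from_episode_batch` and `split`; `batch` is assumed
# to be a populated EpisodeBatch instance.
def _example_episode_to_time_steps(batch):
    ts_batch = TimeStepBatch.from_episode_batch(batch)
    singles = ts_batch.split()
    # One single-transition TimeStepBatch is produced per reward in the batch.
    assert len(singles) == len(ts_batch.rewards)
    return singles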
|
the-stack_0_15817 | #!/usr/local/bin/python3
import argparse
import random
class GenerationConfig:
count = 0
maximum = 0
minimum = 0
def __init__(self, count = 100, maximum = 100, minimum = 0):
if count <= 0:
raise Exception("Count must be positive!")
if minimum >= maximum:
raise Exception("Maximum must be greater than minimum!")
self.count = count
self.maximum = maximum
self.minimum = minimum
def generate(self):
result = []
for i in range(self.count):
result.append(random.randint(self.minimum, self.maximum))
return result
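# Example sketch: using GenerationConfig directly instead of going through
# the command-line arguments; the numbers below are arbitrary.
def example_direct_generation():
    config = GenerationConfig(count=5, maximum=10, minimum=1)
    return config.generate()  # e.g. [3, 7, 10, 1, 6]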
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument("--count", help="number of items to generate", default = 100, required = False)
parser.add_argument("--max", help="maximum value of generated items", default = 100, required = False)
parser.add_argument("--min", help="maximum value of generated items", default = 0, required = False)
args = parser.parse_args()
return GenerationConfig(int(args.count), int(args.max), int(args.min))
def generate(generationConfig):
return generationConfig.generate()
def main():
generationConfig = process_args()
data = generate(generationConfig)
for item in data:
print(item)
if __name__ == '__main__':
main()
|
the-stack_0_15818 | """Utility functions for tensor operations
"""
import numpy as np
from six.moves import xrange
def _check_1d_vector(vector):
"""Check 1D vector shape
    Check the shape of a 1D vector. Arrays with shape
    (n, 1) or (n,) are accepted. Will return
    a 1-dimensional vector.
Parameters
----------
vector : array (n,) or (n, 1)
rank one vector
Returns
-------
vector : array, (n,)
"""
v_shape = vector.shape
if len(v_shape) == 1:
return vector
elif len(v_shape) == 2 and v_shape[1] == 1:
return vector.reshape(v_shape[0],)
else:
raise ValueError("Vector is not 1-d array: shape %s" % str(v_shape))
def _check_square_matrix(matrix):
"""Check 2D matrix shape
    Check that the input is a square 2D matrix. Arrays
    with shape (n, n) are accepted; anything else
    raises a ValueError.
    Parameters
    ----------
    matrix : array, (n, n)
        square matrix
Returns
-------
matrix : array, (n, n)
"""
m_shape = matrix.shape
if len(m_shape) == 2:
if m_shape[0] != m_shape[1]:
raise ValueError("matrix is not square: shape %s" % str(m_shape))
return matrix
else:
raise ValueError("matrix is not 2-d array: shape %s" % str(m_shape))
def rank_1_tensor_3d(a, b, c):
"""Generate a 3-D tensor from 3 1-D vectors
Generate a 3D tensor from 3 rank one vectors
`a`, `b`, and `c`. The returned 3-D tensor is
in unfolded format.
Parameters
----------
a : array, shape (n,)
first rank one vector
b : array, shape (n,)
second rank one vector
c : array, shape (n,)
        third rank one vector
Returns
-------
tensor: array, (n, n * n)
3D tensor in unfolded format. element
(i, j, k) will map to (i, (n * k) + j)
"""
a = _check_1d_vector(a)
b = _check_1d_vector(b)
c = _check_1d_vector(c)
dim = a.shape[0]
# check dimension
if (dim != b.shape[0]) or (dim != c.shape[0]):
raise ValueError("Vector dimension mismatch: (%d, %d, %d)" %
(dim, b.shape[0], c.shape[0]))
    outer = b[:, np.newaxis] * c[:, np.newaxis].T
    tensor = a[:, np.newaxis] * outer.ravel(order='F')[np.newaxis, :]
return tensor
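# Sketch (not part of the original module): checking the unfolded index
# mapping documented above, i.e. element (i, j, k) equals a[i] * b[j] * c[k]
# and is stored at column (n * k) + j.
def _example_rank_1_unfolding():
    a = np.array([1., 2.])
    b = np.array([3., 4.])
    c = np.array([5., 6.])
    tensor = rank_1_tensor_3d(a, b, c)
    n = b.shape[0]
    i, j, k = 1, 0, 1
    assert np.isclose(tensor[i, (n * k) + j], a[i] * b[j] * c[k])
    return tensor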
def tensor_3d_from_vector_matrix(a, b):
"""Generate 3-D tensor from 1-D vector and 2-D matrix
Generate a 3D tensor from a 1-D vector `a` and 2-D
matrix `b`. The returned 3-D tensor is
in unfolded format.
Parameters
----------
a : array, shape (m,)
1-D vector
b : 2-D array, shape (n, p)
2-D matrix
Returns
-------
tensor: array, (m, n * p)
3D tensor in unfolded format.
"""
a = _check_1d_vector(a)
tensor = a[:, np.newaxis] * b.ravel(order='F')[np.newaxis, :]
return tensor
def tensor_3d_from_matrix_vector(b, a):
"""Generate 3-D tensor from 2-D matrix and 1-D vector
This function is similar to `tensor_3d_from_vector_matrix`
function. The only difference is the first argument is 2-D
matrix and the second element is 1-D vector.
Parameters
----------
b : array, shape (m, n)
2-D matrix
a : array, shape (p,)
vector
Returns
-------
tensor : array, shape (m, n * p)
3D tensor in unfolded format.
"""
len_a = a.shape[0]
n_col = b.shape[1]
tensor = np.tile(b, len_a)
for i in xrange(len_a):
col_from = n_col * i
col_to = n_col * (i+1)
tensor[:, col_from:col_to] *= a[i]
return tensor
def tensor_3d_permute(tensor, tensor_shape, a, b, c):
"""Permute the mode of a 3-D tensor
This is a slow implementation to generate 3-D tensor
permutations.
Parameters
----------
tensor : 2D array, shape (n, m * k)
3D tensor in unfolded format
tensor_shape : int triple
Shape of the tensor. Since tensor is in
unfolded format. We need it's real format
to calculate permutation.
    a : int, {1, 2, 3}
        new first index
    b : int, {1, 2, 3}
        new second index
    c : int, {1, 2, 3}
        new third index
    Returns
    -------
permuted_tensor: 2D array
Permuted tensor, element (i_1, i_2, i_3) in
the permuted tensor will be element
(i_a, i_b, i_c) in the original tensor
"""
# TODO: check parameter
a_idx = a - 1
b_idx = b - 1
c_idx = c - 1
# TODO: move this part to cython loop
n_col = tensor_shape[1]
dim1 = tensor_shape[a_idx]
dim2 = tensor_shape[b_idx]
dim3 = tensor_shape[c_idx]
permuted_tensor = np.empty((dim1, dim2 * dim3))
old_idx = np.zeros(3).astype('int32')
for i in xrange(dim1):
for j in xrange(dim2):
for k in xrange(dim3):
old_idx[a_idx] = i
old_idx[b_idx] = j
old_idx[c_idx] = k
old_val = tensor[old_idx[0], (n_col * old_idx[2]) + old_idx[1]]
# new index
permuted_tensor[i, (dim2 * k) + j] = old_val
return permuted_tensor
def khatri_rao_prod(a, b):
"""Khatri-Rao product
Generate Khatri-Rao product from 2 2-D matrix.
Parameters
----------
a : 2D array, shape (n, k)
first matrix
b : 2D array, shape (m, k)
second matrix
Returns
-------
matrix : 2D array, shape (n * m, k)
Khatri-Rao product of `a` and `b`
"""
a_row, a_col = a.shape
b_row, b_col = b.shape
# check column size
if a_col != b_col:
raise ValueError("column dimension mismatch: %d != %d" %
a_col, b_col)
matrix = np.empty((a_row * b_row, a_col))
for i in xrange(a_col):
matrix[:, i] = np.kron(a[:, i], b[:, i])
return matrix
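# Sketch (not part of the original module): the Khatri-Rao product of an
# (n, k) matrix and an (m, k) matrix has shape (n * m, k), and each column is
# the Kronecker product of the corresponding input columns.
def _example_khatri_rao():
    a = np.arange(6.).reshape(3, 2)
    b = np.arange(8.).reshape(4, 2)
    prod = khatri_rao_prod(a, b)
    assert prod.shape == (12, 2)
    assert np.allclose(prod[:, 1], np.kron(a[:, 1], b[:, 1]))
    return prod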
def tensor_3d_prod(tensor, a, b, c):
"""Calculate product of 3D tensor with matrix on each dimension
TODO: move it to test
Parameters
----------
tensor : 3D array, shape (n1, n2, n3)
a : array, (n1, m)
b : array, (n2, n)
c : array, (n3, p)
Returns
-------
t_abc : array, (m, n, p)
tensor(a, b, c)
"""
n1, n2, n3 = tensor.shape
n1_, m = a.shape
n2_, n = b.shape
n3_, p = c.shape
# (n1, n2, p)
t_c = np.dot(tensor, c)
t_bc = np.empty((n1, n, p))
for i in xrange(n1):
# (n, p) = (n, n2) * (n2, p)
t_bc[i, :, :] = np.dot(b.T, t_c[i, :, :])
t_abc = np.empty((m, n, p))
for i in xrange(p):
t_abc[:, :, i] = np.dot(a.T, t_bc[:, :, i])
return t_abc
|
the-stack_0_15821 | """ Analysis code for IGMSurvey objects
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import glob
import json
import pdb
from astropy.table import Table
from linetools import utils as ltu
def calc_slgrid_atan(surveys, Agrid, Bgrid, Cgrid, C2grid):
""" Calculate the sightline grid for a Atan l(z) fit
Breaking this off for bootstrap speed-up
Parameters
----------
surveys : list of DLASurvey objects
Agrid
Bgrid
Cgrid
C2grid
Returns
-------
slgrid : ndarray
Sightline term in likelihood function
"""
# Integrating over the sightlines
slgrid = np.zeros_like(Agrid)
# Int(atan[x-a]) = (a-x) atan(a-x) - 0.5 * ln(a**2 - 2ax + x**2 + 1)
for isurvey in surveys:
slines = isurvey.sightlines
gds = slines['Z_START'] < slines['Z_END']
zstart = slines['Z_START'][gds]
zend = slines['Z_END'][gds]
# Integrate constant term
AAgrid = Agrid * np.sum(zend-zstart)
slgrid += AAgrid
# Integrate second term
for iz in zend:
CCgrid = (Cgrid-iz) * np.arctan(Cgrid-iz) - 0.5 * np.log(
C2grid - 2*Cgrid*iz + iz**2 + 1)
slgrid += Bgrid * CCgrid
if np.min(CCgrid) < -0.1:
pdb.set_trace()
for iz in zstart:
CCgrid = (Cgrid-iz) * np.arctan(Cgrid-iz) - 0.5 * np.log(
C2grid - 2*Cgrid*iz + iz**2 + 1)
slgrid -= Bgrid * CCgrid
# Return
return slgrid
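# Sketch (not part of the original module): a numerical spot-check of the
# antiderivative used above (written here in an equivalent sign convention),
# i.e. d/dz [(z - C)*arctan(z - C) - 0.5*ln(1 + (z - C)**2)] = arctan(z - C);
# the values of C, z and eps are arbitrary.
def _example_check_antiderivative(C=2.0, z=3.5, eps=1.e-6):
    F = lambda x: (x - C) * np.arctan(x - C) - 0.5 * np.log(1. + (x - C)**2)
    numeric = (F(z + eps) - F(z - eps)) / (2. * eps)
    assert np.isclose(numeric, np.arctan(z - C), atol=1.e-5)
    return numeric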
def fit_atan_dla_lz(surveys, nstep=20, bootstrap=True,
nboot=10, nproc=2,
fit_out=None, boot_out=None,
verbose=True):
""" Fit a A + B * atan(z-C) l(z) model to AbsSys data
Writes bootstrap analysis to hard-drive
Code used in Prochaska & Neeleman 2017 for DLAs
Parameters
----------
surveys : list of IGMSurvey objects
If None, a default list is loaded
nstep : int, optional
Steps in each dimension of the grid
bootstrap : bool, optional
Perform bootstrap analysis
nboot : int, optional
Number of bootstrap iterations
nproc : int, optional
Number of processors to use
fit_out : str, optional
Output filename for best fit (JSON)
boot_out : str, optional
Output filename for bootstrap analysis
verbose : bool, optional
Returns
-------
dfits : dict
Best fit parameters
boot_tbl : Table
Returned if bootstrap=True
else return None
"""
# Name and date
# Init
if boot_out is None:
boot_out = './lz_boot.fits.gz'
if fit_out is None:
fit_out = './lz_fit.json'
# Synthesize
all_z = np.concatenate([isurvey.zabs for isurvey in surveys])
ndla = len(all_z)
# Model : l(z) = A + B * atan(C-z)
Aparm = np.linspace(0.05, 0.5, num=nstep).astype(np.float32)
Bparm = np.linspace(0.05, 0.5, num=nstep).astype(np.float32)
Cparm = np.linspace(1., 6., num=nstep).astype(np.float32)
# Generate grids (float32)
Agrid, Bgrid, Cgrid = np.meshgrid(Aparm, Bparm, Cparm, copy=False)
C2grid = Cgrid**2
# Sightline grid
if verbose:
print("Sightline calculation...")
slgrid = calc_slgrid_atan(surveys, Agrid, Bgrid, Cgrid, C2grid)
if bootstrap:
if verbose:
print("Bootstrapping!")
sv_fits = []
rN = np.random.poisson(ndla, size=nboot)
# Boot me
z_list = []
for kk,irN in enumerate(rN):
# Draw nPoisson
rval = (np.random.uniform(size=irN)*ndla).astype(int)
# Draw from all_z
draw_z = all_z[rval]
z_list.append(draw_z)
# Run
if nproc == 1:
            for kk, draw_z in enumerate(z_list):
if verbose:
print("Working on iteration: {:d} of {:d}".format(kk, nboot))
dfits, _, _ = Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, draw_z, write=False)
# Save
sv_fits.append(dfits.copy())
else:
import multiprocessing
pool = multiprocessing.Pool(nproc) # initialize thread pool N threads
inp_list = []
for ii in range(nboot):
inp_list.append(
dict(A=Agrid, B=Bgrid, C=Cgrid, sl=slgrid, z=z_list[ii]))
if verbose:
print("Mapping...")
sv_fits = pool.map(map_Ln_atan, inp_list)
# Write
boot_tbl = Table()
for key in ['A', 'B', 'C']:
boot_tbl[key] = [ifits['lz']['atan'][key] for ifits in sv_fits]
boot_tbl.write(boot_out, overwrite=True)
if verbose:
print("Wrote {:s}".format(boot_out))
else:
boot_tbl = None
# Best
dfits, _, _ = Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, all_z, write=True)
# Finish
return dfits, boot_tbl
def Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, all_z, write=True, verbose=True):
""" Likelihood function for arctan model
Parameters
----------
Agrid
Bgrid
Cgrid
slgrid
all_z
write
Returns
-------
dfits : dict
Contains best fit model
dlagrid : ndarray
for debugging
lngrid : ndarray
"""
# z0 estimate from 21cm surveys
lz_z0 = dict(value=np.mean([0.026, 0.045]), sig=0.01)
# Init
dlagrid = np.zeros_like(Agrid)
# Generate Likelihood for DLAs
np.seterr(invalid='ignore')
for z in all_z:
dlagrid += np.log(Agrid + Bgrid * np.arctan(z-Cgrid))
bad = np.isnan(dlagrid)
dlagrid[bad] = -1e9
# Likelihood
lngrid = dlagrid - slgrid
# z=0
model_z0 = Agrid + Bgrid * np.arctan(0.-Cgrid)
lnP = -1 * (model_z0-lz_z0['value'])**2 / 2 / (lz_z0['sig']**2)
lngrid += lnP
# Best
indices = np.where(lngrid == np.max(lngrid))
best = Agrid[indices][0], Bgrid[indices][0], Cgrid[indices][0]
if verbose:
print('Best fit: A={}, B={}, C={}'.format(best[0], best[1], best[2]))
# Load
dfits = {}
# Write
dfits['lz'] = {}
dfits['lz']['atan'] = dict(A=Agrid[indices][0], B=Bgrid[indices][0], C=Cgrid[indices][0],
form='A + B*atan(z-C)')
# Return
return dfits, dlagrid, lngrid
def map_Ln_atan(map_dict):
""" For multiprocessing the bootstrap
Parameters
----------
map_dict
Returns
-------
"""
dfits, _, _ = Ln_lz_atan(map_dict['A'], map_dict['B'], map_dict['C'],
map_dict['sl'], map_dict['z'], write=False,
verbose=False)
return dfits
def fit_fN_dblpow(NHI, a3_mnx, a4_mnx, Nd_mnx, nstep=100,
Nmin=10**(20.3), Nmax=1e99, verbose=True):
""" Fit a double power-law to an input NHI distribution
Only does the shape
Done in float32 to preserve memory
Code from Prochaska & Neeleman (2017) [and also PHW05]
Parameters
----------
NHI : ndarray
log10 NHI values
a3_mnx : tuple
min/max of lower NHI power-law
a4_mnx : tuple
min/max of upper NHI power-law
Nd_mnx : tuple
min/max of break column in log10
nstep : int, optional
Nmin : float, optional
Minimum NHI in the analysis [usually DLA criterion]
Nmax : float, optional
Maximum NHI in the analysis
Returns
-------
dfits : dict
Contains the fit
best : tuple
Best fit values in grid for Nd, a3, a4
Ndgrid
a3grid
a4grid
lik
"""
# Generate 1D arrays
a3stp = np.linspace(a3_mnx[0], a3_mnx[1], nstep).astype(np.float32)
a4stp = np.linspace(a4_mnx[0], a4_mnx[1], nstep).astype(np.float32)
Ndstp = np.linspace(Nd_mnx[0], Nd_mnx[1], nstep).astype(np.float32)
# Generate grids (float32)
a3grid, a4grid, Ndgrid = np.meshgrid(a3stp, a4stp, Ndstp, copy=False)
# Linear
Ns10 = 10.**Ndgrid
# Calculate denominator
denom = Ns10 * ((1. - (Nmin / Ns10)**(a3grid + 1.)) / (1. + a3grid) + (
(Nmax / Ns10)**(a4grid + 1) - 1.) / (a4grid + 1.))
num = np.zeros_like(Ns10)
# Numerator
# Loop on DLAs
for iNHI10 in 10.**NHI:
# Upper end
high = iNHI10 > Ns10
if np.sum(high) > 0:
num[high] += a4grid[high] * np.log(iNHI10 / Ns10[high])
# Low end
if np.sum(~high) > 0:
num[~high] += a3grid[~high] * np.log(iNHI10 / Ns10[~high])
    # Likelihood (Beware of Signs!)
lik = num - NHI.size * np.log(denom)
mxL = np.max(lik)
indices = np.where(lik == mxL)
best = Ndgrid[indices][0], a3grid[indices][0], a4grid[indices][0]
if verbose:
print('Best fit: Nd={}, a3={}, a4={}'.format(best[0], best[1], best[2]))
# Load
dfits = {}
# Write
dfits['fN'] = {}
dfits['fN']['dpow'] = dict(Nd=Ndgrid[indices][0], a3=a3grid[indices][0], a4=a4grid[indices][0],
form='(N/Nd)**aa with aa=a3 if N<Nd else aa=a4')
# KS Test
ks_test = False
if ks_test:
ns10 = 10**best[0]
dblpow_k = 1. / (ns10 * (1. - (Nmin / Ns10)**(best[1] + 1)) / (1. + best[1]) + (
(Nmax / Ns10)**(best[2] + 1) - 1.) / (best[2] + 1))
dblpow_b1 = best[1]
dblpow_b2 = best[2]
dblpow_nd = ns10
dblpow_nmin = Nmin
noise = 0.02
dNHI = 10**(NHI + noise * np.random.uniform(size=NHI.size))
#ksone, darr, 'x_maxdblpow_kscumf', d, ksprob
return dfits, best, Ndgrid, a3grid, a4grid, lik
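# Sketch (not part of the original module): fitting the double power-law
# shape to synthetic log10 NHI values; the parameter ranges and sample size
# below are invented for illustration only.
def _example_fit_dblpow():
    nhi = np.random.uniform(20.3, 22.0, size=200)
    dfits, best, _, _, _, _ = fit_fN_dblpow(nhi, a3_mnx=(-1.4, -0.2),
                                            a4_mnx=(-6., -2.),
                                            Nd_mnx=(21., 21.8),
                                            nstep=20, verbose=False)
    return dfits['fN']['dpow']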
|
the-stack_0_15822 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Optional
from flask import g, request, Response
from flask_appbuilder.api import expose, permission_name, protect, rison, safe
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from marshmallow import ValidationError
from superset import is_feature_enabled
from superset.charts.filters import ChartFilter
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.dashboards.filters import DashboardAccessFilter
from superset.databases.filters import DatabaseFilter
from superset.models.reports import ReportSchedule
from superset.reports.commands.bulk_delete import BulkDeleteReportScheduleCommand
from superset.reports.commands.create import CreateReportScheduleCommand
from superset.reports.commands.delete import DeleteReportScheduleCommand
from superset.reports.commands.exceptions import (
ReportScheduleBulkDeleteFailedError,
ReportScheduleCreateFailedError,
ReportScheduleDeleteFailedError,
ReportScheduleForbiddenError,
ReportScheduleInvalidError,
ReportScheduleNotFoundError,
ReportScheduleUpdateFailedError,
)
from superset.reports.commands.update import UpdateReportScheduleCommand
from superset.reports.filters import ReportScheduleAllTextFilter
from superset.reports.schemas import (
get_delete_ids_schema,
openapi_spec_methods_override,
ReportSchedulePostSchema,
ReportSchedulePutSchema,
)
from superset.views.base_api import (
BaseSupersetModelRestApi,
RelatedFieldFilter,
statsd_metrics,
)
from superset.views.filters import FilterRelatedOwners
logger = logging.getLogger(__name__)
class ReportScheduleRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(ReportSchedule)
@before_request
def ensure_alert_reports_enabled(self) -> Optional[Response]:
if not is_feature_enabled("ALERT_REPORTS"):
return self.response_404()
return None
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.RELATED,
"bulk_delete", # not using RouteMethod since locally defined
}
class_permission_name = "ReportSchedule"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
resource_name = "report"
allow_browser_login = True
show_columns = [
"id",
"active",
"chart.id",
"chart.slice_name",
"chart.viz_type",
"context_markdown",
"creation_method",
"crontab",
"dashboard.dashboard_title",
"dashboard.id",
"database.database_name",
"database.id",
"description",
"grace_period",
"last_eval_dttm",
"last_state",
"last_value",
"last_value_row_json",
"log_retention",
"name",
"owners.first_name",
"owners.id",
"owners.last_name",
"recipients.id",
"recipients.recipient_config_json",
"recipients.type",
"report_format",
"sql",
"timezone",
"type",
"validator_config_json",
"validator_type",
"working_timeout",
]
show_select_columns = show_columns + [
"chart.datasource_id",
"chart.datasource_type",
]
list_columns = [
"active",
"changed_by.first_name",
"changed_by.last_name",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"created_by.last_name",
"created_on",
"creation_method",
"crontab",
"crontab_humanized",
"description",
"id",
"last_eval_dttm",
"last_state",
"name",
"owners.first_name",
"owners.id",
"owners.last_name",
"recipients.id",
"recipients.type",
"timezone",
"type",
]
add_columns = [
"active",
"chart",
"context_markdown",
"creation_method",
"crontab",
"dashboard",
"database",
"description",
"grace_period",
"log_retention",
"name",
"owners",
"recipients",
"report_format",
"sql",
"timezone",
"type",
"validator_config_json",
"validator_type",
"working_timeout",
]
edit_columns = add_columns
add_model_schema = ReportSchedulePostSchema()
edit_model_schema = ReportSchedulePutSchema()
order_columns = [
"active",
"description",
"created_by.first_name",
"changed_by.first_name",
"changed_on",
"changed_on_delta_humanized",
"created_on",
"crontab",
"last_eval_dttm",
"name",
"type",
"crontab_humanized",
]
search_columns = [
"name",
"active",
"created_by",
"type",
"last_state",
"creation_method",
"dashboard_id",
"chart_id",
]
search_filters = {"name": [ReportScheduleAllTextFilter]}
allowed_rel_fields = {"owners", "chart", "dashboard", "database", "created_by"}
filter_rel_fields = {
"chart": [["id", ChartFilter, lambda: []]],
"dashboard": [["id", DashboardAccessFilter, lambda: []]],
"database": [["id", DatabaseFilter, lambda: []]],
}
text_field_rel_fields = {
"dashboard": "dashboard_title",
"chart": "slice_name",
"database": "database_name",
}
related_field_filters = {
"dashboard": "dashboard_title",
"chart": "slice_name",
"database": "database_name",
"owners": RelatedFieldFilter("first_name", FilterRelatedOwners),
}
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
}
openapi_spec_tag = "Report Schedules"
openapi_spec_methods = openapi_spec_methods_override
@expose("/<int:pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@permission_name("delete")
def delete(self, pk: int) -> Response:
"""Delete a Report Schedule
---
delete:
description: >-
Delete a Report Schedule
parameters:
- in: path
schema:
type: integer
name: pk
description: The report schedule pk
responses:
200:
description: Item deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteReportScheduleCommand(g.user, pk).run()
return self.response(200, message="OK")
except ReportScheduleNotFoundError:
return self.response_404()
except ReportScheduleForbiddenError:
return self.response_403()
except ReportScheduleDeleteFailedError as ex:
logger.error(
"Error deleting report schedule %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/", methods=["POST"])
@protect()
@statsd_metrics
@permission_name("post")
def post(self) -> Response:
"""Creates a new Report Schedule
---
post:
description: >-
Create a new Report Schedule
requestBody:
description: Report Schedule schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Report schedule added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateReportScheduleCommand(g.user, item).run()
return self.response(201, id=new_model.id, result=item)
except ReportScheduleNotFoundError as ex:
return self.response_400(message=str(ex))
except ReportScheduleInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except ReportScheduleCreateFailedError as ex:
logger.error(
"Error creating report schedule %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@permission_name("put")
def put(self, pk: int) -> Response:
"""Updates an Report Schedule
---
put:
description: >-
Updates a Report Schedule
parameters:
- in: path
schema:
type: integer
name: pk
description: The Report Schedule pk
requestBody:
description: Report Schedule schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Report Schedule changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = UpdateReportScheduleCommand(g.user, pk, item).run()
return self.response(200, id=new_model.id, result=item)
except ReportScheduleNotFoundError:
return self.response_404()
except ReportScheduleInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except ReportScheduleForbiddenError:
return self.response_403()
except ReportScheduleUpdateFailedError as ex:
logger.error(
"Error updating report %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
def bulk_delete(self, **kwargs: Any) -> Response:
"""Delete bulk Report Schedule layers
---
delete:
description: >-
Deletes multiple report schedules in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Report Schedule bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteReportScheduleCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d report schedule",
"Deleted %(num)d report schedules",
num=len(item_ids),
),
)
except ReportScheduleNotFoundError:
return self.response_404()
except ReportScheduleForbiddenError:
return self.response_403()
except ReportScheduleBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
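# Illustrative client-side sketch (not part of this module): calling the bulk
# delete endpoint defined above. The /api/v1/report/ path follows from
# resource_name = "report"; the host, auth header and the use of the `prison`
# rison encoder are assumptions made for this example.
#
# import prison
# import requests
# ids = prison.dumps([1, 2, 3])  # -> "!(1,2,3)"
# requests.delete(
#     "https://superset.example.com/api/v1/report/?q=" + ids,
#     headers={"Authorization": "Bearer <access-token>"},
# )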
|
the-stack_0_15824 | import copy
from membase.helper.cluster_helper import ClusterOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator
from .xdcrnewbasetests import XDCRNewBaseTest
from .xdcrnewbasetests import NodeHelper
from .xdcrnewbasetests import Utility, BUCKET_NAME, OPS
from remote.remote_util import RemoteMachineShellConnection
from lib.memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
# Assumption that at least 2 nodes on every cluster
class bidirectional(XDCRNewBaseTest):
def setUp(self):
super(bidirectional, self).setUp()
self.src_cluster = self.get_cb_cluster_by_name('C1')
self.src_master = self.src_cluster.get_master_node()
self.dest_cluster = self.get_cb_cluster_by_name('C2')
self.dest_master = self.dest_cluster.get_master_node()
def tearDown(self):
super(bidirectional, self).tearDown()
def __perform_ops_joint_sets(self):
# Merging the keys as keys are actually replicated.
temp_expires = self._expires
        self._expires = 0 # Assigning it to 0, so that merge_buckets doesn't wait for expiration here.
self.merge_all_buckets()
tasks = []
kv_gen_src = self.src_cluster.get_kv_gen()[OPS.CREATE]
gen_update = BlobGenerator(kv_gen_src.name,
kv_gen_src.seed,
kv_gen_src.value_size,
start=0,
end=int(kv_gen_src.end * (float)(self._perc_upd) / 100))
gen_delete = BlobGenerator(kv_gen_src.name,
kv_gen_src.seed,
kv_gen_src.value_size,
start=int((kv_gen_src.end) * (float)(100 - self._perc_del) / 100),
end=kv_gen_src.end)
if "C1" in self._upd_clusters:
tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires)
if "C2" in self._upd_clusters:
tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires)
if "C1" in self._del_clusters:
tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0)
if "C2" in self._del_clusters:
tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0)
for task in tasks:
task.result()
self._expires = temp_expires
if (self._wait_for_expiration and self._expires) and ("C1" in self._upd_clusters or "C2" in self._upd_clusters):
self.sleep(self._expires)
self.sleep(self._wait_timeout)
"""Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket."""
def load_with_ops(self):
self.setup_xdcr_and_load()
self.perform_update_delete()
self.verify_results()
"""Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket.
Here running incremental load on both cluster1 and cluster2 as specified by the user/conf file"""
def load_with_async_ops(self):
self.setup_xdcr_and_load()
self.async_perform_update_delete()
self.verify_results()
"""Testing Bidirectional load( Loading at source/destination). Failover node at Source/Destination while
Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
Verifying whether XDCR replication is successful on subsequent destination clusters. """
def load_with_async_ops_and_joint_sets(self):
self.setup_xdcr_and_load()
self.async_perform_update_delete()
self.verify_results()
def load_with_async_ops_with_warmup(self):
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node())
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node())
self.sleep(self._wait_timeout)
NodeHelper.wait_warmup_completed(warmupnodes)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
self.verify_results()
def load_with_async_ops_with_warmup_master(self):
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node(master=True))
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node(master=True))
self.sleep(self._wait_timeout)
NodeHelper.wait_warmup_completed(warmupnodes)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
self.verify_results()
def load_with_async_ops_and_joint_sets_with_warmup(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
"Test case does not apply for Ephemeral buckets"
return
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node())
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node())
self.sleep(self._wait_timeout)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
NodeHelper.wait_warmup_completed(warmupnodes)
self.verify_results()
def load_with_async_ops_and_joint_sets_with_warmup_master(self):
self.setup_xdcr_and_load()
warmupnodes = []
if "C1" in self._warmup:
warmupnodes.append(self.src_cluster.warmup_node(master=True))
if "C2" in self._warmup:
warmupnodes.append(self.dest_cluster.warmup_node(master=True))
self.sleep(self._wait_timeout)
self.async_perform_update_delete()
self.sleep(self._wait_timeout // 2)
NodeHelper.wait_warmup_completed(warmupnodes)
self.verify_results()
def load_with_failover(self):
self.setup_xdcr_and_load()
if "C1" in self._failover:
self.src_cluster.failover_and_rebalance_nodes()
if "C2" in self._failover:
self.dest_cluster.failover_and_rebalance_nodes()
self.sleep(self._wait_timeout // 6)
self.perform_update_delete()
self.sleep(300)
self.verify_results()
def load_with_failover_then_add_back(self):
self.setup_xdcr_and_load()
if "C1" in self._failover:
self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
self.src_cluster.add_back_node()
if "C2" in self._failover:
self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
self.dest_cluster.add_back_node()
self.perform_update_delete()
self.verify_results()
def load_with_failover_master(self):
self.setup_xdcr_and_load()
if "C1" in self._failover:
self.src_cluster.failover_and_rebalance_master()
if "C2" in self._failover:
self.dest_cluster.failover_and_rebalance_master()
self.sleep(self._wait_timeout // 6)
self.perform_update_delete()
self.verify_results()
"""Replication with compaction ddocs and view queries on both clusters.
This test begins by loading a given number of items on both clusters.
It creates _num_views as development/production view with default
map view funcs(_is_dev_ddoc = True by default) on both clusters.
Then we disabled compaction for ddoc on src cluster. While we don't reach
expected fragmentation for ddoc on src cluster we update docs and perform
view queries for all views. Then we start compaction when fragmentation
was reached fragmentation_value. When compaction was completed we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
def replication_with_ddoc_compaction(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
self.log.info("Test case does not apply to ephemeral")
return
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
num_views = self._input.param("num_views", 5)
is_dev_ddoc = self._input.param("is_dev_ddoc", True)
fragmentation_value = self._input.param("fragmentation_value", 80)
for bucket in self.src_cluster.get_buckets():
views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
ddoc_name = "ddoc1"
prefix = ("", "dev_")[is_dev_ddoc]
query = {"full_set": "true", "stale": "false"}
tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
for task in tasks:
task.result(self._poll_timeout)
self.src_cluster.disable_compaction()
fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
# generate load until fragmentation reached
while fragmentation_monitor.state != "FINISHED":
# update docs to create fragmentation
self.src_cluster.update_delete_data(OPS.UPDATE)
for view in views:
# run queries to create indexes
self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
fragmentation_monitor.result()
compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')
self.assertTrue(compaction_task.result())
self.verify_results()
def replication_with_view_queries_and_ops(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
self.log.info("Test case does not apply to ephemeral")
return
tasks = []
try:
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
num_views = self._input.param("num_views", 5)
is_dev_ddoc = self._input.param("is_dev_ddoc", True)
for bucket in self.src_cluster.get_buckets():
views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
ddoc_name = "ddoc1"
prefix = ("", "dev_")[is_dev_ddoc]
query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
for task in tasks:
task.result(self._poll_timeout)
tasks = []
# Setting up doc-ops at source nodes
if "C1" in self._upd_clusters:
tasks.extend(self.src_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
if "C1" in self._del_clusters:
tasks.extend(self.src_cluster.async_update_delete(OPS.DELETE, self._perc_del))
if "C2" in self._upd_clusters:
tasks.extend(self.dest_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
if "C2" in self._del_clusters:
tasks.extend(self.dest_cluster.async_update_delete(OPS.DELETE, self._perc_del))
self.sleep(5)
while True:
for view in views:
self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
if {task.state for task in tasks} != {"FINISHED"}:
continue
else:
if self._wait_for_expiration:
if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
self.sleep(self._expires)
break
self.merge_all_buckets()
self.src_cluster.verify_items_count()
self.dest_cluster.verify_items_count()
tasks = []
src_buckets = self.src_cluster.get_buckets()
dest_buckets = self.dest_cluster.get_buckets()
for view in views:
tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, src_buckets[0].kvs[1].__len__()))
tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, dest_buckets[0].kvs[1].__len__()))
for task in tasks:
task.result(self._poll_timeout)
self.verify_results()
finally:
# For timeout error, all tasks to be cancelled
# Before proceeding to next test
for task in tasks:
task.cancel()
"""Replication with disabled/enabled ddoc compaction on both clusters.
This test begins by loading a given number of items on both clusters.
    Then we disable or enable compaction on both clusters (set via params).
    Then we mutate and delete data on the clusters 3 times. After deletion we recreate
    deleted items. Once the data has been changed 3 times we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
def replication_with_disabled_ddoc_compaction(self):
self.setup_xdcr()
self.src_cluster.load_all_buckets(self._num_items)
self.dest_cluster.load_all_buckets(self._num_items)
if "C1" in self._disable_compaction:
self.src_cluster.disable_compaction()
if "C2" in self._disable_compaction:
self.dest_cluster.disable_compaction()
# perform doc's ops 3 times to increase rev number
for _ in range(3):
self.async_perform_update_delete()
# wait till deletes have been sent to recreate
self.sleep(60)
# restore(re-creating) deleted items
if 'C1' in self._del_clusters:
c1_kv_gen = self.src_cluster.get_kv_gen()
c1_gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
if self._expires:
# if expiration set, recreate those keys before
# trying to update
c1_gen_update = copy.deepcopy(c1_kv_gen[OPS.UPDATE])
self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_update)
self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_delete)
if 'C2' in self._del_clusters:
c2_kv_gen = self.dest_cluster.get_kv_gen()
c2_gen_delete = copy.deepcopy(c2_kv_gen[OPS.DELETE])
if self._expires:
c2_gen_update = copy.deepcopy(c2_kv_gen[OPS.UPDATE])
self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_update)
self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_delete)
# wait till we recreate deleted keys before we can delete/update
self.sleep(300)
self.verify_results()
def replication_while_rebooting_a_non_master_src_dest_node(self):
bucket_type = self._input.param("bucket_type", "membase")
if bucket_type == "ephemeral":
self.log.info("Test case does not apply to ephemeral")
return
self.setup_xdcr_and_load()
self.async_perform_update_delete()
self.sleep(self._wait_timeout)
reboot_node_dest = self.dest_cluster.reboot_one_node(self)
NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
reboot_node_src = self.src_cluster.reboot_one_node(self)
NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)
self.sleep(120)
ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
self.verify_results()
def test_disk_full(self):
self.setup_xdcr_and_load()
self.verify_results()
self.sleep(self._wait_timeout)
zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
try:
for node in [self.src_master, self.dest_master]:
self.shell = RemoteMachineShellConnection(node)
self.shell.execute_cbcollect_info(zip_file)
if self.shell.extract_remote_info().type.lower() != "windows":
command = "unzip %s" % (zip_file)
output, error = self.shell.execute_command(command)
self.shell.log_command_output(output, error)
if len(error) > 0:
raise Exception("unable to unzip the files. Check unzip command output for help")
cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
output, _ = self.shell.execute_command(cmd)
else:
cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
self.src_master.ip,
self.src_master.rest_username,
self.src_master.rest_password)
output, _ = self.shell.execute_command(cmd)
self.assertNotEqual(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip)
self.log.info("Full disk warning generated as expected in %s" % node.ip)
self.shell.delete_files(zip_file)
self.shell.delete_files("cbcollect_info*")
except Exception as e:
self.log.info(e)
def test_rollback(self):
bucket = self.src_cluster.get_buckets()[0]
src_nodes = self.src_cluster.get_nodes()
dest_nodes = self.dest_cluster.get_nodes()
nodes = src_nodes + dest_nodes
# Stop Persistence on Node A & Node B
for node in nodes:
mem_client = MemcachedClientHelper.direct_client(node, bucket)
mem_client.stop_persistence()
goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
+ '/goxdcr.log*'
self.setup_xdcr()
self.src_cluster.pause_all_replications()
self.dest_cluster.pause_all_replications()
gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
self.src_cluster.load_all_buckets_from_generator(gen)
gen = BlobGenerator("C2-", "C2-", self._value_size, end=self._num_items)
self.dest_cluster.load_all_buckets_from_generator(gen)
self.src_cluster.resume_all_replications()
self.dest_cluster.resume_all_replications()
# Perform mutations on the bucket
self.async_perform_update_delete()
rest1 = RestConnection(self.src_cluster.get_master_node())
rest2 = RestConnection(self.dest_cluster.get_master_node())
# Fetch count of docs in src and dest cluster
_count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
_count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
self.log.info("Before rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))
# Kill memcached on Node A so that Node B becomes master
shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
shell.kill_memcached()
shell = RemoteMachineShellConnection(self.dest_cluster.get_master_node())
shell.kill_memcached()
# Start persistence on Node B
mem_client = MemcachedClientHelper.direct_client(src_nodes[1], bucket)
mem_client.start_persistence()
mem_client = MemcachedClientHelper.direct_client(dest_nodes[1], bucket)
mem_client.start_persistence()
# Failover Node B
failover_task = self.src_cluster.async_failover()
failover_task.result()
failover_task = self.dest_cluster.async_failover()
failover_task.result()
# Wait for Failover & rollback to complete
self.sleep(60)
# Fetch count of docs in src and dest cluster
_count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
_count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
self.log.info("After rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))
self.assertTrue(self.src_cluster.wait_for_outbound_mutations(),
"Mutations in source cluster not replicated to target after rollback")
self.assertTrue(self.dest_cluster.wait_for_outbound_mutations(),
"Mutations in target cluster not replicated to source after rollback")
_, count = NodeHelper.check_goxdcr_log(
src_nodes[0],
"Received rollback from DCP stream",
goxdcr_log)
self.assertGreater(count, 0, "rollback did not happen as expected")
self.log.info("rollback happened as expected")
_, count = NodeHelper.check_goxdcr_log(
dest_nodes[0],
"Received rollback from DCP stream",
goxdcr_log)
self.assertGreater(count, 0, "rollback did not happen as expected")
self.log.info("rollback happened as expected")
def test_scramsha(self):
"""
Creates a new bi-xdcr replication with scram-sha
Make sure to pass use-scramsha=True
from command line
"""
self.setup_xdcr()
self.sleep(60, "wait before checking logs")
for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]:
_, count = NodeHelper.check_goxdcr_log(node,
"HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
if count <= 0:
self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
else:
self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
self.verify_results()
def test_update_to_scramsha_auth(self):
"""
Start with ordinary replication, then switch to use scram_sha_auth
Search for success log stmtsS
"""
_, old_count = NodeHelper.check_goxdcr_log(self.src_cluster.get_master_node(),
"HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
self.setup_xdcr()
# modify remote cluster ref to use scramsha
for remote_cluster in self.src_cluster.get_remote_clusters()+self.dest_cluster.get_remote_clusters():
remote_cluster.use_scram_sha_auth()
self.sleep(60, "wait before checking the logs for using scram-sha")
for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]:
_, count = NodeHelper.check_goxdcr_log(node, "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60)
if count <= old_count:
self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip))
else:
self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip))
self.verify_results() |
the-stack_0_15825 | # -*- coding: utf-8 -*-
"""
Provides the service module for systemd
.. versionadded:: 0.10.0
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
.. important::
This is an implementation of virtual 'service' module. As such, you must
call it under the name 'service' and NOT 'systemd'. You can see that also
in the examples below.
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import fnmatch
import glob
import logging
import os
import re
import shlex
# Import Salt libs
import salt.utils.files
import salt.utils.itertools
import salt.utils.path
import salt.utils.stringutils
import salt.utils.systemd
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
__func_alias__ = {
"reload_": "reload",
"unmask_": "unmask",
}
SYSTEM_CONFIG_PATHS = ("/lib/systemd/system", "/usr/lib/systemd/system")
LOCAL_CONFIG_PATH = "/etc/systemd/system"
INITSCRIPT_PATH = "/etc/init.d"
VALID_UNIT_TYPES = (
"service",
"socket",
"device",
"mount",
"automount",
"swap",
"target",
"path",
"timer",
)
# Define the module's virtual name
__virtualname__ = "service"
# Disable check for string substitution
# pylint: disable=E1321
def __virtual__():
"""
Only work on systems that have been booted with systemd
"""
if __grains__.get("kernel") == "Linux" and salt.utils.systemd.booted(__context__):
return __virtualname__
return (
False,
"The systemd execution module failed to load: only available on Linux "
"systems which have been booted with systemd.",
)
def _root(path, root):
"""
Relocate an absolute path to a new root directory.
"""
if root:
return os.path.join(root, os.path.relpath(path, os.path.sep))
else:
return path
def _canonical_unit_name(name):
"""
Build a canonical unit name treating unit names without one
of the valid suffixes as a service.
"""
if not isinstance(name, six.string_types):
name = six.text_type(name)
if any(name.endswith(suffix) for suffix in VALID_UNIT_TYPES):
return name
return "%s.service" % name
def _check_available(name):
"""
Returns boolean telling whether or not the named service is available
"""
_status = _systemctl_status(name)
sd_version = salt.utils.systemd.version(__context__)
if sd_version is not None and sd_version >= 231:
# systemd 231 changed the output of "systemctl status" for unknown
# services, and also made it return an exit status of 4. If we are on
# a new enough version, check the retcode, otherwise fall back to
# parsing the "systemctl status" output.
# See: https://github.com/systemd/systemd/pull/3385
# Also: https://github.com/systemd/systemd/commit/3dced37
return 0 <= _status["retcode"] < 4
out = _status["stdout"].lower()
if "could not be found" in out:
# Catch cases where the systemd version is < 231 but the return code
# and output changes have been backported (e.g. RHEL 7.3).
return False
for line in salt.utils.itertools.split(out, "\n"):
match = re.match(r"\s+loaded:\s+(\S+)", line)
if match:
ret = match.group(1) != "not-found"
break
else:
raise CommandExecutionError("Failed to get information on unit '%s'" % name)
return ret
def _check_for_unit_changes(name):
"""
Check for modified/updated unit files, and run a daemon-reload if any are
found.
"""
contextkey = "systemd._check_for_unit_changes.{0}".format(name)
if contextkey not in __context__:
if _untracked_custom_unit_found(name) or _unit_file_changed(name):
systemctl_reload()
# Set context key to avoid repeating this check
__context__[contextkey] = True
def _check_unmask(name, unmask, unmask_runtime, root=None):
"""
Common code for conditionally removing masks before making changes to a
service's state.
"""
if unmask:
unmask_(name, runtime=False, root=root)
if unmask_runtime:
unmask_(name, runtime=True, root=root)
def _clear_context():
"""
Remove context
"""
# Using list() here because modifying a dictionary during iteration will
# raise a RuntimeError.
for key in list(__context__):
try:
if key.startswith("systemd._systemctl_status."):
__context__.pop(key)
except AttributeError:
continue
def _default_runlevel():
"""
Try to figure out the default runlevel. It is kept in
/etc/init/rc-sysinit.conf, but can be overridden with entries
in /etc/inittab, or via the kernel command-line at boot
"""
# Try to get the "main" default. If this fails, throw up our
# hands and just guess "2", because things are horribly broken
try:
with salt.utils.files.fopen("/etc/init/rc-sysinit.conf") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith("env DEFAULT_RUNLEVEL"):
runlevel = line.split("=")[-1].strip()
except Exception: # pylint: disable=broad-except
return "2"
# Look for an optional "legacy" override in /etc/inittab
try:
with salt.utils.files.fopen("/etc/inittab") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if not line.startswith("#") and "initdefault" in line:
runlevel = line.split(":")[1]
except Exception: # pylint: disable=broad-except
pass
# The default runlevel can also be set via the kernel command-line.
try:
valid_strings = set(
("0", "1", "2", "3", "4", "5", "6", "s", "S", "-s", "single")
)
with salt.utils.files.fopen("/proc/cmdline") as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
for arg in line.strip().split():
if arg in valid_strings:
runlevel = arg
break
except Exception: # pylint: disable=broad-except
pass
return runlevel
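# Illustrative example (hypothetical file contents): an /etc/inittab line such as
# "id:3:initdefault:" is split on ":" above, yielding a default runlevel of "3".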
def _get_systemd_services(root):
"""
Use os.listdir() to get all the unit files
"""
ret = set()
for path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,):
# Make sure user has access to the path, and if the path is a
# link it's likely that another entry in SYSTEM_CONFIG_PATHS
# or LOCAL_CONFIG_PATH points to it, so we can ignore it.
path = _root(path, root)
if os.access(path, os.R_OK) and not os.path.islink(path):
for fullname in os.listdir(path):
try:
unit_name, unit_type = fullname.rsplit(".", 1)
except ValueError:
continue
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == "service" else fullname)
return ret
def _get_sysv_services(root, systemd_services=None):
"""
Use os.listdir() and os.access() to get all the initscripts
"""
initscript_path = _root(INITSCRIPT_PATH, root)
try:
sysv_services = os.listdir(initscript_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
elif exc.errno == errno.EACCES:
log.error(
"Unable to check sysvinit scripts, permission denied to %s",
initscript_path,
)
else:
log.error(
"Error %d encountered trying to check sysvinit scripts: %s",
exc.errno,
exc.strerror,
)
return []
if systemd_services is None:
systemd_services = _get_systemd_services(root)
ret = []
for sysv_service in sysv_services:
if os.access(os.path.join(initscript_path, sysv_service), os.X_OK):
if sysv_service in systemd_services:
log.debug(
"sysvinit script '%s' found, but systemd unit "
"'%s.service' already exists",
sysv_service,
sysv_service,
)
continue
ret.append(sysv_service)
return ret
def _get_service_exec():
"""
Returns the path to the sysv service manager (either update-rc.d or
chkconfig)
"""
contextkey = "systemd._get_service_exec"
if contextkey not in __context__:
executables = ("update-rc.d", "chkconfig")
for executable in executables:
service_exec = salt.utils.path.which(executable)
if service_exec is not None:
break
else:
raise CommandExecutionError(
"Unable to find sysv service manager (tried {0})".format(
", ".join(executables)
)
)
__context__[contextkey] = service_exec
return __context__[contextkey]
def _runlevel():
"""
Return the current runlevel
"""
contextkey = "systemd._runlevel"
if contextkey in __context__:
return __context__[contextkey]
out = __salt__["cmd.run"]("runlevel", python_shell=False, ignore_retcode=True)
try:
ret = out.split()[1]
except IndexError:
# The runlevel is unknown, return the default
ret = _default_runlevel()
__context__[contextkey] = ret
return ret
def _strip_scope(msg):
"""
Strip unnecessary message about running the command with --scope from
stderr so that we can raise an exception with the remaining stderr text.
"""
ret = []
for line in msg.splitlines():
if not line.endswith(".scope"):
ret.append(line)
return "\n".join(ret).strip()
def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False, root=None):
"""
Build a systemctl command line. Treat unit names without one
of the valid suffixes as a service.
"""
ret = []
if (
systemd_scope
and salt.utils.systemd.has_scope(__context__)
and __salt__["config.get"]("systemd.scope", True)
):
ret.extend(["systemd-run", "--scope"])
ret.append("systemctl")
if no_block:
ret.append("--no-block")
if root:
ret.extend(["--root", root])
if isinstance(action, six.string_types):
action = shlex.split(action)
ret.extend(action)
if name is not None:
ret.append(_canonical_unit_name(name))
if "status" in ret:
ret.extend(["-n", "0"])
return ret
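# Hypothetical example of the command list built above, assuming scope support is
# available and the ``systemd.scope`` config option is not disabled:
#   _systemctl_cmd("start", "nginx", systemd_scope=True, no_block=True)
#   -> ["systemd-run", "--scope", "systemctl", "--no-block", "start", "nginx.service"]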
def _systemctl_status(name):
"""
Helper function which leverages __context__ to keep from running 'systemctl
status' more than once.
"""
contextkey = "systemd._systemctl_status.%s" % name
if contextkey in __context__:
return __context__[contextkey]
__context__[contextkey] = __salt__["cmd.run_all"](
_systemctl_cmd("status", name),
python_shell=False,
redirect_stderr=True,
ignore_retcode=True,
)
return __context__[contextkey]
def _sysv_enabled(name, root):
"""
    A System-V style service is assumed enabled if the "startup" symlink
    (starts with "S") to its script is found in /etc/rcN.d for the current
    runlevel N.
"""
# Find exact match (disambiguate matches like "S01anacron" for cron)
rc = _root("/etc/rc{}.d/S*{}".format(_runlevel(), name), root)
for match in glob.glob(rc):
if re.match(r"S\d{,2}%s" % name, os.path.basename(match)):
return True
return False
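# Illustrative example (hypothetical paths): with the current runlevel "3" and
# name "cron", the glob above becomes "/etc/rc3.d/S*cron"; a link such as
# "/etc/rc3.d/S01cron" then matches r"S\d{,2}cron" and the service is reported enabled.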
def _untracked_custom_unit_found(name, root=None):
"""
If the passed service name is not available, but a unit file exist in
/etc/systemd/system, return True. Otherwise, return False.
"""
system = _root("/etc/systemd/system", root)
unit_path = os.path.join(system, _canonical_unit_name(name))
return os.access(unit_path, os.R_OK) and not _check_available(name)
def _unit_file_changed(name):
"""
Returns True if systemctl reports that the unit file has changed, otherwise
returns False.
"""
status = _systemctl_status(name)["stdout"].lower()
return "'systemctl daemon-reload'" in status
def systemctl_reload():
"""
.. versionadded:: 0.15.0
Reloads systemctl, an action needed whenever unit files are updated.
CLI Example:
.. code-block:: bash
salt '*' service.systemctl_reload
"""
out = __salt__["cmd.run_all"](
_systemctl_cmd("--system daemon-reload"),
python_shell=False,
redirect_stderr=True,
)
if out["retcode"] != 0:
raise CommandExecutionError(
"Problem performing systemctl daemon-reload: %s" % out["stdout"]
)
_clear_context()
return True
def get_running():
"""
Return a list of all running services, so far as systemd is concerned
CLI Example:
.. code-block:: bash
salt '*' service.get_running
"""
ret = set()
# Get running systemd units
out = __salt__["cmd.run"](
_systemctl_cmd("--full --no-legend --no-pager"),
python_shell=False,
ignore_retcode=True,
)
for line in salt.utils.itertools.split(out, "\n"):
try:
comps = line.strip().split()
fullname = comps[0]
if len(comps) > 3:
active_state = comps[3]
except ValueError as exc:
log.error(exc)
continue
else:
if active_state != "running":
continue
try:
unit_name, unit_type = fullname.rsplit(".", 1)
except ValueError:
continue
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == "service" else fullname)
return sorted(ret)
def get_enabled(root=None):
"""
Return a list of all enabled services
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
"""
ret = set()
# Get enabled systemd units. Can't use --state=enabled here because it's
# not present until systemd 216.
out = __salt__["cmd.run"](
_systemctl_cmd("--full --no-legend --no-pager list-unit-files", root=root),
python_shell=False,
ignore_retcode=True,
)
for line in salt.utils.itertools.split(out, "\n"):
try:
fullname, unit_state = line.strip().split(None, 1)
except ValueError:
continue
else:
if unit_state != "enabled":
continue
try:
unit_name, unit_type = fullname.rsplit(".", 1)
except ValueError:
continue
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == "service" else fullname)
# Add in any sysvinit services that are enabled
ret.update(set([x for x in _get_sysv_services(root) if _sysv_enabled(x, root)]))
return sorted(ret)
def get_disabled(root=None):
"""
Return a list of all disabled services
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
"""
ret = set()
# Get disabled systemd units. Can't use --state=disabled here because it's
# not present until systemd 216.
out = __salt__["cmd.run"](
_systemctl_cmd("--full --no-legend --no-pager list-unit-files", root=root),
python_shell=False,
ignore_retcode=True,
)
for line in salt.utils.itertools.split(out, "\n"):
try:
fullname, unit_state = line.strip().split(None, 1)
except ValueError:
continue
else:
if unit_state != "disabled":
continue
try:
unit_name, unit_type = fullname.rsplit(".", 1)
except ValueError:
continue
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == "service" else fullname)
# Add in any sysvinit services that are disabled
ret.update(set([x for x in _get_sysv_services(root) if not _sysv_enabled(x, root)]))
return sorted(ret)
def get_static(root=None):
"""
.. versionadded:: 2015.8.5
Return a list of all static services
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.get_static
"""
ret = set()
# Get static systemd units. Can't use --state=static here because it's
# not present until systemd 216.
out = __salt__["cmd.run"](
_systemctl_cmd("--full --no-legend --no-pager list-unit-files", root=root),
python_shell=False,
ignore_retcode=True,
)
for line in salt.utils.itertools.split(out, "\n"):
try:
fullname, unit_state = line.strip().split(None, 1)
except ValueError:
continue
else:
if unit_state != "static":
continue
try:
unit_name, unit_type = fullname.rsplit(".", 1)
except ValueError:
continue
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == "service" else fullname)
# sysvinit services cannot be static
return sorted(ret)
def get_all(root=None):
"""
Return a list of all available services
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.get_all
"""
ret = _get_systemd_services(root)
ret.update(set(_get_sysv_services(root, systemd_services=ret)))
return sorted(ret)
def available(name):
"""
.. versionadded:: 0.10.4
Check that the given service is available taking into account template
units.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
"""
_check_for_unit_changes(name)
return _check_available(name)
def missing(name):
"""
.. versionadded:: 2014.1.0
The inverse of :py:func:`service.available
<salt.modules.systemd.available>`. Returns ``True`` if the specified
service is not available, otherwise returns ``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
"""
return not available(name)
def unmask_(name, runtime=False, root=None):
"""
.. versionadded:: 2015.5.0
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Unmask the specified service with systemd
runtime : False
Set to ``True`` to unmask this service only until the next reboot
.. versionadded:: 2017.7.0
In previous versions, this function would remove whichever mask was
identified by running ``systemctl is-enabled`` on the service.
However, since it is possible to both have both indefinite and
runtime masks on a service simultaneously, this function now
removes a runtime mask only when this argument is set to ``True``,
and otherwise removes an indefinite mask.
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.unmask foo
salt '*' service.unmask foo runtime=True
"""
_check_for_unit_changes(name)
if not masked(name, runtime, root=root):
log.debug("Service '%s' is not %smasked", name, "runtime-" if runtime else "")
return True
cmd = "unmask --runtime" if runtime else "unmask"
out = __salt__["cmd.run_all"](
_systemctl_cmd(cmd, name, systemd_scope=True, root=root),
python_shell=False,
redirect_stderr=True,
)
if out["retcode"] != 0:
raise CommandExecutionError("Failed to unmask service '%s'" % name)
return True
def mask(name, runtime=False, root=None):
"""
.. versionadded:: 2015.5.0
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Mask the specified service with systemd
runtime : False
Set to ``True`` to mask this service only until the next reboot
.. versionadded:: 2015.8.5
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.mask foo
salt '*' service.mask foo runtime=True
"""
_check_for_unit_changes(name)
cmd = "mask --runtime" if runtime else "mask"
out = __salt__["cmd.run_all"](
_systemctl_cmd(cmd, name, systemd_scope=True, root=root),
python_shell=False,
redirect_stderr=True,
)
if out["retcode"] != 0:
raise CommandExecutionError(
"Failed to mask service '%s'" % name, info=out["stdout"]
)
return True
def masked(name, runtime=False, root=None):
"""
.. versionadded:: 2015.8.0
.. versionchanged:: 2015.8.5
The return data for this function has changed. If the service is
masked, the return value will now be the output of the ``systemctl
is-enabled`` command (so that a persistent mask can be distinguished
from a runtime mask). If the service is not masked, then ``False`` will
be returned.
.. versionchanged:: 2017.7.0
This function now returns a boolean telling the user whether a mask
specified by the new ``runtime`` argument is set. If ``runtime`` is
``False``, this function will return ``True`` if an indefinite mask is
set for the named service (otherwise ``False`` will be returned). If
        ``runtime`` is ``True``, this function will return ``True`` if a
runtime mask is set, otherwise ``False``.
Check whether or not a service is masked
runtime : False
Set to ``True`` to check for a runtime mask
.. versionadded:: 2017.7.0
In previous versions, this function would simply return the output
of ``systemctl is-enabled`` when the service was found to be
            masked. However, since it is possible to have both indefinite
and runtime masks on a service simultaneously, this function now
only checks for runtime masks if this argument is set to ``True``.
Otherwise, it will check for an indefinite mask.
root
Enable/disable/mask unit files in the specified root directory
CLI Examples:
.. code-block:: bash
salt '*' service.masked foo
salt '*' service.masked foo runtime=True
"""
_check_for_unit_changes(name)
root_dir = _root("/run" if runtime else "/etc", root)
link_path = os.path.join(root_dir, "systemd", "system", _canonical_unit_name(name))
try:
return os.readlink(link_path) == "/dev/null"
except OSError as exc:
if exc.errno == errno.ENOENT:
log.trace(
"Path %s does not exist. This is normal if service '%s' is "
"not masked or does not exist.",
link_path,
name,
)
elif exc.errno == errno.EINVAL:
log.error(
"Failed to check mask status for service %s. Path %s is a "
"file, not a symlink. This could be caused by changes in "
"systemd and is probably a bug in Salt. Please report this "
"to the developers.",
name,
link_path,
)
return False
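# Illustrative example (hypothetical service): for masked("foo") the path checked
# above is /etc/systemd/system/foo.service, while masked("foo", runtime=True) checks
# /run/systemd/system/foo.service; either one must be a symlink to /dev/null.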
def start(name, no_block=False, unmask=False, unmask_runtime=False):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Start the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to start
the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
starting. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to start the
service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
starting. This behavior is no longer the default.
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
"""
_check_for_unit_changes(name)
_check_unmask(name, unmask, unmask_runtime)
ret = __salt__["cmd.run_all"](
_systemctl_cmd("start", name, systemd_scope=True, no_block=no_block),
python_shell=False,
)
if ret["retcode"] != 0:
# Instead of returning a bool, raise an exception so that we can
# include the error message in the return data. This helps give more
# information to the user in instances where the service is masked.
raise CommandExecutionError(_strip_scope(ret["stderr"]))
return True
def stop(name, no_block=False):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Stop the specified service with systemd
no_block : False
        Set to ``True`` to stop the service using ``--no-block``.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
"""
_check_for_unit_changes(name)
# Using cmd.run_all instead of cmd.retcode here to make unit tests easier
return (
__salt__["cmd.run_all"](
_systemctl_cmd("stop", name, systemd_scope=True, no_block=no_block),
python_shell=False,
)["retcode"]
== 0
)
def restart(name, no_block=False, unmask=False, unmask_runtime=False):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Restart the specified service with systemd
no_block : False
        Set to ``True`` to restart the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to
restart the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
restarting. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to restart
the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
restarting. This behavior is no longer the default.
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
"""
_check_for_unit_changes(name)
_check_unmask(name, unmask, unmask_runtime)
ret = __salt__["cmd.run_all"](
_systemctl_cmd("restart", name, systemd_scope=True, no_block=no_block),
python_shell=False,
)
if ret["retcode"] != 0:
# Instead of returning a bool, raise an exception so that we can
# include the error message in the return data. This helps give more
# information to the user in instances where the service is masked.
raise CommandExecutionError(_strip_scope(ret["stderr"]))
return True
def reload_(name, no_block=False, unmask=False, unmask_runtime=False):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Reload the specified service with systemd
no_block : False
Set to ``True`` to reload the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to
reload the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
reloading. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to reload
the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
reloading. This behavior is no longer the default.
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
"""
_check_for_unit_changes(name)
_check_unmask(name, unmask, unmask_runtime)
ret = __salt__["cmd.run_all"](
_systemctl_cmd("reload", name, systemd_scope=True, no_block=no_block),
python_shell=False,
)
if ret["retcode"] != 0:
# Instead of returning a bool, raise an exception so that we can
# include the error message in the return data. This helps give more
# information to the user in instances where the service is masked.
raise CommandExecutionError(_strip_scope(ret["stderr"]))
return True
def force_reload(name, no_block=True, unmask=False, unmask_runtime=False):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. versionadded:: 0.12.0
Force-reload the specified service with systemd
    no_block : True
        Set to ``True`` to force-reload the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to
force-reload the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
force-reloading. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to
force-reload the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
force-reloading. This behavior is no longer the default.
CLI Example:
.. code-block:: bash
salt '*' service.force_reload <service name>
"""
_check_for_unit_changes(name)
_check_unmask(name, unmask, unmask_runtime)
ret = __salt__["cmd.run_all"](
_systemctl_cmd("force-reload", name, systemd_scope=True, no_block=no_block),
python_shell=False,
)
if ret["retcode"] != 0:
# Instead of returning a bool, raise an exception so that we can
# include the error message in the return data. This helps give more
# information to the user in instances where the service is masked.
raise CommandExecutionError(_strip_scope(ret["stderr"]))
return True
# The unused sig argument is required to maintain consistency with the API
# established by Salt's service management states.
def status(name, sig=None): # pylint: disable=unused-argument
"""
Return the status for a service via systemd.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Not implemented
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
"""
contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
_check_for_unit_changes(service)
results[service] = (
__salt__["cmd.retcode"](
_systemctl_cmd("is-active", service),
python_shell=False,
ignore_retcode=True,
)
== 0
)
if contains_globbing:
return results
return results[name]
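# Illustrative example (hypothetical service names): status("salt*") could return
# {"salt-minion": True, "salt-api": False}, whereas status("salt-minion") collapses
# to a single boolean via the non-globbing branch above.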
# **kwargs is required to maintain consistency with the API established by
# Salt's service management states.
def enable(
name, no_block=False, unmask=False, unmask_runtime=False, root=None, **kwargs
): # pylint: disable=unused-argument
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Enable the named service to start when the system boots
no_block : False
        Set to ``True`` to enable the service using ``--no-block``.
.. versionadded:: 2017.7.0
unmask : False
Set to ``True`` to remove an indefinite mask before attempting to
enable the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
enabling. This behavior is no longer the default.
unmask_runtime : False
Set to ``True`` to remove a runtime mask before attempting to enable
the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
enabling. This behavior is no longer the default.
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
"""
_check_for_unit_changes(name)
_check_unmask(name, unmask, unmask_runtime, root)
if name in _get_sysv_services(root):
cmd = []
if salt.utils.systemd.has_scope(__context__) and __salt__["config.get"](
"systemd.scope", True
):
cmd.extend(["systemd-run", "--scope"])
service_exec = _get_service_exec()
if service_exec.endswith("/update-rc.d"):
cmd.extend([service_exec, "-f", name, "defaults", "99"])
elif service_exec.endswith("/chkconfig"):
cmd.extend([service_exec, name, "on"])
return (
__salt__["cmd.retcode"](cmd, python_shell=False, ignore_retcode=True) == 0
)
ret = __salt__["cmd.run_all"](
_systemctl_cmd(
"enable", name, systemd_scope=True, no_block=no_block, root=root
),
python_shell=False,
ignore_retcode=True,
)
if ret["retcode"] != 0:
# Instead of returning a bool, raise an exception so that we can
# include the error message in the return data. This helps give more
# information to the user in instances where the service is masked.
raise CommandExecutionError(_strip_scope(ret["stderr"]))
return True
# The unused kwargs argument is required to maintain consistency with the API
# established by Salt's service management states.
def disable(
name, no_block=False, root=None, **kwargs
): # pylint: disable=unused-argument
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Disable the named service to not start when the system boots
no_block : False
        Set to ``True`` to disable the service using ``--no-block``.
.. versionadded:: 2017.7.0
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
"""
_check_for_unit_changes(name)
if name in _get_sysv_services(root):
cmd = []
if salt.utils.systemd.has_scope(__context__) and __salt__["config.get"](
"systemd.scope", True
):
cmd.extend(["systemd-run", "--scope"])
service_exec = _get_service_exec()
if service_exec.endswith("/update-rc.d"):
cmd.extend([service_exec, "-f", name, "remove"])
elif service_exec.endswith("/chkconfig"):
cmd.extend([service_exec, name, "off"])
return (
__salt__["cmd.retcode"](cmd, python_shell=False, ignore_retcode=True) == 0
)
# Using cmd.run_all instead of cmd.retcode here to make unit tests easier
return (
__salt__["cmd.run_all"](
_systemctl_cmd(
"disable", name, systemd_scope=True, no_block=no_block, root=root
),
python_shell=False,
ignore_retcode=True,
)["retcode"]
== 0
)
# The unused kwargs argument is required to maintain consistency with the API
# established by Salt's service management states.
def enabled(name, root=None, **kwargs): # pylint: disable=unused-argument
"""
Return if the named service is enabled to start on boot
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
"""
# Try 'systemctl is-enabled' first, then look for a symlink created by
# systemctl (older systemd releases did not support using is-enabled to
# check templated services), and lastly check for a sysvinit service.
if (
__salt__["cmd.retcode"](
_systemctl_cmd("is-enabled", name, root=root),
python_shell=False,
ignore_retcode=True,
)
== 0
):
return True
elif "@" in name:
# On older systemd releases, templated services could not be checked
# with ``systemctl is-enabled``. As a fallback, look for the symlinks
# created by systemctl when enabling templated services.
local_config_path = _root(LOCAL_CONFIG_PATH, "/")
cmd = [
"find",
local_config_path,
"-name",
name,
"-type",
"l",
"-print",
"-quit",
]
# If the find command returns any matches, there will be output and the
# string will be non-empty.
if bool(__salt__["cmd.run"](cmd, python_shell=False)):
return True
elif name in _get_sysv_services(root):
return _sysv_enabled(name, root)
return False
def disabled(name, root=None):
"""
Return if the named service is disabled from starting on boot
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
"""
return not enabled(name, root=root)
def show(name, root=None):
"""
.. versionadded:: 2014.7.0
Show properties of one or more units/jobs or the manager
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
salt '*' service.show <service name>
"""
ret = {}
out = __salt__["cmd.run"](
_systemctl_cmd("show", name, root=root), python_shell=False
)
for line in salt.utils.itertools.split(out, "\n"):
comps = line.split("=")
name = comps[0]
value = "=".join(comps[1:])
if value.startswith("{"):
value = value.replace("{", "").replace("}", "")
ret[name] = {}
for item in value.split(" ; "):
comps = item.split("=")
ret[name][comps[0].strip()] = comps[1].strip()
elif name in ("Before", "After", "Wants"):
ret[name] = value.split()
else:
ret[name] = value
return ret
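# Illustrative example (hypothetical ``systemctl show`` output): a line such as
#   ExecStart={ path=/usr/sbin/sshd ; argv[]=/usr/sbin/sshd -D }
# is parsed by the brace branch above into ret["ExecStart"]["path"] == "/usr/sbin/sshd".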
def execs(root=None):
"""
.. versionadded:: 2014.7.0
Return a list of all files specified as ``ExecStart`` for all services.
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
salt '*' service.execs
"""
ret = {}
for service in get_all(root=root):
data = show(service, root=root)
if "ExecStart" not in data:
continue
ret[service] = data["ExecStart"]["path"]
return ret
def firstboot(
locale=None,
locale_message=None,
keymap=None,
timezone=None,
hostname=None,
machine_id=None,
root=None,
):
"""
.. versionadded:: TBD
Call systemd-firstboot to configure basic settings of the system
locale
Set primary locale (LANG=)
locale_message
Set message locale (LC_MESSAGES=)
keymap
Set keymap
timezone
Set timezone
hostname
Set host name
machine_id
Set machine ID
root
Operate on an alternative filesystem root
CLI Example:
salt '*' service.firstboot keymap=jp locale=en_US.UTF-8
"""
cmd = ["systemd-firstboot"]
parameters = [
("locale", locale),
("locale-message", locale_message),
("keymap", keymap),
("timezone", timezone),
("hostname", hostname),
("machine-ID", machine_id),
("root", root),
]
for parameter, value in parameters:
if value:
cmd.extend(["--{}".format(parameter), str(value)])
out = __salt__["cmd.run_all"](cmd)
if out["retcode"] != 0:
raise CommandExecutionError("systemd-firstboot error: {}".format(out["stderr"]))
return True
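# Hypothetical example of the command assembled above:
#   firstboot(locale="en_US.UTF-8", keymap="jp")
#   runs: systemd-firstboot --locale en_US.UTF-8 --keymap jp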
|
the-stack_0_15829 | from __future__ import unicode_literals
import hashlib
import itertools
import json
import re
from ..compat import compat_HTTPError, compat_str
from ..utils import (ExtractorError, float_or_none, get_element_by_attribute,
int_or_none, lowercase_escape, std_headers, try_get,
url_or_none)
from .common import InfoExtractor
class InstagramIE(InfoExtractor):
_VALID_URL = (
r"(?P<url>https?://(?:www\.)?instagram\.com/(?:p|tv|reel)/(?P<id>[^/?#&]+))"
)
_TESTS = [
{
"url": "https://instagram.com/p/aye83DjauH/?foo=bar#abc",
"md5": "0d2da106a9d2631273e192b372806516",
"info_dict": {
"id": "aye83DjauH",
"ext": "mp4",
"title": "Video by naomipq",
"description": "md5:1f17f0ab29bd6fe2bfad705f58de3cb8",
"thumbnail": r"re:^https?://.*\.jpg",
"duration": 0,
"timestamp": 1371748545,
"upload_date": "20130620",
"uploader_id": "naomipq",
"uploader": "B E A U T Y F O R A S H E S",
"like_count": int,
"comment_count": int,
"comments": list,
},
},
{
# missing description
"url": "https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears",
"info_dict": {
"id": "BA-pQFBG8HZ",
"ext": "mp4",
"title": "Video by britneyspears",
"thumbnail": r"re:^https?://.*\.jpg",
"duration": 0,
"timestamp": 1453760977,
"upload_date": "20160125",
"uploader_id": "britneyspears",
"uploader": "Britney Spears",
"like_count": int,
"comment_count": int,
"comments": list,
},
"params": {
"skip_download": True,
},
},
{
# multi video post
"url": "https://www.instagram.com/p/BQ0eAlwhDrw/",
"playlist": [
{
"info_dict": {
"id": "BQ0dSaohpPW",
"ext": "mp4",
"title": "Video 1",
},
},
{
"info_dict": {
"id": "BQ0dTpOhuHT",
"ext": "mp4",
"title": "Video 2",
},
},
{
"info_dict": {
"id": "BQ0dT7RBFeF",
"ext": "mp4",
"title": "Video 3",
},
},
],
"info_dict": {
"id": "BQ0eAlwhDrw",
"title": "Post by instagram",
"description": "md5:0f9203fc6a2ce4d228da5754bcf54957",
},
},
{
# IGTV
"url": "https://www.instagram.com/tv/BkfuX9UB-eK/",
"info_dict": {
"id": "BkfuX9UB-eK",
"ext": "mp4",
"title": "Fingerboarding Tricks with @cass.fb",
"thumbnail": r"re:^https?://.*\.jpg",
"duration": 53.83,
"timestamp": 1530032919,
"upload_date": "20180626",
"uploader_id": "instagram",
"uploader": "Instagram",
"like_count": int,
"comment_count": int,
"comments": list,
"description": "Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.",
},
},
{
"url": "https://instagram.com/p/-Cmh1cukG2/",
"only_matching": True,
},
{
"url": "http://instagram.com/p/9o6LshA7zy/embed/",
"only_matching": True,
},
{
"url": "https://www.instagram.com/tv/aye83DjauH/",
"only_matching": True,
},
{
"url": "https://www.instagram.com/reel/CDUMkliABpa/",
"only_matching": True,
},
]
@staticmethod
def _extract_embed_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
webpage,
)
if mobj:
return mobj.group("url")
blockquote_el = get_element_by_attribute("class", "instagram-media", webpage)
if blockquote_el is None:
return
mobj = re.search(r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
if mobj:
return mobj.group("link")
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group("id")
url = mobj.group("url")
webpage = self._download_webpage(url, video_id)
        # title and duration are initialised here as well, so the return block below
        # never references an undefined name when no media dict could be extracted.
        (
            media,
            video_url,
            description,
            thumbnail,
            timestamp,
            uploader,
            uploader_id,
            like_count,
            comment_count,
            comments,
            height,
            width,
            title,
            duration,
        ) = [None] * 14
shared_data = self._parse_json(
self._search_regex(
r"window\._sharedData\s*=\s*({.+?});",
webpage,
"shared data",
default="{}",
),
video_id,
fatal=False,
)
if shared_data:
media = try_get(
shared_data,
(
lambda x: x["entry_data"]["PostPage"][0]["graphql"][
"shortcode_media"
],
lambda x: x["entry_data"]["PostPage"][0]["media"],
),
dict,
)
# _sharedData.entry_data.PostPage is empty when authenticated (see
# https://github.com/nextdl/nextdl/pull/22880)
if not media:
additional_data = self._parse_json(
self._search_regex(
r"window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*({.+?})\s*\)\s*;",
webpage,
"additional data",
default="{}",
),
video_id,
fatal=False,
)
if additional_data:
media = try_get(
additional_data, lambda x: x["graphql"]["shortcode_media"], dict
)
if media:
video_url = media.get("video_url")
height = int_or_none(media.get("dimensions", {}).get("height"))
width = int_or_none(media.get("dimensions", {}).get("width"))
description = try_get(
media,
lambda x: x["edge_media_to_caption"]["edges"][0]["node"]["text"],
compat_str,
) or media.get("caption")
title = media.get("title")
thumbnail = media.get("display_src") or media.get("display_url")
duration = float_or_none(media.get("video_duration"))
timestamp = int_or_none(
media.get("taken_at_timestamp") or media.get("date")
)
uploader = media.get("owner", {}).get("full_name")
uploader_id = media.get("owner", {}).get("username")
def get_count(keys, kind):
if not isinstance(keys, (list, tuple)):
keys = [keys]
for key in keys:
count = int_or_none(
try_get(
media,
(
lambda x: x["edge_media_%s" % key]["count"],
lambda x: x["%ss" % kind]["count"],
),
)
)
if count is not None:
return count
like_count = get_count("preview_like", "like")
comment_count = get_count(
("preview_comment", "to_comment", "to_parent_comment"), "comment"
)
comments = [
{
"author": comment.get("user", {}).get("username"),
"author_id": comment.get("user", {}).get("id"),
"id": comment.get("id"),
"text": comment.get("text"),
"timestamp": int_or_none(comment.get("created_at")),
}
for comment in media.get("comments", {}).get("nodes", [])
if comment.get("text")
]
if not video_url:
edges = (
try_get(
media, lambda x: x["edge_sidecar_to_children"]["edges"], list
)
or []
)
if edges:
entries = []
for edge_num, edge in enumerate(edges, start=1):
node = try_get(edge, lambda x: x["node"], dict)
if not node:
continue
node_video_url = url_or_none(node.get("video_url"))
if not node_video_url:
continue
entries.append(
{
"id": node.get("shortcode") or node["id"],
"title": node.get("title") or "Video %d" % edge_num,
"url": node_video_url,
"thumbnail": node.get("display_url"),
"duration": float_or_none(node.get("video_duration")),
"width": int_or_none(
try_get(node, lambda x: x["dimensions"]["width"])
),
"height": int_or_none(
try_get(node, lambda x: x["dimensions"]["height"])
),
"view_count": int_or_none(node.get("video_view_count")),
}
)
return self.playlist_result(
entries,
video_id,
"Post by %s" % uploader_id if uploader_id else None,
description,
)
if not video_url:
video_url = self._og_search_video_url(webpage, secure=False)
formats = [
{
"url": video_url,
"width": width,
"height": height,
}
]
if not uploader_id:
uploader_id = self._search_regex(
r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
webpage,
"uploader id",
fatal=False,
)
if not description:
description = self._search_regex(
r'"caption"\s*:\s*"(.+?)"', webpage, "description", default=None
)
if description is not None:
description = lowercase_escape(description)
if not thumbnail:
thumbnail = self._og_search_thumbnail(webpage)
return {
"id": video_id,
"formats": formats,
"ext": "mp4",
"title": title or "Video by %s" % uploader_id,
"description": description,
"duration": duration,
"thumbnail": thumbnail,
"timestamp": timestamp,
"uploader_id": uploader_id,
"uploader": uploader,
"like_count": like_count,
"comment_count": comment_count,
"comments": comments,
}
class InstagramPlaylistIE(InfoExtractor):
# A superclass for handling any kind of query based on GraphQL which
# results in a playlist.
_gis_tmpl = None # used to cache GIS request type
def _parse_graphql(self, webpage, item_id):
# Reads a webpage and returns its GraphQL data.
return self._parse_json(
self._search_regex(
r"sharedData\s*=\s*({.+?})\s*;\s*[<\n]", webpage, "data"
),
item_id,
)
def _extract_graphql(self, data, url):
# Parses GraphQL queries containing videos and generates a playlist.
def get_count(suffix):
return int_or_none(
try_get(node, lambda x: x["edge_media_" + suffix]["count"])
)
uploader_id = self._match_id(url)
csrf_token = data["config"]["csrf_token"]
rhx_gis = data.get("rhx_gis") or "3c7ca9dcefcf966d11dacf1f151335e8"
cursor = ""
for page_num in itertools.count(1):
variables = {
"first": 12,
"after": cursor,
}
variables.update(self._query_vars_for(data))
variables = json.dumps(variables)
if self._gis_tmpl:
gis_tmpls = [self._gis_tmpl]
else:
gis_tmpls = [
"%s" % rhx_gis,
"",
"%s:%s" % (rhx_gis, csrf_token),
"%s:%s:%s" % (rhx_gis, csrf_token, std_headers["User-Agent"]),
]
# try all of the ways to generate a GIS query, and not only use the
# first one that works, but cache it for future requests
for gis_tmpl in gis_tmpls:
try:
json_data = self._download_json(
"https://www.instagram.com/graphql/query/",
uploader_id,
"Downloading JSON page %d" % page_num,
headers={
"X-Requested-With": "XMLHttpRequest",
"X-Instagram-GIS": hashlib.md5(
("%s:%s" % (gis_tmpl, variables)).encode("utf-8")
).hexdigest(),
},
query={
"query_hash": self._QUERY_HASH,
"variables": variables,
},
)
media = self._parse_timeline_from(json_data)
self._gis_tmpl = gis_tmpl
break
except ExtractorError as e:
# if it's an error caused by a bad query, and there are
# more GIS templates to try, ignore it and keep trying
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
if gis_tmpl != gis_tmpls[-1]:
continue
raise
edges = media.get("edges")
if not edges or not isinstance(edges, list):
break
for edge in edges:
node = edge.get("node")
if not node or not isinstance(node, dict):
continue
if (
node.get("__typename") != "GraphVideo"
and node.get("is_video") is not True
):
continue
video_id = node.get("shortcode")
if not video_id:
continue
info = self.url_result(
"https://instagram.com/p/%s/" % video_id,
ie=InstagramIE.ie_key(),
video_id=video_id,
)
description = try_get(
node,
lambda x: x["edge_media_to_caption"]["edges"][0]["node"]["text"],
compat_str,
)
thumbnail = node.get("thumbnail_src") or node.get("display_src")
timestamp = int_or_none(node.get("taken_at_timestamp"))
comment_count = get_count("to_comment")
like_count = get_count("preview_like")
view_count = int_or_none(node.get("video_view_count"))
info.update(
{
"description": description,
"thumbnail": thumbnail,
"timestamp": timestamp,
"comment_count": comment_count,
"like_count": like_count,
"view_count": view_count,
}
)
yield info
page_info = media.get("page_info")
if not page_info or not isinstance(page_info, dict):
break
has_next_page = page_info.get("has_next_page")
if not has_next_page:
break
cursor = page_info.get("end_cursor")
if not cursor or not isinstance(cursor, compat_str):
break
def _real_extract(self, url):
user_or_tag = self._match_id(url)
webpage = self._download_webpage(url, user_or_tag)
data = self._parse_graphql(webpage, user_or_tag)
self._set_cookie("instagram.com", "ig_pr", "1")
return self.playlist_result(
self._extract_graphql(data, url), user_or_tag, user_or_tag
)
class InstagramUserIE(InstagramPlaylistIE):
_VALID_URL = r"https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])"
IE_DESC = "Instagram user profile"
IE_NAME = "instagram:user"
_TEST = {
"url": "https://instagram.com/porsche",
"info_dict": {
"id": "porsche",
"title": "porsche",
},
"playlist_count": 5,
"params": {
"extract_flat": True,
"skip_download": True,
"playlistend": 5,
},
}
_QUERY_HASH = ("42323d64886122307be10013ad2dcc44",)
@staticmethod
def _parse_timeline_from(data):
# extracts the media timeline data from a GraphQL result
return data["data"]["user"]["edge_owner_to_timeline_media"]
@staticmethod
def _query_vars_for(data):
# returns a dictionary of variables to add to the timeline query based
# on the GraphQL of the original page
return {"id": data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"]}
class InstagramTagIE(InstagramPlaylistIE):
_VALID_URL = r"https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)"
IE_DESC = "Instagram hashtag search"
IE_NAME = "instagram:tag"
_TEST = {
"url": "https://instagram.com/explore/tags/lolcats",
"info_dict": {
"id": "lolcats",
"title": "lolcats",
},
"playlist_count": 50,
"params": {
"extract_flat": True,
"skip_download": True,
"playlistend": 50,
},
}
_QUERY_HASH = ("f92f56d47dc7a55b606908374b43a314",)
@staticmethod
def _parse_timeline_from(data):
# extracts the media timeline data from a GraphQL result
return data["data"]["hashtag"]["edge_hashtag_to_media"]
@staticmethod
def _query_vars_for(data):
# returns a dictionary of variables to add to the timeline query based
# on the GraphQL of the original page
return {
"tag_name": data["entry_data"]["TagPage"][0]["graphql"]["hashtag"]["name"]
}
|
the-stack_0_15830 | import time
import inspect
from functools import update_wrapper
from fixate.core.common import mode_builder, unit_scale
from fixate.core.exceptions import ParameterError, InstrumentError
from fixate.drivers.funcgen.helper import FuncGen
MODES = {
":SINusoid": {
" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}},
":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}},
},
":SQUare": {
" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}},
":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}},
},
":RAMP": {
" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}},
":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}},
},
":PULSE": {
" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}},
":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}},
},
":NOISe DEFault": {",[{amplitude}]": {",[{offset}]": {}}},
":DC DEFault,DEFault": {",[{offset}]": {}},
":USER": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}},
}
ADV_MODES = {":SQUare:" "DCYCle"}
# -----------------------------------------------------------------------------------------------------------------------
class RigolDG1022(FuncGen):
REGEX_ID = "RIGOL TECHNOLOGIES,DG1022"
INSTR_TYPE = "VISA"
retrys_on_timeout = 3
_verify = True
def __init__(self, instrument):
"""
        The self._ values hold the user values as entered, if valid.
        The self.__ values are the sanitised values used internally to parse between functions.
        Limitations:
        The function generator switches internal relays at certain amplitude thresholds.
        Try to avoid crossing these ranges in a design if the function generator is loaded with a
        relatively low impedance.
        Ranges sharing the same relay arrangement:
            Min mVpp    Max mVpp
            4           60
            60.1        199.9
            200         599.9
            600         2000
            2001        6000
            6001        20000
:param instrument:
:return:
"""
super().__init__(instrument)
self.instrument.query_delay = 0.2
self.instrument.timeout = 1000
# Rigol Restrictions
self.__restr_bandwidth = {"min": unit_scale("4uHz"), "max": unit_scale("20MHz")}
self.__restr_phase = {"min": -180, "max": 180}
self.__restr_amplitude = {
"min": unit_scale("4mVpp"),
"max": unit_scale("20Vpp"),
}
self._amplitude = None
self._store = {"ch1_duty": "50", "ch2_duty": "50"}
self.api = [
# WAVEFORM SELECTION:
# Channel 1:
(
"channel1.waveform.sin",
self.store_and_write,
("FUNC SIN", {"ch1_waveform_handler": None}), # base_str
), # handler
(
"channel1.waveform.square",
self.store_and_write,
(
"FUNC SQU\r\nFUNC:SQU:DCYC {self._store[ch1_duty]}",
{"ch1_waveform_handler": "channel1.waveform.square"},
),
),
(
"channel1.waveform.ramp",
self.store_and_write,
("FUNC RAMP", {"ch1_waveform_handler": None}),
),
(
"channel1.waveform.pulse",
self.store_and_write,
(
"FUNC PULS\r\nPULS:DCYC {self._store[ch1_duty]}",
{"ch1_waveform_handler": "channel1.waveform.pulse"},
),
),
(
"channel1.waveform.arb",
self.store_and_write,
("FUNC USER", {"ch1_waveform_handler": None}),
),
(
"channel1.waveform.triangle",
self.store_and_write,
("FUNC TRI", {"ch1_waveform_handler": None}),
),
(
"channel1.waveform.noise",
self.store_and_write,
("FUNC NOIS", {"ch1_waveform_handler": None}),
),
(
"channel1.waveform.dc",
self.store_and_write,
("FUNC DC", {"ch1_waveform_handler": None}),
),
# Channel 2:
(
"channel2.waveform.sin",
self.store_and_write,
("FUNC:CH2 SIN", {"ch2_waveform_handler": None}), # base_str
), # handler
(
"channel2.waveform.square",
self.store_and_write,
(
"FUNC:CH2 SQU\r\nFUNC:SQU:DCYC:CH2 {self._store[ch2_duty]}",
{"ch2_waveform_handler": "channel2.waveform.square"},
),
),
(
"channel2.waveform.ramp",
self.store_and_write,
("FUNC:CH2 RAMP", {"ch2_waveform_handler": None}),
),
(
"channel2.waveform.pulse",
self.store_and_write,
(
"FUNC:CH2 PULS\r\nPULS:DCYC {self._store[ch2_duty]}",
{"ch2_waveform_handler": "channel2.waveform.pulse"},
),
),
(
"channel2.waveform.arb",
self.store_and_write,
("FUNC:CH2 USER", {"ch2_waveform_handler": None}),
),
(
"channel2.waveform.triangle",
self.store_and_write,
("FUNC:CH2 TRI", {"ch2_waveform_handler": None}),
),
(
"channel2.waveform.noise",
self.store_and_write,
("FUNC:CH2 NOIS", {"ch2_waveform_handler": None}),
),
(
"channel2.waveform.dc",
self.store_and_write,
("FUNC:CH2 DC", {"ch2_waveform_handler": None}),
),
# CHANNEL CONFIGURATION:
# Channel 1:
("channel1.vrms", self.write, "VOLT:UNIT VRMS\r\nVOLT {value}"),
("channel1.vpp", self.write, "VOLT:UNIT VPP\r\nVOLT {value}"),
("channel1.dbm", self.write, "VOLT:UNIT DBM\r\nVOLT {value}"),
("channel1.offset", self.write, "VOLT:OFFS {value}"),
("channel1.phase", self.write, "PHAS {value}"),
(
"channel1.duty",
self.store_and_execute,
({"ch1_duty": "{value}"}, "ch1_waveform_handler"),
),
("channel1.frequency", self.write, "FREQ {value}"),
# Channel 2:
("channel2.vrms", self.write, "VOLT:UNIT:CH2 VRMS\r\nVOLT {value}"),
("channel2.vpp", self.write, "VOLT:UNIT:CH2 VPP\r\nVOLT {value}"),
("channel2.dbm", self.write, "VOLT:UNIT:CH2 DBM\r\nVOLT {value}"),
("channel2.offset", self.write, "VOLT:OFFS:CH2 {value}"),
("channel2.phase", self.write, "PHAS:CH2 {value}"),
("channel2.duty", self.store, {"ch2_duty": "{value}"}),
("channel2.frequency", self.write, "FREQ:CH2 {value}"),
# CHANNEL ACTIVATION:
(
"channel1._call",
self.write,
"OUTP {value}",
        ),  # True won't work here; the value needs to be ON or 1, OFF or 0
(
"channel2._call",
self.write,
"OUTP:CH2 {value}",
        ),  # True won't work here; the value needs to be ON or 1, OFF or 0
# SYNC CONFIGURATION:
("sync.polarity.normal", self.write, ""),
("sync.mode.normal", self.write, ""),
("sync.mode.source", self.write, ""),
("sync._call", self.write, "OUTP {value}"),
# TRIGGER CONFIGURATION:
("trigger.immediate", self.write, "TRIG:SOUR IMM"),
("trigger.external._call", self.write, "TRIG:SOUR EXT"),
("trigger.external.rising", self.write, "TRIG:SOUR EXT\r\n TRIG1:SLOP POS"),
(
"trigger.external.falling",
self.write,
"TRIG:SOUR EXT\r\n TRIG1:SLOP NEG",
),
("trigger.manual", self.write, "TRIG:SOUR BUS"),
("trigger.delay", self.write, "TRIG:DEL {seconds}"),
("trigger.out.off", self.write, "OUTP:TRIG OFF"),
("trigger.out._call", self.write, "OUTP:TRIG {output}"),
("trigger.out.rising", self.write, "OUTP:TRIG:SLOP POS"),
("trigger.out.falling", self.write, "OUTP:TRIG:SLOP NEG"),
# Modulate
# Channel 1:
(
"channel1.modulate.am._call",
self.store,
{"ch1_modulate_state": "AM", "ch1_modulate_setting": "FREQ"},
),
(
"channel1.modulate.fm._call",
self.store,
{"ch1_modulate_state": "FM", "ch1_modulate_setting": "FREQ"},
),
(
"channel1.modulate.pm._call",
self.store,
{"ch1_modulate_state": "PM", "ch1_modulate_setting": "FREQ"},
),
(
"channel1.modulate.fsk._call",
self.store,
{"ch1_modulate_state": "FSK", "ch1_modulate_setting": "RATE"},
),
(
"channel1.modulate.bpsk._call",
self.store,
{"ch1_modulate_state": "BPSK", "ch1_modulate_setting": "RATE"},
),
(
"channel1.modulate.sum._call",
self.store,
{"ch1_modulate_state": "SUM", "ch1_modulate_setting": "RATE"},
),
# MODULATE SOURCES:
(
"channel1.modulate.source.internal._call",
self.store_and_write,
(
"{self._store[ch1_modulate_state]}:SOUR INT",
{"ch1_modulate_source": "INT"},
),
),
(
"channel1.modulate.source.external",
self.store_and_write,
(
"{self._store[ch1_modulate_state]}:SOUR EXT",
{"ch1_modulate_source": "EXT"},
),
),
# MODULATE ACTIVATION:
# Channel 1:
(
"channel1.modulate._call",
self.write,
"{self._store[ch1_modulate_state]}:STAT {value}\r\n{self._store[ch1_modulate_state]}:SOUR"
"{self._store[ch1_modulate_source]}",
),
# MODULATE OPTIONS:
# Channel 1:
("channel1.modulate.am.depth", self.write, "AM:DEPT {value}"),
("channel1.modulate.fm.freq_dev", self.write, "FM:DEV {value}"),
("channel1.modulate.pm.phase_dev", self.write, "PM:DEV{value}"),
("channel1.modulate.fsk.hop_freq", self.write, "FSK:FREQ {value}"),
("channel1.modulate.fsk.rate", self.write, "FSK:INT:RATE {value}"),
# MODULATE SHAPES:
# Channel 1:
(
"channel1.modulate.source.internal.shape.sin",
self.write,
"{self._store[ch1_modulate_state]}:INT:FUNC SIN",
),
(
"channel1.modulate.source.internal.shape.square",
self.write,
"{self._store[ch1_modulate_state]}:INT:FUNC SQU",
),
(
"channel1.modulate.source.internal.shape.triangle",
self.write,
"{self._store[ch1_modulate_state]}:INT:FUNC TRI",
),
(
"channel1.modulate.source.internal.shape.up_ramp",
self.write,
"{self._store[ch1_modulate_state]}:INT:FUNC RAMP",
),
(
"channel1.modulate.source.internal.shape.down_ramp",
self.write,
"{self._store[ch1_modulate_state]}:INT:FUNC NRAMP",
),
(
"channel1.modulate.source.internal.shape.noise",
self.write,
"{self._store[ch1_modulate_state]}:INT:FUNC NOIS",
),
# BURST
# Channel 1:
("channel1.burst.gated._call", self.write, "BURS:MODE GAT"),
("channel1.burst._call", self.write, "BURS:STAT {value}"),
("channel1.burst.ncycle._call", self.write, "BURS:MODE TRIG"),
("channel1.burst.ncycle.cycles._call", self.write, "BURS:NCYC {cycles}"),
("channel1.burst.ncycle.cycles.infinite", self.write, "BURS:NCYC INF"),
(
"channel1.burst.ncycle.burst_period",
self.write,
"BURS:INT:PER {seconds}",
),
("channel1.burst.gated.positive", self.write, "BURS:GATE:POL NORM"),
("channel1.burst.gated.negative", self.write, "BURS:GATE:POL INV"),
("channel1.burst.phase", self.write, "BURS:PHAS {degrees}"),
# Modulate Frequency
(
"channel1.modulate.source.internal.frequency",
self.write,
"{self._store[ch1_modulate_state]}:INT:{self._store[ch1_modulate_setting]} {value}",
),
# LOAD:
# channel1:
("channel1.load._call", self.write, "OUTP:LOAD {ohms}"),
("channel1.load.infinite", self.write, "OUTP:LOAD INF"),
# channel2:
("channel2.load._call", self.write, "OUTP:LOAD:CH2 {ohms}"),
("channel2.load.infinite", self.write, "OUTP:LOAD:CH2 INF"),
]
# -----------------------------------------------------------------------------------------------------------------------
self.init_api()
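    # Illustrative usage sketch (a minimal example; the pyvisa resource manager
    # `rm` and the resource string are assumptions, not part of this driver):
    #
    #     >>> funcgen = RigolDG1022(rm.open_resource("USB0::0x1AB1::0x0588::DG1D000000000::INSTR"))
    #     >>> funcgen.function("sin", channel=1, freq="1kHz", amplit=2, offset=0)
    #     >>> funcgen.output_ch1 = True   # sends "OUTP ON"
    #     >>> funcgen.local()             # hand control back to the front panel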
def sync_output(self, sync):
"""
:param sync:
True or False
:return:
None
"""
if sync:
self._write(["OUTPut:SYNC ON"])
else:
self._write(["OUTPut:SYNC OFF"])
def trigger_output(self, trigger, rising=False, falling=False):
"""
        :param trigger:
            True or False
        :param rising:
            If True, trigger output on the rising edge
        :param falling:
            If True, trigger output on the falling edge
        :return:
            None
"""
if rising and falling:
raise ValueError("Cannot trigger on both rising and falling edges")
if trigger:
if rising:
self._write(["OUTPut:TRIGger:SLOPe POSitive"])
if falling:
self._write(["OUTPut:TRIGger:SLOPe NEGative"])
self._write(["OUTPut:TRIGger ON"])
else:
self._write(["OUTPut:TRIGger OFF"])
@property
def verify_values(self):
return self._verify
@verify_values.setter
def verify_values(self, val):
if val not in [True, False]:
raise ValueError("Invalid value. Use True or False")
self._verify = val
@property
def amplitude_ch1(self):
return self.instrument.query_ascii_values("VOLTAGE?")[0]
@property
def amplitude_ch2(self):
return self.instrument.query_ascii_values("VOLTAGE:CH2?")[0]
@amplitude_ch1.setter
def amplitude_ch1(self, val):
self._write("VOLTAGE {}".format(val))
@amplitude_ch2.setter
def amplitude_ch2(self, val):
self._write("VOLTAGE:CH2 {}".format(val))
@property
def output_ch1(self):
resp = self.instrument.query("OUTP?")
if "OFF" in resp:
return False
elif "ON" in resp:
return True
@output_ch1.setter
def output_ch1(self, val):
if val not in [True, False]:
raise ParameterError(
"Unknown output {} value for CH1\r\nPlease select True or False".format(
val
)
)
if val:
self._write("OUTP ON")
else:
self._write("OUTP OFF")
@property
def output_ch2(self):
resp = self.instrument.query("OUTP:CH2?")
if "OFF" in resp:
return False
elif "ON" in resp:
return True
@output_ch2.setter
def output_ch2(self, val):
if val not in [True, False]:
raise ParameterError(
"Unknown output {} value for CH2\nPlease select True or False".format(
val
)
)
if val:
self._write("OUTP:CH2 ON")
else:
self._write("OUTP:CH2 OFF")
@FuncGen.output_sync.setter
def output_sync(self, val):
time.sleep(0.5)
if val not in [True, False]:
raise ParameterError(
"Unknown output {} value for SYNC\nPlease select True or False".format(
val
)
)
self._output_sync = val
if self._output_sync:
self._write("OUTP:SYNC ON")
else:
self._write("OUTP:SYNC OFF")
def local(self):
"""
Gives local control back to the instrument
Remote control is activated on any other commands set to the device
:return:
"""
time.sleep(0.5)
self._write("SYSTem:LOCal")
def reset(self):
"""
        Be aware that during reset the funcgen may briefly output 5Vpp at 1kHz with the output enabled.
        This could cause issues. Ensure that the setup is in a safe state to receive such a signal.
:return:
"""
# Due to the 5Vpp 1kHz signal. Explicit call to turn output off first
self.output_ch1 = False
self.output_ch2 = False
self._write("*RST")
def function(
self, waveform, channel=1, duty_cycle=None, symmetry=None, phase=None, **kwargs
):
"""
        If the optional parameters are empty, the previously set mode is used.
        The mode and mode parameters are passed to mode_builder, which searches recursively through the
        MODES dictionary to build the VISA string the instrument needs to interpret the command.
        Usage:
            function('sin')
        parsed to visa:
            'APPLy:SINusoid'
            function('square', channel=2, amplit=5, offset=2.5, freq='1kHz')
        parsed to visa:
            'APPLy:SQUare:CH2 1000, 5, 2.5'
        which corresponds to a square wave at 1kHz with the minimum of the wave at 0V and the maximum at 5V.
        For more advanced functions that cannot be expressed through waveform, amplitude, offset and frequency,
        use adv_function.
"""
if int(channel) in range(1, 3):
channel = "CH{}".format(channel)
else:
raise ValueError(
"Invalid channel {} use a number between 1-2".format(channel)
)
mode = (waveform, channel)
# self.reset()
if duty_cycle is not None:
if waveform.upper() not in "PULSE":
if channel == "CH1":
self._write(
["FUNCtion:{}:DCYCle {}".format(waveform.upper(), duty_cycle)]
)
else:
self._write(
[
"FUNCtion:{}:DCYCle:{} {}".format(
waveform.upper(), channel, duty_cycle
)
]
)
else:
if channel == "CH1":
self._write(["PULSe:DCYC {}".format(duty_cycle)])
else:
self._write(["PULSe:DCYC:{} {}".format(channel, duty_cycle)])
if symmetry is not None:
if channel == "CH1":
self._write(["FUNCtion:RAMP:SYMMetry {}".format(symmetry)])
else:
self._write(["FUNCtion:RAMP:SYMMetry:{} {}".format(channel, symmetry)])
if phase is not None:
if channel == "CH1":
self._write(["PHASe {}".format(phase)])
else:
self._write(["PHASe:CH2 {}".format(phase)])
self._write(["APPLy{}".format(mode_builder(MODES, {}, *mode, **kwargs))])
def am(self, frequency, depth, source=None, waveform="SIN"):
self._write(
[
"AM:SOURce INT",
"AM:INT:FREQuency {frequency}".format(frequency=frequency),
"AM:DEPTh {depth}".format(depth=depth),
"AM:INT:FUNC {waveform}".format(waveform=waveform),
"AM:STATe ON",
]
)
def disable_am(self):
self._write(["AM:STATe OFF"])
def enable_am(self):
self._write(["AM:STATe ON"])
def adv_function(self, *mode, **mode_params):
"""
Exposes the advanced functionality of the function generator.
Currently not implemented
:param mode:
:param mode_params:
:return:
"""
raise NotImplementedError
def _write(self, data):
"""
        The DG1022 cannot respond to VISA commands as quickly as some other devices.
        A 100ms delay was found to be reliable for most commands, with the exception of the *IDN?
        identification command; an extra 100ms should be allowed for explicit calls to *IDN?.
        Note:
        The 6000 divisor in the sleep below was derived from trial and error. The write calls don't seem to
        block at the rate they write; allowing roughly 166us of delay per byte of data stops the funcgen
        from choking on the next call, and a flat 100ms is added on top to allow processing time.
        This is especially important for commands that write large amounts of data, such as user arbitrary forms.
"""
if data:
if isinstance(data, str):
data = data.split("\r\n")
for itm in data:
self.instrument.write(itm)
time.sleep(0.1 + len(itm) / 6000)
else:
raise ParameterError("Missing data in instrument write")
self._is_error()
def _check_errors(self):
resp = self.instrument.query("SYST:ERR?")
code, msg = resp.strip("\n").split(",")
code = int(code)
msg = msg.strip('"')
return code, msg
def _is_error(self, silent=False):
errors = []
while True:
code, msg = self._check_errors()
if code != 0:
errors.append((code, msg))
else:
break
if errors:
if silent:
return errors
else:
raise InstrumentError(
"Error(s) Returned from FuncGen\n"
+ "\n".join(
[
"Code: {}\nMessage:{}".format(code, msg)
for code, msg in errors
]
)
)
def write(self, base_str, *args, **kwargs):
formatted_string = self._format_string(base_str, **kwargs)
self._write(formatted_string)
def _format_string(self, base_str, **kwargs):
kwargs["self"] = self
prev_string = base_str
cur_string = ""
while True:
cur_string = prev_string.format(**kwargs)
if cur_string == prev_string:
break
prev_string = cur_string
return cur_string
def store(self, store_dict, *args, **kwargs):
"""
        Store a dictionary of values on the driver instance.
        :param store_dict:
            Dictionary containing the parameters to store
:return:
"""
new_dict = store_dict.copy()
for k, v in store_dict.items():
            # Format the value the same way write() does before storing it in new_dict
            try:
                new_dict[k] = v.format(**kwargs)
            except Exception:
                # Leave values that cannot be formatted unchanged
                pass
self._store.update(new_dict)
def store_and_execute(self, params, *args, **kwargs):
store_dict, handler_id = params
self.store(store_dict, *args, **kwargs)
handler_string = self._store[handler_id]
if handler_string is not None:
*parents, func = handler_string.split(".")
parent_obj = self
for parent in parents:
parent_obj = getattr(parent_obj, parent)
handler = getattr(parent_obj, func)
handler()
def store_and_write(self, params, *args, **kwargs):
base_str, store_dict = params
self.store(store_dict)
self.write(base_str, *args, **kwargs)
def init_api(self):
for func_str, handler, base_str in self.api:
*parents, func = func_str.split(".")
parent_obj = self
try:
for parent in parents:
parent_obj = getattr(parent_obj, parent)
                func_obj = getattr(parent_obj, func)
except AttributeError:
# print("FAILED ON:", func_str)
raise
            setattr(parent_obj, func, self.prepare_string(func_obj, handler, base_str))
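    # Illustrative note on the table-driven API above (assumes the FuncGen helper
    # declares channel1.vpp(value)): after init_api() runs, an entry such as
    #
    #     ("channel1.vpp", self.write, "VOLT:UNIT VPP\r\nVOLT {value}")
    #
    # rebinds channel1.vpp so that a call like funcgen.channel1.vpp(2.5) formats the
    # base string to "VOLT:UNIT VPP\r\nVOLT 2.5" and writes each line via _write().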
def prepare_string(self, func, handler, base_str, *args, **kwargs):
def temp_func(*nargs, **nkwargs):
"""
            Maps positional arguments onto the wrapped function's parameter names and
            formats the base string using keyword arguments only.
:param nargs:
:param nkwargs:
:return:
"""
sig = inspect.signature(func)
keys = [itm[0] for itm in sig.parameters.items()]
# Hard coding for RIGOL. BOOLS should be converted to "ON", "OFF"
for index, param in enumerate(nargs):
nkwargs[keys[index]] = param
for k, v in nkwargs.items():
if sig.parameters[k].annotation == bool:
if v:
nkwargs[k] = "ON"
else:
nkwargs[k] = "OFF"
return handler(base_str, **nkwargs)
return update_wrapper(temp_func, func)
# ------------------------------------------------------------------------------------------
def get_identity(self):
"""
        Query the identification string of the instrument: four comma-separated fields giving the
        manufacturer, model, serial number and firmware version (the last consisting of dot-separated numbers).
:return: RIGOL TECHNOLOGIES,DG1022,DG1000000002,00.01.00.04.00
"""
return self.instrument.query("*IDN?").strip()
|
the-stack_0_15832 | from __future__ import unicode_literals
import os
import re
import sys
import types
from django.conf import settings
from django.core.urlresolvers import Resolver404, resolve
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotFound, build_request_repr,
)
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
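# Illustrative example: for template_source = "ab\ncd\n" the generator above
# yields 0, 3, 6 and finally 7 (len(template_source) + 1), i.e. the offset at
# which each line starts plus a sentinel one past the end.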
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
* Not to break the debug page if the callable forbidding to set attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
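# Illustrative sketch of the cleansing behaviour above:
#
#     >>> cleanse_setting('API_KEY', 'hunter2')
#     '********************'
#     >>> cleanse_setting('DATABASES', {'default': {'PASSWORD': 'hunter2'}})
#     {'default': {'PASSWORD': '********************'}}
#     >>> cleanse_setting('DEBUG', True)
#     True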
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
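# Illustrative usage sketch (the surrounding handler code is an assumption, not
# part of this module): callers typically pass the live exception info, e.g.
#
#     import sys
#     try:
#         response = view(request)
#     except Exception:
#         response = technical_500_response(request, *sys.exc_info())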
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
            # If value is lazy or a complex object of another kind, this
            # isinstance check might raise an exception; doing it inside the
            # try block keeps lazy HttpRequests and MultiValueDicts from breaking the report.
is_request = isinstance(value, HttpRequest)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_request:
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
elif isinstance(value, MultiValueDict):
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
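# Illustrative sketch (assumes the decorator provided in
# django.views.decorators.debug): a view such as
#
#     from django.views.decorators.debug import sensitive_variables
#
#     @sensitive_variables('password', 'credit_card')
#     def process_payment(request):
#         password = request.POST['password']
#         credit_card = request.POST['credit_card']
#         ...
#
# would have `password` and `credit_card` reported as CLEANSED_SUBSTITUTE in the
# frame variables collected above whenever the filter is active (DEBUG is False).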
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def format_path_status(self, path):
if not os.path.exists(path):
return "File does not exist"
return "File exists"
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
try:
default_template_engine = Engine.get_default()
except Exception:
# Since the debug view must never crash, catch all exceptions.
# If Django can't find a default template engine, get_default()
# raises ImproperlyConfigured. If some template engines fail to
# load, any exception may be raised.
default_template_engine = None
# TODO: add support for multiple template engines (#24120).
# TemplateDoesNotExist should carry all the information.
# Replaying the search process isn't a good design.
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
if default_template_engine is None:
template_loaders = []
else:
self.template_does_not_exist = True
self.loader_debug_info = []
# If Django fails in get_template_loaders, provide an empty list
# for the following loop to not fail.
try:
template_loaders = default_template_engine.template_loaders
except Exception:
template_loaders = []
for loader in template_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{
'name': t,
'status': self.format_path_status(t),
} for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
                    # The force_escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        lines = ['Traceback (most recent call last):\n']
        lines += traceback.format_list(tb)
        lines += traceback.format_exception_only(self.exc_type, self.exc_value)
        return lines
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _("Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
"explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>'loaders'</code> option is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>
{{ template_info.before }}
<span class="specific">{{ template_info.during }}</span>
{{ template_info.after }}
</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your 'loaders' option is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your 'loaders' option is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}
{% ifchanged frame.exc_cause %}
{% if frame.exc_cause %}
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
|
the-stack_0_15835 | EPSILON = 1e-5
DICT_ALIASES_CORE = {
'node': 'NODE',
'displacement': 'DISPLACEMENT',
'disp': 'DISPLACEMENT',
'nodal_stress': 'NodalSTRESS',
'nodal_strain': 'NodalSTRAIN',
'nodal_mises': 'NodalMISES',
't_init': 'INITIAL_TEMPERATURE',
't_cnt': 'CNT_TEMPERATURE',
'reac': 'REACTION_FORCE',
'elemental_stress': 'ElementalSTRESS',
'elemental_strain': 'ElementalSTRAIN',
'elemental_mises': 'ElementalMISES',
'modulus': 'Young_modulus',
'poisson_ratio': 'Poisson_ratio',
'density': 'density',
'lte': 'linear_thermal_expansion_coefficient',
'lte_full': 'linear_thermal_expansion_coefficient_full',
'specific_heat': 'specific_heat',
'thermal_conductivity': 'thermal_conductivity',
'orient': 'ORIENTATION',
'boundary': 'boundary',
'cload': 'cload',
'fixtemp': 'fixtemp',
'istrain1': 'GaussSTRAIN1',
'istrain2': 'GaussSTRAIN2',
'istrain3': 'GaussSTRAIN3',
'istrain4': 'GaussSTRAIN4',
'istrain5': 'GaussSTRAIN5',
'istrain6': 'GaussSTRAIN6',
'istrain7': 'GaussSTRAIN7',
'istrain8': 'GaussSTRAIN8',
'vf': 'VF',
'pressure_start_shrinkage': 'pressure_start_shrinkage',
'specific_volume_start_shrinkage': 'specific_volume_start_shrinkage',
'average_temperature_start_shrinkage':
'average_temperature_start_shrinkage',
'time_start_shrinkage': 'time_start_shrinkage',
'shrinkage': 'shrinkage',
'gradient_temperature_mold': 'gradient_temperature_mold',
'shrinkage_mold': 'shrinkage_mold',
'pressure': 'pressure',
'specific_volume': 'specific_volume',
'average_temperature': 'average_temperature',
'max_temperature': 'max_temperature',
'thickness_flow_layer': 'thickness_flow_layer',
'viscosity': 'viscosity',
'shear_velocity': 'shear_velocity',
'shear_stress': 'shear_stress',
'flow_velocity': 'flow_velocity',
'fiber_orientation_tensor': 'fiber_orientation_tensor',
'fiber_orientation_vector': 'fiber_orientation_vector',
'fiber_velocity': 'fiber_velocity',
'skin_fiber_orientation_vector': 'skin_fiber_orientation_vector',
'inflow_gate': 'inflow_gate',
'flow_length': 'flow_length',
'flow_length_by_thickness': 'flow_length_by_thickness',
'temperature_difference': 'temperature_difference',
'flow_front_time': 'flow_front_time',
'normal': 'normal',
'area': 'area',
}
DICT_ALIASES = dict(DICT_ALIASES_CORE)
DICT_ALIASES.update({
v: v for v in DICT_ALIASES_CORE.values()})
DICT_INVERSE_ALIASES = {v: k for k, v in DICT_ALIASES_CORE.items()}
DICT_INVERSE_ALIASES.update({
v: v for v in DICT_INVERSE_ALIASES.values()})
LIST_NODAL = [
'node',
'displacement',
'disp',
'nodal_mises',
'nodal_stress',
'nodal_strain',
'reac',
't_cnt',
't_init',
'pressure_start_shrinkage',
'specific_volume_start_shrinkage',
'average_temperature_start_shrinkage',
'time_start_shrinkage',
'shrinkage',
'gradient_temperature_mold',
'shrinkage_mold',
'pressure',
'specific_volume',
'average_temperature',
'max_temperature',
'thickness_flow_layer',
'temperature_difference',
'flow_front_time',
]
LIST_ELEMENTAL = [
'density',
'elemental_mises',
'elemental_strain',
'elemental_stress',
'istrain1',
'istrain2',
'istrain3',
'istrain4',
'istrain5',
'istrain6',
'istrain7',
'istrain8',
'lte',
'lte_full',
'modulus',
'orient',
'poisson_ratio',
'vf',
'viscosity',
'shear_velocity',
'shear_stress',
'flow_velocity',
'fiber_orientation_tensor',
'fiber_orientation_vector',
'fiber_velocity',
'skin_fiber_orientation_vector',
]
LIST_CONSTRAINTS = [
'boundary',
'cload',
'fixtemp',
]
LIST_MATERIALS = [
'modulus',
'poisson_ratio',
'density',
'lte',
'lte_full',
'specific_heat',
'thermal_conductivity',
'linear_thermal_expansion_coefficient',
'linear_thermal_expansion_coefficient_full',
]
LINE_ELEMENT_NAMES = [
'line',
'line2',
]
SHELL_ELEMENT_NAMES = [
'tri',
'tri2',
'quad',
'quad2',
]
SOLID_ELEMENT_NAMES = [
'line',
'line2',
'tet',
'tet2',
'pyr',
'pyr2',
'prism',
'prism2',
'hex',
'hex2',
]
DICT_FEMIO_ELEMENT_TO_MESHIO_ELEMENT = {
'pt': 'vertex',
'line': 'line',
'line2': 'line3',
'tri': 'triangle',
'tri2': 'triangle6',
'quad': 'quad',
'quad2': 'quad8',
'tet': 'tetra',
'tet2': 'tetra10',
'pyr': 'pyramid',
'pyr2': 'pyramid13',
'prism': 'wedge',
'prism2': 'wedge15',
'hex': 'hexahedron',
'hex2': 'hexahedron27',
'hexprism': 'hexa_prism',
}
DICT_MESHIO_ELEMENT_TO_FEMIO_ELEMENT = {
v: k for k, v in DICT_FEMIO_ELEMENT_TO_MESHIO_ELEMENT.items()}
DICT_EXT = {
'fistr': '',
'obj': 'obj',
'stl': 'stl',
'ucd': 'inp',
'vtk': 'vtk',
}
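# Illustrative usage sketch; the helper below is hypothetical and not part of
# the original constants module. It shows how short aliases resolve to the
# canonical attribute names and how femio element types map to meshio names.
def _alias_lookup_example():
    canonical = DICT_ALIASES['disp']            # -> 'DISPLACEMENT'
    same = DICT_ALIASES[canonical]              # canonical names map to themselves
    short = DICT_INVERSE_ALIASES[canonical]     # -> 'disp'
    meshio_cell = DICT_FEMIO_ELEMENT_TO_MESHIO_ELEMENT['tet']   # -> 'tetra'
    femio_cell = DICT_MESHIO_ELEMENT_TO_FEMIO_ELEMENT['tetra']  # -> 'tet'
    return canonical, same, short, meshio_cell, femio_cell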
|
the-stack_0_15836 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Perceivon Hosting Inc.
# Copyright 2021, Vladimir Botka <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY [COPYRIGHT HOLDER] AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL [COPYRIGHT HOLDER] OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: iocage
short_description: FreeBSD iocage jail handling
description:
- The M(iocage) module allows several iocage commands to be executed through ansible.
  - Typical use cases include fetching releases; creating basejails, thickjails, templates and clones; starting, stopping and restarting jails; executing commands and managing packages inside jails; setting properties; and destroying jails.
options:
state:
description:
- I(state) of the desired result.
type: str
choices: [basejail, thickjail, template, present, cloned, started,
stopped, restarted, fetched, exec, pkg, exists, absent,
set, facts]
default: facts
name:
description:
- I(name) of the jail (former uuid).
type: str
pkglist:
description:
- Path to a JSON file containing packages to install. Only applicable when creating a jail.
type: path
properties:
description:
- I(properties) of the jail.
type: dict
args:
description:
- Additional arguments.
type: dict
user:
description:
- I(user) who runs the command I(cmd).
type: str
default: root
cmd:
description:
- Execute the command I(cmd) inside the specified jail I(name).
type: str
clone_from:
description:
- Clone the jail I(clone_from) to I(name). Use I(properties) to configure the clone.
type: str
release:
description:
- Specify which RELEASE to fetch, update, or create a jail.
type: str
update:
description:
- Update the fetch to the latest patch level.
type: bool
default: False
components:
description:
      - Uses a local file directory for the root directory instead
        of HTTP to download and/or update releases.
type: list
elements: path
aliases: [files, component]
requirements:
- lang/python >= 3.6
- sysutils/iocage
notes:
- Supports C(check_mode).
- The module always creates facts B(iocage_releases), B(iocage_templates), and B(iocage_jails)
- There is no mandatory option.
- Returns B(module_args) when debugging is set B(ANSIBLE_DEBUG=true)
seealso:
- name: iocage - A FreeBSD Jail Manager
description: iocage 1.2 documentation
link: https://iocage.readthedocs.io/en/latest/
- name: iocage -- jail manager using ZFS and VNET
description: FreeBSD System Manager's Manual
link: https://www.freebsd.org/cgi/man.cgi?query=iocage
author:
- Johannes Meixner (@xmj)
- dgeo (@dgeo)
- Berend de Boer (@berenddeboer)
- Dr Josef Karthauser (@Infiniverse)
- Kevin P. Fleming (@kpfleming)
- Ross Williams (@overhacked)
- david8001 (@david8001)
- luto (@luto)
- Keve Müller (@kevemueller)
- Mårten Lindblad (@martenlindblad)
- Vladimir Botka (@vbotka)
'''
EXAMPLES = r'''
- name: Create all iocage_* ansible_facts
iocage:
- name: Display lists of bases, names of templates, and names of jails
debug:
msg: |-
{{ iocage_releases }}
{{ iocage_templates.keys()|list }}
{{ iocage_jails.keys()|list }}
- name: Create jail without cloning
iocage:
name: foo
state: present
pkglist: /path/to/pkglist.json
properties:
ip4_addr: 'lo1|10.1.0.5'
boot: true
allow_sysvipc: true
defaultrouter: '10.1.0.1'
- name: Create template
iocage:
name: tplfoo
state: template
pkglist: /path/to/pkglist.json
properties:
ip4_addr: 'lo1|10.1.0.5'
boot: true
allow_sysvipc: true
defaultrouter: '10.1.0.1'
- name: Create a cloned jail. Creates basejail if needed.
iocage:
name: foo
state: present
clone_from: tplfoo
pkglist: /path/to/pkglist.json
properties:
ip4_addr: 'lo1|10.1.0.5'
boot: true
allow_sysvipc: true
defaultrouter: '10.1.0.1'
- name: Start existing jail
iocage:
name: foo
state: started
- name: Stop existing jail
iocage:
name: foo
state: stopped
- name: Restart existing jail
iocage:
name: foo
state: restarted
- name: Execute command in running jail
iocage:
name: foo
state: exec
cmd: service sshd start
- name: Destroy jail
iocage:
name: foo
state: absent
'''
RETURN = r'''
ansible_facts:
description: Facts to add to ansible_facts.
returned: always
type: dict
contains:
iocage_releases:
description: List of all bases.
returned: always
type: list
elements: str
sample: ['13.0-RELEASE']
iocage_templates:
description: Dictionary of all templates.
returned: always
type: dict
sample: {}
iocage_jails:
description: Dictionary of all jails.
returned: always
type: dict
sample: {}
module_args:
description: Information on how the module was invoked.
returned: debug
type: dict
'''
import json
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def _command_fail(module, label, cmd, rc, stdout, stderr):
module.fail_json(msg=f"{label}\ncmd: '{cmd}' return: {rc}\nstdout: '{stdout}'\nstderr: '{stderr}'")
def _get_iocage_facts(module, iocage_path, argument="all", name=None):
opt = dict(jails="list -hl",
templates="list -hlt",
releases="list -hr",
init="list -h")
if argument == "all":
# _init = _get_iocage_facts(module, iocage_path, "init")
_jails = _get_iocage_facts(module, iocage_path, "jails")
_templates = _get_iocage_facts(module, iocage_path, "templates")
_releases = _get_iocage_facts(module, iocage_path, "releases")
return dict(iocage_jails=_jails,
iocage_templates=_templates,
iocage_releases=_releases)
elif argument in opt:
cmd = f"{iocage_path} {opt[argument]}"
else:
module.fail_json(msg=f"_get_iocage_facts({argument}): argument not understood")
rc, state, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if rc != 0 and argument != "init":
_command_fail(module, "_get_iocage_facts()", cmd, rc, state, err)
elif argument == "init":
return {}
if argument == 'releases':
_releases = []
for line in state.split('\n'):
if re.match(r'\s*\d', line):
_releases.append(line.strip())
return _releases
_jails = {}
try:
for line in state.split('\n'):
if line == "":
continue
_jid = line.split('\t')[0]
if _jid == '---':
# non-iocage jails: skip all
break
elif re.match(r'(\d+|-)', _jid):
_fragments = line.split('\t')
if len(_fragments) == 10:
(_jid, _name, _boot, _state, _type, _release, _ip4, _ip6, _template, _basejail) = _fragments
else:
(_jid, _name, _boot, _state, _type, _release, _ip4, _ip6, _template) = _fragments
if _name != "":
_properties = _jail_get_properties(module, iocage_path, _name)
_jails[_name] = {"jid": _jid, "name": _name, "state": _state, "properties": _properties}
else:
module.fail_json(msg=f"_get_iocage_facts():\nUnreadable stdout line from cmd '{cmd}': '{line}'")
except ValueError:
module.fail_json(msg=f"unable to parse {state}")
if name is not None:
if name in _jails:
return _jails[name]
else:
return {}
return _jails
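# Shape of the facts built above (values are illustrative):
#   _get_iocage_facts(module, iocage_path, "jails") ->
#       {'foo': {'jid': '1', 'name': 'foo', 'state': 'up', 'properties': {...}}}
#   _get_iocage_facts(module, iocage_path, "releases") -> ['13.0-RELEASE', ...]
#   "all" wraps the three results under the iocage_jails, iocage_templates
#   and iocage_releases keys.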
def _jail_started(module, iocage_path, name):
cmd = f"{iocage_path} list -h"
rc, state, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if rc != 0:
_command_fail(module, f"jail_started({name})", cmd, rc, state, err)
st = None
    for line in state.split('\n'):
        # skip blank lines (e.g. the trailing newline) to avoid an IndexError
        if line == "":
            continue
        u = line.split('\t')[1]
if u == name:
s = line.split('\t')[2]
if s == 'up':
st = True
break
elif s == 'down':
st = False
break
else:
module.fail_json(msg=f"Jail {name} unknown state: {line}")
return st
def jail_exists(module, iocage_path, argument=None, assume_absent=False):
cmd = f"{iocage_path} get host_hostuuid {argument}"
rc, name, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
name = ""
# local variable '_msg' is assigned to but never used [F841]
# _msg = ""
if name != "" and assume_absent:
module.fail_json(msg=f"Jail {argument} exists.")
return name.strip()
def jail_start(module, iocage_path, name):
cmd = f"{iocage_path} start {name}"
rc = 1
out = ""
_msg = ""
_changed = True
if not module.check_mode:
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Jail {name} could not be started.", cmd, rc, out, err)
_msg = f"Jail {name} was started.\n{out}"
else:
_msg = f"Jail {name} would have been started."
return _changed, _msg
def _props_to_str(props):
argstr = ""
# local variable 'minargs' is assigned to but never used [F841]
# minargs = ""
for _prop in props:
_val = props[_prop]
if _val == '-' or _val == '' or not _val:
continue
if _val in ['yes', 'on', True]:
argstr += f"{_prop}=1 "
elif _val in ['no', 'off', False]:
argstr += f"{_prop}=0 "
elif _val in ['-', 'none']:
argstr += f"{_prop}={_val} "
else:
argstr += f"{_prop}={str(_val)} "
return argstr
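# Illustrative behaviour of _props_to_str (hypothetical input, not taken from
# the module's tests):
#     _props_to_str({'boot': True, 'ip4_addr': 'lo1|10.1.0.5', 'notes': ''})
#     -> "boot=1 ip4_addr=lo1|10.1.0.5 "
# 'yes'/'on'/True become "=1", 'no'/'off' become "=0", while empty, '-' or
# otherwise falsy values are skipped entirely.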
def release_fetch(module, iocage_path, update=False, release="NO-RELEASE", components=None, args=""):
if not module.check_mode:
if update:
args += " -U"
if components is not None:
for _component in components:
if _component != "":
args += f" -F {_component}"
cmd = f"{iocage_path} fetch -r {release} {args}"
rc = 1
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Release {release} could not be fetched.", cmd, rc, out, err)
_changed = True
if update:
_msg = f"Release {release} was successfully updated."
else:
_msg = f"Release {release} was successfully fetched."
else:
_changed = True
_msg = f"Release {release} would have been fetched."
return release, _changed, _msg
def jail_restart(module, iocage_path, name):
cmd = f"{iocage_path} restart {name}"
rc = 1
out = ""
_msg = ""
_changed = True
if not module.check_mode:
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Jail {name} could not be restarted.", cmd, rc, out, err)
_msg = f"Jail {name} was restarted.\n{rc}"
else:
_msg = f"Jail {name} would have been restarted."
return _changed, _msg
def jail_stop(module, iocage_path, name):
cmd = f"{iocage_path} stop {name}"
    _changed = True
rc = 1
out = ""
_msg = ""
if not module.check_mode:
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Jail {name} could not be stopped.", cmd, rc, out, err)
_msg = f"Jail {name} was stopped.\n"
else:
_msg = f"Jail {name} would have been stopped"
return _changed, _msg
def jail_exec(module, iocage_path, name, user="root", _cmd='/usr/bin/true'):
rc = 1
out = ""
err = ""
_msg = ""
_changed = True
if not module.check_mode:
cmd = f"{iocage_path} exec -u {user} {name} -- {_cmd}"
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module,
f"Command '{_cmd}' could not be executed in jail '{name}'.",
cmd, rc, out, err)
_msg = (f"Command '{cmd}' was executed in jail '{name}'.\nrc: {rc}\nstdout:\n{out}\nstderr:\n{err}")
else:
_msg = f"Command '{_cmd}' would have been executed in jail '{name}'."
return _changed, _msg, out, err
def jail_pkg(module, iocage_path, name, _cmd='info'):
rc = 1
out = ""
err = ""
_msg = ""
_changed = True
if not module.check_mode:
cmd = f"{iocage_path} pkg {name} {_cmd}"
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module,
f"pkg '{_cmd}' could not be executed in jail '{name}'.",
cmd, rc, out, err)
_msg = (f"pkg '{_cmd}' was executed in jail '{name}'.\nstdout:\n{out}\nstderr:\n{err}")
else:
_msg = f"pkg '{_cmd}' would have been executed in jail '{name}'."
return _changed, _msg, out, err
def _jail_get_properties(module, iocage_path, name):
rc = 1
out = ""
if name is not None and name != "":
properties = {}
cmd = f"{iocage_path} get all {name}"
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if rc == 0:
_properties = [line.strip() for line in out.strip().split('\n')]
for p in _properties:
for _property in [p.split(':', 1)]:
if len(_property) == 2:
properties[_property[0]] = _property[1]
else:
module.fail_json(msg=f"error parsing property {p} from {str(properties)}")
else:
_command_fail(module, f"_jail_get_properties({name})", cmd, rc, out, err)
elif module.check_mode and name == "CHECK_MODE_FAKE_UUID":
properties = {"CHECK_NEW_JAIL": True}
else:
module.fail_json(msg=f"jail {name} not found.")
return properties
def jail_set(module, iocage_path, name, properties=None):
if properties is None:
properties = {}
rc = 1
out = ""
_msg = ""
_changed = False
cmd = ""
_existing_props = _jail_get_properties(module, iocage_path, name)
_props_to_be_changed = {}
for _property in properties:
if _property not in _existing_props:
continue
if _existing_props[_property] == '-' and not properties[_property]:
continue
if _property == "template":
continue
propval = None
_val = properties[_property]
_oval = _existing_props[_property]
if _val in [0, 'no', 'off', False]:
propval = 0
elif _val in [1, 'yes', 'on', True]:
propval = 1
elif isinstance(_oval, str):
if _val == '':
propval = 'none'
else:
propval = f'{_val}'
else:
module.fail_json(msg="Unable to set attribute {0} to {1} for jail {2}"
.format(_property, str(_val).replace("'", "'\\''"), name))
if 'CHECK_NEW_JAIL' in _existing_props or \
(_property in _existing_props.keys() and str(_existing_props[_property]) != str(propval)) and \
propval is not None:
_props_to_be_changed[_property] = propval
if len(_props_to_be_changed) > 0:
need_restart = False
for p in _props_to_be_changed.keys():
if p in ['ip4_addr', 'ip6_addr', 'template', 'interfaces', 'vnet', 'host_hostname']:
need_restart = _jail_started(module, iocage_path, name)
cmd = f"{iocage_path} set {_props_to_str(_props_to_be_changed)} {name}"
if not module.check_mode:
if need_restart:
jail_stop(module, iocage_path, name)
rc, out, err = module.run_command(cmd)
if need_restart:
jail_start(module, iocage_path, name)
if not rc == 0 or (rc == 1 and "is already a jail!" in err):
_command_fail(module, f"Attributes could not be set on jail '{name}'.", cmd, rc, out, err)
_msg = f"properties {str(_props_to_be_changed.keys())} were set on jail '{name}' with cmd={cmd}."
else:
_msg = f"properties {str(_props_to_be_changed.keys())} would have been changed for jail {name} with command {cmd}"
_msg += str(_props_to_be_changed)
_changed = True
else:
_changed = False
_msg = f"properties {properties.keys()} already set for jail {name}"
return _changed, _msg
def jail_create(module, iocage_path, name=None, properties=None, clone_from_name=None,
clone_from_template=None, release=None, basejail=False, thickjail=False, pkglist=None):
if properties is None:
properties = {}
rc = 1
out = ""
_msg = ""
if clone_from_name is None and clone_from_template is None:
if basejail:
cmd = f"{iocage_path} create -b -n {name} -r {release}"
elif thickjail:
cmd = f"{iocage_path} create -T -n {name} -r {release} {_props_to_str(properties)}"
else:
cmd = f"{iocage_path} create -n {name} -r {release} {_props_to_str(properties)}"
if pkglist:
cmd += " --pkglist=" + pkglist
elif clone_from_name:
cmd = f"{iocage_path} clone {clone_from_name} -n {name} {_props_to_str(properties)}"
elif clone_from_template:
cmd = f"{iocage_path} create -t {clone_from_template} -n {name} {_props_to_str(properties)}"
if not module.check_mode:
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Jail '{name}' could not be created.", cmd, rc, out, err)
_msg += f"Jail '{name}' was created with properties {str(properties)}.\n\n{cmd}"
name = jail_exists(module, iocage_path, name)
if not name:
module.fail_json(msg=f"Jail '{name}' not created ???\ncmd: {cmd}\nstdout:\n{out}\nstderr:\n{err}")
else:
_msg += f"Jail {name} would be created with command:\n{cmd}\n"
name = f"CHECK_MODE_FAKE_UUID_FOR_{name}"
return name, True, _msg
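# Sketch of the commands jail_create builds (names and release are hypothetical):
#   basejail:       "<iocage> create -b -n foo -r 13.0-RELEASE"
#   thickjail:      "<iocage> create -T -n foo -r 13.0-RELEASE boot=1 ..."
#   clone of jail:  "<iocage> clone tplfoo -n foo boot=1 ..."
#   from template:  "<iocage> create -t tplfoo -n foo boot=1 ..."
# where <iocage> is the resolved iocage binary path and the trailing
# "key=value" pairs come from _props_to_str(properties).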
def jail_update(module, iocage_path, name):
rc = 1
out = ""
_msg = ""
_changed = False
cmd = f"{iocage_path} update {name}"
if not module.check_mode:
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Jail '{name}' not updated.", cmd, rc, out, err)
if "No updates needed" in out:
_changed = False
elif "updating to" in out:
            # list() is required because filter() returns an iterator in Python 3
            nv = re.search(r' ([^ ]*):$',
                           list(filter(lambda x: 'updating to' in x,
                                       out.split('\n')))[0]).group(1)
_msg = f"jail {name} updated to {nv}"
_changed = True
else:
_msg = "Unable to check for updates in check_mode"
return _changed, _msg
def jail_destroy(module, iocage_path, name):
rc = 1
out = ""
_msg = ""
_changed = True
if not module.check_mode:
cmd = f"{iocage_path} destroy -f {name}"
rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'),
errors='surrogate_or_strict')
if not rc == 0:
_command_fail(module, f"Jail '{name}' could not be destroyed.", cmd, rc, out, err)
_msg = f"Jail '{name}' was destroyed."
jail_exists(module, iocage_path, name, True)
else:
_msg = f"Jail {name} would have been destroyed."
return name, _changed, _msg
def run_module():
module_args = dict(
state=dict(type='str',
default="facts",
choices=["basejail", "thickjail", "template", "present", "cloned", "started",
"stopped", "restarted", "fetched", "exec", "pkg", "exists", "absent",
"set", "facts"],),
name=dict(type='str'),
pkglist=dict(type='path'),
properties=dict(type='dict'),
args=dict(type='dict'),
user=dict(type='str', default="root"),
cmd=dict(type='str'),
clone_from=dict(type='str'),
release=dict(type='str'),
update=dict(type='bool', default=False,),
components=dict(type='list', elements='path', aliases=["files", "component"],),)
module = AnsibleModule(argument_spec=module_args,
supports_check_mode=True)
iocage_path = module.get_bin_path('iocage', True)
if not iocage_path:
module.fail_json(msg='Utility iocage not found!')
p = module.params
name = p["name"]
properties = p["properties"]
cmd = p["cmd"]
args = p["args"]
clone_from = p["clone_from"]
user = p["user"]
release = p["release"]
update = p["update"]
components = p["components"]
pkglist = p["pkglist"]
msgs = []
changed = False
out = ""
err = ""
facts = _get_iocage_facts(module, iocage_path, "all")
jails = {}
for u in facts["iocage_jails"]:
jails[u] = facts["iocage_jails"][u]
for u in facts["iocage_templates"]:
jails[u] = facts["iocage_templates"][u]
if p["state"] == "facts":
result = dict(changed=changed,
msg=", ".join(msgs),
ansible_facts=facts,
stdout=out,
stderr=err,
)
if module._debug:
result['module_args'] = f"{(json.dumps(module.params, indent=4))}"
module.exit_json(**result)
# Input validation
# states that need name of jail
if name is None and p["state"] in ["started", "stopped", "restarted", "exists", "set", "exec", "pkg", "absent"]:
module.fail_json(msg=f"name needed for state {p['state']}")
# states that need release defined
if p["state"] in ["basejail", "thickjail", "template", "fetched", "present"] or p["update"]:
if release is None or release == "":
# if name and not (upgrade):
# _jail_props = _jail_get_properties(module, iocage_path, name)
# release = _jail_props["release"]
# else:
rc, out, err = module.run_command("uname -r")
if rc != 0:
module.fail_json(msg="Unable to run uname -r ???")
matches = re.match(r'(\d+\.\d+)\-(RELEASE|RC\d+).*', out.strip())
if matches is not None:
release = matches.group(1) + "-RELEASE"
else:
module.fail_json(msg=f"Release not recognised: {out}")
# need existing jail
if p["state"] in ["started", "stopped", "restarted", "set", "exec", "pkg", "exists"]:
if name not in jails:
module.fail_json(msg=f"Jail '{name}' doesn't exist")
# states that need running jail
if p["state"] in ["exec", "pkg"] and jails[name]["state"] != "up":
module.fail_json(msg=f"Jail '{name}' not running")
if p["state"] == "started":
if jails[name]["state"] != "up":
changed, _msg = jail_start(module, iocage_path, name)
msgs.append(_msg)
jails[name] = _get_iocage_facts(module, iocage_path, "jails", name)
if jails[name]["state"] != "up" and not module.check_mode:
module.fail_json(msg=f"Starting jail {name} failed with {_msg}")
else:
msgs.append(f"Jail {name} already started")
elif p["state"] == "stopped":
if jails[name]["state"] == "up":
changed, _msg = jail_stop(module, iocage_path, name)
msgs.append(_msg)
if not module.check_mode:
jails[name] = _get_iocage_facts(module, iocage_path, "jails", name)
if jails[name]["state"] != "down":
module.fail_json(msg=f"Stopping jail {name} failed with {_msg}")
else:
msgs.append(f"Jail {name} already stopped")
elif p["state"] == "restarted":
changed, _msg = jail_restart(module, iocage_path, name)
jails[name] = _get_iocage_facts(module, iocage_path, "jails", name)
if jails[name]["state"] != "up":
module.fail_json(msg=f"Restarting jail {name} failed with {_msg}")
msgs.append(_msg)
elif p["state"] == "exec":
changed, _msg, out, err = jail_exec(module, iocage_path, name, user, cmd)
msgs.append(_msg)
elif p["state"] == "pkg":
changed, _msg, out, err = jail_pkg(module, iocage_path, name, cmd)
msgs.append(_msg)
elif p["state"] == "exists":
msgs.append(f"Jail {name} exists")
elif p["state"] == "fetched":
if update or release not in facts["iocage_releases"]:
rel, changed, _msg = release_fetch(module, iocage_path, update, release, components, args)
msgs.append(_msg)
facts["iocage_releases"] = _get_iocage_facts(module, iocage_path, "releases")
            if release not in facts["iocage_releases"] and not module.check_mode:
module.fail_json(msg=f"Fetching release {release} failed with {_msg}")
else:
msgs.append(f"Release {release} already fetched")
elif p["state"] == "set":
changed, _msg = jail_set(module, iocage_path, name, properties)
msgs.append(_msg)
jails[name] = _get_iocage_facts(module, iocage_path, "jails", name)
elif p["state"] in ["present", "cloned", "template", "basejail", "thickjail"]:
do_basejail = False
do_thickjail = False
clone_from_name = None
clone_from_template = None
# local variable 'jail_exists' is assigned to but never used [F841]
# jail_exists = False
if p["state"] != "cloned" and release not in facts["iocage_releases"]:
release, _release_changed, _release_msg = release_fetch(module, iocage_path, update, release, components, args)
if _release_changed:
facts["iocage_releases"] = _get_iocage_facts(module, iocage_path, "releases")
msgs.append(_release_msg)
if p["state"] == "template":
if properties is None:
properties = {}
properties["template"] = "true"
properties["boot"] = "false"
if name in facts["iocage_templates"]:
# local variable 'jail_exists' is assigned to but never used [F841]
# jail_exists = True
pass
elif p["state"] == "basejail":
properties = {}
do_basejail = True
elif p["state"] == "thickjail":
do_thickjail = True
elif clone_from:
if clone_from in facts["iocage_jails"]:
clone_from_name = clone_from
elif clone_from in facts["iocage_templates"]:
clone_from_template = clone_from
else:
if module.check_mode:
# todo: use facts to check if basejail would have been created before
msgs.append(f"Jail {name} would have been cloned from (nonexisting) jail or template {clone_from}")
else:
module.fail_json(msg=f"unable to create jail {name}\nbasejail {clone_from} doesn't exist")
if name not in facts["iocage_templates"] and name not in facts["iocage_jails"]:
name, changed, _msg = jail_create(module, iocage_path, name, properties, clone_from_name,
clone_from_template, release, do_basejail, do_thickjail,
pkglist)
msgs.append(_msg)
else:
changed, _msg = jail_set(module, iocage_path, name, properties)
msgs.append("%s already exists" % (name))
if changed:
msgs.append(_msg)
if p["update"]:
if release not in facts["iocage_releases"]:
release, _release_changed, _release_msg = release_fetch(module, iocage_path, update, release, components, args)
if _release_changed:
_msg += _release_msg
facts["iocage_releases"] = _get_iocage_facts(module, iocage_path, "releases")
            changed, _msg = jail_update(module, iocage_path, name)
msgs.append(_msg)
# # re-set properties (iocage missing them on creation - iocage-sh bug)
# if len(p["properties"]) > 0:
# changed, _msg = jail_set(module, iocage_path, name, properties)
# if changed:
# msgs.append(_msg)
if changed:
if p["state"] == "template":
facts["iocage_templates"][name] = _get_iocage_facts(module, iocage_path, "templates", name)
else:
facts["iocage_jails"][name] = _get_iocage_facts(module, iocage_path, "jails", name)
elif p["state"] == "absent":
if name in jails:
if jails[name]['state'] == "up":
changed, _msg = jail_stop(module, iocage_path, name)
msgs.append(_msg)
name, changed, _msg = jail_destroy(module, iocage_path, name)
msgs.append(_msg)
del(jails[name])
else:
_msg = f"Jail {name} is already absent."
msgs.append(_msg)
if name in facts["iocage_jails"]:
del(facts["iocage_jails"][name])
_msg = f"Jail {name} removed from iocage_jails."
msgs.append(_msg)
if name in facts["iocage_templates"]:
del(facts["iocage_templates"][name])
_msg = f"Jail {name} removed from iocage_templates."
msgs.append(_msg)
result = dict(changed=changed,
msg=", ".join(msgs),
ansible_facts=facts,
stdout=out,
stderr=err,
)
if module._debug:
result['module_args'] = f"{(json.dumps(module.params, indent=4))}"
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
the-stack_0_15837 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class ApplicationResourceDescription(TrackedResource):
"""This type describes an application resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified identifier for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:param location: The geo-location where the resource lives
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:ivar provisioning_state: State of the resource.
:vartype provisioning_state: str
:param description: User readable description of the application.
:type description: str
:param debug_params: Internal use.
:type debug_params: str
:param services: describes the services in the application.
:type services:
list[~azure.mgmt.servicefabricmesh.models.ServiceResourceDescription]
:ivar health_state: Describes the health state of an application resource.
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
:vartype health_state: str or
~azure.mgmt.servicefabricmesh.models.HealthState
    :ivar unhealthy_evaluation: When the application's health state is not
     'Ok', this gives additional details from the Service Fabric Health
     Manager explaining why the application is marked unhealthy.
:vartype unhealthy_evaluation: str
:ivar status: Status of the application resource. Possible values include:
'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed'
:vartype status: str or
~azure.mgmt.servicefabricmesh.models.ApplicationResourceStatus
:ivar status_details: Gives additional information about the current
status of the application deployment.
:vartype status_details: str
:ivar service_names: Names of the services in the application.
:vartype service_names: list[str]
:param diagnostics: Describes the diagnostics definition and usage for an
application resource.
:type diagnostics:
~azure.mgmt.servicefabricmesh.models.DiagnosticsDescription
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'health_state': {'readonly': True},
'unhealthy_evaluation': {'readonly': True},
'status': {'readonly': True},
'status_details': {'readonly': True},
'service_names': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'debug_params': {'key': 'properties.debugParams', 'type': 'str'},
'services': {'key': 'properties.services', 'type': '[ServiceResourceDescription]'},
'health_state': {'key': 'properties.healthState', 'type': 'str'},
'unhealthy_evaluation': {'key': 'properties.unhealthyEvaluation', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
'service_names': {'key': 'properties.serviceNames', 'type': '[str]'},
'diagnostics': {'key': 'properties.diagnostics', 'type': 'DiagnosticsDescription'},
}
def __init__(self, location=None, tags=None, description=None, debug_params=None, services=None, diagnostics=None):
super(ApplicationResourceDescription, self).__init__(location=location, tags=tags)
self.provisioning_state = None
self.description = description
self.debug_params = debug_params
self.services = services
self.health_state = None
self.unhealthy_evaluation = None
self.status = None
self.status_details = None
self.service_names = None
self.diagnostics = diagnostics
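# Illustrative construction (hypothetical values; services and diagnostics
# would normally be ServiceResourceDescription / DiagnosticsDescription
# instances supplied by the caller):
#     app = ApplicationResourceDescription(
#         location='eastus',
#         tags={'env': 'dev'},
#         description='demo app',
#         services=[],
#     )
# Read-only fields such as provisioning_state, health_state and status are
# populated by the server and deserialized through _attribute_map, which is
# why they are initialised to None above.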
|
the-stack_0_15838 | import numpy as np
import scipy.stats as stats
from sira.modelling.structural import Base
from sira.modelling.structural import Element as _Element
from sira.modelling.structural import Info
class Algorithm:
@staticmethod
def factory(response_params):
function_name = response_params["function_name"]
funcname_nocase = str(function_name).casefold()
if funcname_nocase in [
"stepfunc", "step_func", "stepfunction", "step_function"]:
return StepFunc(**response_params)
elif funcname_nocase in [
"lognormal", "lognormalcdf", "lognormal_cdf"]:
return LogNormalCDF(**response_params)
elif funcname_nocase in [
"normal", "normalcdf", "normal_cdf"]:
return NormalCDF(**response_params)
elif funcname_nocase in [
"rayleigh", "rayleighcdf", "rayleigh_cdf"]:
return RayleighCDF(**response_params)
elif funcname_nocase in [
"ConstantFunction".lower(), "constant_function"]:
return ConstantFunction(**response_params)
elif funcname_nocase in [
"Level0Response".lower(), "Level0Recovery".lower()]:
return Level0Response(**response_params)
elif funcname_nocase in [
"PiecewiseFunction".lower(), "piecewise_function"]:
return PiecewiseFunction(**response_params)
raise ValueError("No response model matches {}".format(function_name))
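# Minimal factory usage sketch; the parameter values are hypothetical and the
# extra 'function_name' key is passed through to the model, exactly as the
# factory's own **response_params calls do.
def _algorithm_factory_example():
    response_params = {
        "function_name": "LogNormalCDF",
        "median": 0.5,
        "beta": 0.4,
    }
    model = Algorithm.factory(response_params)
    # probability of reaching the damage state at hazard intensity 0.5;
    # this equals 0.5 because 0.5 is the median of the fitted lognormal
    return model(0.5)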
class Level0Response(Base):
"""
Standard response for no damage.
"""
mode = 1
damage_ratio = 0.0
functionality = 1.0
beta = 0.0
median = 1.0
lower_limit = _Element(
'float',
'lower limit of function if part of piecewise function',
None, [lambda x: float(x) > 0.])
upper_limit = _Element(
'float',
'upper limit of function if part of piecewise function',
None, [lambda x: float(x) > 0.])
def __call__(self, hazard_level):
return 0.0
class RayleighCDF(Base):
"""
    The Rayleigh CDF response model for components.
"""
scale = _Element(
'float',
'Scale parameter for Rayleigh CDF.',
_Element.NO_DEFAULT, validators=[lambda x: float(x) > 0.])
loc = _Element(
'float',
'Location parameter for Rayleigh CDF.',
default=0, validators=[lambda x: float(x) >= 0.])
def __call__(self, x):
"""
SciPy implementation of Rayleigh CDF:
loc = shift parameter
scale = scaling parameter
"""
return stats.rayleigh.cdf(x, loc=self.loc, scale=self.scale)
class LogNormalCDF(Base):
"""
The lognormal CDF response model for components.
"""
median = _Element('float', 'Median of the log normal CDF.',
_Element.NO_DEFAULT, [lambda x: float(x) > 0.])
beta = _Element('float', 'Log standard deviation of the log normal CDF',
_Element.NO_DEFAULT, [lambda x: float(x) > 0.])
location = _Element('float', 'Location parameter of the log normal CDF',
0.0, [lambda x: float(x) > 0.])
lower_limit = _Element(
'float',
'lower limit of function if part of piecewise function',
None,
[lambda x: float(x) > 0.])
upper_limit = _Element(
'float',
'upper limit of function if part of piecewise function',
None,
[lambda x: float(x) > 0.])
def __call__(self, data_point):
"""
SciPy implementation of LogNormal CDF:
scipy.stats.lognorm.cdf(x, s, loc=0, scale=1)
where,
s = sigma # or beta or standard deviation (shape parameter)
scale = exp(mean) = median
loc is used to shift the distribution and commonly not used
"""
return stats.lognorm.cdf(
data_point, self.beta, loc=self.location, scale=self.median)
class NormalCDF(Base):
"""
The normal CDF response model for components
"""
# -----------------------------------------------
mean = _Element(
'float',
'Mean of the normal or Gaussian CDF',
_Element.NO_DEFAULT,
[lambda x: float(x) >= 0.])
stddev = _Element(
'float',
'Standard deviation of the normal CDF',
_Element.NO_DEFAULT,
[lambda x: float(x) > 0.])
# -----------------------------------------------
lower_limit = _Element(
'float',
'lower limit of function if part of piecewise function',
-np.inf,
[lambda x: float(x) > 0.])
upper_limit = _Element(
'float',
'upper limit of function if part of piecewise function',
np.inf,
[lambda x: float(x) > 0.])
# -----------------------------------------------
def __call__(self, data_point, inverse=False):
"""
SciPy implementation of Normal CDF:
scipy.stats.norm.cdf(x, loc=0, scale=1)
where,
loc = Mean
scale = Standard Deviation i.e. square root of Variance
"""
if not inverse:
return stats.norm.cdf(data_point,
loc=self.mean,
scale=self.stddev)
elif inverse:
return stats.norm.ppf(data_point,
loc=self.mean,
scale=self.stddev)
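# Illustrative round trip (hypothetical values): for a NormalCDF with
# mean=0.3 and stddev=0.1, __call__(0.3) returns 0.5, and
# __call__(0.5, inverse=True) returns 0.3 again, since ppf is the inverse
# of cdf for the same loc/scale.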
class ConstantFunction(Base):
"""
A function for defining a constant amplitude for a given range
"""
amplitude = _Element(
'float',
'Constant amplitude of function',
_Element.NO_DEFAULT, [lambda x: float(x) >= 0.])
lower_limit = _Element(
'float',
'lower limit of function if part of piecewise function',
None, [lambda x: float(x) >= 0.])
upper_limit = _Element(
'float',
'upper limit of function if part of piecewise function',
None, [lambda x: float(x) >= 0])
def __call__(self, hazard_intensity):
return self.amplitude
class StepFunc(Base):
"""
    A response model that does not have a cumulative distribution
    function, but instead uses a series of steps for damage.
"""
xys = _Element(
'XYPairs', 'A list of X, Y pairs.', list,
[lambda xy: [(float(x), float(y)) for x, y in xy]])
lower_limit = _Element(
'float',
'lower limit of function if part of piecewise function',
None,
[lambda x: float(x) > 0.])
upper_limit = _Element(
'float',
'upper limit of function if part of piecewise function',
None,
[lambda x: float(x) > 0.])
def __call__(self, hazard_intensity):
"""
        Note: each step applies to hazard intensities strictly below its
        x value, i.e. each interval is closed on the left and open on
        the right.
"""
for x, y in self.xys: # noqa: E1133
if hazard_intensity < x:
return y
raise ValueError('value is greater than all xs!')
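# Worked example (hypothetical xys): with xys = [(0.2, 0.0), (0.5, 0.3), (1.0, 1.0)],
# a hazard intensity of 0.1 returns 0.0, 0.3 returns 0.3 and 0.9 returns 1.0,
# while 1.2 raises ValueError because it exceeds every x. A value exactly
# equal to an x (e.g. 0.5) falls through to the next pair and returns 1.0.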
class XYPairs(object):
"""
A list of float values that implement a step function.
"""
description = Info("The (x, f(x)) pairs defining a step function.")
def __init__(self, pairs):
"""
Create the tuple list containing the float values.
:param pairs: An iterable container of tuples containing floats
"""
self.pairs = pairs
def __iter__(self):
"""
Return the XYPairs
:return: iterator over the XYPairs
"""
return iter(self.pairs)
class PiecewiseFunction(object):
"""
    This class builds a piecewise function defined by algorithm constructor
data of a specified format. This data is part of the defined
attributes of a system Component.
Each dict in the list contains:
- the parameters required to construct an algorithm, and
- the conditions where that algorithm will be applicable
"""
piecewise_function_constructor = None
def __init__(self, **kwargs):
"""
input: a list of dicts.
Dict name must be 'piecewise_function_constructor'
"""
for k, v in kwargs.items():
setattr(self, k, v)
self.functions = []
self.validranges = []
for param_dict in self.piecewise_function_constructor: # noqa: E1133
lo = self.check_limit(param_dict['lower_limit'], which_lim='lower')
hi = self.check_limit(param_dict['upper_limit'], which_lim='upper')
self.functions.append(Algorithm.factory(param_dict))
self.validranges.append((lo, hi))
def check_limit(self, val, which_lim):
if which_lim == 'lower':
inf, infstr = -np.inf, ['-np.inf', '-inf']
else:
inf, infstr = np.inf, ['np.inf', '+np.inf', 'inf', '+inf']
if (val is None) or str(val) in ['', 'NA', *infstr]:
val = inf
else:
try:
val = float(val)
except ValueError:
print(f"Invalid value passed for {which_lim} limit of function.")
exit(1)
return val
def condfunc(self, x, func_lims):
return (x >= func_lims[0]) & (x < func_lims[1])
def pwfunc(self, x):
x = np.asarray(x)
y = np.zeros(x.shape)
for i, func in enumerate(self.functions):
func_lims = self.validranges[i]
y += self.condfunc(x, func_lims) * func(x) # noqa: W0123
return y
def __call__(self, hazard_intensity):
"""
input: hazard intensity value
output: probability of a response (linked to a damage state)
"""
vectorized_pwf = np.vectorize(self.pwfunc)
return vectorized_pwf(hazard_intensity)
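# Constructor-data sketch; the parameters below are hypothetical and assume
# the structural Base class accepts the extra keys, as Algorithm.factory does.
def _piecewise_function_example():
    pwf = PiecewiseFunction(piecewise_function_constructor=[
        {"function_name": "ConstantFunction", "amplitude": 0.0,
         "lower_limit": 0.0, "upper_limit": 0.3},
        {"function_name": "StepFunc", "xys": [(0.6, 0.2), (10.0, 1.0)],
         "lower_limit": 0.3, "upper_limit": 10.0},
    ])
    # each input point is routed to the algorithm whose range contains it
    return pwf([0.1, 0.4, 0.8])  # -> approximately [0.0, 0.2, 1.0]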
|
the-stack_0_15839 | # Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The contents of this file are mainly copied from cm_api sources,
# released by Cloudera. Codes not used by Sahara CDH plugin are removed.
# You can find the original codes at
#
# https://github.com/cloudera/cm_api/tree/master/python/src/cm_api
#
# To satisfy the pep8 and python3 tests, we did some changes to the codes.
# We also change some importings to use Sahara inherited classes.
import copy
import datetime
import time
from oslo_serialization import jsonutils as json
from oslo_utils import reflection
import six
from sahara.plugins import context
from sahara_plugin_cdh.i18n import _
from sahara_plugin_cdh.plugins.cdh import exceptions as ex
class Attr(object):
"""Base Attribute
Encapsulates information about an attribute in the JSON encoding of the
object. It identifies properties of the attribute such as whether it's
read-only, its type, etc.
"""
DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
def __init__(self, atype=None, rw=True, is_api_list=False):
self._atype = atype
self._is_api_list = is_api_list
self.rw = rw
def to_json(self, value, preserve_ro):
"""Returns the JSON encoding of the given attribute value
If the value has a 'to_json_dict' object, that method is called.
Otherwise, the following values are returned for each input type:
- datetime.datetime: string with the API representation of a date.
- dictionary: if 'atype' is ApiConfig, a list of ApiConfig objects.
- python list: python list (or ApiList) with JSON encoding of items
- the raw value otherwise
"""
if hasattr(value, 'to_json_dict'):
return value.to_json_dict(preserve_ro)
elif isinstance(value, dict) and self._atype == ApiConfig:
return config_to_api_list(value)
elif isinstance(value, datetime.datetime):
return value.strftime(self.DATE_FMT)
elif isinstance(value, list) or isinstance(value, tuple):
if self._is_api_list:
return ApiList(value).to_json_dict()
else:
return [self.to_json(x, preserve_ro) for x in value]
else:
return value
def from_json(self, resource_root, data):
"""Parses the given JSON value into an appropriate python object
This means:
- a datetime.datetime if 'atype' is datetime.datetime
- a converted config dictionary or config list if 'atype' is ApiConfig
- if the attr is an API list, an ApiList with instances of 'atype'
- an instance of 'atype' if it has a 'from_json_dict' method
- a python list with decoded versions of the member objects if the
input is a python list.
- the raw value otherwise
"""
if data is None:
return None
if self._atype == datetime.datetime:
return datetime.datetime.strptime(data, self.DATE_FMT)
elif self._atype == ApiConfig:
# ApiConfig is special. We want a python dictionary for summary
# views, but an ApiList for full views. Try to detect each case
# from the JSON data.
if not data['items']:
return {}
first = data['items'][0]
return json_to_config(data, len(first) == 2)
elif self._is_api_list:
return ApiList.from_json_dict(data, resource_root, self._atype)
elif isinstance(data, list):
return [self.from_json(resource_root, x) for x in data]
elif hasattr(self._atype, 'from_json_dict'):
return self._atype.from_json_dict(data, resource_root)
else:
return data
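# Round-trip sketch (hypothetical values): an Attr(atype=datetime.datetime)
# turns datetime.datetime(2014, 1, 1) into "2014-01-01T00:00:00.000000Z" in
# to_json() and parses that string back in from_json(); plain values such as
# strings or ints pass through both directions unchanged.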
class ROAttr(Attr):
"""Subclass that just defines the attribute as read-only."""
def __init__(self, atype=None, is_api_list=False):
Attr.__init__(self, atype=atype, rw=False, is_api_list=is_api_list)
def check_api_version(resource_root, min_version):
"""Check API version
Checks if the resource_root's API version it at least the given minimum
version.
"""
if resource_root.version < min_version:
raise ex.CMApiVersionError(
_("API version %(minv)s is required but %(acv)s is in use.")
% {'minv': min_version, 'acv': resource_root.version})
def call(method, path, ret_type,
ret_is_list=False, data=None, params=None, api_version=1):
"""Call a resource method
Generic function for calling a resource method and automatically dealing
with serialization of parameters and deserialization of return values.
:param method: method to call (must be bound to a resource;
e.g., "resource_root.get").
:param path: the full path of the API method to call.
:param ret_type: return type of the call.
:param ret_is_list: whether the return type is an ApiList.
:param data: Optional data to send as payload to the call.
:param params: Optional query parameters for the call.
:param api_version: minimum API version for the call.
"""
check_api_version(method.__self__, api_version)
if data is not None:
data = json.dumps(Attr(is_api_list=True).to_json(data, False))
ret = method(path, data=data, params=params)
else:
ret = method(path, params=params)
if ret_type is None:
return
elif ret_is_list:
return ApiList.from_json_dict(ret, method.__self__, ret_type)
elif isinstance(ret, list):
return [ret_type.from_json_dict(x, method.__self__) for x in ret]
else:
return ret_type.from_json_dict(ret, method.__self__)
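# Usage sketch (the path and return type are hypothetical): a resource method
# can delegate (de)serialization like this,
#     return call(resource_root.get, '/clusters/%s/services' % name,
#                 ApiServiceRef, ret_is_list=True, api_version=1)
# call() JSON-encodes any 'data' payload, issues the request through the bound
# method and, because ret_is_list is True, wraps the decoded ApiServiceRef
# objects in an ApiList.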
class BaseApiObject(object):
"""The BaseApiObject helps with (de)serialization from/to JSON
The derived class has two ways of defining custom attributes:
- Overwriting the '_ATTRIBUTES' field with the attribute dictionary
- Override the _get_attributes() method, in case static initialization of
the above field is not possible.
It's recommended that the _get_attributes() implementation do caching to
avoid computing the dictionary on every invocation.
The derived class's constructor must call the base class's init() static
method. All constructor arguments (aside from self and resource_root) must
be keywords arguments with default values (typically None), or
from_json_dict() will not work.
"""
_ATTRIBUTES = {}
_WHITELIST = ('_resource_root', '_attributes')
@classmethod
def _get_attributes(cls):
"""Get an attribute dictionary
Returns a map of property names to attr instances (or None for default
attribute behavior) describing the properties of the object.
By default, this method will return the class's _ATTRIBUTES field.
Classes can override this method to do custom initialization of the
attributes when needed.
"""
return cls._ATTRIBUTES
@staticmethod
def init(obj, resource_root, attrs=None):
"""Wraper of real constructor
Wraper around the real constructor to avoid issues with the 'self'
argument. Call like this, from a subclass's constructor:
- BaseApiObject.init(self, locals())
"""
# This works around http://bugs.python.org/issue2646
# We use unicode strings as keys in kwargs.
str_attrs = {}
if attrs:
for k, v in six.iteritems(attrs):
if k not in ('self', 'resource_root'):
str_attrs[k] = v
BaseApiObject.__init__(obj, resource_root, **str_attrs)
def __init__(self, resource_root, **attrs):
"""Init method
Initializes internal state and sets all known writable properties of
the object to None. Then initializes the properties given in the
provided attributes dictionary.
:param resource_root: API resource object.
:param attrs: optional dictionary of attributes to set. This should
only contain r/w attributes.
"""
self._resource_root = resource_root
for name, attr in six.iteritems(self._get_attributes()):
object.__setattr__(self, name, None)
if attrs:
self._set_attrs(attrs, from_json=False)
def _set_attrs(self, attrs, allow_ro=False, from_json=True):
"""Set attributes from dictionary
Sets all the attributes in the dictionary. Optionally, allows setting
read-only attributes (e.g. when deserializing from JSON) and skipping
JSON deserialization of values.
"""
for k, v in six.iteritems(attrs):
attr = self._check_attr(k, allow_ro)
if attr and from_json:
v = attr.from_json(self._get_resource_root(), v)
object.__setattr__(self, k, v)
def __setattr__(self, name, val):
if name not in BaseApiObject._WHITELIST:
self._check_attr(name, False)
object.__setattr__(self, name, val)
def _check_attr(self, name, allow_ro):
cls_name = reflection.get_class_name(self, fully_qualified=False)
if name not in self._get_attributes():
raise ex.CMApiAttributeError(
_('Invalid property %(attname)s for class %(classname)s.')
% {'attname': name, 'classname': cls_name})
attr = self._get_attributes()[name]
if not allow_ro and attr and not attr.rw:
raise ex.CMApiAttributeError(
_('Attribute %(attname)s of class %(classname)s '
'is read only.')
% {'attname': name, 'classname': cls_name})
return attr
def _get_resource_root(self):
return self._resource_root
def _update(self, api_obj):
"""Copy state from api_obj to this object."""
if not isinstance(self, api_obj.__class__):
raise ex.CMApiValueError(
_("Class %(class1)s does not derive from %(class2)s; "
"cannot update attributes.")
% {'class1': self.__class__, 'class2': api_obj.__class__})
for name in self._get_attributes().keys():
try:
val = getattr(api_obj, name)
setattr(self, name, val)
except AttributeError:
pass
def to_json_dict(self, preserve_ro=False):
dic = {}
for name, attr in six.iteritems(self._get_attributes()):
if not preserve_ro and attr and not attr.rw:
continue
try:
value = getattr(self, name)
if value is not None:
if attr:
dic[name] = attr.to_json(value, preserve_ro)
else:
dic[name] = value
except AttributeError:
pass
return dic
def __str__(self):
"""Give a printable format of an attribute
Default implementation of __str__. Uses the type name and the first
attribute retrieved from the attribute map to create the string.
"""
cls_name = reflection.get_class_name(self, fully_qualified=False)
name = list(self._get_attributes().keys())[0]
value = getattr(self, name, None)
return "<%s>: %s = %s" % (cls_name, name, value)
@classmethod
def from_json_dict(cls, dic, resource_root):
obj = cls(resource_root)
obj._set_attrs(dic, allow_ro=True)
return obj
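# A concrete instance of this pattern appears further down in this file:
# ApiHostRef declares _ATTRIBUTES = {'hostId': None} and its constructor just
# calls BaseApiObject.init(self, resource_root, locals()), which is all a
# subclass needs for JSON (de)serialization to work.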
class BaseApiResource(BaseApiObject):
"""Base ApiResource
A specialization of BaseApiObject that provides some utility methods for
resources. This class allows easier serialization / deserialization of
parameters and return values.
"""
def _api_version(self):
"""Get API version
Returns the minimum API version for this resource. Defaults to 1.
"""
return 1
def _path(self):
"""Get resource path
Returns the path to the resource.
e.g., for a service 'foo' in cluster 'bar', this should return
'/clusters/bar/services/foo'.
"""
raise NotImplementedError
def _require_min_api_version(self, version):
"""Check minimum version requirement
Raise an exception if the version of the api is less than the given
version.
:param version: The minimum required version.
"""
actual_version = self._get_resource_root().version
version = max(version, self._api_version())
if actual_version < version:
raise ex.CMApiVersionError(
_("API version %(minv)s is required but %(acv)s is in use.")
% {'minv': version, 'acv': actual_version})
def _cmd(self, command, data=None, params=None, api_version=1):
"""Invoke a command on the resource
Invokes a command on the resource. Commands are expected to be under
the "commands/" sub-resource.
"""
return self._post("commands/" + command, ApiCommand,
data=data, params=params, api_version=api_version)
def _get_config(self, rel_path, view, api_version=1):
"""Get resource configurations
Retrieves an ApiConfig list from the given relative path.
"""
self._require_min_api_version(api_version)
params = dict(view=view) if view else None
resp = self._get_resource_root().get(self._path() + '/' + rel_path,
params=params)
return json_to_config(resp, view == 'full')
def _update_config(self, rel_path, config, api_version=1):
self._require_min_api_version(api_version)
resp = self._get_resource_root().put(self._path() + '/' + rel_path,
data=config_to_json(config))
return json_to_config(resp, False)
def _delete(self, rel_path, ret_type, ret_is_list=False, params=None,
api_version=1):
return self._call('delete', rel_path, ret_type, ret_is_list, None,
params, api_version)
def _get(self, rel_path, ret_type, ret_is_list=False, params=None,
api_version=1):
return self._call('get', rel_path, ret_type, ret_is_list, None,
params, api_version)
def _post(self, rel_path, ret_type, ret_is_list=False, data=None,
params=None, api_version=1):
return self._call('post', rel_path, ret_type, ret_is_list, data,
params, api_version)
def _put(self, rel_path, ret_type, ret_is_list=False, data=None,
params=None, api_version=1):
return self._call('put', rel_path, ret_type, ret_is_list, data,
params, api_version)
def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None,
params=None, api_version=1):
path = self._path()
if rel_path:
path += '/' + rel_path
return call(getattr(self._get_resource_root(), method),
path,
ret_type,
ret_is_list,
data,
params,
api_version)
class ApiList(BaseApiObject):
"""A list of some api object"""
LIST_KEY = "items"
def __init__(self, objects, resource_root=None, **attrs):
BaseApiObject.__init__(self, resource_root, **attrs)
# Bypass checks in BaseApiObject.__setattr__
object.__setattr__(self, 'objects', objects)
def __str__(self):
return ("<ApiList>(%d): [%s]" % (len(self.objects),
", ".join([str(item) for item in self.objects])))
def to_json_dict(self, preserve_ro=False):
ret = BaseApiObject.to_json_dict(self, preserve_ro)
attr = Attr()
ret[ApiList.LIST_KEY] = [attr.to_json(x, preserve_ro)
for x in self.objects]
return ret
def __len__(self):
return self.objects.__len__()
def __iter__(self):
return self.objects.__iter__()
def __getitem__(self, i):
return self.objects.__getitem__(i)
def __getslice__(self, i, j):
return self.objects.__getslice__(i, j)
@classmethod
def from_json_dict(cls, dic, resource_root, member_cls=None):
if not member_cls:
member_cls = cls._MEMBER_CLASS
attr = Attr(atype=member_cls)
items = []
if ApiList.LIST_KEY in dic:
items = [attr.from_json(resource_root, x)
for x in dic[ApiList.LIST_KEY]]
ret = cls(items)
# If the class declares custom attributes, populate them based on the
# input dict. The check avoids extra overhead for the common case,
# where we just have a plain list. _set_attrs() also does not
# understand the "items" attribute, so it can't be in the input data.
if cls._ATTRIBUTES:
if ApiList.LIST_KEY in dic:
dic = copy.copy(dic)
del dic[ApiList.LIST_KEY]
ret._set_attrs(dic, allow_ro=True)
return ret
class ApiHostRef(BaseApiObject):
_ATTRIBUTES = {
'hostId': None,
}
def __init__(self, resource_root, hostId=None):
BaseApiObject.init(self, resource_root, locals())
def __str__(self):
return "<ApiHostRef>: %s" % (self.hostId)
class ApiServiceRef(BaseApiObject):
_ATTRIBUTES = {
'clusterName': None,
'serviceName': None,
'peerName': None,
}
def __init__(self, resource_root, serviceName=None, clusterName=None,
peerName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiClusterRef(BaseApiObject):
_ATTRIBUTES = {
'clusterName': None,
}
def __init__(self, resource_root, clusterName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiRoleRef(BaseApiObject):
_ATTRIBUTES = {
'clusterName': None,
'serviceName': None,
'roleName': None,
}
def __init__(self, resource_root, serviceName=None, roleName=None,
clusterName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiRoleConfigGroupRef(BaseApiObject):
_ATTRIBUTES = {
'roleConfigGroupName': None,
}
def __init__(self, resource_root, roleConfigGroupName=None):
BaseApiObject.init(self, resource_root, locals())
class ApiCommand(BaseApiObject):
SYNCHRONOUS_COMMAND_ID = -1
@classmethod
def _get_attributes(cls):
if not ('_ATTRIBUTES' in cls.__dict__):
cls._ATTRIBUTES = {
'id': ROAttr(),
'name': ROAttr(),
'startTime': ROAttr(datetime.datetime),
'endTime': ROAttr(datetime.datetime),
'active': ROAttr(),
'success': ROAttr(),
'resultMessage': ROAttr(),
'clusterRef': ROAttr(ApiClusterRef),
'serviceRef': ROAttr(ApiServiceRef),
'roleRef': ROAttr(ApiRoleRef),
'hostRef': ROAttr(ApiHostRef),
'children': ROAttr(ApiCommand, is_api_list=True),
'parent': ROAttr(ApiCommand),
'resultDataUrl': ROAttr(),
'canRetry': ROAttr(),
}
return cls._ATTRIBUTES
def __str__(self):
return ("<ApiCommand>: '%s' (id: %s; active: %s; success: %s)"
% (self.name, self.id, self.active, self.success))
def _path(self):
return '/commands/%d' % self.id
def fetch(self):
"""Retrieve updated data about the command from the server
:return: A new ApiCommand object.
"""
if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
return self
resp = self._get_resource_root().get(self._path())
return ApiCommand.from_json_dict(resp, self._get_resource_root())
def wait(self, timeout=None):
"""Wait for command to finish
:param timeout: (Optional) Max amount of time (in seconds) to wait.
Wait forever by default.
:return: The final ApiCommand object, containing the last known state.
The command may still be running in case of timeout.
"""
if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
return self
SLEEP_SEC = 5
if timeout is None:
deadline = None
else:
deadline = time.time() + timeout
while True:
cmd = self.fetch()
if not cmd.active:
return cmd
if deadline is not None:
now = time.time()
if deadline < now:
return cmd
else:
context.sleep(min(SLEEP_SEC, deadline - now))
else:
context.sleep(SLEEP_SEC)
def abort(self):
"""Abort a running command
:return: A new ApiCommand object with the updated information.
"""
if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
return self
path = self._path() + '/abort'
resp = self._get_resource_root().post(path)
return ApiCommand.from_json_dict(resp, self._get_resource_root())
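# Hedged example (not part of the original module): the usual polling pattern for an
# ApiCommand returned by any API call; the 300-second timeout is illustrative only.
def _example_wait_for_command(cmd):
    cmd = cmd.wait(timeout=300)   # poll until the command finishes or the timeout hits
    if cmd.active:                # still running after the timeout -> abort it
        cmd = cmd.abort()
    return cmd.success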
class ApiBulkCommandList(ApiList):
_ATTRIBUTES = {
'errors': ROAttr(),
}
_MEMBER_CLASS = ApiCommand
#
# Configuration helpers.
#
class ApiConfig(BaseApiObject):
_ATTRIBUTES = {
'name': None,
'value': None,
'required': ROAttr(),
'default': ROAttr(),
'displayName': ROAttr(),
'description': ROAttr(),
'relatedName': ROAttr(),
'validationState': ROAttr(),
'validationMessage': ROAttr(),
}
def __init__(self, resource_root, name=None, value=None):
BaseApiObject.init(self, resource_root, locals())
def __str__(self):
return "<ApiConfig>: %s = %s" % (self.name, self.value)
def config_to_api_list(dic):
"""Convert a python dictionary into an ApiConfig list
Converts a python dictionary into a list containing the proper
ApiConfig encoding for configuration data.
:param dic: Key-value pairs to convert.
:return: JSON dictionary of an ApiConfig list (*not* an ApiList).
"""
config = []
for k, v in six.iteritems(dic):
config.append({'name': k, 'value': v})
return {ApiList.LIST_KEY: config}
def config_to_json(dic):
"""Converts a python dictionary into a JSON payload
The payload matches the expected "apiConfig list" type used to update
configuration parameters using the API.
:param dic: Key-value pairs to convert.
:return: String with the JSON-encoded data.
"""
return json.dumps(config_to_api_list(dic))
def json_to_config(dic, full=False):
"""Converts a JSON-decoded config dictionary to a python dictionary
When materializing the full view, the values in the dictionary will be
instances of ApiConfig, instead of strings.
:param dic: JSON-decoded config dictionary.
:param full: Whether to materialize the full view of the config data.
:return: Python dictionary with config data.
"""
config = {}
for entry in dic['items']:
k = entry['name']
if full:
config[k] = ApiConfig.from_json_dict(entry, None)
else:
config[k] = entry.get('value')
return config
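# Hedged example (not part of the original module): round-trips a plain dict through
# the configuration helpers above. The key names are illustrative; ``json`` is already
# imported by this module.
def _example_config_roundtrip():
    raw = {'dfs_replication': '3', 'io_sort_mb': '256'}
    payload = config_to_json(raw)                   # JSON string with an "items" list
    restored = json_to_config(json.loads(payload))  # back to a plain dict
    assert restored == raw
    return restored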
the-stack_0_15840 | import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary
from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines
_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test',
'or_test', 'and_test', 'not_test', 'comparison', 'expr',
'xor_expr', 'and_expr', 'shift_expr', 'arith_expr',
'atom_expr', 'term', 'factor', 'power', 'atom'}
_FLOW_KEYWORDS = (
'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
)
def get_executable_nodes(node, last_added=False):
"""
For static analysis.
"""
result = []
typ = node.type
if typ == 'name':
next_leaf = node.get_next_leaf()
if last_added is False and node.parent.type != 'param' and next_leaf != '=':
result.append(node)
elif typ == 'expr_stmt':
# I think inferring the statement (and possibly returned arrays),
# should be enough for static analysis.
result.append(node)
for child in node.children:
result += get_executable_nodes(child, last_added=True)
elif typ == 'decorator':
# decorator
if node.children[-2] == ')':
node = node.children[-3]
if node != '(':
result += get_executable_nodes(node)
else:
try:
children = node.children
except AttributeError:
pass
else:
if node.type in _EXECUTE_NODES and not last_added:
result.append(node)
for child in children:
result += get_executable_nodes(child, last_added)
return result
def get_sync_comp_fors(comp_for):
yield comp_for
last = comp_for.children[-1]
while True:
if last.type == 'comp_for':
yield last.children[1] # Ignore the async.
elif last.type == 'sync_comp_for':
yield last
elif not last.type == 'comp_if':
break
last = last.children[-1]
def for_stmt_defines_one_name(for_stmt):
"""
Returns True if only one name is returned: ``for x in y``.
Returns False if the for loop is more complicated: ``for x, z in y``.
:returns: bool
"""
return for_stmt.children[1].type == 'name'
def get_flow_branch_keyword(flow_node, node):
start_pos = node.start_pos
if not (flow_node.start_pos < start_pos <= flow_node.end_pos):
raise ValueError('The node is not part of the flow.')
keyword = None
for i, child in enumerate(flow_node.children):
if start_pos < child.start_pos:
return keyword
first_leaf = child.get_first_leaf()
if first_leaf in _FLOW_KEYWORDS:
keyword = first_leaf
return None
def clean_scope_docstring(scope_node):
""" Returns a cleaned version of the docstring token. """
node = scope_node.get_doc_node()
if node is not None:
# TODO We have to check next leaves until there are no new
# leaves anymore that might be part of the docstring. A
# docstring can also look like this: ``'foo' 'bar'
# Returns a literal cleaned version of the ``Token``.
return cleandoc(safe_literal_eval(node.value))
return ''
def find_statement_documentation(tree_node):
if tree_node.type == 'expr_stmt':
tree_node = tree_node.parent # simple_stmt
maybe_string = tree_node.get_next_sibling()
if maybe_string is not None:
if maybe_string.type == 'simple_stmt':
maybe_string = maybe_string.children[0]
if maybe_string.type == 'string':
return cleandoc(safe_literal_eval(maybe_string.value))
return ''
def safe_literal_eval(value):
first_two = value[:2].lower()
if first_two[0] == 'f' or first_two in ('fr', 'rf'):
        # literal_eval is not able to resolve f-string literals. We have to do
        # that manually, but that is not implemented right now.
return ''
return literal_eval(value)
def get_signature(funcdef, width=72, call_string=None,
omit_first_param=False, omit_return_annotation=False):
"""
Generate a string signature of a function.
:param width: Fold lines if a line is longer than this value.
:type width: int
    :arg call_string: Override the function name when given.
    :type call_string: str
:rtype: str
"""
# Lambdas have no name.
if call_string is None:
if funcdef.type == 'lambdef':
call_string = '<lambda>'
else:
call_string = funcdef.name.value
params = funcdef.get_params()
if omit_first_param:
params = params[1:]
p = '(' + ''.join(param.get_code() for param in params).strip() + ')'
# TODO this is pretty bad, we should probably just normalize.
p = re.sub(r'\s+', ' ', p)
if funcdef.annotation and not omit_return_annotation:
rtype = " ->" + funcdef.annotation.get_code()
else:
rtype = ""
code = call_string + p + rtype
return '\n'.join(textwrap.wrap(code, width))
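# Hedged example (not part of the original module): builds a signature string for a
# small parsed function; the sample source and the expected output are illustrative.
def _example_get_signature():
    import parso  # parso is already a dependency of this module
    module = parso.parse("def add(x, y=1) -> int:\n    return x + y\n")
    funcdef = next(module.iter_funcdefs())
    return get_signature(funcdef)  # e.g. "add(x, y=1) -> int"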
def move(node, line_offset):
"""
Move the `Node` start_pos.
"""
try:
children = node.children
except AttributeError:
node.line += line_offset
else:
for c in children:
move(c, line_offset)
def get_following_comment_same_line(node):
"""
returns (as string) any comment that appears on the same line,
after the node, including the #
"""
try:
if node.type == 'for_stmt':
whitespace = node.children[5].get_first_leaf().prefix
elif node.type == 'with_stmt':
whitespace = node.children[3].get_first_leaf().prefix
elif node.type == 'funcdef':
# actually on the next line
whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix
else:
whitespace = node.get_last_leaf().get_next_leaf().prefix
except AttributeError:
return None
except ValueError:
# TODO in some particular cases, the tree doesn't seem to be linked
# correctly
return None
if "#" not in whitespace:
return None
comment = whitespace[whitespace.index("#"):]
if "\r" in comment:
comment = comment[:comment.index("\r")]
if "\n" in comment:
comment = comment[:comment.index("\n")]
return comment
def is_scope(node):
t = node.type
if t == 'comp_for':
# Starting with Python 3.8, async is outside of the statement.
return node.children[1].type != 'sync_comp_for'
return t in ('file_input', 'classdef', 'funcdef', 'lambdef', 'sync_comp_for')
def _get_parent_scope_cache(func):
cache = WeakKeyDictionary()
def wrapper(used_names, node, include_flows=False):
try:
for_module = cache[used_names]
except KeyError:
for_module = cache[used_names] = {}
try:
return for_module[node]
except KeyError:
result = for_module[node] = func(node, include_flows)
return result
return wrapper
def get_parent_scope(node, include_flows=False):
"""
Returns the underlying scope.
"""
scope = node.parent
if scope is None:
return None # It's a module already.
while True:
if is_scope(scope):
if scope.type in ('classdef', 'funcdef', 'lambdef'):
index = scope.children.index(':')
if scope.children[index].start_pos >= node.start_pos:
if node.parent.type == 'param' and node.parent.name == node:
pass
elif node.parent.type == 'tfpdef' and node.parent.children[0] == node:
pass
else:
scope = scope.parent
continue
return scope
elif include_flows and isinstance(scope, tree.Flow):
# The cursor might be on `if foo`, so the parent scope will not be
# the if, but the parent of the if.
if not (scope.type == 'if_stmt'
and any(n.start_pos <= node.start_pos < n.end_pos
for n in scope.get_test_nodes())):
return scope
scope = scope.parent
get_cached_parent_scope = _get_parent_scope_cache(get_parent_scope)
def get_cached_code_lines(grammar, path):
"""
Basically access the cached code lines in parso. This is not the nicest way
to do this, but we avoid splitting all the lines again.
"""
return parser_cache[grammar._hashed][path].lines
def cut_value_at_position(leaf, position):
"""
    Cuts off the value of the leaf at the given position.
"""
lines = split_lines(leaf.value, keepends=True)[:position[0] - leaf.line + 1]
column = position[1]
if leaf.line == position[0]:
column -= leaf.column
if not lines:
return ''
lines[-1] = lines[-1][:column]
return ''.join(lines)
def expr_is_dotted(node):
"""
Checks if a path looks like `name` or `name.foo.bar` and not `name()`.
"""
if node.type == 'atom':
if len(node.children) == 3 and node.children[0] == '(':
return expr_is_dotted(node.children[1])
return False
if node.type == 'atom_expr':
children = node.children
if children[0] == 'await':
return False
if not expr_is_dotted(children[0]):
return False
# Check trailers
return all(c.children[0] == '.' for c in children[1:])
return node.type == 'name'
def _function_is_x_method(*method_names):
def wrapper(function_node):
"""
This is a heuristic. It will not hold ALL the times, but it will be
correct pretty much for anyone that doesn't try to beat it.
staticmethod/classmethod are builtins and unless overwritten, this will
be correct.
"""
for decorator in function_node.get_decorators():
dotted_name = decorator.children[1]
if dotted_name.get_code() in method_names:
return True
return False
return wrapper
function_is_staticmethod = _function_is_x_method('staticmethod')
function_is_classmethod = _function_is_x_method('classmethod')
function_is_property = _function_is_x_method('property', 'cached_property')
the-stack_0_15845 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Phillipe Smith <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: rundeck_job_run
short_description: Run a Rundeck job
description:
- This module runs a Rundeck job specified by ID.
author: "Phillipe Smith (@phsmith)"
version_added: 3.8.0
options:
job_id:
type: str
description:
- The job unique ID.
required: true
job_options:
type: dict
description:
- The job options for the steps.
- Numeric values must be quoted.
filter_nodes:
type: str
description:
- Filter the nodes where the jobs must run.
- See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax).
run_at_time:
type: str
description:
- Schedule the job execution to run at specific date and time.
- ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00).
loglevel:
type: str
description:
- Log level configuration.
choices: [debug, verbose, info, warn, error]
default: info
wait_execution:
type: bool
description:
- Wait until the job finished the execution.
default: true
wait_execution_delay:
type: int
description:
- Delay, in seconds, between job execution status check requests.
default: 5
wait_execution_timeout:
type: int
description:
- Job execution wait timeout in seconds.
- If the timeout is reached, the job will be aborted.
- Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check.
default: 120
abort_on_timeout:
type: bool
description:
- Send a job abort request if exceeded the I(wait_execution_timeout) specified.
default: false
extends_documentation_fragment:
- community.general.rundeck
- url
'''
EXAMPLES = '''
- name: Run a Rundeck job
community.general.rundeck_job_run:
url: "https://rundeck.example.org"
api_version: 39
api_token: "mytoken"
job_id: "xxxxxxxxxxxxxxxxx"
register: rundeck_job_run
- name: Show execution info
ansible.builtin.debug:
var: rundeck_job_run.execution_info
- name: Run a Rundeck job with options
community.general.rundeck_job_run:
url: "https://rundeck.example.org"
api_version: 39
api_token: "mytoken"
job_id: "xxxxxxxxxxxxxxxxx"
job_options:
option_1: "value_1"
option_2: "value_3"
option_3: "value_3"
register: rundeck_job_run
- name: Run a Rundeck job with timeout, delay between status check and abort on timeout
community.general.rundeck_job_run:
url: "https://rundeck.example.org"
api_version: 39
api_token: "mytoken"
job_id: "xxxxxxxxxxxxxxxxx"
wait_execution_timeout: 30
wait_execution_delay: 10
abort_on_timeout: true
register: rundeck_job_run
- name: Schedule a Rundeck job
community.general.rundeck_job_run:
url: "https://rundeck.example.org"
api_version: 39
api_token: "mytoken"
job_id: "xxxxxxxxxxxxxxxxx"
run_at_time: "2021-10-05T15:45:00-03:00"
register: rundeck_job_schedule
- name: Fire-and-forget a Rundeck job
community.general.rundeck_job_run:
url: "https://rundeck.example.org"
api_version: 39
api_token: "mytoken"
job_id: "xxxxxxxxxxxxxxxxx"
wait_execution: false
register: rundeck_job_run
'''
RETURN = '''
execution_info:
description: Rundeck job execution metadata.
returned: always
type: dict
sample: {
"msg": "Job execution succeeded!",
"execution_info": {
"id": 1,
"href": "https://rundeck.example.org/api/39/execution/1",
"permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
"status": "succeeded",
"project": "myproject",
"executionType": "user",
"user": "admin",
"date-started": {
"unixtime": 1633449020784,
"date": "2021-10-05T15:50:20Z"
},
"date-ended": {
"unixtime": 1633449026358,
"date": "2021-10-05T15:50:26Z"
},
"job": {
"id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
"averageDuration": 4917,
"name": "Test",
"group": "",
"project": "myproject",
"description": "",
"options": {
"exit_code": "0"
},
"href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
"permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
},
"description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
"argstring": "-exit_code 0",
"serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
"successfulNodes": [
"localhost"
],
"output": "Test!"
}
}
'''
# Modules import
import json
from datetime import datetime, timedelta
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible_collections.community.general.plugins.module_utils.rundeck import (
api_argument_spec,
api_request
)
class RundeckJobRun(object):
def __init__(self, module):
self.module = module
self.url = self.module.params["url"]
self.api_version = self.module.params["api_version"]
self.job_id = self.module.params["job_id"]
self.job_options = self.module.params["job_options"] or {}
self.filter_nodes = self.module.params["filter_nodes"] or ""
self.run_at_time = self.module.params["run_at_time"] or ""
self.loglevel = self.module.params["loglevel"].upper()
self.wait_execution = self.module.params['wait_execution']
self.wait_execution_delay = self.module.params['wait_execution_delay']
self.wait_execution_timeout = self.module.params['wait_execution_timeout']
self.abort_on_timeout = self.module.params['abort_on_timeout']
for k, v in self.job_options.items():
if not isinstance(v, str):
self.module.exit_json(
msg="Job option '%s' value must be a string" % k,
execution_info={}
)
def job_status_check(self, execution_id):
response = dict()
timeout = False
due = datetime.now() + timedelta(seconds=self.wait_execution_timeout)
while not timeout:
endpoint = "execution/%d" % execution_id
response = api_request(module=self.module, endpoint=endpoint)[0]
output = api_request(module=self.module,
endpoint="execution/%d/output" % execution_id)
log_output = "\n".join([x["log"] for x in output[0]["entries"]])
response.update({"output": log_output})
if response["status"] == "aborted":
break
elif response["status"] == "scheduled":
self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time,
execution_info=response,
changed=True)
elif response["status"] == "failed":
self.module.fail_json(msg="Job execution failed",
execution_info=response)
elif response["status"] == "succeeded":
self.module.exit_json(msg="Job execution succeeded!",
execution_info=response)
if datetime.now() >= due:
timeout = True
break
            # Wait for the configured delay before the next status check
sleep(self.wait_execution_delay)
response.update({"timed_out": timeout})
return response
def job_run(self):
response, info = api_request(
module=self.module,
endpoint="job/%s/run" % quote(self.job_id),
method="POST",
data={
"loglevel": self.loglevel,
"options": self.job_options,
"runAtTime": self.run_at_time,
"filter": self.filter_nodes
}
)
if info["status"] != 200:
self.module.fail_json(msg=info["msg"])
if not self.wait_execution:
self.module.exit_json(msg="Job run send successfully!",
execution_info=response)
job_status = self.job_status_check(response["id"])
if job_status["timed_out"]:
if self.abort_on_timeout:
api_request(
module=self.module,
endpoint="execution/%s/abort" % response['id'],
method="GET"
)
abort_status = self.job_status_check(response["id"])
self.module.fail_json(msg="Job execution aborted due the timeout specified",
execution_info=abort_status)
self.module.fail_json(msg="Job execution timed out",
execution_info=job_status)
def main():
argument_spec = api_argument_spec()
argument_spec.update(dict(
job_id=dict(required=True, type="str"),
job_options=dict(type="dict"),
filter_nodes=dict(type="str"),
run_at_time=dict(type="str"),
wait_execution=dict(type="bool", default=True),
wait_execution_delay=dict(type="int", default=5),
wait_execution_timeout=dict(type="int", default=120),
abort_on_timeout=dict(type="bool", default=False),
loglevel=dict(
type="str",
choices=["debug", "verbose", "info", "warn", "error"],
default="info"
)
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
if module.params["api_version"] < 14:
module.fail_json(msg="API version should be at least 14")
rundeck = RundeckJobRun(module)
rundeck.job_run()
if __name__ == "__main__":
main()
the-stack_0_15846 | # Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from sklearn.model_selection import learning_curve
class Model:
"""
    The base class for building classification models and running inference
on them.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, hparams, batch_size=None, num_classes=None,
summary_dir=None, verbose=False):
"""
Initializes the model parameters.
Args:
hparams: The hyperparameters for the model as
tf.contrib.training.HParams.
batch_size: An integer, the number of samples in a batch.
num_classes: An integer, the number of classes.
summary_dir: The output directory for the results.
verbose: A boolean to enable verbose logging.
"""
self._model = None
self._hparams = hparams
self._verbose = verbose
self._batch_size = batch_size
self._num_classes = num_classes
self._summary_dir = summary_dir
@abc.abstractstaticmethod
def default_hparams():
"""Builds an HParam object with default hyperparameters."""
raise NotImplementedError('Not implemented')
@abc.abstractmethod
def train(self, features, labels, seed=None):
"""
Setup the model with specified hyperparameters and train the model.
Args:
features: A numpy n-dimensional array containing the features in
the shape of samples x features.
labels: A numpy array containing the label for each sample.
seed: An integer used to specify the randomness seed.
"""
raise NotImplementedError('Not implemented')
@abc.abstractmethod
def evaluate(self, features, labels):
"""
Evaluates the trained model using the specified features and labels.
Args:
features: A numpy n-dimensional array containing the features in
the shape of samples x features.
labels: A numpy array containing the label for each sample.
Returns:
accuracy: The accuracy score of the model.
predictions: The labels predicted by the model for each sample.
"""
raise NotImplementedError('Not implemented')
def learning_curve(self, features, labels):
"""Simple wrapper around sklearn's learning curve module"""
return learning_curve(self._model, features, labels)
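# Hedged example (not part of the original file): a minimal concrete subclass that
# wraps scikit-learn's LogisticRegression. The hyperparameter name 'C', the use of a
# plain dict for default_hparams, and the class name are illustrative assumptions.
class _ExampleLogisticModel(Model):

    @staticmethod
    def default_hparams():
        # A plain dict keeps the sketch free of a hard TensorFlow dependency.
        return {'C': 1.0}

    def train(self, features, labels, seed=None):
        from sklearn.linear_model import LogisticRegression
        # Falls back to C=1.0 when hparams does not expose a 'C' attribute.
        self._model = LogisticRegression(C=getattr(self._hparams, 'C', 1.0),
                                         random_state=seed)
        self._model.fit(features, labels)

    def evaluate(self, features, labels):
        from sklearn.metrics import accuracy_score
        predictions = self._model.predict(features)
        return accuracy_score(labels, predictions), predictions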
the-stack_0_15847 | """train finetune"""
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from mindspore import context
from mindspore.context import ParallelMode
import mindspore.dataset as ds
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.common import set_seed
from src.args import args
from src.data.imagenet import ImgData
from src.data.srdata import SRData
from src.data.div2k import DIV2K
from src.data.bicubic import bicubic
from src.ipt_model import IPT
from src.utils import Trainer
def train_net(distribute, imagenet):
"""Train net with finetune"""
set_seed(1)
device_id = int(os.getenv('DEVICE_ID', '0'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False, device_id=device_id)
if imagenet == 1:
train_dataset = ImgData(args)
elif not args.derain:
train_dataset = DIV2K(args, name=args.data_train, train=True, benchmark=False)
train_dataset.set_scale(args.task_id)
else:
train_dataset = SRData(args, name=args.data_train, train=True, benchmark=False)
train_dataset.set_scale(args.task_id)
if distribute:
init()
rank_id = get_rank()
rank_size = get_group_size()
parallel_mode = ParallelMode.DATA_PARALLEL
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=rank_size, gradients_mean=True)
print('Rank {}, group_size {}'.format(rank_id, rank_size))
if imagenet == 1:
train_de_dataset = ds.GeneratorDataset(train_dataset,
["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"],
num_shards=rank_size, shard_id=rank_id, shuffle=True)
else:
train_de_dataset = ds.GeneratorDataset(train_dataset, ["LR", "HR", "idx", "filename"],
num_shards=rank_size, shard_id=rank_id, shuffle=True)
else:
if imagenet == 1:
train_de_dataset = ds.GeneratorDataset(train_dataset,
["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"],
shuffle=True)
else:
train_de_dataset = ds.GeneratorDataset(train_dataset, ["LR", "HR", "idx", "filename"], shuffle=True)
if args.imagenet == 1:
resize_fuc = bicubic()
train_de_dataset = train_de_dataset.batch(
args.batch_size,
input_columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"],
output_columns=["LR", "HR", "idx", "filename"], drop_remainder=True,
per_batch_map=resize_fuc.forward)
else:
train_de_dataset = train_de_dataset.batch(args.batch_size, drop_remainder=True)
train_loader = train_de_dataset.create_dict_iterator(output_numpy=True)
net_m = IPT(args)
print("Init net weights successfully")
if args.pth_path:
param_dict = load_checkpoint(args.pth_path)
load_param_into_net(net_m, param_dict)
print("Load net weight successfully")
train_func = Trainer(args, train_loader, net_m)
for epoch in range(0, args.epochs):
train_func.update_learning_rate(epoch)
train_func.train()
if __name__ == "__main__":
train_net(distribute=args.distribute, imagenet=args.imagenet)
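# Hedged usage note (not part of the original script): a typical launch command. The
# exact flag names are defined in src/args.py and the values below are illustrative.
#
#   python train_finetune.py --distribute False --imagenet 0 --batch_size 32 \
#       --epochs 50 --task_id 0 --pth_path ./ipt_pretrained.ckpt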
the-stack_0_15848 | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.audio_volume_filter import AudioVolumeFilter
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.filters.audio_volume.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.filters.audio_volume.audio_volume_filter_list_query_params import AudioVolumeFilterListQueryParams
class AudioVolumeApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(AudioVolumeApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.customdata = CustomdataApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, audio_volume_filter, **kwargs):
# type: (AudioVolumeFilter, dict) -> AudioVolumeFilter
"""Create Audio Volume Filter
:param audio_volume_filter: The Audio Volume Filter to be created
:type audio_volume_filter: AudioVolumeFilter, required
:return: Audio volume details
:rtype: AudioVolumeFilter
"""
return self.api_client.post(
'/encoding/filters/audio-volume',
audio_volume_filter,
type=AudioVolumeFilter,
**kwargs
)
def delete(self, filter_id, **kwargs):
# type: (string_types, dict) -> BitmovinResponse
"""Delete Audio Volume Filter
:param filter_id: Id of the Audio volume configuration.
:type filter_id: string_types, required
:return: Id of the Audio volume.
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/filters/audio-volume/{filter_id}',
path_params={'filter_id': filter_id},
type=BitmovinResponse,
**kwargs
)
def get(self, filter_id, **kwargs):
# type: (string_types, dict) -> AudioVolumeFilter
"""Audio Volume Filter Details
:param filter_id: Id of the audio volume configuration.
:type filter_id: string_types, required
:return: Audio volume details
:rtype: AudioVolumeFilter
"""
return self.api_client.get(
'/encoding/filters/audio-volume/{filter_id}',
path_params={'filter_id': filter_id},
type=AudioVolumeFilter,
**kwargs
)
def list(self, query_params=None, **kwargs):
# type: (AudioVolumeFilterListQueryParams, dict) -> AudioVolumeFilter
"""List Audio Volume Filters
:param query_params: Query parameters
:type query_params: AudioVolumeFilterListQueryParams
:return: List of Audio volume ids
:rtype: AudioVolumeFilter
"""
return self.api_client.get(
'/encoding/filters/audio-volume',
query_params=query_params,
pagination_response=True,
type=AudioVolumeFilter,
**kwargs
)
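# Hedged usage sketch (not part of the original module): the API key and filter id
# values are placeholders; the method signatures follow the definitions above.
#
#   audio_volume_api = AudioVolumeApi(api_key='<BITMOVIN_API_KEY>')
#   existing_filters = audio_volume_api.list()
#   details = audio_volume_api.get(filter_id='<FILTER_ID>')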
the-stack_0_15849 | # -*- coding: utf-8 -*-
"""
DWX_ZMQ_Execution.py
--
@author: Darwinex Labs (www.darwinex.com)
Copyright (c) 2019 onwards, Darwinex. All rights reserved.
Licensed under the BSD 3-Clause License, you may not use this file except
in compliance with the License.
You may obtain a copy of the License at:
https://opensource.org/licenses/BSD-3-Clause
"""
from pandas import to_datetime
from time import sleep
class DWX_ZMQ_Execution():
def __init__(self, _zmq):
self._zmq = _zmq
##########################################################################
def _execute_(self,
_exec_dict,
_verbose=False,
_delay=0.1,
_wbreak=10):
_check = ''
# Reset thread data output
self._zmq._set_response_(None)
# OPEN TRADE
if _exec_dict['_action'] == 'OPEN':
_check = '_action'
self._zmq._DWX_MTX_NEW_TRADE_(_order=_exec_dict)
# CLOSE TRADE
elif _exec_dict['_action'] == 'CLOSE':
_check = '_response_value'
self._zmq._DWX_MTX_CLOSE_TRADE_BY_TICKET_(_exec_dict['_ticket'])
if _verbose:
print('\n[{}] {} -> MetaTrader'.format(_exec_dict['_comment'],
str(_exec_dict)))
# While loop start time reference
_ws = to_datetime('now')
# While data not received, sleep until timeout
while self._zmq._valid_response_('zmq') == False:
sleep(_delay)
if (to_datetime('now') - _ws).total_seconds() > (_delay * _wbreak):
break
# If data received, return DataFrame
if self._zmq._valid_response_('zmq'):
if _check in self._zmq._get_response_().keys():
return self._zmq._get_response_()
# Default
return None
##########################################################################
|
the-stack_0_15851 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import logging
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "/media/disk2/jennybae/bert-base-uncased-vocab.txt",
'bert-base-cased': "/media/disk2/jennybae/bert-base-cased-vocab.txt",
'bert-large-uncased': "/media/disk2/jennybae/bert-large-uncased-vocab.txt",
# 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
# 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
# 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer wont index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
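# Hedged example (not part of the original module): end-to-end tokenization given a
# local vocabulary file; the path and the sample text are illustrative placeholders.
def _example_tokenize(vocab_file="bert-base-uncased-vocab.txt"):
    tokenizer = BertTokenizer(vocab_file, do_lower_case=True)
    tokens = tokenizer.tokenize("unaffable")      # e.g. ['un', '##aff', '##able']
    return tokenizer.convert_tokens_to_ids(tokens)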
the-stack_0_15852 | import os, tempfile, subprocess
from string import Template
from PuzzleLib import Config
from PuzzleLib.Compiler.JIT import getCacheDir, computeHash, FileLock
from PuzzleLib.Cuda.SourceModule import SourceModule, ElementwiseKernel, ElementHalf2Kernel, ReductionKernel
from PuzzleLib.Cuda.SourceModule import eltwiseTest, reductionTest
from PuzzleLib.Hip import Driver as HipDriver
hipWarpBit, hipBlockBit = 6, 8
hipWarpSize, hipBlockSize = 1 << hipWarpBit, 1 << hipBlockBit
class HipSourceModule(SourceModule):
Driver = HipDriver
runtimeHeader = """
#include <hip/hip_runtime.h>
#define __shfl_xor_sync(mask, value, laneMask, ...) __shfl_xor(value, laneMask, __VA_ARGS__)
#define __shfl_up_sync(mask, value, delta, ...) __shfl_up(value, delta, __VA_ARGS__)
"""
def __init__(self, source, options=None, includes=None, externC=False, verbose=True, debug=False, recompile=False,
name=None):
super().__init__(source, options, includes, externC, verbose, debug, name)
self.recompile = recompile
self.includes = [] if self.includes is None else self.includes
def build(self):
source = self.source.replace("cuda_fp16.h", "hip/hip_fp16.h")
source = ("%sextern \"C\"\n{\n%s\n}\n" if self.externC else "%s%s") % (self.runtimeHeader, source)
cachedir = getCacheDir(os.path.join(Config.libname, Config.Backend.hip.name))
with FileLock(cachedir):
try:
codename = self.tryBuild(source, cachedir)
except subprocess.CalledProcessError as e:
log = e.output.decode()
text = log if self.debug else "%s\nSource:\n%s" % (
log,
"\n".join("%-4s %s" % (i + 1, line) for i, line in enumerate(source.splitlines(keepends=False)))
)
raise self.Driver.RtcError(text)
with open(codename, mode="rb") as f:
hsaco = f.read()
self.cumod = self.Driver.Module(hsaco)
def tryBuild(self, source, cachedir):
options, includes = self.options, self.includes
hashsum = computeHash(source, *options, *includes)
codepath = os.path.join(cachedir, hashsum)
name, srcext = "module" if self.name is None else self.name, ".hip.cpp"
codename = os.path.join(codepath, "%s.code" % name)
sourcename = os.path.join(codepath, "%s%s" % (name, srcext))
if not os.path.exists(codename) or self.recompile:
os.makedirs(codepath, exist_ok=True)
args = ["hipcc", "--genco"] + options + ["-o", codename]
stderr = subprocess.STDOUT if self.verbose else subprocess.DEVNULL
Config.getLogger().debug("No cache found for HIP extension '%s', performing compilation ...", name)
if not self.debug:
f = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", suffix=srcext, delete=False)
try:
with f:
f.write(source)
subprocess.check_output(args + [f.name], stderr=stderr)
finally:
os.remove(f.name)
else:
with open(sourcename, mode="w", encoding="utf-8") as f:
f.write(source)
subprocess.check_output(args + [sourcename], stderr=stderr)
else:
Config.getLogger().debug("Found cached compilation for HIP extension '%s', skipping compilation ...", name)
return codename
@classmethod
def getDefaultOptions(cls):
deviceIdx = cls.Driver.Device.getCurrent()
return ["--targets gfx%s" % cls.Driver.Device(deviceIdx).getArch()]
class HipEltwiseKernel(ElementwiseKernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
class HipEltHalf2Kernel(ElementHalf2Kernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
class HipReductionKernel(ReductionKernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
reduceTmpl = Template("""
#undef READ_AND_MAP
#undef REDUCE
#define READ_AND_MAP(i) ($mapExpr)
#define REDUCE(a, b) ($reduceExpr)
extern "C" __global__ void $name($arguments, $T *partials, int size)
{
__shared__ $T sdata[$warpSize];
int tid = threadIdx.x;
int gid = tid + blockIdx.x * $NT;
$T acc = $neutral;
for (int i = gid; i < size; i += $NT * gridDim.x)
acc = REDUCE(acc, READ_AND_MAP(i));
for (int mask = $warpSize / 2; mask > 0; mask /= 2)
{
$T upval = __shfl_xor(acc, mask, $warpSize);
acc = REDUCE(acc, upval);
}
if (tid % $warpSize == 0)
sdata[tid / $warpSize] = acc;
__syncthreads();
int nwarps = $NT / $warpSize;
if (tid < $warpSize)
{
acc = (tid < nwarps) ? sdata[tid] : $neutral;
for (int mask = $warpSize / 2; mask > 0; mask /= 2)
{
$T upval = __shfl_xor(acc, mask, $warpSize);
acc = REDUCE(acc, upval);
}
}
if (tid == 0)
partials[blockIdx.x] = acc;
}
""")
def unittest():
from PuzzleLib.Hip import Backend
for deviceIdx in range(Backend.getDeviceCount()):
bnd = Backend.getBackend(deviceIdx)
eltwiseTest(bnd)
reductionTest(bnd)
if __name__ == "__main__":
unittest()
the-stack_0_15853 | # Written by Hannah Horng ([email protected])
import pandas as pd
import neuroCombat as nC
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
from scipy.stats import ranksums, ttest_ind, ttest_rel, ks_2samp
import os
def NestedComBat(dat, covars, batch_list, categorical_cols=None, continuous_cols=None, drop=False,
write_p=False, plotting=False, filepath=''):
"""
Completes sequential nested ComBat harmonization on an input DataFrame. Order is determined by number of features
with statistically significant differences in distribution (KS test) due to a particular batch effect.
Arguments
---------
data : DataFrame of original data with shape (features, samples)
covars : DataFrame with shape (samples, covariates) corresponding to original data. All variables should be label-
encoded (i.e. strings converted to integer designations)
batch_list : list of strings indicating batch effect column names within covars (i.e. ['Manufacturer', 'CE'...])
categorical_cols : string or list of strings of categorical variables to adjust for
continuous_cols : string or list of strings of continuous variables to adjust for
drop : Boolean, if True -- features with significant differences in distribution due to the batch effect being
harmonized are dropped with each iteration (corresponds to NestedD)
write_p : Boolean, if True -- KS test p-values will be written as a CSV into the directory created from filepath
plotting : Boolean, if True -- kernel density plots will be written as image files into the directory created from
filepath
filepath : root directory path for saving KS test p-values and kernel density plots created during harmonization
Returns
-------
new_dat : DataFrame with shape (features, samples) that has been sequentially harmonized with Nested ComBat
"""
p_dict = {}
count_dict = {}
f_dict = {}
print('ROUND 1:')
for a in range(len(batch_list)):
batch_col = batch_list[a]
print('Harmonizing by ' + batch_col + '...')
filepath2 = filepath + 'Round 1/' + batch_col + '/'
if not os.path.exists(filepath2):
os.makedirs(filepath2)
# RUN COMBAT
print('ComBat with Raw Data...')
output = nC.neuroCombat(dat, covars, batch_col, continuous_cols=continuous_cols,
categorical_cols=categorical_cols)['data']
output_df = pd.DataFrame.from_records(output.T)
output_df.columns = dat.T.columns
f_dict[batch_col] = output_df
if plotting:
combat_histograms(dat.T, output_df, covars, covars, batch_col, filepath2)
if write_p:
p_values = combat_kstest(dat.T, output_df, covars, covars, batch_col, write=True, filepath=filepath2)
else:
p_values = combat_kstest(dat.T, output_df, covars, covars, batch_col)
p_values.index = output_df.columns
p_dict[batch_col] = p_values['ComBat']
count_dict[batch_col] = len(p_values[p_values['ComBat'] < .05])
drop_feature = [key for key, value in count_dict.items() if value == min(count_dict.values())][0]
# Iteration
batch_list2 = batch_list.copy()
batch_list2.remove(drop_feature)
new_data_df = f_dict[drop_feature]
new_pvalues = p_dict[drop_feature]
new_dat = new_data_df.T
if drop:
new_dat = new_data_df.T[new_pvalues > .05] # Dropping every iteration
c = 1
while len(batch_list2) > 0:
print('ROUND ' + str(c+1) + ':')
p_dict = {}
count_dict = {}
f_dict = {}
c = c+1
for b in range(len(batch_list2)):
batch_col = batch_list2[b]
print('Harmonizing by ' + batch_col + '...')
filepath2 = filepath+'Round '+str(c) + '/' + batch_col+'/'
if not os.path.exists(filepath2):
os.makedirs(filepath2)
# RUN COMBAT
# print('ComBat with Raw Data...')
output = nC.neuroCombat(new_dat, covars, batch_col, continuous_cols=continuous_cols, categorical_cols=categorical_cols)['data']
output_df = pd.DataFrame.from_records(output.T)
output_df.columns = new_dat.T.columns
f_dict[batch_col] = output_df
if plotting:
combat_histograms(new_dat.T, output_df, covars, covars, batch_col, filepath2)
if write_p:
p_values = combat_kstest(new_dat.T, output_df, covars, covars, batch_col, write=True, filepath=filepath2)
else:
p_values = combat_kstest(new_dat.T, output_df, covars, covars, batch_col)
p_values.index = output_df.columns
p_dict[batch_col] = p_values['ComBat']
count_dict[batch_col] = len(p_values[p_values['ComBat'] < .05])
drop_feature = [key for key, value in count_dict.items() if value == min(count_dict.values())][0]
new_data_df = f_dict[drop_feature]
new_pvalues = p_dict[drop_feature]
if drop:
new_dat = new_data_df.T[new_pvalues > .05] # Iteration + Dropping
else:
new_dat = new_data_df.T
batch_list2.remove(drop_feature)
output_df = pd.DataFrame.from_records(new_dat.T)
output_df.columns = new_dat.T.columns
return output_df
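# Hedged usage example (not part of the original file): the covariate/batch column
# names and the output path are illustrative assumptions, not values required by
# NestedComBat.
def _example_nested_combat(features_df, covars_df):
    # features_df: (samples, features); NestedComBat expects (features, samples),
    # and covars_df must already be label-encoded with the columns named below.
    return NestedComBat(features_df.T, covars_df,
                        batch_list=['Manufacturer', 'KernelResolution'],
                        categorical_cols=['Label'],
                        filepath='nested_combat_output/')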
def combat_kstest(data, output, covars1, covars2, batch_col, filepath='', write=False):
"""
Calculating KS test for differences in distribution due to batch effect before and after harmonization
    *Note that this differs from the version in GMMComBat only by file destination naming
Arguments
---------
data : DataFrame of original data with shape (samples, features)
output: DataFrame of harmonized data with shape (samples, features)
covars1 : DataFrame with shape (samples, covariates) corresponding to original data
covars2 : DataFrame with shape (samples, covariates) corresponding to harmonized data
batch_col : string indicating batch/imaging parameter name in covars
filepath : write destination for ks p-value DataFrame if write is True
write: Boolean, set to True to save ks p-value DataFrame
Returns
-------
    p_df : DataFrame with two columns of KS test p-values testing for significant differences in
distribution attributable to the batch effect specified by batch_col
"""
data_keys = data.keys()
batch_var1 = covars1[batch_col]
batch_var2 = covars2[batch_col]
data_0 = data[batch_var1 == 0]
data_1 = data[batch_var1 == 1]
output_0 = output[batch_var2 == 0]
output_1 = output[batch_var2 == 1]
# KS Test (more generalized differences in distribution)
p_before = []
p_after = []
for m in range(0, data.shape[1]):
p_value1 = ks_2samp(data_0.iloc[:, m], data_1.iloc[:, m])
p_value2 = ks_2samp(output_0.iloc[:, m], output_1.iloc[:, m])
p_before.append(p_value1.pvalue)
p_after.append(p_value2.pvalue)
    p_df = pd.DataFrame({'Raw': p_before, 'ComBat': p_after})
    if write:
p_df.index = data_keys
p_df.to_csv(filepath + '_' + batch_col + '_feature_ks_values.csv')
return p_df
def combat_histograms(data, output, covars1, covars2, batch_col, filepath):
"""
Plots kernel density plots separated by batch effect groups and before vs. after ComBat harmonization
Arguments
---------
data : DataFrame of original data with shape (samples, features)
output: DataFrame of harmonized data with shape (samples, features)
covars1 : DataFrame with shape (samples, covariates) corresponding to original data
covars2 : DataFrame with shape (samples, covariates) corresponding to harmonized data
batch_col : string indicating batch/imaging parameter name in covars
filepath : write destination for kernel density plots
"""
print('Plotting histograms...')
data_keys = data.keys()
batch_var1 = covars1[batch_col]
batch_var2 = covars2[batch_col]
data_0 = data[batch_var1 == 0]
data_1 = data[batch_var1 == 1]
output_0 = output[batch_var2 == 0]
output_1 = output[batch_var2 == 1]
for k in range(0, data.shape[1]):
plt.figure()
data_0.iloc[:, k].plot.kde()
data_1.iloc[:, k].plot.kde()
output_0.iloc[:, k].plot.kde()
output_1.iloc[:, k].plot.kde()
plt.xlabel(data_keys[k])
leg = ["0", "1", "0_ComBat", "1_ComBat"]
plt.legend(leg, loc='upper right')
plt.rcParams.update({'font.size': 12})
filename = filepath + batch_col + '_' + 'histogram_' + data_keys[k] + ".png"
plt.savefig(filename, bbox_inches='tight')
plt.close()
def feature_kstest_histograms(dat, covars, batch_list, filepath):
"""
    Plots kernel density plots and computes KS test p-values separated by batch effect groups for a dataset (intended
    to assess differences in distribution due to all batch effects in batch_list following harmonization with
    Nested ComBat).
    *Note that this differs from the version in GMMComBat only by file destination naming
Arguments
---------
    dat : DataFrame of data to assess (e.g. Nested ComBat-harmonized data) with shape (samples, features)
covars : DataFrame with shape (samples, covariates) corresponding to original data. All variables should be label-
encoded (i.e. strings converted to integer designations)
batch_list : list of strings indicating batch effect column names within covars (i.e. ['Manufacturer', 'CE'...])
filepath : write destination for kernel density plots
"""
print('Plotting final feature histograms...')
p_df = pd.DataFrame()
for batch_col in batch_list:
p = []
split_col = covars[batch_col]
filepath2 = filepath + 'feature_histograms/' + batch_col + '/'
if not os.path.exists(filepath2):
os.makedirs(filepath2)
for feature in dat:
plt.figure()
dat[feature][split_col == 0].plot.kde()
dat[feature][split_col == 1].plot.kde()
plt.xlabel(feature)
filename = filepath2 + feature + '.png'
plt.savefig(filename, bbox_inches='tight')
plt.close()
p_value = ks_2samp(dat[feature][split_col == 0], dat[feature][split_col == 1])
p.append(p_value.pvalue)
p_df[batch_col] = p
p_df.index = dat.keys()
p_df.to_csv(filepath + 'final_nested_ks_values.csv')
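# Hedged usage sketch (not part of the original module): shows how combat_kstest() might be called on tiny
# synthetic data. The 'Manufacturer' column name and the 0/1 batch encoding are illustrative assumptions,
# and the module-level pandas import (pd) is assumed, as in the functions above.
def _example_combat_kstest():
    import numpy as np  # local import so the sketch stays self-contained
    rng = np.random.default_rng(0)
    data = pd.DataFrame(rng.normal(size=(20, 3)), columns=['f1', 'f2', 'f3'])
    covars = pd.DataFrame({'Manufacturer': [0] * 10 + [1] * 10})
    output = data - data.mean()  # stand-in for ComBat-harmonized data
    # Returns a DataFrame with 'Raw' and 'ComBat' KS-test p-values, one row per feature.
    return combat_kstest(data, output, covars, covars, 'Manufacturer')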
|
the-stack_0_15854 | import subprocess
import re
# ************************************************
# remove_custom_emoji
# Do not read custom emoji IDs aloud
# ************************************************
def remove_custom_emoji(text):
    #pattern = r'<:[a-zA-Z0-9_]+:[0-9]+>' # custom emoji pattern
    pattern = r'<:' # custom emoji pattern
    text = re.sub(pattern,'',text) # replace
    pattern = r':[0-9]+>' # custom emoji pattern
    return re.sub(pattern,'',text) # replace
# ************************************************
# exclude_url
# Abbreviate URLs (read the word "URL" instead)
# ************************************************
def exclude_url(text):
pattern = "https?://[\w/:%#\$&\?\(\)~\.=\+\-]+"
    return re.sub(pattern,'URL',text) # replace
# ************************************************
# remove_picture
# Do not read image file names aloud
# ************************************************
def remove_picture(text):
pattern = r'.*(\.jpg|\.jpeg|\.gif|\.png|\.bmp)'
    return re.sub(pattern,'画像',text) # replace ('画像' means "image")
# ************************************************
# remove_command
# Do not read commands aloud
# ************************************************
def remove_command(text):
pattern = r'^\!.*'
    return re.sub(pattern,'',text) # replace
# ************************************************
# remove_log
# Do not read VC (voice channel) join logs aloud
# ************************************************
def remove_log(text):
pattern = r'(\【VC参加ログ\】.*)'
    return re.sub(pattern,'',text) # replace
# ************************************************
# user_custam
# Replace user-registered words with their readings
# ************************************************
def user_custam(text):
f = open('user_dictionary.txt', 'r')
line = f.readline()
while line:
pattern = line.strip().split(',')
if pattern[0] in text:
text = text.replace(pattern[0], pattern[1])
            print('text after replacement: '+text)
break
else:
line = f.readline()
f.close()
return text
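# Illustrative user_dictionary.txt format (an assumption inferred from the parsing above):
# one comma-separated "<word>,<replacement reading>" pair per line, e.g.
#   www,wara
#   Dr.,doctor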
# ************************************************
# creat_WAV
# Convert message.content to speech with Open JTalk
# Argument: text (the message content to read aloud)
# Output file: output.wav
# ************************************************
def creat_WAV(text):
    text = remove_custom_emoji(text) # do not read custom emoji IDs aloud
    text = exclude_url(text) # abbreviate URLs
open_jtalk = ['jtalk/bin/open_jtalk']
mech = ['-x', 'jtalk/dic']
htsvoice = ['-m', 'jtalk/mei/mei_normal.htsvoice']
speed = ['-r', '1.0']
outwav = ['-ow', 'output.wav']
cmd = open_jtalk + mech + htsvoice + speed + outwav
c = subprocess.Popen(' '.join(cmd), stdin=subprocess.PIPE, shell=True)
c.stdin.write(text.encode())
c.stdin.close()
c.wait()
if __name__ == '__main__':
creat_WAV('hello world')
|
the-stack_0_15855 | '''
The Pool.map and Pool.apply calls will block the main program until all processes are finished, which is quite useful if we want to obtain results in a particular order for certain applications.
In contrast, the async variants will submit all processes at once and retrieve the results as soon as they are finished. One more difference is that we need to use the get method after the apply_async() call in order to obtain the return values of the finished processes.
'''
from multiprocessing import Pool, current_process, cpu_count
def f(x):
return x * 2
def start_process():
print('Starting', current_process().name)
if __name__ == '__main__':
inputs = list(range(10))
print('Input :', inputs)
builtin_outputs = list(map(f, inputs))
print('Built-in:', builtin_outputs)
#number of threads to use
pool_size = cpu_count()
print("Number of processes:", pool_size)
pool = Pool(processes=pool_size, initializer=start_process)
#using pool.map()
pool_outputs = pool.map(f, inputs)
#using pool.apply()
#pool_outputs = [pool.apply(f, args=(x,)) for x in inputs]
#using pool.apply_async()
#pool_outputs = [pool.apply_async(f, args=(x,)) for x in inputs]
#need to use p.get()
#pool_outputs = [p.get() for p in pool_outputs]
pool.close() # no more tasks
pool.join() # wrap up current tasks
print('Pool:', pool_outputs)
|
the-stack_0_15857 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class dad(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /ipv6/ipv6-global-cmds/nd-global/dad. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__global_dad_time',)
_yang_name = 'dad'
_rest_name = 'dad'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__global_dad_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'ipv6', u'ipv6-global-cmds', u'nd-global', u'dad']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'ipv6', u'nd', u'dad']
def _get_global_dad_time(self):
"""
Getter method for global_dad_time, mapped from YANG variable /ipv6/ipv6_global_cmds/nd_global/dad/global_dad_time (common-def:time-interval-sec)
"""
return self.__global_dad_time
def _set_global_dad_time(self, v, load=False):
"""
Setter method for global_dad_time, mapped from YANG variable /ipv6/ipv6_global_cmds/nd_global/dad/global_dad_time (common-def:time-interval-sec)
If this variable is read-only (config: false) in the
source YANG file, then _set_global_dad_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_global_dad_time() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """global_dad_time must be of a type compatible with common-def:time-interval-sec""",
'defined-type': "common-def:time-interval-sec",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)""",
})
self.__global_dad_time = t
if hasattr(self, '_set'):
self._set()
def _unset_global_dad_time(self):
self.__global_dad_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)
global_dad_time = __builtin__.property(_get_global_dad_time, _set_global_dad_time)
_pyangbind_elements = {'global_dad_time': global_dad_time, }
|
the-stack_0_15861 | import numpy as np
import cv2 as cv
'''
Based on GeeksForGeeks article with an opticalFLow demonstration
'''
def opticalFlow(img1_path,img2_path,blur=False,maxValue=True):
'''
Optical flow between two images
inputs:
two consecutive images,
blur: Boolean - adds blurring to the image for filtering out small particles
        maxValue: Boolean - creates the vector from the maximum value within the pellet bounding box. Average vector otherwise.
'''
framet1 = cv.imread(img1_path)
framet2 = cv.imread(img2_path)
if(blur):
framet1 = cv.GaussianBlur(framet1, (3, 31), 0)
framet2 = cv.GaussianBlur(framet2, (3, 31), 0)
grayt1 = cv.cvtColor(framet1, cv.COLOR_BGR2GRAY)
grayt2 = cv.cvtColor(framet2, cv.COLOR_BGR2GRAY)
flow = cv.calcOpticalFlowFarneback(grayt1, grayt2,
None,
0.5, 3, 15, 3, 5, 1.2, 0)
magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
mask = np.zeros_like(framet1)
mask[..., 1] = 255
# Sets image hue according to the optical flow
# direction
mask[..., 0] = angle * 180 / np.pi / 2
# Sets image value according to the optical flow
# magnitude (normalized)
mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)
# Converts HSV to RGB (BGR) color representation
rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)
x0 = 973
x1 = 1021
y0 = 300
y1 = 350
    cv.rectangle(framet1, (x0, y0), (x1, y1), 100, 3)  # draw the pellet bounding box
if(maxValue):
d = (np.max(flow[973:1021,300:350][0][:,0],axis=0),np.max(flow[973:1021,300:350][0][:,1],axis=0))
else:
d = np.average(flow[973:1021,300:350][0],axis=0)
centerPoint = ((1000,325))
endPoint = (int(centerPoint[0]+d[0]),int(centerPoint[1]+d[1]))
framet1 = framet1+rgb
cv.arrowedLine(framet1,centerPoint,endPoint,(0,0,255) ,2)
displayImage(framet1)
return flow
def displayImage(frame,description="input, press 0 to exit"):
cv.imshow(description, frame)
cv.waitKey(0)
def drawBbox(frame,bbox, color = 100, linewidth=3):
cv.line(frame, (bbox.x0,bbox.y0), (bbox.x0,bbox.y1), color, linewidth)
cv.line(frame, (bbox.x0,bbox.y1), (bbox.x1,bbox.y1), color, linewidth)
cv.line(frame, (bbox.x1,bbox.y0), (bbox.x1,bbox.y1), color, linewidth)
cv.line(frame, (bbox.x0,bbox.y0), (bbox.x1,bbox.y0), color, linewidth)
return frame
def drawVectfromBbox(frame,startPoint,endPoint):
cv.arrowedLine(frame,startPoint,endPoint,(0,0,255) ,2)
return frame
def euclidianDistance(p1,p2):
return np.sqrt((int(p1[0])-int(p2[0]))**2+(int(p1[1])-int(p2[1]))**2)
#opticalFlow('img0371.jpg','img0372.jpg', blur = False) |
the-stack_0_15864 | """Utilities for processing test outputs."""
import pathlib
import re
import subprocess
from pathlib import Path
from typing import Iterator, Tuple
def split_string(
string: str, sub_length: int = 40, copy: bool = False
) -> Tuple[str, ...]:
"""Split a string into subsections less than or equal to new length.
Args:
string (str): The long string to split up.
sub_length (int): The maximum length of the subsections.
            Defaults to 40.
copy (bool): Copy output to clipboard.
Returns:
Tuple[str]: The string split into sections.
"""
string_length = len(string)
split = tuple(
string[begin : begin + sub_length]
for begin in range(0, string_length, sub_length)
)
if copy is True:
string = str(split)
copy_string(string)
return split
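# Hedged usage sketch (not part of the original helpers):
#   split_string("abcdefgh", sub_length=3)  ->  ("abc", "def", "gh")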
def copy_string(string: str) -> None:
"""Copies the string to clipboard.
Uses pbcopy, so for now only works with macOS.
"""
subprocess.run("/usr/bin/pbcopy", text=True, input=string) # noqa: S603
def write_output(string: str, test_name: str, replace_links: bool = True) -> None:
"""Write the output to the expected output's file."""
if replace_links:
tempfile_link_pattern = re.compile(
r"(?P<prefix>file://)"
r"(?P<core_tempfile_link>[\w\s/\\]*)"
r"(?P<suffix>\d+\.\w+)"
)
string = tempfile_link_pattern.sub(
lambda match: f"{match.group('prefix')}"
"{{ tempfile_path }}"
f"{match.group('suffix')}",
string=string,
)
output_directory = pathlib.Path(__file__).parent.parent / pathlib.Path(
"unit", "expected_outputs"
)
expected_output_file = output_directory / pathlib.Path(test_name).with_suffix(
".txt"
)
expected_output_file.write_text(string)
def _get_all_expected_output_paths() -> Iterator[Path]:
"""Get the paths of all the expected output files."""
file_dir = pathlib.Path(__file__).parent
expected_outputs = (
file_dir.parent / pathlib.Path("unit", "expected_outputs")
).glob("**/*.txt")
yield from expected_outputs
def replace_expected_section(old: str, new: str) -> None:
"""Replace all occurrences of a section in the expected output."""
expected_output_paths = _get_all_expected_output_paths()
for expected_output_path in expected_output_paths:
old_text = expected_output_path.read_text()
new_text = old_text.replace(old, new)
expected_output_path.write_text(new_text)
|
the-stack_0_15866 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class TumblrIE(InfoExtractor):
_VALID_URL = r'https?://(?P<blog_name>[^/?#&]+)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': r're:http://.*\.jpg',
}
}, {
'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
'info_dict': {
'id': '90208453769',
'ext': 'mp4',
'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': r're:http://.*\.jpg',
}
}, {
'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video',
'md5': '7ae503065ad150122dc3089f8cf1546c',
'info_dict': {
'id': '130323439814',
'ext': 'mp4',
'title': 'HD Video Testing \u2014 Test description for my HD video',
'description': 'md5:97cc3ab5fcd27ee4af6356701541319c',
'thumbnail': r're:http://.*\.jpg',
},
'params': {
'format': 'hd',
},
}, {
'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
'info_dict': {
'id': 'Wmur',
'ext': 'mp4',
'title': 'naked smoking & stretching',
'upload_date': '20150506',
'timestamp': 1430931613,
'age_limit': 18,
'uploader_id': '1638622',
'uploader': 'naked-yogi',
},
'add_ie': ['Vidme'],
}, {
'url': 'http://camdamage.tumblr.com/post/98846056295/',
'md5': 'a9e0c8371ea1ca306d6554e3fecf50b6',
'info_dict': {
'id': '105463834',
'ext': 'mp4',
'title': 'Cam Damage-HD 720p',
'uploader': 'John Moyer',
'uploader_id': 'user32021558',
},
'add_ie': ['Vimeo'],
}, {
'url': 'http://sutiblr.tumblr.com/post/139638707273',
'md5': '2dd184b3669e049ba40563a7d423f95c',
'info_dict': {
'id': 'ir7qBEIKqvq',
'ext': 'mp4',
'title': 'Vine by sutiblr',
'alt_title': 'Vine by sutiblr',
'uploader': 'sutiblr',
'uploader_id': '1198993975374495744',
'upload_date': '20160220',
'like_count': int,
'comment_count': int,
'repost_count': int,
},
'add_ie': ['Vine'],
}, {
'url': 'http://vitasidorkina.tumblr.com/post/134652425014/joskriver-victoriassecret-invisibility-or',
'md5': '01c12ceb82cbf6b2fe0703aa56b3ad72',
'info_dict': {
'id': '-7LnUPGlSo',
'ext': 'mp4',
'title': 'Video by victoriassecret',
'description': 'Invisibility or flight…which superpower would YOU choose? #VSFashionShow #ThisOrThat',
'uploader_id': 'victoriassecret',
'thumbnail': r're:^https?://.*\.jpg'
},
'add_ie': ['Instagram'],
}]
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
video_id = m_url.group('id')
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage, urlh = self._download_webpage_handle(url, video_id)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
webpage, 'iframe url', default=None)
if iframe_url is None:
return self.url_result(urlh.geturl(), 'Generic')
iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page')
duration = None
sources = []
sd_url = self._search_regex(
r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe,
'sd video url', default=None, group='url')
if sd_url:
sources.append((sd_url, 'sd'))
options = self._parse_json(
self._search_regex(
r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe,
'hd video url', default='', group='options'),
video_id, fatal=False)
if options:
duration = int_or_none(options.get('duration'))
hd_url = options.get('hdUrl')
if hd_url:
sources.append((hd_url, 'hd'))
formats = [{
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'height': int_or_none(self._search_regex(
r'/(\d{3,4})$', video_url, 'height', default=None)),
'quality': quality,
} for quality, (video_url, format_id) in enumerate(sources)]
self._sort_formats(formats)
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
video_title = self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
webpage, 'title')
return {
'id': video_id,
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'formats': formats,
}
|
the-stack_0_15867 | import re
import threading
from typing import Any
from antlr4 import CommonTokenStream, InputStream, ParserRuleContext
from antlr4.error.ErrorListener import ErrorListener
from .errors import GrammarParseError
# Import from visitor in order to check the presence of generated grammar files
# files in a single place.
from .grammar_visitor import ( # type: ignore
OmegaConfGrammarLexer,
OmegaConfGrammarParser,
)
# Used to cache grammar objects to avoid re-creating them on each call to `parse()`.
# We use a per-thread cache to make it thread-safe.
_grammar_cache = threading.local()
# Build regex pattern to efficiently identify typical interpolations.
# See test `test_match_simple_interpolation_pattern` for examples.
_config_key = r"[$\w]+" # foo, $0, $bar, $foo_$bar123$
_key_maybe_brackets = f"{_config_key}|\\[{_config_key}\\]" # foo, [foo], [$bar]
_node_access = f"\\.{_key_maybe_brackets}" # .foo, [foo], [$bar]
_node_path = f"(\\.)*({_key_maybe_brackets})({_node_access})*" # [foo].bar, .foo[bar]
_node_inter = f"\\${{\\s*{_node_path}\\s*}}" # node interpolation ${foo.bar}
_id = "[a-zA-Z_]\\w*" # foo, foo_bar, abc123
_resolver_name = f"({_id}(\\.{_id})*)?" # foo, ns.bar3, ns_1.ns_2.b0z
_arg = "[a-zA-Z_0-9/\\-\\+.$%*@?|]+" # string representing a resolver argument
_args = f"{_arg}(\\s*,\\s*{_arg})*" # list of resolver arguments
_resolver_inter = f"\\${{\\s*{_resolver_name}\\s*:\\s*{_args}?\\s*}}" # ${foo:bar}
_inter = f"({_node_inter}|{_resolver_inter})" # any kind of interpolation
_outer = "([^$]|\\$(?!{))+" # any character except $ (unless not followed by {)
SIMPLE_INTERPOLATION_PATTERN = re.compile(
f"({_outer})?({_inter}({_outer})?)+$", flags=re.ASCII
)
# NOTE: SIMPLE_INTERPOLATION_PATTERN must not generate false positive matches:
# it must not accept anything that isn't a valid interpolation (per the
# interpolation grammar defined in `omegaconf/grammar/*.g4`).
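# Hedged illustration (not part of the original module): a few strings the fast-path regex above is meant
# to accept or reject. The literals are made-up examples for illustration, not taken from the test suite.
def _simple_interpolation_pattern_examples() -> None:
    accepted = ["${foo.bar}", "prefix ${env:PATH} suffix", "${ns.resolver:a,b}"]
    rejected = ["plain text", "${unclosed", "${}"]
    assert all(SIMPLE_INTERPOLATION_PATTERN.match(s) for s in accepted)
    assert not any(SIMPLE_INTERPOLATION_PATTERN.match(s) for s in rejected)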
class OmegaConfErrorListener(ErrorListener): # type: ignore
def syntaxError(
self,
recognizer: Any,
offending_symbol: Any,
line: Any,
column: Any,
msg: Any,
e: Any,
) -> None:
raise GrammarParseError(str(e) if msg is None else msg) from e
def reportAmbiguity(
self,
recognizer: Any,
dfa: Any,
startIndex: Any,
stopIndex: Any,
exact: Any,
ambigAlts: Any,
configs: Any,
) -> None:
raise GrammarParseError("ANTLR error: Ambiguity") # pragma: no cover
def reportAttemptingFullContext(
self,
recognizer: Any,
dfa: Any,
startIndex: Any,
stopIndex: Any,
conflictingAlts: Any,
configs: Any,
) -> None:
# Note: for now we raise an error to be safe. However this is mostly a
# performance warning, so in the future this may be relaxed if we need
# to change the grammar in such a way that this warning cannot be
# avoided (another option would be to switch to SLL parsing mode).
raise GrammarParseError(
"ANTLR error: Attempting Full Context"
) # pragma: no cover
def reportContextSensitivity(
self,
recognizer: Any,
dfa: Any,
startIndex: Any,
stopIndex: Any,
prediction: Any,
configs: Any,
) -> None:
raise GrammarParseError("ANTLR error: ContextSensitivity") # pragma: no cover
def parse(
value: str, parser_rule: str = "configValue", lexer_mode: str = "DEFAULT_MODE"
) -> ParserRuleContext:
"""
Parse interpolated string `value` (and return the parse tree).
"""
l_mode = getattr(OmegaConfGrammarLexer, lexer_mode)
istream = InputStream(value)
cached = getattr(_grammar_cache, "data", None)
if cached is None:
error_listener = OmegaConfErrorListener()
lexer = OmegaConfGrammarLexer(istream)
lexer.removeErrorListeners()
lexer.addErrorListener(error_listener)
lexer.mode(l_mode)
token_stream = CommonTokenStream(lexer)
parser = OmegaConfGrammarParser(token_stream)
parser.removeErrorListeners()
parser.addErrorListener(error_listener)
# The two lines below could be enabled in the future if we decide to switch
# to SLL prediction mode. Warning though, it has not been fully tested yet!
# from antlr4 import PredictionMode
# parser._interp.predictionMode = PredictionMode.SLL
# Note that although the input stream `istream` is implicitly cached within
# the lexer, it will be replaced by a new input next time the lexer is re-used.
_grammar_cache.data = lexer, token_stream, parser
else:
lexer, token_stream, parser = cached
# Replace the old input stream with the new one.
lexer.inputStream = istream
# Initialize the lexer / token stream / parser to process the new input.
lexer.mode(l_mode)
token_stream.setTokenSource(lexer)
parser.reset()
try:
return getattr(parser, parser_rule)()
except Exception as exc:
if type(exc) is Exception and str(exc) == "Empty Stack":
# This exception is raised by antlr when trying to pop a mode while
# no mode has been pushed. We convert it into an `GrammarParseError`
# to facilitate exception handling from the caller.
raise GrammarParseError("Empty Stack")
else:
raise
|
the-stack_0_15868 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
# test.BACKEND_NAME is a configuration variable determining which
# nGraph backend tests will use. It's set during pytest configuration time.
# See `pytest_configure` hook in `conftest.py` for more details.
BACKEND_NAME = None
# test.MODEL_ZOO_DIR is a configuration variable providing the path
# to the ZOO of ONNX models to test. It's set during pytest configuration time.
# See `pytest_configure` hook in `conftest.py` for more
# details.
MODEL_ZOO_DIR = None
# test.MODEL_ZOO_XFAIL is a configuration variable which enable xfails for model zoo.
MODEL_ZOO_XFAIL = False
def xfail_test(reason="Mark the test as expected to fail", strict=True):
return pytest.mark.xfail(reason=reason, strict=strict)
skip_segfault = pytest.mark.skip(reason="Segmentation fault error")
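# Hedged usage note (not part of the original file): the markers below are applied as decorators on
# individual tests, for example (test name is illustrative only):
#   @xfail_issue_33488
#   def test_max_unpool():
#       ...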
xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"MaxUnpool")
xfail_issue_33512 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"Einsum")
xfail_issue_33535 = xfail_test(reason="nGraph does not support the following ONNX operations:"
"DynamicQuantizeLinear")
xfail_issue_33538 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"Scan")
skip_issue_38084 = pytest.mark.skip(reason="Aborted (core dumped) Assertion "
"`(layer->get_output_partial_shape(i).is_static())' failed.")
xfail_issue_33589 = xfail_test(reason="nGraph does not support the following ONNX operations:"
"IsNaN and isInf")
xfail_issue_33595 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"Unique")
xfail_issue_33596 = xfail_test(reason="RuntimeError: nGraph does not support different sequence operations:"
"ConcatFromSequence, SequenceConstruct, SequenceAt, SplitToSequence,"
"SequenceEmpty, SequenceInsert, SequenceErase, SequenceLength ")
xfail_issue_33606 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"Det")
xfail_issue_33651 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"TfIdfVectorizer")
xfail_issue_33581 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"GatherElements")
xfail_issue_33633 = xfail_test(reason="MaxPool: dilations unsupported")
xfail_issue_35911 = xfail_test(reason="Assertion error: Pad model mismatch error")
xfail_issue_35912 = xfail_test(reason="RuntimeError: Error of validate layer: B with type: "
"Pad. Cannot parse parameter pads_end from IR for layer B. "
"Value -1,0 cannot be casted to int.")
xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported")
xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch")
xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
xfail_issue_36480 = xfail_test(reason="RuntimeError: [NOT_FOUND] Unsupported property dummy_option "
"by CPU plugin")
xfail_issue_36485 = xfail_test(reason="RuntimeError: Check 'm_group >= 1' failed at "
"/openvino/ngraph/core/src/op/shuffle_channels.cpp:77:")
xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted "
"to HardSigmoid_IE")
xfail_issue_36487 = xfail_test(reason="Assertion error - mvn operator computation mismatch")
xfail_issue_38084 = xfail_test(reason="RuntimeError: AssertionFailed: layer->get_output_partial_shape(i)"
"is_static() nGraph <value> operation with name: <value> cannot be"
"converted to <value> layer with name: <value> because output"
"with index 0 contains dynamic shapes: {<value>}. Try to use "
"CNNNetwork::reshape() method in order to specialize shapes "
"before the conversion.")
xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements")
xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"ai.onnx.preview.training.Gradient")
xfail_issue_38701 = xfail_test(reason="RuntimeError: unsupported element type: STRING")
xfail_issue_38706 = xfail_test(reason="RuntimeError: output_3.0 has zero dimension which is not allowed")
xfail_issue_38708 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Slice): y>': "
"Axes input must be constant")
xfail_issue_38710 = xfail_test(reason="RuntimeError: roi has zero dimension which is not allowed")
xfail_issue_38713 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"ai.onnx.preview.training.Momentum")
xfail_issue_43742 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"If")
xfail_issue_45457 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v5::Loop"
"Not constant termination condition body output is not supported")
xfail_issue_38722 = xfail_test(reason="RuntimeError: While validating ONNX nodes MatMulInteger"
"and QLinearMatMul"
"Input0 scale and input0 zero point shape must be same and 1")
xfail_issue_38723 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"QLinearConv")
xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Resize): Y>':"
"tf_crop_and_resize - this type of coordinate transformation mode"
"is not supported. Choose one of the following modes:"
"tf_half_pixel_for_nn, asymmetric, align_corners, pytorch_half_pixel,"
"half_pixel")
xfail_issue_38725 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Loop):"
"value info has no element type specified")
xfail_issue_38726 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"LessOrEqual")
xfail_issue_38732 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"ConvInteger")
xfail_issue_38734 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"ai.onnx.preview.training.Adam")
xfail_issue_38735 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"ai.onnx.preview.training.Adagrad")
xfail_issue_48052 = xfail_test(reason="Dropout op is not supported in traning mode")
xfail_issue_45180 = xfail_test(reason="RuntimeError: Unsupported dynamic op: ReduceSum")
xfail_issue_44839 = xfail_test(reason="Huge computation mismatch")
xfail_issue_44848 = xfail_test(reason="E Unsupported dynamic op: Range")
xfail_issue_44851 = xfail_test(reason="E Unsupported dynamic op: Broadcast")
xfail_issue_44854 = xfail_test(reason="E Unsupported dynamic op: VariadicSplit")
xfail_issue_44858 = xfail_test(reason="E Unsupported dynamic op: Unsqueeze")
xfail_issue_44956 = xfail_test(reason="E Unsupported dynamic op: Loop")
xfail_issue_44957 = xfail_test(reason="E Unsupported dynamic op: NonZero")
xfail_issue_44958 = xfail_test(reason="E Unsupported dynamic op: Interpolate")
xfail_issue_44965 = xfail_test(reason="E RuntimeError: value info has no element")
xfail_issue_44968 = xfail_test(reason="E Unsupported dynamic op: Squeeze")
xfail_issue_44970 = xfail_test(reason="Assertion error")
xfail_issue_44976 = xfail_test(reason="E RuntimeError: Quantize layer with name:"
"FakeQuantize_xxx has non const input on 1 port")
xfail_issue_46762 = xfail_test(reason="Incorrect result of Minimum op if uint data type is used")
xfail_issue_46765 = xfail_test(reason="select_last_index attribute is not supported by ArgMin and ArgMax")
xfail_issue_47323 = xfail_test(reason="RuntimeError: The plugin does not support FP64")
xfail_issue_47337 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::OneHot")
xfail_issue_33593 = xfail_test(reason="Current implementation of MaxPool doesn't support indices output")
# Model MSFT issues:
xfail_issue_37957 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"com.microsoft.CropAndResize, com.microsoft.GatherND,"
"com.microsoft.Pad, com.microsoft.Range")
xfail_issue_39669 = xfail_test(reason="AssertionError: This model has no test data")
xfail_issue_40686 = xfail_test(reason="NANs as results")
xfail_issue_36534 = xfail_test(reason="RuntimeError: node input index is out of range")
xfail_issue_36536 = xfail_test(reason="RuntimeError: can't protect")
xfail_issue_36538 = xfail_test(reason="RuntimeError: Check 'PartialShape::broadcast_merge_into( pshape, "
"node->get_input_partial_shape(i), autob)' failed at "
"/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:")
xfail_issue_39656 = xfail_test(reason="RuntimeError: Reshape reshaped has dynamic second input!")
xfail_issue_39658 = xfail_test(reason="RuntimeError: Tile operation has a form that is not supported."
" z should be converted to TileIE operation.")
xfail_issue_39659 = xfail_test(reason="RuntimeError: Broadcast operation has a form that is not supported."
" y should be converted to Tile operation.")
xfail_issue_45344 = xfail_test(reason="Unsupported dynamic ops: v3::NonMaxSuppressionIE3")
xfail_issue_39662 = xfail_test(reason="RuntimeError: 'ScatterElementsUpdate' layer with name 'y' have "
"indices value that points to non-existing output tensor element")
xfail_issue_37973 = xfail_test(reason="TF Inception V2 - AssertionError: zoo models results mismatch")
xfail_issue_47430 = xfail_test(reason="FCN ResNet models - AssertionError: zoo models results mismatch")
xfail_issue_47495 = xfail_test(reason="BertSquad-10 from MSFT - AssertionError: zoo models results mismatch")
xfail_issue_49207 = xfail_test(reason="Function references undeclared parameters")
xfail_issue_48145 = xfail_test(reason="BertSquad-8 - AssertionError: Items are not equal: ACTUAL: 4 "
"DESIRED: 3")
xfail_issue_48190 = xfail_test(reason="RobertaBase-11 - AssertionError: Items are not equal: "
"ACTUAL: dtype('float64') DESIRED: dtype('float32')")
xfail_issue_49750 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v4::Interpolate")
xfail_issue_49752 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::Pad")
xfail_issue_49753 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::StridedSlice")
xfail_issue_49754 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::TopKIE")
xfail_issue_52463 = xfail_test(reason="test_operator_add_size1_singleton_broadcast_cpu - "
"Not equal to tolerance")
xfail_issue_49391 = xfail_test(reason="Roll is not implemented in CPU plugin.")
|
the-stack_0_15869 | #########################################################################
## This file is part of the alpha-beta-CROWN verifier ##
## ##
## Copyright (C) 2021, Huan Zhang <[email protected]> ##
## Kaidi Xu <[email protected]> ##
## Shiqi Wang <[email protected]> ##
## Zhouxing Shi <[email protected]> ##
## Yihan Wang <[email protected]> ##
## ##
## This program is licenced under the BSD 3-Clause License, ##
## contained in the LICENCE file in this directory. ##
## ##
#########################################################################
"""alpha-beta-CROWN verifier interface to handle robustness verification."""
import os
import re
import socket
import random
import time
import gc
from utils import get_test_acc, load_model, load_verification_dataset
import numpy as np
import pandas as pd
import torch
import arguments
from auto_LiRPA import BoundedModule, BoundedTensor
from auto_LiRPA.perturbations import PerturbationLpNorm
from bab_verification_general import mip, incomplete_verifier, bab
from attack_pgd import pgd_attack
from utils import Normalization, get_pgd_acc
def config_args():
# Add arguments specific for this front-end.
h = ["general"]
arguments.Config.add_argument("--mode", type=str, default="verified-acc", choices=["verified-acc", "runnerup", "clean-acc", "crown-only-verified-acc", "alpha-crown-only-verified-acc", "ibp-only-verified-acc", "attack-only", "specify-target"],
                                  help='Verify against all labels ("verified-acc" mode), or just the runnerup labels ("runnerup" mode), or using a specified label in dataset ("specify-target" mode, only used for oval20). Mode can also be set as "crown-only-verified-acc" or "alpha-crown-only-verified-acc", which quickly computes the verified accuracy over the entire dataset via CROWN or alpha-CROWN.', hierarchy=h + ["mode"])
arguments.Config.add_argument('--complete_verifier', choices=["bab", "mip", "bab-refine", "skip"], default="bab",
help='Complete verification verifier. "bab": branch and bound with beta-CROWN; "mip": mixed integer programming (MIP) formulation; "bab-refine": branch and bound with intermediate layer bounds computed by MIP.', hierarchy=h + ["complete_verifier"])
arguments.Config.add_argument('--no_incomplete', action='store_false', dest='incomplete',
help='Enable/Disable initial alpha-CROWN incomplete verification (this can save GPU memory when disabled).', hierarchy=h + ["enable_incomplete_verification"])
arguments.Config.add_argument("--crown", action='store_true', help='Compute CROWN verified accuracy before verification (not used).', hierarchy=h + ["get_crown_verified_acc"])
h = ["model"]
arguments.Config.add_argument("--model", type=str, default="please_specify_model_name", help='Name of model. Model must be defined in the load_verification_dataset() function in utils.py.', hierarchy=h + ["name"])
h = ["data"]
arguments.Config.add_argument("--dataset", type=str, default="CIFAR", choices=["MNIST", "CIFAR", "CIFAR_SDP_FULL", "CIFAR_RESNET", "CIFAR_SAMPLE", "MNIST_SAMPLE", "CIFAR_ERAN", "MNIST_ERAN",
"MNIST_ERAN_UN", "MNIST_SDP", "MNIST_MADRY_UN", "CIFAR_SDP", "CIFAR_UN"], help='Dataset name. Dataset must be defined in utils.py.', hierarchy=h + ["dataset"])
arguments.Config.add_argument("--filter_path", type=str, default=None, help='A filter in pkl format contains examples that will be skipped (not used).', hierarchy=h + ["data_filter_path"])
arguments.Config.add_argument("--data_idx_file", type=str, default=None, help='A text file with a list of example IDs to run.', hierarchy=h + ["data_idx_file"])
h = ["attack"]
arguments.Config.add_argument("--mip_attack", action='store_true', help='Use MIP (Gurobi) based attack if PGD cannot find a successful adversarial example.', hierarchy=h + ["enable_mip_attack"])
arguments.Config.add_argument('--pgd_steps', type=int, default=100, help="Steps of PGD attack.", hierarchy=h + ["pgd_steps"])
arguments.Config.add_argument('--pgd_restarts', type=int, default=30, help="Number of random PGD restarts.", hierarchy= h + ["pgd_restarts"])
arguments.Config.add_argument('--no_pgd_early_stop', action='store_false', dest='pgd_early_stop', help="Early stop PGD when an adversarial example is found.", hierarchy=h + ["pgd_early_stop"])
arguments.Config.add_argument('--pgd_lr_decay', type=float, default=0.99, help='Learning rate decay factor used in PGD attack.', hierarchy= h + ["pgd_lr_decay"])
arguments.Config.add_argument('--pgd_alpha', type=str, default="auto", help='Step size of PGD attack. Default (auto) is epsilon/4.', hierarchy=h + ["pgd_alpha"])
h = ["debug"]
arguments.Config.add_argument("--lp_test", type=str, default=None, choices=["MIP", "LP", "LP_intermediate_refine", "MIP_intermediate_refine", None], help='Debugging option, do not use.', hierarchy=h + ['lp_test'])
arguments.Config.parse_config()
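# Illustrative invocation (the script name and concrete values are assumptions; the flags themselves
# are defined in config_args above):
#   python robustness_verifier.py --model cnn_4layer --dataset CIFAR \
#       --complete_verifier bab --mode verified-acc --pgd_restarts 50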
def get_statistics(model, image, true_label, eps, data_min, data_max, batch_size, method="CROWN"):
"""For quickly checking clean accuracy and CROWN verified accuracy."""
assert method == "CROWN" or method == "alpha-CROWN" or method == "IBP"
    # Clean accuracy
predicted = model(image)
n_correct = (predicted.argmax(dim=1) == true_label).sum().item()
print(f'{n_correct} examples are correct, image range ({image.min()}, {image.max()})')
# CROWN verified accuracy
verified = 0
N = image.size(0)
num_outputs = arguments.Config["data"]["num_outputs"]
norm = np.inf
assert norm == arguments.Config["specification"]["norm"] # TODO: make this function support more norms.
model = BoundedModule(model, torch.empty_like(image[:batch_size]), device=arguments.Config["general"]["device"])
if method == 'alpha-CROWN':
# Set alpha-CROWN optimization parameters.
lr_alpha = arguments.Config["solver"]["alpha-crown"]["lr_alpha"]
iteration = arguments.Config["solver"]["alpha-crown"]["iteration"]
share_slopes = arguments.Config["solver"]["alpha-crown"]["share_slopes"]
optimizer = arguments.Config["solver"]["beta-crown"]["optimizer"]
lr_decay = arguments.Config["solver"]["beta-crown"]["lr_decay"]
model.set_bound_opts({'optimize_bound_args': {'ob_iteration': iteration, 'ob_beta': False, 'ob_alpha': True,
'ob_alpha_share_slopes': share_slopes, 'ob_optimizer': optimizer,
'ob_lr': lr_alpha, 'ob_lr_decay': lr_decay}})
batch_idx = 0
all_start_time = time.time()
while batch_idx * batch_size < N:
start_time = time.time()
start_idx, end_idx = batch_idx*batch_size, min(batch_idx*batch_size+batch_size, N)
data, labels = image[start_idx:end_idx], torch.tensor(true_label[start_idx:end_idx])
if arguments.Config["specification"]["type"] == "lp":
# Linf norm only so far.
data_ub = torch.min(data + eps, data_max)
data_lb = torch.max(data - eps, data_min)
else:
# Per-example, per-element lower and upper bounds.
data_ub = data_max[start_idx:end_idx]
data_lb = data_min[start_idx:end_idx]
data, data_lb, data_ub, labels = data.cuda(), data_lb.cuda(), data_ub.cuda(), labels.cuda()
ptb = PerturbationLpNorm(norm=norm, eps=None, x_L=data_lb, x_U=data_ub)
data = BoundedTensor(data, ptb)
# labels = torch.argmax(pred, dim=1).cpu().detach().numpy()
c = torch.eye(num_outputs).type_as(data)[labels].unsqueeze(1) - torch.eye(num_outputs).type_as(data).unsqueeze(0)
I = (~(labels.data.unsqueeze(1) == torch.arange(num_outputs).type_as(labels.data).unsqueeze(0)))
c = (c[I].view(data.size(0), num_outputs - 1, num_outputs)).cuda()
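        # Illustrative note: with num_outputs = 3 and label y = 1, the two rows of c for that example are
        # (e_1 - e_0) and (e_1 - e_2), so the bounds below are on the margins logit_1 - logit_0 and
        # logit_1 - logit_2; lb.min(1)[0] >= 0 then certifies that the true class keeps the largest logit.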
if method == "CROWN" or method == "IBP":
with torch.no_grad():
lb, ub = model.compute_bounds(x=(data,), method=method, C=c, bound_upper=False)
else:
# alpha-CROWN requires gradient.
lb, ub = model.compute_bounds(x=(data,), method="CROWN-optimized", C=c, bound_upper=False)
verified += (lb.min(1)[0]>=0).sum().item()
# Print some bounds for the first batch for debugging.
duration = time.time() - start_time
if batch_idx == 0:
print("Bounds for first a few examples:")
print(lb[:10].detach().cpu().numpy())
print(f"batch: {batch_idx}, verified acc: {(lb.min(1)[0]>=0).sum().item()} / {data.size(0)}, time {duration}")
del lb, ub
batch_idx += 1
full_duration = time.time() - all_start_time
print(f"{method} verified acc: {verified/N * 100}%, {verified} verified, time {full_duration}")
def main():
print(f'Experiments at {time.ctime()} on {socket.gethostname()}')
torch.manual_seed(arguments.Config["general"]["seed"])
random.seed(arguments.Config["general"]["seed"])
np.random.seed(arguments.Config["general"]["seed"])
if arguments.Config["general"]["device"] != 'cpu':
torch.cuda.manual_seed_all(arguments.Config["general"]["seed"])
# Always disable TF32 (precision is too low for verification).
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
if arguments.Config["general"]["deterministic"]:
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
torch.use_deterministic_algorithms(True)
if arguments.Config["general"]["double_fp"]:
torch.set_default_dtype(torch.float64)
if arguments.Config["attack"]["pgd_order"] != "skip":
if arguments.Config["specification"]["type"] == "lp" and arguments.Config["specification"]["norm"] != np.inf:
print('Only Linf-norm attack is supported, the pgd_order will be changed to skip')
arguments.Config["attack"]["pgd_order"] = "skip"
model_ori = load_model(weights_loaded=True)
if arguments.Config["specification"]["epsilon"] is not None:
perturb_epsilon = torch.tensor(arguments.Config["specification"]["epsilon"], dtype=torch.get_default_dtype())
else:
print('No epsilon defined!')
perturb_epsilon = None
X, labels, runnerup, data_max, data_min, perturb_epsilon, target_label = load_verification_dataset(perturb_epsilon)
if arguments.Config["general"]["mode"] == "clean-acc":
print("Testing clean accuracy.")
get_test_acc(model_ori, X=X, labels=labels, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"])
return
if "MNIST" in arguments.Config["data"]["dataset"]:
attack_dataset = "MNIST"
elif "CIFAR" in arguments.Config["data"]["dataset"]:
attack_dataset = "CIFAR"
else:
attack_dataset = "UNKNOWN"
if arguments.Config["specification"]["type"] == 'lp':
if perturb_epsilon is None:
raise ValueError("Perturbation epsilon is not set by data loader. Do you mean to use the 'bound' type specification? Try adding --spec_type bound")
if arguments.Config["specification"]["epsilon"] is not None:
print(f"epsilon after preprocessing: {perturb_epsilon}, data_max = {data_max}, data_min = {data_min}")
if data_max.size(0) != 1 or data_min.size(0) != 1:
            raise ValueError("For 'lp' type specification, you need absolute (global) lower and upper bounds, not per-example bounds.")
elif arguments.Config["specification"]["type"] == 'bound':
print(f'Loaded datasets with per-element lower and upper bounds: max = {data_max.max().item()}, min = {data_min.min().item()}')
if data_max.size(0) != X.size(0) or data_min.size(0) != X.size(0):
raise ValueError("For 'bound' type specification, you need per example lower and upper bounds.")
if perturb_epsilon is None:
perturb_epsilon = (data_max - data_min).mean() / 2.
print(f'eps set to {perturb_epsilon}. This will not be used for certification, but will be used to determine PGD step size.')
if arguments.Config["data"]["data_idx_file"] is not None:
# Go over a list of data indices.
with open(arguments.Config["data"]["data_idx_file"]) as f:
bnb_ids = re.split(r'[;|,|\n|\s]+', f.read().strip())
bnb_ids = [int(b_id) for b_id in bnb_ids]
print(f'Example indices (total {len(bnb_ids)}): {bnb_ids}')
else:
# By default, we go over all data.
bnb_ids = list(range(X.shape[0]))
bnb_ids = bnb_ids[arguments.Config["data"]["start"]: arguments.Config["data"]["end"]]
print('Task length:', len(bnb_ids))
save_path = 'Verified_ret_[{}]_start={}_end={}_iter={}_b={}_timeout={}_branching={}-{}-{}_lra-init={}_lra={}_lrb={}_PGD={}.npy'. \
format(arguments.Config['model']['name'], arguments.Config["data"]["start"], arguments.Config["data"]["end"], arguments.Config["solver"]["beta-crown"]["iteration"], arguments.Config["solver"]["beta-crown"]["batch_size"],
arguments.Config["bab"]["timeout"], arguments.Config["bab"]["branching"]["method"], arguments.Config["bab"]["branching"]["reduceop"],
arguments.Config["bab"]["branching"]["candidates"], arguments.Config["solver"]["alpha-crown"]["lr_alpha"], arguments.Config["solver"]["beta-crown"]["lr_alpha"], arguments.Config["solver"]["beta-crown"]["lr_beta"], arguments.Config["attack"]["pgd_order"])
print(f'saving results to {save_path}')
if arguments.Config["general"]["mode"] == "crown-only-verified-acc":
get_statistics(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"])
return
if arguments.Config["general"]["mode"] == "alpha-crown-only-verified-acc":
get_statistics(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"], method="alpha-CROWN")
return
if arguments.Config["general"]["mode"] == "ibp-only-verified-acc":
get_statistics(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"], method="IBP")
return
if arguments.Config["general"]["mode"] == "attack-only":
get_pgd_acc(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"])
ret, lb_record, attack_success = [], [], []
mip_unsafe, mip_safe, mip_unknown = [], [], []
verified_acc = len(bnb_ids)
verified_failed = []
verified_success_list = []
example_time = []
skipped_examples = []
nat_acc = len(bnb_ids)
orig_timeout = arguments.Config["bab"]["timeout"]
model_ori, all_data_max, all_data_min = model_ori.to(arguments.Config["general"]["device"]), data_max.to(arguments.Config["general"]["device"]), data_min.to(arguments.Config["general"]["device"])
if isinstance(perturb_epsilon, torch.Tensor):
perturb_eps = perturb_epsilon.to(arguments.Config["general"]["device"])
for new_idx, imag_idx in enumerate(bnb_ids):
arguments.Config["bab"]["timeout"] = orig_timeout
print('\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% idx:', new_idx, 'img ID:', imag_idx, '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
torch.cuda.empty_cache()
gc.collect()
x, y = X[imag_idx], int(labels[imag_idx].item())
x = x.unsqueeze(0).to(dtype=torch.get_default_dtype(), device=arguments.Config["general"]["device"])
if arguments.Config["specification"]["type"] == 'bound':
data_min = all_data_min[imag_idx].unsqueeze(0)
data_max = all_data_max[imag_idx].unsqueeze(0)
else:
data_min = all_data_min
data_max = all_data_max
# first check the model is correct at the input
logit_pred = model_ori(x)[0]
if logit_pred.size(0) > 1:
# Multi-class.
y_pred = torch.max(logit_pred, 0)[1].item()
else:
# Binary classifier: logit_pred > 0 => label 1, otherwise label 0.
y_pred = int(logit_pred.item() > 0)
if type(perturb_epsilon) is list:
# Each image has different epsilon (e.g., OVAL 20).
perturb_eps = perturb_epsilon[imag_idx].to(arguments.Config["general"]["device"])
print(f'predicted label {y_pred}, correct label {y}, image norm {x.abs().sum().item()}, logits {logit_pred}')
if y_pred != y:
print(f'Result: image {imag_idx} prediction is incorrect, skipped.')
skipped_examples.append(imag_idx)
verified_acc -= 1
nat_acc -= 1
# attack_success.append(imag_idx)
continue
# else:
# # enable here to check clean acc
# continue
verified_success = False
verified_status = "unknown"
attack_margin = None
attack_images = None
example_start_time = time.time()
if arguments.Config["attack"]["pgd_order"] == "before":
start_attack = time.time()
attack_args = {'dataset': attack_dataset, 'model': model_ori, 'x': x, 'max_eps': perturb_eps, 'data_min': data_min, 'data_max': data_max, 'y': y}
attack_ret, attack_images, attack_margin = pgd_attack(**attack_args)
ret.append([imag_idx, 0, 0, time.time()-start_attack, new_idx, -3, np.inf, np.inf])
if attack_ret:
# Attack success.
verified_status = "unsafe-pgd"
verified_acc -= 1
attack_success.append(imag_idx)
print(f"Result: image {imag_idx} attack success!")
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
# continue # uncomment for checking pgd attacking results
init_global_lb = saved_bounds = saved_slopes = None
# Incomplete verification is enabled by default. The intermediate lower and upper bounds will be reused in bab and mip.
if not verified_success and (arguments.Config["general"]["enable_incomplete_verification"] or arguments.Config["general"]["complete_verifier"] == "bab-refine"):
start_incomplete = time.time()
data = x
if arguments.Config["specification"]["type"] == 'lp':
# Lp norm.
if arguments.Config["specification"]["norm"] == np.inf:
if data_max is None:
data_ub = data + perturb_eps # perturb_eps is already normalized.
data_lb = data - perturb_eps
else:
data_ub = torch.min(data + perturb_eps, data_max)
data_lb = torch.max(data - perturb_eps, data_min)
else:
data_ub = data_lb = data
elif arguments.Config["specification"]["type"] == 'bound':
# Given lower and upper bounds *per example per pixel*.
data_lb = data_min
data_ub = data_max
else:
raise ValueError(f'Unexpected specification type {arguments.Config["specification"]["type"]}')
verified_status, init_global_lb, saved_bounds, saved_slopes = incomplete_verifier(model_ori, x,
y, data_ub=data_ub, data_lb=data_lb, eps=perturb_eps)
verified_success = verified_status != "unknown"
if not verified_success:
lower_bounds, upper_bounds = saved_bounds[1], saved_bounds[2]
arguments.Config["bab"]["timeout"] -= (time.time()-start_incomplete)
ret.append([imag_idx, 0, 0, time.time()-start_incomplete, new_idx, -1, np.inf, np.inf])
if verified_success:
print(f"Result: image {imag_idx} verification success (with incomplete verifier)!")
verified_success_list.append(imag_idx)
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
if arguments.Config["attack"]["pgd_order"] == "after":
start_attack = time.time()
attack_args = {'dataset': attack_dataset, 'model': model_ori, 'x': x, 'max_eps': perturb_eps, 'data_min': data_min, 'data_max': data_max, 'y': y}
attack_ret, attack_images, attack_margin = pgd_attack(**attack_args)
ret.append([imag_idx, 0, 0, time.time()-start_attack, new_idx, -3, np.inf, np.inf])
if attack_ret:
# Attack success.
verified_status = "unsafe-pgd"
verified_acc -= 1
attack_success.append(imag_idx)
print(f"Result: image {imag_idx} attack success!")
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
elif arguments.Config["attack"]["enable_mip_attack"]:
c = torch.eye(arguments.Config["data"]["num_outputs"]).type_as(data)[[y]].unsqueeze(1) - torch.eye(arguments.Config["data"]["num_outputs"]).type_as(data).unsqueeze(0)
lirpa_model, lower_bounds, upper_bounds, masks = saved_bounds[:4]
lirpa_model.build_mip_model(lower_bounds, upper_bounds, arguments.Config["bab"]["timeout"], arguments.Config["solver"]["mip"]["parallel_solvers"], arguments.Config["solver"]["mip"]["solver_threads"])
total_unstable = 0
for layer_i, m in enumerate(masks):
unstable = int(m.sum().item())
total_unstable += unstable
print(f'layer {layer_i} has {unstable} unstable neurons')
print(f'Total {total_unstable} unstable neurons.')
attack_ret = False
labels_to_verify = attack_margin.argsort().squeeze().tolist()
print('Sorted order for labels to attack:', labels_to_verify)
for target in labels_to_verify:
if target != y:
if init_global_lb[0][target].item() > 0:
print(f'Label {target} is already verified.')
continue
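                # attack_images stores one candidate per non-groundtruth label,
                # so targets above y are shifted down by one.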
attack_image_target = target if target < y else target - 1
adv_pool = AdvExamplePool(lirpa_model.net, masks, C=c[:, target:target+1])
# Add adversarial image for the specific target only.
adv_pool.add_adv_images(attack_images[:, :, attack_image_target].view((-1, *attack_images.shape[-3:])))
neuron_idx, coeff = adv_pool.get_activation_pattern_from_pool()
                # The initial starting point and activation pattern have a batch dimension because there can be multiple initializations.
selected_advs = adv_pool.adv_pool
best_adv = torch.stack([adv.x for adv in selected_advs], dim=0)
best_adv_pattern = [torch.stack([adv.activation_pattern[layer_i] for adv in selected_advs], dim=0) for layer_i in range(adv_pool.nlayers)]
print(f'Best adv example in pool: {adv_pool.adv_pool[0].obj}, worse {adv_pool.adv_pool[-1].obj}')
print(f'Target label {target} has {len(coeff)} out of {total_unstable} unstable neurons fixed.')
attack_ret, solver_results = lirpa_model.update_mip_model_fix_relu([neuron_idx], [coeff], target, arguments.Config["solver"]["mip"]["parallel_solvers"], arguments.Config["solver"]["mip"]["solver_threads"],
async_mip=False, best_adv=[best_adv], adv_activation_pattern=[best_adv_pattern])
with torch.no_grad():
pred = lirpa_model.net(solver_results[0][3].to(lirpa_model.net.device)).squeeze(0)
attack_margin = pred[y] - pred
print(f"attack margin: {attack_margin}, for label {target}: {pred[y] - pred[target]}")
if attack_ret:
break
if attack_ret:
# Attack success.
verified_status = "unsafe-mip_attack"
verified_acc -= 1
attack_success.append(imag_idx)
print(f"Result: image {imag_idx} attack success!")
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
if arguments.Config["general"]["mode"] == "verified-acc":
if arguments.Config["attack"]["pgd_order"] != "skip":
# .reshape(-1) to handle the case where there is only 1 element.
labels_to_verify = attack_margin.argsort().squeeze().reshape(-1).tolist()
print('Sorted order for labels to attack:', labels_to_verify)
elif arguments.Config["general"]["enable_incomplete_verification"]:
# We have initial incomplete bounds.
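            # argsort is ascending, so the labels with the smallest (hardest)
            # initial lower bounds are tried first.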
labels_to_verify = init_global_lb.argsort().squeeze(0).tolist()
else:
labels_to_verify = list(range(arguments.Config["data"]["num_outputs"]))
elif arguments.Config["general"]["mode"] == "runnerup":
labels_to_verify = [logit_pred.argsort(descending=True)[1]]
elif arguments.Config["general"]["mode"] == "specify-target":
labels_to_verify = [target_label[imag_idx]]
else:
raise ValueError("unknown verification mode")
# MIP or MIP refined bounds.
if not verified_success and (arguments.Config["general"]["complete_verifier"] == "mip" or arguments.Config["general"]["complete_verifier"] == "bab-refine"):
start_refine = time.time()
verified_status, init_global_lb, lower_bounds, upper_bounds = mip(saved_bounds=saved_bounds, y=y, labels_to_verify=labels_to_verify)
verified_success = verified_status != "unknown-mip"
if verified_status == "unknown-mip":
verified_acc -= 1
mip_unknown.append(imag_idx)
elif verified_status == "unsafe-mip":
verified_acc -= 1
mip_unsafe.append(imag_idx)
elif verified_status == "safe-mip" or verified_status == "safe-incomplete-refine":
mip_safe.append(imag_idx)
arguments.Config["bab"]["timeout"] -= (time.time()-start_refine)
ret.append([imag_idx, 0, 0, time.time()-start_refine, new_idx, -2, np.inf, np.inf])
print("time threshold left for bab:", arguments.Config["bab"]["timeout"])
if verified_success:
if verified_status == "safe-mip":
print(f"Result: image {imag_idx} verification success (with mip)!")
verified_success_list.append(imag_idx)
elif verified_status == "safe-incomplete-refine":
print(f"Result: image {imag_idx} verification success (with mip refine)!")
verified_success_list.append(imag_idx)
elif verified_status == "unsafe-mip":
print(f"Result: image {imag_idx} attack success (with mip)!")
attack_success.append(imag_idx)
else:
print(f"Warning: verified status {verified_status} not supported!")
exit()
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
elif arguments.Config["general"]["complete_verifier"] == 'skip':
print(f"Result: image {imag_idx} verification failure (complete verifier skipped as requested).")
verified_acc -= 1
verified_failed.append(imag_idx)
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
elif verified_status == "unknown-mip":
print(f"Result: image {imag_idx} verification failure (with mip).")
example_time.append(time.time() - example_start_time)
print(f'Wall time: {example_time[-1]}')
continue
pidx_all_verified = True
bab_attack_success = False
for pidx in labels_to_verify:
if isinstance(pidx, torch.Tensor):
pidx = pidx.item()
print('##### [{}:{}] Tested against {} ######'.format(new_idx, imag_idx, pidx))
if pidx == y:
print("groundtruth label, skip!")
ret.append([imag_idx, 0, 0, 0, new_idx, pidx, np.inf, np.inf])
continue
torch.cuda.empty_cache()
gc.collect()
start_inner = time.time()
# attack_images shape: (1, batch, restarts, num_outputs-1, c, h, w)
# select target label attack_images according to pidx. New shape (restarts, c, h, w).
targeted_attack_images = None
if attack_images is not None:
targeted_attack_images = attack_images[0, :, pidx if pidx < y else pidx - 1]
attack_args.update({'target': pidx, 'only_target_attack': True})
attack_args.update({'data_max': torch.min(x + perturb_eps, data_max)})
attack_args.update({'data_min': torch.max(x - perturb_eps, data_min)})
arguments.attack_args = attack_args
else:
arguments.attack_args = None
try:
if arguments.Config["general"]["enable_incomplete_verification"]:
# Reuse results from incomplete results, or from refined MIPs.
                # Skip the properties that have already been verified.
rlb, rub = list(lower_bounds), list(upper_bounds)
rlb[-1] = rlb[-1][0, pidx]
rub[-1] = rub[-1][0, pidx]
# print(init_global_lb[0].min().item(), init_global_lb[0].min().item() - arguments.Config["bab"]["decision_thresh"] <= -100.)
if init_global_lb[0].min().item() - arguments.Config["bab"]["decision_thresh"] <= -100.:
                    print(f"Initial alpha-CROWN with poor bound {init_global_lb[0].min().item()}. We will not run branch and bound.")
l, u, nodes, glb_record = rlb[-1].item(), float('inf'), 0, []
elif init_global_lb[0, pidx] >= arguments.Config["bab"]["decision_thresh"]:
print(f"Initial alpha-CROWN verified for label {pidx} with bound {init_global_lb[0, pidx]}")
l, u, nodes, glb_record = rlb[-1].item(), float('inf'), 0, []
else:
if arguments.Config["bab"]["timeout"] < 0:
print(f"Image {imag_idx} verification failure (running out of time budget).")
l, u, nodes, glb_record = rlb[-1].item(), float('inf'), 0, []
else:
                        # Feed the initialized bounds to save time.
l, u, nodes, glb_record = bab(model_ori, x, pidx, y=y, eps=perturb_eps, data_ub=data_max, data_lb=data_min,
lower_bounds=lower_bounds, upper_bounds=upper_bounds, reference_slopes=saved_slopes, attack_images=targeted_attack_images)
else:
assert arguments.Config["general"]["complete_verifier"] == "bab" # for MIP and BaB-Refine.
# Main function to run verification
l, u, nodes, glb_record = bab(model_ori, x, pidx, y=y, eps=perturb_eps,
data_ub=data_max, data_lb=data_min, attack_images=targeted_attack_images)
time_cost = time.time() - start_inner
print('Image {} label {} verification end, final lower bound {}, upper bound {}, time: {}'.format(imag_idx, pidx, l, u, time_cost))
ret.append([imag_idx, l, nodes, time_cost, new_idx, pidx, u, attack_margin[pidx] if attack_margin is not None else np.inf])
arguments.Config["bab"]["timeout"] -= time_cost
lb_record.append([glb_record])
print(imag_idx, l)
np.save(save_path, np.array(ret))
# np.save('lb_record_' + save_path, np.array(lb_record))
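            # An upper bound below the decision threshold means BaB found a concrete
            # counterexample; a lower bound below it leaves this label unverified.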
if u < arguments.Config["bab"]["decision_thresh"]:
verified_status = "unsafe-bab"
pidx_all_verified = False
bab_attack_success = True
break
elif l < arguments.Config["bab"]["decision_thresh"]:
pidx_all_verified = False
                # Break to move on to the next sample and save time if any label is not verified.
break
except KeyboardInterrupt:
print('time:', imag_idx, time.time()-start_inner, "\n",)
print(ret)
pidx_all_verified = False
break
example_time.append(time.time() - example_start_time)
if not pidx_all_verified:
verified_acc -= 1
if bab_attack_success:
attack_success.append(imag_idx)
print(f'Result: image {imag_idx} attack success (with branch and bound)!')
else:
verified_failed.append(imag_idx)
print(f'Result: image {imag_idx} verification failure (with branch and bound).')
else:
verified_success_list.append(imag_idx)
print(f'Result: image {imag_idx} verification success (with branch and bound)!')
# Make sure ALL tensors used in this loop are deleted here.
del init_global_lb, saved_bounds, saved_slopes
print(f'Wall time: {example_time[-1]}')
# some results analysis
np.set_printoptions(suppress=True)
ret = np.array(ret)
print(f'\nnumber of correctly classified examples: {nat_acc}')
print(f'incorrectly classified idx (total {len(skipped_examples)}):', skipped_examples)
print(f'attack success idx (total {len(attack_success)}):', attack_success)
if len(attack_success) > 0:
print('attack_success rate:', len(attack_success)/len(bnb_ids))
np.save('Attack-success_{}_{}_start{}_end{}.npy'.
format(arguments.Config['model']['name'], arguments.Config["data"]["dataset"], arguments.Config["data"]["start"], arguments.Config["data"]["end"]), np.array(attack_success))
print(f'verification success idx (total {len(verified_success_list)}):', verified_success_list)
print(f'verification failure idx (total {len(verified_failed)}):', verified_failed)
if arguments.Config["general"]["complete_verifier"] == "mip":
print("##### Complete MIP solver summary #####")
print(f"mip verified safe idx: {mip_safe}")
print(f"mip unsafe idx: {mip_unsafe}")
print(f"mip unknown idx: {mip_unknown}")
print(f"mip verified safe rate {len(mip_safe)/len(bnb_ids)}, "
f"unsafe rate {len(mip_unsafe)/len(bnb_ids)}, "
f"unknown rate {len(mip_unknown)/len(bnb_ids)}, "
f"total {len(bnb_ids)}")
print("final verified acc: {}%[{}]".format(verified_acc/len(bnb_ids)*100., len(bnb_ids)))
np.save('Verified-acc_{}_{}_start{}_end{}_{}_branching_{}.npy'.
format(arguments.Config['model']['name'], arguments.Config["data"]["dataset"], arguments.Config["data"]["start"], arguments.Config["data"]["end"], verified_acc, arguments.Config["bab"]["branching"]["method"]), np.array(verified_failed))
total_verification = len(verified_success_list) + len(verified_failed)
print(f"verifier is called on {total_verification} examples.")
print("total verified:", verified_acc)
if ret.size > 0:
# print("mean time [total:{}]: {}".format(len(bnb_ids), ret[:, 3].sum()/float(len(bnb_ids))))
print("mean time [cnt:{}] (excluding attack success): {}".format(total_verification, ret[:, 3][ret[:, 5] != -3].sum()/float(total_verification if total_verification != 0 else "nan")))
if len(attack_success) > 0:
print("mean time [cnt:{}] (including attack success): {}".format(total_verification + len(attack_success), ret[:, 3].sum() / float(total_verification + len(attack_success))))
if __name__ == "__main__":
config_args()
main()
|
the-stack_0_15870 | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An experimental new unified TPU executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import checkpointer
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import cluster_factory
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ml_perf_log as mlp_log
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import multitask_model
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import task_scheduler
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.google.trainer import base_runner
from REDACTED.tensorflow.python.tpu import device_assignment as device_assignment_lib # pylint: disable=g-direct-tensorflow-import
tf.flags.DEFINE_bool(
'cluster_placer_in_executor', False,
'If True, cluster.GetPlacer() is used in Executor. ' +
'When running on TPU model weights can be distributed ' +
'across TPU hosts, for outrageously large models this ' +
'enables sharded checkpointing and reduces host memory ' +
'requirements, see _LeastLoadedPlacer in cluster.py.')
tf.flags.DEFINE_bool(
'disable_meta_optimizer_in_executor', False,
'Disabling the grappler meta_optimizer improves start-up time.')
FLAGS = tf.flags.FLAGS
def GetExecutorParams(model_name, cluster_params, model_registry):
"""Get the params needed to instantiate the Executor.
Args:
    model_name: A model name registered in the ModelRegistry.
cluster_params: A cluster hyperparams object.
model_registry: A ModelRegistry object.
Returns:
A tuple (dict, Params):
- ps_params_dict: High-level task name -> ProgramScheduleParams
- train_cfg: A SingleTaskModelParams or MultiTaskModelParams.
"""
ps_params_dict = {}
with cluster_factory.Cluster(cluster_params):
ps_cfg = model_registry.GetProgramSchedule(model_name)
train_cfg = model_registry.GetParams(model_name, 'Train')
train_cfg.cluster = cluster_params
if issubclass(train_cfg.cls, base_model.MultiTaskModel):
multi_task_train_cfg = train_cfg
# Create SingleTaskModelParams from a MultiTaskModelParams.
for k, _ in multi_task_train_cfg.task_params.IterParams():
single_task_params = base_model.SingleTaskModel.Params()
single_task_params.cluster = multi_task_train_cfg.cluster
single_task_params.input = multi_task_train_cfg.input.Get(k)
single_task_params.task = multi_task_train_cfg.task_params.Get(k)
single_task_params.train = single_task_params.task.train
if k not in ps_cfg.program_schedule_dict:
tf.logging.fatal(
'Could not find %s in ps_cfg.program_schedule_dict: %s', k,
ps_cfg)
program_schedule_params = ps_cfg.program_schedule_dict[k]
program_schedule_params.task_dict = {'Train': single_task_params}
for eval_dataset_name in program_schedule_params.dataset_names:
multi_task_eval_cfg = model_registry.GetParams(
model_name, eval_dataset_name)
eval_task_params = base_model.SingleTaskModel.Params()
eval_task_params.cluster = single_task_params.cluster
eval_task_params.input = multi_task_eval_cfg.input.Get(k)
eval_task_params.task = multi_task_eval_cfg.task_params.Get(k)
program_schedule_params.task_dict[
eval_dataset_name] = eval_task_params
ps_params_dict[k] = program_schedule_params
else:
program_schedule_params = ps_cfg
program_schedule_params.task_dict = {'Train': train_cfg}
for eval_dataset_name in program_schedule_params.dataset_names:
task_eval_params = model_registry.GetParams(model_name,
eval_dataset_name)
task_eval_params.cluster = train_cfg.cluster
program_schedule_params.task_dict[eval_dataset_name] = task_eval_params
ps_params_dict[''] = program_schedule_params
return ps_params_dict, train_cfg
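# A minimal construction sketch (names are illustrative; the surrounding trainer
# normally builds these objects from flags and the model registry):
#
#   ps_params_dict, train_cfg = GetExecutorParams(
#       'my_model', cluster_params, model_registry)
#   runner = ExecutorTpu(train_cfg, ps_params_dict, model_task_name='',
#                        logdir='/tmp/logdir', tf_master='local')
#   runner.Start()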
class ExecutorTpu(base_runner.BaseRunner):
"""An experimental runner that does arbitrary multi-program execution on TPU.
Overview of operation:
- During construction, all programs construct their sub-graphs, in a sense
creating a mega-graph.
  - A sequence of programs associated with a task is then executed in whole,
    e.g.: [train x 1000 steps, checkpoint, eval 4 steps, decode 2 steps]
  - In this manner, programs and higher-level tasks cooperatively time-slice
    the TPU.
"""
def __init__(self, train_cfg, ps_params_dict, model_task_name, logdir,
tf_master, **kwargs):
"""Construct an ExecutorTpu BaseRunner.
Args:
train_cfg: SingleTaskModelParams or MultiTaskModelParams
ps_params_dict: A dict of top-level task name -> ProgramSchedule params,
if train_cfg is a SingleTaskModelParams, we expect only one entry.
model_task_name: An override for multi-task models, currently unused.
logdir: String path to the log directory to output to.
tf_master: String path to the master job, e.g. 'local'.
**kwargs: keyword args to pass through to BaseRunner.
"""
super(ExecutorTpu, self).__init__(train_cfg, model_task_name, logdir,
tf_master, **kwargs)
self._cluster_def = self._cluster.worker_cluster_def
# There is a single Executor task
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
self.task_scheduler = None
self._checkpoint_dir = os.path.join(logdir, 'train')
self._variable_renaming_rules = []
self._ml_perf = None
# If this is a multi-task model, grab the params for the TaskScheduler.
if issubclass(train_cfg.cls, base_model.SingleTaskModel):
tf.logging.info('single_task_model')
assert len(ps_params_dict) == 1
self._model_task_name = list(ps_params_dict.keys())[0]
self._single_task_mode = True
elif issubclass(train_cfg.cls, base_model.MultiTaskModel):
tf.logging.info('multi_task_model')
if issubclass(train_cfg.cls, multitask_model.RegExSharedVariableModel):
self._variable_renaming_rules = train_cfg.variable_renaming_rules
if train_cfg.task_schedule is None:
task_schedule_params = task_scheduler.ConstantScheduler.Params()
task_schedule_params.task_probs = sorted(
list(train_cfg.task_probs.IterParams()))
else:
task_schedule_params = train_cfg.task_schedule
self.task_scheduler = task_schedule_params.Instantiate()
self._single_task_mode = False
else:
tf.logging.fatal(
'Model %s is not a sub-class of SingleTaskModel or MultiTaskModel',
train_cfg.cls)
tf.logging.info('train_cfg.cls: %s', train_cfg.cls)
self._WriteToLog(train_cfg.ToText(), self._checkpoint_dir, 'params.txt')
self._program_schedule_dict = {}
self._programs = []
for task_string, program_schedule_params in ps_params_dict.items():
program_schedule_params.logdir = logdir
program_schedule_params.num_splits_per_client = data_parallelism
program_schedule_params.task_name = task_string
ps = program_schedule_params.Instantiate()
self._program_schedule_dict[task_string] = ps
tf.logging.info('program_schedule_params: %s',
program_schedule_params.ToText())
self._programs += ps.Programs()
if program_schedule_params.ml_perf.benchmark_name is not None:
self._ml_perf = program_schedule_params.ml_perf
tf.logging.info('num_programs: %d', len(self._programs))
if self._ml_perf is not None:
self._ml_perf_log = True
mlp_log.mlperf_print(key='benchmark', value=self._ml_perf.benchmark_name)
else:
self._ml_perf_log = False
# BaseRunner legacy
self.enqueue_ops = None
@py_utils.RetryOnTransientTfError()
def _WaitTillInit():
"""Wait until the model is ready."""
try:
with self._graph.as_default(), self._GetSession(
cluster_def=self._cluster_def,
disable_meta_optimizer=FLAGS.disable_meta_optimizer_in_executor
) as sess:
topology = sess.run(
tf.tpu.initialize_system(embedding_config=None, job=None))
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=py_utils.ComputationShape(
num_devices_per_split),
num_replicas=data_parallelism)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info(
'device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
if self._ml_perf_log:
mlp_log.mlperf_print(key='cache_clear', value=True)
mlp_log.mlperf_print(key='init_start', value=None)
_WaitTillInit()
with self._graph.as_default(), tf.container(self._container_id):
tf.logging.info('self._cluster.job_spec.name: %s',
self._cluster.job_spec.name)
with self._cluster, tf.device(
self._cluster.job_spec.name if not FLAGS.cluster_placer_in_executor
else self._cluster.GetPlacer()):
with py_utils.VariableRenameScope(self._variable_renaming_rules):
_ = py_utils.GetOrCreateGlobalStepVar()
for program in self._programs:
program.BuildTpuSubgraph()
for program in self._programs:
program.SetStatusMessageFn(self._SetStatusMessage)
program.CreateCheckpointer()
self._initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self.save_only_checkpointer = checkpointer.Checkpointer(
self._checkpoint_dir,
model=None,
train_params=train_cfg.train,
save_only=True)
def Start(self):
# Run training.
self._RunLoop('executor_tpu', self._Loop)
def _Loop(self):
with tf.container(self._container_id), self._GetSession(
cluster_def=self._cluster_def,
disable_meta_optimizer=FLAGS.disable_meta_optimizer_in_executor
) as sess:
# Initialize the variables first, if needed.
for program in self._programs:
program.RestoreIfNeeded(sess)
program.Compile(sess)
sess.run(self._initialize_tables)
sess.run(self._initialize_local_vars)
while True:
global_step = sess.run(py_utils.GetGlobalStep())
if self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
if not self._ml_perf_log:
self.save_only_checkpointer.Save(sess, global_step)
return
# If a task is explicitly selected, only run the programs associated
# with that task.
if self._single_task_mode or self._model_task_name:
tf.logging.info('Single task mode: %s', self._model_task_name)
program_schedule = self._program_schedule_dict[self._model_task_name]
else:
# Otherwise, sample a task.
model_task = self.task_scheduler.Sample(global_step)
tf.logging.info('Sampled %s', model_task)
program_schedule = self._program_schedule_dict[model_task]
done = program_schedule.Run(sess)
if done:
tf.logging.info('Program schedule told us to stop.')
return
# TODO(blee): More complex saving rules. Currently, we assume
# we save after every task's program schedule execution.
#
# global_step local variable above is a result of sess.run, not a
# tf variable, so when we do save_only_checkpointer.Save(...) here
        # py_utils.GetGlobalStep() is already ahead of it by
        # (train_executions_per_eval * train_steps_per_loop)
        # steps, due to program_schedule.Run(sess).
#
if not self._ml_perf_log:
self.save_only_checkpointer.Save(sess, py_utils.GetGlobalStep())
|
the-stack_0_15871 | import types
import sys
import os
import io
from pathlib import Path
from urllib.parse import urlparse
import logging
import asyncio
import tarfile
from io import BytesIO
import mimetypes
import functools
import ssl
import click
from girder_client import GirderClient
from flask import Flask, send_from_directory, jsonify
import aiohttp
from async_lru import alru_cache
import tenacity
class AsyncGirderClient(object):
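    """Thin asynchronous wrapper around the Girder REST API used by the watcher."""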
def __init__(self, session, api_url):
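        # Allow at most 5 concurrent requests against the Girder API.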
self._ratelimit_semaphore = asyncio.Semaphore(5)
self._api_url = api_url.rstrip('/')
self._folder_create_semaphore = asyncio.Semaphore()
self._item_create_semaphore = asyncio.Semaphore()
self._session = session
async def authenticate(self, api_key):
params = {'key': api_key}
async with self._session.post('%s/api_key/token' % (self._api_url), params=params) as r:
r.raise_for_status()
auth = await r.json()
self._headers = {
'Girder-Token': auth['authToken']['token']
}
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def post(self, path, headers=None, params=None, raise_for_status=True, **kwargs):
if params is not None:
params = {k:str(v) for (k,v) in params.items()}
if headers is None:
headers = self._headers
else:
headers.update(self._headers)
async with self._ratelimit_semaphore:
async with self._session.post('%s/%s' % (self._api_url, path),
headers=headers, params=params,
**kwargs) as r:
if raise_for_status:
r.raise_for_status()
return await r.json()
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def put(self, path, headers=None, params=None, raise_for_status=True, **kwargs):
if params is not None:
params = {k:str(v) for (k,v) in params.items()}
if headers is None:
headers = self._headers
else:
headers.update(self._headers)
async with self._ratelimit_semaphore:
async with self._session.put('%s/%s' % (self._api_url, path),
headers=headers, params=params,
**kwargs) as r:
if raise_for_status:
r.raise_for_status()
return await r.json()
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def get(self, path, raise_for_status=True, params=None, status=False, **kwargs):
if params is not None:
params = {k:str(v) for (k,v) in params.items()}
async with self._ratelimit_semaphore:
async with self._session.get('%s/%s' % (self._api_url, path),
headers=self._headers, params=params, **kwargs) as r:
if raise_for_status and not status:
r.raise_for_status()
if status:
return (r.status, await r.json())
else:
return await r.json()
@alru_cache(maxsize=1000)
async def create_folder(self, parent_id, parent_type, name):
params = {
'parentId': parent_id,
'parentType': parent_type,
'name': name,
'description': '',
'reuseExisting': True
}
        # We need this semaphore to prevent two folders with the same name from
        # being created; this operation is not atomic in Girder.
async with self._folder_create_semaphore:
return await self.post('folder', params=params)
async def list_item(self, folder, name):
params = {
'parentId': folder['_id'],
'name': name
}
return await self.get('item', params=params)
@alru_cache(maxsize=1000)
async def create_item(self, folder_id, name):
params = {
'folderId': folder_id,
'name': name,
'description': '',
'reuseExisting': True
}
        # We need this semaphore to prevent two items with the same name from
        # being created; this operation is not atomic in Girder.
async with self._item_create_semaphore:
return await self.post('item', params=params)
async def upload_file(self, item, file_name, bits, size):
mime_type, _ = mimetypes.guess_type(file_name)
params = {
'parentType': 'item',
'parentId': item['_id'],
'name': file_name,
'size': size,
'mimeType': mime_type
}
headers = {
'Content-Length': str(size)
}
headers.update(self._headers)
upload = await self.post('file', params=params, headers=headers, data=bits)
return upload
async def set_metadata(self, resource_type, _id, meta, semaphore=None):
# The metadata put operation is not atomic!
if semaphore is not None:
await semaphore.acquire()
try:
return await self.put('%s/%s/metadata' % (resource_type, _id), json=meta)
finally:
if semaphore is not None:
semaphore.release()
async def get_metadata(self, resource_type, _id):
resource = await self.get('%s/%s' % (resource_type, _id))
return resource.get('meta')
@alru_cache(maxsize=1000)
async def resource_path(self, _id, resource_type):
params = {
'type': resource_type
}
return await self.get('resource/%s/path' % _id, params=params)
async def lookup_resource(self, path):
params = {
'path': path,
'test': True
}
(status, json_body) = await self.get('resource/lookup', params=params, status=True)
if status == 400:
return None
else:
return json_body
async def file_exist(self, item, name):
item_path = await self.resource_path(item['_id'], 'item')
return await self.lookup_resource('%s/%s' % (item_path, name)) is not None
async def ensure_folders(gc, parent, folders):
for folder_name in folders:
parent = await gc.create_folder(parent['_id'], 'folder', folder_name)
return parent
async def upload_image(gc, folder, shot_name, run_name, variable, timestep, bits, size, check_exists=False):
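    """Upload one variable image into the <shot>/<run>/<group>/<variable> item,
    with the file named by its zero-padded timestep."""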
log = logging.getLogger('adash')
image_path = Path(variable['image_name'])
image_folders = [shot_name, run_name, variable['group_name']]
parent_folder = await ensure_folders(gc, folder, image_folders)
name = None
for k in ['name', 'variable_name']:
name = variable.get(k)
if name is not None:
break
if name is None:
raise Exception('Unable to extract variable name.')
variable_item = await gc.create_item(parent_folder['_id'], name)
image_name = '%s%s' % (str(timestep).zfill(4), image_path.suffix)
create = True
if check_exists:
create = not await gc.file_exist(variable_item, image_name)
if create:
log.info('Uploading "%s/%s/%s".' % ('/'.join([str(i) for i in image_folders]), name, image_name))
await gc.upload_file(variable_item, image_name, bits, size)
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def fetch_variables(session, upload_site_url, shot_name, run_name, timestep):
async with session.get('%s/shots/%s/%s/%d/variables.json' % (upload_site_url, shot_name,
run_name, timestep)) as r:
if r.status == 404:
return None
r.raise_for_status()
return await r.json()
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def fetch_images_archive(session, upload_site_url, shot_name,
run_name, timestep):
async with session.get('%s/shots/%s/%s/%d/images.tar.gz' % (upload_site_url, shot_name,
run_name, timestep)) as r:
if r.status == 404:
return None
r.raise_for_status()
return await r.read()
async def fetch_images(session, gc, folder, upload_site_url, shot_name, run_name, timestep,
metadata_semaphore, check_exists=False):
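    """Fetch variables.json and images.tar.gz for one timestep, upload each
    listed image, and record the timestep in the run folder metadata."""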
log = logging.getLogger('adash')
log.info('Fetching variables.json for timestep: "%d".' % timestep)
# Fetch variables.json
variables = await fetch_variables(session, upload_site_url, shot_name, run_name, timestep)
if variables is None:
log.warning('Unable to fetch variables.json. Timestep "%d" is missing.' % timestep)
else:
log.info('Fetching images.tar.gz for timestep: "%d".' % timestep)
buffer = BytesIO(await fetch_images_archive(session, upload_site_url, shot_name, run_name, timestep))
tasks = []
with tarfile.open(fileobj=buffer) as tgz:
for v in variables:
info = None
k = '%s/%s' % (v['group_name'], v['image_name'])
try:
info = tgz.getmember(k)
except KeyError:
pass
if info is None:
raise Exception('Unable to extract image: "%s"' % k)
br = tgz.extractfile(info)
bits = br.read()
tasks.append(
asyncio.create_task(
upload_image(gc, folder, shot_name, run_name, v,
timestep, bits, info.size, check_exists)
)
)
# Gather, so we fetch all images for this timestep before moving on to the
# next one!
await asyncio.gather(*tasks)
# Set the current timestep
metadata = {
'currentTimestep': timestep
}
run_folder = await ensure_folders(gc, folder, [shot_name, run_name])
await gc.set_metadata('folder', run_folder['_id'], metadata, metadata_semaphore)
# Scheduler used to run fetch_images requests in order, so that images are
# fetched in timestep order.
async def fetch_images_scheduler(queue):
log = logging.getLogger('adash')
while True:
try:
fetch = await queue.get()
log.info(fetch)
await fetch
queue.task_done()
except asyncio.CancelledError:
raise
        except Exception:
            log.exception('Exception occurred fetching images.')
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def fetch_run_time(session, upload_site_url, shot_name, run_name):
run_path = 'shots/%s/%s/time.json' % (shot_name, run_name)
async with session.get('%s/%s' % (upload_site_url, run_path),
raise_for_status=False) as r:
if r.status == 404:
return None
return await r.json()
async def watch_run(session, gc, folder, upload_site_url, shot_name, run_name,
username, machine, run_poll_interval):
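    """Poll time.json for a single run and ingest new timesteps in order until
    the run is marked complete."""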
log = logging.getLogger('adash')
log.info('Starting to watch run "%s" shot "%s".' % (run_name, shot_name))
fetch_images_queue = asyncio.Queue()
metadata_semaphore = asyncio.Semaphore()
scheduler = asyncio.create_task(
fetch_images_scheduler(fetch_images_queue)
)
last_timestep = None
metadata = {
'username': username,
'machine': machine
}
run_folder = await ensure_folders(gc, folder, [shot_name, run_name])
await gc.set_metadata('folder', run_folder['_id'], metadata, metadata_semaphore)
while True:
# Check to see what the last successfully processed timestep was
metadata = await gc.get_metadata('folder', run_folder['_id'])
if last_timestep is None:
last_timestep = metadata.get('currentTimestep')
if last_timestep is not None:
log.info('Last timestep processed: "%d"' % last_timestep)
else:
            log.info('No previous timesteps have been processed.')
last_timestep = 0
# Now see where the simulation upload has got to
run_path = 'shots/%s/%s/time.json' % (shot_name, run_name)
time = await fetch_run_time(session, upload_site_url, shot_name, run_name)
# Wait for time.json to appear
if time is None:
            log.warning('Unable to fetch "%s", waiting for 1 sec.' % run_path)
await asyncio.sleep(1)
continue
new_timestep = time['current']
complete = time.get('complete', False)
# Are we done. The run is marked as complete and we have ingested all the
# timesteps.
if complete and last_timestep == new_timestep:
log.info('Run "%s" is complete.' % run_name)
await fetch_images_queue.join()
scheduler.cancel()
break
# Did we miss any timesteps?
delta = new_timestep - last_timestep
        # We have missed some timesteps, so we need to catch up!
if delta > 1:
            # First schedule a fetch of the next timestep, checking whether the files
            # exist (this is the one that could be partially processed).
fetch_images_queue.put_nowait(
fetch_images(session, gc, folder, upload_site_url,
shot_name, run_name, last_timestep+1,
metadata_semaphore, check_exists=True)
)
# Then process the rest normally
for t in range(last_timestep+2, new_timestep+1):
fetch_images_queue.put_nowait(
fetch_images(session, gc, folder, upload_site_url,
shot_name, run_name, t,
metadata_semaphore)
)
# We successfully processed the last timestep so just schedule the processing
# of the next.
elif delta == 1:
fetch_images_queue.put_nowait(
fetch_images(session, gc, folder, upload_site_url,
shot_name, run_name, new_timestep,
metadata_semaphore,
                             # If we are processing the first timestep we need to check
# the existence of the files, as the fetching of this
# timestep may have failed before.
last_timestep == 0)
)
last_timestep = new_timestep
await asyncio.sleep(run_poll_interval)
@tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_attempt(10))
async def fetch_shot_index(session, upload_site_url):
async with session.get('%s/shots/index.json' % upload_site_url) as r:
if r.status == 404:
return None
else:
r.raise_for_status()
return await r.json()
async def watch_shots_index(session, gc, folder, upload_site_url, api_url,
api_key, shot_poll_interval, run_poll_internval):
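    """Poll shots/index.json and start a watch_run task for every new
    shot/run pair, keeping the user/machine metadata up to date."""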
log = logging.getLogger('adash')
runs = set()
shot_metadata_semaphore = asyncio.Semaphore()
# Get users and machines
metadata = await gc.get_metadata('folder', folder['_id'])
users = set(metadata.get('users', [])) if metadata is not None else set()
machines = set(metadata.get('machines', [])) if metadata is not None else set()
while True:
log.info('Fetching /shots/index.json')
index = await fetch_shot_index(session, upload_site_url)
if index is None:
# Just wait for index.json to appear
            log.warning('Unable to fetch "shots/index.json", waiting for 1 sec.')
await asyncio.sleep(1)
continue
for shot in index:
username = shot['username']
users.add(username)
machine = shot['machine_name']
machines.add(machine)
# TODO Update the meta data
shot_name = shot['shot_name']
run_name = shot['run_name']
run_key = '%s/%s' % ( shot_name, run_name)
if run_key not in runs:
asyncio.create_task(
watch_run(session, gc, folder, upload_site_url, shot_name,
run_name, username, machine, run_poll_internval)
)
runs.add(run_key)
metadata = {
'machines': list(machines),
'users': list(users)
}
await gc.set_metadata('folder', folder['_id'], metadata, shot_metadata_semaphore)
await asyncio.sleep(shot_poll_interval)
async def watch(folder_id, upload_site_url, api_url, api_key,
shot_poll_interval, run_poll_internval):
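    """Authenticate against Girder and watch the upload site's shot index indefinitely."""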
    def ignore_aiohttp_ssl_error(loop, aiohttpversion='3.5.4'):
"""Ignore aiohttp #3535 issue with SSL data after close
There appears to be an issue on Python 3.7 and aiohttp SSL that throws a
ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data
after close notify (_ssl.c:2609)) after we are already done with the
connection. See GitHub issue aio-libs/aiohttp#3535
        Given a loop, this sets up an exception handler that ignores this specific
exception, but passes everything else on to the previous exception handler
this one replaces.
If the current aiohttp version is not exactly equal to aiohttpversion
nothing is done, assuming that the next version will have this bug fixed.
This can be disabled by setting this parameter to None
"""
if aiohttpversion is not None and aiohttp.__version__ != aiohttpversion:
return
orig_handler = loop.get_exception_handler()
def ignore_ssl_error(loop, context):
if context.get('message') == 'SSL error in data received':
# validate we have the right exception, transport and protocol
exception = context.get('exception')
protocol = context.get('protocol')
if (
isinstance(exception, ssl.SSLError) and exception.reason == 'KRB5_S_INIT' and
isinstance(protocol, asyncio.sslproto.SSLProtocol) and
isinstance(protocol._app_protocol, aiohttp.client_proto.ResponseHandler)
):
if loop.get_debug():
asyncio.log.logger.debug('Ignoring aiohttp SSL KRB5_S_INIT error')
return
if orig_handler is not None:
orig_handler(loop, context)
else:
loop.default_exception_handler(context)
loop.set_exception_handler(ignore_ssl_error)
    ignore_aiohttp_ssl_error(asyncio.get_running_loop())
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
gc = AsyncGirderClient(session, api_url)
await gc.authenticate(api_key)
folder = {
'_id': folder_id
}
await watch_shots_index(session, gc, folder, upload_site_url, api_url,
api_key, shot_poll_interval, run_poll_internval)
@click.command('watch', help='Watch upload site and ingest data into Girder')
@click.option('-f', '--folder-id', help='the folder to ingest into. [default: GIRDER_FOLDER_ID env. variable]', envvar='GIRDER_FOLDER_ID')
@click.option('-r', '--upload-site_url', help='the URL to the upload site to watch. [default: UPLOAD_SITE_URL env. variable]', envvar='UPLOAD_SITE_URL',)
@click.option('-u', '--api-url', default='http://localhost:8080/api/v1', help='RESTful API URL '
'(e.g https://girder.example.com/api/v1). [default: GIRDER_API_URL env. variable]', envvar='GIRDER_API_URL')
@click.option('-k', '--api-key', envvar='GIRDER_API_KEY',
help='[default: GIRDER_API_KEY env. variable]')
@click.option('-i', '--shot-poll-interval', default=30, type=int, help='shot poll interval (sec)')
@click.option('-v', '--run-poll-interval', default=30, type=int, help='run poll interval (sec)')
def main(folder_id, upload_site_url, api_url, api_key, shot_poll_interval, run_poll_interval):
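    # Example invocation (hypothetical script name and values):
    #   python watch.py -f <folder-id> -r https://upload.example.com \
    #     -u https://girder.example.com/api/v1 -k <api-key>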
#gc = GC(api_url=api_url, api_key=api_key)
if upload_site_url[-1] == '/':
upload_site_url = upload_site_url[:-1]
log = logging.getLogger('adash')
log.info('Watching: %s' % upload_site_url)
asyncio.run(
watch(folder_id, upload_site_url, api_url, api_key,
shot_poll_interval, run_poll_interval)
)
|