repo
stringlengths 7
59
| instance_id
stringlengths 11
63
| base_commit
stringlengths 40
40
| patch
stringlengths 167
798k
| test_patch
stringclasses 1
value | problem_statement
stringlengths 20
65.2k
| hints_text
stringlengths 0
142k
| created_at
timestamp[ns]date 2015-08-30 10:31:05
2024-12-13 16:08:19
| environment_setup_commit
stringclasses 1
value | version
stringclasses 1
value | FAIL_TO_PASS
sequencelengths 0
0
| PASS_TO_PASS
sequencelengths 0
0
|
---|---|---|---|---|---|---|---|---|---|---|---|
augerai/a2ml | augerai__a2ml-611 | 30601dc95093e45ebafb500fa70fcdafe65edd24 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index e71bfca8..5baef1cb 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.79'
+__version__ = '1.0.80'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 03454e01..b7002dab 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -279,7 +279,7 @@ def deploy(self, model_id, locally=False, review=False, provider=None,
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
threshold=None, score=False, score_true_data=None,
- output=None, no_features_in_result = None, locally=False, provider=None):
+ output=None, no_features_in_result = None, locally=False, provider=None, predict_labels=None):
"""Predict results with new data against deployed model. Predictions are stored next to the file with data to be predicted on. The file name will be appended with suffix _predicted.
Note:
@@ -299,6 +299,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
no_features_in_result(bool) : Do not return feature columns in prediction result. False by default
locally(bool, str): Predicts using a local model with auger.ai.predict if True, on the Provider Cloud if False. If set to "docker", then docker image used to run the model
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider set in costructor or config.
+ predict_labels (dict, bool): Run ActiveLearn to select data for labelling
Returns:
if filename is not None. ::
@@ -363,7 +364,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
"""
return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id,
threshold, locally, data, columns, predicted_at, output, no_features_in_result,
- score, score_true_data )
+ score, score_true_data, predict_labels )
@show_result
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index 7f810fd8..770c9376 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -68,7 +68,7 @@ def deploy(self, model_id, locally=False, review=False, provider=None,
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
threshold=None, score=False, score_true_data=None,
- output=None, no_features_in_result=None, locally=False, provider=None):
+ output=None, no_features_in_result=None, locally=False, provider=None, predict_labels=None):
"""Predict results with new data against deployed model. Predictions are stored next to the file with data to be predicted on. The file name will be appended with suffix _predicted.
Note:
@@ -88,6 +88,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
no_features_in_result(bool) : Do not return feature columns in prediction result. False by default
locally(bool, str): Predicts using a local model with auger.ai.predict if True, on the Provider Cloud if False. If set to "docker", then docker image used to run the model
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider set in costructor or config.
+ predict_labels (dict, bool): Run ActiveLearn to select data for labelling
Returns:
if filename is not None. ::
@@ -152,7 +153,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
"""
return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id,
threshold, locally, data, columns, predicted_at, output, no_features_in_result,
- score, score_true_data )
+ score, score_true_data, predict_labels )
@show_result
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index f367c7a6..af40e9f8 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -25,10 +25,11 @@ def deploy(self, model_id, locally=False, review=False, name=None, algorithm=Non
return AugerModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score, data_path, metadata)
def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None,
- predicted_at=None, output=None, no_features_in_result=None, score=False, score_true_data=None):
+ predicted_at=None, output=None, no_features_in_result=None, score=False,
+ score_true_data=None, predict_labels=None):
return AugerModel(self.ctx).predict(
model_id, filename, threshold, locally, data, columns, predicted_at, output,
- no_features_in_result, score, score_true_data)
+ no_features_in_result, score, score_true_data, predict_labels)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
actual_date_column=None, experiment_params=None, locally=False):
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index dc056474..aacf1a0e 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -95,7 +95,7 @@ def has_endpoint(self, props=None):
return is_endpoint
def predict(self, records, features, threshold=None, file_url=None, predicted_at=None,
- no_features_in_result=None, score=False, score_true_data=None):
+ no_features_in_result=None, score=False, score_true_data=None, predict_labels=None):
if self.object_id is None:
raise AugerException('Please provide Auger Pipeline id')
@@ -120,7 +120,7 @@ def predict(self, records, features, threshold=None, file_url=None, predicted_at
prediction_properties = \
prediction_api.create(records, features, threshold=threshold, file_url=file_url,
predicted_at=predicted_at, no_features_in_result=no_features_in_result,
- score=score, score_true_data=score_true_data)
+ score=score, score_true_data=score_true_data, predict_labels=predict_labels)
return prediction_properties.get('result')
diff --git a/a2ml/api/auger/impl/cloud/prediction.py b/a2ml/api/auger/impl/cloud/prediction.py
index 81f50387..da3dd44b 100644
--- a/a2ml/api/auger/impl/cloud/prediction.py
+++ b/a2ml/api/auger/impl/cloud/prediction.py
@@ -15,7 +15,7 @@ def __init__(self, ctx, pipeline_api, use_endpoint=False):
self._set_api_request_path("AugerEndpointPredictionApi")
def create(self, records, features, threshold=None, file_url=None, predicted_at=None,
- no_features_in_result=None, score=False, score_true_data=None):
+ no_features_in_result=None, score=False, score_true_data=None, predict_labels=None):
params = {
'records': records,
'features': features,
@@ -43,4 +43,7 @@ def create(self, records, features, threshold=None, file_url=None, predicted_at=
if score_true_data:
params['score_true_data'] = score_true_data
+ if predict_labels:
+ params['predict_labels'] = predict_labels
+
return self._call_create(params, ['requested', 'running'])
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index 1af255e6..f2797bb4 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -31,9 +31,9 @@ def undeploy(self, model_id, locally=False):
return ModelUndeploy(self.ctx, self.project).execute(model_id, locally)
def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None,
- output=None, no_features_in_result=None, score=False, score_true_data=None):
+ output=None, no_features_in_result=None, score=False, score_true_data=None, predict_labels=None):
return ModelPredict(self.ctx).execute(filename, model_id, threshold, locally, data, columns,
- predicted_at, output, no_features_in_result, score, score_true_data)
+ predicted_at, output, no_features_in_result, score, score_true_data, predict_labels)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
actual_date_column=None, experiment_params=None, locally=False):
diff --git a/a2ml/api/auger/impl/mparts/predict.py b/a2ml/api/auger/impl/mparts/predict.py
index d523f6ca..8743c065 100644
--- a/a2ml/api/auger/impl/mparts/predict.py
+++ b/a2ml/api/auger/impl/mparts/predict.py
@@ -24,7 +24,7 @@ def __init__(self, ctx):
def execute(self, filename, model_id, threshold=None, locally=False, data=None, columns=None,
predicted_at=None, output=None, no_features_in_result=None,
- score=False, score_true_data=None):
+ score=False, score_true_data=None, predict_labels=None):
if filename is not None and isinstance(filename, str) and \
not (filename.startswith("http:") or filename.startswith("https:")) and \
not fsclient.is_s3_path(filename):
@@ -34,13 +34,13 @@ def execute(self, filename, model_id, threshold=None, locally=False, data=None,
if locally:
if locally == "docker":
predicted = self._predict_locally_in_docker(filename, model_id, threshold, data, columns, predicted_at, output,
- no_features_in_result, score, score_true_data)
+ no_features_in_result, score, score_true_data, predict_labels)
else:
predicted = self._predict_locally(filename, model_id, threshold, data, columns, predicted_at, output,
- no_features_in_result, score, score_true_data)
+ no_features_in_result, score, score_true_data, predict_labels)
else:
predicted = self._predict_on_cloud(filename, model_id, threshold, data, columns, predicted_at, output,
- no_features_in_result, score, score_true_data)
+ no_features_in_result, score, score_true_data, predict_labels)
return predicted
@@ -104,7 +104,7 @@ def _check_model_project(self, pipeline_api):
self.ctx.config.get('name'), model_project_name))
def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predicted_at,
- output, no_features_in_result, score, score_true_data):
+ output, no_features_in_result, score, score_true_data, predict_labels):
records, features, file_url, is_pandas_df = self._process_input(filename, data, columns)
temp_file = None
ds_result = None
@@ -114,7 +114,7 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
pipeline_api = AugerPipelineApi(self.ctx, None, model_id)
predictions = pipeline_api.predict(records, features, threshold=threshold, file_url=file_url,
predicted_at=predicted_at, no_features_in_result=no_features_in_result,
- score=score, score_true_data=score_true_data)
+ score=score, score_true_data=score_true_data, predict_labels=predict_labels)
try:
ds_result = DataFrame.create_dataframe(predictions.get('signed_prediction_url'),
@@ -145,7 +145,7 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
fsclient.remove_file(temp_file)
def _predict_locally(self, filename_arg, model_id, threshold, data, columns, predicted_at,
- output, no_features_in_result, score, score_true_data):
+ output, no_features_in_result, score, score_true_data, predict_labels):
from auger_ml.model_exporter import ModelExporter
is_model_loaded, model_path = ModelDeploy(self.ctx, None).verify_local_model(model_id)
@@ -161,12 +161,17 @@ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, pre
if score and score_true_data is None:
options = fsclient.read_json_file(os.path.join(model_path, "options.json"))
- ds = DataFrame.create_dataframe(filename_arg, data, [options['targetFeature']])
+ ds = DataFrame.create_dataframe(filename_arg, data)#, [options['targetFeature']])
score_true_data = ds.df
-
- res, options = ModelExporter({}).predict_by_model_to_ds(model_path,
- path_to_predict=filename_arg, records=data, features=columns,
- threshold=threshold, no_features_in_result=no_features_in_result)
+
+ if predict_labels:
+ res, options = ModelExporter({}).predict_labels_by_model_to_ds(model_path,
+ path_to_predict=filename_arg, records=data, features=columns,
+ threshold=threshold, no_features_in_result=no_features_in_result, predict_labels=predict_labels)
+ else:
+ res, options = ModelExporter({}).predict_by_model_to_ds(model_path,
+ path_to_predict=filename_arg, records=data, features=columns,
+ threshold=threshold, no_features_in_result=no_features_in_result)
ds_result = DataFrame({'data_path': None})
ds_result.df = res.df
@@ -192,7 +197,7 @@ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, pre
# no_features_in_result=no_features_in_result) #, output=output)
def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, columns, predicted_at,
- output, no_features_in_result, score, score_true_data):
+ output, no_features_in_result, score, score_true_data, predict_labels):
model_deploy = ModelDeploy(self.ctx, None)
is_model_loaded, model_path = model_deploy.verify_local_model(model_id, add_model_folder=False)
if not is_model_loaded:
@@ -205,7 +210,7 @@ def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, co
filename = os.path.join(self.ctx.config.get_path(), '.augerml', 'predict_data.csv')
ds.saveToCsvFile(filename, compression=None)
- predicted = self._docker_run_predict(filename, threshold, model_path, score, score_true_data)
+ predicted = self._docker_run_predict(filename, threshold, model_path, score, score_true_data, predict_labels)
if not filename_arg:
ds_result = DataFrame.create_dataframe(predicted)
@@ -223,7 +228,7 @@ def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, co
return predicted
- def _docker_run_predict(self, filename, threshold, model_path, score, score_true_data):
+ def _docker_run_predict(self, filename, threshold, model_path, score, score_true_data, predict_labels):
cluster_settings = AugerClusterApi.get_cluster_settings(self.ctx)
docker_tag = cluster_settings.get('kubernetes_stack')
predict_file = os.path.basename(filename)
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index 326a219f..5f061718 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -23,15 +23,15 @@ def deploy(self, project, model_id, locally, review, name, algorithm, score, dat
@authenticated
#@with_project(autocreate=False)
def predict(self, filename, model_id, threshold, locally, data, columns, predicted_at, output,
- no_features_in_result, score, score_true_data):
+ no_features_in_result, score, score_true_data, predict_labels):
if locally:
self.deploy(model_id, locally, review=False, name=None, algorithm=None, score=None, data_path=None)
predicted = Model(self.ctx, project=None).predict(
filename, model_id, threshold, locally, data, columns, predicted_at, output,
- no_features_in_result, score, score_true_data)
+ no_features_in_result, score, score_true_data, predict_labels)
- if filename:
+ if output:
self.ctx.log('Predictions stored in %s' % predicted)
if isinstance(predicted, dict) and 'predicted' in predicted:
diff --git a/a2ml/api/utils/formatter.py b/a2ml/api/utils/formatter.py
index e7a118bb..d5f37508 100644
--- a/a2ml/api/utils/formatter.py
+++ b/a2ml/api/utils/formatter.py
@@ -12,7 +12,8 @@ def print_table(log, table_list, headers=None, hor_lines=True):
col_list = list(table_list[0].keys() if table_list else [])
row_list = [col_list] # 1st row = header
for item in table_list:
- row_list.append([str(item.get(col) or '') for col in col_list])
+ row_list.append([str(item.get(col)) if item.get(col) is not None else '' for col in col_list])
+
# maximun size of the col for each element
col_size = [max(map(len, col)) for col in zip(*row_list)]
# insert seperating line before every line, and extra one for ending.
diff --git a/setup.py b/setup.py
index 646d8b7c..3998fdeb 100644
--- a/setup.py
+++ b/setup.py
@@ -84,13 +84,13 @@ def run(self):
'google-cloud-automl'
],
'predict': [
- 'auger.ai.predict[all]==1.0.104'
+ 'auger.ai.predict[all]==1.0.106'
],
'predict_no_cat_lgbm': [
- 'auger.ai.predict[no_cat_lgbm]==1.0.104'
+ 'auger.ai.predict[no_cat_lgbm]==1.0.106'
],
'predict_no_lgbm': [
- 'auger.ai.predict[no_cat_lgbm]==1.0.104',
+ 'auger.ai.predict[no_cat_lgbm]==1.0.106',
'catboost'
]
}
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2022-09-29T10:02:12 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-610 | 0198cb1d42783000318a5e4579018a8be362743f | diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 8b70a2d2..00000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,254 +0,0 @@
-version: 2.1
-
-orbs:
- docker: circleci/[email protected]
- aws-eks: circleci/[email protected]
- role: airswap/[email protected]
- aws-cli: circleci/[email protected]
- kubernetes: circleci/[email protected]
- helm: circleci/[email protected]
-
-jobs:
- build-docs:
- docker:
- - image: circleci/python:3.7-stretch
- steps:
- - checkout
- - run: sudo chown -R circleci:circleci /usr/local/bin
- - run: sudo chown -R circleci:circleci /usr/local/lib/python3.7/site-packages
- - restore_cache:
- keys:
- - a2ml-python-doc-deps-v3-{{ arch }}-3.7-{{ .Branch }}-{{ checksum "setup.py" }}-{{ checksum "docs/requirements.txt" }}
- - a2ml-python-doc-deps-v3-{{ arch }}-3.7-{{ .Branch }}
- - a2ml-python-doc-deps-v3-{{ arch }}-3.7
- - run:
- name: Install dependencies
- command: |
- virtualenv venv
- source venv/bin/activate
- make develop-docs
- - save_cache:
- key: a2ml-python-doc-deps-v3-{{ arch }}-3.7-{{ .Branch }}-{{ checksum "setup.py" }}-{{ checksum "docs/requirements.txt" }}
- paths:
- - "venv"
- - "/home/circleci/.cache/pip"
- - run:
- name: Build docs
- command: |
- source venv/bin/activate
- cd docs/
- make html
- - persist_to_workspace:
- root: docs/build
- paths: html
- publish-docs:
- docker:
- - image: node:10.15.0
- steps:
- - checkout
- - attach_workspace:
- at: docs/build
- - add_ssh_keys:
- fingerprints: "44:aa:23:95:60:12:6b:b5:8d:b2:e5:05:24:1f:94:cf"
- - run:
- name: Deploy docs to gh-pages branch
- command: |
- git config user.email "[email protected]"
- git config user.name "augerbot"
- npm install -g --silent [email protected]
- gh-pages --dotfiles --message "[skip ci] Updates" --dist docs/build/html
- build-and-test:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - setup_remote_docker
- - run:
- name: Install Docker Compose
- environment:
- COMPOSE_VERSION: '1.29.2'
- command: |
- curl -L "https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o ~/docker-compose
- chmod +x ~/docker-compose
- sudo mv ~/docker-compose /usr/local/bin/docker-compose
- - run: make config docker-test
- - run: make docker-save
- - persist_to_workspace:
- root: .
- paths:
- - ./image.tar.gz
- publish-docker:
- executor: docker/machine
- parameters:
- docker-tag:
- description: Tag to publish
- type: string
- default: latest
- steps:
- - checkout
- - attach_workspace:
- at: ./
- - docker/check
- - run: make docker-load
- - run: DOCKER_TAG=<< parameters.docker-tag >> make docker-tag
- - docker/push:
- image: augerai/a2ml
- tag: << parameters.docker-tag >>
-
- publish-pip:
- docker:
- - image: circleci/python:3.7-stretch
- steps:
- - checkout
- - run:
- command: |
- echo -e "[pypi]" >> ~/.pypirc
- echo -e "username = $PYPI_USERNAME" >> ~/.pypirc
- echo -e "password = $PYPI_PASSWORD" >> ~/.pypirc
- - run: make build
- - run: make release
-
- deploy-to-k8s:
- executor: aws-eks/python3
- parameters:
- cluster-name:
- description: |
- Name of the EKS cluster
- type: string
- default: ${STAGING_CLUSTER_NAME}
- aws-region:
- description: |
- AWS region
- type: string
- default: ${AWS_DEFAULT_REGION}
- namespace:
- description: |
- a2ml namespace
- type: string
- default: a2ml
- release-name:
- description: |
- a2ml helm release-name
- type: string
- default: a2ml
- reuse-values:
- description: |
- Reuse last release's values and merge in any overrides
- type: boolean
- default: true
- account-id:
- description: |
- AWS account containing the cluster
- type: string
- default: ${ACCOUNT_ID}
- role-name:
- description: |
- AWS role to assume for deploying to eks
- type: string
- default: ${ROLE_NAME}
- values-to-override:
- description: |
- Values will be used as helm install --set "key1=value1,key2=value2"
- type: string
- default: ""
- steps:
- - aws-cli/setup
- - role/assume-role:
- account-id: << parameters.account-id >>
- role-name: << parameters.role-name >>
- - aws-eks/update-kubeconfig-with-authenticator:
- cluster-name: << parameters.cluster-name >>
- aws-region: << parameters.aws-region >>
- install-kubectl: true
- - helm/install-helm-client:
- version: v3.4.2
- - run:
- command: |
- helm repo add augerai https://augerai.github.io/charts
- helm repo update
- helm repo list
- name: Add augerai repo
- - helm/upgrade-helm-chart:
- helm-version: v3.2.4
- chart: augerai/a2ml
- namespace: << parameters.namespace >>
- release-name: << parameters.release-name >>
- reuse-values: << parameters.reuse-values >>
- values-to-override: << parameters.values-to-override >>
-
-workflows:
- build-test-publish:
- jobs:
- - build-and-test:
- filters:
- tags:
- only: /^v.*/
- - build-docs
- - publish-docs:
- requires:
- - build-and-test
- - build-docs
- filters:
- branches:
- only:
- - master
- - publish-docker:
- context: docker-hub
- name: docker-publish-tag
- docker-tag: $CIRCLE_TAG
- requires:
- - build-and-test
- filters:
- tags:
- only: /^v.*/
- branches:
- ignore: /.*/
- - publish-docker:
- context: docker-hub
- name: docker-publish-master
- requires:
- - build-and-test
- filters:
- branches:
- only:
- - master
- tags:
- ignore: /^v.*/
- - deploy-to-k8s:
- name: deploy-to-k8s-master
- cluster-name: ${STAGING_CLUSTER_NAME}
- values-to-override: "image.tag=latest"
- release-name: a2ml
- namespace: a2ml
- context: eks
- requires:
- - docker-publish-master
- filters:
- branches:
- only:
- - master
- tags:
- ignore: /^v.*/
- - deploy-to-k8s:
- name: deploy-to-k8s
- namespace: a2ml
- release-name: a2ml
- values-to-override: "image.tag=$CIRCLE_TAG"
- context: eks
- requires:
- - docker-publish-tag
- account-id: ${PROD_ACCOUNT_ID}
- cluster-name: ${PROD_CLUSTER_NAME}
- role-name: ${PROD_ROLE_NAME}
- filters:
- tags:
- only: /^v.*/
- branches:
- ignore: /.*/
- - publish-pip:
- filters:
- tags:
- only: /^v.*/
- branches:
- ignore: /.*/
- context: pypi
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 00000000..f87c87d2
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,32 @@
+name: docs
+
+on:
+ push:
+ tags:
+ - v*
+
+jobs:
+ build_publish_docs:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python
+ uses: actions/setup-python@v3
+ with:
+ python-version: 3.7
+
+ - name: Build docs
+ run: |
+ make develop-docs
+ cd docs/
+ make html
+
+ - name: Deploy
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: docs/build/html
+
+
+
\ No newline at end of file
diff --git a/.github/workflows/publish_docker.yml b/.github/workflows/publish_docker.yml
new file mode 100644
index 00000000..80739507
--- /dev/null
+++ b/.github/workflows/publish_docker.yml
@@ -0,0 +1,69 @@
+name: publish_docker
+
+on:
+ push:
+ tags:
+ - v*
+ branches:
+ - master
+
+jobs:
+ publish_docker:
+ runs-on: ubuntu-latest
+
+ permissions:
+ id-token: write
+ contents: read
+ env:
+ DOCKER_TAG: ${{github.ref_type == 'tag' && github.ref_name || 'latest'}}
+ REPO_NAME: 'augerai/a2ml'
+ DOCKER_USER: ${{ secrets.DOCKER_USER }}
+ DOCKER_PASS: ${{ secrets.DOCKER_PASS }}
+
+ AWS_KS_ROLE: ${{ secrets[format('AWS_KS_{0}_ROLE', github.ref_type == 'tag' && 'STABLE' || 'EXPERIMENTAL')] }}
+ CLUSTER_NAME: ${{ secrets[format('{0}_CLUSTER_NAME', github.ref_type == 'tag' && 'STABLE' || 'EXPERIMENTAL')] }}
+ KUBECONFIG_FILE: '/home/runner/.kube/config'
+ RELEASE_NAME: 'a2ml'
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Build docker and run tests
+ run: make config docker-test
+
+ - name: Push docker image
+ run: |
+ docker login -u $DOCKER_USER -p $DOCKER_PASS
+ docker push $REPO_NAME:$DOCKER_TAG
+
+ - name: configure aws credentials with role1
+ uses: aws-actions/configure-aws-credentials@v1
+ with:
+ role-to-assume: ${{ secrets.AWS_ROLE }}
+ aws-region: us-west-2
+
+ - name: Assume execution role
+ uses: aws-actions/configure-aws-credentials@v1
+ with:
+ aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }}
+ aws-region: us-west-2
+ aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }}
+ aws-session-token: ${{ env.AWS_SESSION_TOKEN }}
+ role-duration-seconds: 3000
+ role-skip-session-tagging: true
+ role-to-assume: ${{ env.AWS_KS_ROLE }}
+
+ - uses: azure/[email protected]
+ - uses: azure/setup-helm@v1
+ with:
+ version: 'v3.4.2'
+
+ - name: setup the kubectl config
+ run : aws eks update-kubeconfig --name $CLUSTER_NAME
+
+ - name: Helm upgrade augerai repo
+ run: |
+ helm repo add augerai https://augerai.github.io/charts
+ helm repo update
+ helm repo list
+ helm upgrade $RELEASE_NAME $REPO_NAME --namespace=$RELEASE_NAME --set=image.tag=$DOCKER_TAG --reuse-values --wait --atomic --kubeconfig $KUBECONFIG_FILE
diff --git a/.github/workflows/publish_pip.yml b/.github/workflows/publish_pip.yml
new file mode 100644
index 00000000..e10e2f9a
--- /dev/null
+++ b/.github/workflows/publish_pip.yml
@@ -0,0 +1,33 @@
+name: publish_pip
+
+on:
+ push:
+ tags:
+ - v*
+
+jobs:
+ publish_pip:
+ runs-on: ubuntu-latest
+ env:
+ PYPI_USERNAME: ${{ secrets.PYPI_USERNAME }}
+ PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python
+ uses: actions/setup-python@v3
+ with:
+ python-version: 3.7
+
+ - name: Make pypirc file
+ run: |
+ echo -e "[pypi]" >> ~/.pypirc
+ echo -e "username = $PYPI_USERNAME" >> ~/.pypirc
+ echo -e "password = $PYPI_PASSWORD" >> ~/.pypirc
+
+ - name: Install dependencies
+ run: pip install wheel
+ - name: Build wheel
+ run: make build
+ - name: Deploy package
+ run: make release
diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index 53a7c762..461d9678 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.76'
+__version__ = '1.0.77'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index dc72609a..03454e01 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -8,7 +8,7 @@ class A2ML(BaseA2ML):
"""Facade to A2ML providers."""
def __init__(self, ctx, provider = None):
- """Initializes A2ML PREDIT instance.
+ """Initializes new A2ML PREDIT instance.
Args:
ctx (object): An instance of the a2ml Context.
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index f84dfa20..11f95335 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -79,6 +79,8 @@ def _do_score_actual(self, df_data, predicted_feature=None, extra_features=[]):
def validate_roi_syntax(self, expressions, features=[]):
res = []
+ logging.info('validate_roi_syntax with experession: %s'%(expressions))
+
known_vars = ["A", "P", "$" + self.target_feature] + list(
map(lambda name: "$" + name, set(self.original_features + features))
)
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 51d880e1..0f8a6e5f 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,2 @@
-sphinx~=3.0.4
+sphinx~=4.4.0
git+https://github.com/augerai/sphinx_rtd_theme.git@bump-version#2ab38df0d303163e0e6c2bac80d907e9915000cb'
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2022-05-09T20:42:48 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-607 | 2273c754ddc486783b6b1095e15c5b67e7c48242 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index a53afbe9..218e3516 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.65'
+__version__ = '1.0.66'
diff --git a/a2ml/api/auger/config.py b/a2ml/api/auger/config.py
index 0a9e47a5..3d8013fb 100644
--- a/a2ml/api/auger/config.py
+++ b/a2ml/api/auger/config.py
@@ -3,7 +3,7 @@ def __init__(self, ctx):
super(AugerConfig, self).__init__()
self.ctx = ctx
- def set_data_set(self, name, source=None, validation=False):
+ def set_data_set(self, name, source=None, validation=False, user_name=None):
#TODO: add more providers later
if validation:
self.ctx.config.set('experiment/validation_dataset', name)
@@ -12,6 +12,9 @@ def set_data_set(self, name, source=None, validation=False):
else:
#print("set_data_set: %s"%self.ctx.use_auger_cloud())
self.ctx.config.set('dataset', name)
+ if user_name:
+ self.ctx.config.set('dataset_name', user_name)
+
if self.ctx.use_auger_cloud() and 'azure' in self.ctx.get_providers():
self.ctx.config.set('dataset', name, "azure")
@@ -21,9 +24,34 @@ def set_data_set(self, name, source=None, validation=False):
def set_experiment(self, experiment_name, experiment_session_id):
self.ctx.config.set('experiment/name', experiment_name)
self.ctx.config.set('experiment/experiment_session_id', experiment_session_id)
+
+ if self.ctx.config.get('dataset_name'):
+ dataset_name = self.ctx.config.get('dataset_name')
+ self.ctx.config.set(f'experiments/{dataset_name}/experiment_id', experiment_name)
+ self.ctx.config.set(f'experiments/{dataset_name}/experiment_session_id', experiment_session_id)
+
self.ctx.config.write()
+ def _get_experiment_by_dataset(self):
+ dataset_name = self.ctx.config.get('dataset_name')
+ experiments = self.ctx.config.get('experiments', {})
+
+ return experiments.get(dataset_name, {})
+
+ def get_experiment(self):
+ return self._get_experiment_by_dataset().get('experiment_id',
+ self.ctx.config.get('experiment/name'))
+
+ def get_experiment_session(self):
+ return self._get_experiment_by_dataset().get('experiment_session_id',
+ self.ctx.config.get('experiment/experiment_session_id'))
+
+ def get_dataset(self):
+ return self._get_experiment_by_dataset().get('dataset_id',
+ self.ctx.config.get('dataset'))
+
def set_project(self, project_name):
self.ctx.config.set('name', project_name)
self.ctx.config.write()
return self
+
diff --git a/a2ml/api/auger/dataset.py b/a2ml/api/auger/dataset.py
index 65ec91ea..a99cc098 100644
--- a/a2ml/api/auger/dataset.py
+++ b/a2ml/api/auger/dataset.py
@@ -18,7 +18,7 @@ def __init__(self, ctx):
@with_project(autocreate=False)
def list(self, project):
count = 0
- selected = self.ctx.config.get('dataset', None)
+ selected = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
for dataset in iter(DataSet(self.ctx, project).list()):
self.ctx.log(
('[%s] ' % ('x' if selected == dataset.get('name') else ' ')) +
@@ -40,7 +40,7 @@ def _create(self, project, source = None, validation=False, name=None, descripti
if source is None:
source = self.ctx.config.get('source', None)
dataset = DataSet(self.ctx, project).create(source, name, description)
- AugerConfig(self.ctx).set_data_set(dataset.name, source, validation)
+ AugerConfig(self.ctx).set_data_set(dataset.name, source, validation, name)
return dataset
@@ -60,7 +60,7 @@ def upload(self, project, source = None, name=None):
@with_project(autocreate=False)
def delete(self, project, name):
if name is None:
- name = self.ctx.config.get('dataset', None)
+ name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
DataSet(self.ctx, project, name).delete()
if name == self.ctx.config.get('dataset', None):
AugerConfig(self.ctx).set_data_set(None, None, False).set_experiment(None, None)
@@ -69,7 +69,7 @@ def delete(self, project, name):
@error_handler
def select(self, name):
- old_name = self.ctx.config.get('dataset', None)
+ old_name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
if name != old_name:
AugerConfig(self.ctx).set_data_set(name, None, False).set_experiment(None, None)
self.ctx.log('Selected DataSet %s' % name)
@@ -80,7 +80,7 @@ def select(self, name):
@with_project(autocreate=False)
def download(self, project, name, path_to_download):
if name is None:
- name = self.ctx.config.get('dataset', None)
+ name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
file_name = DataSet(self.ctx, project, name).download(path_to_download)
self.ctx.log('Downloaded dataset %s to %s' % (name, file_name))
return {'dowloaded': name, 'file': file_name}
diff --git a/a2ml/api/auger/experiment.py b/a2ml/api/auger/experiment.py
index edb4df07..04dbc3cc 100644
--- a/a2ml/api/auger/experiment.py
+++ b/a2ml/api/auger/experiment.py
@@ -31,7 +31,7 @@ def list(self, dataset):
@with_dataset
def start(self, dataset):
experiment_name = \
- self.ctx.config.get('experiment/name', None)
+ AugerConfig(self.ctx).get_experiment() #self.ctx.config.get('experiment/name', None)
experiment_name, session_id = \
Experiment(self.ctx, dataset, experiment_name).start()
AugerConfig(self.ctx).set_experiment(experiment_name, session_id)
@@ -41,12 +41,12 @@ def start(self, dataset):
@authenticated
@with_dataset
def stop(self, dataset, run_id = None):
- name = self.ctx.config.get('experiment/name', None)
+ name = AugerConfig(self.ctx).get_experiment() #self.ctx.config.get('experiment/name', None)
if name is None:
raise AugerException('Please specify Experiment name...')
if run_id is None:
- run_id = self.ctx.config.get(
- 'experiment/experiment_session_id', None)
+ run_id = AugerConfig(self.ctx).get_experiment_session()
+ #self.ctx.config.get('experiment/experiment_session_id', None)
if Experiment(self.ctx, dataset, name).stop(run_id):
self.ctx.log('Search is stopped...')
@@ -58,12 +58,13 @@ def stop(self, dataset, run_id = None):
@authenticated
@with_dataset
def leaderboard(self, dataset, run_id = None):
- name = self.ctx.config.get('experiment/name', None)
+ name = AugerConfig(self.ctx).get_experiment() #self.ctx.config.get('experiment/name', None)
if name is None:
raise AugerException('Please specify Experiment name...')
if run_id is None:
- run_id = self.ctx.config.get(
- 'experiment/experiment_session_id', None)
+ run_id = AugerConfig(self.ctx).get_experiment_session()
+ # run_id = self.ctx.config.get(
+ # 'experiment/experiment_session_id', None)
leaderboard, status, run_id, trials_count, errors = Experiment(
self.ctx, dataset, name).leaderboard(run_id)
if leaderboard is None:
@@ -109,7 +110,7 @@ def leaderboard(self, dataset, run_id = None):
@authenticated
@with_dataset
def history(self, dataset):
- name = self.ctx.config.get('experiment/name', None)
+ name = AugerConfig(self.ctx).get_experiment() #self.ctx.config.get('experiment/name', None)
if name is None:
raise AugerException('Please specify Experiment name...')
for exp_run in iter(Experiment(self.ctx, dataset, name).history()):
diff --git a/a2ml/api/auger/impl/decorators.py b/a2ml/api/auger/impl/decorators.py
index aaa73523..f681cad2 100644
--- a/a2ml/api/auger/impl/decorators.py
+++ b/a2ml/api/auger/impl/decorators.py
@@ -33,10 +33,11 @@ def wrapper(self, *args, **kwargs):
def with_dataset(decorated):
from .dataset import DataSet
+ from ..config import AugerConfig
def wrapper(self, *args, **kwargs):
project = _get_project(self, False)
- data_set_name = self.ctx.config.get('dataset', None)
+ data_set_name = AugerConfig(self.ctx).get_dataset() #self.ctx.config.get('dataset', None)
if data_set_name is None:
raise AugerException(
'Please specify dataset name in auger.yaml/dataset...')
diff --git a/a2ml/api/utils/context.py b/a2ml/api/utils/context.py
index 853aa570..cab0cf6c 100644
--- a/a2ml/api/utils/context.py
+++ b/a2ml/api/utils/context.py
@@ -14,7 +14,7 @@
class Context(object):
"""The Context class provides an environment to run A2ML"""
- def __init__(self, name='config', path=None, debug=False):
+ def __init__(self, name='auger', path=None, debug=False):
"""Initializes the Context instance
Args:
@@ -39,7 +39,7 @@ def __init__(self, name='config', path=None, debug=False):
self.provider_info = None
if len(self.name) > 0:
- self.name = "{:<9}".format('[%s]' % self.name)
+ self.name = f'[{self.name}] ' #"{:<9}".format('[%s]' % self.name)
self.debug = self.config.get('debug', debug)
self.set_runs_on_server(False)
@@ -109,6 +109,8 @@ def is_external_provider(self):
return providers and providers[0] == 'external'
def copy(self, name):
+ return self
+
"""creates a copy of an existing Context
Args:
@@ -123,23 +125,23 @@ def copy(self, name):
ctx = Context()
new_ctx = ctx.copy()
"""
- new = Context(name, self.config.path, self.debug)
- new.set_runs_on_server(self._runs_on_server)
- new.notificator = self.notificator
- new.request_id = self.request_id
- new.config.parts = self.config.parts
- new.config.parts_changes = self.config.parts_changes
-
- try:
- new.config.set("providers", name, config_name='config')
- except Exception as e:
- # In case if command run in folder without config, do not set it
- pass
+ # new = Context(name, self.config.path, self.debug)
+ # new.set_runs_on_server(self._runs_on_server)
+ # new.notificator = self.notificator
+ # new.request_id = self.request_id
+ # new.config.parts = self.config.parts
+ # new.config.parts_changes = self.config.parts_changes
+
+ # try:
+ # new.config.set("providers", name, config_name='config')
+ # except Exception as e:
+ # # In case if command run in folder without config, do not set it
+ # pass
- if hasattr(self, 'credentials'):
- new.credentials = self.credentials
+ # if hasattr(self, 'credentials'):
+ # new.credentials = self.credentials
- return new
+ # return new
def log(self, msg, *args, **kwargs):
log.info('%s%s' %(self.name, msg), *args, **kwargs)
diff --git a/a2ml/cmdl/commands/cmd_import.py b/a2ml/cmdl/commands/cmd_import.py
index 2502ef75..fcc87dbc 100644
--- a/a2ml/cmdl/commands/cmd_import.py
+++ b/a2ml/cmdl/commands/cmd_import.py
@@ -5,12 +5,14 @@
@click.command('import', short_help='Import data for training.')
@click.option('--source', '-s', type=click.STRING, required=False,
help='Source file to import.If skipped, then import source from config.yml.')
[email protected]('--name', '-n', type=click.STRING, required=False,
+ help='Name file to import.')
@click.option('--description', '-d', type=click.STRING, required=False,
help='Description of dataset.')
@click.option('--provider', '-p', type=click.Choice(['auger','azure']), required=False,
help='Cloud AutoML Provider.')
@pass_context
-def cmdl(ctx, source, description, provider):
+def cmdl(ctx, source, name, description, provider):
"""Import data for training."""
ctx.setup_logger(format='')
- A2ML(ctx, provider).import_data(source, description=description)
+ A2ML(ctx, provider).import_data(source, name, description=description)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2022-03-17T09:51:24 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-601 | bb141e98e7cb89a5d6aa226cb98adbb1b3163add | diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 69f5b188..8424c81a 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -365,7 +365,8 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
score, score_true_data )
@show_result
- def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False, provider=None):
+ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
+ actual_date_column=None, experiment_params=None, locally=False, provider=None):
"""Submits actual results(ground truths) for predictions of a deployed model. This is used to review and monitor active models.
Note:
@@ -396,6 +397,12 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
columns(list): list of column names if data is array of records
actuals_at: Actuals date. Use for review of historical data.
actual_date_column(str): name of column in data which contains actual date
+ experiment_params(dict): parameters to calculate experiment metrics ::
+
+ start_date(date): experiment actuals start date
+ end_date(date): experiment actuals end date
+ date_col(str): column name with date
+
locally(bool): Process actuals locally.
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider set in costructor or config.
@@ -437,7 +444,8 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
A2ML(ctx, "external").actuals('external_model_id', data=actual_records,columns=columns)
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('actuals', model_id, filename, data, columns, actuals_at, actual_date_column, locally)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('actuals',
+ model_id, filename, data, columns, actuals_at, actual_date_column, experiment_params, locally)
@show_result
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index aaf1cf0c..86dae92b 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -155,7 +155,8 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
score, score_true_data )
@show_result
- def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False, provider=None):
+ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
+ actual_date_column=None, experiment_params=None, locally=False, provider=None):
"""Submits actual results(ground truths) for predictions of a deployed model. This is used to review and monitor active models.
Note:
@@ -186,6 +187,13 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
columns(list): list of column names if data is array of records
actuals_at: Actuals date. Use for review of historical data.
actual_date_column(str): name of column in data which contains actual date
+ experiment_params(dict): parameters to calculate experiment metrics ::
+
+ start_date(date): experiment actuals start date
+ end_date(date): experiment actuals end date
+ date_col(str): column name with date
+
+
locally(bool): Process actuals locally.
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider set in costructor or config.
@@ -227,7 +235,8 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
A2MLModel(ctx, "external").actuals('external_model_id', data=actual_records,columns=columns)
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('actuals', model_id, filename, data, columns, actuals_at, actual_date_column, locally)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('actuals',
+ model_id, filename, data, columns, actuals_at, actual_date_column, experiment_params, locally)
@show_result
def review_alert(self, model_id, parameters = None, locally=False, provider=None, name=None):
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 27c75909..46f7db9d 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -30,9 +30,10 @@ def predict(self, model_id, filename, threshold=None, locally=False, data=None,
model_id, filename, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data)
- def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
+ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
+ actual_date_column=None, experiment_params=None, locally=False):
return AugerModel(self.ctx).actuals(
- model_id, filename, data, columns, actuals_at, actual_date_column, locally)
+ model_id, filename, data, columns, actuals_at, actual_date_column, experiment_params, locally)
def delete_actuals(self, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
return AugerModel(self.ctx).delete_actuals(
diff --git a/a2ml/api/auger/impl/cloud/actual.py b/a2ml/api/auger/impl/cloud/actual.py
index 17e08373..c29c7f23 100644
--- a/a2ml/api/auger/impl/cloud/actual.py
+++ b/a2ml/api/auger/impl/cloud/actual.py
@@ -12,7 +12,7 @@ def __init__(self, ctx, pipeline_api, use_endpoint=False):
self.parent_id_name = "endpoint_id"
self._set_api_request_path("AugerEndpointActualApi")
- def create(self, records, features, actuals_at, actuals_path, actual_date_column):
+ def create(self, records, features, actuals_at, actuals_path, actual_date_column, experiment_params):
params = {}
if self.use_endpoint:
params['endpoint_id'] = self.parent_api.object_id
@@ -35,6 +35,10 @@ def create(self, records, features, actuals_at, actuals_path, actual_date_column
if actuals_at:
params['actuals_at'] = str(actuals_at)
+ if experiment_params:
+ params['experiment_params'] = experiment_params
+
+ print(params)
return self._call_create(
params=params,
has_return_object=False)
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index 4c2e4605..b5c1847e 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -94,12 +94,12 @@ def predict(self, records, features, threshold=None, file_url=None, predicted_at
return prediction_properties.get('result')
- def actual(self, records, features, actuals_at, actuals_path, actual_date_column):
+ def actual(self, records, features, actuals_at, actuals_path, actual_date_column, experiment_params):
if self.object_id is None:
raise AugerException('Please provide Auger Pipeline id')
actual_api = AugerActualApi(self.ctx, self, use_endpoint=self.check_endpoint())
- actual_api.create(records, features, actuals_at, actuals_path, actual_date_column)
+ actual_api.create(records, features, actuals_at, actuals_path, actual_date_column, experiment_params)
#TODO: get object actual from cloud
return True
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index 89e2b25e..72826d08 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -35,7 +35,8 @@ def predict(self, filename, model_id, threshold=None, locally=False, data=None,
return ModelPredict(self.ctx).execute(filename, model_id, threshold, locally, data, columns,
predicted_at, output, no_features_in_result, score, score_true_data)
- def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
+ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None,
+ actual_date_column=None, experiment_params=None, locally=False):
if locally:
is_loaded, model_path = ModelDeploy(self.ctx, self.project).verify_local_model(model_id)
@@ -57,10 +58,12 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
columns=columns,
actual_date=actuals_at,
actual_date_column=actual_date_column,
+ experiment_params=experiment_params,
provider='auger'
)
else:
- return ModelActual(self.ctx).execute(model_id, filename, data, columns, actuals_at, actual_date_column)
+ return ModelActual(self.ctx).execute(model_id, filename, data, columns, actuals_at,
+ actual_date_column, experiment_params)
def delete_actuals(self, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
if locally:
diff --git a/a2ml/api/auger/impl/mparts/actual.py b/a2ml/api/auger/impl/mparts/actual.py
index aad79269..50edcb7b 100644
--- a/a2ml/api/auger/impl/mparts/actual.py
+++ b/a2ml/api/auger/impl/mparts/actual.py
@@ -12,9 +12,9 @@ def __init__(self, ctx):
super(ModelActual, self).__init__()
self.ctx = ctx
- def execute(self, model_id, filename, data, columns, actuals_at, actual_date_column):
+ def execute(self, model_id, filename, data, columns, actuals_at, actual_date_column, experiment_params):
records, features, file_url, is_pandas_df = ModelPredict(self.ctx)._process_input(filename, data,
columns=columns)
pipeline_api = AugerPipelineApi(self.ctx, None, model_id)
- return pipeline_api.actual(records, features, actuals_at, file_url, actual_date_column)
+ return pipeline_api.actual(records, features, actuals_at, file_url, actual_date_column, experiment_params)
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index 7901f871..326a219f 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -42,8 +42,9 @@ def predict(self, filename, model_id, threshold, locally, data, columns, predict
@error_handler
@authenticated
@with_project(autocreate=False)
- def actuals(self, project, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
- return Model(self.ctx, project).actuals(model_id, filename, data, columns, actuals_at, actual_date_column, locally)
+ def actuals(self, project, model_id, filename=None, data=None, columns=None, actuals_at=None,
+ actual_date_column=None, experiment_params=None, locally=False):
+ return Model(self.ctx, project).actuals(model_id, filename, data, columns, actuals_at, actual_date_column, experiment_params, locally)
@error_handler
@authenticated
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 6df5be05..1d2e0bf9 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -145,7 +145,7 @@ def add_external_model(self, target_column, scoring, task_type, binary_classific
def add_actuals(
self, ctx, actuals_path=None, data=None, columns=None, external_model=False,
actual_date=None, actual_date_column=None, actuals_id = None, return_count=False, provider='auger',
- do_predict=False
+ do_predict=False, experiment_params=None
):
ds_actuals = DataFrame.create_dataframe(actuals_path, data, features=columns)
@@ -178,8 +178,12 @@ def add_actuals(
result = self._do_score_actual(ds_actuals.df)
baseline_score = {}
+ experiment_score = {}
+ experiment_count = 0
if "baseline_target" in ds_actuals.columns:
baseline_score = self._do_score_actual(ds_actuals.df, "baseline_target")
+ if experiment_params:
+ experiment_score, experiment_count = self._do_score_actual_experiment(ds_actuals, experiment_params)
#logging.info("Actual result: %s", result)
ds_actuals.df = ds_actuals.df.rename(columns={self.target_feature: 'a2ml_predicted'})
@@ -207,10 +211,34 @@ def add_actuals(
ds_actuals.saveToFeatherFile(os.path.join(self.model_path, "predictions", file_name))
if return_count:
- return {'score': result, 'count': actuals_count, 'baseline_score': baseline_score}
+ return {'score': result, 'count': actuals_count, 'baseline_score': baseline_score,
+ 'experiment_score': experiment_score, 'experiment_count': experiment_count}
else:
return result
+ def _do_score_actual_experiment(self, ds_actuals, experiment_params):
+ if experiment_params.get('start_date') and experiment_params.get('end_date'):
+ df_exp_actuals = ds_actuals.df.query("%s>='%s' and %s<'%s'"%(
+ experiment_params.get('date_col'),
+ experiment_params.get('start_date'),
+ experiment_params.get('date_col'),
+ experiment_params.get('end_date')
+ ))
+ elif experiment_params.get('start_date'):
+ df_exp_actuals = ds_actuals.df.query("%s>='%s'"%(
+ experiment_params.get('date_col'),
+ experiment_params.get('start_date')
+ ))
+ elif experiment_params.get('end_date'):
+ df_exp_actuals = ds_actuals.df.query("%s<'%s'"%(
+ experiment_params.get('date_col'),
+ experiment_params.get('end_date')
+ ))
+ else:
+ df_exp_actuals = ds_actuals.df
+
+ return self._do_score_actual(df_exp_actuals), len(df_exp_actuals)
+
def _do_predict(self, ctx, ds_actuals, provider, predict_feature=None, predicted_at=None):
missing_features = set(self.original_features) - set(ds_actuals.columns)
if len(missing_features) > 0:
diff --git a/a2ml/tasks_queue/tasks_hub_api.py b/a2ml/tasks_queue/tasks_hub_api.py
index 73bafa16..d5ccbae4 100644
--- a/a2ml/tasks_queue/tasks_hub_api.py
+++ b/a2ml/tasks_queue/tasks_hub_api.py
@@ -560,7 +560,8 @@ def score_actuals_by_model_task(params):
return_count=params.get('return_count', False),
provider=params.get('provider'),
external_model=external_model,
- do_predict=params.get('do_predict', False)
+ do_predict=params.get('do_predict', False),
+ experiment_params=params.get('experiment_params'),
)
@celeryApp.task(ignore_result=True)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-10-29T17:51:10 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-594 | c255bd05603d41d4bfb604ec940225d2656b0b6c | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index b41bc4d5..f47e40c5 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.37'
+__version__ = '1.0.38'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 14fd78f0..6049297e 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -1,6 +1,6 @@
from a2ml.api.base_a2ml import BaseA2ML
from a2ml.api.utils.show_result import show_result
-from a2ml.api.utils import convert_source
+#from a2ml.api.utils import convert_source
from a2ml.api.utils.context import Context
@@ -28,7 +28,7 @@ def __init__(self, ctx, provider = None):
self.local_runner = lambda: self.build_runner(ctx, provider, force_local=True)
@show_result
- def import_data(self, source=None):
+ def import_data(self, source=None, name=None):
"""Imports data defined in context. Uploading the same file name will result in versions being appended to the file name.
Note:
@@ -46,6 +46,7 @@ def import_data(self, source=None):
Args:
source (str, optional): Local file name, remote url to the data source file, Pandas DataFrame or postgres url
+ name (str, optional): Name of dataset, if none then file name used. If source is DataFrame then name should be specified.
Returns:
Results for each provider. ::
@@ -70,8 +71,7 @@ def import_data(self, source=None):
a2ml = A2ML(ctx, 'auger, azure')
a2ml.import_data()
"""
- with convert_source(source, self.ctx.config.get("name", "source_data")) as data_source:
- return self.runner.execute('import_data', source=data_source)
+ return self.runner.execute('import_data', source=source, name=name)
@show_result
def preprocess_data(self, data, preprocessors, locally=False):
diff --git a/a2ml/api/a2ml_dataset.py b/a2ml/api/a2ml_dataset.py
index 1a623e87..9861efa2 100644
--- a/a2ml/api/a2ml_dataset.py
+++ b/a2ml/api/a2ml_dataset.py
@@ -1,6 +1,6 @@
from a2ml.api.base_a2ml import BaseA2ML
from a2ml.api.utils.show_result import show_result
-from a2ml.api.utils import convert_source
+#from a2ml.api.utils import convert_source
class A2MLDataset(BaseA2ML):
@@ -58,11 +58,12 @@ def list(self):
return self.runner.execute('list')
@show_result
- def create(self, source = None):
+ def create(self, source = None, name=None):
"""Create a new DataSet for the Project specified in the .yaml.
Args:
source (str, optional): Local file name, remote url to the data source file, Pandas DataFrame or postgres url
+ name (str, optional): Name of dataset, if none then file name used. If source is DataFrame then name should be specified.
Returns:
Results for each provider. ::
@@ -82,8 +83,7 @@ def create(self, source = None):
ctx = Context()
dataset = DataSet(ctx, 'auger, azure').create('../dataset.csv')
"""
- with convert_source(source, self.ctx.config.get("name", "source_data")) as data_source:
- return self.runner.execute('create', data_source)
+ return self.runner.execute('create', data_source, name)
@show_result
def delete(self, name = None):
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 394c873f..86cc3b80 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -9,8 +9,8 @@ def __init__(self, ctx):
super(AugerA2ML, self).__init__()
self.ctx = ctx
- def import_data(self, source=None):
- return AugerDataset(self.ctx).create(source=source)
+ def import_data(self, source=None, name=None):
+ return AugerDataset(self.ctx).create(source=source, name=name)
def preprocess_data(self, data, preprocessors, locally=False):
return AugerDataset(self.ctx).preprocess_data(data, preprocessors, locally)
diff --git a/a2ml/api/auger/dataset.py b/a2ml/api/auger/dataset.py
index 4e7ffc6a..0619f62c 100644
--- a/a2ml/api/auger/dataset.py
+++ b/a2ml/api/auger/dataset.py
@@ -31,15 +31,15 @@ def list(self, project):
@error_handler
@authenticated
@with_project(autocreate=True)
- def create(self, project, source = None, validation=False):
- dataset = self._create(project, source, validation)
+ def create(self, project, source = None, validation=False, name=None):
+ dataset = self._create(project, source, validation, name)
self.ctx.log('Created DataSet %s' % dataset.name)
return {'created': dataset.name}
- def _create(self, project, source = None, validation=False):
+ def _create(self, project, source = None, validation=False, name=None):
if source is None:
source = self.ctx.config.get('source', None)
- dataset = DataSet(self.ctx, project).create(source)
+ dataset = DataSet(self.ctx, project).create(source, name)
AugerConfig(self.ctx).set_data_set(dataset.name, source, validation)
return dataset
diff --git a/a2ml/api/auger/impl/cloud/dataset.py b/a2ml/api/auger/impl/cloud/dataset.py
index 6419bfda..ae56a0c7 100644
--- a/a2ml/api/auger/impl/cloud/dataset.py
+++ b/a2ml/api/auger/impl/cloud/dataset.py
@@ -6,6 +6,7 @@
import urllib.parse
import urllib.request
import xml.etree.ElementTree as ElementTree
+from a2ml.api.utils.dataframe import DataFrame
from .cluster import AugerClusterApi
from .project_file import AugerProjectFileApi
@@ -31,8 +32,17 @@ def do_upload_file(self, data_source_file, data_set_name=None, local_data_source
# AugerDataSetApi.verify(data_source_file, self.ctx.config.path)
if local_data_source:
- file_url = self._upload_to_cloud(data_source_file)
- file_name = os.path.basename(data_source_file)
+ if DataFrame.is_dataframe(data_source_file):
+ with fsclient.save_atomic("%s.parquet"%data_set_name, move_file=False) as local_path:
+ ds = DataFrame.create_dataframe(data_source_file)
+ ds.saveToParquetFile(local_path)
+ file_url = self._upload_to_cloud(local_path)
+
+ file_name = data_set_name
+ else:
+ file_url = self._upload_to_cloud(data_source_file)
+ file_name = os.path.basename(data_source_file)
+
if data_set_name:
self.object_name = data_set_name
else:
@@ -81,6 +91,9 @@ def _get_readable_name(self):
@staticmethod
def verify(data_source_file, config_path=None):
+ if DataFrame.is_dataframe(data_source_file):
+ return data_source_file, True
+
if urllib.parse.urlparse(data_source_file).scheme in ['http', 'https']:
return data_source_file, False
diff --git a/a2ml/api/auger/impl/dataset.py b/a2ml/api/auger/impl/dataset.py
index 6a86af1a..b05fb435 100644
--- a/a2ml/api/auger/impl/dataset.py
+++ b/a2ml/api/auger/impl/dataset.py
@@ -10,7 +10,7 @@ def __init__(self, ctx, project, data_set_name=None):
ctx, project, data_set_name)
self.project = project
- def create(self, data_source_file):
+ def create(self, data_source_file, name):
if data_source_file is None:
raise AugerException('Please specify data source file...')
@@ -19,7 +19,7 @@ def create(self, data_source_file):
self.project.start()
- super().create(data_source_file, self.object_name, local_data_source=local_data_source)
+ super().create(data_source_file, name if name else self.object_name, local_data_source=local_data_source)
return self
def upload_file(self, data_source_file):
diff --git a/a2ml/api/utils/__init__.py b/a2ml/api/utils/__init__.py
index 22429b26..6acf1a52 100644
--- a/a2ml/api/utils/__init__.py
+++ b/a2ml/api/utils/__init__.py
@@ -215,14 +215,14 @@ def convert_to_date(date):
else:
return date
[email protected]
-def convert_source(source, name):
- if source is not None and isinstance(source, pd.DataFrame):
- with fsclient.save_atomic("%s.parquet"%name, move_file=False) as local_path:
- source.to_parquet(local_path, index=False, compression="gzip")
- yield local_path
- else:
- yield source
+# @contextlib.contextmanager
+# def convert_source(source, name):
+# if source is not None and isinstance(source, pd.DataFrame):
+# with fsclient.save_atomic("%s.parquet"%name, move_file=False) as local_path:
+# source.to_parquet(local_path, index=False, compression="gzip")
+# yield local_path
+# else:
+# yield source
def retry_helper(func, retry_errors=[], num_try=10, delay=10, ctx=None):
nTry = 0
diff --git a/a2ml/api/utils/dataframe.py b/a2ml/api/utils/dataframe.py
index ca737052..d81998f7 100644
--- a/a2ml/api/utils/dataframe.py
+++ b/a2ml/api/utils/dataframe.py
@@ -43,21 +43,48 @@ def _get_compression(self, extension):
return compression
@staticmethod
- def create_dataframe(data_path=None, records=None, features=None):
- if data_path:
- ds = DataFrame({'data_path': data_path})
- ds.load(features = features)
- elif records is not None and isinstance(records, pd.DataFrame):
- ds = DataFrame({})
- ds.df = records
- if features:
- ds.df = ds.df[features]
-
- ds.from_pandas = True
+ def create_dataframe(data_path=None, records=None, features=None, reset_index=False):
+ if data_path is not None:
+ if isinstance(data_path, pd.DataFrame):
+ ds = DataFrame({})
+ ds.df = data_path
+ elif isinstance(data_path, DataFrame):
+ ds = data_path
+ elif isinstance(data_path, list):
+ ds = DataFrame({})
+ ds.load_records(data_path)
+ elif isinstance(data_path, dict):
+ ds = DataFrame({})
+
+ if 'data' in data_path and 'columns' in data_path:
+ ds.load_records(data_path['data'], features=data_path['columns'])
+ else:
+ ds.load_records(data_path)
+ else:
+ ds = DataFrame({'data_path': data_path})
+ ds.load(features = features)
else:
ds = DataFrame({})
ds.load_records(records, features=features)
+ if reset_index and ds.df is not None:
+ ds.df.reset_index(drop=True, inplace=True)
+
+
+ # if data_path:
+ # ds = DataFrame({'data_path': data_path})
+ # ds.load(features = features)
+ # elif records is not None and isinstance(records, pd.DataFrame):
+ # ds = DataFrame({})
+ # ds.df = records
+ # if features:
+ # ds.df = ds.df[features]
+
+ # ds.from_pandas = True
+ # else:
+ # ds = DataFrame({})
+ # ds.load_records(records, features=features)
+
return ds
@staticmethod
@@ -72,6 +99,10 @@ def load_from_files(files, features=None):
except Exception as exc:
logging.exception("load_from_files failed for: %s. Error: %s"%(path, exc))
+ @staticmethod
+ def is_dataframe(data):
+ return isinstance(data, pd.DataFrame) or isinstance(data, DataFrame)
+
def load_from_file(self, path, features=None, nrows=None):
from collections import OrderedDict
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-09-01T21:01:12 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-591 | e8fc37867e2ede621fa14a6877f661d62de5ae20 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index 1c327d42..24d5b4c5 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.34'
+__version__ = '1.0.35'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index aa0e4284..14fd78f0 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -73,6 +73,61 @@ def import_data(self, source=None):
with convert_source(source, self.ctx.config.get("name", "source_data")) as data_source:
return self.runner.execute('import_data', source=data_source)
+ @show_result
+ def preprocess_data(self, data, preprocessors, locally=False):
+ """Preprocess data
+
+ Args:
+ data (str|pandas.DataFrame): Input data for preprocess. Can be path to file(local or s3) or Pandas Dataframe
+ preprocessors (array of dicts): List of preprocessors with parameters ::
+
+ [
+ {'text': {'text_cols': []}}
+ ]
+
+ Preprocessors:
+ text
+ * text_cols(array): List of text columns to process
+ * text_metrics ['mean_length', 'unique_count', 'separation_score'] : Calculate metrics for text fields and after vectorize(separation_score)
+ * tokenize (dict): Default - {'max_text_len': 30000, 'tokenizers': ['sent'], 'remove_chars': '○•'}
+ * vectorize ('en_use_lg'|'hashing'|'en_use_md'|'en_use_cmlm_md'|'en_use_cmlm_lg'): See see https://github.com/MartinoMensio/spacy-universal-sentence-encoder
+ * dim_reduction(dict): Generate features based on vectors. See https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html ::
+
+ {
+ 'alg_name': 'PCA'|'t-SNE',
+ 'args': {'n_components': 2} #Number of components to keep.
+ }
+
+ * output_prefix (str): Prefix for generated columns. Format name: {prefix}_{colname}_{num}
+
+ * calc_distance ['none', 'cosine', 'cityblock', 'euclidean', 'haversine', 'l1', 'l2', 'manhattan', 'nan_euclidean'] | 'cosine' : See https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.distance_metrics.html#sklearn.metrics.pairwise.distance_metrics
+ * compare_pairs (array of dicts): When calc_distance is not none. ::
+
+ [
+ {'compare_cols': [{'dataset_idx': 0, 'cols': ['col1']}, {'dataset_idx': 1, 'cols': ['col2']}],
+ 'output_name':'cosine_col1_col2', 'params': {}
+ },
+ {'compare_cols': [{'dataset_idx': 0, 'cols': ['col3']}, {'dataset_idx': 1, 'cols': ['col4']}],
+ 'output_name':'cosine_col3_col4', 'params': {}
+ },
+ ]
+
+ * datasets: List of datasets to process, may be empty, so all fields takes from main dataset ::
+
+ [
+ {'path': 'path', 'keys': ['main_key', 'local_key'], 'text_metrics': ['separation_score', 'mean_length', 'unique_count']},
+ {'path': 'path1', 'keys': ['main_key1', 'local_key1']}
+ ]
+
+ Returns:
+ {
+ 'result': True,
+ 'data': 'data in input format'
+ }
+
+ """
+ return self.get_runner(locally).execute_one_provider('preprocess_data', data, preprocessors, locally)
+
@show_result
def train(self):
"""Starts training session based on context state.
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 2a55df5e..394c873f 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -12,6 +12,9 @@ def __init__(self, ctx):
def import_data(self, source=None):
return AugerDataset(self.ctx).create(source=source)
+ def preprocess_data(self, data, preprocessors, locally=False):
+ return AugerDataset(self.ctx).preprocess_data(data, preprocessors, locally)
+
def train(self):
return AugerExperiment(self.ctx).start()
diff --git a/a2ml/api/auger/dataset.py b/a2ml/api/auger/dataset.py
index dc0b6fee..4e7ffc6a 100644
--- a/a2ml/api/auger/dataset.py
+++ b/a2ml/api/auger/dataset.py
@@ -73,3 +73,25 @@ def download(self, project, name, path_to_download):
file_name = DataSet(self.ctx, project, name).download(path_to_download)
self.ctx.log('Downloaded dataset %s to %s' % (name, file_name))
return {'dowloaded': name, 'file': file_name}
+
+ def preprocess_data(self, data, preprocessors, locally):
+ if locally:
+ return self._preprocess_data_locally(data, preprocessors)
+ else:
+ raise Exception("preprocess_data supported with locally=True only.")
+
+ def _preprocess_data_locally(self, data, preprocessors):
+ from auger_ml.preprocessors.text import TextPreprocessor
+
+ res = data
+ for p in preprocessors:
+ name = list(p.keys())[0]
+ params = list(p.values())[0]
+ if name != 'text':
+ raise Exception("Only text preprocessor supported.")
+
+ tp = TextPreprocessor(params)
+ res = tp.fit_transform(res)
+
+ return res
+
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 0a6f572f..2b11a88c 100644
--- a/setup.py
+++ b/setup.py
@@ -86,7 +86,7 @@ def run(self):
'google-cloud-automl'
],
'predict': [
- 'auger.ai.predict==1.0.79'
+ 'auger.ai.predict==1.0.80'
]
}
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-08-12T20:59:36 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-587 | 858d43f5475354996519821e2f99d8a038440a23 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index 93dae163..0630b51e 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.32'
+__version__ = '1.0.33'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 09fe4116..aa0e4284 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -221,7 +221,8 @@ def deploy(self, model_id, locally=False, review=True, provider=None,
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
- threshold=None, output=None, no_features_in_result = None, locally=False, provider=None):
+ threshold=None, score=False, score_true_data=None,
+ output=None, no_features_in_result = None, locally=False, provider=None):
"""Predict results with new data against deployed model. Predictions are stored next to the file with data to be predicted on. The file name will be appended with suffix _predicted.
Note:
@@ -235,6 +236,8 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
columns(list): list of column names if data is array of records
predicted_at: Predict data date. Use for review of historical data.
threshold(float): For classification models only. This will return class probabilities with response.
+ score(bool): Calculate scores for predicted results.
+ score_true_data(str, pandas.DataFrame, dict): Data with true values to calculate scores. If missed, target from filename used for true values.
output(str): Output csv file path.
no_features_in_result(bool) : Do not return feature columns in prediction result. False by default
locally(bool, str): Predicts using a local model with auger.ai.predict if True, on the Provider Cloud if False. If set to "docker", then docker image used to run the model
@@ -301,7 +304,9 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
# predictions are stored in rv[provider]['data']['predicted']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id,
+ threshold, locally, data, columns, predicted_at, output, no_features_in_result,
+ score, score_true_data )
@show_result
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False, provider=None):
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index 2be596f0..b58e2132 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -66,7 +66,8 @@ def deploy(self, model_id, locally=False, review=True, provider=None,
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
- threshold=None, output=None, no_features_in_result=None, locally=False, provider=None):
+ threshold=None, score=False, score_true_data=None,
+ output=None, no_features_in_result=None, locally=False, provider=None):
"""Predict results with new data against deployed model. Predictions are stored next to the file with data to be predicted on. The file name will be appended with suffix _predicted.
Note:
@@ -80,6 +81,8 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
columns(list): list of column names if data is array of records
predicted_at: Predict data date. Use for review of historical data.
threshold(float): For classification models only. This will return class probabilities with response.
+ score(bool): Calculate scores for predicted results.
+ score_true_data(str, pandas.DataFrame, dict): Data with true values to calculate scores. If missed, target from filename used for true values.
output(str): Output csv file path.
no_features_in_result(bool) : Do not return feature columns in prediction result. False by default
locally(bool, str): Predicts using a local model with auger.ai.predict if True, on the Provider Cloud if False. If set to "docker", then docker image used to run the model
@@ -146,7 +149,9 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
# predictions are stored in rv[provider]['data']['predicted']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id,
+ threshold, locally, data, columns, predicted_at, output, no_features_in_result,
+ score, score_true_data )
@show_result
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False, provider=None):
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 3e998b5c..2a55df5e 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -21,9 +21,11 @@ def evaluate(self, run_id = None):
def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None ):
return AugerModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score, data_path)
- def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None, no_features_in_result=None):
+ def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None,
+ predicted_at=None, output=None, no_features_in_result=None, score=False, score_true_data=None):
return AugerModel(self.ctx).predict(
- model_id, filename, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
+ model_id, filename, threshold, locally, data, columns, predicted_at, output,
+ no_features_in_result, score, score_true_data)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
return AugerModel(self.ctx).actuals(
diff --git a/a2ml/api/auger/config.py b/a2ml/api/auger/config.py
index 5331a4f8..0a9e47a5 100644
--- a/a2ml/api/auger/config.py
+++ b/a2ml/api/auger/config.py
@@ -7,12 +7,12 @@ def set_data_set(self, name, source=None, validation=False):
#TODO: add more providers later
if validation:
self.ctx.config.set('experiment/validation_dataset', name)
- if self.ctx.use_auger_cloud():
+ if self.ctx.use_auger_cloud() and 'azure' in self.ctx.get_providers():
self.ctx.config.set('experiment/validation_dataset', name, "azure")
else:
- print("set_data_set: %s"%self.ctx.use_auger_cloud())
+ #print("set_data_set: %s"%self.ctx.use_auger_cloud())
self.ctx.config.set('dataset', name)
- if self.ctx.use_auger_cloud():
+ if self.ctx.use_auger_cloud() and 'azure' in self.ctx.get_providers():
self.ctx.config.set('dataset', name, "azure")
self.ctx.config.write_all()
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index e3232201..42057f2b 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -49,7 +49,8 @@ def check_endpoint(self, props=None):
return is_endpoint
- def predict(self, records, features, threshold=None, file_url=None, predicted_at=None, no_features_in_result=None):
+ def predict(self, records, features, threshold=None, file_url=None, predicted_at=None,
+ no_features_in_result=None, score=False, score_true_data=None):
if self.object_id is None:
raise AugerException('Please provide Auger Pipeline id')
@@ -73,8 +74,9 @@ def predict(self, records, features, threshold=None, file_url=None, predicted_at
prediction_api = AugerPredictionApi(self.ctx, self, use_endpoint=self.check_endpoint(props))
prediction_properties = \
prediction_api.create(records, features, threshold=threshold, file_url=file_url,
- predicted_at=predicted_at, no_features_in_result=no_features_in_result)
-
+ predicted_at=predicted_at, no_features_in_result=no_features_in_result,
+ score=score, score_true_data=score_true_data)
+
return prediction_properties.get('result')
def actual(self, records, features, actuals_at, actuals_path, actual_date_column):
diff --git a/a2ml/api/auger/impl/cloud/prediction.py b/a2ml/api/auger/impl/cloud/prediction.py
index c9c76704..81f50387 100644
--- a/a2ml/api/auger/impl/cloud/prediction.py
+++ b/a2ml/api/auger/impl/cloud/prediction.py
@@ -14,7 +14,8 @@ def __init__(self, ctx, pipeline_api, use_endpoint=False):
self.parent_id_name = "endpoint_id"
self._set_api_request_path("AugerEndpointPredictionApi")
- def create(self, records, features, threshold=None, file_url=None, predicted_at=None, no_features_in_result=None):
+ def create(self, records, features, threshold=None, file_url=None, predicted_at=None,
+ no_features_in_result=None, score=False, score_true_data=None):
params = {
'records': records,
'features': features,
@@ -35,5 +36,11 @@ def create(self, records, features, threshold=None, file_url=None, predicted_at=
if no_features_in_result is not None:
params['no_features_in_result'] = no_features_in_result
-
+
+ if score:
+ params['score'] = score
+
+ if score_true_data:
+ params['score_true_data'] = score_true_data
+
return self._call_create(params, ['requested', 'running'])
diff --git a/a2ml/api/auger/impl/cloud/rest_api.py b/a2ml/api/auger/impl/cloud/rest_api.py
index 58dcb9a7..fd1f39ae 100644
--- a/a2ml/api/auger/impl/cloud/rest_api.py
+++ b/a2ml/api/auger/impl/cloud/rest_api.py
@@ -25,11 +25,18 @@ def call_ex(self, method, params={}):
if params.get('id') and not method.startswith('create_'):
oid = params['id']
del params['id']
- #print(method, oid, params)
- return getattr(self.hub_client, method)(oid, **params)
+ print(method, oid, params)
+ res = getattr(self.hub_client, method)(oid, **params)
else:
- #print(method, params)
- return getattr(self.hub_client, method)(**params)
+ if method == 'create_endpoint_prediction' or method == 'create_endpoint_actual':
+ print(method, params.keys())
+ else:
+ print(method, params)
+
+ res = getattr(self.hub_client, method)(**params)
+
+ #print(res)
+ return res
def call(self, method, params={}):
result = self.call_ex(method, params)
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index c6afa0e7..eeee9960 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -30,11 +30,13 @@ def review(self, model_id):
def undeploy(self, model_id, locally=False):
return ModelUndeploy(self.ctx, self.project).execute(model_id, locally)
- def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None, no_features_in_result=None):
+ def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None,
+ output=None, no_features_in_result=None, score=False, score_true_data=None):
if locally:
self.deploy(model_id, locally)
- return ModelPredict(self.ctx).execute(filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
+ return ModelPredict(self.ctx).execute(filename, model_id, threshold, locally, data, columns,
+ predicted_at, output, no_features_in_result, score, score_true_data)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
if locally:
diff --git a/a2ml/api/auger/impl/mparts/predict.py b/a2ml/api/auger/impl/mparts/predict.py
index 005de202..ec19a7a2 100644
--- a/a2ml/api/auger/impl/mparts/predict.py
+++ b/a2ml/api/auger/impl/mparts/predict.py
@@ -23,7 +23,8 @@ def __init__(self, ctx):
self.ctx = ctx
def execute(self, filename, model_id, threshold=None, locally=False, data=None, columns=None,
- predicted_at=None, output=None, no_features_in_result=None):
+ predicted_at=None, output=None, no_features_in_result=None,
+ score=False, score_true_data=None):
if filename and not (filename.startswith("http:") or filename.startswith("https:")) and\
not fsclient.is_s3_path(filename):
self.ctx.log('Predicting on data in %s' % filename)
@@ -31,11 +32,14 @@ def execute(self, filename, model_id, threshold=None, locally=False, data=None,
if locally:
if locally == "docker":
- predicted = self._predict_locally_in_docker(filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result)
+ predicted = self._predict_locally_in_docker(filename, model_id, threshold, data, columns, predicted_at, output,
+ no_features_in_result, score, score_true_data)
else:
- predicted = self._predict_locally(filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result)
+ predicted = self._predict_locally(filename, model_id, threshold, data, columns, predicted_at, output,
+ no_features_in_result, score, score_true_data)
else:
- predicted = self._predict_on_cloud(filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result)
+ predicted = self._predict_on_cloud(filename, model_id, threshold, data, columns, predicted_at, output,
+ no_features_in_result, score, score_true_data)
return predicted
@@ -98,7 +102,8 @@ def _check_model_project(self, pipeline_api):
raise AugerException("Project name: %s in config.yml is different from model project name: %s. Please change name in config.yml."%(
self.ctx.config.get('name'), model_project_name))
- def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result):
+ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predicted_at,
+ output, no_features_in_result, score, score_true_data):
records, features, file_url, is_pandas_df = self._process_input(filename, data, columns)
temp_file = None
ds_result = None
@@ -107,7 +112,8 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
else:
pipeline_api = AugerPipelineApi(self.ctx, None, model_id)
predictions = pipeline_api.predict(records, features, threshold=threshold, file_url=file_url,
- predicted_at=predicted_at, no_features_in_result=no_features_in_result)
+ predicted_at=predicted_at, no_features_in_result=no_features_in_result,
+ score=score, score_true_data=score_true_data)
try:
ds_result = DataFrame.create_dataframe(predictions.get('signed_prediction_url'),
@@ -137,7 +143,8 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
if temp_file:
fsclient.remove_file(temp_file)
- def _predict_locally(self, filename_arg, model_id, threshold, data, columns, predicted_at, output, no_features_in_result):
+ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, predicted_at,
+ output, no_features_in_result, score, score_true_data):
from auger_ml.model_exporter import ModelExporter
is_model_loaded, model_path = ModelDeploy(self.ctx, None).verify_local_model(model_id)
@@ -148,6 +155,11 @@ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, pre
if columns is not None:
columns = list(columns)
+ if score and score_true_data is None:
+ options = fsclient.read_json_file(os.path.join(model_path, "options.json"))
+ ds = DataFrame.create_dataframe(filename_arg, data, [options['targetFeature']])
+ score_true_data = ds.df
+
res, options = ModelExporter({}).predict_by_model_to_ds(model_path,
path_to_predict=filename_arg, records=data, features=columns,
threshold=threshold, no_features_in_result=no_features_in_result)
@@ -158,16 +170,25 @@ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, pre
if isinstance(data, pd.DataFrame):
ds_result.from_pandas = True
- return ModelHelper.save_prediction(ds_result,
+ predictions = ModelHelper.save_prediction(ds_result,
prediction_id = None, json_result=False, count_in_result=False, prediction_date=predicted_at,
model_path=model_path, model_id=model_id, output=output)
+ if not score:
+ return predictions
+
+ scores = ModelExporter({}).score_by_model(model_path, predictions=predictions,
+ test_path = score_true_data)
+
+ return {'predicted': predictions, 'scores': scores}
+
# return ModelExporter({}).predict_by_model(model_path=model_path,
# path_to_predict=filename_arg, records=data, features=columns,
# threshold=threshold, prediction_date=predicted_at,
# no_features_in_result=no_features_in_result) #, output=output)
- def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, columns, predicted_at, output, no_features_in_result):
+ def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, columns, predicted_at,
+ output, no_features_in_result, score, score_true_data):
model_deploy = ModelDeploy(self.ctx, None)
is_model_loaded, model_path = model_deploy.verify_local_model(model_id, add_model_folder=False)
if not is_model_loaded:
@@ -180,7 +201,7 @@ def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, co
filename = os.path.join(self.ctx.config.get_path(), '.augerml', 'predict_data.csv')
ds.saveToCsvFile(filename, compression=None)
- predicted = self._docker_run_predict(filename, threshold, model_path)
+ predicted = self._docker_run_predict(filename, threshold, model_path, score, score_true_data)
if not filename_arg:
ds_result = DataFrame.create_dataframe(predicted)
@@ -198,13 +219,14 @@ def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, co
return predicted
- def _docker_run_predict(self, filename, threshold, model_path):
+ def _docker_run_predict(self, filename, threshold, model_path, score, score_true_data):
cluster_settings = AugerClusterApi.get_cluster_settings(self.ctx)
docker_tag = cluster_settings.get('kubernetes_stack')
predict_file = os.path.basename(filename)
data_path = os.path.abspath(os.path.dirname(filename))
model_path = os.path.abspath(model_path)
+ #TODO: support score, score_true_data
call_args = "--verbose=True --path_to_predict=./model_data/%s %s" % \
(predict_file, "--threshold=%s" % str(threshold) if threshold else '')
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index 84f8c13a..d9011329 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -21,10 +21,12 @@ def deploy(self, project, model_id, locally, review, name, algorithm, score, dat
@error_handler
@authenticated
- @with_project(autocreate=False)
- def predict(self, project, filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result):
- predicted = Model(self.ctx, project).predict(
- filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
+ #@with_project(autocreate=False)
+ def predict(self, filename, model_id, threshold, locally, data, columns, predicted_at, output,
+ no_features_in_result, score, score_true_data):
+ predicted = Model(self.ctx, project=None).predict(
+ filename, model_id, threshold, locally, data, columns, predicted_at, output,
+ no_features_in_result, score, score_true_data)
if filename:
self.ctx.log('Predictions stored in %s' % predicted)
diff --git a/a2ml/api/utils/dataframe.py b/a2ml/api/utils/dataframe.py
index caf7c45b..ca737052 100644
--- a/a2ml/api/utils/dataframe.py
+++ b/a2ml/api/utils/dataframe.py
@@ -50,6 +50,9 @@ def create_dataframe(data_path=None, records=None, features=None):
elif records is not None and isinstance(records, pd.DataFrame):
ds = DataFrame({})
ds.df = records
+ if features:
+ ds.df = ds.df[features]
+
ds.from_pandas = True
else:
ds = DataFrame({})
diff --git a/a2ml/cmdl/commands/cmd_deploy.py b/a2ml/cmdl/commands/cmd_deploy.py
index cd050865..0920c958 100644
--- a/a2ml/cmdl/commands/cmd_deploy.py
+++ b/a2ml/cmdl/commands/cmd_deploy.py
@@ -4,10 +4,10 @@
@click.command('deploy', short_help='Deploy trained model.')
[email protected]('model-id', required=False, type=click.STRING)
@click.option('--provider', '-p', type=click.Choice(['auger','azure','external']), required=False,
help='Cloud AutoML Provider.')
[email protected]('model-id', required=False, type=click.STRING)
[email protected]('--locally', is_flag=True, default=False,
[email protected]('--locally', '-l', is_flag=True, default=False,
help='Download and deploy trained model locally.')
@click.option('--no-review', is_flag=True, default=False,
help='Do not support model review based on actual data.')
diff --git a/a2ml/cmdl/commands/cmd_model.py b/a2ml/cmdl/commands/cmd_model.py
index 8fedc0a6..c9291fbc 100644
--- a/a2ml/cmdl/commands/cmd_model.py
+++ b/a2ml/cmdl/commands/cmd_model.py
@@ -28,15 +28,19 @@ def cmdl(ctx):
@pass_context
def deploy(ctx, provider, model_id, locally, no_review, name, algorithm, score, data_path):
"""Deploy trained model."""
- A2MLModel(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score, data_path=data_path)
+ A2MLModel(ctx, provider).deploy(model_id, locally=locally, review=not no_review,
+ name=name, algorithm=algorithm, score=score, data_path=data_path)
@click.command('predict', short_help='Predict with deployed model.')
[email protected]('model-id', required=True, type=click.STRING)
@click.argument('filename', required=True, type=click.STRING)
@click.option('--threshold', '-t', default=None, type=float,
help='Threshold.')
[email protected]('--model-id', '-m', type=click.STRING, required=True,
- help='Deployed model id.')
[email protected]('--locally', is_flag=True, default=False,
[email protected]('--score', '-s', is_flag=True, default=False,
+ help='Calculate scores for predicted results.')
[email protected]('--score_true_path', type=click.STRING, required=False,
+ help='Path to true values to calculate scores. If missed, target from filename used for true values.')
[email protected]('--locally', '-l', is_flag=True, default=False,
help='Predict locally using auger.ai.predict package.')
@click.option('--docker', is_flag=True, default=False,
help='Predict locally using Docker image to run model.')
@@ -50,12 +54,13 @@ def predict(ctx, provider, filename, model_id, threshold, locally, docker, outpu
if docker:
locally = "docker"
- A2MLModel(ctx, provider).predict(filename=filename, model_id=model_id, threshold=threshold, locally=locally, output=output)
+ A2MLModel(ctx, provider).predict(model_id, filename=filename,
+ threshold=threshold, score=score, score_true_data=score_true_path,
+ locally=locally, output=output)
@click.command('actuals', short_help='Send actual values for deployed model. Needed for review and monitoring.')
[email protected]('model-id', required=True, type=click.STRING)
@click.argument('filename', required=True, type=click.STRING)
[email protected]('--model-id', '-m', type=click.STRING, required=True,
- help='Deployed model id.')
@click.option('--provider', '-p', type=click.Choice(['auger','azure']), required=False,
help='Cloud AutoML Provider.')
@click.option('--locally', is_flag=True, default=False,
@@ -98,8 +103,7 @@ def undeploy(ctx, provider, model_id, locally):
A2MLModel(ctx, provider).undeploy(model_id, locally)
@click.command('delete_actuals', short_help='Delete files with actuals and predcitions locally or from specified provider(s).')
[email protected]('--model-id', '-m', type=click.STRING, required=True,
- help='Deployed model id.')
[email protected]('model-id', required=True, type=click.STRING)
@click.option('--with-predictions', is_flag=True, default=False,
help='Remove predictions.')
@click.option('--begin-date', '-b', type=click.STRING, required=False,
diff --git a/a2ml/cmdl/commands/cmd_predict.py b/a2ml/cmdl/commands/cmd_predict.py
index 526c31cb..b27c00aa 100644
--- a/a2ml/cmdl/commands/cmd_predict.py
+++ b/a2ml/cmdl/commands/cmd_predict.py
@@ -4,24 +4,28 @@
@click.command('predict', short_help='Predict with deployed model.')
[email protected]('--provider', '-p', type=click.Choice(['auger','azure']), required=False,
- help='Cloud AutoML Provider.')
[email protected]('model-id', required=True, type=click.STRING)
@click.argument('filename', required=True, type=click.STRING)
@click.option('--threshold', '-t', default=None, type=float,
help='Threshold.')
[email protected]('--model-id', '-m', type=click.STRING, required=False,
- help='Deployed model id.')
[email protected]('--locally', is_flag=True, default=False,
[email protected]('--score', '-s', is_flag=True, default=False,
+ help='Calculate scores for predicted results.')
[email protected]('--score_true_path', type=click.STRING, required=False,
+ help='Path to true values to calculate scores. If missed, target from filename used for true values.')
[email protected]('--locally', '-l', is_flag=True, default=False,
help='Predict locally using auger.ai.predict package.')
@click.option('--docker', is_flag=True, default=False,
help='Predict locally using Docker image to run model.')
@click.option('--output', '-o', type=click.STRING, required=False,
help='Output csv file path.')
[email protected]('--provider', '-p', type=click.Choice(['auger','azure']), required=False,
+ help='Cloud AutoML Provider.')
@pass_context
def cmdl(ctx, provider, filename, model_id, threshold, locally, docker, output):
"""Predict with deployed model."""
ctx.setup_logger(format='')
if docker:
locally = "docker"
- A2ML(ctx, provider).predict(
- filename=filename, model_id=model_id, threshold=threshold, locally=locally, output=output)
+ A2ML(ctx, provider).predict( model_id, filename=filename,
+ threshold=threshold, score=score, score_true_data=score_true_path,
+ locally=locally, output=output)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-08-08T19:21:50 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-582 | 4963565d278901b20ce8715959270a36d4c2d053 | diff --git a/a2ml/api/roi/var_names_fetcher.py b/a2ml/api/roi/var_names_fetcher.py
index 2bbeb87c..355c8418 100644
--- a/a2ml/api/roi/var_names_fetcher.py
+++ b/a2ml/api/roi/var_names_fetcher.py
@@ -36,6 +36,9 @@ def evaluate_unary_op_node(self, node):
def evaluate_func_node(self, node):
list(map(lambda node: self.evaluate(node), node.arg_nodes))
+ def evaluate_tuple_node(self, node):
+ list(map(lambda node: self.evaluate(node), node.item_nodes))
+
def evaluate_top_node(self, node):
list(map(lambda n: self.evaluate(n), node.child_nodes()))
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-07-05T18:24:00 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-578 | 2486bd45dfae8cf149e8dec88c18ba63b7f1141a | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index ac1a0a5d..cf42dc04 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.28'
+__version__ = '1.0.29'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 32308b80..09fe4116 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -314,7 +314,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
:widths: 50 50 50
:header-rows: 1
- * - target: predicted value. If missed - predict called automatically
+ * - predicted( or target): predicted value. If missed - predict called automatically
- actual
- baseline_target: predicted value for baseline model (OPTIONAL)
* - Iris-setosa
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index 9730d888..2be596f0 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -159,7 +159,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
:widths: 50 50 50
:header-rows: 1
- * - target: predicted value. If missed - predict called automatically
+ * - predicted ( or target): predicted value. If missed - predict called automatically
- actual
- baseline_target: predicted value for baseline model (OPTIONAL)
* - Iris-setosa
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 46130f39..d641dc8c 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -169,8 +169,11 @@ def add_actuals(
actuals_count = ds_actuals.count()
ds_actuals.df.rename(columns={"actual": 'a2ml_actual'}, inplace=True)
+ if 'predicted' in ds_actuals.columns and not self.target_feature in ds_actuals.columns:
+ ds_actuals.df = ds_actuals.df.rename(columns={'predicted': self.target_feature})
+
if provider is not None and (do_predict or not self.target_feature in ds_actuals.columns):
- logging.info("Actual data missing predicted value column: %s. Call predict with features from actual data: %s"%(self.target_feature, ds_actuals.columns))
+ logging.info("Actual data missing 'predicted' column and predicted value column: %s. Call predict with features from actual data: %s"%(self.target_feature, ds_actuals.columns))
self._do_predict(ctx, ds_actuals, provider)
result = self._do_score_actual(ds_actuals.df)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-07-02T19:47:17 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-560 | 6cf1f1c1e471b7101fbf8f510e8ef3cec6bacc88 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index dce8b34b..fa7c0d49 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.19'
+__version__ = '1.0.20'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 0a727c3b..921f8892 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -221,7 +221,7 @@ def deploy(self, model_id, locally=False, review=True, provider=None,
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
- threshold=None, output=None, locally=False, provider=None):
+ threshold=None, output=None, no_features_in_result = None, locally=False, provider=None):
"""Predict results with new data against deployed model. Predictions are stored next to the file with data to be predicted on. The file name will be appended with suffix _predicted.
Note:
@@ -236,7 +236,8 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
predicted_at: Predict data date. Use for review of historical data.
threshold(float): For classification models only. This will return class probabilities with response.
output(str): Output csv file path.
- locally(bool): Predicts using a local model if True, on the Provider Cloud if False.
+ no_features_in_result(bool) : Do not return feature columns in prediction result. False by default
+ locally(bool, str): Predicts using a local model with auger.ai.predict if True, on the Provider Cloud if False. If set to "docker", then docker image used to run the model
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider set in costructor or config.
Returns:
@@ -287,7 +288,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
# predictions are returned as rv[provider]['data']['predicted']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id, threshold, locally, data, columns, predicted_at, output)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
@show_result
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False, provider=None):
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index da68a419..4df4de90 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -66,7 +66,7 @@ def deploy(self, model_id, locally=False, review=True, provider=None,
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
- threshold=None, output=None, locally=False, provider=None):
+ threshold=None, output=None, no_features_in_result=None, locally=False, provider=None):
"""Predict results with new data against deployed model. Predictions are stored next to the file with data to be predicted on. The file name will be appended with suffix _predicted.
Note:
@@ -81,7 +81,8 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
predicted_at: Predict data date. Use for review of historical data.
threshold(float): For classification models only. This will return class probabilities with response.
output(str): Output csv file path.
- locally(bool): Predicts using a local model if True, on the Provider Cloud if False.
+ no_features_in_result(bool) : Do not return feature columns in prediction result. False by default
+ locally(bool, str): Predicts using a local model with auger.ai.predict if True, on the Provider Cloud if False. If set to "docker", then docker image used to run the model
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider set in costructor or config.
Returns:
@@ -132,7 +133,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
# predictions are returned as rv[provider]['data']['predicted']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id, threshold, locally, data, columns, predicted_at, output)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('predict', filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
@show_result
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False, provider=None):
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 5bc25d38..3e998b5c 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -21,9 +21,9 @@ def evaluate(self, run_id = None):
def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None ):
return AugerModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score, data_path)
- def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
+ def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None, no_features_in_result=None):
return AugerModel(self.ctx).predict(
- model_id, filename, threshold, locally, data, columns, predicted_at, output)
+ model_id, filename, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
return AugerModel(self.ctx).actuals(
diff --git a/a2ml/api/auger/impl/cloud/experiment_session.py b/a2ml/api/auger/impl/cloud/experiment_session.py
index 7b6a474d..2668f8fe 100644
--- a/a2ml/api/auger/impl/cloud/experiment_session.py
+++ b/a2ml/api/auger/impl/cloud/experiment_session.py
@@ -72,6 +72,8 @@ def get_leaderboard(self):
'{0:.4f}'.format(item.get('score_value'))
}
review_metric = self.ctx.config.get('review/metric')
+ if review_metric == 'roi':
+ review_metric = 'mda'
if review_metric:
l_item[review_metric] = \
'{0:.4f}'.format(item.get('raw_data', {}).get('all_scores', {}).get(review_metric, 0.0))
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index 8149b184..e3232201 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -49,7 +49,7 @@ def check_endpoint(self, props=None):
return is_endpoint
- def predict(self, records, features, threshold=None, file_url=None, predicted_at=None):
+ def predict(self, records, features, threshold=None, file_url=None, predicted_at=None, no_features_in_result=None):
if self.object_id is None:
raise AugerException('Please provide Auger Pipeline id')
@@ -72,7 +72,9 @@ def predict(self, records, features, threshold=None, file_url=None, predicted_at
prediction_api = AugerPredictionApi(self.ctx, self, use_endpoint=self.check_endpoint(props))
prediction_properties = \
- prediction_api.create(records, features, threshold=threshold, file_url=file_url, predicted_at=predicted_at)
+ prediction_api.create(records, features, threshold=threshold, file_url=file_url,
+ predicted_at=predicted_at, no_features_in_result=no_features_in_result)
+
return prediction_properties.get('result')
def actual(self, records, features, actuals_at, actuals_path, actual_date_column):
diff --git a/a2ml/api/auger/impl/cloud/pipeline_file.py b/a2ml/api/auger/impl/cloud/pipeline_file.py
index ddeee2ec..bb7e65db 100644
--- a/a2ml/api/auger/impl/cloud/pipeline_file.py
+++ b/a2ml/api/auger/impl/cloud/pipeline_file.py
@@ -31,7 +31,7 @@ def download(self, url, path_to_download, trial_id):
return file_name
def _get_status_name(self):
- return 's3_model_path_status'
+ return 'signed_s3_model_path_status'
def _log_status(self, status):
if status is None:
diff --git a/a2ml/api/auger/impl/cloud/prediction.py b/a2ml/api/auger/impl/cloud/prediction.py
index 67f85dd5..c9c76704 100644
--- a/a2ml/api/auger/impl/cloud/prediction.py
+++ b/a2ml/api/auger/impl/cloud/prediction.py
@@ -14,7 +14,7 @@ def __init__(self, ctx, pipeline_api, use_endpoint=False):
self.parent_id_name = "endpoint_id"
self._set_api_request_path("AugerEndpointPredictionApi")
- def create(self, records, features, threshold=None, file_url=None, predicted_at=None):
+ def create(self, records, features, threshold=None, file_url=None, predicted_at=None, no_features_in_result=None):
params = {
'records': records,
'features': features,
@@ -33,4 +33,7 @@ def create(self, records, features, threshold=None, file_url=None, predicted_at=
if predicted_at:
params['predicted_at'] = str(predicted_at)
+ if no_features_in_result is not None:
+ params['no_features_in_result'] = no_features_in_result
+
return self._call_create(params, ['requested', 'running'])
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index 3ece907e..c6afa0e7 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -30,23 +30,21 @@ def review(self, model_id):
def undeploy(self, model_id, locally=False):
return ModelUndeploy(self.ctx, self.project).execute(model_id, locally)
- def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
+ def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None, no_features_in_result=None):
if locally:
self.deploy(model_id, locally)
- return ModelPredict(self.ctx).execute(filename, model_id, threshold, locally, data, columns, predicted_at, output)
+ return ModelPredict(self.ctx).execute(filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
if locally:
- is_loaded, model_path, model_name = ModelDeploy(self.ctx, self.project).\
- verify_local_model(model_id)
+ is_loaded, model_path = ModelDeploy(self.ctx, self.project).verify_local_model(model_id)
if not is_loaded:
raise AugerException('Model should be deployed locally.')
- model_path, model_existed = ModelPredict(self.ctx)._extract_model(model_name)
params = {
- 'model_path': os.path.join(model_path, "model"),
+ 'model_path': model_path,
'roi': {
'filter': str(self.ctx.config.get('review/roi/filter')),
'revenue': str(self.ctx.config.get('review/roi/revenue')),
@@ -67,28 +65,22 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
def delete_actuals(self, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
if locally:
- is_loaded, model_path, model_name = ModelDeploy(self.ctx, self.project).\
- verify_local_model(model_id)
-
+ is_loaded, model_path = ModelDeploy(self.ctx, self.project).verify_local_model(model_id)
if not is_loaded:
raise AugerException('Model should be deployed locally.')
- model_path, model_existed = ModelPredict(self.ctx)._extract_model(model_name)
- return ModelReview({'model_path': os.path.join(model_path, "model")}).delete_actuals(
+ return ModelReview({'model_path': model_path}).delete_actuals(
with_predictions=with_predictions, begin_date=begin_date, end_date=end_date)
else:
return ModelDeleteActual(self.ctx).execute(model_id, with_predictions, begin_date, end_date)
def build_review_data(self, model_id, locally, output):
if locally:
- is_loaded, model_path, model_name = ModelDeploy(self.ctx, self.project).\
- verify_local_model(model_id)
-
+ is_loaded, model_path = ModelDeploy(self.ctx, self.project).verify_local_model(model_id)
if not is_loaded:
raise AugerException('Model should be deployed locally.')
- model_path, model_existed = ModelPredict(self.ctx)._extract_model(model_name)
- return ModelReview({'model_path': os.path.join(model_path, "model")}).build_review_data(
+ return ModelReview({'model_path': model_path}).build_review_data(
data_path=self.ctx.config.get("source"), output=output)
else:
raise Exception("Not Implemented.")
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index be68bfcc..9eeeaf2a 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -23,7 +23,7 @@ def __init__(self, ctx, project):
def execute(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None):
if locally:
- return self.deploy_model_locally(model_id, review, name, data_path)
+ return self.deploy_model_locally(model_id, review, name, data_path, locally)
else:
return self.deploy_model_in_cloud(model_id, review, name, algorithm, score, data_path)
@@ -159,36 +159,65 @@ def deploy_model_in_cloud(self, model_id, review, name, algorithm, score, data_p
return pipeline_properties.get('id')
- def deploy_model_locally(self, model_id, review, name, data_path):
- is_loaded, model_path, model_name = self.verify_local_model(model_id)
+ def deploy_model_locally(self, model_id, review, name, data_path, locally):
+ is_loaded, model_path = self.verify_local_model(model_id)
#TODO: support review flag
if not is_loaded:
self.ctx.log('Downloading model %s' % model_id)
self.project.start()
+ models_path = os.path.join(self.ctx.config.get_path(), 'models')
pipeline_file_api = AugerPipelineFileApi(self.ctx, None)
pipeline_file_properties = pipeline_file_api.create(model_id)
downloaded_model_file = pipeline_file_api.download(
pipeline_file_properties['signed_s3_model_path'],
- model_path, model_id)
+ models_path, model_id)
self.ctx.log('Downloaded model to %s' % downloaded_model_file)
- self.ctx.log('Pulling docker image required to predict')
- self._docker_pull_image()
+ if locally == 'docker':
+ self.ctx.log('Pulling docker image required to predict')
+ self._docker_pull_image()
+ else:
+ self.ctx.log('To run predict locally install a2ml[predict]')
else:
- self.ctx.log('Downloaded model is %s' % model_name)
+ self.ctx.log('Downloaded model is %s' % model_path)
return model_id
- def verify_local_model(self, model_id):
- model_path = os.path.join(self.ctx.config.get_path(), 'models')
- model_name = os.path.join(model_path, 'model-%s.zip' % model_id)
- is_exists = fsclient.is_folder_exists(os.path.join(model_path,"model-%s"%model_id))
- if not is_exists:
- is_exists = fsclient.is_file_exists(model_name)
- return is_exists, model_path, model_name
+ def get_local_model_paths(self, model_id):
+ models_path = os.path.join(self.ctx.config.get_path(), 'models')
+ model_zip_path = os.path.join(models_path, 'model-%s.zip' % model_id)
+ model_path = os.path.join(models_path,"model-%s"%model_id)
+
+ return model_path, model_zip_path
+
+ def verify_local_model(self, model_id, add_model_folder=True):
+ model_path, model_zip_path = self.get_local_model_paths(model_id)
+
+ is_exists = fsclient.is_folder_exists(model_path)
+ if not is_exists and fsclient.is_file_exists(model_zip_path):
+ self._extract_model(model_zip_path)
+
+ if add_model_folder:
+ model_path = os.path.join(model_path, "model")
+
+ is_exists = fsclient.is_folder_exists(model_path)
+
+ return is_exists, model_path
+
+ def _extract_model(self, model_name):
+ from zipfile import ZipFile
+
+ model_path = os.path.splitext(model_name)[0]
+ model_existed = os.path.exists(model_path)
+
+ if not model_existed:
+ with ZipFile(model_name, 'r') as zip_file:
+ zip_file.extractall(model_path)
+
+ return model_path, model_existed
def _docker_pull_image(self):
cluster_settings = AugerClusterApi.get_cluster_settings(self.ctx)
diff --git a/a2ml/api/auger/impl/mparts/predict.py b/a2ml/api/auger/impl/mparts/predict.py
index 510e4ce4..005de202 100644
--- a/a2ml/api/auger/impl/mparts/predict.py
+++ b/a2ml/api/auger/impl/mparts/predict.py
@@ -1,7 +1,6 @@
import os
import shutil
import subprocess
-from zipfile import ZipFile
import sys
import pandas as pd
@@ -23,16 +22,20 @@ def __init__(self, ctx):
super(ModelPredict, self).__init__()
self.ctx = ctx
- def execute(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
+ def execute(self, filename, model_id, threshold=None, locally=False, data=None, columns=None,
+ predicted_at=None, output=None, no_features_in_result=None):
if filename and not (filename.startswith("http:") or filename.startswith("https:")) and\
not fsclient.is_s3_path(filename):
self.ctx.log('Predicting on data in %s' % filename)
filename = os.path.abspath(filename)
if locally:
- predicted = self._predict_locally(filename, model_id, threshold, data, columns, predicted_at, output)
+ if locally == "docker":
+ predicted = self._predict_locally_in_docker(filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result)
+ else:
+ predicted = self._predict_locally(filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result)
else:
- predicted = self._predict_on_cloud(filename, model_id, threshold, data, columns, predicted_at, output)
+ predicted = self._predict_on_cloud(filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result)
return predicted
@@ -95,7 +98,7 @@ def _check_model_project(self, pipeline_api):
raise AugerException("Project name: %s in config.yml is different from model project name: %s. Please change name in config.yml."%(
self.ctx.config.get('name'), model_project_name))
- def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predicted_at, output):
+ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predicted_at, output, no_features_in_result):
records, features, file_url, is_pandas_df = self._process_input(filename, data, columns)
temp_file = None
ds_result = None
@@ -103,7 +106,8 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
ds_result = DataFrame.create_dataframe(None, [], features+[self.ctx.config.get('target')])
else:
pipeline_api = AugerPipelineApi(self.ctx, None, model_id)
- predictions = pipeline_api.predict(records, features, threshold=threshold, file_url=file_url, predicted_at=predicted_at)
+ predictions = pipeline_api.predict(records, features, threshold=threshold, file_url=file_url,
+ predicted_at=predicted_at, no_features_in_result=no_features_in_result)
try:
ds_result = DataFrame.create_dataframe(predictions.get('signed_prediction_url'),
@@ -121,11 +125,10 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
ds_result.loaded_columns = columns
ds_result.from_pandas = is_pandas_df
- is_model_loaded, model_path_1, model_name = \
- ModelDeploy(self.ctx, None).verify_local_model(model_id)
- model_path = None
- if is_model_loaded:
- model_path = os.path.join(model_path_1, "model-%s"%model_id, 'model')
+ # Save prediction in local model folder if exist
+ is_model_loaded, model_path = ModelDeploy(self.ctx, None).verify_local_model(model_id)
+ if not is_model_loaded:
+ model_path = None
return ModelHelper.save_prediction(ds_result,
prediction_id = None, json_result=False, count_in_result=False, prediction_date=predicted_at,
@@ -134,17 +137,42 @@ def _predict_on_cloud(self, filename, model_id, threshold, data, columns, predic
if temp_file:
fsclient.remove_file(temp_file)
- def _predict_locally(self, filename_arg, model_id, threshold, data, columns, predicted_at, output):
- model_deploy = ModelDeploy(self.ctx, None)
- is_model_loaded, model_path, model_name = \
- model_deploy.verify_local_model(model_id)
+ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, predicted_at, output, no_features_in_result):
+ from auger_ml.model_exporter import ModelExporter
+ is_model_loaded, model_path = ModelDeploy(self.ctx, None).verify_local_model(model_id)
if not is_model_loaded:
raise AugerException('Model isn\'t loaded locally. '
'Please use a2ml deploy command to download model.')
- model_path, model_existed = self._extract_model(model_name)
- model_options = fsclient.read_json_file(os.path.join(model_path, "model", "options.json"))
+ if columns is not None:
+ columns = list(columns)
+
+ res, options = ModelExporter({}).predict_by_model_to_ds(model_path,
+ path_to_predict=filename_arg, records=data, features=columns,
+ threshold=threshold, no_features_in_result=no_features_in_result)
+
+ ds_result = DataFrame({'data_path': None})
+ ds_result.df = res.df
+ ds_result.loaded_columns = columns
+ if isinstance(data, pd.DataFrame):
+ ds_result.from_pandas = True
+
+ return ModelHelper.save_prediction(ds_result,
+ prediction_id = None, json_result=False, count_in_result=False, prediction_date=predicted_at,
+ model_path=model_path, model_id=model_id, output=output)
+
+ # return ModelExporter({}).predict_by_model(model_path=model_path,
+ # path_to_predict=filename_arg, records=data, features=columns,
+ # threshold=threshold, prediction_date=predicted_at,
+ # no_features_in_result=no_features_in_result) #, output=output)
+
+ def _predict_locally_in_docker(self, filename_arg, model_id, threshold, data, columns, predicted_at, output, no_features_in_result):
+ model_deploy = ModelDeploy(self.ctx, None)
+ is_model_loaded, model_path = model_deploy.verify_local_model(model_id, add_model_folder=False)
+ if not is_model_loaded:
+ raise AugerException('Model isn\'t loaded locally. '
+ 'Please use a2ml deploy command to download model.')
filename = filename_arg
if not filename:
@@ -152,15 +180,7 @@ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, pre
filename = os.path.join(self.ctx.config.get_path(), '.augerml', 'predict_data.csv')
ds.saveToCsvFile(filename, compression=None)
- try:
- predicted = \
- self._docker_run_predict(filename, threshold, model_path)
- finally:
- # clean up unzipped model
- # if it wasn't unzipped before
- if not model_existed:
- fsclient.remove_folder(model_path)
- model_path = None
+ predicted = self._docker_run_predict(filename, threshold, model_path)
if not filename_arg:
ds_result = DataFrame.create_dataframe(predicted)
@@ -178,16 +198,6 @@ def _predict_locally(self, filename_arg, model_id, threshold, data, columns, pre
return predicted
- def _extract_model(self, model_name):
- model_path = os.path.splitext(model_name)[0]
- model_existed = os.path.exists(model_path)
-
- if not model_existed:
- with ZipFile(model_name, 'r') as zip_file:
- zip_file.extractall(model_path)
-
- return model_path, model_existed
-
def _docker_run_predict(self, filename, threshold, model_path):
cluster_settings = AugerClusterApi.get_cluster_settings(self.ctx)
docker_tag = cluster_settings.get('kubernetes_stack')
diff --git a/a2ml/api/auger/impl/mparts/undeploy.py b/a2ml/api/auger/impl/mparts/undeploy.py
index 1dc26592..c15e2af2 100644
--- a/a2ml/api/auger/impl/mparts/undeploy.py
+++ b/a2ml/api/auger/impl/mparts/undeploy.py
@@ -17,15 +17,11 @@ def __init__(self, ctx, project):
def execute(self, model_id, locally=False):
if locally:
- is_loaded, model_path, model_name = \
- ModelDeploy(self.ctx, self.project).verify_local_model(model_id)
- self.ctx.log("Undeploy model. Remove local model: %s" % model_name)
+ model_path, model_zip_path = ModelDeploy(self.ctx, self.project).get_local_model_paths(model_id)
+ self.ctx.log("Undeploy model. Remove local model: %s" % model_path)
- if is_loaded:
- fsclient.remove_file(model_name)
-
- model_folder = os.path.splitext(model_name)[0]
- fsclient.remove_folder(model_folder)
+ fsclient.remove_file(model_zip_path)
+ fsclient.remove_folder(model_path)
else:
pipeline_api = AugerPipelineApi(self.ctx, None, model_id)
if pipeline_api.check_endpoint():
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index 24bbd661..84f8c13a 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -22,9 +22,9 @@ def deploy(self, project, model_id, locally, review, name, algorithm, score, dat
@error_handler
@authenticated
@with_project(autocreate=False)
- def predict(self, project, filename, model_id, threshold, locally, data, columns, predicted_at, output):
+ def predict(self, project, filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result):
predicted = Model(self.ctx, project).predict(
- filename, model_id, threshold, locally, data, columns, predicted_at, output)
+ filename, model_id, threshold, locally, data, columns, predicted_at, output, no_features_in_result)
if filename:
self.ctx.log('Predictions stored in %s' % predicted)
diff --git a/a2ml/api/azure/a2ml.py b/a2ml/api/azure/a2ml.py
index 078a7c9b..ef265de8 100644
--- a/a2ml/api/azure/a2ml.py
+++ b/a2ml/api/azure/a2ml.py
@@ -26,12 +26,12 @@ def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None
return AzureModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score, data_path)
- def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
+ def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None, no_features_in_result=None):
from a2ml.api.azure.model import AzureModel
return AzureModel(self.ctx).predict(
filename, model_id, threshold=threshold, locally=locally, data=data, columns=columns,
- predicted_at=predicted_at, output=output)
+ predicted_at=predicted_at, output=output, no_features_in_result=no_features_in_result)
def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
from a2ml.api.azure.model import AzureModel
diff --git a/a2ml/api/azure/model.py b/a2ml/api/azure/model.py
index 5f7fb49f..8b473826 100644
--- a/a2ml/api/azure/model.py
+++ b/a2ml/api/azure/model.py
@@ -239,8 +239,8 @@ def get_df(data):
@error_handler
@authenticated
def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None,
- predicted_at=None, output=None, json_result=False, count_in_result=False, prediction_id=None
- ):
+ predicted_at=None, output=None, json_result=False, count_in_result=False, prediction_id=None,
+ no_features_in_result=None):
ds = DataFrame.create_dataframe(filename, data, columns)
model_path = self.ctx.config.get_model_path(model_id)
options = fsclient.read_json_file(os.path.join(model_path, "options.json"))
diff --git a/a2ml/api/model_review/model_helper.py b/a2ml/api/model_review/model_helper.py
index 355b49a1..d2244c38 100644
--- a/a2ml/api/model_review/model_helper.py
+++ b/a2ml/api/model_review/model_helper.py
@@ -100,7 +100,7 @@ def save_metric(metric_id, project_path, metric_name, metric_data):
# @staticmethod
# def _get_score_byname(scoring):
- # from sklearn.metrics.scorer import get_scorer
+ # from sklearn.metrics import get_scorer
# from sklearn.metrics import SCORERS
# #TODO: below metrics does not directly map to sklearn:
@@ -157,7 +157,7 @@ def save_metric(metric_id, project_path, metric_name, metric_data):
@staticmethod
def calculate_scores(options, y_test, X_test=None, estimator=None, y_pred=None, raise_main_score=True):
- from sklearn.metrics.scorer import get_scorer
+ from sklearn.metrics import get_scorer
from sklearn.model_selection._validation import _score
from sklearn.metrics import confusion_matrix
diff --git a/a2ml/api/model_review/scores/classification.py b/a2ml/api/model_review/scores/classification.py
index 76493cc3..3acd890d 100644
--- a/a2ml/api/model_review/scores/classification.py
+++ b/a2ml/api/model_review/scores/classification.py
@@ -3,7 +3,7 @@
from sklearn.metrics import make_scorer, recall_score, average_precision_score, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import matthews_corrcoef as mcc
-from sklearn.metrics.scorer import SCORERS
+from sklearn.metrics import SCORERS
def kappa(y_true, y_pred, weights=None, allow_off_by_one=False):
diff --git a/a2ml/api/model_review/scores/regression.py b/a2ml/api/model_review/scores/regression.py
index b8cdf0bb..704f4539 100644
--- a/a2ml/api/model_review/scores/regression.py
+++ b/a2ml/api/model_review/scores/regression.py
@@ -1,6 +1,6 @@
import numpy as np
from sklearn.metrics import make_scorer, mean_squared_error, mean_squared_log_error, mean_absolute_error
-from sklearn.metrics.scorer import SCORERS
+from sklearn.metrics import SCORERS
EPSILON = 1e-10
diff --git a/a2ml/cmdl/commands/cmd_model.py b/a2ml/cmdl/commands/cmd_model.py
index a380e6ac..8fedc0a6 100644
--- a/a2ml/cmdl/commands/cmd_model.py
+++ b/a2ml/cmdl/commands/cmd_model.py
@@ -37,14 +37,19 @@ def deploy(ctx, provider, model_id, locally, no_review, name, algorithm, score,
@click.option('--model-id', '-m', type=click.STRING, required=True,
help='Deployed model id.')
@click.option('--locally', is_flag=True, default=False,
+ help='Predict locally using auger.ai.predict package.')
[email protected]('--docker', is_flag=True, default=False,
help='Predict locally using Docker image to run model.')
@click.option('--provider', '-p', type=click.Choice(['auger','azure']), required=False,
help='Cloud AutoML Provider.')
@click.option('--output', '-o', type=click.STRING, required=False,
help='Output csv file path.')
@pass_context
-def predict(ctx, provider, filename, model_id, threshold, locally, output):
+def predict(ctx, provider, filename, model_id, threshold, locally, docker, output):
"""Predict with deployed model."""
+ if docker:
+ locally = "docker"
+
A2MLModel(ctx, provider).predict(filename=filename, model_id=model_id, threshold=threshold, locally=locally, output=output)
@click.command('actuals', short_help='Send actual values for deployed model. Needed for review and monitoring.')
diff --git a/a2ml/cmdl/commands/cmd_predict.py b/a2ml/cmdl/commands/cmd_predict.py
index 5665273d..526c31cb 100644
--- a/a2ml/cmdl/commands/cmd_predict.py
+++ b/a2ml/cmdl/commands/cmd_predict.py
@@ -12,12 +12,16 @@
@click.option('--model-id', '-m', type=click.STRING, required=False,
help='Deployed model id.')
@click.option('--locally', is_flag=True, default=False,
+ help='Predict locally using auger.ai.predict package.')
[email protected]('--docker', is_flag=True, default=False,
help='Predict locally using Docker image to run model.')
@click.option('--output', '-o', type=click.STRING, required=False,
help='Output csv file path.')
@pass_context
-def cmdl(ctx, provider, filename, model_id, threshold, locally, output):
+def cmdl(ctx, provider, filename, model_id, threshold, locally, docker, output):
"""Predict with deployed model."""
ctx.setup_logger(format='')
+ if docker:
+ locally = "docker"
A2ML(ctx, provider).predict(
filename=filename, model_id=model_id, threshold=threshold, locally=locally, output=output)
diff --git a/a2ml/tasks_queue/tasks_hub_api.py b/a2ml/tasks_queue/tasks_hub_api.py
index 534adde5..5340fca5 100644
--- a/a2ml/tasks_queue/tasks_hub_api.py
+++ b/a2ml/tasks_queue/tasks_hub_api.py
@@ -531,7 +531,8 @@ def predict_by_model_task(params):
count_in_result=params.get('count_in_result'),
predicted_at=params.get('prediction_date'),
prediction_id = params.get('prediction_id'),
- locally = params.get('locally', False)
+ locally = params.get('locally', False),
+ no_features_in_result=params.get('no_features_in_result', False)
)
_update_hub_objects(ctx, params.get('provider'), params)
diff --git a/setup.py b/setup.py
index cf55b2a8..1a73562d 100644
--- a/setup.py
+++ b/setup.py
@@ -28,9 +28,9 @@ def run(self):
install_requires = [
- 'numpy<1.19.0,>=1.16.0', # version for azure
- 'pandas>=0.22', # version for azure
- 'joblib>=0.14.1', # version for azure
+ 'numpy==1.18.5', # version for azure
+ 'pandas==1.2.4', # version for azure
+ 'joblib==1.0.1', # version for azure
'ruamel.yaml>0.16.7', # version for azure
'pyarrow<2.0.0,>=0.17.0', # version for azure
'scipy==1.5.2',
@@ -72,23 +72,30 @@ def run(self):
'redis',
's3fs>=0.4.0,<0.5.0',
'uvicorn',
+ 'scikit-learn==0.24.2'
],
'azure': [
- 'scikit-learn~=0.22.2',
- 'xgboost<=0.90',
+ #'scikit-learn~=0.22.2',
+ #'xgboost<=0.90',
# https://github.com/Azure/azure-sdk-for-python/issues/13871
#'azure-mgmt-resource==10.2.0',
- 'azureml-sdk[automl]~=1.22.0'
+ #this needs to move to setup.azure.py and do not include default
+ 'azureml-sdk[automl]==1.29.0'
],
'google': [
'google-cloud-automl'
+ ],
+ 'predict': [
+ 'auger.ai.predict==1.0.72'
]
}
# Meta dependency groups.
all_deps = []
for group_name in extras:
- all_deps += extras[group_name]
+ if group_name != 'predict' and group_name != 'google' and group_name != 'azure':
+ all_deps += extras[group_name]
+
extras['all'] = all_deps
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-05-27T20:31:05 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-547 | 84094892b7a1865b4d6a0f1d13b2af695b5f154b | diff --git a/a2ml/api/roi/grammar.bnf b/a2ml/api/roi/grammar.bnf
index 73b56ba7..5701ae44 100644
--- a/a2ml/api/roi/grammar.bnf
+++ b/a2ml/api/roi/grammar.bnf
@@ -96,7 +96,7 @@ func_call_statement:
| NAME '(' (expression (',' expression)*)? ')'
top_expression:
- | top_expression_type' NUMBER 'by' expression ['per' expression] ['where' expression] ['from' '(' top_expression ')']
+ | top_expression_type' NUMBER 'by' shift_expr ['per' expression] ['where' expression] ['from' '(' top_expression ')']
top_expression_type:
| 'top'
diff --git a/a2ml/api/roi/parser.py b/a2ml/api/roi/parser.py
index d2b798fa..d9c2a920 100644
--- a/a2ml/api/roi/parser.py
+++ b/a2ml/api/roi/parser.py
@@ -376,7 +376,7 @@ def top_expression(self):
node.limit_node = self.const_node(Token.INT_CONST)
self.eat(Token.BY)
- node.order_node = self.expression()
+ node.order_node = self.shift_expr()
if self.current_token.type == Token.PER:
self.eat(Token.PER)
diff --git a/a2ml/api/roi/validator.py b/a2ml/api/roi/validator.py
index 03c2de16..b299bb03 100644
--- a/a2ml/api/roi/validator.py
+++ b/a2ml/api/roi/validator.py
@@ -1,6 +1,6 @@
from a2ml.api.roi.base_interpreter import BaseInterpreter
from a2ml.api.roi.lexer import AstError, Lexer
-from a2ml.api.roi.parser import Parser
+from a2ml.api.roi.parser import Parser, TopNode
class ValidationError(AstError):
pass
@@ -84,5 +84,8 @@ def evaluate_func_node(self, node):
raise ValidationError(f"unknown function '{node.func_name}' at position {node.position()}")
def evaluate_top_node(self, node):
+ if not isinstance(self.root, TopNode):
+ raise ValidationError(f"top or bottom expression cannot be used as an argument or operand")
+
return all(map(lambda n: self.evaluate(n), node.child_nodes()))
| Check how and/or works in top expression
e.g.
```
top 8 by P and $spread_pct < 0.5 from (top 1 by P per $symbol)
$spread_pct < 0.5 and top 8 by P from (top 1 by P per $symbol)
```
| 2021-04-26T13:57:29 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-545 | 0369a5aedaf72a0533c6e3890c1d668a8246fc23 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index cc08f086..9b076950 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.14'
+__version__ = '1.0.15'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index aa213b05..0a727c3b 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -300,7 +300,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
:widths: 50 50 50
:header-rows: 1
- * - target: predicted value
+ * - target: predicted value. If missed - predict called automatically
- actual
- baseline_target: predicted value for baseline model (OPTIONAL)
* - Iris-setosa
@@ -310,7 +310,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
- Iris-virginica
- Iris-virginica
- It may also contain train features to retrain while Review(if target missed) and for distribution chart
+ It may also contain train features to predict(if target missed), retrain model while Review and for distribution chart
This method support only one provider
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index c8764b19..da68a419 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -145,7 +145,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
:widths: 50 50 50
:header-rows: 1
- * - target: predicted value
+ * - target: predicted value. If missed - predict called automatically
- actual
- baseline_target: predicted value for baseline model (OPTIONAL)
* - Iris-setosa
@@ -155,7 +155,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
- Iris-virginica
- Iris-virginica
- It may also contain train features to retrain while Review(if target missed) and for distribution chart
+ It may also contain train features to predict(if target missed), retrain model while Review and for distribution chart
This method support only one provider
@@ -226,6 +226,12 @@ def review_alert(self, model_id, parameters = None, locally=False, provider=None
* threshold (float)
* sensitivity (int): The amount of time(in hours) this metric must be at or below the threshold to trigger the alert.
+ * threshold_policy (all_values/average_value/any_value)
+
+ - all_values: Default value. Trigger an alert when all values in sensitivity below threshold
+ - average_value: Trigger an alert when average of values in sensitivity below threshold
+ - any_value: Trigger an alert when any value in sensitivity below threshold
+
* action (no/retrain/retrain_deploy)
- no: no action should be executed
diff --git a/a2ml/api/auger/impl/cloud/endpoint.py b/a2ml/api/auger/impl/cloud/endpoint.py
index 27030333..e581af4a 100644
--- a/a2ml/api/auger/impl/cloud/endpoint.py
+++ b/a2ml/api/auger/impl/cloud/endpoint.py
@@ -14,8 +14,9 @@ def __init__(self, ctx, endpoint_api, endpoint_id=None):
def create(self, pipeline_id, name):
return self._call_create({'pipeline_id': pipeline_id, 'name': name},[])
- def update(self, name):
- return self._call_update({ 'id': self.object_id, 'name': name})
+ def update(self, params):
+ params['id'] = self.object_id
+ return self._call_update(params)
def update_roi(self):
roi_names = ['review/roi/filter', 'review/roi/investment', 'review/roi/revenue']
diff --git a/a2ml/api/auger/impl/cloud/experiment.py b/a2ml/api/auger/impl/cloud/experiment.py
index 65194f98..45a6c715 100644
--- a/a2ml/api/auger/impl/cloud/experiment.py
+++ b/a2ml/api/auger/impl/cloud/experiment.py
@@ -96,8 +96,6 @@ def get_experiment_options(config, ):
if config.get('experiment/search_space', None) is not None:
options['search_space'] = config.get('experiment/search_space')
- if config.get('review/metric'):
- options['review_metric'] = config.get('review/metric')
if config.get('review/alert/retrain_policy/type'):
options['retrain_policy_type'] = config.get('review/alert/retrain_policy/type')
if config.get('review/alert/retrain_policy/value'):
diff --git a/a2ml/api/auger/impl/cloud/review_alert.py b/a2ml/api/auger/impl/cloud/review_alert.py
index 30363621..821279ed 100644
--- a/a2ml/api/auger/impl/cloud/review_alert.py
+++ b/a2ml/api/auger/impl/cloud/review_alert.py
@@ -23,6 +23,7 @@ def create_update(self, parameters=None):
'active': parameters.get('active', config.get('review/alert/active')),
'kind': parameters.get('type', config.get('review/alert/type')),
'threshold': float(parameters.get('threshold', config.get('review/alert/threshold'))),
+ 'threshold_policy': parameters.get('threshold_policy', config.get('review/alert/threshold_policy')),
'sensitivity': int(parameters.get('sensitivity', config.get('review/alert/sensitivity'))),
'actions': parameters.get('action', config.get('review/alert/action')),
'notifications': parameters.get('notification', config.get('review/alert/notification'))
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index 61a32901..ddb7c98a 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -50,8 +50,12 @@ def create_update_review_alert(self, model_id, pipeline_properties=None, paramet
endpoint_api = AugerEndpointApi(self.ctx, None,
pipeline_properties['endpoint_pipelines'][0].get('endpoint_id'))
+ params = {'review_metric': self.ctx.config.get('review/metric')}
if name and update_name:
- endpoint_api.update(name)
+ params['name'] = name
+
+ if params:
+ endpoint_api.update(params)
session_id = endpoint_api.properties().get('primary_experiment_session_id')
if session_id:
diff --git a/a2ml/api/azure/model.py b/a2ml/api/azure/model.py
index 43eae7b5..5f7fb49f 100644
--- a/a2ml/api/azure/model.py
+++ b/a2ml/api/azure/model.py
@@ -47,7 +47,6 @@ def deploy(self, model_id, locally, review, name=None, algorithm=None, score=Non
'scoreNames': [self.ctx.config.get('experiment/metric')],
'scoring': self.ctx.config.get('experiment/metric'),
"score_name": self.ctx.config.get('experiment/metric'),
- "review_metric": self.ctx.config.get('review/metric'),
"originalFeatureColumns": model_features,
"model_type": self.ctx.config.get("model_type")
}
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index eec8cf41..916659c1 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -332,7 +332,6 @@ def score_model_performance_daily(self, date_from, date_to, extra_features=[]):
res[str(curr_date)] = {
'scores': scores,
'score_name': self.options.get('score_name'),
- 'review_metric': self.options.get('review_metric'),
'baseline_scores': baseline_score
}
diff --git a/a2ml/cmdl/template/config.yaml b/a2ml/cmdl/template/config.yaml
index 566e9a90..2bfc8cc3 100644
--- a/a2ml/cmdl/template/config.yaml
+++ b/a2ml/cmdl/template/config.yaml
@@ -72,13 +72,19 @@ review:
alert:
# Activate/Deactivate Review Alert
active: True
- #model_accuracy - Decrease in Model Accuracy: the model accuracy threshold allowed before trigger is initiated. Default threshold: 0.7. Default sensitivity: 72
- #feature_average_range - Feature Average Out-Of-Range: Trigger an alert if average feature value during time period goes beyond the standard deviation range calculated during training period by the specified number of times or more. Default threshold: 1. Default sensitivity: 168
- #runtime_errors_burst - Burst Of Runtime Errors: Trigger an alert if runtime error count exceeds threshold. Default threshold: 5. Default sensitivity: 1
+ # model_accuracy - Decrease in Model Accuracy: the model accuracy threshold allowed before trigger is initiated. Default threshold: 0.7. Default sensitivity: 72
+ # feature_average_range - Feature Average Out-Of-Range: Trigger an alert if average feature value during time period goes beyond the standard deviation range calculated during training period by the specified number of times or more. Default threshold: 1. Default sensitivity: 168
+ # runtime_errors_burst - Burst Of Runtime Errors: Trigger an alert if runtime error count exceeds threshold. Default threshold: 5. Default sensitivity: 1
type: model_accuracy
threshold: 0.7
#The amount of time(in hours) this metric must be at or below the threshold to trigger the alert.
sensitivity: 72
+
+ # all_values - Trigger an alert when all values in sensitivity below threshold.
+ # average_value - Trigger an alert when average of values in sensitivity below threshold.
+ # any_value - Trigger an alert when any value in sensitivity below threshold
+ threshold_policy: all_values
+
#no - no action should be executed
#retrain - Use new predictions and actuals as test set to retrain the model.
#retrain_deploy - Deploy retrained model and make it active model of this endpoint.
diff --git a/docs/source/dev/configuration.rst b/docs/source/dev/configuration.rst
index a8d2caee..c983ac6f 100644
--- a/docs/source/dev/configuration.rst
+++ b/docs/source/dev/configuration.rst
@@ -44,6 +44,7 @@ All Providers
type: model_accuracy
threshold: 0.7
sensitivity: 72
+ threshold_policy: all_values
action: retrain_deploy
notification: user
@@ -91,6 +92,13 @@ All Providers
* **review.alert.threshold** Float
* **review.alert.sensitivity** The amount of time(in hours) this metric must be at or below the threshold to trigger the alert.
+ * **review.alert.threshold_policy**
+
+ * **Supported Review Alert threshold policies**
+ * **all_values** Trigger an alert when all values in sensitivity below threshold.
+ * **average_value** Trigger an alert when average of values in sensitivity below threshold.
+ * **any_value** Trigger an alert when any value in sensitivity below threshold.
+
* **review.alert.action**
* **Supported Review Alert actions**
diff --git a/docs/source/dev/mlram.rst b/docs/source/dev/mlram.rst
index 941cf7d6..1da9d1cf 100644
--- a/docs/source/dev/mlram.rst
+++ b/docs/source/dev/mlram.rst
@@ -11,6 +11,8 @@ Auger model
Monitored model
===================
+See application example: https://github.com/augerai/mlram_apps
+
1. Create A2ML application with external provider:
.. code-block:: bash
@@ -66,6 +68,9 @@ To review distribution chart , send training features with target and actuals:
.. code-block:: python
+ # If call just after actuals, wait some time till server process the data
+ time.sleep(30)
+
ctx = Context()
result = A2ML(ctx).review(model_id='external_model_id')
if result['data']['status'] == 'retrain':
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-04-21T11:17:28 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-536 | 71d7180c37deba8a27bee7b1c67f9dae2d39553e | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index b49950b1..0fb5dff1 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.11'
+__version__ = '1.0.12'
diff --git a/a2ml/api/auger/impl/cloud/base.py b/a2ml/api/auger/impl/cloud/base.py
index 81c45ac1..5ea06a53 100644
--- a/a2ml/api/auger/impl/cloud/base.py
+++ b/a2ml/api/auger/impl/cloud/base.py
@@ -134,6 +134,7 @@ def _call_create(self, params=None, progress=None,has_return_object=True):
if has_return_object:
if object_properties:
self.object_id = object_properties.get('id')
+ self.object_name = object_properties.get('name') #name can be changed by hub
if progress:
self.wait_for_status(progress)
return self.properties()
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-03-22T06:21:43 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-535 | 3088a5db6d2dc5fdb4c4489740411cf140b77a62 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index d521168a..b49950b1 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.10'
+__version__ = '1.0.11'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index c92955a7..3a14194f 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -175,7 +175,8 @@ def evaluate(self, run_id = None):
return self.runner.execute('evaluate', run_id = run_id)
@show_result
- def deploy(self, model_id, locally=False, review=True, provider=None, name=None, algorithm=None, score=None):
+ def deploy(self, model_id, locally=False, review=True, provider=None,
+ name=None, algorithm=None, score=None, data_path=None):
"""Deploy a model locally or to specified provider(s).
Note:
@@ -190,6 +191,7 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None,
name (str): Friendly name for the model. Used as name for Review Endpoint
algorithm (str): Self-hosted model(external provider) algorithm name.
score (float): Self-hosted model(external provider) score.
+ data_path (str): Data path to fit model when deploy. Return new deployed model-id
Returns:
::
@@ -214,7 +216,8 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None,
model_id = result['model_id']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('deploy', model_id, locally, review, name, algorithm, score)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('deploy',
+ model_id, locally, review, name, algorithm, score, data_path)
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index 83db12dd..b5400042 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -25,7 +25,8 @@ def __init__(self, ctx, provider=None):
self.local_runner = lambda: self.build_runner(ctx, provider, force_local=True)
@show_result
- def deploy(self, model_id, locally=False, review=True, provider=None, name=None, algorithm=None, score=None):
+ def deploy(self, model_id, locally=False, review=True, provider=None,
+ name=None, algorithm=None, score=None, data_path=None):
"""Deploy a model locally or to specified provider(s).
Args:
@@ -36,6 +37,7 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None,
name (str): Friendly name for the model. Used as name for Review Endpoint
algorithm (str): Self-hosted model(external provider) algorithm name.
score (float): Self-hosted model(external provider) score.
+ data_path (str): Data path to fit model when deploy. Return new deployed model-id
Returns:
::
@@ -59,7 +61,8 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None,
model_id = result['model_id']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('deploy', model_id, locally, review, name, algorithm, score)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('deploy',
+ model_id, locally, review, name, algorithm, score, data_path)
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 015fff4e..5bc25d38 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -18,8 +18,8 @@ def train(self):
def evaluate(self, run_id = None):
return AugerExperiment(self.ctx).leaderboard(run_id)
- def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
- return AugerModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score)
+ def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None ):
+ return AugerModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score, data_path)
def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
return AugerModel(self.ctx).predict(
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index 7e289e20..8149b184 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -11,8 +11,9 @@ def __init__(self, ctx, experiment_api, pipeline_id=None):
super(AugerPipelineApi, self).__init__(
ctx, experiment_api, None, pipeline_id)
- def create(self, trial_id, review=True, name=None):
- return self._call_create({'trial_id': trial_id, 'is_review_model_enabled' : review, 'name': name},
+ def create(self, trial_id, review=True, name=None, refit_data_path=None):
+ return self._call_create({'trial_id': trial_id, 'is_review_model_enabled' : review, 'name': name,
+ 'refit_data_path': refit_data_path},
['creating_files', 'packaging', 'deploying'])
def create_external(self, review, name, project_id, algorithm, score):
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index 19ec06c7..3ece907e 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -18,8 +18,8 @@ def __init__(self, ctx, project):
self.project = project
self.ctx = ctx
- def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
- return ModelDeploy(self.ctx, self.project).execute(model_id, locally, review, name, algorithm, score)
+ def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None):
+ return ModelDeploy(self.ctx, self.project).execute(model_id, locally, review, name, algorithm, score, data_path)
def review_alert(self, model_id, parameters, name):
return ModelDeploy(self.ctx, self.project).create_update_review_alert(model_id, None, parameters, name)
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index 391cf465..90d020eb 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -21,11 +21,11 @@ def __init__(self, ctx, project):
self.project = project
self.ctx = ctx
- def execute(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
+ def execute(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None):
if locally:
- return self.deploy_model_locally(model_id, review, name)
+ return self.deploy_model_locally(model_id, review, name, data_path)
else:
- return self.deploy_model_in_cloud(model_id, review, name, algorithm, score)
+ return self.deploy_model_in_cloud(model_id, review, name, algorithm, score, data_path)
def create_update_review_alert(self, model_id, pipeline_properties=None, parameters=None, name=None):
if not self.ctx.config.get('review'):
@@ -121,7 +121,9 @@ def review(self, model_id):
}
return result
- def deploy_model_in_cloud(self, model_id, review, name, algorithm, score):
+ def deploy_model_in_cloud(self, model_id, review, name, algorithm, score, data_path):
+ from .predict import ModelPredict
+
self.ctx.log('Deploying model %s' % model_id)
if self.ctx.is_external_provider():
@@ -129,8 +131,12 @@ def deploy_model_in_cloud(self, model_id, review, name, algorithm, score):
self.ctx, None).create_external(review, name, self.project.object_id, algorithm, score)
else:
self.project.start()
+ data_url = None
+ if data_path:
+ _, _, data_url, _ = ModelPredict(self.ctx)._process_input(data_path, None, None)
+
pipeline_properties = AugerPipelineApi(
- self.ctx, None).create(model_id, review, name)
+ self.ctx, None).create(model_id, review, name, data_url)
if pipeline_properties.get('status') == 'ready':
if review:
@@ -144,7 +150,7 @@ def deploy_model_in_cloud(self, model_id, review, name, algorithm, score):
return pipeline_properties.get('id')
- def deploy_model_locally(self, model_id, review, name):
+ def deploy_model_locally(self, model_id, review, name, data_path):
is_loaded, model_path, model_name = self.verify_local_model(model_id)
#TODO: support review flag
if not is_loaded:
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index b901ee1c..24bbd661 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -15,8 +15,8 @@ def __init__(self, ctx):
@error_handler
@authenticated
@with_project(autocreate=False)
- def deploy(self, project, model_id, locally, review, name, algorithm, score):
- model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score)
+ def deploy(self, project, model_id, locally, review, name, algorithm, score, data_path):
+ model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score, data_path)
return {'model_id': model_id}
@error_handler
diff --git a/a2ml/api/azure/a2ml.py b/a2ml/api/azure/a2ml.py
index a2647eee..078a7c9b 100644
--- a/a2ml/api/azure/a2ml.py
+++ b/a2ml/api/azure/a2ml.py
@@ -21,10 +21,10 @@ def evaluate(self, run_id = None):
return AzureExperiment(self.ctx).leaderboard(run_id)
- def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
+ def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None, data_path=None):
from a2ml.api.azure.model import AzureModel
- return AzureModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score)
+ return AzureModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score, data_path)
def predict(self, filename, model_id, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
from a2ml.api.azure.model import AzureModel
diff --git a/a2ml/api/azure/model.py b/a2ml/api/azure/model.py
index 83195e43..43eae7b5 100644
--- a/a2ml/api/azure/model.py
+++ b/a2ml/api/azure/model.py
@@ -20,7 +20,7 @@ def __init__(self, ctx):
@error_handler
@authenticated
- def deploy(self, model_id, locally, review, name=None, algorithm=None, score=None):
+ def deploy(self, model_id, locally, review, name=None, algorithm=None, score=None, data_path=None):
if locally:
is_loaded, model_path = self.verify_local_model(model_id)
if is_loaded:
diff --git a/a2ml/cmdl/commands/cmd_deploy.py b/a2ml/cmdl/commands/cmd_deploy.py
index 474b1dae..0793b065 100644
--- a/a2ml/cmdl/commands/cmd_deploy.py
+++ b/a2ml/cmdl/commands/cmd_deploy.py
@@ -17,8 +17,10 @@
help='Self-hosted model(external provider) algorithm name.')
@click.option('--score', '-s', required=False, type=float,
help='Self-hosted model(external provider) score.')
[email protected]('--data-path', '-d', type=click.STRING, required=False,
+ help='Data path to fit model when deploy. Return new deployed model-id')
@pass_context
-def cmdl(ctx, provider, model_id, locally, no_review, name, algorithm, score):
+def cmdl(ctx, provider, model_id, locally, no_review, name, algorithm, score, data_path):
"""Deploy trained model."""
ctx.setup_logger(format='')
- A2ML(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score)
+ A2ML(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score, data_path=data_path)
diff --git a/a2ml/cmdl/commands/cmd_model.py b/a2ml/cmdl/commands/cmd_model.py
index ccc5bacf..e6b187b4 100644
--- a/a2ml/cmdl/commands/cmd_model.py
+++ b/a2ml/cmdl/commands/cmd_model.py
@@ -23,10 +23,12 @@ def cmdl(ctx):
help='Self-hosted model(external provider) algorithm name.')
@click.option('--score', '-s', required=False, type=float,
help='Self-hosted model(external provider) score.')
[email protected]('--data-path', '-d', type=click.STRING, required=False,
+ help='Data path to fit model when deploy. Return new deployed model-id')
@pass_context
-def deploy(ctx, provider, model_id, locally, no_review, name, algorithm, score):
+def deploy(ctx, provider, model_id, locally, no_review, name, algorithm, score, data_path):
"""Deploy trained model."""
- A2MLModel(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score)
+ A2MLModel(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score, data_path=data_path)
@click.command('predict', short_help='Predict with deployed model.')
@click.argument('filename', required=True, type=click.STRING)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-03-09T17:48:33 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-534 | 2e580917f270c6ed64528af1e34c08626d5a3fc0 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index a67a991b..d521168a 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.09'
+__version__ = '1.0.10'
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index ed58746d..83db12dd 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -204,7 +204,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
return self.get_runner(locally, model_id, provider).execute_one_provider('actuals', model_id, filename, data, columns, actuals_at, actual_date_column, locally)
@show_result
- def review_alert(self, model_id, parameters = None, locally=False, provider=None):
+ def review_alert(self, model_id, parameters = None, locally=False, provider=None, name=None):
"""Update Review parameters.
Args:
@@ -230,6 +230,7 @@ def review_alert(self, model_id, parameters = None, locally=False, provider=None
locally(bool): Process review locally.
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider defined by model_id or set in costructor.
+ name (str): Friendly name for the model. Used as name for Review Endpoint
Returns:
::
@@ -244,7 +245,7 @@ def review_alert(self, model_id, parameters = None, locally=False, provider=None
ctx = Context()
model = A2MLModel(ctx).review_alert(model_id='D881079E1ED14FB')
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('review_alert', model_id, parameters)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('review_alert', model_id, parameters, name)
@show_result
def review(self, model_id, locally=False, provider=None):
diff --git a/a2ml/api/auger/impl/cloud/endpoint.py b/a2ml/api/auger/impl/cloud/endpoint.py
index eed9eb81..27030333 100644
--- a/a2ml/api/auger/impl/cloud/endpoint.py
+++ b/a2ml/api/auger/impl/cloud/endpoint.py
@@ -14,6 +14,9 @@ def __init__(self, ctx, endpoint_api, endpoint_id=None):
def create(self, pipeline_id, name):
return self._call_create({'pipeline_id': pipeline_id, 'name': name},[])
+ def update(self, name):
+ return self._call_update({ 'id': self.object_id, 'name': name})
+
def update_roi(self):
roi_names = ['review/roi/filter', 'review/roi/investment', 'review/roi/revenue']
roi_values = []
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index aedc93d4..19ec06c7 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -21,8 +21,8 @@ def __init__(self, ctx, project):
def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
return ModelDeploy(self.ctx, self.project).execute(model_id, locally, review, name, algorithm, score)
- def review_alert(self, model_id, parameters):
- return ModelDeploy(self.ctx, self.project).create_update_review_alert(model_id, None, parameters)
+ def review_alert(self, model_id, parameters, name):
+ return ModelDeploy(self.ctx, self.project).create_update_review_alert(model_id, None, parameters, name)
def review(self, model_id):
return ModelDeploy(self.ctx, self.project).review(model_id)
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index 1023a93b..391cf465 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -34,7 +34,8 @@ def create_update_review_alert(self, model_id, pipeline_properties=None, paramet
if not pipeline_properties:
pipeline_properties = AugerPipelineApi(self.ctx, None, model_id).properties()
- endpoint_api = None
+ endpoint_api = None
+ update_name = True
if not pipeline_properties.get('endpoint_pipelines'):
self.ctx.log('Creating review endpoint ...')
endpoint_api = AugerEndpointApi(self.ctx, None)
@@ -42,12 +43,16 @@ def create_update_review_alert(self, model_id, pipeline_properties=None, paramet
name = fsclient.get_path_base_name(self.ctx.config.get('source'))
endpoint_properties = endpoint_api.create(pipeline_properties.get('id'), name)
pipeline_properties['endpoint_pipelines'] = [endpoint_properties.get('id')]
+ update_name = False
if pipeline_properties.get('endpoint_pipelines'):
if endpoint_api is None:
endpoint_api = AugerEndpointApi(self.ctx, None,
pipeline_properties['endpoint_pipelines'][0].get('endpoint_id'))
+ if name and update_name:
+ endpoint_api.update(name)
+
session_id = endpoint_api.properties().get('primary_experiment_session_id')
if session_id:
AugerExperimentSessionApi(self.ctx, None, None, session_id).update_settings()
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index 516c5c0f..b901ee1c 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -46,8 +46,8 @@ def delete_actuals(self, project, model_id, with_predictions=False, begin_date=N
@error_handler
@authenticated
@with_project(autocreate=False)
- def review_alert(self, project, model_id, parameters):
- return Model(self.ctx, project).review_alert(model_id, parameters)
+ def review_alert(self, project, model_id, parameters, name):
+ return Model(self.ctx, project).review_alert(model_id, parameters, name)
@error_handler
@authenticated
diff --git a/a2ml/api/azure/model.py b/a2ml/api/azure/model.py
index e4342fd9..83195e43 100644
--- a/a2ml/api/azure/model.py
+++ b/a2ml/api/azure/model.py
@@ -554,7 +554,7 @@ def undeploy(self, model_id, locally):
@error_handler
@authenticated
- def review_alert(self, model_id, parameters):
+ def review_alert(self, model_id, parameters, name):
raise AzureException("Not Implemented. Set use_auger_cloud: True in config.yml")
@error_handler
diff --git a/a2ml/api/model_review/model_helper.py b/a2ml/api/model_review/model_helper.py
index 64ceeb18..6a0c3541 100644
--- a/a2ml/api/model_review/model_helper.py
+++ b/a2ml/api/model_review/model_helper.py
@@ -4,7 +4,7 @@
import numpy as np
import json
-from a2ml.api.utils import get_uid, get_uid4, fsclient, remove_dups_from_list
+from a2ml.api.utils import get_uid, get_uid4, fsclient, remove_dups_from_list, sort_arrays
from a2ml.api.utils.dataframe import DataFrame
@@ -184,6 +184,12 @@ def calculate_scores(options, y_test, X_test=None, estimator=None, y_pred=None,
else:
logging.error("calculate_scores: no scaling found for target fold group: %s"%options['fold_group'])
+ if options.get("score_top_count"):
+ if y_pred is None:
+ y_pred = estimator.predict(X_test)
+
+ y_pred, y_test = sort_arrays(y_pred, y_test, options.get("score_top_count"))
+
all_scores = {}
if y_pred is not None:
if options.get('binaryClassification'):
diff --git a/a2ml/api/utils/__init__.py b/a2ml/api/utils/__init__.py
index 876bbf2e..f39f229e 100644
--- a/a2ml/api/utils/__init__.py
+++ b/a2ml/api/utils/__init__.py
@@ -249,4 +249,13 @@ def retry_helper(func, retry_errors=[], num_try=10, delay=10, ctx=None):
time.sleep(delay*nTry)
else:
raise
-
+
+def sort_arrays(ar1, ar2, top_n=None, desc=True):
+ p = ar1.argsort()
+ if desc:
+ p = p[::-1]
+ if top_n:
+ p = p[:top_n]
+
+ return ar1[p], ar2[p]
+
diff --git a/a2ml/cmdl/commands/cmd_model.py b/a2ml/cmdl/commands/cmd_model.py
index d2275b79..ccc5bacf 100644
--- a/a2ml/cmdl/commands/cmd_model.py
+++ b/a2ml/cmdl/commands/cmd_model.py
@@ -62,10 +62,12 @@ def actuals(ctx, provider, filename, model_id, locally):
@click.argument('model-id', required=True, type=click.STRING)
@click.option('--provider', '-p', type=click.Choice(['auger','azure']), required=False,
help='Cloud AutoML Provider.')
[email protected]('--name', '-n', required=False, type=click.STRING,
+ help='Model friendly name.Used as name for Review Endpoint')
@pass_context
-def review_alert(ctx, provider, model_id):
+def review_alert(ctx, provider, model_id, name):
"""Predict with deployed model."""
- A2MLModel(ctx, provider).review_alert(model_id)
+ A2MLModel(ctx, provider).review_alert(model_id, name=name)
@click.command('review', short_help='Review information about deployed model.')
@click.argument('model-id', required=True, type=click.STRING)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-03-03T11:26:13 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-531 | 7a945f592272729d0b867e3279a2d9bad73d23c9 | diff --git a/a2ml/api/auger/impl/cloud/experiment.py b/a2ml/api/auger/impl/cloud/experiment.py
index 8aa28e7f..5413e9d6 100644
--- a/a2ml/api/auger/impl/cloud/experiment.py
+++ b/a2ml/api/auger/impl/cloud/experiment.py
@@ -81,11 +81,16 @@ def get_experiment_options(config, ):
options["allowed_algorithms"] = config.get_list('experiment/allowed_models')
if config.get('experiment/exit_score'):
options['exit_score'] = config.get('experiment/exit_score')
- if config.get('review/metric'):
- options['review_metric'] = config.get('review/metric')
if config.get('experiment/score_top_count'):
options['score_top_count'] = config.get('experiment/score_top_count')
+ if config.get('review/metric'):
+ options['review_metric'] = config.get('review/metric')
+ if config.get('review/alert/retrain_policy/type'):
+ options['retrain_policy_type'] = config.get('review/alert/retrain_policy/type')
+ if config.get('review/alert/retrain_policy/value'):
+ options['retrain_policy_value'] = config.get('review/alert/retrain_policy/value')
+
split_options = {}
if config.get('experiment/validation_size'):
split_options['trainRatio'] = 1.0-float(config.get('experiment/validation_size'))
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 15b4bb77..5be044a3 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -229,7 +229,8 @@ def delete_actuals(self, with_predictions=False, begin_date=None, end_date=None)
path = file if type(file) == str else file['path']
fsclient.remove_file(path)
- def build_review_data(self, data_path=None, output=None, date_col=None):
+ def build_review_data(self, data_path=None, output=None, date_col=None, retrain_policy=None,
+ date_to=None):
if not data_path:
data_path = self.options['data_path']
@@ -242,23 +243,39 @@ def build_review_data(self, data_path=None, output=None, date_col=None):
)
all_files.sort(key=lambda f: f['path'][0:10], reverse=True)
-
- if date_col and date_col in train_features:
+ if date_to:
new_files = []
+ date_to_date = convert_to_date(date_to)
+ for idx, file in enumerate(all_files):
+ file_date = os.path.basename(file['path']).split("_")[0]
+ if convert_to_date(file_date) > date_to_date:
+ continue
+ new_files.append(file)
+ all_files = new_files
+
+ start_date = None
+ if '_review_date_' in data_path:
+ start = data_path.index('_review_date_')+len('_review_date_')
+ end = data_path.index('_', start)
+ start_date = convert_to_date(data_path[start:end])
+
+ if not start_date and date_col and date_col in train_features:
try:
start_date = convert_to_date(ds_train.df[date_col].max())
logging.info("build_review_data with date_col: %s = %s"%(date_col,start_date))
+ except Exception as e:
+ logging.error("Getting latest date from data path %s failed: %s"%(data_path,e))
- for idx, file in enumerate(all_files):
- file_date = os.path.basename(file['path']).split("_")[0]
- if convert_to_date(file_date) <= start_date:
- continue
+ if start_date:
+ new_files = []
+ for idx, file in enumerate(all_files):
+ file_date = os.path.basename(file['path']).split("_")[0]
+ if convert_to_date(file_date) <= start_date:
+ continue
- new_files.append(file)
+ new_files.append(file)
- all_files = new_files
- except Exception as e:
- logging.error("Getting latest date from data path %s failed: %s"%(data_path,e))
+ all_files = new_files
logging.info("build_review_data adding files: %s"%all_files)
for (file, ds_actuals) in DataFrame.load_from_files(all_files):
@@ -268,11 +285,20 @@ def build_review_data(self, data_path=None, output=None, date_col=None):
#ds_train.drop_duplicates()
#ds_train.dropna()
+ if retrain_policy:
+ if retrain_policy.get('type') == 'days_limit' and date_col and date_col in train_features:
+ start_date = convert_to_date(ds_train.df[date_col].max())
+ end_date = start_date - datetime.timedelta(days=int(retrain_policy.get('value')))
+ ds_train.df.query("%s>='%s'"%(date_col, end_date), inplace=True)
if not output:
directory = os.path.dirname(data_path)
file_name = os.path.basename(data_path).split('_review_')[0]
- output = os.path.join(directory, file_name + "_review_%s.parquet"%(get_uid()))
+ date_suffix = ""
+ if date_to:
+ date_suffix = "date_%s_"%date_to
+
+ output = os.path.join(directory, file_name + "_review_%s%s.parquet"%(date_suffix, get_uid()))
ds_train.saveToFile(output)
return output
@@ -290,6 +316,7 @@ def score_model_performance_daily(self, date_from, date_to):
df_actuals.df = pd.concat([df_actuals.df, df.df])
if df_actuals.count() > 0:
+ print(df_actuals.df['a2ml_predicted']) #.query("%s>=0.10"%'a2ml_predicted'))
df_actuals.df.rename(columns={self.target_feature: 'a2ml_actual'}, inplace=True)
df_actuals.df.rename(columns={'a2ml_predicted': self.target_feature}, inplace=True)
diff --git a/a2ml/api/utils/formatter.py b/a2ml/api/utils/formatter.py
index a909b0c2..e7a118bb 100644
--- a/a2ml/api/utils/formatter.py
+++ b/a2ml/api/utils/formatter.py
@@ -1,6 +1,6 @@
import types
-def print_table(log, table_list, headers=None):
+def print_table(log, table_list, headers=None, hor_lines=True):
if isinstance(table_list, types.GeneratorType):
table_list = list(table_list)
@@ -16,13 +16,20 @@ def print_table(log, table_list, headers=None):
# maximun size of the col for each element
col_size = [max(map(len, col)) for col in zip(*row_list)]
# insert seperating line before every line, and extra one for ending.
- for i in range(0, len(row_list) + 1)[::-1]:
- row_list.insert(i, ['-' * i for i in col_size])
- # two format for each content line and each seperating line
+ if hor_lines:
+ for i in range(0, len(row_list) + 1)[::-1]:
+ row_list.insert(i, ['-' * i for i in col_size])
+ else:
+ i = 1
+ row_list.insert(i, ['-' * i for i in col_size])
+ # two format for each content line and each separating line
format_str = ' | '.join(["{{:<{}}}".format(i) for i in col_size])
format_sep = '-+-'.join(["{{:<{}}}".format(i) for i in col_size])
- for item in row_list:
- if item[0][0] == '-':
- log(format_sep.format(*item))
+ for idx, item in enumerate(row_list):
+ if hor_lines:
+ if idx%2==0: #item[0][0] == '-':
+ log(format_sep.format(*item))
+ else:
+ log(format_str.format(*item))
else:
- log(format_str.format(*item))
+ log(format_str.format(*item))
\ No newline at end of file
diff --git a/a2ml/tasks_queue/tasks_hub_api.py b/a2ml/tasks_queue/tasks_hub_api.py
index 7ed04aad..d401abad 100644
--- a/a2ml/tasks_queue/tasks_hub_api.py
+++ b/a2ml/tasks_queue/tasks_hub_api.py
@@ -198,6 +198,10 @@ def _read_hub_experiment_session(ctx, params):
ctx.config.set('experiment/exit_score', evaluation_options.get('exit_score'))
if evaluation_options.get('score_top_count'):
ctx.config.set('experiment/score_top_count', evaluation_options.get('score_top_count'))
+ if evaluation_options.get('retrain_policy_type'):
+ ctx.config.set('experiment/retrain_policy_type', evaluation_options.get('retrain_policy_type'))
+ if evaluation_options.get('retrain_policy_value'):
+ ctx.config.set('experiment/retrain_policy_value', evaluation_options.get('retrain_policy_value'))
return ctx
@@ -609,9 +613,15 @@ def build_review_data_task(params):
if ctx.config.get_list('experiment/date_time'):
date_col = ctx.config.get_list('experiment/date_time')[0]
+ retrain_policy = None
+ if ctx.config.get('experiment/retrain_policy_type'):
+ retrain_policy = {'type': ctx.config.get('experiment/retrain_policy_type'), 'value':ctx.config.get('experiment/retrain_policy_value')}
+
return ModelReview(params).build_review_data(
data_path=params.get('data_path'),
- date_col = date_col
+ date_col = date_col,
+ retrain_policy = retrain_policy,
+ date_to=params.get('date_to')
)
@celeryApp.task(ignore_result=True)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-02-25T13:31:02 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-519 | 8b97a3ab4dd42d3ee20c82b2c373e9c988bc9a56 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index 35e8e7c4..b8d5126b 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.02'
+__version__ = '1.0.03'
diff --git a/a2ml/api/auger/impl/cloud/endpoint.py b/a2ml/api/auger/impl/cloud/endpoint.py
index 05294355..eed9eb81 100644
--- a/a2ml/api/auger/impl/cloud/endpoint.py
+++ b/a2ml/api/auger/impl/cloud/endpoint.py
@@ -2,7 +2,7 @@
from ..exceptions import AugerException
from .review_alert_item import AugerReviewAlertItemApi
from .review_alert import AugerReviewAlertApi
-
+from .cluster_task import AugerClusterTaskApi
class AugerEndpointApi(AugerBaseApi):
"""Auger Endpoint API."""
@@ -13,3 +13,33 @@ def __init__(self, ctx, endpoint_api, endpoint_id=None):
def create(self, pipeline_id, name):
return self._call_create({'pipeline_id': pipeline_id, 'name': name},[])
+
+ def update_roi(self):
+ roi_names = ['review/roi/filter', 'review/roi/investment', 'review/roi/revenue']
+ roi_values = []
+ roi_exists = False
+ for name in roi_names:
+ if self.ctx.config.get(name):
+ roi_exists = True
+
+ roi_values.append(self.ctx.config.get(name))
+
+ if roi_exists:
+ res = self.rest_api.hub_client.create_endpoint_roi_validation(
+ endpoint_id=self.object_id,
+ expressions=roi_values,
+ )
+ cluster_task = AugerClusterTaskApi(self.ctx, cluster_task_id=res['data']['id'])
+ cluster_task.wait_for_status(['pending', 'received', 'started', 'retry'])
+ props = cluster_task.properties()
+ isValid = True
+ for idx, item in enumerate(props.get('result', [])):
+ if not item.get('is_valid'):
+ isValid = False
+ self.ctx.error("Review ROI config parameter '%s' = '%s' is invalid. Error: %s"%(
+ roi_names[idx], roi_values[idx], item.get('error')))
+
+ if isValid:
+ return self._call_update({ 'id': self.object_id,
+ 'roi_filter': roi_values[0], 'roi_investment': roi_values[1], 'roi_revenue': roi_values[2],
+ })
\ No newline at end of file
diff --git a/a2ml/api/auger/impl/cloud/experiment.py b/a2ml/api/auger/impl/cloud/experiment.py
index f8b2672b..8aa28e7f 100644
--- a/a2ml/api/auger/impl/cloud/experiment.py
+++ b/a2ml/api/auger/impl/cloud/experiment.py
@@ -81,8 +81,10 @@ def get_experiment_options(config, ):
options["allowed_algorithms"] = config.get_list('experiment/allowed_models')
if config.get('experiment/exit_score'):
options['exit_score'] = config.get('experiment/exit_score')
- if config.get('experiment/review_metric'):
- options['review_metric'] = config.get('experiment/review_metric')
+ if config.get('review/metric'):
+ options['review_metric'] = config.get('review/metric')
+ if config.get('experiment/score_top_count'):
+ options['score_top_count'] = config.get('experiment/score_top_count')
split_options = {}
if config.get('experiment/validation_size'):
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index 7d250d32..aedc93d4 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -45,7 +45,15 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
raise AugerException('Model should be deployed locally.')
model_path, model_existed = ModelPredict(self.ctx)._extract_model(model_name)
- return ModelReview({'model_path': os.path.join(model_path, "model")}).add_actuals(
+ params = {
+ 'model_path': os.path.join(model_path, "model"),
+ 'roi': {
+ 'filter': str(self.ctx.config.get('review/roi/filter')),
+ 'revenue': str(self.ctx.config.get('review/roi/revenue')),
+ 'investment': str(self.ctx.config.get('review/roi/investment')),
+ }
+ }
+ return ModelReview(params).add_actuals(
self.ctx,
actuals_path=filename,
data=data,
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index f8056755..1023a93b 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -53,6 +53,8 @@ def create_update_review_alert(self, model_id, pipeline_properties=None, paramet
AugerExperimentSessionApi(self.ctx, None, None, session_id).update_settings()
AugerReviewAlertApi(self.ctx, endpoint_api).create_update(parameters)
+
+ endpoint_api.update_roi()
else:
self.ctx.log('Model is not belong to any review endpoint. Skipping ...')
diff --git a/a2ml/api/auger/impl/mparts/undeploy.py b/a2ml/api/auger/impl/mparts/undeploy.py
index 5fa36bfa..1dc26592 100644
--- a/a2ml/api/auger/impl/mparts/undeploy.py
+++ b/a2ml/api/auger/impl/mparts/undeploy.py
@@ -43,8 +43,9 @@ def execute(self, model_id, locally=False):
self.ctx.log("Undeploy model and remove from Review endpoint.")
for pipeline in endpoint_pipelines:
if pipeline.get('pipeline_id') == model_id:
+ # AugerEndpointPipelineApi(self.ctx, pipeline.get('id')).delete()
AugerPipelineApi(self.ctx, None, pipeline.get('pipeline_id')).remove(pipeline.get('pipeline_id'))
- AugerEndpointPipelineApi(self.ctx, pipeline.get('id')).delete()
+
break
else:
pipeline_api.remove(model_id)
diff --git a/a2ml/api/azure/model.py b/a2ml/api/azure/model.py
index f03f9618..7f6b50b8 100644
--- a/a2ml/api/azure/model.py
+++ b/a2ml/api/azure/model.py
@@ -47,7 +47,7 @@ def deploy(self, model_id, locally, review, name=None):
'scoreNames': [self.ctx.config.get('experiment/metric')],
'scoring': self.ctx.config.get('experiment/metric'),
"score_name": self.ctx.config.get('experiment/metric'),
- "review_metric": self.ctx.config.get('experiment/review_metric'),
+ "review_metric": self.ctx.config.get('review/metric'),
"originalFeatureColumns": model_features,
"model_type": self.ctx.config.get("model_type")
}
diff --git a/a2ml/api/model_review/model_helper.py b/a2ml/api/model_review/model_helper.py
index 3b772ddf..64ceeb18 100644
--- a/a2ml/api/model_review/model_helper.py
+++ b/a2ml/api/model_review/model_helper.py
@@ -449,6 +449,7 @@ def create_model_options_file(options_path, scoring, target_column, task_type, b
options["classification"] = True
options['binaryClassification'] = True if binary_classification else False
+ options['external_model'] = True
fsclient.write_json_file(options_path, options)
return options
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 476b4215..6ae47c0a 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -139,7 +139,7 @@ def add_actuals(
):
ds_actuals = DataFrame.create_dataframe(actuals_path, data, features=columns)
- if external_model:
+ if external_model or self.options.get('external_model'):
options = self.options.copy()
if 'hub_info' in options:
diff --git a/a2ml/cmdl/template/config.yaml b/a2ml/cmdl/template/config.yaml
index f400eed1..2c7992b3 100644
--- a/a2ml/cmdl/template/config.yaml
+++ b/a2ml/cmdl/template/config.yaml
@@ -56,6 +56,19 @@ experiment:
#validation_data:
review:
+ # Optional metric used for MLRAM review, can be any experiment metric + roi. By default same as experiment metric
+ #metric: accuracy
+
+ # ROI calculation
+ # Filter and formulas can contain any fields from actuals. P - predicted value, A- actual.
+ roi:
+ # Filter syntax - see Pandas.DataFrame.query method: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#indexing-query
+ #filter: P=1
+ # Revenue can contain formuala for calculating revenue based on fields from actual. See ROI formulas language for the syntax
+ #revenue: "@if(A=True,1050, 0)"
+ # Investment can contain formuala for calculating investment based on fields from actual. See ROI formulas language for the syntax
+ #investment: 1000
+
alert:
# Activate/Deactivate Review Alert
active: True
diff --git a/a2ml/tasks_queue/tasks_hub_api.py b/a2ml/tasks_queue/tasks_hub_api.py
index b4d2cae3..7ed04aad 100644
--- a/a2ml/tasks_queue/tasks_hub_api.py
+++ b/a2ml/tasks_queue/tasks_hub_api.py
@@ -188,8 +188,7 @@ def _read_hub_experiment_session(ctx, params):
evaluation_options.get('max_n_trials', 100))
ctx.config.set('experiment/use_ensemble',
evaluation_options.get('use_ensemble', True))
- ctx.config.set('experiment/metric',
- evaluation_options.get('scoring'), provider)
+ ctx.config.set('experiment/metric', evaluation_options.get('scoring'))
if evaluation_options.get('algorithms_to_exlude'):
ctx.config.set('experiment/blocked_models', evaluation_options.get('algorithms_to_exlude'))
@@ -197,6 +196,8 @@ def _read_hub_experiment_session(ctx, params):
ctx.config.set('experiment/allowed_models', evaluation_options.get('allowed_algorithms'))
if evaluation_options.get('exit_score'):
ctx.config.set('experiment/exit_score', evaluation_options.get('exit_score'))
+ if evaluation_options.get('score_top_count'):
+ ctx.config.set('experiment/score_top_count', evaluation_options.get('score_top_count'))
return ctx
diff --git a/docs/source/dev/configuration.rst b/docs/source/dev/configuration.rst
index e34561f9..b496a814 100644
--- a/docs/source/dev/configuration.rst
+++ b/docs/source/dev/configuration.rst
@@ -23,7 +23,7 @@ All Providers
target:
model_type:
experiment:
- metric:
+ metric:
cross_validation_folds:
max_total_time:
max_eval_time:
@@ -31,6 +31,22 @@ All Providers
use_ensemble:
validation_source:
+ review:
+ metric:
+
+ roi:
+ filter:
+ revenue:
+ investment:
+
+ alert:
+ active: True
+ type: model_accuracy
+ threshold: 0.7
+ sensitivity: 72
+ action: retrain_deploy
+ notification: user
+
**Attributes**
* **name** The project name.
@@ -55,6 +71,34 @@ All Providers
* **experiment.use_ensemble** Try to improve model performance by creating ensembles from the trial models true | false.
* **experiment.validation_source** Path to validation dataset. If not set your source dataset will be split to validate.
+ * **review.metric** Optional metric used for MLRAM review, can be any experiment metric + roi. By default same as experiment metric
+
+ * **review.roi.filter** Filter syntax - see <a href="https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#indexing-query" target="_blank">Pandas.DataFrame.query</a>
+ * **review.roi.revenue** Revenue can contain formuala for calculating revenue based on fields from actual. See ROI formulas language for the syntax
+ * **review.roi.investment** Investment can contain formuala for calculating investment based on fields from actual. See ROI formulas language for the syntax
+
+ * **Filter and formulas special fields**
+ * **P** predicted value
+ * **A** actual value
+
+ * **review.alert.active** Activate/Deactivate Review Alert (True/False)
+ * **review.alert.type**
+
+ * **Supported Review Alert types**
+ * **model_accuracy** Decrease in Model Accuracy: the model accuracy threshold allowed before trigger is initiated. Default threshold: 0.7. Default sensitivity: 72
+ * **feature_average_range** Feature Average Out-Of-Range: Trigger an alert if average feature value during time period goes beyond the standard deviation range calculated during training period by the specified number of times or more. Default threshold: 1. Default sensitivity: 168
+ * **runtime_errors_burst** Burst Of Runtime Errors: Trigger an alert if runtime error count exceeds threshold. Default threshold: 5. Default sensitivity: 1
+
+ * **review.alert.threshold** Float
+ * **review.alert.sensitivity** The amount of time(in hours) this metric must be at or below the threshold to trigger the alert.
+ * **review.alert.action**
+
+ * **Supported Review Alert actions**
+ * **no** no action should be executed
+ * **retrain** Use new predictions and actuals as test set to retrain the model.
+ * **retrain_deploy** Deploy retrained model and make it active model of this endpoint.
+
+ * **review.alert.notification** Send message via selected notification channel. (no/user/organization)
Provider Specfic
----------------
@@ -85,21 +129,13 @@ Auger
estimate_trial_time: False
trials_per_worker: 2
class_weight:
+ score_top_count:
oversampling:
name:
params:
sampling_strategy:
k_neighbors:
- review:
- alert:
- active: True
- type: model_accuracy
- threshold: 0.7
- sensitivity: 72
- action: retrain_deploy
- notification: user
-
**Attributes**
* **dataset** Name of the DataSet on Auger Cloud.
@@ -118,6 +154,7 @@ Auger
* **experiment.estimate_trial_time** Use it if you have a lot of timeouted trials. Set it to True will predict the training time of each individual model to avoid timeouts. Default is False.
* **experiment.trials_per_worker** Use it if you have a lot of failed trials. Set it to value < 8 to give trial fit process more memory. Default is None.
* **experiment.class_weight** Balanced | Balanced Subsample. Class Weights associated with classes. If None, all classes are supposed to have weight one. The Balanced mode automatically adjusts weights inversely proportional to class frequencies in the input data. The Balanced Subsample mode is the same as Balanced except that weights are computed based on the bootstrap sample for every tree grown.
+ * **experiment.score_top_count** Number of top N values(sorted in descending order) to calculate metrics while train values. For regression only.
* **experiment.oversampling.name** SMOTE, RandomOverSampler, ADASYN, SMOTEENN, SMOTETomek. Oversampling Methods to adjust the class distribution of a data set
* **experiment.oversampling.params.sampling_strategy** auto, minority, majority, not minority, not majority, all
* **experiment.oversampling.params.k_neighbors** Integer value of k_neighbors
@@ -125,25 +162,6 @@ Auger
.. note::
For more information on |oversampling|
-
- * **review.alert.active** Activate/Deactivate Review Alert (True/False)
- * **review.alert.type**
-
- * **Supported Review Alert types**
- * **model_accuracy** Decrease in Model Accuracy: the model accuracy threshold allowed before trigger is initiated. Default threshold: 0.7. Default sensitivity: 72
- * **feature_average_range** Feature Average Out-Of-Range: Trigger an alert if average feature value during time period goes beyond the standard deviation range calculated during training period by the specified number of times or more. Default threshold: 1. Default sensitivity: 168
- * **runtime_errors_burst** Burst Of Runtime Errors: Trigger an alert if runtime error count exceeds threshold. Default threshold: 5. Default sensitivity: 1
-
- * **review.alert.threshold** Float
- * **review.alert.sensitivity** The amount of time(in hours) this metric must be at or below the threshold to trigger the alert.
- * **review.alert.action**
-
- * **Supported Review Alert actions**
- * **no** no action should be executed
- * **retrain** Use new predictions and actuals as test set to retrain the model.
- * **retrain_deploy** Deploy retrained model and make it active model of this endpoint.
-
- * **review.alert.notification** Send message via selected notification channel. (no/user/organization)
Azure
^^^^^
diff --git a/docs/source/dev/mlram.rst b/docs/source/dev/mlram.rst
index 009bcd38..1b8204a5 100644
--- a/docs/source/dev/mlram.rst
+++ b/docs/source/dev/mlram.rst
@@ -28,6 +28,8 @@ Self-hosted model
metric: <metric to calculate using actuals>
review:
+ roi:
+ <See configuration section>
alert:
<See configuration section>
@@ -38,7 +40,7 @@ Self-hosted model
ctx = Context()
a2ml = A2ML(ctx)
result = a2ml.deploy(model_id=None, name="My self-hosted model.", algorithm="RandomForest", score=0.76)
- model_id = result['model_id']
+ model_id = result['data']['model_id']
4. Send actuals:
diff --git a/docs/source/dev/roi_language.rst b/docs/source/dev/roi_language.rst
index 079bbd61..216115c3 100644
--- a/docs/source/dev/roi_language.rst
+++ b/docs/source/dev/roi_language.rst
@@ -1,5 +1,6 @@
+*************
ROI formulas language
-=====================
+*************
Syntax
-----------------
diff --git a/docs/source/index.rst b/docs/source/index.rst
index dbcf96b6..9f067909 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -13,6 +13,7 @@
dev/mlram
dev/api
dev/cli
+ dev/roi_language
dev/advanced
diff --git a/setup.py b/setup.py
index bdedc7de..57c5344c 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ def run(self):
'scipy==1.5.2',
'asyncio',
'boto3',
- 'auger-hub-api-client==0.7.2',
+ 'auger-hub-api-client==0.7.3',
'click',
'shortuuid',
'docutils<0.16,>=0.10',
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-02-04T13:25:25 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-517 | a0642e5bff383cf36af8d3c5416f92626d735c0f | diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 8cd62058..83dbc5fe 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -205,7 +205,7 @@ def build_review_data(self, data_path=None, output=None, date_col=None):
wild=True, remove_folder_name=False, meta_info=True
)
- all_files.sort(key=lambda f: f['last_modified'], reverse=True)
+ all_files.sort(key=lambda f: f['path'][0:10], reverse=True)
if date_col and date_col in train_features:
new_files = []
@@ -284,6 +284,17 @@ def distribution_chart_stats(self, date_from, date_to):
date_from, date_to, "_*_data.feather.zstd", features, categoricalFeatures, mapper
)
+ if not self.options.get('data_path'):
+ all_files = []
+ date_stat = convert_to_date(date_to) - datetime.timedelta(days=1)
+ for (curr_date, files) in ModelReview._prediction_files_by_day(self.model_path, None,
+ date_stat, "_*_data.feather.zstd"):
+ all_files += files
+
+ base_stat = ModelReview._get_distribution_stats_files(all_files, features, categoricalFeatures, mapper)
+ if base_stat:
+ actuals_stats['base_stat'] = base_stat
+
return actuals_stats
def set_support_review_model_flag(self, flag_value):
@@ -302,58 +313,70 @@ def clear_model_results_and_actuals(self):
fsclient.remove_folder(os.path.join(self.model_path, "predictions"))
return True
- def _distribution_stats(self, date_from, date_to, path_suffix, features,
- categoricalFeatures=[], feature_mapper={}):
- res = {}
- feature_importances = self.get_feature_importances()
+ @staticmethod
+ def _get_distribution_stats_files(files, features, categoricalFeatures=[], feature_mapper={},
+ feature_importances={}):
- for (curr_date, files) in ModelReview._prediction_files_by_day(self.model_path, date_from, date_to, path_suffix):
- stats = {}
+ if not files:
+ return None
+
+ stats = {}
+ for feature in features:
+ stats[feature] = {
+ 'count': 0,
+ 'sum': 0,
+ 'sq_sum': 0,
+ 'dist': None,
+ 'imp': feature_importances.get(feature, 0)
+ }
+
+ df_list = []
+ for (file, df) in DataFrame.load_from_files(files):
+ df_list.append(df)
+
+ # First pass: calc sum and count in each column for average
+ for df in df_list:
for feature in features:
- stats[feature] = {
- 'count': 0,
- 'sum': 0,
- 'sq_sum': 0,
- 'dist': None,
- 'imp': feature_importances.get(feature, 0)
- }
+ if feature in df.columns:
+ stats[feature]['count'] += df.df[feature].count()
+
+ if df.df[feature].dtype.name in ['category', 'string', 'object'] or \
+ feature in categoricalFeatures:
+ stats[feature]['dist'] = merge_dicts(
+ stats[feature]['dist'] or {},
+ dict(df.df[feature].value_counts()),
+ lambda v, ov: v + ov
+ )
+ else:
+ stats[feature]['sum'] += df.df[feature].sum()
+
+ # Calc average
+ for feature in features:
+ if stats[feature]['count'] > 0 and stats[feature]['dist'] == None:
+ stats[feature]['average'] = stats[feature]['sum'] / stats[feature]['count']
- df_list = []
- for (file, df) in DataFrame.load_from_files(files):
- df_list.append(df)
-
- # First pass: calc sum and count in each column for average
- for df in df_list:
- for feature in features:
- if feature in df.columns:
- stats[feature]['count'] += df.df[feature].count()
-
- if df.df[feature].dtype.name in ['category', 'string', 'object'] or \
- feature in categoricalFeatures:
- stats[feature]['dist'] = merge_dicts(
- stats[feature]['dist'] or {},
- dict(df.df[feature].value_counts()),
- lambda v, ov: v + ov
- )
- else:
- stats[feature]['sum'] += df.df[feature].sum()
-
- # Calc average
+ # Second pass: sum of squares of value and average for std dev
+ for df in df_list:
for feature in features:
- if stats[feature]['count'] > 0 and stats[feature]['dist'] == None:
- stats[feature]['average'] = stats[feature]['sum'] / stats[feature]['count']
+ if 'average' in stats[feature] and feature in df.columns:
+ avg = stats[feature]['average']
+ stats[feature]['sq_sum'] += ((df.df[feature] - avg)**2).sum()
- # Second pass: sum of squares of value and average for std dev
- for df in df_list:
- for feature in features:
- if 'average' in stats[feature] and feature in df.columns:
- avg = stats[feature]['average']
- stats[feature]['sq_sum'] += ((df.df[feature] - avg)**2).sum()
+ # Calc std dev
+ return ModelReview._calc_stddev_for_features(stats, features, feature_mapper)
+ def _distribution_stats(self, date_from, date_to, path_suffix, features,
+ categoricalFeatures=[], feature_mapper={}):
+ res = {}
+ feature_importances = self.get_feature_importances()
+
+ for (curr_date, files) in ModelReview._prediction_files_by_day(self.model_path, date_from, date_to, path_suffix):
+
+ stats = ModelReview._get_distribution_stats_files(files, features, categoricalFeatures, feature_mapper, feature_importances)
# Calc std dev
- if len(files) > 0:
- res[str(curr_date)] = ModelReview._calc_stddev_for_features(stats, features, feature_mapper)
+ if stats:
+ res[str(curr_date)] = stats
return res
@@ -396,17 +419,38 @@ def _calc_stddev_for_features(stats, features, feature_mapper):
return res
+ @staticmethod
+ def _get_first_date_from_files(path):
+ all_files = fsclient.list_folder(path,
+ wild=True, remove_folder_name=True, meta_info=False
+ )
+ first_date = None
+ if all_files:
+ all_files.sort(key=lambda f: f[0:10], reverse=False)
+ idxDate = all_files[0].find("_")
+ if idxDate:
+ first_date = all_files[0][0:idxDate]
+
+ return first_date
+
@staticmethod
def _prediction_files_by_day(model_path, date_from, date_to, path_suffix):
- if (date_from and not date_to) or (not date_from and date_to):
+ if (date_from and not date_to):# or (not date_from and date_to):
# TODO: list all files by suffix, sort them by prefix date and return range of files
- raise Exception("Arguments error: please provide both start and end dates or do not pass any.")
+ raise Exception("Arguments error: please provide both start and end dates or date_to only or do not pass any.")
- if date_from:
- date_from = convert_to_date(date_from)
- date_to = convert_to_date(date_to)
+ if date_from or date_to:
+ if date_from:
+ date_from = convert_to_date(date_from)
+ else:
+ date_from = ModelReview._get_first_date_from_files(
+ os.path.join(model_path, "predictions/*" + path_suffix))
+ if not date_from:
+ return
+ date_from = convert_to_date(date_from)
curr_date = date_from
+ date_to = convert_to_date(date_to)
while curr_date <= date_to:
path = os.path.join(model_path, "predictions/" + str(curr_date) + "*" + path_suffix)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-01-28T17:39:56 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-516 | 6e883a36804efcc266009cf2af9d3d5b4649ca86 | diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 83dbc5fe..f29ff13e 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -8,6 +8,7 @@
from a2ml.api.utils import get_uid, convert_to_date, merge_dicts, fsclient
from a2ml.api.utils.dataframe import DataFrame
from a2ml.api.a2ml import A2ML, Context
+import a2ml.api.utils.roi_calc as roi_calc
from .model_helper import ModelHelper
from .probabilistic_counter import ProbabilisticCounter
@@ -67,36 +68,53 @@ def _do_score_actual(self, df_data, predicted_feature=None):
res = ModelHelper.calculate_scores(self.options, y_test=y_true, y_pred=y_pred, raise_main_score=False)
- res['roi'] = self._calculate_roi(df_data, predicted_feature)
+ roi = self._calculate_roi(df_data, predicted_feature)
+ if roi != None:
+ res['roi'] = roi
return res
- def _calculate_roi(self, df_data, predicted_feature=None):
- if not self.params.get('roi'):
- return 0.0
+ def validate_roi_syntax(self, expressions):
+ res = []
+ known_vars = ["A", "P", self.target_feature] + self.original_features
+
+ for expression in expressions:
+ validation_result = roi_calc.Parser(expression).validate(known_vars=known_vars)
+ res.append(
+ {
+ "expression": expression,
+ "is_valid": validation_result.is_valid,
+ "error": validation_result.error,
+ }
+ )
- data_filter = self.params['roi']['filter']
- revenue = self.params['roi']['revenue']
- investment = self.params['roi']['investment']
+ return res
- #TODO: replace P to target, A to a2ml_actual
+ def _calculate_roi(self, df_data, predicted_feature=None):
+ if not self.params.get('roi'):
+ return None
- df_filtered = df_data.query(data_filter)
+ predicted_feature = predicted_feature or self.target_feature
+ known_vars = [predicted_feature] + self.original_features
- investment_value = 1.0
- revenue_value = 1.0
+ calc = roi_calc.Calculator(
+ filter=self.params['roi']['filter'],
+ revenue=self.params['roi']['revenue'],
+ investment=self.params['roi']['investment'],
+ known_vars=known_vars,
+ vars_mapping={"A": "a2ml_actual", "P": predicted_feature},
+ )
- #TODO: perform operations
+ res = calc.calculate(df_data)
+ return res["roi"]
- return (revenue_value-investment_value)/investment_value
-
def add_external_model(self, target_column, scoring, task_type, binary_classification):
ModelHelper.create_model_options_file(
options_path=self.options_path,
scoring=scoring,
target_column=target_column,
task_type=task_type,
- binary_classification = binary_classification,
+ binary_classification=binary_classification,
)
self._load_options()
diff --git a/a2ml/api/utils/roi_calc.py b/a2ml/api/utils/roi_calc.py
new file mode 100644
index 00000000..ecf6e0af
--- /dev/null
+++ b/a2ml/api/utils/roi_calc.py
@@ -0,0 +1,589 @@
+import json
+import pandas as pd
+
+PLUS = "+"
+MINUS = "-"
+MULTIPLICATION = "*"
+DIVISION = "/"
+
+OPENING_BRACKET = "("
+CLOSING_BRACKET = ")"
+
+COMMA = ","
+AT = "@"
+DOLLAR = "$"
+DOT = "."
+UNDERSCORE = "_"
+QUOT = '"'
+
+LT = "<"
+EQ = "="
+GT = ">"
+EXCLAMATION = "!"
+
+NOT = "not"
+OR = "or"
+AND = "and"
+
+OPERATORS = set([PLUS, MINUS, MULTIPLICATION, DIVISION])
+BRACKETS = set([OPENING_BRACKET, CLOSING_BRACKET])
+COMPARISON_SYMBOLS = set([LT, EQ, GT, EXCLAMATION])
+SYMBOLS = set([COMMA])
+WHITESPACES = set([" ", "\t", "\n", "\r"])
+NAME_PART = set([AT, DOLLAR, UNDERSCORE, QUOT])
+
+COMPARISON_OPS = set(["<", ">", "<=", ">=", "=", "!="])
+
+def is_digit(c):
+ return c >= '0' and c <= '9'
+
+def is_quot(c):
+ return c == QUOT
+
+def is_decimal_separator(c):
+ return c == DOT
+
+def is_opearator(c):
+ return c in OPERATORS
+
+def is_comparison_sym(c):
+ return c in COMPARISON_SYMBOLS
+
+def is_round_bracket(c):
+ return c in BRACKETS
+
+def is_name(c):
+ return (c >= 'A' and c <= 'Z') or (c >= 'a' and c <= 'z') or c in NAME_PART
+
+def is_symbol(c):
+ return c in SYMBOLS
+
+def is_whitespaces(c):
+ return c in WHITESPACES
+
+class AstError(Exception):
+ def __init__(self, msg, position=None):
+ self.position = position
+ super().__init__(msg)
+
+class LexerError(AstError):
+ pass
+
+class Lexer:
+ def __init__(self, str):
+ self.str = str
+ self.reset()
+
+ def reset(self):
+ self.offset = 0
+ self._curr_token = None
+ self.prev_token = None
+
+ @property
+ def curr_token(self):
+ return self._curr_token
+
+ @curr_token.setter
+ def curr_token(self, value):
+ self.prev_token = self.curr_token
+ self._curr_token = value
+
+ def done(self):
+ return self.offset == len(self.str)
+
+ def next_token(self):
+ if self.done():
+ return None
+
+ c = self.str[self.offset]
+ self.offset += 1
+
+ while self.offset < len(self.str) and is_whitespaces(c):
+ c = self.str[self.offset]
+ self.offset += 1
+
+ if is_opearator(c) or is_round_bracket(c) or is_symbol(c):
+ self.curr_token = c
+ return self.curr_token
+
+ token = [c]
+
+ if is_digit(c):
+ while self.offset < len(self.str) and (is_digit(self.str[self.offset]) or is_decimal_separator(self.str[self.offset])):
+ token.append(self.str[self.offset])
+ self.offset += 1
+
+ self.curr_token = "".join(token)
+ return self.curr_token
+
+ if is_name(c):
+ while self.offset < len(self.str) and (is_name(self.str[self.offset]) or is_digit(self.str[self.offset])):
+ token.append(self.str[self.offset])
+ self.offset += 1
+
+ self.curr_token = "".join(token)
+ return self.curr_token
+
+ if is_comparison_sym(c):
+ while self.offset < len(self.str) and is_comparison_sym(self.str[self.offset]):
+ token.append(self.str[self.offset])
+ self.offset += 1
+
+ self.curr_token = "".join(token)
+ return self.curr_token
+
+ if self.done():
+ return None
+ else:
+ raise LexerError(f"unknown character '{c}'")
+
+ def next_token_preview(self):
+ curr_token = self.curr_token
+ offset = self.offset
+
+ next_token = self.next_token()
+
+ self.curr_token = curr_token
+ self.offset = offset
+
+ return next_token
+
+ def all_tokens(self):
+ res = []
+
+ while not self.done():
+ res.append(self.next_token())
+
+ return res
+
+class ParserError(AstError):
+ pass
+
+class ExpressionError(AstError):
+ pass
+
+class BaseNode:
+ def __init__(self, position):
+ self.position = position
+
+ def has_variables(self):
+ return False
+
+ def evaluate_sum(self, rows, vars_mapping={}):
+ res = self.evaluate(rows, vars_mapping)
+ return sum(res)
+
+class ConstNode(BaseNode):
+ def __init__(self, value, position=None):
+ super().__init__(position)
+
+ if isinstance(value, str) and is_quot(value[0]) and is_quot(value[-1]):
+ self.value = value[1:-1]
+ elif isinstance(value, float) or isinstance(value, str):
+ try:
+ number = float(value)
+ except ValueError as e:
+ raise ParserError(str(e), position=self.position)
+
+ if number.is_integer():
+ number = int(number)
+
+ self.value = number
+ elif value == True or value == False:
+ self.value = value
+ else:
+ raise ParserError(f"unsupported const type: '{value}'", position=self.position)
+
+ def evaluate(self, rows, vars_mapping={}):
+ return [self.value] * len(rows)
+
+ def validate(self, known_vars):
+ return True
+
+ def __str__(self):
+ if isinstance(self.value, str):
+ return '"' + str(self.value) + '"'
+ else:
+ return str(self.value)
+
+class VariableNode(BaseNode):
+ def __init__(self, name, position=None):
+ super().__init__(position)
+
+ if name.startswith("$"):
+ self.name = name[1:]
+ else:
+ self.name = name
+
+ def evaluate(self, rows, vars_mapping={}):
+ res = []
+
+ for index, variables in enumerate(rows):
+ var_name = vars_mapping.get(self.name, self.name)
+ if var_name in variables:
+ res.append(variables[var_name])
+ else:
+ row = json.dumps(variables)
+
+ unknown_var = [self.name]
+ if self.name != var_name:
+ unknown_var.append(var_name)
+ unknown_var = " -> ".join(unknown_var)
+
+ raise ParserError(
+ f"unknown variable: '{unknown_var}' in row #{index} '{row}'",
+ position=self.position,
+ )
+ return res
+
+ def validate(self, known_vars):
+ if self.name in known_vars:
+ return True
+ else:
+ raise ParserError(f"unknown variable '{self.name}'", position=self.position)
+
+ def has_variables(self):
+ return True
+
+ def __str__(self):
+ return self.name
+
+class OperationNode(BaseNode):
+ def __init__(self, operator, left, right=None, position=None):
+ super().__init__(position)
+
+ self.operator = operator
+ self.left = left
+ self.right = right
+
+ def evaluate(self, rows, vars_mapping={}):
+ op1 = self.left.evaluate(rows, vars_mapping)
+ op2 = self.right.evaluate(rows, vars_mapping)
+
+ if self.operator == PLUS:
+ return [a + b for a, b in zip(op1, op2)]
+
+ if self.operator == MINUS:
+ return [a - b for a, b in zip(op1, op2)]
+
+ if self.operator == MULTIPLICATION:
+ return [a * b for a, b in zip(op1, op2)]
+
+ if self.operator == DIVISION:
+ return [a / b for a, b in zip(op1, op2)]
+
+ def has_variables(self):
+ return self.left.has_variables() or self.right.has_variables()
+
+ def validate(self, known_vars):
+ return self.left.validate(known_vars) and self.right.validate(known_vars)
+
+ def __str__(self):
+ return "(" + str(self.left) + " " + str(self.operator) + " " + str(self.right) + ")"
+
+class FuncNode(BaseNode):
+ def __init__(self, arg_nodes, func, position=None):
+ super().__init__(position)
+
+ self.arg_nodes = arg_nodes
+ self.func = func
+
+ def evaluate(self, rows, vars_mapping={}):
+ args_list = [list(map(lambda node: node.evaluate([row], vars_mapping)[0], self.arg_nodes)) for row in rows]
+
+ return [self.func(*args) for args in args_list]
+
+ def has_variables(self):
+ return any(map(lambda n: n.has_variables(), self.arg_nodes))
+
+ def validate(self, known_vars):
+ return all(map(lambda n: n.validate(known_vars), self.arg_nodes))
+
+ def __str__(self):
+ return str(self.func.__name__) + "(" + ", ".join(map(str, self.arg_nodes)) + ")"
+
+class Parser:
+ class ValidationResult:
+ def __init__(self, is_valid=True, error=None):
+ self.is_valid = is_valid
+ self.error = error
+
+ @staticmethod
+ def lt(a, b):
+ return a < b
+
+ def le(a, b):
+ return a <= b
+
+ @staticmethod
+ def eq(a, b):
+ return a == b
+
+ @staticmethod
+ def gt(a, b):
+ return a > b
+
+ @staticmethod
+ def ge(a, b):
+ return a >= b
+
+ @staticmethod
+ def ne(a, b):
+ return a != b
+
+ @staticmethod
+ def logic_and(a, b):
+ return a and b
+
+ @staticmethod
+ def logic_or(a, b):
+ return a or b
+
+ @staticmethod
+ def logic_not(a):
+ return not a
+
+ @staticmethod
+ def logic_if(predicate, true_value, false_value):
+ if predicate:
+ return true_value
+ else:
+ return false_value
+
+ FUNC_VALUES = {
+ # Math
+ "min": min,
+ "max": max,
+ # Comparison
+ "<": lt.__get__(object),
+ "<=": le.__get__(object),
+ "=": eq.__get__(object),
+ ">": gt.__get__(object),
+ ">=": ge.__get__(object),
+ "!=": ne.__get__(object),
+ # Logical
+ "and": logic_and.__get__(object),
+ "or": logic_or.__get__(object),
+ "not": logic_not.__get__(object),
+ "@if": logic_if.__get__(object)
+ }
+
+ CONST_VALUES = {
+ "True": True,
+ "False": False,
+ }
+
+ def __init__(self, str, const_values=None, func_values=None):
+ self.lexer = Lexer(str)
+ self.const_values = const_values or self.CONST_VALUES
+ self.func_values = func_values or self.FUNC_VALUES
+
+ def parse(self):
+ return self.parse_logic_expression()
+
+ def validate(self, force_raise=False, known_vars=[]):
+ try:
+ tree = self.parse()
+ tree.validate(known_vars=known_vars)
+
+ if not self.lexer.done():
+ raise ParserError("is not completely parsed")
+ else:
+ self.lexer.reset()
+ return self.ValidationResult()
+ except (AstError, ValueError) as e:
+ if force_raise:
+ raise e
+ else:
+ position = getattr(e, 'position', None) or self.lexer.offset
+ return self.ValidationResult(False, str(e) + f" at position {position}")
+
+ def parse_logic_expression(self, next_token=True):
+ left = self.parse_comparison(next_token)
+ node = None
+
+ while True:
+ token = self.lexer.curr_token
+
+ if token == OR:
+ func = self.func_values[token]
+ node = FuncNode(func=func, arg_nodes=[node or left, self.parse_logic_expression(next_token)])
+ elif token == AND:
+ func = self.func_values[token]
+ node = FuncNode(func=func, arg_nodes=[node or left, self.parse_comparison(next_token)])
+ else:
+ return node or left
+
+ def parse_comparison(self, next_token):
+ left = self.parse_sum(next_token)
+ node = None
+
+ while True:
+ token = self.lexer.curr_token
+
+ if token in COMPARISON_OPS:
+ if node:
+ left = node
+
+ func = self.func_values[token]
+ node = FuncNode(func=func, arg_nodes=[left, self.parse_sum(next_token)])
+ else:
+ return node or left
+
+ def parse_sum(self, next_token):
+ left = self.parse_product(next_token)
+ node = None
+
+ while True:
+ token = self.lexer.curr_token
+
+ if token == PLUS or token == MINUS:
+ if node:
+ node = OperationNode(operator=token[0], left=node, position=self.lexer.offset)
+ else:
+ node = OperationNode(operator=token[0], left=left, position=self.lexer.offset)
+
+ node.right = self.parse_product(next_token)
+ else:
+ if node:
+ return node
+ else:
+ return left
+
+ def parse_product(self, next_token):
+ left = self.parse_term(next_token)
+ node = None
+
+ while True:
+ token = self.lexer.curr_token
+
+ if token == MULTIPLICATION or token == DIVISION:
+ if node:
+ node = OperationNode(operator=token[0], left=node, position=self.lexer.offset)
+ else:
+ node = OperationNode(operator=token[0], left=left, position=self.lexer.offset)
+
+ node.right = self.parse_term(next_token)
+ else:
+ if node:
+ return node
+ else:
+ return left
+
+ def parse_term(self, next_token):
+ if next_token:
+ token = self.lexer.next_token()
+ else:
+ token = self.lexer.curr_token
+
+ if token == None:
+ raise ParserError(f"unexpected end of expression")
+
+ if len(token) == 0:
+ raise ParserError("invalid token: " + token)
+
+ if is_digit(token[0]) or is_quot(token[0]):
+ self.lexer.next_token()
+ return ConstNode(token, position=self.lexer.offset - len(token))
+
+ if len(token) > 1 and token[0] == DOLLAR and is_digit(token[1]):
+ self.lexer.next_token()
+ return ConstNode(token[1:], position=self.lexer.offset - len(token))
+
+ if is_name(token[0]):
+ func_token = token
+ func = self.func_values.get(token)
+
+ if func:
+ token = self.lexer.next_token()
+ if token == OPENING_BRACKET:
+ arg_nodes = [self.parse_logic_expression()]
+
+ while self.lexer.curr_token == COMMA:
+ arg_nodes.append(self.parse_logic_expression())
+
+ node = FuncNode(arg_nodes=arg_nodes, func=func)
+
+ if self.lexer.curr_token != CLOSING_BRACKET:
+ raise ParserError(") is expected, got:" + token)
+ else:
+ self.lexer.next_token()
+ if len(token) == 0:
+ raise ParserError("invalid token: " + token)
+
+ return node
+
+ return node
+ elif self.lexer.prev_token == NOT:
+ # Do not read next token because it's already read
+ return FuncNode(arg_nodes=[self.parse_logic_expression(False)], func=func)
+ else:
+ raise ParserError("( is expected, got:" + token)
+ else:
+ if token in self.const_values:
+ self.lexer.next_token()
+ return ConstNode(self.const_values.get(token), position=self.lexer.offset - len(token))
+ elif token == NOT:
+ func = self.func_values[token]
+ return FuncNode(func=func, arg_nodes=[self.parse_logic_expression()])
+ else:
+ self.lexer.next_token()
+
+ if self.lexer.curr_token == OPENING_BRACKET:
+ raise ParserError(f"unknown function '{func_token}'")
+ else:
+ return VariableNode(name=token, position=self.lexer.offset - len(token))
+
+ if token == OPENING_BRACKET:
+ node = self.parse_logic_expression()
+ token = self.lexer.curr_token
+
+ if token != CLOSING_BRACKET:
+ raise ParserError(") is expected, got:" + token)
+ else:
+ self.lexer.next_token()
+ if len(token) == 0:
+ raise ParserError("invalid token: " + token)
+
+ return node
+
+ raise ParserError("term is expected, got: " + token)
+
+class Calculator:
+ def __init__(self, revenue=None, investment=None, filter=None, known_vars=[], vars_mapping={}):
+ self.revenue = revenue
+ self.investment = investment
+ self.filter = filter
+ self.known_vars = known_vars + list(vars_mapping.keys())
+ self.vars_mapping = vars_mapping
+
+ self.revenue_ast = self.build_ast(self.revenue)
+ self.investment_ast = self.build_ast(self.investment)
+ self.filter_ast = self.build_ast(self.filter)
+
+ def build_ast(self, expression):
+ if expression:
+ parser = Parser(expression)
+ parser.validate(force_raise=True, known_vars=self.known_vars)
+ return parser.parse()
+
+ def calculate(self, rows):
+ if isinstance(rows, pd.DataFrame):
+ rows = list(map(lambda x: x[1].to_dict(), rows.iterrows()))
+
+ filtered_rows = rows
+
+ if self.filter_ast:
+ filtered_rows = [row for row, marked in zip(rows, self.filter_ast.evaluate(rows, self.vars_mapping)) if marked]
+
+ revenue = self.revenue_ast.evaluate_sum(filtered_rows, self.vars_mapping)
+ investment = self.investment_ast.evaluate_sum(filtered_rows, self.vars_mapping)
+ roi = (revenue - investment) / investment
+
+ return {
+ "count": len(filtered_rows),
+ "filtered_rows": filtered_rows,
+ "revenue": revenue,
+ "investment": investment,
+ "roi": roi,
+ }
diff --git a/a2ml/tasks_queue/tasks_hub_api.py b/a2ml/tasks_queue/tasks_hub_api.py
index 32053331..b4d2cae3 100644
--- a/a2ml/tasks_queue/tasks_hub_api.py
+++ b/a2ml/tasks_queue/tasks_hub_api.py
@@ -485,7 +485,7 @@ def add_external_model_task(params):
target_column=params.get('target_column'),
scoring=params.get('scoring'),
task_type=params.get('task_type'),
- binary_classification=params.get('binary_classification'),
+ binary_classification=params.get('binary_classification', False),
)
@celeryApp.task(ignore_result=True)
@@ -677,3 +677,10 @@ def presign_s3_url_task(params):
expires_in=params.get('expires_in'),
max_content_length=params.get('max_content_length'),
)
+
[email protected](ignore_result=True)
+@process_task_result
+def validate_roi_syntax_task(params):
+ return ModelReview(params).validate_roi_syntax(
+ expressions=params["expressions"],
+ )
| ROI: support predefined formulas with parameters
see https://docs.google.com/presentation/d/14aujTnwaB1d4nroandZgNsx2xF50o6MvQc31brd1eDw/edit#slide=id.gb263dfda1b_0_43
slide 10.
Let's use Excel syntax: =count()
I think it make sense to implement in a2ml, just calculate as metrics
Possible Ruby parsers:
here's a potential formula parser that could be used: https://github.com/rubysolo/dentaku
https://github.com/kschiess/parslet
| 2021-01-28T14:18:36 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-514 | ac76bc978f86d5b9b4cc9344b59acae2103d6c90 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index 16aaff5b..35e8e7c4 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.01'
+__version__ = '1.0.02'
diff --git a/a2ml/api/auger/impl/cloud/dataset.py b/a2ml/api/auger/impl/cloud/dataset.py
index bab1dd8f..6419bfda 100644
--- a/a2ml/api/auger/impl/cloud/dataset.py
+++ b/a2ml/api/auger/impl/cloud/dataset.py
@@ -194,6 +194,13 @@ def _upload_to_multi_tenant(self, file_to_upload):
'HTTP error [%s] "%s" while uploading file'
' to Auger Cloud...' % (res.status_code, res.content))
- def _get_data_set_name(self, file_name):
- fname, fext = os.path.splitext(file_name)
- return self._get_uniq_object_name(fname, fext)
+ def _get_data_set_name(self, filename):
+ dot_index = filename.find('.')
+ if dot_index>=0:
+ fname = filename[:dot_index]
+ fext = filename[dot_index:]
+ else:
+ fname = filename
+
+ # fname, fext = os.path.splitext(file_name)
+ return self._get_uniq_object_name(fname, "")
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 4239b1d7..8cd62058 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -133,7 +133,7 @@ def add_actuals(
missing_features = set(self.original_features) - set(ds_actuals.columns)
if len(missing_features) > 0:
missing_features = ', '.join(sorted(list(missing_features)))
- raise Exception(f'missing features to make prediction: {missing_features}')
+ raise Exception(f'Missing features to make prediction: {missing_features}. Please, provide target \'{self.target_feature}\' or all training features to run predict.')
logging.info("Actual data missing predicted value column: %s. Call predict with features from actual data: %s"%(self.target_feature, ds_actuals.columns))
res = A2ML(ctx).predict(self.model_id, data=ds_actuals.df, provider=provider)
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-01-26T10:31:16 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-513 | 6a5d341a2e3d4d83a3f4cd31e104fbd9b09cf2db | diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index ac207bc0..b9fbf172 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -36,11 +36,16 @@ def import_data(self, source=None):
.. code-block:: yaml
- # Local file name or remote url to the data source file
+ # Local file name, remote url to the data source file or postgres url
source: './dataset.csv'
+ .. code-block:: yaml
+
+ # Postgres url parameters: dbname, tablename, offset(OPTIONAL), limit(OPTIONAL)
+ source: jdbc:postgresql://user:[email protected]:5432/dbname?tablename=table1&offset=0&limit=100
+
Args:
- source (str, optional): Local file name or remote url to the data source file or Pandas DataFrame
+ source (str, optional): Local file name, remote url to the data source file, Pandas DataFrame or postgres url
Returns:
Results for each provider. ::
diff --git a/a2ml/api/a2ml_dataset.py b/a2ml/api/a2ml_dataset.py
index 37286258..1a623e87 100644
--- a/a2ml/api/a2ml_dataset.py
+++ b/a2ml/api/a2ml_dataset.py
@@ -62,7 +62,7 @@ def create(self, source = None):
"""Create a new DataSet for the Project specified in the .yaml.
Args:
- source (str, optional): Local file name or remote url to the data source file or Pandas DataFrame
+ source (str, optional): Local file name, remote url to the data source file, Pandas DataFrame or postgres url
Returns:
Results for each provider. ::
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index 3647ff1b..f8056755 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -47,7 +47,7 @@ def create_update_review_alert(self, model_id, pipeline_properties=None, paramet
if endpoint_api is None:
endpoint_api = AugerEndpointApi(self.ctx, None,
pipeline_properties['endpoint_pipelines'][0].get('endpoint_id'))
-
+
session_id = endpoint_api.properties().get('primary_experiment_session_id')
if session_id:
AugerExperimentSessionApi(self.ctx, None, None, session_id).update_settings()
@@ -82,13 +82,16 @@ def review(self, model_id):
if retrain_status in error_states:
status = 'error'
error = retrain_status
- else:
- redeploy_status = alert_item.get('action_results', {}).get('redeploy')
- if redeploy_status in error_states:
- status = 'error'
- error = redeploy_status
- elif redeploy_status == 'endpoint_updated' or redeploy_status == 'endpoint_has_better_pipeline':
- status = 'completed'
+ else:
+ if retrain_status == 'external_pipeline_should_be_rebuilt':
+ status = 'retrain'
+ else:
+ redeploy_status = alert_item.get('action_results', {}).get('redeploy')
+ if redeploy_status in error_states:
+ status = 'error'
+ error = redeploy_status
+ elif redeploy_status == 'endpoint_updated' or redeploy_status == 'endpoint_has_better_pipeline':
+ status = 'completed'
elif alert.get('actions') == 'retrain':
retrain_status = alert_item.get('action_results', {}).get('retrain')
if retrain_status in error_states:
diff --git a/a2ml/api/auger/impl/mparts/undeploy.py b/a2ml/api/auger/impl/mparts/undeploy.py
index bf314eff..5fa36bfa 100644
--- a/a2ml/api/auger/impl/mparts/undeploy.py
+++ b/a2ml/api/auger/impl/mparts/undeploy.py
@@ -31,7 +31,7 @@ def execute(self, model_id, locally=False):
if pipeline_api.check_endpoint():
endpoint_api = AugerEndpointApi(self.ctx, None, pipeline_api.object_id)
endpoint_props = endpoint_api.properties()
- endpoint_pipelines = sorted(endpoint_props.get('endpoint_pipelines', []), key=lambda k: k['id'])
+ endpoint_pipelines = sorted(endpoint_props.get('endpoint_pipelines', []), key=lambda k: k['created_at'])
if endpoint_pipelines and endpoint_pipelines[0]['pipeline_id'] == model_id:
self.ctx.log("Undeploy Review endpoint and all models.")
for pipeline in endpoint_pipelines:
diff --git a/a2ml/api/base_a2ml.py b/a2ml/api/base_a2ml.py
index a8ba2e66..7c726a8e 100644
--- a/a2ml/api/base_a2ml.py
+++ b/a2ml/api/base_a2ml.py
@@ -19,7 +19,7 @@ def build_runner(self, ctx, provider, force_local=False):
return ProviderRunner(ctx, provider)
def get_runner(self, locally, model_id=None, provider=None):
- if provider is None and model_id:
+ if provider is None and model_id and not self.ctx.is_external_provider():
provider = self.ctx.get_model_provider(model_id)
if provider:
diff --git a/a2ml/api/model_review/model_helper.py b/a2ml/api/model_review/model_helper.py
index 5d0cd152..3b772ddf 100644
--- a/a2ml/api/model_review/model_helper.py
+++ b/a2ml/api/model_review/model_helper.py
@@ -460,6 +460,7 @@ def update_model_options_file(options_path, options, ds_actuals):
if "targetFeature" in feature_columns:
feature_columns.remove(options["targetFeature"])
ds_actuals.options["featureColumns"] = feature_columns
+ ds_actuals.options["originalFeatureColumns"] = feature_columns
summary = ds_actuals.getSummary()
options = ds_actuals.update_options_by_dataset_statistics(summary["stat_data"])
diff --git a/a2ml/api/utils/__init__.py b/a2ml/api/utils/__init__.py
index 9f429f6d..876bbf2e 100644
--- a/a2ml/api/utils/__init__.py
+++ b/a2ml/api/utils/__init__.py
@@ -59,6 +59,17 @@ def url_encode(path):
return quote(path, safe='#&%:/?*=\'')
+def parse_url(remote_path):
+ try:
+ from urllib.parse import urlparse, parse_qs, quote
+ except ImportError:
+ from urlparse import urlparse, parse_qs, quote
+
+ uri = urlparse(remote_path)
+ params = parse_qs(uri.query)
+
+ return uri.path, params
+
def get_remote_file_info(remote_path):
from urllib.request import urlopen
import urllib
diff --git a/a2ml/api/utils/dataframe.py b/a2ml/api/utils/dataframe.py
index fcec066f..caf7c45b 100644
--- a/a2ml/api/utils/dataframe.py
+++ b/a2ml/api/utils/dataframe.py
@@ -9,7 +9,7 @@
from functools import wraps
-from a2ml.api.utils import fsclient, get_uid, get_uid4, remove_dups_from_list, process_arff_line, download_file, retry_helper
+from a2ml.api.utils import fsclient, get_uid, get_uid4, remove_dups_from_list, process_arff_line, download_file, retry_helper, parse_url
from a2ml.api.utils.local_fsclient import LocalFSClient
@@ -197,14 +197,20 @@ def load(self, features=None, nrows=None):
import psycopg2
from psycopg2.extensions import parse_dsn
path = path.replace('sslfactory=org.postgresql.ssl.NonValidatingFactory&', '')
- ary = path.split('tablename')
- path = ary[0]
- tablename = ary[1]
- dataset_name = tablename
+ path, params = parse_url(path)
- self.dbconn_args = parse_dsn(path[5:])
+ self.dbconn_args = parse_dsn(path)
conn = psycopg2.connect(**self.dbconn_args)
- self.df = pd.read_sql("select * from %s"%tablename, con=conn)
+
+ sql_cmd = "select " + (",".join(features) if features else "*") +" from %s"%params['tablename'][0]
+ if 'limit' in params:
+ sql_cmd += " LIMIT %s"%params['limit'][0]
+
+ if 'offset' in params:
+ sql_cmd += " OFFSET %s"%params['offset'][0]
+
+ logging.info("Read data from remote DB: %s"%sql_cmd)
+ self.df = pd.read_sql(sql_cmd, con=conn)
else:
path, remote_path = self._check_remote_path()
try:
diff --git a/a2ml/cmdl/template/config.yaml b/a2ml/cmdl/template/config.yaml
index b7525079..f400eed1 100644
--- a/a2ml/cmdl/template/config.yaml
+++ b/a2ml/cmdl/template/config.yaml
@@ -5,7 +5,9 @@ name:
providers: auger
# Use Auger Cloud Use Auger Cloud for all providers
use_auger_cloud: true
-# Local file name or remote url to the data source file
+# Local file name, remote url to the data source file or postgres url
+# Postgres url example: jdbc:postgresql://user:[email protected]:5432/dbname?tablename=table1&offset=0&limit=100
+# Postgres url parameters: dbname, tablename, offset(OPTIONAL), limit(OPTIONAL)
source:
# List of columns to be excluded from the training data
exclude:
diff --git a/docs/source/dev/configuration.rst b/docs/source/dev/configuration.rst
index d0ecc45a..e34561f9 100644
--- a/docs/source/dev/configuration.rst
+++ b/docs/source/dev/configuration.rst
@@ -36,7 +36,7 @@ All Providers
* **name** The project name.
* **providers** List of providers: auger, google, azure.
* **use_auger_cloud** Use Auger Cloud for all providers true | false
- * **source** Local file name or remote url to the data source file.
+ * **source** # Local file name, remote url to the data source file or postgres url. Postgres url example: jdbc:postgresql://user:[email protected]:5432/dbname?tablename=table1&offset=0&limit=100. Postgres url parameters: dbname, tablename, offset(OPTIONAL), limit(OPTIONAL)
* **exclude** List of columns to be excluded from the training data.
* **target** Target column name.
* **model_type** Model type: classification|regression|timeseries.
@@ -111,9 +111,9 @@ Auger
* **experiment.allowed_models** A list of model names to search for an experiment.If not specified, then all models supported for the task are used minus any specified in blocked_models
* **Supported models**
- * **Classification** XGBClassifier,LGBMClassifier,SVC,SGDClassifier,AdaBoostClassifier,DecisionTreeClassifier,ExtraTreesClassifier,RandomForestClassifier,GradientBoostingClassifier,CatBoostClassifier
- * **Regression** SVR,XGBRegressor,LGBMRegressor,ElasticNet,SGDRegressor,AdaBoostRegressor,DecisionTreeRegressor,ExtraTreesRegressor,RandomForestRegressor,GradientBoostingRegressor,CatBoostRegressor
- * **Timeseries** SVR,XGBRegressor,LGBMRegressor,ElasticNet,SGDRegressor,AdaBoostRegressor,DecisionTreeRegressor,ExtraTreesRegressor,RandomForestRegressor,GradientBoostingRegressor,CatBoostRegressor,TimeSeriesLSTM,VARXBaseRegressor,DeepTimeSeriesRegressor
+ * **Classification** XGBClassifier, LGBMClassifier, SVC, SGDClassifier, AdaBoostClassifier, DecisionTreeClassifier, ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier, CatBoostClassifier
+ * **Regression** SVR,XGBRegressor, LGBMRegressor, ElasticNet, SGDRegressor, AdaBoostRegressor, DecisionTreeRegressor, ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, CatBoostRegressor
+ * **Timeseries** SVR,XGBRegressor, LGBMRegressor, ElasticNet, SGDRegressor, AdaBoostRegressor, DecisionTreeRegressor, ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, CatBoostRegressor, TimeSeriesLSTM, VARXBaseRegressor, DeepTimeSeriesRegressor
* **experiment.estimate_trial_time** Use it if you have a lot of timeouted trials. Set it to True will predict the training time of each individual model to avoid timeouts. Default is False.
* **experiment.trials_per_worker** Use it if you have a lot of failed trials. Set it to value < 8 to give trial fit process more memory. Default is None.
@@ -175,9 +175,9 @@ Azure
* **experiment.allowed_models** A list of model names to search for an experiment.If not specified, then all models supported for the task are used minus any specified in blocked_models
* **Supported models**
- * **Classification** AveragedPerceptronClassifier,BernoulliNaiveBayes,DecisionTree,ExtremeRandomTrees,GradientBoosting,KNN,LightGBM,LinearSVM,LogisticRegression,MultinomialNaiveBayes,SGD,RandomForest,SVM,XGBoostClassifier
- * **Regression** DecisionTree,ElasticNet,ExtremeRandomTrees,FastLinearRegressor,GradientBoosting,KNN,LassoLars,LightGBM,OnlineGradientDescentRegressor,RandomForest,SGD,XGBoostRegressor
- * **Timeseries** AutoArima,Average,Naive,Prophet,SeasonalAverage,SeasonalNaive,TCNForecaster
+ * **Classification** AveragedPerceptronClassifier, BernoulliNaiveBayes, DecisionTree, ExtremeRandomTrees,GradientBoosting, KNN, LightGBM, LinearSVM, LogisticRegression, MultinomialNaiveBayes, SGD, RandomForest, SVM, XGBoostClassifier
+ * **Regression** DecisionTree, ElasticNet, ExtremeRandomTrees, FastLinearRegressor, GradientBoosting, KNN, LassoLars, LightGBM, OnlineGradientDescentRegressor, RandomForest, SGD, XGBoostRegressor
+ * **Timeseries** AutoArima, Average, Naive, Prophet, SeasonalAverage, SeasonalNaive, TCNForecaster
* **cluster.region** Name of cluster region. For example: eastus2
* **cluster.min_nodes** Minimum number of nodes allocated for cluster. Minimum is 0.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 1d2b4602..dbcf96b6 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -10,6 +10,7 @@
dev/configuration
dev/authentication
dev/quickstart
+ dev/mlram
dev/api
dev/cli
dev/advanced
| support postgres url as data source
| add offset and limit parameters
set limit to some value in case of free account on hub
needs document it | 2021-01-21T11:36:55 | 0.0 | [] | [] |
||
augerai/a2ml | augerai__a2ml-512 | ce0fdad05a62125ec008524d6fa6b0df59e8254e | diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 93c59340..ac207bc0 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -223,7 +223,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
Args:
model_id(str): The deployed model id you want to use.
filename(str): The file with data to request predictions for.
- data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFramme to_dict method
+ data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFrame to_dict('list') method
columns(list): list of column names if data is array of records
predicted_at: Predict data date. Use for review of historical data.
threshold(float): For classification models only. This will return class probabilities with response.
@@ -306,7 +306,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
Args:
model_id(str): The deployed model id you want to use.
filename(str): The file with data to request predictions for.
- data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFramme to_dict method
+ data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFrame to_dict('list') method
columns(list): list of column names if data is array of records
actuals_at: Actuals date. Use for review of historical data.
actual_date_column(str): name of column in data which contains actual date
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index 7c50e4e4..a79abb71 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -73,7 +73,7 @@ def predict(self, model_id, filename=None, data=None, columns=None, predicted_at
Args:
model_id(str): The deployed model id you want to use.
filename(str): The file with data to request predictions for.
- data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFramme to_dict method
+ data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFrame to_dict('list') method
columns(list): list of column names if data is array of records
predicted_at: Predict data date. Use for review of historical data.
threshold(float): For classification models only. This will return class probabilities with response.
@@ -156,7 +156,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
Args:
model_id(str): The deployed model id you want to use.
filename(str): The file with data to request predictions for.
- data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFramme to_dict method
+ data: array of records [[target, actual]] or Pandas DataFrame (target, actual) or dict created with Pandas DataFrame to_dict('list') method
columns(list): list of column names if data is array of records
actuals_at: Actuals date. Use for review of historical data.
actual_date_column(str): name of column in data which contains actual date
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index fd46eafc..7e289e20 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -22,7 +22,8 @@ def create_external(self, review, name, project_id, algorithm, score):
'score_name': self.ctx.config.get('experiment/metric'),
'task_type': self.ctx.config.get('model_type'),
'algorithm_name': algorithm,
- 'score_value': score
+ 'score_value': score,
+ 'binary_classification': self.ctx.config.get('binary_classification')
},
['creating_files', 'packaging', 'deploying'])
diff --git a/a2ml/api/auger/impl/mparts/undeploy.py b/a2ml/api/auger/impl/mparts/undeploy.py
index fd9e844c..bf314eff 100644
--- a/a2ml/api/auger/impl/mparts/undeploy.py
+++ b/a2ml/api/auger/impl/mparts/undeploy.py
@@ -32,7 +32,6 @@ def execute(self, model_id, locally=False):
endpoint_api = AugerEndpointApi(self.ctx, None, pipeline_api.object_id)
endpoint_props = endpoint_api.properties()
endpoint_pipelines = sorted(endpoint_props.get('endpoint_pipelines', []), key=lambda k: k['id'])
- print(endpoint_pipelines)
if endpoint_pipelines and endpoint_pipelines[0]['pipeline_id'] == model_id:
self.ctx.log("Undeploy Review endpoint and all models.")
for pipeline in endpoint_pipelines:
diff --git a/a2ml/api/model_review/model_helper.py b/a2ml/api/model_review/model_helper.py
index 38b7d435..5d0cd152 100644
--- a/a2ml/api/model_review/model_helper.py
+++ b/a2ml/api/model_review/model_helper.py
@@ -437,7 +437,7 @@ def get_train_features(options):
return remove_dups_from_list(selected_cols)
@staticmethod
- def create_model_options_file(options_path, scoring, target_column, task_type):
+ def create_model_options_file(options_path, scoring, target_column, task_type, binary_classification):
options = {}
options["targetFeature"] = target_column
options["task_type"] = task_type
@@ -448,6 +448,7 @@ def create_model_options_file(options_path, scoring, target_column, task_type):
if task_type == "classification":
options["classification"] = True
+ options['binaryClassification'] = True if binary_classification else False
fsclient.write_json_file(options_path, options)
return options
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 4cbd397e..4239b1d7 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -87,20 +87,20 @@ def _calculate_roi(self, df_data, predicted_feature=None):
revenue_value = 1.0
#TODO: perform operations
-
+
return (revenue_value-investment_value)/investment_value
- def add_external_model(self, target_column, scoring, task_type):
+ def add_external_model(self, target_column, scoring, task_type, binary_classification):
ModelHelper.create_model_options_file(
options_path=self.options_path,
scoring=scoring,
target_column=target_column,
task_type=task_type,
+ binary_classification = binary_classification,
)
self._load_options()
-
return True
def add_actuals(
@@ -129,7 +129,7 @@ def add_actuals(
actuals_count = ds_actuals.count()
ds_actuals.df.rename(columns={"actual": 'a2ml_actual'}, inplace=True)
- if not self.target_feature in ds_actuals.columns:
+ if provider is not None and not self.target_feature in ds_actuals.columns:
missing_features = set(self.original_features) - set(ds_actuals.columns)
if len(missing_features) > 0:
missing_features = ', '.join(sorted(list(missing_features)))
diff --git a/a2ml/cmdl/template/config.yaml b/a2ml/cmdl/template/config.yaml
index 359a4c73..b7525079 100644
--- a/a2ml/cmdl/template/config.yaml
+++ b/a2ml/cmdl/template/config.yaml
@@ -1,7 +1,7 @@
---
# Overall project name
name:
-# List of providers: auger, azure, google(not supported yet)
+# List of providers: auger, azure, external, google(not supported yet)
providers: auger
# Use Auger Cloud Use Auger Cloud for all providers
use_auger_cloud: true
@@ -13,6 +13,8 @@ exclude:
target:
# Model type: classification|regression|timeseries
model_type: classification
+# Is this binary classification? Used for self-hosted model. For all other models determine automatically based on dataset provided
+#binary_classification: True
# Experiment settings
experiment:
### Metric used to build Model
diff --git a/a2ml/tasks_queue/tasks_hub_api.py b/a2ml/tasks_queue/tasks_hub_api.py
index 21ae439b..32053331 100644
--- a/a2ml/tasks_queue/tasks_hub_api.py
+++ b/a2ml/tasks_queue/tasks_hub_api.py
@@ -485,6 +485,7 @@ def add_external_model_task(params):
target_column=params.get('target_column'),
scoring=params.get('scoring'),
task_type=params.get('task_type'),
+ binary_classification=params.get('binary_classification'),
)
@celeryApp.task(ignore_result=True)
@@ -492,7 +493,6 @@ def add_external_model_task(params):
def undeploy_model_task(params):
ctx = _create_provider_context(params)
provider = params.get('provider', 'auger')
- ctx = _read_hub_experiment_session(ctx, params)
model_id = params.get('hub_info', {}).get('pipeline_id')
if not model_id:
@@ -502,9 +502,7 @@ def undeploy_model_task(params):
raise Exception("undeploy_model_task: hub_info/pipeline_id should be provided.")
ctx.config.set('undeploy/service_only', params.get('service_only', False), provider)
- ctx.config.clean_changes()
res = A2MLModel(ctx).undeploy(model_id = model_id, locally=params.get('locally', False))
- _update_hub_objects(ctx, params.get('provider'), params)
return res
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-01-20T16:48:30 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-506 | c2d440d49b436754d1aadf627917d966633dbfda | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index edeb2fe8..16aaff5b 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.6.18'
+__version__ = '1.0.01'
diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index 84f6581f..93c59340 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -170,7 +170,7 @@ def evaluate(self, run_id = None):
return self.runner.execute('evaluate', run_id = run_id)
@show_result
- def deploy(self, model_id, locally=False, review=True, provider=None, name=None):
+ def deploy(self, model_id, locally=False, review=True, provider=None, name=None, algorithm=None, score=None):
"""Deploy a model locally or to specified provider(s).
Note:
@@ -183,6 +183,8 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None)
review(bool): Should model support review based on actual data. The default is True.
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider defined by model_id or set in costructor.
name (str): Friendly name for the model. Used as name for Review Endpoint
+ algorithm (str): Self-hosted model(external provider) algorithm name.
+ score (float): Self-hosted model(external provider) score.
Returns:
::
@@ -197,17 +199,17 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None)
ctx = Context()
a2ml = A2ML(ctx, 'auger, azure')
- a2ml.deploy(model_id='A017AC8EAD094FD')
+ a2ml.deploy(model_id='A017AC8EAD094FD', name='FirstExperiment')
.. code-block:: python
ctx = Context()
a2ml = A2ML(ctx, 'external')
- result = a2ml.deploy(model_id=None, name="My external model.")
+ result = a2ml.deploy(model_id=None, name="My external model.", algorithm='RandomForest', score=0.75)
model_id = result['model_id']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('deploy', model_id, locally, review, name)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('deploy', model_id, locally, review, name, algorithm, score)
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
@@ -390,17 +392,21 @@ def review(self, model_id, locally=False, provider=None):
locally(bool): Process review locally.
Returns:
+ status(str): May be : started, error, completed, retrain
+ error(str): Description of error if status='error'
+ accuracy(float): Average accuracy of model(based on used metric) for review sensitivity period(see config.yml)
+
::
{
'result': True,
- 'data': {'status': 'completed', 'error': ''}
+ 'data': {'status': 'completed', 'error': '', 'accuracy': 0.76}
}
Examples:
.. code-block:: python
ctx = Context()
- model = A2ML(ctx).review(model_id='D881079E1ED14FB')
+ result = A2ML(ctx).review(model_id='D881079E1ED14FB')
"""
return self.get_runner(locally, model_id, provider).execute_one_provider('review', model_id)
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index 12acf693..7c50e4e4 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -25,7 +25,7 @@ def __init__(self, ctx, provider=None):
self.local_runner = lambda: self.build_runner(ctx, provider, force_local=True)
@show_result
- def deploy(self, model_id, locally=False, review=True, provider=None, name=None):
+ def deploy(self, model_id, locally=False, review=True, provider=None, name=None, algorithm=None, score=None):
"""Deploy a model locally or to specified provider(s).
Args:
@@ -34,6 +34,8 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None)
review(bool): Should model support review based on actual data. The default is True.
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider defined by model_id or set in costructor.
name (str): Friendly name for the model. Used as name for Review Endpoint
+ algorithm (str): Self-hosted model(external provider) algorithm name.
+ score (float): Self-hosted model(external provider) score.
Returns:
::
@@ -47,17 +49,17 @@ def deploy(self, model_id, locally=False, review=True, provider=None, name=None)
.. code-block:: python
ctx = Context()
- model = A2MLModel(ctx).deploy(model_id='D881079E1ED14FB', locally=True)
+ model = A2MLModel(ctx).deploy(model_id='D881079E1ED14FB', name='FirstExperiment')
.. code-block:: python
ctx = Context()
model = A2MLModel(ctx, 'external')
- result = model.deploy(model_id=None, name="My external model.")
+ result = model.deploy(model_id=None, name="My external model.", algorithm='RandomForest', score=0.75)
model_id = result['model_id']
"""
- return self.get_runner(locally, model_id, provider).execute_one_provider('deploy', model_id, locally, review, name)
+ return self.get_runner(locally, model_id, provider).execute_one_provider('deploy', model_id, locally, review, name, algorithm, score)
@show_result
def predict(self, model_id, filename=None, data=None, columns=None, predicted_at=None,
@@ -254,18 +256,22 @@ def review(self, model_id, locally=False, provider=None):
provider (str): The automl provider you wish to run. For example 'auger'. The default is None - use provider defined by model_id or set in costructor.
Returns:
+ status(str): May be : started, error, completed, retrain
+ error(str): Description of error if status='error'
+ accuracy(float): Average accuracy of model(based on used metric) for review sensitivity period(see config.yml)
+
::
{
'result': True,
- 'data': {'status': 'completed', 'error': ''}
+ 'data': {'status': 'completed', 'error': '', 'accuracy': 0.76}
}
Examples:
.. code-block:: python
ctx = Context()
- model = A2MLModel(ctx).review(model_id='D881079E1ED14FB')
+ result = A2MLModel(ctx).review(model_id='D881079E1ED14FB')
"""
return self.get_runner(locally, model_id, provider).execute_one_provider('review', model_id)
diff --git a/a2ml/api/auger/a2ml.py b/a2ml/api/auger/a2ml.py
index 8a4e5f74..015fff4e 100644
--- a/a2ml/api/auger/a2ml.py
+++ b/a2ml/api/auger/a2ml.py
@@ -18,8 +18,8 @@ def train(self):
def evaluate(self, run_id = None):
return AugerExperiment(self.ctx).leaderboard(run_id)
- def deploy(self, model_id, locally=False, review=True, name=None):
- return AugerModel(self.ctx).deploy(model_id, locally, review, name)
+ def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
+ return AugerModel(self.ctx).deploy(model_id, locally, review, name, algorithm, score)
def predict(self, model_id, filename, threshold=None, locally=False, data=None, columns=None, predicted_at=None, output=None):
return AugerModel(self.ctx).predict(
diff --git a/a2ml/api/auger/impl/cloud/dataset.py b/a2ml/api/auger/impl/cloud/dataset.py
index 72743b24..bab1dd8f 100644
--- a/a2ml/api/auger/impl/cloud/dataset.py
+++ b/a2ml/api/auger/impl/cloud/dataset.py
@@ -10,6 +10,7 @@
from .cluster import AugerClusterApi
from .project_file import AugerProjectFileApi
from ..exceptions import AugerException
+from .cluster_task import AugerClusterTaskApi
from a2ml.api.utils import fsclient
from a2ml.api.utils.file_uploader import FileUploader, NewlineProgressPercentage
@@ -144,10 +145,13 @@ def _upload_to_multi_tenant(self, file_to_upload):
res = self.rest_api.call('create_project_file_url', {
'project_id': self.parent_api.object_id,
'file_path': file_path,
- 'file_size': fsclient.get_file_size(file_to_upload)
+ 'file_size': fsclient.get_file_size(file_to_upload),
+ 'async': True
})
-
- if res is None:
+ cluster_task = AugerClusterTaskApi(self.ctx, cluster_task_id=res['id'])
+ cluster_task.wait_for_status(['pending', 'received', 'started', 'retry'])
+ res = cluster_task.properties().get('result')
+ if not res:
raise AugerException(
'Error while uploading file to Auger Cloud...')
diff --git a/a2ml/api/auger/impl/cloud/pipeline.py b/a2ml/api/auger/impl/cloud/pipeline.py
index 5b1e1f53..fd46eafc 100644
--- a/a2ml/api/auger/impl/cloud/pipeline.py
+++ b/a2ml/api/auger/impl/cloud/pipeline.py
@@ -11,16 +11,18 @@ def __init__(self, ctx, experiment_api, pipeline_id=None):
super(AugerPipelineApi, self).__init__(
ctx, experiment_api, None, pipeline_id)
- def create(self, trial_id, review=True):
- return self._call_create({'trial_id': trial_id, 'is_review_model_enabled' : review},
+ def create(self, trial_id, review=True, name=None):
+ return self._call_create({'trial_id': trial_id, 'is_review_model_enabled' : review, 'name': name},
['creating_files', 'packaging', 'deploying'])
- def create_external(self, review, name, project_id):
+ def create_external(self, review, name, project_id, algorithm, score):
return self._call_create({'name': name, 'is_review_model_enabled' : review,
'project_id': project_id,
'target_column': self.ctx.config.get('target'),
- 'scoring': self.ctx.config.get('experiment/metric'),
- 'task_type': self.ctx.config.get('model_type')
+ 'score_name': self.ctx.config.get('experiment/metric'),
+ 'task_type': self.ctx.config.get('model_type'),
+ 'algorithm_name': algorithm,
+ 'score_value': score
},
['creating_files', 'packaging', 'deploying'])
diff --git a/a2ml/api/auger/impl/model.py b/a2ml/api/auger/impl/model.py
index 5eb34ed5..7d250d32 100644
--- a/a2ml/api/auger/impl/model.py
+++ b/a2ml/api/auger/impl/model.py
@@ -18,8 +18,8 @@ def __init__(self, ctx, project):
self.project = project
self.ctx = ctx
- def deploy(self, model_id, locally=False, review=True, name=None):
- return ModelDeploy(self.ctx, self.project).execute(model_id, locally, review, name)
+ def deploy(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
+ return ModelDeploy(self.ctx, self.project).execute(model_id, locally, review, name, algorithm, score)
def review_alert(self, model_id, parameters):
return ModelDeploy(self.ctx, self.project).create_update_review_alert(model_id, None, parameters)
diff --git a/a2ml/api/auger/impl/mparts/deploy.py b/a2ml/api/auger/impl/mparts/deploy.py
index 9c383027..3647ff1b 100644
--- a/a2ml/api/auger/impl/mparts/deploy.py
+++ b/a2ml/api/auger/impl/mparts/deploy.py
@@ -21,11 +21,11 @@ def __init__(self, ctx, project):
self.project = project
self.ctx = ctx
- def execute(self, model_id, locally=False, review=True, name=None):
+ def execute(self, model_id, locally=False, review=True, name=None, algorithm=None, score=None):
if locally:
return self.deploy_model_locally(model_id, review, name)
else:
- return self.deploy_model_in_cloud(model_id, review, name)
+ return self.deploy_model_in_cloud(model_id, review, name, algorithm, score)
def create_update_review_alert(self, model_id, pipeline_properties=None, parameters=None, name=None):
if not self.ctx.config.get('review'):
@@ -41,19 +41,23 @@ def create_update_review_alert(self, model_id, pipeline_properties=None, paramet
if not name:
name = fsclient.get_path_base_name(self.ctx.config.get('source'))
endpoint_properties = endpoint_api.create(pipeline_properties.get('id'), name)
- pipeline_properties['endpoint_pipelines']= [endpoint_properties.get('id')]
- else:
- endpoint_api = AugerEndpointApi(self.ctx, None,
- pipeline_properties['endpoint_pipelines'][0].get('endpoint_id'))
+ pipeline_properties['endpoint_pipelines'] = [endpoint_properties.get('id')]
+
+ if pipeline_properties.get('endpoint_pipelines'):
+ if endpoint_api is None:
+ endpoint_api = AugerEndpointApi(self.ctx, None,
+ pipeline_properties['endpoint_pipelines'][0].get('endpoint_id'))
+
session_id = endpoint_api.properties().get('primary_experiment_session_id')
if session_id:
AugerExperimentSessionApi(self.ctx, None, None, session_id).update_settings()
- AugerReviewAlertApi(self.ctx, endpoint_api).create_update(parameters)
+ AugerReviewAlertApi(self.ctx, endpoint_api).create_update(parameters)
+ else:
+ self.ctx.log('Model is not belong to any review endpoint. Skipping ...')
def review(self, model_id):
pipeline_properties = AugerPipelineApi(self.ctx, None, model_id).properties()
-
result = {}
if not pipeline_properties.get('endpoint_pipelines'):
return result
@@ -92,6 +96,8 @@ def review(self, model_id):
error = retrain_status
elif retrain_status == 'experiment_session_done':
status = 'completed'
+ elif retrain_status == 'external_pipeline_should_be_rebuilt':
+ status = 'retrain'
else:
status = 'completed'
@@ -105,16 +111,16 @@ def review(self, model_id):
}
return result
- def deploy_model_in_cloud(self, model_id, review, name):
+ def deploy_model_in_cloud(self, model_id, review, name, algorithm, score):
self.ctx.log('Deploying model %s' % model_id)
if self.ctx.is_external_provider():
pipeline_properties = AugerPipelineApi(
- self.ctx, None).create_external(review, name, self.project.object_id)
+ self.ctx, None).create_external(review, name, self.project.object_id, algorithm, score)
else:
self.project.start()
pipeline_properties = AugerPipelineApi(
- self.ctx, None).create(model_id, review)
+ self.ctx, None).create(model_id, review, name)
if pipeline_properties.get('status') == 'ready':
if review:
diff --git a/a2ml/api/auger/model.py b/a2ml/api/auger/model.py
index bf931f65..516c5c0f 100644
--- a/a2ml/api/auger/model.py
+++ b/a2ml/api/auger/model.py
@@ -15,8 +15,8 @@ def __init__(self, ctx):
@error_handler
@authenticated
@with_project(autocreate=False)
- def deploy(self, project, model_id, locally, review, name):
- model_id = Model(self.ctx, project).deploy(model_id, locally, review, name)
+ def deploy(self, project, model_id, locally, review, name, algorithm, score):
+ model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score)
return {'model_id': model_id}
@error_handler
diff --git a/a2ml/api/model_review/model_review.py b/a2ml/api/model_review/model_review.py
index 3247c5c3..4cbd397e 100644
--- a/a2ml/api/model_review/model_review.py
+++ b/a2ml/api/model_review/model_review.py
@@ -65,8 +65,31 @@ def _do_score_actual(self, df_data, predicted_feature=None):
y_pred, _ = ModelHelper.preprocess_target_ds(self.model_path, ds_predict)
y_true, _ = ModelHelper.preprocess_target_ds(self.model_path, ds_true)
- return ModelHelper.calculate_scores(self.options, y_test=y_true, y_pred=y_pred, raise_main_score=False)
+ res = ModelHelper.calculate_scores(self.options, y_test=y_true, y_pred=y_pred, raise_main_score=False)
+ res['roi'] = self._calculate_roi(df_data, predicted_feature)
+
+ return res
+
+ def _calculate_roi(self, df_data, predicted_feature=None):
+ if not self.params.get('roi'):
+ return 0.0
+
+ data_filter = self.params['roi']['filter']
+ revenue = self.params['roi']['revenue']
+ investment = self.params['roi']['investment']
+
+ #TODO: replace P to target, A to a2ml_actual
+
+ df_filtered = df_data.query(data_filter)
+
+ investment_value = 1.0
+ revenue_value = 1.0
+
+ #TODO: perform operations
+
+ return (revenue_value-investment_value)/investment_value
+
def add_external_model(self, target_column, scoring, task_type):
ModelHelper.create_model_options_file(
options_path=self.options_path,
diff --git a/a2ml/cmdl/commands/cmd_deploy.py b/a2ml/cmdl/commands/cmd_deploy.py
index 58eaafd7..474b1dae 100644
--- a/a2ml/cmdl/commands/cmd_deploy.py
+++ b/a2ml/cmdl/commands/cmd_deploy.py
@@ -13,8 +13,12 @@
help='Do not support model review based on actual data.')
@click.option('--name', '-n', required=False, type=click.STRING,
help='Model friendly name.Used as name for Review Endpoint')
[email protected]('--algorithm', '-a', required=False, type=click.STRING,
+ help='Self-hosted model(external provider) algorithm name.')
[email protected]('--score', '-s', required=False, type=float,
+ help='Self-hosted model(external provider) score.')
@pass_context
-def cmdl(ctx, provider, model_id, locally, no_review, name):
+def cmdl(ctx, provider, model_id, locally, no_review, name, algorithm, score):
"""Deploy trained model."""
ctx.setup_logger(format='')
- A2ML(ctx, provider).deploy(model_id, locally, not no_review, name=name)
+ A2ML(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score)
diff --git a/a2ml/cmdl/commands/cmd_model.py b/a2ml/cmdl/commands/cmd_model.py
index e9965325..2a6945c4 100644
--- a/a2ml/cmdl/commands/cmd_model.py
+++ b/a2ml/cmdl/commands/cmd_model.py
@@ -19,10 +19,14 @@ def cmdl(ctx):
help='Cloud AutoML Provider.')
@click.option('--name', '-n', required=False, type=click.STRING,
help='Model friendly name.Used as name for Review Endpoint')
[email protected]('--algorithm', '-a', required=False, type=click.STRING,
+ help='Self-hosted model(external provider) algorithm name.')
[email protected]('--score', '-s', required=False, type=float,
+ help='Self-hosted model(external provider) score.')
@pass_context
def deploy(ctx, provider, model_id, locally, no_review, name):
"""Deploy trained model."""
- A2MLModel(ctx, provider).deploy(model_id, locally, not no_review, name=name)
+ A2MLModel(ctx, provider).deploy(model_id, locally, not no_review, name=name, algorithm=algorithm, score=score)
@click.command('predict', short_help='Predict with deployed model.')
@click.argument('filename', required=True, type=click.STRING)
diff --git a/docs/source/dev/mlram.rst b/docs/source/dev/mlram.rst
new file mode 100644
index 00000000..009bcd38
--- /dev/null
+++ b/docs/source/dev/mlram.rst
@@ -0,0 +1,72 @@
+************
+MLRAM
+************
+
+Auger-hosted model
+===================
+1. Import dataset and Train to get model
+2. Make sure review section is correct in config.yml
+3. Deploy model. It will add model to review section in Auger.ai
+4. Predict and send actuals. See actuals API
+
+Self-hosted model
+===================
+1. Create A2ML application with external provider:
+
+.. code-block:: bash
+
+ $ a2ml new test_app -p external
+
+2. Specify the following parameters in config.yml:
+
+ .. code-block:: YAML
+
+ target: the feature which is the target
+ model_type: Can be regression, classification or timeseries
+
+ experiment:
+ metric: <metric to calculate using actuals>
+
+ review:
+ alert:
+ <See configuration section>
+
+3. Deploy model without model id:
+
+ .. code-block:: python
+
+ ctx = Context()
+ a2ml = A2ML(ctx)
+ result = a2ml.deploy(model_id=None, name="My self-hosted model.", algorithm="RandomForest", score=0.76)
+ model_id = result['model_id']
+
+4. Send actuals:
+
+ .. code-block:: python
+
+ ctx = Context()
+ actual_records = [['predicted_value_1', 'actual_value_1'], ['predicted_value_2', 'actual_value_2']]
+ columns = [target, 'actual']
+
+ A2ML(ctx, "external").actuals('external_model_id', data=actual_records,columns=columns)
+
+To review distribution chart , send training features with target and actuals:
+
+ .. code-block:: python
+
+ ctx = Context()
+ actual_records = [['predicted_value_1', 'actual_value_1', 'value1', 'value2'], ['predicted_value_2', 'actual_value_2', 'value3', 'value4']]
+ columns = [target, 'actual', 'feature1', 'feature2']
+
+ A2ML(ctx, "external").actuals('external_model_id', data=actual_records,columns=columns)
+
+5. Call review to check if model retrain is required:
+
+ .. code-block:: python
+
+ ctx = Context()
+ result = A2ML(ctx).review(model_id='external_model_id')
+ if result['data']['status'] == 'retrain':
+ #Train new model using updated data
+ a2ml.deploy(model_id=None, name="My self-hosted model.", algorithm="RandomForest", score=0.77)
+
diff --git a/setup.py b/setup.py
index 374d981a..22feb59b 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,8 @@ def run(self):
'smart_open==1.9.0', # version for azure
'jsonpickle',
'websockets',
- 'liac-arff'
+ 'liac-arff',
+ 'xlrd==1.2.0'
]
extras = {
| WIP: Move api to auger.ai repo
Moving all aunderlying auger api code to auger.ai repo
| 2021-01-06T12:12:03 | 0.0 | [] | [] |
|||
jslay88/qbt_migrate | jslay88__qbt_migrate-46 | e11bbf3364736a534baf22c526f010ce0146fa98 | diff --git a/.flake8 b/.flake8
index 43b9644..0f78852 100644
--- a/.flake8
+++ b/.flake8
@@ -1,4 +1,5 @@
[flake8]
+ignore = W503
select = C,E,F,W,B,B950
exclude =
venv
diff --git a/README.md b/README.md
index dce244a..f411023 100644
--- a/README.md
+++ b/README.md
@@ -12,8 +12,7 @@ Install from PyPi using `pip`, or jump to [Examples](#Examples) for Docker
Run the script and follow prompts or use CLI arguments with command `qbt_migrate`
- usage: qbt_migrate [-h] [-e EXISTING_PATH] [-n NEW_PATH] [-t {Windows,Linux,Mac}]
- [-b BT_BACKUP_PATH] [-l {DEBUG,INFO}]
+ usage: qbt_migrate [-h] [-e EXISTING_PATH] [-n NEW_PATH] [-r] [-t {Windows,Linux,Mac}] [-b BT_BACKUP_PATH] [-s] [-l {DEBUG,INFO}]
optional arguments:
-h, --help show this help message and exit
@@ -21,6 +20,7 @@ Run the script and follow prompts or use CLI arguments with command `qbt_migrate
Existing root of path to look for.
-n NEW_PATH, --new-path NEW_PATH
New root path to replace existing root path with.
+ -r, --regex Existing and New paths are regex patterns with capture groups.
-t {Windows,Linux,Mac}, --target-os {Windows,Linux,Mac}
Target OS (converts slashes). Default will auto-detect if conversion is needed based on existing vs new.
-b BT_BACKUP_PATH, --bt-backup-path BT_BACKUP_PATH
@@ -57,6 +57,11 @@ The correct pattern for this would be `-e X: -n /torrents` or `-e X:\ -n /torren
qbt_migrate -e /torrents -n Z:\Torrents -t Windows # Linux/Mac to Windows (converts slashes)
qbt_migrate -e /torrents -n Z:\\Torrents -t Windows # Linux/Mac to Windows (converts slashes) # When running on Linux machine \\ is needed for Windows Paths
+ # Adavanced Usage with RegEx
+ # Example would replace /some/test/with/a/path with /test/matched/path
+ qbt_migrate -r -e /some/(\w+)/.*$ -n \1/path -t Linux # Matches using regex patterns and replaces using capture groups.
+ qbt_migrate --regex -e /some/(\w+)/.*$ -n \1/matched/path -t Linux # Matches using regex patterns and replaces using capture groups.
+
#### Docker
You can also run this tool with Docker if you don't have Python, or don't want to install the package to your system directly.
The BT_backup path is automatically overridden to `/tmp/BT_backup`, so mount your `BT_backup` there.
diff --git a/qbt_migrate/__init__.py b/qbt_migrate/__init__.py
index cf2d954..f3e8f6f 100644
--- a/qbt_migrate/__init__.py
+++ b/qbt_migrate/__init__.py
@@ -6,6 +6,6 @@
from .methods import convert_slashes, discover_bt_backup_path
-__version__ = "2.2.0" + os.getenv("VERSION_TAG", "")
+__version__ = "2.2.1" + os.getenv("VERSION_TAG", "")
logging.getLogger(__name__).addHandler(logging.NullHandler())
diff --git a/qbt_migrate/classes.py b/qbt_migrate/classes.py
index bd4599f..b018cf2 100644
--- a/qbt_migrate/classes.py
+++ b/qbt_migrate/classes.py
@@ -92,10 +92,13 @@ def discover_relevant_fast_resume(
raise e
cls.logger.warning(f"Unable to parse {file}. Skipping!\n\n{e}")
continue
- if existing_path in fast_resume.save_path or existing_path in fast_resume.qbt_save_path:
+ if (fast_resume.save_path is not None and existing_path in fast_resume.save_path) or (
+ fast_resume.qbt_save_path is not None and existing_path in fast_resume.qbt_save_path
+ ):
yield fast_resume
elif regex_path and (
- re.match(existing_path, fast_resume.save_path) or re.match(existing_path, fast_resume.qbt_save_path)
+ (fast_resume.save_path is not None and re.match(existing_path, fast_resume.save_path))
+ or (fast_resume.qbt_save_path is not None and re.match(existing_path, fast_resume.qbt_save_path))
):
yield fast_resume
return
@@ -219,6 +222,8 @@ def replace_paths(
new_save_path = pattern.sub(new_path, self.save_path)
if self.qbt_save_path:
new_qbt_save_path = pattern.sub(new_path, self.qbt_save_path)
+ if not self.save_path:
+ new_save_path = new_qbt_save_path
if self.mapped_files:
self._data["mapped_files"] = [pattern.sub(new_path, path) for path in self.mapped_files]
else:
@@ -226,6 +231,8 @@ def replace_paths(
new_qbt_save_path = (
self.qbt_save_path.replace(existing_path, new_path) if self.qbt_save_path is not None else None
)
+ if not self.save_path:
+ new_save_path = new_qbt_save_path
if self.mapped_files:
self._data["mapped_files"] = [path.replace(existing_path, new_path) for path in self.mapped_files]
self.logger.debug(
diff --git a/qbt_migrate/methods.py b/qbt_migrate/methods.py
index 8beb4fa..54e3092 100644
--- a/qbt_migrate/methods.py
+++ b/qbt_migrate/methods.py
@@ -45,4 +45,4 @@ def discover_bt_backup_path():
return "/config/qBittorrent/BT_backup"
logger.debug("Linux/Mac System")
- return os.path.join(os.getenv("HOME", "/home"), ".local/share/data/qBittorrent/BT_backup")
+ return os.path.join(os.getenv("HOME"), ".local/share/data/qBittorrent/BT_backup")
diff --git a/tox.ini b/tox.ini
index 77cb641..dd809d7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -33,6 +33,10 @@ commands=
lint: {[lint-config]commands}
unittest: {[unittest-config]commands}
+passenv=
+ HOME
+ LOCALAPPDATA
+
[lint-config]
# Configuration necessary to lint Python files.
| Hi I need some help
I already followed the wiki but for some reason it didn't work for me, hope I'm not a bother
My previous folder path is J:\Anime\ I want to change to G:\Anime\
I followed the guide but got this error, also when I reopen qbit nothing changed

Hi I need some help
I already followed the wiki but for some reason it didn't work for me, hope I'm not a bother
My previous folder path is J:\Anime\ I want to change to G:\Anime\
I followed the guide but got this error, also when I reopen qbit nothing changed

| Try it with this branch and report back.
```
pip install -U git+https://github.com/jslay88/qbt_migrate.git@fix-more-missing-qbt-save-path
```
Try it with this branch and report back.
```
pip install -U git+https://github.com/jslay88/qbt_migrate.git@fix-more-missing-qbt-save-path
```
| 2022-04-16T07:42:58 | 0.0 | [] | [] |
||
jslay88/qbt_migrate | jslay88__qbt_migrate-44 | e11bbf3364736a534baf22c526f010ce0146fa98 | diff --git a/.flake8 b/.flake8
index 43b9644..0f78852 100644
--- a/.flake8
+++ b/.flake8
@@ -1,4 +1,5 @@
[flake8]
+ignore = W503
select = C,E,F,W,B,B950
exclude =
venv
diff --git a/qbt_migrate/classes.py b/qbt_migrate/classes.py
index bd4599f..b018cf2 100644
--- a/qbt_migrate/classes.py
+++ b/qbt_migrate/classes.py
@@ -92,10 +92,13 @@ def discover_relevant_fast_resume(
raise e
cls.logger.warning(f"Unable to parse {file}. Skipping!\n\n{e}")
continue
- if existing_path in fast_resume.save_path or existing_path in fast_resume.qbt_save_path:
+ if (fast_resume.save_path is not None and existing_path in fast_resume.save_path) or (
+ fast_resume.qbt_save_path is not None and existing_path in fast_resume.qbt_save_path
+ ):
yield fast_resume
elif regex_path and (
- re.match(existing_path, fast_resume.save_path) or re.match(existing_path, fast_resume.qbt_save_path)
+ (fast_resume.save_path is not None and re.match(existing_path, fast_resume.save_path))
+ or (fast_resume.qbt_save_path is not None and re.match(existing_path, fast_resume.qbt_save_path))
):
yield fast_resume
return
@@ -219,6 +222,8 @@ def replace_paths(
new_save_path = pattern.sub(new_path, self.save_path)
if self.qbt_save_path:
new_qbt_save_path = pattern.sub(new_path, self.qbt_save_path)
+ if not self.save_path:
+ new_save_path = new_qbt_save_path
if self.mapped_files:
self._data["mapped_files"] = [pattern.sub(new_path, path) for path in self.mapped_files]
else:
@@ -226,6 +231,8 @@ def replace_paths(
new_qbt_save_path = (
self.qbt_save_path.replace(existing_path, new_path) if self.qbt_save_path is not None else None
)
+ if not self.save_path:
+ new_save_path = new_qbt_save_path
if self.mapped_files:
self._data["mapped_files"] = [path.replace(existing_path, new_path) for path in self.mapped_files]
self.logger.debug(
diff --git a/qbt_migrate/methods.py b/qbt_migrate/methods.py
index 8beb4fa..54e3092 100644
--- a/qbt_migrate/methods.py
+++ b/qbt_migrate/methods.py
@@ -45,4 +45,4 @@ def discover_bt_backup_path():
return "/config/qBittorrent/BT_backup"
logger.debug("Linux/Mac System")
- return os.path.join(os.getenv("HOME", "/home"), ".local/share/data/qBittorrent/BT_backup")
+ return os.path.join(os.getenv("HOME"), ".local/share/data/qBittorrent/BT_backup")
diff --git a/tox.ini b/tox.ini
index 77cb641..dd809d7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -33,6 +33,10 @@ commands=
lint: {[lint-config]commands}
unittest: {[unittest-config]commands}
+passenv=
+ HOME
+ LOCALAPPDATA
+
[lint-config]
# Configuration necessary to lint Python files.
| Hi I need some help
I already followed the wiki but for some reason it didn't work for me, hope I'm not a bother
My previous folder path is J:\Anime\ I want to change to G:\Anime\
I followed the guide but got this error, also when I reopen qbit nothing changed

| 2022-04-16T07:13:49 | 0.0 | [] | [] |
|||
jslay88/qbt_migrate | jslay88__qbt_migrate-42 | bfda05eb4da49348889ef3e1d5e54a1d94563a7d | diff --git a/.flake8 b/.flake8
index 8ec75f5..43b9644 100644
--- a/.flake8
+++ b/.flake8
@@ -7,4 +7,5 @@ exclude =
*.egg-info
.tox
.git
+ __init__.py
max-line-length = 120
diff --git a/.github/workflows/docker-pr.yml b/.github/workflows/docker-pr.yml
new file mode 100644
index 0000000..f7824b0
--- /dev/null
+++ b/.github/workflows/docker-pr.yml
@@ -0,0 +1,37 @@
+# Builds and Pushes Docker image for PRs with label `pr-docker-build` to GHCR only.
+name: Docker Build and Push - PRs
+
+on:
+ pull_request:
+ types: [ opened, reopened, synchronize, labeled ]
+
+jobs:
+ push:
+ name: Build and Push Docker Image
+ if: ${{ contains(github.event.pull_request.labels.*.name, 'pr-docker-build') }}
+ runs-on: ubuntu-latest
+ permissions:
+ packages: write
+ contents: read
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Build Docker Image
+ run: docker build . --file Dockerfile --tag $(echo "${{ github.repository }}" | sed -e 's,.*/\(.*\),\1,') --label "runnumber=${GITHUB_RUN_ID}" --label "gitsha=${{ github.sha }}"
+
+ - name: GitHub Container Registry Login
+ run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin
+
+ - name: Push Docker Image to GHCR
+ run: |
+ IMAGE_NAME=$(echo "${{ github.repository }}" | sed -e 's,.*/\(.*\),\1,')
+ IMAGE_ID=ghcr.io/${{ github.repository }}
+ # Change all uppercase to lowercase
+ IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
+ # Lowercase branch name
+ VERSION=$(echo "${{ github.head_ref }}" | tr '[A-Z]' '[a-z]' | sed -e 's,.*/\(.*\),\1,')
+ echo IMAGE_ID=$IMAGE_ID
+ echo VERSION=$VERSION
+ docker tag $IMAGE_NAME $IMAGE_ID:$VERSION
+ docker push $IMAGE_ID:$VERSION
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 3537947..6145142 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -14,7 +14,6 @@ jobs:
toxenv:
- lint
- unittest
- - build
steps:
- uses: actions/checkout@v3
@@ -24,7 +23,6 @@ jobs:
filters: |
python:
- '**.py'
- - 'requirements.txt'
- 'tox.ini'
- name: Set up Python
@@ -67,7 +65,6 @@ jobs:
filters: |
python:
- '**.py'
- - 'requirements.txt'
- 'tox.ini'
- name: Setup Python
@@ -93,3 +90,46 @@ jobs:
if: steps.filter.outputs.python == 'true'
run: |
diff-cover coverage/coverage.xml --compare-branch=origin/master --fail-under=100
+
+ build:
+ runs-on: ubuntu-latest
+ name: Build Python Package and Upload to TestPyPi
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: dorny/[email protected]
+ id: filter
+ with:
+ filters: |
+ python:
+ - '**.py'
+ - 'pyproject.toml'
+
+ - name: Set up Python
+ uses: actions/[email protected]
+ if: steps.filter.outputs.python == 'true'
+ with:
+ python-version: 3.9
+
+ - name: Install Flit
+ if: steps.filter.outputs.python == 'true'
+ run: |
+ python -m pip install --upgrade pip
+ pip install flit
+
+ - name: Build Python Package and Upload to TestPyPi
+ if: steps.filter.outputs.python == 'true'
+ env:
+ FLIT_USERNAME: __token__
+ FLIT_PASSWORD: ${{ secrets.TESTPYPI_TOKEN }}
+ FLIT_INDEX_URL: https://test.pypi.org/legacy/
+ run: |
+ flit install --symlink --deps production
+ export VERSION_TAG=dev${GITHUB_RUN_ID}; flit publish
+
+ - name: Store Python Package as Artifact
+ uses: actions/[email protected]
+ if: steps.filter.outputs.python == 'true'
+ with:
+ name: dist
+ path: dist/
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 07579c7..4461c2e 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -11,7 +11,7 @@ on:
jobs:
build:
runs-on: ubuntu-latest
- name: Build Python Package
+ name: Build, Upload, and Release
steps:
- uses: actions/checkout@v3
@@ -21,53 +21,19 @@ jobs:
with:
python-version: 3.9
- - name: Install Tox
+ - name: Install Flit
run: |
python -m pip install --upgrade pip
- pip install tox
+ pip install flit
- - name: Build Python Package via Tox
+ - name: Build Python Package and Upload to PyPi
+ env:
+ FLIT_USERNAME: __token__
+ FLIT_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+ FLIT_INDEX_URL: https://upload.pypi.org/legacy/
run: |
- tox -e build
-
- - name: Store Python Package as Artifact
- uses: actions/[email protected]
- with:
- name: dist
- path: dist/
-
- upload:
- runs-on: ubuntu-latest
- name: Upload Package and Create Release
- permissions:
- contents: write
- needs:
- - build
-
- steps:
- - uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Setup Python
- uses: actions/[email protected]
- with:
- python-version: 3.9
-
- - name: Install Twine
- run: |
- python -m pip install --upgrade pip
- pip install twine
-
- - name: Retreive Python Package Artifact
- uses: actions/[email protected]
- with:
- name: dist
- path: dist
-
- - name: Upload Package to PyPi
- run: |
- twine upload -u "__token__" -p "${{ secrets.PYPI_TOKEN }}" dist/*
+ flit install --symlink --deps production
+ flit publish
- name: Create GitHub Release with Python Package
uses: softprops/action-gh-release@v1
diff --git a/Dockerfile b/Dockerfile
index 8365713..7d27654 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,11 +2,12 @@ FROM python:3-alpine
ENV BT_BACKUP_PATH=/tmp/BT_backup
WORKDIR /opt/qbt_migrate
COPY qbt_migrate qbt_migrate
-COPY requirements.txt requirements.txt
-COPY setup.py setup.py
+COPY pyproject.toml pyproject.toml
COPY LICENSE.md LICENSE.md
COPY README.md README.md
-RUN pip install -e .
+RUN pip install flit
+ENV FLIT_ROOT_INSTALL=1
+RUN flit install --symlink --deps production
RUN printf '#!/bin/ash\nexec qbt_migrate -b $BT_BACKUP_PATH $@' > entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
diff --git a/LICENSE.md b/LICENSE.md
index 3c0c4e8..ae89c14 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,7 +1,7 @@
The MIT License (MIT)
-Copyright (c) 2020 Justin Slay
+Copyright (c) 2022 Justin Slay
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index c7a414f..dce244a 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ It can also convert slashes when migrating between Windows and Linux/Mac.
Install from PyPi using `pip`, or jump to [Examples](#Examples) for Docker
- pip install qbt-migrate
+ pip install qbt_migrate
Run the script and follow prompts or use CLI arguments with command `qbt_migrate`
diff --git a/pyproject.toml b/pyproject.toml
index 5c2fb6a..e7fb432 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,50 @@
+[build-system]
+requires = ["flit_core>=3.2,<4"]
+build-backend = "flit_core.buildapi"
+
+[project]
+name = "qbt_migrate"
+authors = [
+ {name = "Justin Slay", email = "[email protected]"},
+]
+dependencies = [
+ "bencode.py==4.0.0",
+]
+requires-python = ">=3.8"
+readme = "README.md"
+classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "Intended Audience :: End Users/Desktop",
+ "Intended Audience :: Information Technology",
+ "License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Topic :: Communications :: File Sharing",
+ "Topic :: Software Development :: Libraries",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: System :: Recovery Tools",
+ "Typing :: Typed",
+]
+dynamic = [
+ "version",
+ "description",
+]
+
+[project.optional-dependencies]
+test = [
+ "pytest>=2.7.3",
+ "tox"
+]
+
+[project.urls]
+Source = "https://github.com/jslay88/qbt_migrate"
+
+[project.scripts]
+qbt_migrate = "qbt_migrate.cli:main"
+
[tool.black]
line-length = 120
target-version = ['py39']
diff --git a/qbt_migrate/__init__.py b/qbt_migrate/__init__.py
index 54ef8b4..cf2d954 100644
--- a/qbt_migrate/__init__.py
+++ b/qbt_migrate/__init__.py
@@ -1,8 +1,11 @@
-# flake8: noqa F401
+"""qBt Migrate, change the paths of existing torrents in qBittorrent, as well as convert paths to Windows/Linux/Mac"""
import logging
+import os
from .classes import FastResume, QBTBatchMove
from .methods import convert_slashes, discover_bt_backup_path
+__version__ = "2.2.0" + os.getenv("VERSION_TAG", "")
+
logging.getLogger(__name__).addHandler(logging.NullHandler())
diff --git a/qbt_migrate/classes.py b/qbt_migrate/classes.py
index 731467c..bd4599f 100644
--- a/qbt_migrate/classes.py
+++ b/qbt_migrate/classes.py
@@ -1,5 +1,6 @@
import logging
import os
+import re
from datetime import datetime
from threading import Thread
from typing import Optional
@@ -28,6 +29,7 @@ def run(
self,
existing_path: str,
new_path: str,
+ regex_path: bool = False,
target_os: Optional[str] = None,
create_backup: bool = True,
skip_bad_files: bool = False,
@@ -38,6 +40,8 @@ def run(
:type existing_path: str
:param new_path: New Path to replace with
:type new_path: str
+ :param regex_path: Existing and New Paths are regex patterns with capture groups
+ :type regex_path: bool
:param target_os: If targeting a different OS than the source. Must be Windows, Linux, or Mac.
:type target_os: str
:param create_backup: Create a backup archive of the BT_backup directory?
@@ -52,19 +56,27 @@ def run(
backup_folder(self.bt_backup_path, os.path.join(self.bt_backup_path, backup_filename))
self.logger.info(f"Searching for .fastresume files with path {existing_path} ...")
- for fast_resume in self.discover_relevant_fast_resume(self.bt_backup_path, existing_path, not skip_bad_files):
+ for fast_resume in self.discover_relevant_fast_resume(
+ self.bt_backup_path, existing_path, regex_path, not skip_bad_files
+ ):
# Fire and forget
self.discovered_files.add(fast_resume)
- Thread(target=fast_resume.replace_paths, args=[existing_path, new_path, target_os, True, False]).start()
+ Thread(
+ target=fast_resume.replace_paths, args=[existing_path, new_path, regex_path, target_os, True, False]
+ ).start()
@classmethod
- def discover_relevant_fast_resume(cls, bt_backup_path: str, existing_path: str, raise_on_error: bool = True):
+ def discover_relevant_fast_resume(
+ cls, bt_backup_path: str, existing_path: str, regex_path: bool = False, raise_on_error: bool = True
+ ):
"""
Find .fastresume files that contain the existing path.
:param bt_backup_path: Path to BT_backup folder
:type bt_backup_path: str
:param existing_path: The existing path to look for
:type existing_path: str
+ :param regex_path: Existing Path is a regex pattern with capture groups
+ :type: bool
:param raise_on_error: Raise if error parsing .fastresume files
:type raise_on_error: bool
:return: List of FastResume Objects
@@ -82,6 +94,10 @@ def discover_relevant_fast_resume(cls, bt_backup_path: str, existing_path: str,
continue
if existing_path in fast_resume.save_path or existing_path in fast_resume.qbt_save_path:
yield fast_resume
+ elif regex_path and (
+ re.match(existing_path, fast_resume.save_path) or re.match(existing_path, fast_resume.qbt_save_path)
+ ):
+ yield fast_resume
return
@classmethod
@@ -94,13 +110,14 @@ def update_fastresume(
fast_resume: "FastResume",
existing_path: str,
new_path: str,
+ regex_path: bool = False,
target_os: Optional[str] = None,
save_file: bool = True,
create_backup: bool = True,
):
if not isinstance(fast_resume, FastResume):
raise TypeError("Not a FastResume object, cannot replace paths!")
- fast_resume.replace_paths(existing_path, new_path, target_os, save_file, create_backup)
+ fast_resume.replace_paths(existing_path, new_path, regex_path, target_os, save_file, create_backup)
class FastResume(object):
@@ -188,17 +205,29 @@ def replace_paths(
self,
existing_path: str,
new_path: str,
+ regex_path: bool = False,
target_os: Optional[str] = None,
save_file: bool = True,
create_backup: bool = True,
):
self.logger.info(f"Replacing Paths in FastResume {self.file_path}...")
- new_save_path = self.save_path.replace(existing_path, new_path) if self.save_path is not None else None
- new_qbt_save_path = (
- self.qbt_save_path.replace(existing_path, new_path) if self.qbt_save_path is not None else None
- )
- if self.mapped_files:
- self._data["mapped_files"] = [path.replace(existing_path, new_path) for path in self.mapped_files]
+ if regex_path:
+ new_save_path = None
+ new_qbt_save_path = None
+ pattern = re.compile(existing_path)
+ if self.save_path:
+ new_save_path = pattern.sub(new_path, self.save_path)
+ if self.qbt_save_path:
+ new_qbt_save_path = pattern.sub(new_path, self.qbt_save_path)
+ if self.mapped_files:
+ self._data["mapped_files"] = [pattern.sub(new_path, path) for path in self.mapped_files]
+ else:
+ new_save_path = self.save_path.replace(existing_path, new_path) if self.save_path is not None else None
+ new_qbt_save_path = (
+ self.qbt_save_path.replace(existing_path, new_path) if self.qbt_save_path is not None else None
+ )
+ if self.mapped_files:
+ self._data["mapped_files"] = [path.replace(existing_path, new_path) for path in self.mapped_files]
self.logger.debug(
f"Existing Save Path: {existing_path}, New Save Path: {new_path}, " f"Replaced Save Path: {new_save_path}"
)
diff --git a/qbt_migrate/cli.py b/qbt_migrate/cli.py
index cbe418a..39b3bd1 100644
--- a/qbt_migrate/cli.py
+++ b/qbt_migrate/cli.py
@@ -1,5 +1,6 @@
import argparse
import logging
+import sys
from . import QBTBatchMove, discover_bt_backup_path
@@ -7,10 +8,17 @@
logger = logging.getLogger(__name__)
-def parse_args():
+def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--existing-path", help="Existing root of path to look for.")
parser.add_argument("-n", "--new-path", help="New root path to replace existing root path with.")
+ parser.add_argument(
+ "-r",
+ "--regex",
+ help="Existing and New paths are regex patterns with capture groups.",
+ action="store_true",
+ default=None,
+ )
parser.add_argument(
"-t",
"--target-os",
@@ -34,11 +42,11 @@ def parse_args():
"-l", "--log-level", help="Log Level, Default is INFO.", choices=["DEBUG", "INFO"], default="INFO"
)
- return parser.parse_args()
+ return parser.parse_args(args)
def main():
- args = parse_args()
+ args = parse_args(sys.argv[1:])
logging.basicConfig()
logger.setLevel(args.log_level)
logging.getLogger("qbt_migrate").setLevel(args.log_level)
@@ -54,11 +62,32 @@ def main():
args.existing_path = input("Existing Path: ")
if args.new_path is None:
args.new_path = input("New Path: ")
+
+ # Get Valid Regex Input
+ if args.regex is None:
+ while (answer := input("Regex Paths with Capture Groups [y/N]: ").lower().strip()) not in (
+ "y",
+ "yes",
+ "n",
+ "no",
+ "",
+ ):
+ print("Please answer y, n, yes, or no")
+ args.regex = answer.lower().strip() in ["y", "yes"]
+
+ # Get Valid Target OS Input
if args.target_os is None:
- args.target_os = input("Target OS (Windows, Linux, Mac, Blank for auto-detect): ")
- if args.target_os.strip() and args.target_os.strip().lower() not in ("windows", "linux", "mac"):
- raise ValueError(f"Target OS is not valid. Must be Windows, Linux, or Mac. Received:{args.target_os}")
- elif not args.target_os.strip():
+ while (answer := input("Target OS (Windows, Linux, Mac, Blank for auto-detect): ").lower().strip()) not in (
+ "windows",
+ "linux",
+ "mac",
+ "",
+ ):
+ print("Please answer Windows, Linux, or Mac")
+ args.target_os = answer.lower().strip()
+
+ # Handle Target OS Auto-Detect if not specified
+ if not args.target_os.strip():
if "/" in args.existing_path and "\\" in args.new_path:
logger.info("Auto detected target OS change. Will convert slashes to Windows.")
args.target_os = "windows"
@@ -67,12 +96,13 @@ def main():
args.target_os = "linux"
else:
args.target_os = None
+
logger.debug(
f"Existing Path: {args.existing_path}, New Path: {args.new_path}, "
f"Target OS: {args.target_os}, Skip Bad Files: {args.skip_bad_files}"
)
- qbm.run(args.existing_path, args.new_path, args.target_os, True, args.skip_bad_files)
+ qbm.run(args.existing_path, args.new_path, args.regex, args.target_os, True, args.skip_bad_files)
if __name__ == "__main__":
- main()
+ main() # pragma: no cover
diff --git a/qbt_migrate/methods.py b/qbt_migrate/methods.py
index 54e3092..8beb4fa 100644
--- a/qbt_migrate/methods.py
+++ b/qbt_migrate/methods.py
@@ -45,4 +45,4 @@ def discover_bt_backup_path():
return "/config/qBittorrent/BT_backup"
logger.debug("Linux/Mac System")
- return os.path.join(os.getenv("HOME"), ".local/share/data/qBittorrent/BT_backup")
+ return os.path.join(os.getenv("HOME", "/home"), ".local/share/data/qBittorrent/BT_backup")
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index a439528..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-bencode.py==4.0.0
diff --git a/setup.py b/setup.py
deleted file mode 100644
index bdecff1..0000000
--- a/setup.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from setuptools import find_packages, setup
-
-
-with open("README.md", "r") as f:
- long_description = f.read()
-
-
-with open("requirements.txt", "r") as f:
- dependencies = f.read().splitlines()
-
-
-setup(
- name="qbt_migrate",
- version="2.1.6",
- packages=find_packages(),
- install_requires=dependencies,
- description="Migrate qBittorrent FastResume files.",
- long_description=long_description,
- long_description_content_type="text/markdown",
- author="jslay88",
- url="https://github.com/jslay88/qbt_migrate",
- entry_points={"console_scripts": ["qbt_migrate = qbt_migrate.cli:main"]},
- python_requires=">=3.6.4",
-)
diff --git a/tox.ini b/tox.ini
index ed23a3b..77cb641 100644
--- a/tox.ini
+++ b/tox.ini
@@ -28,12 +28,10 @@ basepython=python3.9
deps=
lint: {[lint-config]deps}
unittest: {[unittest-config]deps}
- build: {[build-config]deps}
commands=
lint: {[lint-config]commands}
unittest: {[unittest-config]commands}
- build: {[build-config]commands}
[lint-config]
@@ -62,7 +60,7 @@ commands=
#
# To run unit tests.
deps=
- -r{toxinidir}/requirements.txt
+ bencode.py==4.0.0
pytest
coverage
commands=
@@ -71,14 +69,6 @@ commands=
coverage xml
-[build-config]
-deps=
- setuptools
- wheel
-commands=
- python setup.py sdist bdist_wheel
-
-
[testenv:lint]
# Invoke with: tox -e lint
recreate=False
@@ -101,7 +91,7 @@ commands={[unittest-config]commands}
# branch. This will fail if the coverage of changes is below 100%. This env
# requires a coverage file and should only be run after unittest
deps=
- -r{toxinidir}/requirements.txt
+ bencode.py==4.0.0
pytest
coverage
diff-cover
| [Feature Request] Make it possible to change only part of the path.
For example, if I am migrating my qBitorrent session but destination Linux install has different username, then the entire path remains the same but only the username changes.
Since I have a lot of sub-directories, changing the full path would not be ideal.
A regex option to only replace certain pattern matching string with another string would be amazing. This way I coud cutomize it to replace username in my case and I am sure other people coud find other uses for the same.
| See if this suits your needs. #29
```
pip install git+https://github.com/jslay88/qbt_migrate.git@regex-support
``` | 2022-04-16T00:33:52 | 0.0 | [] | [] |
||
jslay88/qbt_migrate | jslay88__qbt_migrate-33 | 8a9e84d73c2333f73d916d8098af589b24a888dc | diff --git a/qbt_migrate/classes.py b/qbt_migrate/classes.py
index 4409b45..c631478 100644
--- a/qbt_migrate/classes.py
+++ b/qbt_migrate/classes.py
@@ -151,7 +151,7 @@ def set_save_path(
self.save(self.backup_filename)
if target_os is not None:
path = convert_slashes(path, target_os)
- self.logger.debug(f"Setting {key}... Old: {self._data[key]}, New: {path}, Target OS: {target_os}")
+ self.logger.debug(f"Setting {key}... Old: {self._data.get(key, None)}, New: {path}, Target OS: {target_os}")
self._data[key] = path
if save_file:
self.save()
| KeyError at self._data[key] (classes.py@154)
Running the script I found the following error: `qBt-savePath` was not a key of `self._data` and it resulted in a KeyError.
To avoid it, I commented line 154 of [classes.py](https://github.com/jslay88/qbt_migrate/blob/master/qbt_migrate/classes.py):
`self.logger.debug(f"Setting {key}... Old: {self._data[key]}, New: {path}, Target OS: {target_os}")`
Btw, I can't thank you enough for your work! I have more than 100 torrents that I was moving from Windows to Linux and this tool was extremely useful
| 2022-04-15T06:00:24 | 0.0 | [] | [] |
|||
itt-ustutt/num-dual | itt-ustutt__num-dual-59 | b0846575309e48c9e5fdb4db5942a83ab58f0720 | diff --git a/Cargo.toml b/Cargo.toml
index 6ce203c..fab0df5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,6 +19,8 @@ nalgebra = "0.32"
pyo3 = { version = "0.18", optional = true, features = ["multiple-pymethods"] }
ndarray = { version = "0.15", optional = true }
numpy = { version = "0.18", optional = true }
+approx = "0.5"
+simba = "0.8"
[profile.release]
lto = true
@@ -30,7 +32,6 @@ linalg = ["ndarray"]
[dev-dependencies]
criterion = "0.4"
-approx = "0.5"
[[bench]]
name = "benchmark"
diff --git a/src/derivative.rs b/src/derivative.rs
index cb3e3b2..7824362 100644
--- a/src/derivative.rs
+++ b/src/derivative.rs
@@ -4,7 +4,9 @@ use nalgebra::constraint::{SameNumberOfRows, ShapeConstraint};
use nalgebra::*;
use std::fmt;
use std::marker::PhantomData;
+use std::mem::MaybeUninit;
use std::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
+use num_traits::Zero;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Derivative<T: DualNum<F>, F, R: Dim, C: Dim>(
@@ -35,6 +37,84 @@ where
Self::new(None)
}
+ pub(crate) fn map<T2, F2>(&self, mut f: impl FnMut(T) -> T2) -> Derivative<T2, F2, R, C>
+ where
+ T2: DualNum<F2>,
+ DefaultAllocator: Allocator<T2, R, C>,
+ {
+ let opt = self.0.as_ref().map(move |eps| eps.map(|e| f(e)));
+ Derivative::new(opt)
+ }
+
+ // A version of map that doesn't clone values before mapping. Useful for the SimdValue impl,
+ // which would be redundantly cloning all the lanes of each epsilon value before extracting
+ // just one of them.
+ //
+ // To implement, we inline a copy of Matrix::map, which implicitly clones values, and remove
+ // the cloning.
+ pub(crate) fn map_borrowed<T2, F2>(
+ &self,
+ mut f: impl FnMut(&T) -> T2,
+ ) -> Derivative<T2, F2, R, C>
+ where
+ T2: DualNum<F2>,
+ DefaultAllocator: Allocator<T2, R, C>,
+ {
+ let opt = self.0.as_ref().map(move |eps| {
+ let ref this = eps;
+ let mut f = |e| f(e);
+ let (nrows, ncols) = this.shape_generic();
+ let mut res: Matrix<MaybeUninit<T2>, R, C, _> = Matrix::uninit(nrows, ncols);
+
+ for j in 0..ncols.value() {
+ for i in 0..nrows.value() {
+ // Safety: all indices are in range.
+ unsafe {
+ let a = this.data.get_unchecked(i, j);
+ *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a));
+ }
+ }
+ }
+
+ // Safety: res is now fully initialized.
+ unsafe { res.assume_init() }
+ });
+ Derivative::new(opt)
+ }
+
+ /// Same but bails out if the closure returns None
+ pub(crate) fn try_map_borrowed<T2, F2>(
+ &self,
+ mut f: impl FnMut(&T) -> Option<T2>,
+ ) -> Option<Derivative<T2, F2, R, C>>
+ where
+ T2: DualNum<F2>,
+ DefaultAllocator: Allocator<T2, R, C>,
+ {
+ self.0
+ .as_ref()
+ .and_then(move |eps| {
+ let ref this = eps;
+ let mut f = |e| f(e);
+ let (nrows, ncols) = this.shape_generic();
+ let mut res: Matrix<MaybeUninit<T2>, R, C, _> = Matrix::uninit(nrows, ncols);
+
+ for j in 0..ncols.value() {
+ for i in 0..nrows.value() {
+ // Safety: all indices are in range.
+ unsafe {
+ let a = this.data.get_unchecked(i, j);
+ *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)?);
+ }
+ }
+ }
+
+ // Safety: res is now fully initialized.
+ Some(unsafe { res.assume_init() })
+ })
+ .map(Derivative::some)
+ }
+
pub fn derivative_generic(r: R, c: C, i: usize) -> Self {
let mut m = OMatrix::zeros_generic(r, c);
m[i] = T::one();
@@ -304,3 +384,167 @@ where
}
}
}
+
+impl<T, R: Dim, C: Dim> nalgebra::SimdValue for Derivative<T, T::Element, R, C>
+where
+ DefaultAllocator: Allocator<T, R, C> + Allocator<T::Element, R, C>,
+ T: DualNum<T::Element> + SimdValue + Scalar,
+ T::Element: DualNum<T::Element> + Scalar + Zero,
+{
+ type Element = Derivative<T::Element, T::Element, R, C>;
+
+ type SimdBool = T::SimdBool;
+
+ #[inline]
+ fn lanes() -> usize {
+ T::lanes()
+ }
+
+ #[inline]
+ fn splat(val: Self::Element) -> Self {
+ val.map(|e| T::splat(e))
+ }
+
+ #[inline]
+ fn extract(&self, i: usize) -> Self::Element {
+ self.map_borrowed(|e| T::extract(e, i))
+ }
+
+ #[inline]
+ unsafe fn extract_unchecked(&self, i: usize) -> Self::Element {
+ let opt = self
+ .map_borrowed(|e| T::extract_unchecked(e, i))
+ .0
+ // Now check it's all zeros.
+ // Unfortunately there is no way to use the vectorized version of `is_zero`, which is
+ // only for matrices with statically known dimensions. Specialization would be
+ // required.
+ .filter(|x| Iterator::any(&mut x.iter(), |e| !e.is_zero()));
+ Derivative::new(opt)
+ }
+
+ // SIMD code will expect to be able to replace one lane with another Self::Element,
+ // even with a None Derivative, e.g.
+ //
+ // let single = Derivative::none();
+ // let mut x4 = Derivative::splat(single);
+ // let one = Derivative::some(...);
+ // x4.replace(1, one);
+ //
+ // So the implementation of `replace` will need to auto-upgrade to Some(zeros) in
+ // order to satisfy requests like that.
+ fn replace(&mut self, i: usize, val: Self::Element) {
+ match (&mut self.0, val.0) {
+ (Some(ours), Some(theirs)) => {
+ ours.zip_apply(&theirs, |e, replacement| e.replace(i, replacement));
+ }
+ (ours @ None, Some(theirs)) => {
+ let (r, c) = theirs.shape_generic();
+ let mut init: OMatrix<T, R, C> = OMatrix::zeros_generic(r, c);
+ init.zip_apply(&theirs, |e, replacement| e.replace(i, replacement));
+ *ours = Some(init);
+ }
+ (Some(ours), None) => {
+ ours.apply(|e| e.replace(i, T::Element::zero()));
+ }
+ _ => {}
+ }
+ }
+
+ unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
+ match (&mut self.0, val.0) {
+ (Some(ours), Some(theirs)) => {
+ ours.zip_apply(&theirs, |e, replacement| {
+ e.replace_unchecked(i, replacement)
+ });
+ }
+ (ours @ None, Some(theirs)) => {
+ let (r, c) = theirs.shape_generic();
+ let mut init: OMatrix<T, R, C> = OMatrix::zeros_generic(r, c);
+ init.zip_apply(&theirs, |e, replacement| {
+ e.replace_unchecked(i, replacement)
+ });
+ *ours = Some(init);
+ }
+ (Some(ours), None) => {
+ ours.apply(|e| e.replace_unchecked(i, T::Element::zero()));
+ }
+ _ => {}
+ }
+ }
+
+ fn select(mut self, cond: Self::SimdBool, other: Self) -> Self {
+ // If cond is mixed, then we may need to generate big zero matrices to do the
+ // component-wise select on. So check if cond is all-true or all-first to avoid that.
+ if cond.all() {
+ self
+ } else if cond.none() {
+ other
+ } else {
+ match (&mut self.0, other.0) {
+ (Some(ours), Some(theirs)) => {
+ ours.zip_apply(&theirs, |e, other_e| {
+ // this will probably get optimized out
+ let e_ = std::mem::replace(e, T::zero());
+ *e = e_.select(cond, other_e)
+ });
+ self
+ }
+ (Some(ours), None) => {
+ ours.apply(|e| {
+ // this will probably get optimized out
+ let e_ = std::mem::replace(e, T::zero());
+ *e = e_.select(cond, T::zero());
+ });
+ self
+ }
+ (ours @ None, Some(mut theirs)) => {
+ use std::ops::Not;
+ let inverted: T::SimdBool = cond.not();
+ theirs.apply(|e| {
+ // this will probably get optimized out
+ let e_ = std::mem::replace(e, T::zero());
+ *e = e_.select(inverted, T::zero());
+ });
+ *ours = Some(theirs);
+ self
+ }
+ _ => self,
+ }
+ }
+ }
+}
+
+use simba::scalar::{SubsetOf, SupersetOf};
+
+impl<TSuper, FSuper, T, F, R: Dim, C: Dim> SubsetOf<Derivative<TSuper, FSuper, R, C>>
+ for Derivative<T, F, R, C>
+where
+ TSuper: DualNum<FSuper> + SupersetOf<T>,
+ T: DualNum<F>,
+ DefaultAllocator: Allocator<T, R, C>,
+ DefaultAllocator: Allocator<TSuper, R, C>,
+ // DefaultAllocator: Allocator<TSuper, D>
+ // + Allocator<TSuper, U1, D>
+ // + Allocator<TSuper, D, U1>
+ // + Allocator<TSuper, D, D>,
+{
+ #[inline(always)]
+ fn to_superset(&self) -> Derivative<TSuper, FSuper, R, C> {
+ self.map_borrowed(|elem| TSuper::from_subset(elem))
+ }
+ #[inline(always)]
+ fn from_superset(element: &Derivative<TSuper, FSuper, R, C>) -> Option<Self> {
+ element.try_map_borrowed(|elem| TSuper::to_subset(elem))
+ }
+ #[inline(always)]
+ fn from_superset_unchecked(element: &Derivative<TSuper, FSuper, R, C>) -> Self {
+ element.map_borrowed(|elem| TSuper::to_subset_unchecked(elem))
+ }
+ #[inline(always)]
+ fn is_in_subset(element: &Derivative<TSuper, FSuper, R, C>) -> bool {
+ element.0.as_ref().map_or(true, |matrix| {
+ matrix.iter().all(|elem| TSuper::is_in_subset(elem))
+ })
+ }
+}
diff --git a/src/dual.rs b/src/dual.rs
index 6ddd347..dcaf877 100644
--- a/src/dual.rs
+++ b/src/dual.rs
@@ -1,4 +1,5 @@
use crate::{Derivative, DualNum, DualNumFloat};
+use approx::{AbsDiffEq, RelativeEq, UlpsEq};
use nalgebra::allocator::Allocator;
use nalgebra::*;
use num_traits::{Float, FloatConst, FromPrimitive, Inv, Num, One, Signed, Zero};
@@ -11,7 +12,7 @@ use std::ops::{
};
/// A dual number for the calculations of gradients or Jacobians.
-#[derive(PartialEq, Eq, Clone, Debug)]
+#[derive(Clone, Debug)]
pub struct DualVec<T: DualNum<F>, F, D: Dim>
where
DefaultAllocator: Allocator<T, D>,
@@ -283,3 +284,727 @@ where
impl_first_derivatives!(DualVec, [eps], [D]);
impl_dual!(DualVec, [eps], [D]);
+
+/**
+ * The SimdValue trait is for rearranging data into a form more suitable for Simd,
+ * and rearranging it back into a usable form. It is not documented particularly well.
+ *
+ * The primary job of this SimdValue impl is to allow people to use `simba::simd::f32x4` etc,
+ * instead of f32/f64. Those types implement nalgebra::SimdRealField/ComplexField, so they
+ * behave like scalars. When we use them, we would have `DualVec<f32x4, f32, N>` etc, with our
+ * F parameter set to <T as SimdValue>::Element. We will need to be able to split up that type
+ * into four of DualVec in order to get out of simd-land. That's what the SimdValue trait is for.
+ *
+ * Ultimately, someone will have to to implement SimdRealField on DualVec and call the
+ * simd_ functions of <T as SimdRealField>. That's future work for someone who finds
+ * num_dual is not fast enough.
+ *
+ * Unfortunately, doing anything with SIMD is blocked on
+ * <https://github.com/dimforge/simba/issues/44>.
+ *
+ */
+impl<T, D: Dim> nalgebra::SimdValue for DualVec<T, T::Element, D>
+where
+ DefaultAllocator: Allocator<T, D> + Allocator<T::Element, D>,
+ T: DualNum<T::Element> + SimdValue + Scalar,
+ T::Element: DualNum<T::Element> + Scalar,
+{
+ // Say T = simba::f32x4. T::Element is f32. T::SimdBool is AutoSimd<[bool; 4]>.
+ // AutoSimd<[f32; 4]> stores an actual [f32; 4], i.e. four floats in one slot.
+ // So our DualVec<AutoSimd<[f32; 4], f32, N> has 4 * (1+N) floats in it, stored in blocks of
+ // four. When we want to do any math on it but ignore its f32x4 storage mode, we need to break
+ // that type into FOUR of DualVec<f32, f32, N>; then we do math on it, then we bring it back
+ // together.
+ //
+ // Hence this definition of Element:
+ type Element = DualVec<T::Element, T::Element, D>;
+ type SimdBool = T::SimdBool;
+
+ #[inline]
+ fn lanes() -> usize {
+ T::lanes()
+ }
+
+ #[inline]
+ fn splat(val: Self::Element) -> Self {
+ // Need to make `lanes` copies of each of:
+ // - the real part
+ // - each of the N epsilon parts
+ let re = T::splat(val.re);
+ let eps = Derivative::splat(val.eps);
+ Self {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+
+ #[inline]
+ fn extract(&self, i: usize) -> Self::Element {
+ let re = self.re.extract(i);
+ let eps = self.eps.extract(i);
+ Self::Element {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+
+ #[inline]
+ unsafe fn extract_unchecked(&self, i: usize) -> Self::Element {
+ let re = self.re.extract_unchecked(i);
+ let eps = self.eps.extract_unchecked(i);
+ Self::Element {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+
+ #[inline]
+ fn replace(&mut self, i: usize, val: Self::Element) {
+ self.re.replace(i, val.re);
+ self.eps.replace(i, val.eps);
+ }
+
+ #[inline]
+ unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
+ self.re.replace_unchecked(i, val.re);
+ self.eps.replace_unchecked(i, val.eps);
+ }
+
+ #[inline]
+ fn select(self, cond: Self::SimdBool, other: Self) -> Self {
+ let re = self.re.select(cond, other.re);
+ let eps = self.eps.select(cond, other.eps);
+ Self {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+}
+
+/// Comparisons are only made based on the real part. This allows the code to follow the
+/// same execution path as real-valued code would.
+impl<T: DualNum<F> + PartialEq, F: Float, D: Dim> PartialEq for DualVec<T, F, D>
+where
+ DefaultAllocator: Allocator<T, D>,
+{
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.re.eq(&other.re)
+ }
+}
+/// Like PartialEq, comparisons are only made based on the real part. This allows the code to follow the
+/// same execution path as real-valued code would.
+impl<T: DualNum<F> + PartialOrd, F: Float, D: Dim> PartialOrd for DualVec<T, F, D>
+where
+ DefaultAllocator: Allocator<T, D>,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ self.re.partial_cmp(&other.re)
+ }
+}
+/// Like PartialEq, comparisons are only made based on the real part. This allows the code to follow the
+/// same execution path as real-valued code would.
+impl<T: DualNum<F> + approx::AbsDiffEq<Epsilon = T>, F: Float, D: Dim> approx::AbsDiffEq
+ for DualVec<T, F, D>
+where
+ DefaultAllocator: Allocator<T, D>,
+{
+ type Epsilon = Self;
+ #[inline]
+ fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
+ self.re.abs_diff_eq(&other.re, epsilon.re)
+ }
+
+ #[inline]
+ fn default_epsilon() -> Self::Epsilon {
+ Self::from_re(T::default_epsilon())
+ }
+}
+/// Like PartialEq, comparisons are only made based on the real part. This allows the code to follow the
+/// same execution path as real-valued code would.
+impl<T: DualNum<F> + approx::RelativeEq<Epsilon = T>, F: Float, D: Dim> approx::RelativeEq
+ for DualVec<T, F, D>
+where
+ DefaultAllocator: Allocator<T, D>,
+{
+ #[inline]
+ fn default_max_relative() -> Self::Epsilon {
+ Self::from_re(T::default_max_relative())
+ }
+
+ #[inline]
+ fn relative_eq(
+ &self,
+ other: &Self,
+ epsilon: Self::Epsilon,
+ max_relative: Self::Epsilon,
+ ) -> bool {
+ self.re.relative_eq(&other.re, epsilon.re, max_relative.re)
+ }
+}
+impl<T: DualNum<F> + UlpsEq<Epsilon = T>, F: Float, D: Dim> UlpsEq for DualVec<T, F, D>
+where
+ DefaultAllocator: Allocator<T, D>,
+{
+ #[inline]
+ fn default_max_ulps() -> u32 {
+ T::default_max_ulps()
+ }
+
+ #[inline]
+ fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
+ T::ulps_eq(&self.re, &other.re, epsilon.re, max_ulps)
+ }
+}
+
+impl<T, D: Dim> nalgebra::Field for DualVec<T, T::Element, D>
+where
+ T: DualNum<T::Element> + SimdValue,
+ T::Element: DualNum<T::Element> + Scalar + Float,
+ DefaultAllocator:
+ Allocator<T, D> + Allocator<T, U1, D> + Allocator<T, D, U1> + Allocator<T, D, D>,
+ DefaultAllocator: Allocator<T::Element, D>
+ + Allocator<T::Element, U1, D>
+ + Allocator<T::Element, D, U1>
+ + Allocator<T::Element, D, D>,
+{
+}
+
+use simba::scalar::{SubsetOf, SupersetOf};
+
+impl<TSuper, FSuper, T, F, D: Dim> SubsetOf<DualVec<TSuper, FSuper, D>> for DualVec<T, F, D>
+where
+ TSuper: DualNum<FSuper> + SupersetOf<T>,
+ T: DualNum<F>,
+ DefaultAllocator:
+ Allocator<T, D> + Allocator<T, U1, D> + Allocator<T, D, U1> + Allocator<T, D, D>,
+ DefaultAllocator: Allocator<TSuper, D>
+ + Allocator<TSuper, U1, D>
+ + Allocator<TSuper, D, U1>
+ + Allocator<TSuper, D, D>,
+{
+ #[inline(always)]
+ fn to_superset(&self) -> DualVec<TSuper, FSuper, D> {
+ let re = TSuper::from_subset(&self.re);
+ let eps = Derivative::from_subset(&self.eps);
+ DualVec {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+ #[inline(always)]
+ fn from_superset(element: &DualVec<TSuper, FSuper, D>) -> Option<Self> {
+ let re = TSuper::to_subset(&element.re)?;
+ let eps = Derivative::to_subset(&element.eps)?;
+ Some(Self {
+ re,
+ eps,
+ f: PhantomData,
+ })
+ }
+ #[inline(always)]
+ fn from_superset_unchecked(element: &DualVec<TSuper, FSuper, D>) -> Self {
+ let re = TSuper::to_subset_unchecked(&element.re);
+ let eps = Derivative::to_subset_unchecked(&element.eps);
+ Self {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+ #[inline(always)]
+ fn is_in_subset(element: &DualVec<TSuper, FSuper, D>) -> bool {
+ TSuper::is_in_subset(&element.re)
+ && <Derivative<_, _, _, _> as SupersetOf<Derivative<_, _, _, _>>>::is_in_subset(
+ &element.eps,
+ )
+ }
+}
+
+impl<TSuper, FSuper, D: Dim> SupersetOf<f32> for DualVec<TSuper, FSuper, D>
+where
+ TSuper: DualNum<FSuper> + SupersetOf<f32>,
+ DefaultAllocator: Allocator<TSuper, D>
+ + Allocator<TSuper, U1, D>
+ + Allocator<TSuper, D, U1>
+ + Allocator<TSuper, D, D>,
+{
+ #[inline(always)]
+ fn is_in_subset(&self) -> bool {
+ self.re.is_in_subset()
+ }
+
+ #[inline(always)]
+ fn to_subset_unchecked(&self) -> f32 {
+ self.re.to_subset_unchecked()
+ }
+
+ #[inline(always)]
+ fn from_subset(element: &f32) -> Self {
+ // Interpret as a purely real number
+ let re = TSuper::from_subset(element);
+ let eps = Derivative::none();
+ Self {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+}
+
+impl<TSuper, FSuper, D: Dim> SupersetOf<f64> for DualVec<TSuper, FSuper, D>
+where
+ TSuper: DualNum<FSuper> + SupersetOf<f64>,
+ DefaultAllocator: Allocator<TSuper, D>
+ + Allocator<TSuper, U1, D>
+ + Allocator<TSuper, D, U1>
+ + Allocator<TSuper, D, D>,
+{
+ #[inline(always)]
+ fn is_in_subset(&self) -> bool {
+ self.re.is_in_subset()
+ }
+
+ #[inline(always)]
+ fn to_subset_unchecked(&self) -> f64 {
+ self.re.to_subset_unchecked()
+ }
+
+ #[inline(always)]
+ fn from_subset(element: &f64) -> Self {
+ // Interpret as a purely real number
+ let re = TSuper::from_subset(element);
+ let eps = Derivative::none();
+ Self {
+ re,
+ eps,
+ f: PhantomData,
+ }
+ }
+}
+
+// We can't do a simd implementation until simba lets us implement SimdPartialOrd
+// using _T_'s SimdBool. The blanket impl gets in the way. So we must constrain
+// T to SimdValue<Element = T, SimdBool = bool>, which is basically the same as
+// saying f32 or f64 only.
+//
+// Limitation of simba. See https://github.com/dimforge/simba/issues/44
+
+use nalgebra::{ComplexField, RealField};
+// This impl is modelled on `impl ComplexField for f32`. The imaginary part is nothing.
+impl<T, D: Dim> ComplexField for DualVec<T, T::Element, D>
+where
+ T: DualNum<T::Element> + SupersetOf<T> + AbsDiffEq<Epsilon = T> + Sync + Send,
+ T::Element: DualNum<T::Element> + Scalar + DualNumFloat + Sync + Send,
+ T: SupersetOf<T::Element>,
+ T: SupersetOf<f64>,
+ T: SimdPartialOrd + PartialOrd,
+ T: SimdValue<Element = T, SimdBool = bool>,
+ T: RelativeEq + UlpsEq + AbsDiffEq,
+ DefaultAllocator:
+ Allocator<T, D> + Allocator<T, U1, D> + Allocator<T, D, U1> + Allocator<T, D, D>,
+ <DefaultAllocator as Allocator<T, D>>::Buffer: Sync + Send,
+{
+ type RealField = Self;
+
+ #[inline]
+ fn from_real(re: Self::RealField) -> Self {
+ re
+ }
+
+ #[inline]
+ fn real(self) -> Self::RealField {
+ self
+ }
+
+ #[inline]
+ fn imaginary(self) -> Self::RealField {
+ Self::zero()
+ }
+
+ #[inline]
+ fn modulus(self) -> Self::RealField {
+ self.abs()
+ }
+
+ #[inline]
+ fn modulus_squared(self) -> Self::RealField {
+ &self * &self
+ }
+
+ #[inline]
+ fn argument(self) -> Self::RealField {
+ Self::zero()
+ }
+
+ #[inline]
+ fn norm1(self) -> Self::RealField {
+ self.abs()
+ }
+
+ #[inline]
+ fn scale(self, factor: Self::RealField) -> Self {
+ self * factor
+ }
+
+ #[inline]
+ fn unscale(self, factor: Self::RealField) -> Self {
+ self / factor
+ }
+
+ #[inline]
+ fn floor(self) -> Self {
+ panic!("called floor() on a dual number")
+ }
+
+ #[inline]
+ fn ceil(self) -> Self {
+ panic!("called ceil() on a dual number")
+ }
+
+ #[inline]
+ fn round(self) -> Self {
+ panic!("called round() on a dual number")
+ }
+
+ #[inline]
+ fn trunc(self) -> Self {
+ panic!("called trunc() on a dual number")
+ }
+
+ #[inline]
+ fn fract(self) -> Self {
+ panic!("called fract() on a dual number")
+ }
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self {
+ DualNum::mul_add(&self, a, b)
+ }
+
+ #[inline]
+ fn abs(self) -> Self::RealField {
+ Signed::abs(&self)
+ }
+
+ #[inline]
+ fn hypot(self, other: Self) -> Self::RealField {
+ let sum_sq = self.powi(2) + other.powi(2);
+ DualNum::sqrt(&sum_sq)
+ }
+
+ #[inline]
+ fn recip(self) -> Self {
+ DualNum::recip(&self)
+ }
+
+ #[inline]
+ fn conjugate(self) -> Self {
+ self
+ }
+
+ #[inline]
+ fn sin(self) -> Self {
+ DualNum::sin(&self)
+ }
+
+ #[inline]
+ fn cos(self) -> Self {
+ DualNum::cos(&self)
+ }
+
+ #[inline]
+ fn sin_cos(self) -> (Self, Self) {
+ DualNum::sin_cos(&self)
+ }
+
+ #[inline]
+ fn tan(self) -> Self {
+ DualNum::tan(&self)
+ }
+
+ #[inline]
+ fn asin(self) -> Self {
+ DualNum::asin(&self)
+ }
+
+ #[inline]
+ fn acos(self) -> Self {
+ DualNum::acos(&self)
+ }
+
+ #[inline]
+ fn atan(self) -> Self {
+ DualNum::atan(&self)
+ }
+
+ #[inline]
+ fn sinh(self) -> Self {
+ DualNum::sinh(&self)
+ }
+
+ #[inline]
+ fn cosh(self) -> Self {
+ DualNum::cosh(&self)
+ }
+
+ #[inline]
+ fn tanh(self) -> Self {
+ DualNum::tanh(&self)
+ }
+
+ #[inline]
+ fn asinh(self) -> Self {
+ DualNum::asinh(&self)
+ }
+
+ #[inline]
+ fn acosh(self) -> Self {
+ DualNum::acosh(&self)
+ }
+
+ #[inline]
+ fn atanh(self) -> Self {
+ DualNum::atanh(&self)
+ }
+
+ #[inline]
+ fn log(self, base: Self::RealField) -> Self {
+ DualNum::ln(&self) / DualNum::ln(&base)
+ }
+
+ #[inline]
+ fn log2(self) -> Self {
+ DualNum::log2(&self)
+ }
+
+ #[inline]
+ fn log10(self) -> Self {
+ DualNum::log10(&self)
+ }
+
+ #[inline]
+ fn ln(self) -> Self {
+ DualNum::ln(&self)
+ }
+
+ #[inline]
+ fn ln_1p(self) -> Self {
+ DualNum::ln_1p(&self)
+ }
+
+ #[inline]
+ fn sqrt(self) -> Self {
+ DualNum::sqrt(&self)
+ }
+
+ #[inline]
+ fn exp(self) -> Self {
+ DualNum::exp(&self)
+ }
+
+ #[inline]
+ fn exp2(self) -> Self {
+ DualNum::exp2(&self)
+ }
+
+ #[inline]
+ fn exp_m1(self) -> Self {
+ DualNum::exp_m1(&self)
+ }
+
+ #[inline]
+ fn powi(self, n: i32) -> Self {
+ DualNum::powi(&self, n)
+ }
+
+ #[inline]
+ fn powf(self, n: Self::RealField) -> Self {
+ // n could be a dual.
+ DualNum::powd(&self, n)
+ }
+
+ #[inline]
+ fn powc(self, n: Self) -> Self {
+ // same as powf, Self isn't complex
+ self.powf(n)
+ }
+
+ #[inline]
+ fn cbrt(self) -> Self {
+ DualNum::cbrt(&self)
+ }
+
+ #[inline]
+ fn is_finite(&self) -> bool {
+ self.re.is_finite()
+ }
+
+ #[inline]
+ fn try_sqrt(self) -> Option<Self> {
+ if self > Self::zero() {
+ Some(DualNum::sqrt(&self))
+ } else {
+ None
+ }
+ }
+}
+
+impl<T, D: Dim> RealField for DualVec<T, T::Element, D>
+where
+ T: DualNum<T::Element> + SupersetOf<T> + Sync + Send,
+ T::Element: DualNum<T::Element> + Scalar + DualNumFloat,
+ T: SupersetOf<T::Element>,
+ T: SupersetOf<f64>,
+ T: SimdPartialOrd + PartialOrd,
+ T: RelativeEq + AbsDiffEq<Epsilon = T>,
+ T: SimdValue<Element = T, SimdBool = bool>,
+ T: UlpsEq,
+ T: AbsDiffEq,
+ DefaultAllocator:
+ Allocator<T, D> + Allocator<T, U1, D> + Allocator<T, D, U1> + Allocator<T, D, D>,
+ <DefaultAllocator as Allocator<T, D>>::Buffer: Sync + Send,
+{
+ #[inline]
+ fn copysign(self, _sign: Self) -> Self {
+ todo!("copysign not yet implemented on dual numbers")
+ }
+
+ #[inline]
+ fn atan2(self, _other: Self) -> Self {
+ todo!("atan2 not yet implemented on dual numbers")
+ }
+
+ #[inline]
+ fn pi() -> Self {
+ Self::from_re(<T as FloatConst>::PI())
+ }
+
+ #[inline]
+ fn two_pi() -> Self {
+ Self::from_re(<T as FloatConst>::TAU())
+ }
+
+ #[inline]
+ fn frac_pi_2() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_PI_4())
+ }
+
+ #[inline]
+ fn frac_pi_3() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_PI_3())
+ }
+
+ #[inline]
+ fn frac_pi_4() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_PI_4())
+ }
+
+ #[inline]
+ fn frac_pi_6() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_PI_6())
+ }
+
+ #[inline]
+ fn frac_pi_8() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_PI_8())
+ }
+
+ #[inline]
+ fn frac_1_pi() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_1_PI())
+ }
+
+ #[inline]
+ fn frac_2_pi() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_2_PI())
+ }
+
+ #[inline]
+ fn frac_2_sqrt_pi() -> Self {
+ Self::from_re(<T as FloatConst>::FRAC_2_SQRT_PI())
+ }
+
+ #[inline]
+ fn e() -> Self {
+ Self::from_re(<T as FloatConst>::E())
+ }
+
+ #[inline]
+ fn log2_e() -> Self {
+ Self::from_re(<T as FloatConst>::LOG2_E())
+ }
+
+ #[inline]
+ fn log10_e() -> Self {
+ Self::from_re(<T as FloatConst>::LOG10_E())
+ }
+
+ #[inline]
+ fn ln_2() -> Self {
+ Self::from_re(<T as FloatConst>::LN_2())
+ }
+
+ #[inline]
+ fn ln_10() -> Self {
+ Self::from_re(<T as FloatConst>::LN_10())
+ }
+
+ #[inline]
+ fn is_sign_positive(&self) -> bool {
+ self.re.is_sign_positive()
+ }
+
+ #[inline]
+ fn is_sign_negative(&self) -> bool {
+ self.re.is_sign_negative()
+ }
+
+ /// Got to be careful using this, because it throws away the derivatives of the one not chosen
+ #[inline]
+ fn max(self, other: Self) -> Self {
+ if other > self {
+ other
+ } else {
+ self
+ }
+ }
+
+ /// Got to be careful using this, because it throws away the derivatives of the one not chosen
+ #[inline]
+ fn min(self, other: Self) -> Self {
+ if other < self {
+ other
+ } else {
+ self
+ }
+ }
+
+ /// If the min/max values are constants and the clamping has an effect, you lose your gradients.
+ #[inline]
+ fn clamp(self, min: Self, max: Self) -> Self {
+ if self < min {
+ min
+ } else if self > max {
+ max
+ } else {
+ self
+ }
+ }
+
+ #[inline]
+ fn min_value() -> Option<Self> {
+ Some(Self::from_re(T::min_value()))
+ }
+
+ #[inline]
+ fn max_value() -> Option<Self> {
+ Some(Self::from_re(T::max_value()))
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index ccbf725..fc73411 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -42,7 +42,7 @@
#![warn(clippy::all)]
#![allow(clippy::needless_range_loop)]
-use num_traits::{Float, FromPrimitive, Inv, NumAssignOps, NumOps, Signed};
+use num_traits::{Float, FloatConst, FromPrimitive, Inv, NumAssignOps, NumOps, Signed};
use std::fmt;
use std::iter::{Product, Sum};
@@ -212,11 +212,11 @@ pub trait DualNum<F>:
/// The underlying data type of individual derivatives. Usually f32 or f64.
pub trait DualNumFloat:
- Float + FromPrimitive + Signed + fmt::Display + fmt::Debug + Sync + Send + 'static
+ Float + FloatConst + FromPrimitive + Signed + fmt::Display + fmt::Debug + Sync + Send + 'static
{
}
impl<T> DualNumFloat for T where
- T: Float + FromPrimitive + Signed + fmt::Display + fmt::Debug + Sync + Send + 'static
+ T: Float + FloatConst + FromPrimitive + Signed + fmt::Display + fmt::Debug + Sync + Send + 'static
{
}
diff --git a/src/macros.rs b/src/macros.rs
index 1f73583..d6baf87 100644
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -557,66 +557,82 @@ macro_rules! impl_float_const {
$($(DefaultAllocator: Allocator<T, $dim> + Allocator<T, U1, $dim> + Allocator<T, $dim, $dim>,)*
DefaultAllocator: Allocator<T$(, $dim)*>)?
{
+ #[allow(non_snake_case)]
fn E() -> Self {
Self::from(F::E())
}
+ #[allow(non_snake_case)]
fn FRAC_1_PI() -> Self {
Self::from(F::FRAC_1_PI())
}
+ #[allow(non_snake_case)]
fn FRAC_1_SQRT_2() -> Self {
Self::from(F::FRAC_1_SQRT_2())
}
+ #[allow(non_snake_case)]
fn FRAC_2_PI() -> Self {
Self::from(F::FRAC_2_PI())
}
+ #[allow(non_snake_case)]
fn FRAC_2_SQRT_PI() -> Self {
Self::from(F::FRAC_2_SQRT_PI())
}
+ #[allow(non_snake_case)]
fn FRAC_PI_2() -> Self {
Self::from(F::FRAC_PI_2())
}
+ #[allow(non_snake_case)]
fn FRAC_PI_3() -> Self {
Self::from(F::FRAC_PI_3())
}
+ #[allow(non_snake_case)]
fn FRAC_PI_4() -> Self {
Self::from(F::FRAC_PI_4())
}
+ #[allow(non_snake_case)]
fn FRAC_PI_6() -> Self {
Self::from(F::FRAC_PI_6())
}
+ #[allow(non_snake_case)]
fn FRAC_PI_8() -> Self {
Self::from(F::FRAC_PI_8())
}
+ #[allow(non_snake_case)]
fn LN_10() -> Self {
Self::from(F::LN_10())
}
+ #[allow(non_snake_case)]
fn LN_2() -> Self {
Self::from(F::LN_2())
}
+ #[allow(non_snake_case)]
fn LOG10_E() -> Self {
Self::from(F::LOG10_E())
}
+ #[allow(non_snake_case)]
fn LOG2_E() -> Self {
Self::from(F::LOG2_E())
}
+ #[allow(non_snake_case)]
fn PI() -> Self {
Self::from(F::PI())
}
+ #[allow(non_snake_case)]
fn SQRT_2() -> Self {
Self::from(F::SQRT_2())
}
| Compatibility of 'DualNum' trait with `nalgebra`
By implementing [nalgebra::ComplexField](https://docs.rs/nalgebra/latest/nalgebra/trait.ComplexField.html) for dual numbers (analogous to the implementation for `f64`) the full linear algebra capabilities of nalgebra (in particular eigenvalues) can be used with arbitrary generalized (hyper-) dual numbers. Currently this is only possible with first order dual numbers.
In this context it is worth reconsidering whether the `Float` trait should also be part of the supertraits of `DualNum`.
If this is implemented, the `linalg` module can be discarded in favor of the well established functionalities in `nalgebra`.
| The required traits for using the eigenvalue function in nalgebra are so unfathomably restrictive (e.g., including `SimdValue`) that it is unlikely that they will ever be implemented for structs other than `f32` and `f64`. Therefore, it is unlikely that we can completely get rid of the eigenvalue algorithm in `num-dual`. | 2023-04-21T10:32:38 | 0.0 | [] | [] |
||
Bluetooth-Devices/oralb-ble | Bluetooth-Devices__oralb-ble-29 | e65ecc69ea11d96e52ece1c0ffaaa2887df7612c | diff --git a/src/oralb_ble/parser.py b/src/oralb_ble/parser.py
index ab19ea7..8b70d79 100644
--- a/src/oralb_ble/parser.py
+++ b/src/oralb_ble/parser.py
@@ -41,7 +41,7 @@ class Models(Enum):
TriumphV2 = auto()
IOSeries8 = auto()
IOSeries9 = auto()
- IOSeries78 = auto()
+ IOSeries67 = auto()
IOSeries4 = auto()
SmartSeries4000 = auto()
SmartSeries6000 = auto()
@@ -80,6 +80,7 @@ class ModelDescription:
3: "whiten",
4: "intense",
5: "super sensitive",
+ 6: "tongue cleaning",
8: "settings",
}
@@ -87,8 +88,8 @@ class ModelDescription:
DEVICE_TYPES = {
Models.Pro6000: ModelDescription("Pro 6000", SMART_SERIES_MODES),
Models.TriumphV2: ModelDescription("Triumph V2", SMART_SERIES_MODES),
- Models.IOSeries78: ModelDescription(
- device_type="IO Series 7/8",
+ Models.IOSeries67: ModelDescription(
+ device_type="IO Series 6/7",
modes=IO_SERIES_MODES,
),
Models.IOSeries8: ModelDescription(
@@ -165,7 +166,7 @@ class ModelDescription:
BYTES_TO_MODEL = {
- b"\x062k": Models.IOSeries78,
+ b"\x062k": Models.IOSeries67,
b"\x074\x0c": Models.IOSeries4,
b"\x03V\x04": Models.SmartSeries4000,
b"\x04'\r": Models.SmartSeries6000,
| IO 8 reports as Smart Series 7000
We have an IO 8 but it reports in HA as Smart Series 7000. Most attributes work with some success but I think its not quite right.
Output from brushing this morning:
`2022-11-09 06:26:14.716 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x08r\x00\x00\x00\x07\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-96) connectable: True match: set() rssi: -96
2022-11-09 06:26:44.648 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00\x0f\x00\x01\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-95) connectable: True match: set() rssi: -95
2022-11-09 06:27:03.151 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00"\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-97) connectable: True match: set() rssi: -97
2022-11-09 06:27:03.998 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00#\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-97) connectable: True match: set() rssi: -97
2022-11-09 06:27:06.224 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00%\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-94) connectable: True match: set() rssi: -94
2022-11-09 06:27:17.698 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x000\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-93) connectable: True match: set() rssi: -93
2022-11-09 06:27:19.332 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x002\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:27:24.362 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x007\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-97) connectable: True match: set() rssi: -97
2022-11-09 06:27:43.190 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x0e\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-95) connectable: True match: set() rssi: -95
2022-11-09 06:27:44.013 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x0f\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-93) connectable: True match: set() rssi: -93
2022-11-09 06:27:46.067 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x11\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-99) connectable: True match: set() rssi: -99
2022-11-09 06:27:48.328 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x13\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-99) connectable: True match: set() rssi: -99
2022-11-09 06:27:49.127 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x14\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-96) connectable: True match: set() rssi: -96
2022-11-09 06:28:15.864 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] Oral-B Toothbrush (F8:8A:5E:E1:DB:58): Switching from esp32-bluetooth-proxy-c814ac[True] to esp32-bluetooth-proxy-cb54fc[True] (time elapsed:26.735744516015984 > stale seconds:25.111759460996836)
2022-11-09 06:28:15.865 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01/\x00\x04\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-94) connectable: True match: set() rssi: -94
2022-11-09 06:28:18.624 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x011\x00\x04\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-96) connectable: True match: set() rssi: -96
2022-11-09 06:28:18.827 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x012\x00\x04\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:28:40.537 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x02\x0b\x00\x07\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:29:02.244 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x02\x1e\x00*\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:29:40.784 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x02r\x025\x002\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-99) connectable: True match: set() rssi: -99
2022-11-09 06:29:48.841 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] Oral-B Toothbrush (F8:8A:5E:E1:DB:58): Switching from esp32-bluetooth-proxy-cb54fc[True] to esp32-bluetooth-proxy-c814ac[True] (new rssi:-93 - threshold:5 > old rssi:-99)
2022-11-09 06:29:59.283 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] Oral-B Toothbrush (F8:8A:5E:E1:DB:58): Switching from esp32-bluetooth-proxy-c814ac[True] to esp32-bluetooth-proxy-cb54fc[True] (time elapsed:1.2080062919994816 > stale seconds:1.1120059869717807)`
| Thanks for the `AdvertisementData`
@bdraco if you need any more logging to be done for different features, just tag me, this was just from a standard clean, so aware there are menu options etc to go through and maybe config for the pressure?
It was using the mapping for Smart Series instead of IO Series so it should just work with the right mapping now | 2023-01-07T18:47:19 | 0.0 | [] | [] |
||
Bluetooth-Devices/oralb-ble | Bluetooth-Devices__oralb-ble-25 | 584a3fe620c9bc95f90a9d54b4fb3cd7436a001b | diff --git a/src/oralb_ble/parser.py b/src/oralb_ble/parser.py
index b4781f1..eade1ec 100644
--- a/src/oralb_ble/parser.py
+++ b/src/oralb_ble/parser.py
@@ -39,6 +39,7 @@ class Models(Enum):
Pro6000 = auto()
TriumphV2 = auto()
+ IOSeries8 = auto()
IOSeries9 = auto()
IOSeries78 = auto()
IOSeries4 = auto()
@@ -90,6 +91,10 @@ class ModelDescription:
device_type="IO Series 7/8",
modes=IO_SERIES_MODES,
),
+ Models.IOSeries8: ModelDescription(
+ device_type="IO Series 8",
+ modes=IO_SERIES_MODES,
+ ),
Models.IOSeries9: ModelDescription(
device_type="IO Series 9",
modes=IO_SERIES_MODES,
@@ -166,6 +171,7 @@ class ModelDescription:
b"\x04'\r": Models.SmartSeries6000,
b'\x03"\x0c': Models.SmartSeries8000,
b"\x03!\x0c": Models.SmartSeries9000,
+ b"\x061\x19": Models.IOSeries8,
b"\x061\x16": Models.IOSeries9,
b"\x02\x02\x06": Models.TriumphV2,
b"\x01\x02\x05": Models.Pro6000,
| IO 8 reports as Smart Series 7000
We have an IO 8 but it reports in HA as Smart Series 7000. Most attributes work with some success but I think its not quite right.
Output from brushing this morning:
`2022-11-09 06:26:14.716 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x08r\x00\x00\x00\x07\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-96) connectable: True match: set() rssi: -96
2022-11-09 06:26:44.648 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00\x0f\x00\x01\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-95) connectable: True match: set() rssi: -95
2022-11-09 06:27:03.151 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00"\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-97) connectable: True match: set() rssi: -97
2022-11-09 06:27:03.998 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00#\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-97) connectable: True match: set() rssi: -97
2022-11-09 06:27:06.224 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x00%\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-94) connectable: True match: set() rssi: -94
2022-11-09 06:27:17.698 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x000\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-93) connectable: True match: set() rssi: -93
2022-11-09 06:27:19.332 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x002\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:27:24.362 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x007\x00\x02\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-97) connectable: True match: set() rssi: -97
2022-11-09 06:27:43.190 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x0e\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-95) connectable: True match: set() rssi: -95
2022-11-09 06:27:44.013 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x0f\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-93) connectable: True match: set() rssi: -93
2022-11-09 06:27:46.067 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x11\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-99) connectable: True match: set() rssi: -99
2022-11-09 06:27:48.328 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x13\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-99) connectable: True match: set() rssi: -99
2022-11-09 06:27:49.127 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-c814ac: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01\x14\x00\x03\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-96) connectable: True match: set() rssi: -96
2022-11-09 06:28:15.864 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] Oral-B Toothbrush (F8:8A:5E:E1:DB:58): Switching from esp32-bluetooth-proxy-c814ac[True] to esp32-bluetooth-proxy-cb54fc[True] (time elapsed:26.735744516015984 > stale seconds:25.111759460996836)
2022-11-09 06:28:15.865 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x01/\x00\x04\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-94) connectable: True match: set() rssi: -94
2022-11-09 06:28:18.624 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x011\x00\x04\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-96) connectable: True match: set() rssi: -96
2022-11-09 06:28:18.827 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x012\x00\x04\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:28:40.537 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x02\x0b\x00\x07\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:29:02.244 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x03r\x02\x1e\x00*\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-98) connectable: True match: set() rssi: -98
2022-11-09 06:29:40.784 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] esp32-bluetooth-proxy-cb54fc: F8:8A:5E:E1:DB:58 AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x061\x19\x02r\x025\x002\x00\x00'}, service_uuids=['0000fe0d-0000-1000-8000-00805f9b34fb'], tx_power=-127, rssi=-99) connectable: True match: set() rssi: -99
2022-11-09 06:29:48.841 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] Oral-B Toothbrush (F8:8A:5E:E1:DB:58): Switching from esp32-bluetooth-proxy-cb54fc[True] to esp32-bluetooth-proxy-c814ac[True] (new rssi:-93 - threshold:5 > old rssi:-99)
2022-11-09 06:29:59.283 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] Oral-B Toothbrush (F8:8A:5E:E1:DB:58): Switching from esp32-bluetooth-proxy-c814ac[True] to esp32-bluetooth-proxy-cb54fc[True] (time elapsed:1.2080062919994816 > stale seconds:1.1120059869717807)`
| 2022-11-09T14:46:18 | 0.0 | [] | [] |
|||
Bluetooth-Devices/oralb-ble | Bluetooth-Devices__oralb-ble-21 | 79bf876f4abbf694d0e78fecd2415758d2f2f5e5 | diff --git a/src/oralb_ble/parser.py b/src/oralb_ble/parser.py
index ceb866b..b4781f1 100644
--- a/src/oralb_ble/parser.py
+++ b/src/oralb_ble/parser.py
@@ -45,6 +45,7 @@ class Models(Enum):
SmartSeries4000 = auto()
SmartSeries6000 = auto()
SmartSeries7000 = auto()
+ SmartSeries8000 = auto()
SmartSeries9000 = auto()
@@ -109,6 +110,10 @@ class ModelDescription:
device_type="Smart Series 7000",
modes=SMART_SERIES_MODES,
),
+ Models.SmartSeries8000: ModelDescription(
+ device_type="Smart Series 8000",
+ modes=SMART_SERIES_MODES,
+ ),
Models.SmartSeries9000: ModelDescription(
device_type="Smart Series 9000",
modes=SMART_SERIES_MODES,
@@ -159,6 +164,7 @@ class ModelDescription:
b"\x074\x0c": Models.IOSeries4,
b"\x03V\x04": Models.SmartSeries4000,
b"\x04'\r": Models.SmartSeries6000,
+ b'\x03"\x0c': Models.SmartSeries8000,
b"\x03!\x0c": Models.SmartSeries9000,
b"\x061\x16": Models.IOSeries9,
b"\x02\x02\x06": Models.TriumphV2,
| Genius 8000 Support
As per https://github.com/home-assistant/core/issues/81594
I have a Genius 8000, which is not on the supported list according to the documentation, however, I can confirm it works with some missing info. It's also showing as a smarter series 7000.
**AdvertisementData**
`2022-11-06 08:02:34.946 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x02 \x00\x00\x01\x01\x00\x04'}, rssi=-77) connectable: True match: {'oralb'} rssi: -77`
`2022-11-06 08:03:17.402 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x02 \x00\r\x03\x01+\x04'}, rssi=-76) connectable: True match: set() rssi: -76`
`2022-11-06 11:20:05.828 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x02 \x00\x00\x03\x01\x00\x04'}, rssi=-73) connectable: True match: set() rssi: -73`
`2022-11-06 11:20:28.476 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x03\x00\x00\x0b\x07\x01$\x04'}, rssi=-80) connectable: True match: set() rssi: -80`
`2022-11-06 11:21:10.786 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x03\x00\x005\x07\x02L\x04'}, rssi=-76) connectable: True match: set() rssi: -76`
`2022-11-06 11:22:32.132 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x02 \x02\x03\x07\x0f\n\x04'}, rssi=-76) connectable: True match: set() rssi: -76`
| What value does pressure report when you have normal and high pressure ?
I'm struggling to figure out how to identify this to be honest.
This was when I turned it on, so perhaps normal?
`2022-11-06 08:02:34.946 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x02 \x00\x00\x01\x01\x00\x04'}, rssi=-77) connectable: True match: {'oralb'} rssi: -77`
After that I was pushing down, so I would assume this is high pressure.
`2022-11-06 08:03:17.402 DEBUG (MainThread) [homeassistant.components.bluetooth.manager] 00:1A:7D:DA:71:15: 58:7A:62:41:59:7F AdvertisementData(local_name='Oral-B Toothbrush', manufacturer_data={220: b'\x03"\x0c\x02 \x00\r\x03\x01+\x04'}, rssi=-76) connectable: True match: set() rssi: -76`
| 2022-11-06T14:40:50 | 0.0 | [] | [] |
||
Bluetooth-Devices/oralb-ble | Bluetooth-Devices__oralb-ble-18 | fe92aa9714965716eb5da98a36fa515a9ad4ba56 | diff --git a/src/oralb_ble/parser.py b/src/oralb_ble/parser.py
index c252301..8ef8451 100644
--- a/src/oralb_ble/parser.py
+++ b/src/oralb_ble/parser.py
@@ -37,6 +37,7 @@ class OralBBinarySensor(StrEnum):
class Models(Enum):
+ Pro6000 = auto()
TriumphV2 = auto()
IOSeries9 = auto()
IOSeries78 = auto()
@@ -81,6 +82,7 @@ class ModelDescription:
DEVICE_TYPES = {
+ Models.Pro6000: ModelDescription("Pro 6000", SMART_SERIES_MODES),
Models.TriumphV2: ModelDescription("Triumph V2", SMART_SERIES_MODES),
Models.IOSeries78: ModelDescription(
device_type="IO Series 7/8",
@@ -154,6 +156,7 @@ class ModelDescription:
b"\x03!\x0c": Models.SmartSeries9000,
b"\x061\x16": Models.IOSeries9,
b"\x02\x02\x06": Models.TriumphV2,
+ b"\x01\x02\x05": Models.Pro6000,
}
SECTOR_MAP = {
254: "last sector",
@@ -176,7 +179,7 @@ def _start_update(self, service_info: BluetoothServiceInfo) -> None:
self.set_device_manufacturer("Oral-B")
_LOGGER.debug("Parsing Oral-B sensor: %s", data)
msg_length = len(data)
- if msg_length != 11:
+ if msg_length not in (9, 11):
return
device_bytes = data[0:3]
@@ -185,8 +188,11 @@ def _start_update(self, service_info: BluetoothServiceInfo) -> None:
time = data[5] * 60 + data[6]
mode = data[7]
sector = data[8]
- sector_timer = data[9]
- no_of_sectors = data[10]
+ sector_timer = None
+ no_of_sectors = None
+ if msg_length >= 11:
+ sector_timer = data[9]
+ no_of_sectors = data[10]
model = BYTES_TO_MODEL.get(device_bytes, Models.SmartSeries7000)
model_info = DEVICE_TYPES[model]
@@ -203,16 +209,18 @@ def _start_update(self, service_info: BluetoothServiceInfo) -> None:
self.update_sensor(str(OralBSensor.TIME), None, time, None, "Time")
self.update_sensor(str(OralBSensor.SECTOR), None, tb_sector, None, "Sector")
- self.update_sensor(
- str(OralBSensor.NUMBER_OF_SECTORS),
- None,
- no_of_sectors,
- None,
- "Number of sectors",
- )
- self.update_sensor(
- str(OralBSensor.SECTOR_TIMER), None, sector_timer, None, "Sector Timer"
- )
+ if no_of_sectors is not None:
+ self.update_sensor(
+ str(OralBSensor.NUMBER_OF_SECTORS),
+ None,
+ no_of_sectors,
+ None,
+ "Number of sectors",
+ )
+ if sector_timer is not None:
+ self.update_sensor(
+ str(OralBSensor.SECTOR_TIMER), None, sector_timer, None, "Sector Timer"
+ )
self.update_sensor(
str(OralBSensor.TOOTHBRUSH_STATE), None, tb_state, None, "Toothbrush State"
)
| Support for Oral-B Pro 6200
Currently the Oral-B Pro 6200 is not supported. I have one of these. I've captured a pcap of the BTLE traffic and a few years ago I wrote some decoder that's now lost somewhere, but I know it's possible to support it at least. It supports five modes but no sector detection.
I haven't played around with developing or debugging integrations or their dependencies for Home Assistant but I know Python and might be able to hack something together if I find some time. But also maybe someone already up to speed can take my pcap and give it a quick look to see if the implementation is trivial? If not, some pointers to how to best develop and test something (under Linux, or maybe the docker thing) are welcome.
| [capture.zip](https://github.com/Bluetooth-Devices/oralb-ble/files/9939411/capture.zip)
Capture of some seconds cycling up and modes stepped through.
If you turn on debug logs for homeassistant.components.bluetooth you will get the AdvertisementData. That’s what we need to add new devices
[oralb-pro-6200-ha.txt](https://github.com/Bluetooth-Devices/oralb-ble/files/9939896/oralb-pro-6200-ha.txt)
Here you go.
@bdraco My Oral-B works with Smart 6 - 6000N reports the following pressure numbers:
* normal: 50
* high: 242
@dannytsang Thanks. I'll add those mappings
@hnrkp It looks like the 6200 has a completely different format but it should be supportable.
You'll need to provide an explanation of what the toothbrush is doing for each Advertisment so we can build a mapping.
@dannytsang Can you post your `AdvertisementData` as well to compare? | 2022-11-05T20:55:42 | 0.0 | [] | [] |
||
Systems-Theory-in-Systems-Biology/EPI | Systems-Theory-in-Systems-Biology__EPI-95 | cf6e235e5163b9329a9cfea223948730f2635426 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index f55b0c6..5fed522 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,7 +22,7 @@ All notable changes to this project will be documented in this file.
### Added
- Basic plotting function for sample results
-- Test for the plotting function (based on Covid model)
+- Users can now check the models they want to use with a basic functionality check and a quick inference check on artificial data.
### Changed
diff --git a/eulerpi/core/model_check.py b/eulerpi/core/model_check.py
new file mode 100644
index 0000000..c51fe92
--- /dev/null
+++ b/eulerpi/core/model_check.py
@@ -0,0 +1,221 @@
+import jax.numpy as jnp
+import numpy as np
+from jax import vmap
+
+from eulerpi.core.inference import InferenceType, inference
+from eulerpi.core.model import JaxModel, Model
+from eulerpi.core.plotting import sample_violin_plot
+
+
+def basic_model_check(model: Model) -> None:
+ """Perform a simple sanity check on the model. It tests the following:
+ - The model has a positive parameter dimension
+ - The model has a positive data dimension
+ - The model has a valid combination of parameter and data dimension
+ - The central parameter has the correct shape
+ - The parameter limits have the correct shape
+ - The model can be instantiated
+ - The model forward pass can be calculated
+ - The model jacobi matrix can be calculated
+ - The return values of the forward pass and the jacobi matrix have the correct shape
+ Args:
+ model(Model): The model describing the mapping from parameters to data.
+ Returns:
+ None
+ """
+
+ print(
+ f"Checking model {model.name} at location \n{model} \nfor basic functionality.\n"
+ )
+
+ # test the shapes
+ assert (
+ model.param_dim > 0
+ ), f"Model {model} has a non-positive parameter dimension"
+ assert (
+ model.data_dim > 0
+ ), f"Model {model} has a non-positive data dimension"
+ assert model.data_dim >= model.param_dim, (
+ f"Model {model} has a data dimension smaller than the parameter dimension. "
+ "This is not supported by the inference."
+ )
+ assert model.central_param.shape == (model.param_dim,), (
+ f"Model {model} has a central parameter with the wrong shape. "
+ f"Expected {(model.param_dim,)}, got {model.central_param.shape}"
+ )
+ assert model.param_limits.shape == (model.param_dim, 2), (
+ f"Model {model} has parameter limits with the wrong shape. "
+ f"Expected {(model.param_dim, 2)}, got {model.param_limits.shape}"
+ )
+
+ print("Successfully checked shapes and dimensions of model attributes.\n")
+ print(
+ f"Evaluate model {model.name} and its jacobian in its central parameter \n{model.central_param}."
+ )
+
+ model_forward = model.forward(model.central_param)
+ assert (
+ model_forward.shape == (1, model.data_dim)
+ or model_forward.shape == (model.data_dim,)
+ or model_forward.shape == ()
+ ), (
+ f"Model {model} has a forward function with the wrong shape. "
+ f"Expected {(1, model.data_dim)}, {(model.data_dim,)} or {()}, got {model_forward.shape}"
+ )
+
+ model_jac = model.jacobian(model.central_param)
+ assert (
+ model_jac.shape == (model.data_dim, model.param_dim)
+ or (model.data_dim == 1 and model_jac.shape == (model.param_dim,))
+ or (model.param_dim == 1 and model_jac.shape == (model.data_dim,))
+ ), (
+ f"Model {model} has a jacobian function with the wrong shape. "
+ f"Expected {(model.data_dim, model.param_dim)}, {(model.param_dim,)} or {(model.data_dim,)}, got {model_jac.shape}"
+ )
+
+ # check rank of jacobian
+ assert jnp.linalg.matrix_rank(model_jac) == model.param_dim, (
+ f"The Jacobian of the model {model} does not have full rank. This is a requirement for the inference. "
+ "Please check the model implementation."
+ )
+
+ fw, jc = model.forward_and_jacobian(model.central_param)
+ assert fw.shape == model_forward.shape, (
+ f"The shape {fw.shape} of the forward function extracted from the forward_and_jacobian function does not match the shape {model_forward.shape} of the forward function. "
+ "Please check the model implementation."
+ )
+ assert jc.shape == model_jac.shape, (
+ f"The shape {jc.shape} of the jacobian extracted from the forward_and_jacobian function does not match the shape {model_jac.shape} of the jacobian. "
+ "Please check the model implementation."
+ )
+ assert jnp.allclose(fw, model_forward), (
+ f"The forward function of the model {model} does not match the forward function extracted from the forward_and_jacobian function. "
+ "Please check the model implementation."
+ )
+ assert jnp.allclose(jc, model_jac), (
+ f"The jacobian of the model {model} does not match the jacobian extracted from the forward_and_jacobian function. "
+ "Please check the model implementation."
+ )
+
+ print(
+ "Successfully checked model forward simulation and corresponding jacobian.\n"
+ )
+
+
+def inference_model_check(
+ model: Model,
+ num_data_points: int = 1000,
+ num_model_evaluations: int = 11000,
+) -> None:
+ """Check your model in a quick inference run on an artificially created dataset.
+ It produces a violin plot comparing the artificially created parameters and data to the respectively inferred samples.
+
+ Args:
+ model(Model): The model describing the mapping from parameters to data.
+ num_data_points (int, optional): The number of data data points to artificially generate (Default value = 1000)
+ num_model_evaluations (int, optional): The number of model evaluations to perform in the inference. (Default value = 11000)
+ Returns:
+ None
+ """
+
+ print(
+ f"Checking model {model.name} at location \n{model} \nfor inference functionality on artificially created data.\n"
+ )
+
+ # create artificial parametrs similar to how we create initial walker positions for emcee sampling
+
+ central_param = model.central_param
+ param_limits = model.param_limits
+
+ # sample parameters from a uniform distribution around the central parameter and between the parameter limits
+ d_min = np.minimum(
+ central_param - param_limits[:, 0], param_limits[:, 1] - central_param
+ )
+ param_sample = central_param + d_min * (
+ (np.random.rand(num_data_points, model.param_dim) - 0.5) / 3.0
+ )
+
+ # try to use jax vmap to perform the forward pass on multiple parameters at once
+ if isinstance(model, JaxModel):
+ data_sample = vmap(model.forward, in_axes=0)(param_sample)
+ else:
+ data_sample = np.vectorize(model.forward, signature="(n)->(m)")(
+ param_sample
+ )
+
+ print(
+ f"Successfully created an artificial data set of size {num_data_points}.\n"
+ )
+
+ # choose sensible values for the sampling hyper-parameters and print them
+ num_inference_evaluations = num_model_evaluations - num_data_points
+
+ num_walkers = int(np.sqrt(num_inference_evaluations / 10))
+ num_steps = int(num_inference_evaluations / num_walkers)
+
+ num_burn_in_samples = num_walkers
+ thinning_factor = int(np.ceil(num_walkers / 10))
+
+ print("Attempting inference with hyperparameters chosen as follows:")
+ print(f"num_data_points: {num_data_points}")
+ print(f"num_walkers: {num_walkers}")
+ print(f"num_steps: {num_steps}")
+ print(f"num_burn_in_samples: {num_burn_in_samples}")
+ print(f"thinning_factor: {thinning_factor}")
+
+ run_name = "test_model_run"
+
+ # perform the inference
+ inference(
+ model,
+ data=data_sample,
+ inference_type=InferenceType.MCMC,
+ slices=[np.arange(model.param_dim)],
+ run_name=run_name,
+ num_runs=1,
+ num_walkers=num_walkers,
+ num_steps=num_steps,
+ num_burn_in_samples=num_burn_in_samples,
+ thinning_factor=thinning_factor,
+ )
+
+ print(
+ f"Successfully finishes inference run with {num_walkers*num_steps} samples.\n"
+ )
+
+ # plot the results
+ sample_violin_plot(
+ model,
+ reference_sample=param_sample,
+ run_name=run_name,
+ credibility_level=0.999,
+ what_to_plot="param",
+ )
+ sample_violin_plot(
+ model,
+ reference_sample=data_sample,
+ run_name=run_name,
+ credibility_level=0.999,
+ what_to_plot="data",
+ )
+
+
+def full_model_check(
+ model: Model,
+ num_data_points: int = 1000,
+ num_model_evaluations: int = 11000,
+) -> None:
+ """Check your model for basic functionality and in a quick inference run on an artificially created dataset.
+ We recommend to run this function for every new model you create.
+ It runs the functions basic_model_check and inference_model_check to perform the checks.
+
+ Args:
+ model(Model): The model describing the mapping from parameters to data.
+ num_data_points (int, optional): The number of data data points to artificially generate (Default value = 1000)
+ num_model_evaluations (int, optional): The number of model evaluations to perform in the inference. (Default value = 11000)
+ Returns:
+ None
+ """
+
+ basic_model_check(model)
+ inference_model_check(model, num_data_points, num_model_evaluations)
| Model test
Ideally, a new model could be tested for eulerpi-compatibility with a single function call.
| 2023-11-23T12:54:01 | 0.0 | [] | [] |
|||
Systems-Theory-in-Systems-Biology/EPI | Systems-Theory-in-Systems-Biology__EPI-93 | 6d59007999b479e4959126b849776c089de6e655 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3cbeb62..f55b0c6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,9 @@ All notable changes to this project will be documented in this file.
### Added
+- Basic plotting function for sample results
+- Test for the plotting function (based on Covid model)
+
### Changed
### Fixed
diff --git a/eulerpi/core/plotting.py b/eulerpi/core/plotting.py
new file mode 100644
index 0000000..23119c7
--- /dev/null
+++ b/eulerpi/core/plotting.py
@@ -0,0 +1,388 @@
+"""Basic plotting of eulerpi sampling results.
+
+This module provides a basic plotting functionality to visualise sampling results for eulerpi.
+Uses burn_in and thinning accordinng to the simulation settings.
+"""
+
+import os
+import pathlib
+from typing import Optional, Union
+
+import jax.numpy as jnp
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib import axes
+
+from eulerpi.core.kde import calc_kernel_width, eval_kde_gauss
+from eulerpi.core.model import Model
+from eulerpi.core.result_manager import ResultManager
+
+# general plotting function for joint runs
+
+
+def sample_violin_plot(
+ model: Model,
+ reference_sample: Union[str, os.PathLike, np.ndarray, None] = None,
+ run_name: str = "default_run",
+ what_to_plot: str = "param",
+ credibility_level: float = 0.95,
+ num_vertical_grid_points: int = 100,
+ axis_labels: Optional[list[str]] = None,
+) -> axes:
+
+ """Creates an overview figure with one violin plot for each marginal distribution.
+ Can be used for parameters and simulation results and compares reference (or true underlying) and inferred values when possible.
+
+ Args:
+ model(Model): The model describing the mapping from parameters to data.
+ reference_sample(Union[str, os.PathLike, np.ndarray]): Depending on what_to_plot, this is either the data sample used for the inference, or a reference sample of "true" parameter samples. If a string is given, it is assumed to be a path to a file containing the respective sample. (Default value = None)
+ run_name(str): The name of the inference run. (Default value = "default_run")
+ what_to_plot(str): Choose between "param" and "data" to respectively visualize either the model parameters or output. (Default value = "param")
+ credibility_level(float): Defines the probability mass (between 0 and 1) that is included within each of the violin plots. Choose 1 if you do not wand any cut-off. (Default value = 0.95)
+ num_vertical_grid_points(int): Defines the resolution of the vertical violin plots. Can be increased for smoother plots or decresed for faster runtime. (default value = 100)
+ axis_labels(list[str], optional): The labels depicted on the ordinate of the plot. Its size needs to be identical with the dimensionality of the plotted distribution. (Default value = None)
+
+ Returns:
+ axes: The overview figure with all violin plots as a matplotlib axes object.
+ """
+
+ # set figure font and color (also depending on what to plot)
+ plt.rcParams.update({"font.size": 16})
+ plt.rcParams["mathtext.fontset"] = "dejavuserif"
+
+ reference_available = reference_sample is not None
+
+ if what_to_plot == "param":
+ dim = model.param_dim
+ variable_name = "Q"
+ colorOrig = np.array([132.0, 143.0, 162.0]) / 255.0
+ colorAppr = np.array([5.0, 142.0, 217.0]) / 255.0
+
+ elif what_to_plot == "data":
+ dim = model.data_dim
+ variable_name = "Y"
+ colorOrig = np.array([255.0, 147.0, 79.0]) / 255.0
+ colorAppr = np.array([204.0, 45.0, 53.0]) / 255.0
+
+ else:
+ raise ValueError(
+ "This function only supports plotting of model parameters (what_to_plot = 'param') or model outputs and data (what_to_plot = 'data')."
+ )
+
+ color3 = np.array([45.0, 49.0, 66.0]) / 255.0
+ color4 = np.array([255.0, 218.0, 174.0]) / 255.0
+
+ # Load data from file if necessary
+ if reference_available:
+ if isinstance(reference_sample, (str, os.PathLike, pathlib.Path)):
+ reference_sample = np.loadtxt(
+ reference_sample, delimiter=",", ndmin=2
+ )
+ elif not isinstance(reference_sample, (np.ndarray, jnp.ndarray)):
+ raise TypeError(
+ f"The data argument must be a path to a file or a numpy array. The argument passed was of type {type(reference_sample)}."
+ )
+
+ # define the locations and extends of the violin plots on the abscissa
+ unit_locations = np.linspace(1, 2 * dim - 1, dim) / (2.0 * dim)
+ envelope_width = 1.0 / (dim + 1)
+
+ # load the results from the inference sampling
+ rm = ResultManager(model_name=model.name, run_name=run_name)
+
+ (
+ reconstructed_param_sample,
+ reconstructed_data_sample,
+ _,
+ ) = rm.load_slice_inference_results(
+ slice=np.linspace(0, model.param_dim - 1, model.param_dim, dtype=int)
+ )
+
+ if what_to_plot == "param":
+ reconstructed_sample = reconstructed_param_sample
+ elif what_to_plot == "data":
+ reconstructed_sample = reconstructed_data_sample
+
+ # determine upper and lower plot bounds according to the credibility levels and what to plot
+ if reference_available:
+ percentile_sample = reference_sample
+
+ else:
+ percentile_sample = reconstructed_sample
+
+ upper_percentile = np.percentile(
+ percentile_sample, 100.0 * credibility_level, axis=0
+ )
+ lower_percentile = np.percentile(
+ percentile_sample, 100.0 * (1 - credibility_level), axis=0
+ )
+
+ max_val = np.amax(upper_percentile)
+ min_val = np.amin(lower_percentile)
+
+ # create single figure with variable width
+ fig, ax = plt.subplots(figsize=(2 * dim, 6))
+
+ # set the plot axis limits and labels
+ ax.set_ylim(min_val, max_val)
+ x_labels = axis_labels or [
+ r"$\mathcal{" + variable_name + "}_{" + str(d + 1) + "}$"
+ for d in range(dim)
+ ]
+ ax.set_xticks(unit_locations, x_labels)
+
+ # plot the vertical axes for all violin plots
+ ax.vlines(unit_locations, min_val, max_val, color=color3, linewidth=1.0)
+
+ # create one shared grid for all KDEs
+ vertical_grid = np.transpose(
+ np.array([np.linspace(min_val, max_val, num_vertical_grid_points)])
+ )
+
+ # also create a 1d array for the param grid
+ vertical_grid_1d = np.squeeze(np.asarray(vertical_grid))
+
+ # loop over all dimensions of either the param or the data
+ for i in range(dim):
+
+ reconstructed_matrix = np.transpose(
+ np.array([reconstructed_sample[:, i]])
+ )
+
+ # in case there is a reference for the plotted qunatity avaialbe
+ if reference_available:
+ # cast to 2d array
+ reference_matrix = np.transpose(np.array([reference_sample[:, i]]))
+
+ # calculate kernel width for KDE
+ scales = calc_kernel_width(reference_matrix)
+
+ # evaluate KDEs over the grid
+ reference_KDE = eval_kde_gauss(
+ reference_matrix, vertical_grid, scales
+ )
+
+ # normalize the KDEs and caluculate their cumulative distribution
+ reference_KDE_norm_cumsum = np.cumsum(
+ reference_KDE / np.sum(reference_KDE)
+ )
+
+ # create boolean arrays to filter the KDEs for the specified credibility level
+ reference_konfidence_index = (
+ reference_KDE_norm_cumsum > (1 - credibility_level) / 2.0
+ ) & (
+ reference_KDE_norm_cumsum
+ < credibility_level + (1 - credibility_level) / 2.0
+ )
+
+ # calculate the maximum density of the KDEs and the corresponding incidence
+ max_density = np.amax(reference_KDE)
+ max_density_argument = vertical_grid_1d[np.argmax(reference_KDE)]
+
+ # calculate violin envelopes for reference and reconstruction
+ reference_left_bound = (
+ -0.5 * envelope_width / max_density * reference_KDE
+ + unit_locations[i]
+ )
+ reference_right_bound = (
+ 0.5 * envelope_width / max_density * reference_KDE
+ + unit_locations[i]
+ )
+
+ # filter the violin envelopes for the specified credibility level
+ reference_left_bound_konf = reference_left_bound[
+ reference_konfidence_index
+ ]
+ reference_right_bound_konf = reference_right_bound[
+ reference_konfidence_index
+ ]
+
+ # plot the filtered violin envelopes for the reference
+ ax.plot(
+ reference_left_bound_konf,
+ vertical_grid_1d[reference_konfidence_index],
+ linewidth=3.0,
+ color=colorOrig,
+ )
+ ax.plot(
+ reference_right_bound_konf,
+ vertical_grid_1d[reference_konfidence_index],
+ linewidth=3.0,
+ color=colorOrig,
+ )
+
+ # close the envelopes by connecting the last and the first point
+ for j in [0, -1]:
+ ax.plot(
+ [
+ reference_left_bound_konf[j],
+ reference_right_bound_konf[j],
+ ],
+ [
+ vertical_grid_1d[reference_konfidence_index][j],
+ vertical_grid_1d[reference_konfidence_index][j],
+ ],
+ linewidth=3.0,
+ color=colorOrig,
+ )
+
+ # fill the violin envelopes
+
+ ax.fill_betweenx(
+ vertical_grid_1d[reference_konfidence_index],
+ reference_left_bound_konf,
+ reference_right_bound_konf,
+ color=colorOrig,
+ label=r"$\Phi_\mathcal{" + variable_name + "}$"
+ if i == 0
+ else "",
+ alpha=0.3,
+ )
+
+ # draw arrows to show the width of the violin envelopes
+ ax.arrow(
+ np.amin(reference_left_bound),
+ max_density_argument,
+ np.amax(reference_right_bound) - np.amin(reference_left_bound),
+ 0,
+ length_includes_head=True,
+ color=color3,
+ head_width=(max_val - min_val) / 100.0,
+ head_length=0.02,
+ linewidth=1.0,
+ )
+
+ ax.arrow(
+ np.amax(reference_right_bound),
+ max_density_argument,
+ -np.amax(reference_right_bound)
+ + np.amin(reference_left_bound),
+ 0,
+ length_includes_head=True,
+ color=color3,
+ head_width=(max_val - min_val) / 100.0,
+ head_length=0.02,
+ linewidth=1.0,
+ )
+
+ ax.text(
+ unit_locations[i] + 0.01,
+ max_density_argument + (max_val - min_val) / 50.0,
+ "%.2f" % (np.round(max_density, 2)),
+ )
+
+ # in case of no reference, caluclate the kernel bandwidth from the reconstruction
+ else:
+ scales = calc_kernel_width(reconstructed_matrix)
+
+ # repeat all plotting for the reconstruction
+ reconstructed_KDE = eval_kde_gauss(
+ reconstructed_matrix, vertical_grid, scales
+ )
+
+ if not reference_available:
+ max_density = np.amax(reconstructed_KDE)
+ max_density_argument = vertical_grid_1d[
+ np.argmax(reconstructed_KDE)
+ ]
+
+ reconstructed_KDE_norm_cumsum = np.cumsum(
+ reconstructed_KDE / np.sum(reconstructed_KDE)
+ )
+ reconstructed_konfidence_index = (
+ reconstructed_KDE_norm_cumsum > (1 - credibility_level) / 2.0
+ ) & (
+ reconstructed_KDE_norm_cumsum
+ < credibility_level + (1 - credibility_level) / 2.0
+ )
+ reconstructed_left_bound = (
+ -0.5 * envelope_width / max_density * reconstructed_KDE
+ + unit_locations[i]
+ )
+ reconstructed_right_bound = (
+ 0.5 * envelope_width / max_density * reconstructed_KDE
+ + unit_locations[i]
+ )
+ reconstructed_left_bound_konf = reconstructed_left_bound[
+ reconstructed_konfidence_index
+ ]
+ reconstructed_right_bound_konf = reconstructed_right_bound[
+ reconstructed_konfidence_index
+ ]
+
+ ax.plot(
+ reconstructed_left_bound_konf,
+ vertical_grid_1d[reconstructed_konfidence_index],
+ linewidth=3.0,
+ color=colorAppr,
+ )
+
+ ax.plot(
+ reconstructed_right_bound_konf,
+ vertical_grid_1d[reconstructed_konfidence_index],
+ linewidth=3.0,
+ color=colorAppr,
+ )
+
+ for j in [0, -1]:
+ ax.plot(
+ [
+ reconstructed_left_bound_konf[j],
+ reconstructed_right_bound_konf[j],
+ ],
+ [
+ vertical_grid_1d[reconstructed_konfidence_index][j],
+ vertical_grid_1d[reconstructed_konfidence_index][j],
+ ],
+ linewidth=3.0,
+ color=colorAppr,
+ )
+
+ ax.fill_betweenx(
+ vertical_grid_1d[reconstructed_konfidence_index],
+ reconstructed_left_bound_konf,
+ reconstructed_right_bound_konf,
+ color=colorAppr,
+ label=r"$\Phi_{\hat{\mathcal{" + variable_name + "}}}$"
+ if i == 0
+ else "",
+ alpha=0.3,
+ )
+
+ if not reference_available:
+ # draw arrows to show the width of the violin envelopes
+ ax.arrow(
+ np.amin(reconstructed_left_bound),
+ max_density_argument,
+ np.amax(reconstructed_right_bound)
+ - np.amin(reconstructed_left_bound),
+ 0,
+ length_includes_head=True,
+ color=color3,
+ head_width=(max_val - min_val) / 50.0,
+ head_length=0.02,
+ linewidth=1.0,
+ )
+ ax.arrow(
+ np.amax(reconstructed_right_bound),
+ max_density_argument,
+ -np.amax(reconstructed_right_bound)
+ + np.amin(reconstructed_left_bound),
+ 0,
+ length_includes_head=True,
+ color=color3,
+ head_width=(max_val - min_val) / 50.0,
+ head_length=0.02,
+ linewidth=1.0,
+ )
+ ax.text(
+ unit_locations[i] + 0.01,
+ max_density_argument + (max_val - min_val) / 50.0,
+ "%.2f" % (np.round(max_density, 2)),
+ )
+
+ ax.legend()
+ ax.set_xlim(0.0, 1.0)
+ plt.tight_layout()
+
+ return ax
| Basic plotting quick EPI results check
We are often interested in quickly checking inference results especially when tuning (sampling) parameters, etc.
It would be very helpful to have a basic plotting functionality.
Ideally, one could see all dimensions of the parameters and simulation results in a concise overview figure.
| 2023-11-20T10:01:06 | 0.0 | [] | [] |
|||
Systems-Theory-in-Systems-Biology/EPI | Systems-Theory-in-Systems-Biology__EPI-72 | 24f83ee0996cfa3ea257c835e6fbfee3e38343e7 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff0614f4..30917edb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,14 @@ All notable changes to this project will be documented in this file.
## [Unreleased]
+### Added
+
+- Added support for for sbml models to select parameters and species used in the inference
+- Added support for evaluating sbml models at multiple time points
+
+### Changed
+
+- Switched from using parameter names to using parameter ids in the sbml model
## [0.4.0]
@@ -26,6 +34,7 @@ All notable changes to this project will be documented in this file.
- Added a new example model, a 2d heat conduction equation
- Added a function to model to specify more complex parameter domains
+
### Changed
- Removed old functions from corona model
diff --git a/README.md b/README.md
index 41dcd563..f9fe60b2 100644
--- a/README.md
+++ b/README.md
@@ -75,11 +75,13 @@ from eulerpi.sampling import inference
from my_model import MyModel
-central_param = np.array([0.5, -1.5, ...])
-param_limits = np.array([[0.0, 1.0], [-3.0, 0.0], ...])
+# This line is needed for multiprocessing in python
+if __name__ == "__main__":
+ central_param = np.array([0.5, -1.5, ...])
+ param_limits = np.array([[0.0, 1.0], [-3.0, 0.0], ...])
-model = MyModel(central_param, param_limits)
-inference(model=model, data="my_data.csv")
+ model = MyModel(central_param, param_limits)
+ inference(model=model, data="my_data.csv")
```
The `data` argument can be a numpy-2d-array or a PathLike object that points to a CSV file. In the example shown above, the CSV file `my_data.csv` should contain the data in the following format:
diff --git a/docs/source/examples/sbml_example.rst b/docs/source/examples/sbml_example.rst
index ae71b496..220ecc06 100644
--- a/docs/source/examples/sbml_example.rst
+++ b/docs/source/examples/sbml_example.rst
@@ -20,15 +20,19 @@ Here's a code snippet to load your own sbml model and to do the parameter infere
from eulerpi.core.model import SBMLModel
from eulerpi.core.inference import inference
- central_param = np.array([1.0, 1.0]) # initial guess, evaluation must have nonzero density
- param_limits = np.array([[0.0, 2.0], [0.0, 2.0]])
- param_names = ['k1', 'k2']
-
- model = SBMLModel('model.xml',
- central_param=central_param,
- param_limits=param_limits,
- param_names=param_names)
- inference(model, 'data.csv')
+ # This line is needed for multiprocessing in python
+ if __name__ == "__main__":
+ central_param = np.array([1.0, 1.0]) # initial guess, evaluation must have nonzero density
+ param_limits = np.array([[0.0, 2.0], [0.0, 2.0]])
+ param_ids = ['k1', 'k2']
+ timepoints = np.array([1.0])
+
+ model = SBMLModel(sbml_file='model.xml',
+ central_param=central_param,
+ param_limits=param_limits,
+ timepoints=timepoints,
+ param_ids=param_ids)
+ inference(model, 'data.csv')
The attribute :py:attr:`~eulerpi.core.model.SBMLModel.param_names` contains the names of the parameters in the sbml model, for which the inference should be performed.
Per default it contains all parameters from the sbml model file.
diff --git a/docs/source/tutorial_material/tutorial.rst b/docs/source/tutorial_material/tutorial.rst
index 0f304c37..332567a9 100644
--- a/docs/source/tutorial_material/tutorial.rst
+++ b/docs/source/tutorial_material/tutorial.rst
@@ -125,8 +125,10 @@ Now we can now use EPI to infer the parameter distribution from the data.
from eulerpi.core.inference import inference
- model = Temperature()
- inference(model, data = "TemperatureData.csv")
+ # This line is needed for multiprocessing in python
+ if __name__ == "__main__":
+ model = Temperature()
+ inference(model, data = "TemperatureData.csv")
Depending on the complexity of your model the sampling can take a long time.
Due to this reason, not only the final results but also intermediate sampling results are saved.
diff --git a/docs/source/welcome.rst b/docs/source/welcome.rst
index 74e8411e..97a8990e 100644
--- a/docs/source/welcome.rst
+++ b/docs/source/welcome.rst
@@ -89,11 +89,13 @@ To evaluate the model and infer the parameter distribution, call:
from my_model import MyModel
- central_param = np.array([0.5, -1.5, ...])
- param_limits = np.array([[0.0, 1.0], [-3.0, 0.0], ...])
+ # This line is needed for multiprocessing in python
+ if __name__ == "__main__":
+ central_param = np.array([0.5, -1.5, ...])
+ param_limits = np.array([[0.0, 1.0], [-3.0, 0.0], ...])
- model = MyModel(central_param, param_limits)
- inference(model=model, data="my_data.csv")
+ model = MyModel(central_param, param_limits)
+ inference(model=model, data="my_data.csv")
The parameter :py:attr:`data` can be a numpy-2d-array or a PathLike object that points to a CSV file. In the example shown above, the CSV file :file:`my_data.csv` should contain the data in the following format:
diff --git a/eulerpi/core/model.py b/eulerpi/core/model.py
index dc18c4cd..4f7ff0ba 100644
--- a/eulerpi/core/model.py
+++ b/eulerpi/core/model.py
@@ -10,7 +10,6 @@
from jax import jacrev, jit, vmap
import amici
-from eulerpi import logger
from eulerpi.jax_extension import value_and_jacrev
@@ -279,60 +278,88 @@ class SBMLModel(Model):
Args:
sbml_file(str): The path to the SBML model file.
- param_names(list): A list of parameter names. If None the parameter names are extracted from the SBML model.
- time(list): List of measurement time points, this is where the ODE is evaluated and compared to the data
+ param_ids(list): A list of ids of parameter, which will be estimated during the inference. If None all parameter ids are extracted from the SBML model.
+ state_ids(list): A list of state ids, for which data will be given during the inference. If None all state ids are extracted from the SBML model.
+ timepoints(list): List of measurement time points, this is where the sbml model is evaluated and compared to the data
skip_creation(bool): If True the model is not created againg based on the SBML file. Instead the model is loaded from a previously created model. (Default value = False)
central_param(np.ndarray): The central parameter for the model
param_limits(np.ndarray): The parameter limits for the model
"""
+ @staticmethod
+ def indices_from_ids(ids: list, all_ids: list) -> list:
+ """Returns the indices of the ids in the all_ids list.
+
+ Args:
+ ids(list): The ids for which the indices should be returned.
+ all_ids(list): The list of all ids.
+
+ Returns:
+ list: The indices of the ids in the all_ids list.
+
+ Throws:
+ ValueError: If one of the ids is not in the all_ids list.
+
+ """
+ indices = []
+ for id in ids:
+ try:
+ indices.append(all_ids.index(id))
+ except ValueError:
+ raise ValueError(
+ f"Parameter / State id '{id}' is not in the list of the relevant ids {all_ids}"
+ )
+ return indices
+
@property
def param_dim(self):
"""The number of parameters of the model."""
- return len(self.param_names) # len(self.amici_model.getParameterIds())
+ return len(self.param_ids)
@property
def data_dim(self):
- """The number of observables of the model."""
- return len(self.amici_model.getStateIds())
+ """The dimension of a data point returned by the model."""
+ return len(self.state_ids) * len(self.timepoints)
def __init__(
self,
sbml_file: str,
- time: list,
central_param: np.ndarray,
param_limits: np.ndarray,
- param_names=None,
+ timepoints: list,
+ param_ids: Optional[list] = None,
+ state_ids: Optional[list] = None,
skip_creation: bool = False,
name: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(central_param, param_limits, name, **kwargs)
- self.time = time
-
self.amici_model_name = self.name
self.amici_model_dir = "./amici/" + self.amici_model_name
- # TODO test if observables are mandatory
- observables = {
- "observable_x1": {"name": "y_obs", "formula": "y"},
- }
-
# Generate python code
if not skip_creation:
sbml_importer = amici.SbmlImporter(sbml_file)
sbml_importer.sbml2amici(
self.amici_model_name,
self.amici_model_dir,
- observables=observables,
)
- self.param_names = param_names
-
# Load the generated model
+ self.timepoints = timepoints
self.load_amici_model_and_solver()
+ self.param_ids = param_ids or self.amici_model.getParametersIds()
+ self.state_ids = state_ids or self.amici_model.getStateIds()
+ self.param_indices = self.indices_from_ids(
+ self.param_ids, self.amici_model.getParameterIds()
+ )
+ self.state_indices = self.indices_from_ids(
+ self.state_ids, self.amici_model.getStateIds()
+ )
+ self.setSensitivities()
+
def load_amici_model_and_solver(self):
"""Loads the AMICI model from the previously generated model."""
amici_model_module = amici.import_model_module(
@@ -341,45 +368,48 @@ def load_amici_model_and_solver(self):
self.amici_model = amici_model_module.getModel()
self.amici_solver = self.amici_model.getSolver()
- # TODO: Maybe this is redundant when using settings to hdf5
- self.amici_model.setTimepoints(self.time)
-
- if self.param_names is not None:
- # We need the indices for setParameterList, not the ids or names
- amici_param_indices = []
- for i, param_id in enumerate(self.amici_model.getParameterIds()):
- if param_id in self.param_names:
- amici_param_indices.append(i)
- else:
- logger.warning(
- f"Parameter {param_id} is specified in the sbml file, but not in the passed parameter list. It will be ignored."
- )
- self.amici_model.setParameterList(amici_param_indices)
- else:
- self.param_names = self.amici_model.getParameterIds()
+ self.amici_model.setTimepoints(self.timepoints)
+ self.amici_solver.setAbsoluteTolerance(1e-10)
+
+ def setSensitivities(self):
+ if self.param_ids == self.amici_model.getParameterIds():
self.amici_model.requireSensitivitiesForAllParameters()
+ else:
+ self.amici_model.setParameterList(self.param_indices)
+
self.amici_solver.setSensitivityMethod(amici.SensitivityMethod.forward)
self.amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)
def forward(self, params):
for i, param in enumerate(params):
- self.amici_model.setParameterById(self.param_names[i], param)
+ self.amici_model.setParameterById(self.param_ids[i], param)
rdata = amici.runAmiciSimulation(self.amici_model, self.amici_solver)
- return rdata.x[0]
+ return rdata.x[:, self.state_indices].reshape(self.data_dim)
def jacobian(self, params):
for i, param in enumerate(params):
- self.amici_model.setParameterById(self.param_names[i], param)
+ self.amici_model.setParameterById(self.param_ids[i], param)
rdata = amici.runAmiciSimulation(self.amici_model, self.amici_solver)
- return rdata.sx[0].T
+ return (
+ rdata.sx[:, :, self.state_indices]
+ .transpose(1, 0, 2)
+ .reshape(self.param_dim, self.data_dim)
+ .T
+ )
def forward_and_jacobian(
self, params: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
for i, param in enumerate(params):
- self.amici_model.setParameterById(self.param_names[i], param)
+ self.amici_model.setParameterById(self.param_ids[i], param)
rdata = amici.runAmiciSimulation(self.amici_model, self.amici_solver)
- return rdata.x[0], rdata.sx[0].T
+ return (
+ rdata.x[:, self.state_indices].reshape(self.data_dim),
+ rdata.sx[:, :, self.state_indices]
+ .transpose(1, 0, 2)
+ .reshape(self.param_dim, self.data_dim)
+ .T,
+ )
# Allow the model to be pickled
def __getstate__(self):
@@ -388,6 +418,7 @@ def __getstate__(self):
# Save the amici solver settings to
_fd, _file = tempfile.mkstemp()
+
try:
# write amici solver settings to file
try:
@@ -420,6 +451,7 @@ def __setstate__(self, state):
# Restore amici model and solver
self.load_amici_model_and_solver()
+ self.setSensitivities()
_fd, _file = tempfile.mkstemp()
try:
diff --git a/eulerpi/core/sampling.py b/eulerpi/core/sampling.py
index d2bf4642..068813bf 100644
--- a/eulerpi/core/sampling.py
+++ b/eulerpi/core/sampling.py
@@ -112,7 +112,7 @@ def run_emcee_once(
)
except ValueError as e:
# If the message equals "Probability function returned NaN."
- if str(e) == "Probability function returned NaN.":
+ if "Probability function returned NaN" in str(e):
raise ValueError(
"Probability function returned NaN. "
"You possibly have to exclude data dimensions which do not depend on the paramaters. "
diff --git a/eulerpi/examples/sbml/sbml_caffeine_model.py b/eulerpi/examples/sbml/sbml_caffeine_model.py
index 688c9faf..2b4251ba 100644
--- a/eulerpi/examples/sbml/sbml_caffeine_model.py
+++ b/eulerpi/examples/sbml/sbml_caffeine_model.py
@@ -7,9 +7,6 @@
class CaffeineSBMLModel(SBMLModel, ArtificialModelInterface):
- param_dim = 2
- data_dim = 1
-
CENTRAL_PARAM = np.array([1.0, 1.0])
PARAM_LIMITS = np.array([[0.0, 2.0], [0.0, 2.0]])
@@ -22,12 +19,15 @@ def __init__(
sbml_file = importlib.resources.path(
"eulerpi.examples.sbml", "Caffeine_2Wks_Exponential_decay.xml"
)
- param_names = ["A", "B"]
+ param_ids = ["A", "B"]
+ timepoints = np.array([0.5, 1.0])
+
super().__init__(
sbml_file,
central_param,
param_limits,
- param_names,
+ timepoints,
+ param_ids,
**kwargs,
)
diff --git a/eulerpi/examples/sbml/sbml_menten_model.py b/eulerpi/examples/sbml/sbml_menten_model.py
index c78a69b1..e7567e62 100644
--- a/eulerpi/examples/sbml/sbml_menten_model.py
+++ b/eulerpi/examples/sbml/sbml_menten_model.py
@@ -24,29 +24,20 @@ def __init__(
sbml_file = importlib.resources.path(
"eulerpi.examples.sbml", "sbml_menten_model.xml"
)
- param_names = ["Km", "kcat"]
+ timepoints = np.array([0.5, 1.0])
+ param_ids = ["Km", "kcat"]
+ state_ids = ["s1"]
+
super().__init__(
sbml_file,
central_param,
param_limits,
- param_names,
+ timepoints,
+ param_ids,
+ state_ids,
**kwargs,
)
- @property
- def data_dim(self) -> int:
- return 1
-
- def forward(self, params) -> np.ndarray:
- return super().forward(params)[2:]
-
- def jacobian(self, params) -> np.ndarray:
- return super().jacobian(params)[2:, :]
-
- def forward_and_jacobian(self, params) -> np.ndarray:
- val, jac = super().forward_and_jacobian(params)
- return val[2:], jac[2:, :]
-
def generate_artificial_params(self, num_samples: int) -> np.ndarray:
diff0 = 5.0
diff1 = 0.2
| SBML Model not supporting time series
**Describe the bug**
SBML models returns the solution at `time[0]` instead of the solution at all timepoints in `time`.
**To Reproduce**
```python
import numpy as np
import importlib
from eulerpi.core.model import SBMLModel
from eulerpi.core.inference import inference
CENTRAL_PARAM = np.array([1.0, 1.0])
PARAM_LIMITS = np.array([[0.0, 2.0], [0.0, 2.0]])
sbml_file = importlib.resources.path(
"eulerpi.examples.sbml", "Caffeine_2Wks_Exponential_decay.xml"
)
model = SBMLModel(sbml_file=sbml_file, time=[1.0], central_param=CENTRAL_PARAM, param_limits=PARAM_LIMITS, skip_creation=True)
print(model.forward(CENTRAL_PARAM).shape)
model = SBMLModel(sbml_file=sbml_file, time=[0.5, 1.0], central_param=CENTRAL_PARAM, param_limits=PARAM_LIMITS, skip_creation=True)
print(model.forward(CENTRAL_PARAM).shape)
```
**Expected behavior**
The sbml model returns the solution at all timepoints in `time` as flat numpy array.
**Observed behavior**
The sbml model returns the solution at the timepoint `time[0]`.
| 2023-07-15T13:33:43 | 0.0 | [] | [] |
|||
Systems-Theory-in-Systems-Biology/EPI | Systems-Theory-in-Systems-Biology__EPI-55 | c5d52b0f540ac9d0f3153874b898bdf95ea1d290 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 43755202..5659654c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,10 +16,16 @@ All notable changes to this project will be documented in this file.
### Fixed
- Bug in module B that caused a crash
-- Bug that caused thinning to be performed incorrectly.
## [Unreleased]
+## [0.3.1] - 2023-05-02
+
+### Fixed
+
+- Bug in result manager causing burn-in and thinning to be performed on the wrong samples.
+- Bug in result manager that caused density evals to be saved as data samples for non-full slices.
+
## [0.3.0] - 2023-04-27
### Added
diff --git a/eulerpi/core/result_manager.py b/eulerpi/core/result_manager.py
index 15750600..53290999 100644
--- a/eulerpi/core/result_manager.py
+++ b/eulerpi/core/result_manager.py
@@ -220,9 +220,7 @@ def save_run(
# Save the simulation results
np.savetxt(
results_path + "/SimResults/sim_results_" + str(run) + ".csv",
- sampler_results[
- :, sampling_dim : model.param_dim + model.data_dim
- ],
+ sampler_results[:, sampling_dim : sampling_dim + model.data_dim],
delimiter=",",
)
@@ -411,6 +409,9 @@ def load_slice_inference_results(
if thinning_factor is None:
thinning_factor = inference_information["thinning_factor"]
+ num_steps = inference_information["num_steps"]
+ num_walkers = inference_information["num_walkers"]
+
# load samples from raw chains
for i in range(inference_information["num_runs"]):
density_evals = np.loadtxt(
@@ -428,37 +429,46 @@ def load_slice_inference_results(
ndmin=2,
)
if i == 0:
- overall_density_evals = density_evals
- overall_sim_results = sim_results
- overall_params = params
+ param_dim = params.shape[1]
+ data_dim = sim_results.shape[1]
+ overall_density_evals = density_evals.reshape(
+ num_steps, num_walkers, 1
+ )
+ overall_sim_results = sim_results.reshape(
+ num_steps, num_walkers, data_dim
+ )
+ overall_params = params.reshape(
+ num_steps, num_walkers, param_dim
+ )
else:
overall_density_evals = np.concatenate(
- (overall_density_evals, density_evals)
+ (
+ overall_density_evals,
+ density_evals.reshape(num_steps, num_walkers, 1),
+ )
)
overall_sim_results = np.concatenate(
- (overall_sim_results, sim_results)
+ (
+ overall_sim_results,
+ sim_results.reshape(num_steps, num_walkers, data_dim),
+ )
+ )
+ overall_params = np.concatenate(
+ (
+ overall_params,
+ params.reshape(num_steps, num_walkers, param_dim),
+ )
)
- overall_params = np.concatenate((overall_params, params))
-
- num_walkers = inference_information["num_walkers"]
# thin and burn in
return (
- overall_params[
- num_burn_in_samples
- * num_walkers :: thinning_factor
- * num_walkers,
- :,
- ],
+ overall_params[num_burn_in_samples::thinning_factor, :, :].reshape(
+ -1, param_dim
+ ),
overall_sim_results[
- num_burn_in_samples
- * num_walkers :: thinning_factor
- * num_walkers,
- :,
- ],
+ num_burn_in_samples::thinning_factor, :, :
+ ].reshape(-1, data_dim),
overall_density_evals[
- num_burn_in_samples
- * num_walkers :: thinning_factor
- * num_walkers
- ],
+ num_burn_in_samples::thinning_factor, :, :
+ ].reshape(-1, 1),
)
diff --git a/eulerpi/core/sampling.py b/eulerpi/core/sampling.py
index c133d691..d2bf4642 100644
--- a/eulerpi/core/sampling.py
+++ b/eulerpi/core/sampling.py
@@ -132,9 +132,6 @@ def run_emcee_once(
sampler_results = sampler_results.reshape(
num_steps * num_walkers, sampling_dim + data_dim + 1
)
- sampler_results = sampler_results.reshape(
- num_walkers * num_steps, sampling_dim + data_dim + 1
- )
logger.info(
f"The acceptance fractions of the emcee sampler per walker are: {np.round(sampler.acceptance_fraction, 2)}"
diff --git a/pyproject.toml b/pyproject.toml
index 70f3d517..a5e9a857 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "eulerpi"
-version = "0.3.0"
+version = "0.3.1"
description = "The eulerian parameter inference (eulerpi) returns a parameter distribution, which is consistent with the observed data by solving the inverse problem directly. In the case of a one-to-one mapping, this is the true underlying distribution."
authors = ["Lars Kaiser <[email protected]>", "Sebastian Hoepfl <[email protected]>", "Vincent Wagner <[email protected]>"]
readme = "README.md"
| Numburn samples has no sensible default / no documentation for recommended values
**Describe the bug**
MCMC with a even number of walkers and and e.g. occurence=2 ignores half of the walkers.
Inspect load_sim_results for correct implementation (order of loaded samples etc.)
also for loading dense grid results
...
| We should at the same time clearly indicate how the runs, walkers and steps are precisely concatenated.
Does #46 solve that issue for you? Now inference defaults to burning 10% of the samples and no thinning. I would argue that independence of subsequent samples in the parameter chain isn't very important for most of the users (or at least less important than having higher accuracy in the parameter density estimate), so I set the default to no thinning. Is there consensus on that or should we discuss this further?
> **Describe the bug** MCMC with a even number of walkers and and e.g. occurence=2 ignores half of the walkers.
I think we should also change the behavior of num_burn_in_samples: Right now, this is the total number of samples (the sum over all chains). I think num_burn_in_samples should be the number of burned samples per chain instead.
Turns out the last fix also undid the changes made before, I'll have to look into this again. | 2023-05-02T15:09:12 | 0.0 | [] | [] |
||
Systems-Theory-in-Systems-Biology/EPI | Systems-Theory-in-Systems-Biology__EPI-14 | 705c8f9598e5cd04f39ac9e3b715d35e5a27e8f7 | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d0519e5b..7dcef109 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -47,6 +47,11 @@ jobs:
installer-parallel: true
- name: Verify poetry installation
run: poetry --version
+ - name: Install amici dependencies
+ run: |
+ sudo apt install -y swig
+ sudo apt install -y libblas-dev
+ sudo apt install -y libatlas-base-dev
- name: Load cached venv
id: cached-poetry-dependencies
uses: actions/cache@v3
@@ -72,8 +77,9 @@ jobs:
run: |
source $VENV
pytest -v
- - name: Run coverage
- run: |
- source $VENV
- coverage run -m pytest -v
- coverage report
+ # TODO: coverage run creates segmentation fault on Github ci, but not locally. Debug it
+ # - name: Run coverage
+ # run: |
+ # source $VENV
+ # coverage run -m pytest -v
+ # coverage report
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
new file mode 100644
index 00000000..cf45e226
--- /dev/null
+++ b/.github/workflows/publish.yml
@@ -0,0 +1,44 @@
+name: Publish to PyPI
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.10"
+ cache: pip
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+ with:
+ #version: 1.2.2
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+ installer-parallel: true
+ - name: Verify poetry installation
+ run: poetry --version
+ - name: Install amici dependencies
+ run: |
+ sudo apt install -y swig
+ sudo apt install -y libblas-dev
+ sudo apt install -y libatlas-base-dev
+ - name: Load cached venv
+ id: cached-poetry-dependencies
+ uses: actions/cache@v3
+ with:
+ path: .venv
+ key: venv-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }}
+ - name: Install dependencies
+ run: poetry install --no-interaction --with=dev
+ if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+ - name: Publish to PyPI
+ env:
+ PYPI_USERNAME: __token__
+ PYPI_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
+ run: |
+ poetry publish --build -u $PYPY_USERNAME -p $PYPI_PASSWORD
diff --git a/.gitignore b/.gitignore
index 8317a664..f3926a03 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,6 +131,9 @@ dmypy.json
# End of https://www.gitignore.io/api/python
-# Custom
+# Project specific
Data
Applications
+amici
+swig
+generated_sbml_model
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2052a194..ceb265dc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -18,7 +18,7 @@ repos:
entry: pflake8
additional_dependencies: [pyproject-flake8]
- repo: https://github.com/PyCQA/isort
- rev: 5.10.1
+ rev: 5.12.0
hooks:
- id: isort
args: [--settings-path=pyproject.toml]
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..d931b3bd
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,24 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+## [Unreleased]
+
+### Added
+
+- Feature X
+- Feature Y
+
+### Changed
+
+- Refactored module A to improve performance
+
+### Fixed
+
+- Bug in module B that caused a crash
+
+## [0.1.0] - 2022-03-xx
+
+### Added
+
+- Initial release
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index d07b181b..d971e80a 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -22,6 +22,24 @@
curl -sSL https://install.python-poetry.org | python3 -
```
+- Install dependencies:
+
+ - For amici (sbml):
+
+ ```bash
+ sudo apt install swig
+ sudo apt install libblas-dev
+ sudo apt install libatlas-base-dev
+ ```
+
+ - For cpp:
+
+ ```bash
+ sudo apt install cmake
+ sudo apt install libeigen3-dev
+ sudo apt install pybind11-dev
+ ```
+
- Install epi:
```bash
@@ -40,53 +58,59 @@
Here are the most important infos on how to maintain this repository.
-- **Dependency Management with Poetry**: \
- We use poetry as build system, for the dependency management and the virtual environment. During the [Quickstart](#quickstart) we installed all dependencies into the virtual environment, therefore:
+### Dependency Management with Poetry
- ---
- **IMPORTANT**
+We use poetry as build system, for the dependency management and the virtual environment. During the [Quickstart](#quickstart) we installed all dependencies into the virtual environment, therefore:
- Run all commands in the next section in the poetry shell. It can be started with `poetry shell`. Alternatively you can run commands with `poetry run <yourcommand>`.
+---
+**IMPORTANT**
- ---
+Run all commands in the next section in the poetry shell. It can be started with `poetry shell`. Alternatively you can run commands with `poetry run <yourcommand>`.
- Run ```poetry add package_name``` to add the library/package with the name ```package_name``` as dependencie to your project. Use ```poetry add --group dev package_name``` to add ```package_name``` to your ```dev``` dependencies. You can have arbitrary group names.
-
- For more information read the [Poetry Documentation](https://python-poetry.org/docs/basic-usage/#initialising-a-pre-existing-project).
+---
-- **Code quality checks**: \
- We use black, flake8, isort to maintain a common style and check the code. Please check your code install the pre-commit hook:
+Run ```poetry add package_name``` to add the library/package with the name ```package_name``` as dependencie to your project. Use ```poetry add --group dev package_name``` to add ```package_name``` to your ```dev``` dependencies. You can have arbitrary group names.
+
+For more information read the [Poetry Documentation](https://python-poetry.org/docs/basic-usage/#initialising-a-pre-existing-project).
- ``` bash
- pre-commit install
- ```
+### Code quality checks
- You can also check your changes manually:
+We use black, flake8, isort to maintain a common style and check the code. Please check your code install the pre-commit hook:
- ``` bash
- pre-commit run --all-files
- ```
+ ``` bash
+ pre-commit install
+ ```
-- **Testing with pytest**:
+ You can also check your changes manually:
- ```bash
- pytest
+ ``` bash
+ pre-commit run --all-files
```
- You can generate a coverage report by running the following code block in your terminal. Please be aware that it might take a long time, think about lowering the number of steps in the sampling.
+Testing with pytest
- ```bash
- coverage run -m pytest -v
- coverage report
- coverage html
- ```
+```bash
+pytest
+```
-<!-- TODO:
-- **Profiling with scalene**:
+You can generate a coverage report by running the following code block in your terminal. Please be aware that it might take a long time, think about lowering the number of steps in the sampling.
- You can profile epi with scalene (or gprofile) using the commands
+```bash
+coverage run -m pytest -v
+coverage report
+coverage html
+```
+
+### Profiling with scalene
- -->
+You can profile epi with scalene (or gprofile) using the commands:
+
+```bash
+python3 -m pip install -U scalene
+scalene tests/profiling.py
+```
+
+This will create a `profile.html` file, which you can open using your browser. Do not rely on the OPENAI optimization proposals. They are often plain wrong in scalene.
<!-- TODO: Add a docker development environment -->
<!-- - **Working with docker**:
@@ -98,65 +122,86 @@ Here are the most important infos on how to maintain this repository.
sudo service docker stop
``` -->
-- **Documentation with Sphinx**:
+### Documentation with Sphinx
- ``` bash
- cd docs
- sphinx-apidoc -f -o source/ ../
- make html
- ```
+``` bash
+cd docs
+sphinx-apidoc -e -f -o source/ ../
+make html
+```
- All extensions of sphinx which are used to create this documentation and further settings are stored in the file `docs/source/conf.py`.
- If you add extensions to `conf.py` which are not part of sphinx, add them to the `docs/source/requirement.txt` file to allow github action `mmaraskar/sphinx-action@master` to still build the documentation.
+All extensions of sphinx which are used to create this documentation and further settings are stored in the file `docs/source/conf.py`.
+If you add extensions to `conf.py` which are not part of sphinx, add them to the `docs/source/requirement.txt` file to allow github action `mmaraskar/sphinx-action@master` to still build the documentation.
-- **Hosting with GitHub Pages**: \
- To publish the documentation on github pages you probably have to change some settings in the [GitHub Repository](https://github.com/Systems-Theory-in-Systems-Biology/EPI)
+A [cheatsheet](https://docs.typo3.org/m/typo3/docs-how-to-document/main/en-us/WritingReST/CheatSheet.html) for reStructuredText with Sphinx.
- ``` text
- Settings -> Code and automation -> Pages -> Build and Deployment:
- - Source: Deploy from a branch
- - Branch: gh-pages && /(root)
- ```
+### Hosting with GitHub Pages
-- **Test Deployment with TestPyPi**: \
- You have to setup testpypi once:
+To publish the documentation on github pages you probably have to change some settings in the [GitHub Repository](https://github.com/Systems-Theory-in-Systems-Biology/EPI)
- ```bash
- poetry config repositories.testpypi https://test.pypi.org/legacy/
- poetry config http-basic.testpypi __token__ pypi-your-api-token-here
- ```
+``` text
+Settings -> Code and automation -> Pages -> Build and Deployment:
+- Source: Deploy from a branch
+- Branch: gh-pages && /(root)
+```
- Build and deploy:
+### Changelog
- ```bash
- poetry build
- poetry publish -r testpypi
- ```
+We use the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format for the changelog. It should be updated with every pull request.
- Test this with
+### Versioning
- ```bash
- python3 -m pip install --index-url https://test.pypi.org/simple/ --no-deps epi
- ```
+We use [Semantic Versioning](https://semver.org/). A version number is composed of three parts: major.minor.patch
-- **Deployment with PyPi**: \
- You have to setup pypi once:
+1. The major version should be incremented when you make incompatible changes.
+2. The minor version should be incremented when you add new functionality in a backwards-compatible manner.
+3. The patch version should be incremented when you make backwards-compatible bug fixes.
- ```bash
- poetry config pypi-token.pypi pypi-your-token-here
- ```
+Every time a new version is tagged, a GitHub Action workflow is trigger which builds and uploads the version to pypi.
- Build and deploy:
+Please update the version number in the `pyproject.toml` file before tagging the version.
- ```bash
- poetry publish --build
- ```
+### Test Deployment with TestPyPi
- Test this with
+You have to setup testpypi once:
- ```bash
- pip install epi
- ```
+```bash
+poetry config repositories.testpypi https://test.pypi.org/legacy/
+poetry config http-basic.testpypi __token__ pypi-your-api-token-here
+```
+
+Build and deploy:
+
+```bash
+poetry build
+poetry publish -r testpypi
+```
+
+Test this with
+
+```bash
+python3 -m pip install --index-url https://test.pypi.org/simple/ --no-deps epi
+```
+
+### Deployment with PyPi
+
+You have to setup pypi once:
+
+```bash
+poetry config pypi-token.pypi pypi-your-token-here
+```
+
+Build and deploy:
+
+```bash
+poetry publish --build
+```
+
+Test this with
+
+```bash
+pip install epi
+```
## Jax with CUDA
diff --git a/README.md b/README.md
index 5cae3692..45603e52 100644
--- a/README.md
+++ b/README.md
@@ -7,37 +7,40 @@
[](https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/pages/pages-build-deployment)
[](https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/sphinx.yml)
[](https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/ci.yml)
+[](https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/ci.yml)
-The Euler Parameter Inference (EPI) is a python package for inverse parameter inference.
+[](https://flake8.pycqa.org/)
+[](https://github.com/psf/black)
+[](./LICENSE.md)
+[](https://www.python.org/)
+
+Euler Parameter Inference (EPI) is a Python package for inverse parameter inference. It provides an implementation of the EPI algorithm, which takes observed data and a model as input and returns a parameter distribution consistent with the observed data by solving the inverse problem directly. In the case of a one-to-one mapping, this is the true underlying distribution.
## Documentation
The full documentation to this software, including a detailed tutorial on how to use EPI and the api documentation, can be found under [Documentation](https://Systems-Theory-in-Systems-Biology.github.io/EPI/).
-## About
+## Features
-The EPI algorithm takes observed data and a model as input and returns a parameter distribution, which is consistent with the observed data by solving the inverse problem directly. In the case of a one-to-one mapping, this is the true underlying distribution.
+EPI supports
-We support SBML ode models and user provided models.
+- SBML ode models
+- User provided models
+- Models with automatic differentation using jax
## Installation
- ---
- **IMPORTANT**
-
- The package is not yet available on pypi.
+**IMPORTANT**: The package is not yet available on pypi.
- <!-- ```text
- pip install epi
- ``` -->
+<!-- ```text
+pip install epi
+``` -->
- ---
-
-You can build the library from the newest source code by following the [Development Quickstart Guide](./DEVELOPMENT.md#quickstart).
+You can build the library from the latest source code by following the [Development Quickstart Guide](./DEVELOPMENT.md#quickstart).
## Using the library
-Derive your model from ```Model``` class and implement the abstract functions.
+To use EPI, derive your model from the `Model` class and implement the abstract functions. Here's an example code snippet:
```python
import jax.numpy as jnp
@@ -46,18 +49,12 @@ from epi.core.model import Model
class MyModel(Model):
- paramDim = N # The dimension of a parameter point
- dataDim = M # The dimension of a data point
+ param_dim = N # The dimension of a parameter point
+ data_dim = M # The dimension of a data point
def forward(self, param):
return jnp.array(...)
- def getParamSamplingLimits(self):
- return jnp.array([[-1.,1.], [-101.1, 13.4],...]) # [[UpperBound_dim1,LowerBound_dim1],...]
-
- def getCentralParam(self):
- return jnp.array([0.5, -30.0,...])
-
def jacobian(self, param):
return jnp.array(...)
```
@@ -69,11 +66,14 @@ from epi.sampling import inference
from my_model import MyModel
-model = MyModel()
-inference(model=model, dataPath="my_data.csv", numRuns=1, numWalkers=10, numSteps=2500, numProcesses=4)
+central_param = np.array([0.5, -1.5, ...])
+param_limits = np.array([[0.0, 1.0], [-3.0, 0.0], ...])
+
+model = MyModel(central_param, param_limits)
+inference(model=model, data="my_data.csv")
```
-The file `my_data.csv` has to contain the data in csv format with `seperator=","` in the format
+The `data` argument can be a numpy-2d-array or a PathLike object that points to a CSV file. In the example shown above, the CSV file `my_data.csv` should contain the data in the following format:
```text
datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
@@ -83,11 +83,12 @@ datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
```
-which corresponds to a matrix with the shape `nSamples x dataDim`.
-The parameter dataPath defaults to `Data/<ModelName>/<ModelName>Data.csv`. The other parameters `numRuns`, `numWalkers`, `numSteps`, `numProcesses` have fixed defaults. The results are written to three files:
+This corresponds to a matrix with the shape `nSamples x data_dim`. For more available options and parameters for the `inference` method, please refer to the [api documentation](https://systems-theory-in-systems-biology.github.io/EPI/epi.core.html#module-epi.core.inference). Note that the inference can be done with grid-based methods (dense grids, sparse grids) or sampling methods (mcmc).
+
+The results are stored in the following location:
-* `./Applications/<ModelName>/OverallParams.csv`
-* `./Applications/<ModelName>/OverallSimResults.csv`
-* `./Applications/<ModelName>/OverallDensityEvals.csv`
+* `./Applications/<ModelName>/.../OverallParams.csv`
+* `./Applications/<ModelName>/.../OverallSimResults.csv`
+* `./Applications/<ModelName>/.../OverallDensityEvals.csv`
-and contain the sampled parameters, the corresponding data points obtained from the model forward pass and the corresponding density evaluation.
+These files contain the sampled parameters, the corresponding data points obtained from the model forward pass, and the corresponding density evaluation.
diff --git a/TODO.md b/TODO.md
index ac415e5b..cbbe641c 100644
--- a/TODO.md
+++ b/TODO.md
@@ -1,9 +1,5 @@
# TODOs
-## Redesign
-
-- Change from data "file" flow to data flow?
-
## Pickling and jax
- [ ] Document classmethod solution for pickling function, how to use fixed params
@@ -14,31 +10,13 @@
## Documentation
- [ ] [Dependabot badge](https://github.com/dependabot/dependabot-core/issues/1912)
-- [ ] Deactivate todos in conf.py
-- [ ] Remove unfinished documentation and references to unfinished code, e.g. temperature model fixed params
- [ ] How to run jupyternotebook with poetry in vs code and in terminal
## Deployment
- [ ] Deployment to pypi
-## SBML
-
-- Complete sbml class using one of
- - [ ] [SymbolicSBML](https://gitlab.com/wurssb/Modelling/symbolicsbml)
- - [ ] [RoadRunner](https://sys-bio.github.io/roadrunner/docs-build/index.html) Could be a good option
- - [ ] [Sbmltodepy](https://github.com/AnabelSMRuggiero/sbmltoodepy) they dont want users to make issues/ pull requests / .. :(
-
## Postponed
-- [ ] Fix [Development Quickstart Guide](./DEVELOPMENT.md#quickstart) link in sphinx
- [ ] create single function ```Model.plot()``` that allows the user to visualize his results
- [ ] create single function ```Model.test()``` that allows the user to test the inversion for his model on artificial data
-- [ ] Use save, load from numpy and not savetxt, loadtxt
-- [ ] Or maybe better: use hdf5 backend for intermediate results, and inference takes data array as input and returns the three arrays which are currently saved as files
-- [ ] Allow to use the library with own data flow without writing and loading all the files. Return everything we save to files at the moment. So give more control to the user and rely less on the file system and default paths.
-
-## Outlook
-
-- [ ] Testing on other systems than linux on github: Replace apt install and then also test on windows and mac machine using github test matrix.
-- [ ] More / Systematic profiling with scalene
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 353d6bfb..5e13ed03 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1,6 @@
myst-parser>=0.18
sphinx_rtd_theme>=1.2.0
+pydata-sphinx-theme>=0.12.0
docutils>=0.18
sphinx_copybutton
# Install epi and its dependencies so sphinx can generate the docs.
diff --git a/docs/source/MarkdownLinks/changelog.rst b/docs/source/MarkdownLinks/changelog.rst
new file mode 100644
index 00000000..1bb9df33
--- /dev/null
+++ b/docs/source/MarkdownLinks/changelog.rst
@@ -0,0 +1,6 @@
+.. ------------
+.. Contributing
+.. ------------
+
+.. include:: ../../../CHANGELOG.md
+ :parser: myst_parser.sphinx_
diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css
index bec0c950..085f9d40 100644
--- a/docs/source/_static/custom.css
+++ b/docs/source/_static/custom.css
@@ -7,7 +7,9 @@ button.copybtn {
clear: both;
}
-.toggle .header {display: inline;}
+.toggle .header {
+ display: inline;
+}
.toggle .header p:after {
content: " ▶";
@@ -17,6 +19,11 @@ button.copybtn {
content: " ▼";
}
+/* Set the background color of the sidebar */
+.bd-sidebar-primary {
+ background-color: #ececec;
+}
+
/*Toggle from: https://stackoverflow.com/questions/2454577/sphinx-restructuredtext-show-hide-code-snippets */
/* .. container:: toggle
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 6c5a20df..789e7745 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -40,18 +40,10 @@
"sphinx.ext.todo",
"sphinx.ext.autosectionlabel",
"sphinx.ext.coverage",
+ "sphinx.ext.napoleon",
"sphinx_copybutton",
]
-# TODO: Switch to a nicer docstyle like numpy or google and also maybe change the theme
-# 'sphinxcontrib.napoleon',
-# 'numpydoc',
-# ]
-# Depending on what is chosen:
-# html_theme = "pydata_sphinx_theme"
-# poetry add pydata-sphinx-theme --group=dev
-# poetry add sphinxcontrib-napoleon --group=dev
-# poetry add numpydoc --group=dev
myst_enable_extensions = [
"tasklist",
@@ -88,6 +80,7 @@
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
+# html_theme = "sphinx_book_theme"
html_logo = "../../epi.png"
@@ -97,3 +90,33 @@
html_static_path = ["_static"]
todo_include_todos = 1
+
+html_theme_options = {
+ "navigation_depth": -1,
+}
+
+if html_theme == "sphinx_book_theme":
+ # Add searchbar to sidebar in book theme
+ html_sidebars = {
+ "**": [
+ "navbar-logo.html",
+ "search-field.html",
+ "sbt-sidebar-nav.html",
+ ] # "sidebar-nav-bs"]
+ }
+ # Set book theme options
+ html_theme_options = {
+ "repository_url": "https://github.com/Systems-Theory-in-Systems-Biology/EPI",
+ "use_repository_button": True,
+ "show_navbar_depth": 1,
+ "show_toc_level": 2,
+ "use_download_button": True,
+ "home_page_in_toc": True,
+ # "use_source_button": True,
+ # "repository_branch": "gh-pages",
+ # "use_edit_page_button": True,
+ }
+
+# Configurate autodoc
+autodoc_member_order = "groupwise"
+add_module_names = False
diff --git a/docs/source/epi.core.dense_grid.rst b/docs/source/epi.core.dense_grid.rst
new file mode 100644
index 00000000..615d6055
--- /dev/null
+++ b/docs/source/epi.core.dense_grid.rst
@@ -0,0 +1,7 @@
+epi.core.dense\_grid module
+===========================
+
+.. automodule:: epi.core.dense_grid
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.inference.rst b/docs/source/epi.core.inference.rst
new file mode 100644
index 00000000..e554e1f2
--- /dev/null
+++ b/docs/source/epi.core.inference.rst
@@ -0,0 +1,7 @@
+epi.core.inference module
+=========================
+
+.. automodule:: epi.core.inference
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.kde.rst b/docs/source/epi.core.kde.rst
new file mode 100644
index 00000000..9bbe8e84
--- /dev/null
+++ b/docs/source/epi.core.kde.rst
@@ -0,0 +1,7 @@
+epi.core.kde module
+===================
+
+.. automodule:: epi.core.kde
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.model.rst b/docs/source/epi.core.model.rst
new file mode 100644
index 00000000..236bdb7e
--- /dev/null
+++ b/docs/source/epi.core.model.rst
@@ -0,0 +1,7 @@
+epi.core.model module
+=====================
+
+.. automodule:: epi.core.model
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.result_manager.rst b/docs/source/epi.core.result_manager.rst
new file mode 100644
index 00000000..a2a82997
--- /dev/null
+++ b/docs/source/epi.core.result_manager.rst
@@ -0,0 +1,7 @@
+epi.core.result\_manager module
+===============================
+
+.. automodule:: epi.core.result_manager
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.rst b/docs/source/epi.core.rst
index 65ab75f5..107b01bc 100644
--- a/docs/source/epi.core.rst
+++ b/docs/source/epi.core.rst
@@ -4,45 +4,17 @@ epi.core package
Submodules
----------
-epi.core.functions module
--------------------------
-
-.. automodule:: epi.core.functions
- :members:
- :undoc-members:
- :show-inheritance:
-
-epi.core.kde module
--------------------
-
-.. automodule:: epi.core.kde
- :members:
- :undoc-members:
- :show-inheritance:
-
-epi.core.model module
----------------------
-
-.. automodule:: epi.core.model
- :members:
- :undoc-members:
- :show-inheritance:
-
-epi.core.sampling module
-------------------------
-
-.. automodule:: epi.core.sampling
- :members:
- :undoc-members:
- :show-inheritance:
-
-epi.core.sparsegrid module
---------------------------
-
-.. automodule:: epi.core.sparsegrid
- :members:
- :undoc-members:
- :show-inheritance:
+.. toctree::
+ :maxdepth: 4
+
+ Dense Grid<epi.core.dense_grid>
+ Inference<epi.core.inference>
+ KDE<epi.core.kde>
+ Model<epi.core.model>
+ Result Manager<epi.core.result_manager>
+ Sampling<epi.core.sampling>
+ Sparse Grid<epi.core.sparsegrid>
+ Transformations<epi.core.transformations>
Module contents
---------------
diff --git a/docs/source/epi.core.sampling.rst b/docs/source/epi.core.sampling.rst
new file mode 100644
index 00000000..37793725
--- /dev/null
+++ b/docs/source/epi.core.sampling.rst
@@ -0,0 +1,7 @@
+epi.core.sampling module
+========================
+
+.. automodule:: epi.core.sampling
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.sparsegrid.rst b/docs/source/epi.core.sparsegrid.rst
new file mode 100644
index 00000000..b5897c34
--- /dev/null
+++ b/docs/source/epi.core.sparsegrid.rst
@@ -0,0 +1,7 @@
+epi.core.sparsegrid module
+==========================
+
+.. automodule:: epi.core.sparsegrid
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.core.transformations.rst b/docs/source/epi.core.transformations.rst
new file mode 100644
index 00000000..8b17e8de
--- /dev/null
+++ b/docs/source/epi.core.transformations.rst
@@ -0,0 +1,7 @@
+epi.core.transformations module
+===============================
+
+.. automodule:: epi.core.transformations
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.jax_extension.rst b/docs/source/epi.jax_extension.rst
new file mode 100644
index 00000000..80a1ac94
--- /dev/null
+++ b/docs/source/epi.jax_extension.rst
@@ -0,0 +1,7 @@
+epi.jax\_extension module
+=========================
+
+.. automodule:: epi.jax_extension
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.plotting.marginals.rst b/docs/source/epi.plotting.marginals.rst
new file mode 100644
index 00000000..8b2ee3fd
--- /dev/null
+++ b/docs/source/epi.plotting.marginals.rst
@@ -0,0 +1,7 @@
+epi.plotting.marginals module
+=============================
+
+.. automodule:: epi.plotting.marginals
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.plotting.plots.rst b/docs/source/epi.plotting.plots.rst
new file mode 100644
index 00000000..01bf52e3
--- /dev/null
+++ b/docs/source/epi.plotting.plots.rst
@@ -0,0 +1,7 @@
+epi.plotting.plots module
+=========================
+
+.. automodule:: epi.plotting.plots
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.plotting.plotter.rst b/docs/source/epi.plotting.plotter.rst
new file mode 100644
index 00000000..c737e2c7
--- /dev/null
+++ b/docs/source/epi.plotting.plotter.rst
@@ -0,0 +1,7 @@
+epi.plotting.plotter module
+===========================
+
+.. automodule:: epi.plotting.plotter
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/epi.plotting.rst b/docs/source/epi.plotting.rst
index fda47a9d..19d1ba58 100644
--- a/docs/source/epi.plotting.rst
+++ b/docs/source/epi.plotting.rst
@@ -4,29 +4,12 @@ epi.plotting package
Submodules
----------
-epi.plotting.marginals module
------------------------------
+.. toctree::
+ :maxdepth: 4
-.. automodule:: epi.plotting.marginals
- :members:
- :undoc-members:
- :show-inheritance:
-
-epi.plotting.plots module
--------------------------
-
-.. automodule:: epi.plotting.plots
- :members:
- :undoc-members:
- :show-inheritance:
-
-epi.plotting.plotter module
----------------------------
-
-.. automodule:: epi.plotting.plotter
- :members:
- :undoc-members:
- :show-inheritance:
+ Marginals<epi.plotting.marginals>
+ Plots<epi.plotting.plots>
+ Plotting<epi.plotting.plotter>
Module contents
---------------
diff --git a/docs/source/epi.rst b/docs/source/epi.rst
index bc81d851..a665870e 100644
--- a/docs/source/epi.rst
+++ b/docs/source/epi.rst
@@ -7,19 +7,16 @@ Subpackages
.. toctree::
:maxdepth: 4
- epi.core
- epi.plotting
+ Core<epi.core>
+ Plotting<epi.plotting>
Submodules
----------
-epi.jax\_extension module
--------------------------
+.. toctree::
+ :maxdepth: 4
-.. automodule:: epi.jax_extension
- :members:
- :undoc-members:
- :show-inheritance:
+ Jax Extension<epi.jax_extension>
Module contents
---------------
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index 4f9d3bf9..1f45915b 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -15,7 +15,7 @@ a sbml model; and a problem defined through external C++ code.
External C++ Model <examples/cpp_example>
.. * :ref:`1D-Temperature model from the tutorial<Temperature Model>`
-.. * :ref:`High-Dimensional Stock Data<StockData Model>`
+.. * :ref:`High-Dimensional Stock Data<stock_data Model>`
.. * :ref:`Corona ODE Model<Corona ODE Model>`
.. * :ref:`SBML Model<SBML Model>`
.. * :ref:`External C++ Model<C++ Model>`
diff --git a/docs/source/examples/corona_example.rst b/docs/source/examples/corona_example.rst
index ad63071a..ceb2f5a5 100644
--- a/docs/source/examples/corona_example.rst
+++ b/docs/source/examples/corona_example.rst
@@ -1,24 +1,15 @@
Corona ODE Model
----------------
-The corona ode model is contained in :file:`epi/examples/corona/corona.py`.
-
-.. TODO::
-
- The model implements the function :math:`y_i(q_i)=???`.
+A corona ode model is contained in :file:`epi/examples/corona/corona.py`.
Specialities
____________
* ODE solver: To solve the ODE problem the jax based ode solver library :code:`diffrax` is used: https://github.com/patrick-kidger/diffrax.
* Automatic Differentiation: The derivatives are calculated automatically with jax by deriving from the class :py:class:`~epi.core.model.JaxModel`,
- which automatically calculates the :py:meth:`epi.core.model.jacobian`.
+ which automatically calculates sets :py:meth:`~epi.core.model.Model.jacobian`.
* JIT compilation: Inheriting from :py:class:`~epi.core.model.JaxModel` also enables jit compilation / optimization for the forward and jacobian method.
This usually results in a significant execution speedup. It also allows to run your model on the gpu.
-* vectorization of model calls using :code:`jax.vmap`:
-
- .. code-block:: python
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
.. literalinclude:: ../../../epi/examples/corona/corona.py
:language: python
diff --git a/docs/source/examples/cpp_example.rst b/docs/source/examples/cpp_example.rst
index 54c209e5..b8c61f31 100644
--- a/docs/source/examples/cpp_example.rst
+++ b/docs/source/examples/cpp_example.rst
@@ -9,7 +9,7 @@ Specialities
____________
* Calling C++ Code: Calls external c++ code using pybind11
-* Performance Comparison: The file :file:`python_reference_plants.py` includes
+* Performance Comparison: The file :file:`epi/examples/cpp/python_reference_plants.py` includes
python models implementing the same mapping. You can compare the performance of the different approaches.
Preparation
@@ -25,12 +25,14 @@ ___________
C++ Model Definition
____________________
-.. literalinclude:: ../../../epi/examples/cpp/cpp_plant.py
+.. literalinclude:: ../../../epi/examples/cpp/cpp_model.hpp
:language: c++
-.. TODO::
+Wrapping the C++ Code
+_____________________
- Why is pygments not parsing the c++ code?
+.. literalinclude:: ../../../epi/examples/cpp/wrapper.cpp
+ :language: c++
The example code is inconsistent in the following way:
It uses a normal array for the forward method,
@@ -39,15 +41,11 @@ for the jacobian method. This allows to show us how to write wrapper code
for normal arrays as well as for eigen objects. On the python side exclusively
numpy 1d/2d arrays will be used.
-.. TODO::
-
- What about vectorization? Jax will likely not help with speeding up.
-
.. note::
- For more information on how to use pybind11 see:
- PyBind11 Documentation: https://pybind11.readthedocs.io/en/stable/
- PyBind11 Eigen Notes: https://pybind11.readthedocs.io/en/stable/advanced/cast/eigen.html
+ The C++ code is wrapped using pybind11. For more information on how to use pybind11 see:
+ * PyBind11 Documentation: https://pybind11.readthedocs.io/en/stable/
+ * PyBind11 Eigen Notes: https://pybind11.readthedocs.io/en/stable/advanced/cast/eigen.html
Compilation
@@ -66,4 +64,4 @@ _________________
.. literalinclude:: ../../../epi/examples/cpp/cpp_plant.py
:language: python
-You can use the example model as template for your own C++ Model.
+You can use the cpp example model as template for your own C++ Model.
diff --git a/docs/source/examples/sbml_example.rst b/docs/source/examples/sbml_example.rst
index 5a88a53f..1628878d 100644
--- a/docs/source/examples/sbml_example.rst
+++ b/docs/source/examples/sbml_example.rst
@@ -1,14 +1,33 @@
-SBML Model
+SBML Models
----------
-The sbml model loads the biological model from an external file using the sbml standard and generates the forward and jacobian method automatically.
+The :py:class:`~epi.core.model.SBMLModel` loads a biological model from an external file written in the sbml standard format.
+It generates the forward and jacobian method automatically and derives the parameter and data dimension from the sbml model.
-.. TODO::
+The two example models are included in :file:`epi/examples/sbml/`.
- * The sbml model is not implemented yet. We are working on it.
Specialities
____________
-* You can visualize sbml files with https://sbml4humans.de/.
+
+* Super simple setup
+* No need to write any model code
+
+Here's a code snippet to load your own sbml model and to do the parameter inference:
+
+.. code-block:: python
+
+ from epi.core.model import SBMLModel
+ from epi.core.inference import inference
+
+ model = SBMLModel('model.xml', central_param=[1.0, 1.0], param_limits=[[0.0, 2.0], [0.0, 2.0]], param_names=['k1', 'k2'])
+ inference(model, 'data.csv')
+
+The attribute :py:attr:`~epi.core.model.SBMLModel.param_names` contains the names of the parameters in the sbml model, for which the inference should be performed.
+Per default it contains all parameters from the sbml model file.
+
+.. note::
+ For the SBML Standard see https://sbml.org/.
+ You can visualize sbml files with https://sbml4humans.de/.
.. .. literalinclude:: ../../../epi/examples/sbml/sbml_model.py
.. :language: python
diff --git a/docs/source/examples/stock_example.rst b/docs/source/examples/stock_example.rst
index 1cbee54f..40ae329a 100644
--- a/docs/source/examples/stock_example.rst
+++ b/docs/source/examples/stock_example.rst
@@ -1,22 +1,16 @@
-StockData Model
----------------
-The high-dimensional stock data model is contained in :file:`epi/examples/stock/stock.py`.
-
-.. TODO::
-
- The model implements the function :math:`y_i(q_i)`
+Stock Data Model
+----------------
+A high-dimensional stock data model is contained in :file:`epi/examples/stock/stock.py`.
Specialities
____________
-* External Data Source: The model shows how to use an external data source in the workflow by overwriting the method :py:meth:`epi.core.model.Model.dataLoader`.
-* High-Dimensional: The model has a high number of dimensions: DataDim = 19, ParamDim = 6
- * Large Number of Walkers in MCMC Sampling
- * Visualization: The visualization can be done for each dimension separately, for two selected dimensions or using spider web plots.
-
-.. TODO::
-
- Visualization
+* External Data Source: The model loads stock data from the web.
+* High-Dimensional: The model has a high number of dimensions: data_dim = 19, param_dim = 6. The sampler emcee strongly recommends using at least 12 walkers for this model.
+* Automatic Differentiation: The derivatives are calculated automatically with jax by deriving from the class :py:class:`~epi.core.model.JaxModel`,
+  which automatically calculates and sets :py:meth:`~epi.core.model.Model.jacobian`.
+* JIT compilation: Inheriting from :py:class:`~epi.core.model.JaxModel` also enables jit compilation / optimization for the forward and jacobian method.
+ This usually results in a significant execution speedup. It also allows to run your model on the gpu.
.. literalinclude:: ../../../epi/examples/stock/stock.py
:language: python
diff --git a/docs/source/examples/temperature_example.rst b/docs/source/examples/temperature_example.rst
index a6c51217..b5dd0dff 100644
--- a/docs/source/examples/temperature_example.rst
+++ b/docs/source/examples/temperature_example.rst
@@ -1,21 +1,32 @@
Temperature Model
-----------------
The temperature model is contained in :file:`epi/examples/temperature/temperature.py`.
-The model :math:`y_i(q_i)=60 \cos(q_i)-30=s(q_i)` describes the temperature for a place on the earth :math:`y_i` by using the latitude coordinates :math:`q_i`.
-The jacobian :math:`{\frac{dy}{dq}]_i(q_i)=-30 \sin(q_i)` can be calculated analytically.
+The model :math:`y(q)=60 \cos(q)-30=s(q)` describes the temperature for a place on the earth :math:`y` by using the latitude coordinates :math:`q`.
+The jacobian :math:`{\frac{dy}{dq}}(q)=-60 \sin(q)` can be calculated analytically.
+
+.. literalinclude:: ../../../epi/examples/temperature/temperature.py
+ :language: python
+ :pyobject: Temperature
Specialities
____________
-* Additional fixed parameters: The model includes fixed parameters :code:`self.lowT=30.0` and :code:`self.highT=30.0`.
+* Additional fixed parameters: The model includes fixed parameters :code:`self.low_T=-30.0` and :code:`self.high_T=30.0`.
These fixed parameters are passed to the forward function separately. You can create models with different parameters by
- creating several model objects. However you should think about overwriting the method :py:meth:`epi.core.model.Model.getModelName()`
- to include the fixed parameters of the model object. Else your results for different fixed parameter sets will be mixed.
-
-.. TODO::
+ creating several model objects.
- Fix _forward, call, ... and maybe adapt this documentation part then
+  The best way to separate the outputs for the parametrized models is to pass a string based on the fixed_params to the attribute :py:attr:`run_name` of the :py:func:`~epi.core.inference.inference` function.
.. literalinclude:: ../../../epi/examples/temperature/temperature.py
:language: python
- :pyobject: Temperature
+ :pyobject: TemperatureWithFixedParams
+
+.. literalinclude:: ../../../tests/test_fixed_params.py
+ :language: python
+ :pyobject: test_fixed_params
+ :lines: 9-
+
+.. note::
+
+   The function :py:meth:`~epi.examples.temperature.temperature.TemperatureWithFixedParams.calc_forward` is not strictly necessary.
+ However it can help to make it work with jax.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 45bb31fa..046e9712 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -8,11 +8,11 @@ Welcome to EPI's documentation!
.. role:: python(code)
:language: python
:class: highlight
-
+
.. include:: welcome.rst
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
:caption: Contents:
API DOC<modules>
@@ -22,6 +22,7 @@ Welcome to EPI's documentation!
MarkdownLinks/contributing
MarkdownLinks/license
MarkdownLinks/citation
+ MarkdownLinks/changelog
..
Indices and tables
diff --git a/docs/source/tutorial_material/tutorial.ipynb b/docs/source/tutorial_material/tutorial.ipynb
index f849731b..a320d5e9 100644
--- a/docs/source/tutorial_material/tutorial.ipynb
+++ b/docs/source/tutorial_material/tutorial.ipynb
@@ -92,21 +92,9 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "ename": "ModuleNotFoundError",
- "evalue": "No module named 'epi'",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m/tmp/ipykernel_20770/3791781029.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mjax\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mjnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mepi\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
- "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'epi'"
- ]
- }
- ],
+ "outputs": [],
"source": [
"import importlib\n",
"import jax.numpy as jnp\n",
@@ -124,7 +112,7 @@
"- `jacobian`\n",
"\n",
"In addition it must implement the methods\n",
- "- `getCentralParam`\n",
+ "- `getcentral_param`\n",
"- `getParamSamplingLimits`\n",
"This provides the sampling algorithm with sensible starting values and boundary values.\n",
"\n",
@@ -138,27 +126,21 @@
"outputs": [],
"source": [
"class Temperature(Model):\n",
- " def __init__(self, delete: bool = False, create: bool = True) -> None:\n",
- " super().__init__(delete, create)\n",
"\n",
- " self.dataPath = importlib.resources.path(\n",
- " \"epi.examples.temperature\", \"TemperatureData.csv\"\n",
- " )\n",
+ " param_dim = 1\n",
+ " data_dim = 1\n",
+ "\n",
+ " defaultcentral_param = np.array([np.pi / 4.0])\n",
+ " defaultParamSamplingLimits = np.array([[0, np.pi / 2]])\n",
"\n",
" def forward(self, param):\n",
- " lowT = -30.0\n",
- " highT = 30.0\n",
- " res = jnp.array([lowT + (highT - lowT) * jnp.cos(jnp.abs(param[0]))])\n",
+ " low_T = -30.0\n",
+ " high_T = 30.0\n",
+ " res = jnp.array([low_T + (high_T - low_T) * jnp.cos(jnp.abs(param[0]))])\n",
" return res\n",
"\n",
" def jacobian(self, param):\n",
- " return jnp.array([60.0 * jnp.sin(jnp.abs(param[0]))])\n",
- "\n",
- " def getCentralParam(self):\n",
- " return np.array([np.pi / 4.0])\n",
- "\n",
- " def getParamSamplingLimits(self):\n",
- " return np.array([[0, np.pi / 2]])"
+ " return jnp.array([60.0 * jnp.sin(jnp.abs(param[0]))])"
]
},
{
@@ -177,10 +159,10 @@
"metadata": {},
"outputs": [],
"source": [
- "from epi.core.sampling import inference\n",
+ "from epi.core.inference import inference\n",
"\n",
"model = Temperature()\n",
- "inference(model, dataPath = \"TemperatureData.csv\")"
+ "inference(model, data = \"TemperatureData.csv\")"
]
},
{
@@ -188,7 +170,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Depending on the complexity of your model the sampling can take a long time. Due to this reason, not only the final results but also intermediate sampling results are saved. You can find them in the folder `Applications/Temperature/`. The final results are stored in the file `Applications/Temperature/OverallSimResults.csv`."
+ "Depending on the complexity of your model the sampling can take a long time. Due to this reason, not only the final results but also intermediate sampling results are saved. You can find them in the folder `Applications/Temperature/`."
]
}
],
diff --git a/docs/source/tutorial_material/tutorial.rst b/docs/source/tutorial_material/tutorial.rst
index 7c9aba21..d460607e 100644
--- a/docs/source/tutorial_material/tutorial.rst
+++ b/docs/source/tutorial_material/tutorial.rst
@@ -90,7 +90,7 @@ Of course, you also need the imports:
from epi.core.model import Model
A model inhereting from :py:class:`~epi.core.model.Model` must implement the methods :py:meth:`~epi.core.model.Model.forward` and :py:meth:`~epi.core.model.Model.jacobian`.
-In addition it must provide the methods :py:meth:`~epi.core.model.Model.getCentralParam` and :py:meth:`~epi.core.model.Model.getParamSamplingLimits` to provide the sampling algorithm with sensible starting values and boundary values.
+In addition it must provide the methods :py:meth:`~epi.core.model.Model.getcentral_param` and :py:meth:`~epi.core.model.Model.getParamSamplingLimits` to provide the sampling algorithm with sensible starting values and boundary values.
The jacobian is derived analytically here and implemented explicitly.
.. important::
@@ -123,14 +123,15 @@ Now we can now use EPI to infer the parameter distribution from the data.
.. code-block:: python
- from epic.sampling import inference
+ from epi.core.inference import inference
model = Temperature()
- inference(model, dataPath = "TemperatureData.csv")
+ inference(model, data = "TemperatureData.csv")
Depending on the complexity of your model the sampling can take a long time.
Due to this reason, not only the final results but also intermediate sampling results are saved.
-You can find them in the folder :file:`Applications/Temperature/`. The final results are stored in the file :file:`Applications/Temperature/OverallSimResults.csv`.
+You can find them in the folder :file:`Applications/Temperature/`. The final results are stored in the file :file:`Applications/Temperature/<run_name>/<slice_name>/OverallSimResults.csv`.
+The ``slice_name`` results from the optional parameter :py:attr:`slice` of the :py:func:`~epi.core.inference.inference` function.
.. .. code-block:: python
diff --git a/docs/source/welcome.rst b/docs/source/welcome.rst
index e9eb40cb..07b25c52 100644
--- a/docs/source/welcome.rst
+++ b/docs/source/welcome.rst
@@ -1,108 +1,116 @@
.. image:: images/epi.png
- :width: 200pt
+ :width: 100pt
-------------------------------
EPI - Euler Parameter Inference
-------------------------------
-
-The Euler Parameter Inference Codebase (EPI) is a python package for inverse parameter inference.
-The EPI algorithm takes observed data and a model as input and returns a parameter distribution, which is consistent with the observed data by solving the inverse problem directly. In the case of a one-to-one mapping, this is the true underlying distribution.
-We support SBML ode models and user provided models.
+Euler Parameter Inference (EPI) is a Python package for inverse parameter inference. It provides an implementation of the EPI algorithm, which takes observed data and a model as input and returns a parameter distribution consistent with the observed data by solving the inverse problem directly. In the case of a one-to-one mapping, this is the true underlying distribution.
.. Put the badges here?
+.. image:: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/pages/pages-build-deployment/badge.svg
+ :target: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/pages/pages-build-deployment
+.. image:: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/sphinx.yml/badge.svg
+ :target: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/sphinx.yml
+.. image:: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/ci.yml/badge.svg
+ :target: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/ci.yml
+.. image:: https://img.shields.io/github/actions/workflow/status/Systems-Theory-in-Systems-Biology/EPI/ci.yml?label=pytest&logo=pytest
+ :target: https://github.com/Systems-Theory-in-Systems-Biology/EPI/actions/workflows/ci.yml
+ :alt: pytest
+
+.. image:: https://img.shields.io/badge/flake8-checked-blue.svg
+ :target: https://flake8.pycqa.org/
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+.. image:: https://img.shields.io/badge/License-MIT-yellow.svg
+ :target: ./LICENSE.md
+.. image:: https://img.shields.io/badge/python-3.10-purple.svg
+ :target: https://www.python.org/
+
+--------
+Features
+--------
+
+EPI supports
+
+* SBML ode models
+* User provided models
+* Models with automatic differentation using jax
------------
Installation
------------
-The package is no yet available on pypi.
+**Important**: The package is not yet available on PyPI.
+
.. and can be installed with:
.. .. code-block:: bash
.. pip install epi
-You can build the library from the newest source code by following the :doc:`Development Quickstart Guide </MarkdownLinks/development>`.
+You can build the library from the latest source code by following the :doc:`Development Quickstart Guide </MarkdownLinks/development>`.
------------
How to start
------------
-| Derive your model from :py:class:`epi.core.model.Model` and implement the abstract functions :py:meth:`~epi.core.model.Model.forward` and :py:meth:`~epi.core.model.Model.jacobian`.
-| You also need to define the parameter sampling limits and the central parameter. This is done by implementing the functions :py:meth:`~epi.core.model.Model.getParamSamplingLimits` and :py:meth:`~epi.core.model.Model.getCentralParam`.
-| The last requirement is to define the data and parameter Dimension, dataDim and paramDim.
+.. To use EPI, derive your model from :py:class:`epi.core.model.Model` and implement the abstract functions :py:meth:`~epi.core.model.Model.forward` and :py:meth:`~epi.core.model.Model.jacobian`. You also need to define the data and parameter dimension, :py:attr:`~epi.core.model.Model.data_dim` and :py:attr:`~epi.core.model.Model.param_dim` of your model.
+To use EPI, derive your model from the Model class and implement the abstract functions. Here's an example code snippet:
.. code-block:: python
-
- # my_model.py
-
- import jax.numpy as jnp
- from epi.core.model import Model
+
+ # my_model.py
+ import jax.numpy as jnp
- class MyModel(Model):
+ from epi.core.model import Model
- paramDim = N # The dimension of a parameter point
- dataDim = M # The dimension of a data point
+ class MyModel(Model):
- def forward(self, param):
- return jnp.array(...)
+ param_dim = N # The dimension of a parameter point
+ data_dim = M # The dimension of a data point
- def getParamSamplingLimits(self):
- return jnp.array([[-1.,1.], [-101.1, 13.4],...]) # [[UpperBound_dim1,LowerBound_dim1],...]
+ def forward(self, param):
+ return jnp.array(...)
- def getCentralParam(self):
- return jnp.array([0.5, -30.0,...])
-
- def jacobian(self, param):
- return jnp.array(...)
+ def jacobian(self, param):
+ return jnp.array(...)
To evaluate the model and infer the parameter distribution, call:
.. code-block:: python
- from epi.core.sampling import inference
- from my_model import MyModel
-
- model = MyModel()
- inference(model=model, dataPath="my_data.csv")
+ from epi.core.inference import inference
-The file :file:`my_data.csv` has to contain the data in csv format with :code:`seperator=,` in the format
-
-.. code-block:: text
-
- # my_data.csv
+ from my_model import MyModel
- datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
- datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
- datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
- ...
- datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
+ central_param = np.array([0.5, -1.5, ...])
+ param_limits = np.array([[0.0, 1.0], [-3.0, 0.0], ...])
-which corresponds to a matrix with the shape `nSamples x dataDim`.
-The parameter dataPath defaults to `Data/<ModelName>/<ModelName>Data.csv`. The other parameters `numRuns`, `numWalkers`, `numSteps`, `numProcesses` have fixed defaults. The results are written to three files:
+ model = MyModel(central_param, param_limits)
+ inference(model=model, data="my_data.csv")
-* `./Applications/<ModelName>/OverallParams.csv`
-* `./Applications/<ModelName>/OverallSimResults.csv`
-* `./Applications/<ModelName>/OverallDensityEvals.csv`
+The parameter :py:attr:`data` can be a numpy-2d-array or a PathLike object that points to a CSV file. In the example shown above, the CSV file :file:`my_data.csv` should contain the data in the following format:
-and contain the sampled parameters, the corresponding data points obtained from the model forward pass and the corresponding density evaluation.
+.. code-block:: text
-.. note::
-
- Please read the documentation for our :doc:`Examples </examples>`.
+ datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
+ datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
+ datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
+ ...
+ datapoint_dim1, datapoint_dim2, datapoint_dim3, ..., datapoint_dimN
-.. TODO: move this ?
+This corresponds to a matrix with the shape :py:attr:`nSamples` x :py:attr:`data_dim`. For more available options and parameters for the :py:mod:`~epi.core.inference` method, please refer to the API documentation.
+Note that the inference can be done with grid-based methods (dense grids, sparse grids) or sampling methods (mcmc).
-.. You can also derive your model from
+The results are stored in the following locations
-.. * :py:class:`~epi.core.model.JaxModel`: The jacobian of your forward method is automatically calculated. Use jax.numpy instead of numpy for the forward method implementation!
-.. * :py:class:`~epi.core.model.SBMLModel`: The complete model is derived from the given sbml file. You don't need to define the Model manually.
+* :file:`./Applications/<ModelName>/.../OverallParams.csv`
+* :file:`./Applications/<ModelName>/.../OverallSimResults.csv`
+* :file:`./Applications/<ModelName>/.../OverallDensityEvals.csv`
-.. Optionally you can also inherit, and implement the abstract functions from
+These files contain the sampled parameters, the corresponding data points obtained from the model forward pass, and the corresponding density evaluation.
-.. * :py:class:`~epi.core.model.ArtificialModelInterface`: This allows you to check if the inversion algorithm is working for your model using the function :py:meth:`~epi.core.model.Model.test`.
+.. note::
-.. * :py:class:`~epi.core.model.VisualizationModelInterface`: This allows you to plot the results of the data inference using the function :py:meth:`~epi.core.model.Model.plot`.
-
-.. .. warning:: TODO: The functions plot and test may not exist yet!!!
+ Please read the documentation for our :doc:`Examples </examples>`.
diff --git a/epi/core/dense_grid.py b/epi/core/dense_grid.py
new file mode 100644
index 00000000..db0df59a
--- /dev/null
+++ b/epi/core/dense_grid.py
@@ -0,0 +1,213 @@
+from enum import Enum
+from multiprocessing import Pool
+from typing import Union
+
+import numpy as np
+from numpy.polynomial.chebyshev import chebpts1
+
+from epi.core.model import Model
+from epi.core.result_manager import ResultManager
+from epi.core.sampling import calc_kernel_width
+from epi.core.transformations import evaluate_density
+
+
+class DenseGridType(Enum):
+ """The type of grid to be used."""
+
+ EQUIDISTANT = 0 #: The equidistant grid has the same distance between two grid points in each dimension.
+ CHEBYSHEV = 1 #: The Chebyshev grid is a tensor product of Chebyshev polynomial roots. They are optimal for polynomial interpolation and quadrature.
+
+
+def generate_chebyshev_grid(
+ num_grid_points: np.ndarray, limits: np.ndarray, flatten=False
+) -> Union[np.ndarray, list[np.ndarray]]:
+ """Generate a grid with the given number of grid points for each dimension.
+
+ Args:
+ num_grid_points(np.ndarray): The number of grid points for each dimension.
+ limits(np.ndarray): The limits for each dimension.
+        flatten(bool): If True, the grid is returned as a flattened array. If False, the grid is returned as a list of arrays, one for each dimension. (Default value = False)
+
+ Returns:
+ np.ndarray: The grid containing the grid points.
+
+ """
+ ndim = num_grid_points.size
+ axes = [
+ chebpts1(num_grid_points[i]) * (limits[i][1] - limits[i][0]) / 2
+ + (limits[i][1] + limits[i][0]) / 2
+ for i in range(ndim)
+ ]
+ mesh = np.meshgrid(*axes, indexing="ij")
+ if flatten:
+ return np.array(mesh).reshape(ndim, -1).T
+ else:
+ return mesh
+
+
+def generate_regular_grid(
+ num_grid_points: np.ndarray, limits: np.ndarray, flatten=False
+) -> Union[np.ndarray, list[np.ndarray]]:
+ """Generate a grid with the given number of grid points for each dimension.
+
+ Args:
+ num_grid_points(np.ndarray): The number of grid points for each dimension.
+ limits(np.ndarray): The limits for each dimension.
+        flatten(bool): If True, the grid is returned as a flattened array. If False, the grid is returned as a list of arrays, one for each dimension. (Default value = False)
+
+ Returns:
+ np.ndarray: The grid containing the grid points.
+
+ """
+ ndim = num_grid_points.size
+ axes = [
+ np.linspace(limits[i][0], limits[i][1], num=num_grid_points[i])
+ for i in range(ndim)
+ ]
+ mesh = np.meshgrid(*axes, indexing="ij")
+ if flatten:
+ return np.array(mesh).reshape(ndim, -1).T
+ else:
+ return mesh
+
+
+def run_dense_grid_evaluation(
+ model: Model,
+ data: np.ndarray,
+ slice: np.ndarray,
+ result_manager: ResultManager,
+ num_grid_points: np.ndarray,
+ dense_grid_type: DenseGridType,
+ num_processes: int,
+ load_balancing_safety_faktor: int,
+) -> None:
+ """This function runs a dense grid evaluation for the given model and data.
+
+ Args:
+ model(Model): The model for which the evaluation should be performed.
+ data(np.ndarray): The data for which the evaluation should be performed.
+ slice(np.ndarray): The slice for which the evaluation should be performed.
+ result_manager(ResultManager): The result manager that should be used to save the results.
+ num_grid_points(np.ndarray): The number of grid points for each dimension.
+        dense_grid_type(DenseGridType): The type of grid that should be used.
+        num_processes(int): The number of processes that should be used for the evaluation.
+ load_balancing_safety_faktor(int): Split the grid into num_processes * load_balancing_safety_faktor chunks.
+ This will ensure that each process can be loaded with a similar amount of work if the run time difference between the evaluations
+ does not exceed the load_balancing_safety_faktor. (Default value = 4)
+
+ Raises:
+ ValueError: If the dimension of the numbers of grid points does not match the number of parameters in the slice.
+ ValueError: If the grid type is unknown.
+
+ """
+
+ if slice.shape[0] != num_grid_points.shape[0]:
+ raise ValueError(
+ f"The dimension of the numbers of grid points {num_grid_points} does not match the number of parameters in the slice {slice}"
+ )
+ limits = model.param_limits
+ data_stdevs = calc_kernel_width(data)
+
+ if dense_grid_type == DenseGridType.CHEBYSHEV:
+ grid = generate_chebyshev_grid(num_grid_points, limits, flatten=True)
+ elif dense_grid_type == DenseGridType.EQUIDISTANT:
+ grid = generate_regular_grid(num_grid_points, limits, flatten=True)
+ else:
+ raise ValueError(f"Unknown grid type: {dense_grid_type}")
+
+ # Split the grid into chunks that can be evaluated by each process
+ grid_chunks = np.array_split(
+ grid, num_processes * load_balancing_safety_faktor
+ )
+ # Calc cumsum for indexing
+ grid_chunks_cumsum = np.cumsum(
+ [0] + [grid_chunk.shape[0] for grid_chunk in grid_chunks]
+ )
+ # Define a function which evaluates the density for a given grid chunk
+ global evaluate_on_grid_chunk # Needed to make this function pickleable
+
+ def evaluate_on_grid_chunk(args):
+ grid_chunk, model, data, data_stdevs, slice = args
+ # Init the result array
+ evaluation_results = np.zeros(
+ (grid_chunk.shape[0], data.shape[1] + slice.shape[0] + 1)
+ )
+ # Evaluate the grid points
+ for i, gridPoint in enumerate(grid_chunk):
+ density, param_sim_res_density = evaluate_density(
+ gridPoint, model, data, data_stdevs, slice
+ )
+ evaluation_results[i] = param_sim_res_density
+ return evaluation_results
+
+ pool = Pool(processes=num_processes)
+ results = np.zeros((grid.shape[0], data.shape[1] + slice.shape[0] + 1))
+ for i, result in enumerate(
+ pool.imap(
+ evaluate_on_grid_chunk,
+ [
+ (grid_chunks[i], model, data, data_stdevs, slice)
+ for i in range(len(grid_chunks))
+ ],
+ )
+ ):
+ results[grid_chunks_cumsum[i] : grid_chunks_cumsum[i + 1]] = result
+
+ pool.close()
+ pool.join()
+
+ result_manager.save_overall(
+ slice,
+ results[:, 0 : data.shape[1]],
+ results[:, data.shape[1] : data.shape[1] + slice.shape[0]],
+ results[:, data.shape[1] + slice.shape[0] :],
+ )
+
+
+def inference_dense_grid(
+ model: Model,
+ data: np.ndarray,
+ result_manager: ResultManager,
+ slices: list[np.ndarray],
+ num_processes: int,
+ num_grid_points: Union[int, list[np.ndarray]] = 10,
+ dense_grid_type: DenseGridType = DenseGridType.EQUIDISTANT,
+ load_balancing_safety_faktor: int = 4,
+) -> None:
+ """This function runs a dense grid evaluation for the given model and data. The grid points are distributed evenly over the parameter space.
+
+ Args:
+ model (Model): The model describing the mapping from parameters to data.
+ data (np.ndarray): The data to be used for the inference.
+ result_manager (ResultManager): The result manager to be used for the inference.
+ slices (np.ndarray): A list of slices to be used for the inference.
+ num_processes (int): The number of processes to be used for the inference.
+ num_grid_points (Union[int, list[np.ndarray]], optional): The number of grid points to be used for each parameter. If an int is given, it is assumed to be the same for all parameters. Defaults to 10.
+ load_balancing_safety_faktor (int, optional): Split the grid into num_processes * load_balancing_safety_faktor chunks. Defaults to 4.
+
+ Raises:
+ TypeError: If the num_grid_points argument has the wrong type.
+ """
+
+ # If the number of grid points is given as an int, we construct a list of arrays with the same number of grid points for each parameter in the slice
+ if isinstance(num_grid_points, int):
+ num_grid_points = [
+ np.full(len(slice), num_grid_points) for slice in slices
+ ]
+    elif isinstance(num_grid_points, list):
+ pass
+ else:
+ raise TypeError(
+ f"The num_grid_points argument has to be either an int or a list of arrays. The passed argument was of type {type(num_grid_points)}"
+ )
+ for slice, n_points in zip(slices, num_grid_points):
+ run_dense_grid_evaluation(
+ model=model,
+ data=data,
+ slice=slice,
+ result_manager=result_manager,
+ num_grid_points=n_points,
+ dense_grid_type=dense_grid_type,
+ num_processes=num_processes,
+ load_balancing_safety_faktor=load_balancing_safety_faktor,
+ )
diff --git a/epi/core/functions.py b/epi/core/functions.py
deleted file mode 100644
index 4b3aa720..00000000
--- a/epi/core/functions.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from typing import Tuple
-
-import jax.numpy as jnp
-import numpy as np
-from jax import jit
-
-from epi import logger
-from epi.core.kde import evalKDEGauss
-from epi.core.model import Model
-
-
-def evalLogTransformedDensity(
- param: np.ndarray, model: Model, data: np.ndarray, dataStdevs: np.ndarray
-) -> Tuple[np.double, np.ndarray]:
- """Given a simulation model, its derivative and corresponding data, evaluate the natural log of the parameter density that is the backtransformed data distribution.
- This function is intended to be used with the emcee sampler and can be implemented more efficiently at some points.
-
- Input: param (parameter for which the transformed density shall be evaluated)
- model
- data (data for the model: 2D array with shape (#numDataPoints, #dataDim))
- dataStdevs (array of suitable kernel standard deviations for each data dimension)
- Output: logTransformedDensity (natural log of parameter density at the point param)
- : allRes (array concatenation of parameters, simulation results and evaluated density, stored as "blob" by the emcee sampler)
- """
- limits = model.getParamSamplingLimits()
-
- # Check if the tried parameter is within the just-defined bounds and return the lowest possible log density if not.
- if np.any((param < limits[:, 0]) | (param > limits[:, 1])):
- logger.info(
- "Parameters outside of predefined range"
- ) # Slows down the sampling to much? -> Change logger level to warning or even error
- return -np.inf, np.zeros(model.paramDim + model.dataDim + 1)
-
- # If the parameter is within the valid ranges...
- else:
- # Evaluating the model and the jacobian for the specified parameter simultaneously provide a little speedup over calculating it separately in some cases.
- simRes, jac = model.valjac(param)
-
- # Evaluate the data density in the simulation result.
- densityEvaluation = evalKDEGauss(data, simRes, dataStdevs)
-
- # Calculate the simulation model's pseudo-determinant in the parameter point (also called the correction factor).
- correction = calcGramDeterminant(jac)
-
- # Multiply data density and correction factor.
- trafoDensityEvaluation = densityEvaluation * correction
-
- # Use the log of the transformed density because emcee requires this.
- logTransformedDensity = np.log(trafoDensityEvaluation)
-
- # Store the current parameter, its simulation result as well as its density in a large vector that is stored separately by emcee.
- allRes = np.concatenate(
- (param, simRes, np.array([trafoDensityEvaluation]))
- )
-
- return logTransformedDensity, allRes
-
-
-def calcGramDeterminant(jac: jnp.ndarray) -> jnp.double:
- """Evaluate the pseudo-determinant of the jacobian (that serves as a correction term) in one specific parameter point.
- Returns 0 if the correction factor is not finite.
-
- :param jac: The jacobian for which the pseudo determinant shall be calculated
- :type jac: jnp.ndarray
- """
- correction = _calcGramDeterminant(jac)
- # If the correction factor is not finite, return 0 instead to not affect the sampling.
- if not jnp.isfinite(correction):
- correction = 0.0
- logger.warning("Invalid value encountered for correction factor")
- return correction
-
-
-@jit
-def _calcGramDeterminant(jac: jnp.ndarray) -> jnp.double:
- """Jitted calculation of the pseudo-determinant of the jacobian. This function is called by calcGramDeterminant() and should not be called directly.
- It does not check if the correction factor is finite.
-
- Not much faster than a similar numpy version. However it can run on gpu and is maybe a bit faster because we can jit compile the sequence of operations.
- """
-
- jac = jnp.atleast_2d(jac)
-
- if jac.shape[0] == jac.shape[1]:
- return jnp.abs(jnp.linalg.det(jac))
- else:
- jacT = jnp.transpose(jac)
- # The pseudo-determinant is calculated as the square root of the determinant of the matrix-product of the Jacobian and its transpose.
- # For numerical reasons, one can regularize the matrix product by adding a diagonal matrix of ones before calculating the determinant.
- # correction = np.sqrt(np.linalg.det(np.matmul(jacT,jac) + np.eye(param.shape[0])))
- correction = jnp.sqrt(jnp.linalg.det(jnp.matmul(jacT, jac)))
- return correction
diff --git a/epi/core/inference.py b/epi/core/inference.py
new file mode 100644
index 00000000..f7d2fd9b
--- /dev/null
+++ b/epi/core/inference.py
@@ -0,0 +1,102 @@
+import os
+import pathlib
+from enum import Enum
+from typing import Optional, Union
+
+import jax.numpy as jnp
+import numpy as np
+
+from epi.core.dense_grid import inference_dense_grid
+from epi.core.model import Model
+from epi.core.result_manager import ResultManager
+from epi.core.sampling import inference_mcmc
+from epi.core.sparsegrid import inference_sparse_grid
+
+
+# Define an enum for the inference types: DenseGrid, MCMC
+class InferenceType(Enum):
+ """The type of inference to be used."""
+
+ DENSE_GRID = 0 #: The dense grid inference uses a dense grid to evaluate the joint distribution.
+ MCMC = 1 #: The MCMC inference uses a Markov Chain Monte Carlo sampler to sample from the joint distribution.
+ SPARSE_GRID = 2 #: The sparse grid inference uses a sparse grid to evaluate the joint distribution. It is not tested and not recommended.
+
+
+def inference(
+ model: Model,
+ data: Union[str, os.PathLike, np.ndarray],
+ inference_type: InferenceType = InferenceType.MCMC,
+ slices: Optional[list[np.ndarray]] = None,
+ num_processes: int = 4,
+ run_name: str = "default_run",
+ result_manager: ResultManager = None,
+ continue_sampling: bool = False,
+ **kwargs,
+) -> None:
+ """Starts the parameter inference for the given model. If a data path is given, it is used to load the data for the model. Else, the default data path of the model is used.
+
+ Args:
+ model(Model): The model describing the mapping from parameters to data.
+ data(Union[str, os.PathLike, np.ndarray]): The data to be used for the inference. If a string is given, it is assumed to be a path to a file containing the data.
+ inference_type(InferenceType, optional): The type of inference to be used. (Default value = InferenceType.MCMC)
+ slices(list[np.ndarray], optional): A list of slices to be used for the inference. If None, the full joint distribution is computed. (Default value = None)
+ num_processes(int, optional): The number of processes to be used for the inference. (Default value = 4)
+ run_name(str): The name of the run. (Default value = "default_run")
+ result_manager(ResultManager, optional): The result manager to be used for the inference. If None, a new result manager is created with default paths and saving methods. (Default value = None)
+ continue_sampling(bool, optional): If True, the inference will continue sampling from the last saved point. (Default value = False)
+ **kwargs: Additional keyword arguments to be passed to the inference function. The possible parameters depend on the inference type.
+
+ Returns:
+
+ """
+
+ # Load data from file if necessary
+ if isinstance(data, (str, os.PathLike, pathlib.Path)):
+ data = np.loadtxt(data, delimiter=",", ndmin=2)
+ elif not isinstance(data, (np.ndarray, jnp.ndarray)):
+ raise TypeError(
+ f"The data argument must be a path to a file or a numpy array. The argument passed was of type {type(data)}."
+ )
+
+ slices = slices or [
+ np.arange(model.param_dim)
+ ] # If no slice is given, compute full joint distribution, i.e. a slice with all parameters
+ result_manager = result_manager or ResultManager(
+ model.name, run_name
+ ) # If no result_manager is given, create one with default paths
+
+ if not continue_sampling:
+ result_manager.delete_application_folder_structure(model, slices)
+ result_manager.create_application_folder_structure(model, slices)
+
+ if inference_type == InferenceType.DENSE_GRID:
+ inference_dense_grid(
+ model=model,
+ data=data,
+ result_manager=result_manager,
+ slices=slices,
+ num_processes=num_processes,
+ **kwargs,
+ )
+ elif inference_type == InferenceType.MCMC:
+ inference_mcmc(
+ model=model,
+ data=data,
+ result_manager=result_manager,
+ slices=slices,
+ num_processes=num_processes,
+ **kwargs,
+ )
+ elif inference_type == InferenceType.SPARSE_GRID:
+ inference_sparse_grid(
+ model=model,
+ data=data,
+ result_manager=result_manager,
+ slices=slices,
+ num_processes=num_processes,
+ **kwargs,
+ )
+ else:
+ raise NotImplementedError(
+ f"The inference type {inference_type} is not implemented yet."
+ )
diff --git a/epi/core/kde.py b/epi/core/kde.py
index c30dab39..dc0bd89a 100644
--- a/epi/core/kde.py
+++ b/epi/core/kde.py
@@ -4,39 +4,41 @@
.. _KDE: https://en.wikipedia.org/wiki/Kernel_density_estimation
"""
+import typing
+
import jax.numpy as jnp
from jax import jit
from jax.scipy.stats import cauchy, norm
@jit
-def evalKDECauchy(
- data: jnp.ndarray, simRes: jnp.ndarray, scales: jnp.ndarray
-) -> jnp.double:
+def eval_kde_cauchy(
+ data: jnp.ndarray, sim_res: jnp.ndarray, scales: jnp.ndarray
+) -> typing.Union[jnp.double, jnp.ndarray]:
r"""Evaluates a Cauchy Kernel Density estimator in one or several simulation results.
Assumes that each data point is a potentially high-dimensional sample
from a joint data distribution.
This is for example given for time-series data, where each evaluation
time is one dimension of the data point.
- In the following formula x are the evaluation points (simRes) and y is the data.
+ In the following formula x are the evaluation points (sim_res) and y is the data.
.. math::
density_{i} = \frac{1}{samples} \sum_{s=1}^{samples} \prod_{d=1}^{dims} \frac{1}{(\frac{x_{i,d} - y_{s,d}}{scales_d})^2 \; \pi \; scales_d}
- :param data: data for the model: 2D array with shape (#Samples, #MeasurementDimensions)
- :type data: jnp.ndarray
- :param simRes: evaluation coordinates array of shape (#nEvals, #MeasurementDimensions) or (#MeasurementDimensions,)
- :type simRes: jnp.ndarray
- :param scales: one scale for each dimension
- :type scales: jnp.ndarray
- :return: estimated kernel density evaluated at the simulation result(s), shape: (#nEvals,) or ()
- :rtype: jnp.double
+ Args:
+ data(jnp.ndarray): data for the model: 2D array with shape (#Samples, #MeasurementDimensions)
+ sim_res(jnp.ndarray): evaluation coordinates array of shape (#nEvals, #MeasurementDimensions) or (#MeasurementDimensions,)
+ scales(jnp.ndarray): one scale for each dimension
+
+ Returns:
+ typing.Union[jnp.double, jnp.ndarray]: estimated kernel density evaluated at the simulation result(s), shape: (#nEvals,) or ()
+
"""
return (
jnp.sum(
jnp.prod(
- cauchy.pdf(simRes[..., jnp.newaxis, :], data, scales),
+ cauchy.pdf(sim_res[..., jnp.newaxis, :], data, scales),
axis=-1, # prod over #measurementDimensions
),
axis=-1, # sum over sampleDim
@@ -46,28 +48,27 @@ def evalKDECauchy(
@jit
-def evalKDEGauss(
- data: jnp.ndarray, simRes: jnp.ndarray, scales: jnp.ndarray
-) -> jnp.double:
+def eval_kde_gauss(
+ data: jnp.ndarray, sim_res: jnp.ndarray, scales: jnp.ndarray
+) -> typing.Union[jnp.double, jnp.ndarray]:
+    """Evaluates a Gaussian Kernel Density estimator in one or several simulation results.
Assumes that each data point is a potentially high-dimensional sample from a joint data distribution.
This is for example given for time-series data, where each evaluation time is one dimension of the data point.
While it is possible to define different standard deviations for different measurement dimensions, it is so far not possible to define covariances.
- :param data: data for the model: 2D array with shape (#Samples, #MeasurementDimensions)
- :type data: jnp.ndarray
- :param simRes: evaluation coordinates array of shape (#nEvals, #MeasurementDimensions) or (#MeasurementDimensions,)
- :type simRes: jnp.ndarray
- :param scales: one scale for each dimension
- :type scales: jnp.ndarray
- :return: estimated kernel density evaluated at the simulation result(s), shape: (#nEvals,) or ()
- :rtype: jnp.double
+ Args:
+ data(jnp.ndarray): data for the model: 2D array with shape (#Samples, #MeasurementDimensions)
+ sim_res(jnp.ndarray): evaluation coordinates array of shape (#nEvals, #MeasurementDimensions) or (#MeasurementDimensions,)
+ scales(jnp.ndarray): one scale for each dimension
+
+ Returns:
+ typing.Union[jnp.double, jnp.ndarray]: estimated kernel density evaluated at the simulation result(s), shape: (#nEvals,) or ()
"""
return (
jnp.sum(
jnp.prod(
- norm.pdf(simRes[..., jnp.newaxis, :], data, scales),
+ norm.pdf(sim_res[..., jnp.newaxis, :], data, scales),
axis=-1, # prod over #measurementDimensions
),
axis=-1, # sum over sampleDim
@@ -77,17 +78,20 @@ def evalKDEGauss(
@jit
-def calcKernelWidth(data: jnp.ndarray) -> jnp.double:
+def calc_kernel_width(data: jnp.ndarray) -> jnp.ndarray:
"""Sets the width of the kernels used for density estimation of the data according to the Silverman rule
- Input: data: 2d array with shape (#Samples, #MeasurementDimensions): data for the model
+ Args:
+ data(jnp.ndarray): data for the model: 2D array with shape (#Samples, #MeasurementDimensions)
+
+ Returns:
+ jnp.ndarray: kernel width for each data dimension, shape: (#MeasurementDimensions,)
- Output: stdevs: array with shape (#MeasurementDimensions): suitable kernel standard deviations for each measurement dimension
"""
- numDataPoints, dataDim = data.shape
+ num_data_points, data_dim = data.shape
stdevs = jnp.std(data, axis=0, ddof=1)
# Silvermans rule
- return stdevs * (numDataPoints * (dataDim + 2) / 4.0) ** (
- -1.0 / (dataDim + 4)
+ return stdevs * (num_data_points * (data_dim + 2) / 4.0) ** (
+ -1.0 / (data_dim + 4)
)
diff --git a/epi/core/model.py b/epi/core/model.py
index 3e678620..7a60a177 100644
--- a/epi/core/model.py
+++ b/epi/core/model.py
@@ -1,34 +1,39 @@
import inspect
import os
-import shutil
from abc import ABC, abstractmethod
from functools import partial
-from typing import Tuple
+from typing import Optional, Tuple, Union
+import jax.numpy as jnp
import numpy as np
-import seedir
-from jax import jacrev, jit
-from seedir import FakeDir, FakeFile
+from jax import jacrev, jit, vmap
-from epi import logger
-from epi.core.kde import calcKernelWidth
+import amici
from epi.jax_extension import value_and_jacrev
class Model(ABC):
"""The base class for all models using the EPI algorithm.
- It contains three abstract methods which need to be implemented by subclasses
+ Args:
+ central_param(np.ndarray): The central parameter for the model. (Default value = None)
+ param_limits(np.ndarray): The limits for the parameters. The limits are given as a 2D array with shape (param_dim, 2). (Default value = None)
+ name(str): The name of the model. The class name is used if no name is given. (Default value = None)
"""
- paramDim = None
- dataDim = None
+ param_dim: Optional[
+ np.ndarray
+ ] = None #: The dimension of the parameter space of the model. It must be defined in the subclass.
+ data_dim: Optional[
+ np.ndarray
+ ] = None #: The dimension of the data space of the model. It must be defined in the subclass.
def __init_subclass__(cls, **kwargs):
+ """Check if the required attributes are set."""
if not inspect.isabstract(cls):
for required in (
- "paramDim",
- "dataDim",
+ "param_dim",
+ "data_dim",
):
if not getattr(cls, required):
raise AttributeError(
@@ -36,333 +41,137 @@ def __init_subclass__(cls, **kwargs):
)
return cls
- def __init__(self, delete: bool = False, create: bool = True) -> None:
- if delete:
- self.deleteApplicationFolderStructure()
- self.createApplicationFolderStructure()
- elif create:
- self.createApplicationFolderStructure()
+ def __init__(
+ self,
+ central_param: np.ndarray,
+ param_limits: np.ndarray,
+ name: Optional[str] = None,
+ ) -> None:
+ self.central_param = central_param
+ self.param_limits = param_limits
- self.setDataPath(
- "Data/" + self.getModelName() + "Data.csv"
- ) # Set default data path
+ self.name = name or self.__class__.__name__
@abstractmethod
- def getParamSamplingLimits(self) -> np.ndarray:
- """Define model-specific lower and upper limits for the sampling
- to avoid parameter regions where the evaluation of the model is instable.
+ def forward(self, param: np.ndarray) -> np.ndarray:
+        """Executes the forward pass of the model to obtain data from a parameter. You can also do equivalently :code:`model(param)`.
- :raises NotImplementedError: Implement this method allow the mcmc sampler to work stably
- :return: The limits in the format np.array([lower_dim1, upper_dim1], [lower_dim2, upper_dim2], ...)
- :rtype: np.ndarray
- """
- raise NotImplementedError
+ Args:
+ param(np.ndarray): The parameter for which the data should be generated.
- @abstractmethod
- def getCentralParam(self) -> np.ndarray:
- """Define a model-specific central parameter point, which will be used as starting point for the mcmc sampler.
+ Returns:
+ np.ndarray: The data generated from the parameter.
- :raises NotImplementedError: Implement this method to provide a good starting point for the mcmc sampler.
- :return: A single parameter point in the format np.array([p_dim1, p_dim2, ...])
- :rtype: np.ndarray
"""
raise NotImplementedError
@abstractmethod
- def forward(self, param: np.ndarray):
- """Executed the forward pass of the model to obtain data from a parameter. You can also do equivalently :code:`model(param)`.
+ def jacobian(self, param: np.ndarray) -> np.ndarray:
+ """Evaluates the jacobian of the :func:`~epi.core.model.Model.forward` method.
- :param param: The parameter(set) for which the model should be evaluated.
- :raises NotImplementedError: Implement this method to make you model callable.
- """
- raise NotImplementedError
+ Args:
+ param(np.ndarray): The parameter for which the jacobian should be evaluated.
- @abstractmethod
- def jacobian(self, param: np.ndarray):
- """Evaluates the jacobian of the :func:`~epic.core.model.Model.forward` method.
+ Returns:
+ np.ndarray: The jacobian for the variables returned by the :func:`~epi.core.model.Model.forward` method with respect to the parameters.
- :param param: The parameter(set) for which the jacobian of your model should be evaluated.
- :type param: np.ndarray
- :return: The jacobian for the variables returned by the :func:`~epic.core.model.Model.forward` method with respect to the parameters.
- :rtype: np.ndarray
"""
raise NotImplementedError
- def valjac(self, param: np.ndarray):
+ def forward_and_jacobian(
+ self, param: np.ndarray
+ ) -> Tuple[np.ndarray, np.ndarray]:
"""Evaluates the jacobian and the forward pass of the model at the same time. If the method is not overwritten in a subclass it,
- it simply calls :func:`~epic.core.model.Model.forward` and :func:`~epic.core.model.Model.jacobian`.
-
- :param param: The parameter(set) for which the model and the jacobian should be evaluated.
- :type param: np.ndarray
- """
+ it simply calls :func:`~epi.core.model.Model.forward` and :func:`~epi.core.model.Model.jacobian`.
- return self.forward(param), self.jacobian(param)
-
- def setDataPath(self, path: str) -> None:
- """Set the path to the data file which shall be used from now on.
-
- :param path: The path to the data file.
- :type path: str
- """
- self.dataPath = path
-
- def dataLoader(
- self,
- ) -> Tuple[int, np.ndarray, np.ndarray, np.ndarray]:
- """Load the data from the data file found under the models current data path and calculate several properties of the data.
-
- :return: The dimension of the data space, the data and the estimated optimal kernel width for each dimension of the data.
- """
-
- data = np.loadtxt(self.dataPath, delimiter=",", ndmin=2)
- dataStdevs = calcKernelWidth(data)
-
- return (
- self.dataDim,
- data,
- dataStdevs,
- )
-
- def loadSimResults(self, numBurnSamples: int, occurrence: int):
- """Load the files generated by the EPI algorithm through sampling
-
- :param model: Model from which the results will be loaded
- :type model: Model
- :param numBurnSamples: Ignore the first samples of each chain
- :type numBurnSamples: int
- :param occurrence: step of sampling from chains
- :type occurrence: int
- :return: _description_
- :rtype: _type_
- """
- densityEvals = np.loadtxt(
- self.getApplicationPath() + "/OverallDensityEvals.csv",
- delimiter=",",
- )[numBurnSamples::occurrence]
- simResults = np.loadtxt(
- self.getApplicationPath() + "/OverallSimResults.csv",
- delimiter=",",
- ndmin=2,
- )[numBurnSamples::occurrence, :]
- paramChain = np.loadtxt(
- self.getApplicationPath() + "/OverallParams.csv",
- delimiter=",",
- ndmin=2,
- )[numBurnSamples::occurrence, :]
- return densityEvals, simResults, paramChain
-
- def getModelName(self) -> str:
- """Returns the name of the class to which the object belongs. Overwrite it if you want to
- give your model a custom name, e. g. depending on the name of your parameters.
-
- :return: The class name of the calling object.
- :rtype: str
- """
- return self.__class__.__name__
+ Args:
+ param(np.ndarray): The parameter for which the jacobian should be evaluated.
- def getApplicationPath(self) -> str:
- """Returns the path to the simulation results folder, containing also intermediate results
-
- :return: path as string to the simulation folder
- :rtype: str
- """
- path = "Applications/" + self.getModelName()
- return path
+ Returns:
+ typing.Tuple[np.ndarray, np.ndarray]: The data generated from the parameter and the jacobian for the variables returned by the :func:`~epi.core.model.Model.forward` method with respect to the parameters.
- def createApplicationFolderStructure(self) -> None:
- """Creates the `Application` folder including subfolder where all simulation results
- are stored for this model. No files are deleted during this action.
"""
- indent = 4
- plotFolderTree = (
- (" " * indent + "- SpiderWebs/ \n" + " " * indent + "- Plots/")
- if self.isVisualizable()
- else ""
- )
- os.makedirs("Data", exist_ok=True)
- applicationFolderStructure = (
- "Applications/ \n"
- " - {modelName}/ \n"
- " - DensityEvals/ \n"
- " - Params/ \n"
- " - SimResults/ \n"
- )
- path = "."
- structure = applicationFolderStructure + plotFolderTree
-
- def create(f, root):
- fpath = f.get_path()
- joined = os.path.join(root, fpath)
- if isinstance(f, FakeDir):
- try:
- os.mkdir(joined)
- except FileExistsError:
- logger.info(f"Directory `{joined}` already exists")
- elif isinstance(f, FakeFile):
- try:
- with open(joined, "w"):
- pass
- except FileExistsError:
- logger.info(f"File `{joined}` already exists")
-
- fakeStructure = seedir.fakedir_fromstring(
- structure.format(modelName=self.getModelName())
- )
- fakeStructure.realize = lambda path_arg: fakeStructure.walk_apply(
- create, root=path_arg
- )
- fakeStructure.realize(path)
-
- def deleteApplicationFolderStructure(self):
- """Deletes the models `Applications` subfolder"""
- try:
- shutil.rmtree(self.getApplicationPath())
- except FileNotFoundError:
- logger.info(
- f"Directory `{self.getApplicationPath()}` can't be deleted, "
- "because it does not exist."
- )
+ return self.forward(param), self.jacobian(param)
- def __call__(self, param):
- return self.forward(param)
+ def is_artificial(self) -> bool:
+ """Determines whether the model provides artificial parameter and data sets.
- def isArtificial(self) -> bool:
- """Determines whether the model provides artificial data
+ Returns:
+ bool: True if the model inherits from the ArtificialModelInterface
- :return: True if the model inherits from the ArtificialModelInterface
- :rtype: bool
"""
return issubclass(self.__class__, ArtificialModelInterface)
- def isVisualizable(self) -> bool:
- """Determines whether the model provides bounds for the visualization grids
-
- :return: True if the model inherits from the VisualizationModelInterface
- :rtype: bool
- """
- return issubclass(self.__class__, VisualizationModelInterface)
-
class ArtificialModelInterface(ABC):
"""By inheriting from this interface you indicate that you are providing an artificial parameter dataset,
and the corresponding artificial data dataset, which can be used to compare the results from epi with the ground truth.
The comparison can be done using the plotEmceeResults.
- :raises NotImplementedError: Implement the generateArtificialData function to implement this interface.
"""
- NUM_ARTIFICIAL_SAMPLES = 1000
-
@abstractmethod
- def generateArtificialData(
- self, numSamples: int = NUM_ARTIFICIAL_SAMPLES
- ) -> None:
- """
- .. note::
+ def generate_artificial_params(self, num_samples: int) -> np.ndarray:
+        """This method must be overwritten and return a numpy array of num_samples parameters.
- This method returns None. You have to do the following:
+ Args:
+ num_samples(int): The number of parameters to generate.
- .. code-block:: python
-
- np.savetxt(trueParams,
- "Data/" + self.getModelName() + "Params.csv", delimiter=","
- )
- np.savetxt(trueData,
- "Data/" + self.getModelName() + "Data.csv", delimiter=","
- )
+ Returns:
+ np.ndarray: The generated parameters.
- To create the true data from the true params, you can simply call your model.
+ Raises:
+ NotImplementedError: If the method is not overwritten in a subclass.
- :raises NotImplementedError: Generating the artificial data is up to the user
"""
raise NotImplementedError
- def paramLoader(self) -> Tuple[np.ndarray, np.ndarray]:
- """Load and return all parameters for artificial set ups
-
- :return: Loaded parameters and the optimal kernel width for each parameter
- :rtype: Tuple[np.ndarray, np.ndarray]
- """
- trueParams = np.loadtxt(
- "Data/" + self.getModelName() + "Params.csv",
- delimiter=",",
- ndmin=2,
- )
- paramStdevs = calcKernelWidth(trueParams)
-
- return trueParams, paramStdevs
-
-
-class VisualizationModelInterface(ABC):
- """Provides the function for the generation of the dataGrid and paramGrid for the visualization of the distributions.
- It forces subclasses to implement the abstract methods getParamBounds: and getDataBounds:.
- """
+ def generate_artificial_data(
+ self,
+ params: Union[os.PathLike, str, np.ndarray],
+ ) -> np.ndarray:
+ """This method is called when the user wants to generate artificial data from the model.
- @abstractmethod
- def getParamBounds(self) -> np.ndarray:
- """Returns the bounds on the parameters used to visualize the parameter distribution.
+ Args:
+ params: typing.Union[os.PathLike, str, np.ndarray]: The parameters for which the data should be generated. Can be either a path to a file, a numpy array or a string.
Returns:
- np.array: An array of the form [[lowerLimit_dim1, upperLimit_dim1], [lowerLimit_dim2, upperLimit_dim2],...]
+ np.ndarray: The data generated from the parameters.
Raises:
- NotImplementedError: If the method is not implemented in the subclass
+ TypeError: If the params argument is not a path to a file, a numpy array or a string.
+
"""
- raise NotImplementedError
+ if isinstance(params, str) or isinstance(params, os.PathLike):
+ params = np.loadtxt(params, delimiter=",", ndmin=2)
+ elif isinstance(params, np.ndarray) or isinstance(params, jnp.ndarray):
+ pass
+ else:
+ raise TypeError(
+ f"The params argument has to be either a path to a file or a numpy array. The passed argument was of type {type(params)}"
+ )
- @abstractmethod
- def getDataBounds(self) -> np.ndarray:
- """Returns the bounds on the data used to visualize the data distribution.
+ # try to use jax vmap to perform the forward pass on multiple parameters at once
+ if isinstance(self, JaxModel):
+ return vmap(self.forward, in_axes=0)(params)
+ else:
+ return np.vectorize(self.forward, signature="(n)->(m)")(params)
- Returns:
- np.array: An array of the form [[lowerLimit_dim1, upperLimit_dim1], [lowerLimit_dim2, upperLimit_dim2],...]
- Raises:
- NotImplementedError: If the method is not implemented in the subclass
- """
- raise NotImplementedError
-
- def scale(interval: np.array, scale: float):
- middle = (interval[1, :] - interval[0, :]) / 2.0
- return (interval - middle) * scale + middle
+def add_autodiff(_cls):
+ """
+ Decorator to automatically create the jacobian method based on the forward method.
+ Additionally it jit compiles the forward and jacobian method with jax.
- def generateVisualizationGrid(
- self, resolution: int
- ) -> Tuple[np.ndarray, np.ndarray]:
- """This function creates a grid for the data as well as the parameters with a
- constant number of points in each dimension. It saves the grids as csv files in the `Plots/*grid.csv`
- in your Application folder.
-
- :param resolution: The number of grid points in each dimension
- :type resolution: int
- :return: The dataGrid and teh paramGrid.
- :rtype: Tuple[np.ndarray, np.ndarray]
- """
- # allocate storage for the parameter and data plotting grid
- paramGrid = np.zeros((resolution, self.paramDim))
- dataGrid = np.zeros((resolution, self.dataDim))
- for d in range(self.dataDim):
- dataGrid[:, d] = np.linspace(*self.dataBounds[d, :], resolution)
- for d in range(self.paramDim):
- paramGrid[:, d] = np.linspace(*self.paramBounds[d, :], resolution)
-
- # store both grids as csv files into the model-specific plot directory
- np.savetxt(
- "Applications/" + self.getModelName() + "/Plots/dataGrid.csv",
- dataGrid,
- delimiter=",",
- )
- np.savetxt(
- "Applications/" + self.getModelName() + "/Plots/paramGrid.csv",
- paramGrid,
- delimiter=",",
- )
- return dataGrid, paramGrid
+ Args:
+ _cls: The class to decorate.
+ Returns:
+ The decorated class with the jacobian method and the forward and jacobian method jit compiled with jax.
-def autodiff(_cls):
- _cls.initFwAndBw()
+ """
+ _cls.init_fw_and_bw()
return _cls
@@ -373,33 +182,157 @@ class JaxModel(Model):
Dont overwrite the __init__ method of JaxModel without calling the super constructor.
Else your forward method wont be jitted.
- :param Model: Abstract parent class
- :type Model: Model
"""
- def __init__(self, delete: bool = False, create: bool = True) -> None:
- super().__init__(delete, create)
+ def __init__(
+ self,
+ central_param: np.ndarray,
+ param_limits: np.ndarray,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ """Constructor of the JaxModel class.
+
+ Args:
+ name: str: The name of the model. If None the name of the class is used.
+ """
+ super().__init__(
+ central_param=central_param,
+ param_limits=param_limits,
+ name=name,
+ **kwargs,
+ )
# TODO: Check performance implications of not setting this at the class level but for each instance.
type(self).forward = partial(JaxModel.forward_method, self)
def __init_subclass__(cls, **kwargs):
- return autodiff(super().__init_subclass__(**kwargs))
+ """Automatically create the jacobian method based on the forward method for the subclass."""
+ return add_autodiff(super().__init_subclass__(**kwargs))
@classmethod
- def initFwAndBw(cls):
- # Calculate jitted methods for the subclass(es)
- # It is an unintended sideeffect that this happens for all intermediate classes also.
- # E.g. for: class CoronaArtificial(Corona)
+ def init_fw_and_bw(cls):
+ """Calculates the jitted methods for the subclass(es).
+ It is an unintended sideeffect that this happens for all intermediate classes also.
+ E.g. for: class CoronaArtificial(Corona)
+ """
cls.fw = jit(cls.forward)
cls.bw = jit(jacrev(cls.forward))
cls.vj = jit(value_and_jacrev(cls.forward))
@staticmethod
- def forward_method(self, param):
+ def forward_method(self, param: np.ndarray) -> np.ndarray:
+ """This method is called by the jitted forward method. It is not intended to be called directly.
+
+ Args:
+ param(np.ndarray): The parameter for which the data should be generated.
+
+ Returns:
+ np.ndarray: The data generated from the parameter.
+
+ """
return type(self).fw(param)
- def jacobian(self, param):
+ def jacobian(self, param: np.ndarray) -> np.ndarray:
+ """Jacobian of the forward pass with respect to the parameters.
+
+ Args:
+ param(np.ndarray): The parameter for which the jacobian should be evaluated.
+
+ Returns:
+ np.ndarray: The jacobian for the variables returned by the :func:`~epi.core.model.Model.forward` method with respect to the parameters.
+
+ """
return type(self).bw(param)
- def valjac(self, param):
+ def forward_and_jacobian(
+ self, param: np.ndarray
+ ) -> Tuple[np.ndarray, np.ndarray]:
+ """Evaluates the jacobian and the forward pass of the model at the same time. This can be more efficient than calling the :func:`~epi.core.model.Model.forward` and :func:`~epi.core.model.Model.jacobian` methods separately.
+
+ Args:
+ param(np.ndarray): The parameter for which the jacobian should be evaluated.
+
+ Returns:
+ typing.Tuple[np.ndarray, np.ndarray]: The data and the jacobian for a given parameter.
+
+ """
return type(self).vj(param)
+
+
+class SBMLModel(Model):
+ """The SBMLModel class is a wrapper for the AMICI python interface to simulate SBML models using this package.
+
+ Args:
+ sbml_file(str): The path to the SBML model file.
+ param_names(list): A list of parameter names. If None the parameter names are extracted from the SBML model.
+ tEnd(float): The end time of the simulation. (Default value = 1.0)
+ skip_creation(bool): If True the model is not created again based on the SBML file. Instead the model is loaded from a previously created model. (Default value = False)
+ central_param(np.ndarray): The central parameter for the model
+ param_limits(np.ndarray): The parameter limits for the model
+ """
+
+ @property
+ def param_dim(self):
+ """The number of parameters of the model."""
+ return len(self.model.getParameterIds())
+
+ @property
+ def data_dim(self):
+ """The number of observables of the model."""
+ return len(self.model.getObservableIds())
+
+ def __init__(
+ self,
+ sbml_file: str,
+ central_param: np.ndarray,
+ param_limits: np.ndarray,
+ param_names=None,
+ tEnd=1.0,
+ skip_creation: bool = False,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(central_param, param_limits, name, **kwargs)
+
+ model_name = self.name
+ model_dir = "./amici"
+
+ # Generate python code
+ if not skip_creation:
+ sbml_importer = amici.SbmlImporter(sbml_file)
+ sbml_importer.sbml2amici(model_name, model_dir)
+
+ # Load the generated model
+ model_module = amici.import_model_module(model_name, model_dir)
+ model = model_module.getModel()
+ solver = model.getSolver()
+
+ model.setTimepoints([tEnd])
+ model.requireSensitivitiesForAllParameters()
+ solver.setSensitivityMethod(amici.SensitivityMethod.forward)
+ solver.setSensitivityOrder(amici.SensitivityOrder.first)
+
+ self.param_names = param_names or model.getParameterNames()
+
+ self.model = model
+ self.solver = solver
+
+ def forward(self, params):
+ for i, param in enumerate(params):
+ self.model.setParameterByName(self.param_names[i], param)
+ rdata = amici.runAmiciSimulation(self.model, self.solver)
+ return rdata.x[-1]
+
+ def jacobian(self, params):
+ for i, param in enumerate(params):
+ self.model.setParameterByName(self.param_names[i], param)
+ rdata = amici.runAmiciSimulation(self.model, self.solver)
+ return rdata.sx[-1].T
+
+ def forward_and_jacobian(
+ self, params: np.ndarray
+ ) -> Tuple[np.ndarray, np.ndarray]:
+ for i, param in enumerate(params):
+ self.model.setParameterByName(self.param_names[i], param)
+ rdata = amici.runAmiciSimulation(self.model, self.solver)
+ return rdata.x[-1], rdata.sx[-1].T
diff --git a/epi/core/result_manager.py b/epi/core/result_manager.py
new file mode 100644
index 00000000..79552aeb
--- /dev/null
+++ b/epi/core/result_manager.py
@@ -0,0 +1,304 @@
+# TODO: Import Path from pathlib?
+import os
+import shutil
+from os import path
+from typing import Optional, Tuple
+
+import numpy as np
+import seedir
+from seedir import FakeDir, FakeFile
+
+from epi import logger
+from epi.core.model import Model
+
+
+class ResultManager:
+ """The result manager is responsible for saving the results of the inference and loading them again.
+
+ Attributes:
+ model_name(str): The name of the model (e.g. "temperature"). It is used to create the folder structure.
+ run_name(str): The name of the run which shall be saved. It is used to create subfolders for different runs.
+ """
+
+ def __init__(self, model_name: str, run_name: str) -> None:
+ self.model_name = model_name
+ self.run_name = run_name
+
+ def count_emcee_sub_runs(self, slice: np.ndarray) -> int:
+ """This data organization function counts how many sub runs are saved for the specified scenario.
+
+ Args:
+ slice(np.ndarray): The slice for which the number of sub runs should be counted.
+
+ Returns:
+ num_existing_files(int): The number of completed sub runs of the emcee particle swarm sampler.
+
+ """
+ # Initialize the number of existing files to be 0
+ num_existing_files = 0
+
+ # Increase the just defined number until no corresponding file is found anymore ...
+ while path.isfile(
+ self.get_slice_path(slice)
+ + "/SimResults/"
+ + "sim_results_"
+ + str(num_existing_files)
+ + ".csv"
+ ):
+ num_existing_files += 1
+
+ return num_existing_files
+
+ def get_slice_name(self, slice: np.ndarray) -> str:
+ """This organization function returns the name of the folder for the current slice.
+
+ Args:
+ slice(np.ndarray): The slice for which the name of the folder will be returned.
+
+ Returns:
+ str: The name of the folder for the current slice.
+
+ """
+
+ return "Slice_" + "Q".join([str(i) for i in slice])
+
+ def get_slice_path(self, slice: np.ndarray) -> str:
+ """Returns the path to the folder where the results for the given slice are stored.
+
+ Args:
+ slice(np.ndarray): The slice for which the path will be returned.
+
+ Returns:
+ str: The path to the folder where the results for the given slice are stored.
+
+ """
+ sliceName = self.get_slice_name(slice)
+ return os.path.join(
+ "Applications", self.model_name, self.run_name, sliceName
+ )
+
+ def create_application_folder_structure(
+ self, model: Model, slices: Optional[list[np.ndarray]] = None
+ ) -> None:
+ """Creates the `Application` folder including subfolder where all simulation results
+ are stored for this model. No files are deleted during this action.
+
+ Args:
+ model(Model): The model for which the folder structure will be created
+ slices(list[np.ndarray]): The slices for which the folder structure will be created
+
+ """
+
+ if slices is None:
+ slices = [np.arange(model.param_dim)]
+ for slice in slices:
+ self.create_slice_folder_structure(slice)
+
+ def create_slice_folder_structure(self, slice: np.ndarray) -> None:
+ """Creates the subfolders in `Aplication` for the given slice where all simulation results
+ are stored for this model and slice. No files are deleted during this action.
+
+ Args:
+ slice(np.ndarray): The slice for which the folder structure will be created
+
+ """
+ applicationFolderStructure = (
+ "Applications/ \n"
+ " - {modelName}/ \n"
+ " - {runName}/ \n"
+ " - {sliceName}/ \n"
+ " - DensityEvals/ \n"
+ " - Params/ \n"
+ " - SimResults/ \n"
+ )
+ path = "."
+ structure = applicationFolderStructure
+
+ def create(f, root):
+ """
+
+ Args:
+ f:
+ root:
+
+ Returns:
+
+ """
+ fpath = f.get_path()
+ joined = os.path.join(root, fpath)
+ if isinstance(f, FakeDir):
+ try:
+ os.mkdir(joined)
+ except FileExistsError:
+ logger.info(f"Directory `{joined}` already exists")
+ elif isinstance(f, FakeFile):
+ try:
+ with open(joined, "w"):
+ pass
+ except FileExistsError:
+ logger.info(f"File `{joined}` already exists")
+
+ sliceName = self.get_slice_name(slice)
+ fakeStructure = seedir.fakedir_fromstring(
+ structure.format(
+ modelName=self.model_name,
+ runName=self.run_name,
+ sliceName=sliceName,
+ )
+ )
+ fakeStructure.realize = lambda path_arg: fakeStructure.walk_apply(
+ create, root=path_arg
+ )
+ fakeStructure.realize(path)
+
+ def delete_application_folder_structure(self, model, slices) -> None:
+ """Deletes the `Applications` subfolder
+
+ Args:
+ slices(list[np.ndarray]): The slices for which the folder structure will be deleted
+ model(Model): The model for which the folder structure will be deleted
+
+ """
+ for slice in slices:
+ try:
+ self.delete_slice_folder_structure(slice)
+ except FileNotFoundError:
+ logger.info(
+ f"Folder structure for slice {slice} does not exist"
+ )
+
+ def delete_slice_folder_structure(self, slice: np.ndarray) -> None:
+ """Deletes the `Applications/[slice]` subfolder
+
+ Args:
+ slice(np.ndarray): The slice for which the folder structure will be deleted
+
+ """
+ path = self.get_slice_path(slice)
+ shutil.rmtree(path)
+
+ def get_application_path(self) -> str:
+ """Returns the path to the simulation results folder, containing also intermediate results.
+
+ Returns:
+ str: The path to the simulation results folder, containing also intermediate results.
+
+ """
+ path = "Applications/" + self.model_name
+ return path
+
+ def save_run(
+ self,
+ model: Model,
+ slice: np.ndarray,
+ run,
+ sampler_results: np.ndarray,
+ final_walker_positions: np.ndarray,
+ ) -> None:
+ """Saves the results of a single run of the emcee particle swarm sampler.
+ sampler_results has the shape (num_walkers * num_steps, sampling_dim + data_dim + 1), we save them
+ as separate files in the folders 'Params' and 'SimResults' and 'DensityEvals'.
+
+ Args:
+ model(Model): The model for which the results will be saved
+ slice(np.ndarray): The slice for which the results will be saved
+ run(int): The run for which the results will be saved
+ sampler_results(np.ndarray): The results of the sampler, expects an np.array with shape (num_walkers * num_steps, sampling_dim + data_dim + 1)
+ final_walker_positions(np.ndarray): The final positions of the walkers, expects an np.array with shape (num_walkers, sampling_dim)
+
+ """
+
+ sampling_dim = final_walker_positions.shape[1]
+
+ results_path = self.get_slice_path(slice)
+
+ # Save the parameters
+ np.savetxt(
+ results_path + "/Params/params_" + str(run) + ".csv",
+ sampler_results[:, :sampling_dim],
+ delimiter=",",
+ )
+
+ # Save the density evaluations
+ np.savetxt(
+ results_path + "/DensityEvals/density_evals_" + str(run) + ".csv",
+ sampler_results[:, -1],
+ delimiter=",",
+ )
+
+ # Save the simulation results
+ np.savetxt(
+ results_path + "/SimResults/sim_results_" + str(run) + ".csv",
+ sampler_results[
+ :, sampling_dim : model.param_dim + model.data_dim
+ ],
+ delimiter=",",
+ )
+
+ # Save the final walker positions
+ np.savetxt(
+ results_path + "/final_walker_positions_" + str(run) + ".csv",
+ final_walker_positions,
+ delimiter=",",
+ )
+
+ def save_overall(
+ self, slice, overall_params, overall_sim_results, overall_density_evals
+ ):
+ """Saves the results of all runs of the emcee particle swarm sampler for the given slice.
+
+ Args:
+ slice(np.ndarray): The slice for which the results will be saved. TODO document dimensions of overall_params, overall_sim_results, overall_density_evals
+ overall_params(np.ndarray): The results of the sampler.
+ overall_sim_results(np.ndarray): The results of the sampler.
+ overall_density_evals(np.ndarray): The results of the sampler.
+
+ """
+ # Save the three just-created files.
+ np.savetxt(
+ self.get_slice_path(slice) + "/overall_density_evals.csv",
+ overall_density_evals,
+ delimiter=",",
+ )
+ np.savetxt(
+ self.get_slice_path(slice) + "/overall_sim_results.csv",
+ overall_sim_results,
+ delimiter=",",
+ )
+ np.savetxt(
+ self.get_slice_path(slice) + "/overall_params.csv",
+ overall_params,
+ delimiter=",",
+ )
+
+ def load_sim_results(
+ self, slice: np.ndarray, num_burn_samples: int, occurrence: int
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """Load the files generated by the EPI algorithm through sampling
+
+ Args:
+ slice(np.ndarray): Slice for which the results will be loaded
+ num_burn_samples(int): Ignore the first samples of each chain
+ occurrence(int): Step width used when thinning the chains, i.e. keep only every occurrence-th sample
+
+ Returns:
+ typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: The density evaluations, the simulation results and the parameter chain
+
+ """
+ results_path = self.get_slice_path(slice)
+
+ density_evals = np.loadtxt(
+ results_path + "/overall_density_evals.csv",
+ delimiter=",",
+ )[num_burn_samples::occurrence]
+ sim_results = np.loadtxt(
+ results_path + "/overall_sim_results.csv",
+ delimiter=",",
+ ndmin=2,
+ )[num_burn_samples::occurrence, :]
+ param_chain = np.loadtxt(
+ results_path + "/overall_params.csv",
+ delimiter=",",
+ ndmin=2,
+ )[num_burn_samples::occurrence, :]
+ return density_evals, sim_results, param_chain
diff --git a/epi/core/sampling.py b/epi/core/sampling.py
index 9eeb04e6..771646e9 100644
--- a/epi/core/sampling.py
+++ b/epi/core/sampling.py
@@ -1,281 +1,317 @@
-from multiprocessing import Pool
+"""Sampling methods for the EPI package.
-# from pathos.multiprocessing import Pool
+This module provides functions to handle the sampling in EPI. It is based on the emcee package.
+
+.. _emcee: https://emcee.readthedocs.io/en/stable/
+
+Attributes:
+ NUM_RUNS (int): Default number of runs of the emcee sampler.
+ NUM_WALKERS (int): Default number of walkers in the emcee sampler.
+ NUM_STEPS (int): Default number of steps each walker performs before storing the sub run.
+ NUM_PROCESSES (int): Default number of parallel threads.
+
+"""
+
+import typing
from os import path
import emcee
import numpy as np
+from schwimmbad import MultiPool
from epi import logger
-from epi.core.functions import evalLogTransformedDensity
+from epi.core.kde import calc_kernel_width
from epi.core.model import Model
+from epi.core.result_manager import ResultManager
+from epi.core.transformations import eval_log_transformed_density
-NUM_RUNS = 2
-NUM_WALKERS = 10
-NUM_STEPS = 2500
-NUM_PROCESSES = 4
+# TODO: This works on the blob
+# Return the samples.
+# return sampler.get_chain(discard=0, thin=1, flat=True)
+# TODO: This stores the sample as 2d array in the format walker1_step1, walker2_step1, walker3_step1, walker1_step2, walker2_step2, walker3_step2, ...
+# sampler_results = samplerBlob.reshape(
+# num_walkers * num_steps, sampling_dim + data_dim + 1
+# )
-def countEmceeSubRuns(model: Model) -> int:
- """This data organization function counts how many sub runs are saved for the specified scenario.
+def run_emcee_once(
+ model: Model,
+ data: np.ndarray,
+ data_stdevs: np.ndarray,
+ slice: np.ndarray,
+ initial_walker_positions: np.ndarray,
+ num_walkers: int,
+ num_steps: int,
+ num_processes: int,
+) -> np.ndarray:
+ """Run the emcee particle swarm sampler once.
+
+ Args:
+ model (Model): The model which will be sampled
+ data (np.ndarray): data
+ data_stdevs (np.ndarray): kernel width for the data
+ slice (np.ndarray): slice of the parameter space which will be sampled
+ initial_walker_positions (np.ndarray): initial parameter values for the walkers
+ num_walkers (int): number of particles in the particle swarm sampler
+ num_steps (int): number of samples each particle performs before storing the sub run
+ num_processes (int): number of parallel threads
+
+ Returns:
+ np.ndarray: samples from the transformed parameter density
- :param model: The model for which the files will be counted
- :return: numExistingFiles (number of completed sub runs of the emcee particle swarm sampler)
"""
- # Initialize the number of existing files to be 0
- numExistingFiles = 0
- # Increase the just defined number until no corresponding file is found anymore ...
- while path.isfile(
- model.getApplicationPath()
- + "/DensityEvals/"
- + str(numExistingFiles)
- + ".csv"
- ):
- numExistingFiles += 1
+ global work
+
+ def work(params):
+ s = eval_log_transformed_density(
+ params, model, data, data_stdevs, slice
+ )
+ return s
+
+ pool = MultiPool(processes=num_processes)
+
+ # define a custom move policy
+ movePolicy = [
+ (emcee.moves.WalkMove(), 0.1),
+ (emcee.moves.StretchMove(), 0.1),
+ (
+ emcee.moves.GaussianMove(0.00001, mode="sequential", factor=None),
+ 0.8,
+ ),
+ ]
+ # movePolicy = [(emcee.moves.GaussianMove(0.00001, mode='sequential', factor=None), 1.0)]
+ sampling_dim = slice.shape[0]
+
+ # Call the sampler for all parallel workers (possibly use arg moves = movePolicy)
+ try:
+ sampler = emcee.EnsembleSampler(
+ num_walkers,
+ sampling_dim,
+ # eval_log_transformed_density,
+ work,
+ pool=pool,
+ moves=movePolicy,
+ # args=[model, data, data_stdevs, slice],
+ )
+ # Extract the final walker position and close the pool of worker processes.
+ final_walker_positions, _, _, _ = sampler.run_mcmc(
+ initial_walker_positions, num_steps, tune=True, progress=True
+ )
+ except ValueError as e:
+ # If the message equals "Probability function returned NaN."
+ if str(e) == "Probability function returned NaN.":
+ raise ValueError(
+ "Probability function returned NaN. "
+ "You possibly have to exclude data dimensions which do not depend on the paramaters. "
+ "In addition your parameters should not be linearly dependent."
+ )
+ else:
+ raise e
+
+ if pool is not None:
+ pool.close()
+ pool.join()
+
+ # TODO: Keep as 3d array?
+ # Should have shape (num_steps, num_walkers, param_dim+data_dim+1)
+ sampler_results = sampler.get_blobs()
+ data_dim = data.shape[1]
+ sampler_results = sampler_results.reshape(
+ num_steps * num_walkers, sampling_dim + data_dim + 1
+ )
+ sampler_results = sampler_results.reshape(
+ num_walkers * num_steps, sampling_dim + data_dim + 1
+ )
+
+ logger.info(
+ f"The acceptance fractions of the emcee sampler per walker are: {np.round(sampler.acceptance_fraction, 2)}"
+ )
+ try:
+ corrTimes = sampler.get_autocorr_time()
+ logger.info(f"autocorrelation time: {corrTimes[0]}")
+ except emcee.autocorr.AutocorrError as e:
+ logger.warning(
+ "The autocorrelation time could not be calculate reliable"
+ )
- return numExistingFiles
+ return sampler_results, final_walker_positions
-def runEmceeSampling(
+def run_emcee_sampling(
model: Model,
- numRuns: int = NUM_RUNS,
- numWalkers: int = NUM_WALKERS,
- numSteps: int = NUM_STEPS,
- numProcesses: int = NUM_PROCESSES,
-) -> None:
+ data: np.ndarray,
+ slice: np.ndarray,
+ result_manager: ResultManager,
+ num_processes: int,
+ num_runs: int,
+ num_walkers: int,
+ num_steps: int,
+) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Create a representative sample from the transformed parameter density using the emcee particle swarm sampler.
- Inital values are not stored in the chain and each file contains <numSteps> blocks of size numWalkers.
-
- :param model: The model which will be sampled
- :param numRuns: (number of stored sub runs)
- :param numWalkers: (number of particles in the particle swarm sampler)
- :param numSteps: (number of samples each particle performs before storing the sub run)
- :param numProcesses: (number of parallel threads)
- :return: None, except for stored files
+ Initial values are not stored in the chain and each file contains <num_steps> blocks of size num_walkers.
+
+ Args:
+ model (Model): The model which will be sampled
+ data (np.ndarray): data
+ slice (np.ndarray): slice of the parameter space which will be sampled
+ result_manager (ResultManager): ResultManager which will store the results
+ num_processes (int): number of parallel threads.
+ num_runs (int): number of stored sub runs.
+ num_walkers (int): number of particles in the particle swarm sampler.
+ num_steps (int): number of samples each particle performs before storing the sub run.
+
+ Returns:
+ typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: Array with all params, array with all data, array with all log probabilities
+
"""
- # Load data, data standard deviations and model characteristics for the specified model.
- (
- dataDim,
- data,
- dataStdevs,
- ) = model.dataLoader()
+ data_stdevs = calc_kernel_width(data)
+ sampling_dim = slice.shape[0]
+ central_param = model.central_param
# Initialize each walker at a Gaussian-drawn random, slightly different parameter close to the central parameter.
- walkerInitParams = model.getCentralParam() + 0.002 * (
- np.random.rand(numWalkers, model.paramDim) - 0.5
+ # TODO Make random variation of initial walker positions dependent on sampling limits?
+ initial_walker_positions = central_param[slice] + 0.002 * (
+ np.random.rand(num_walkers, sampling_dim) - 0.5
)
# Count and print how many runs have already been performed for this model
- numExistingFiles = countEmceeSubRuns(model)
- logger.debug(f"{numExistingFiles} existing files found")
+ num_existing_files = result_manager.count_emcee_sub_runs(slice)
+ logger.debug(f"{num_existing_files} existing files found")
# Loop over the remaining sub runs and contiune the counter where it ended.
- for run in range(numExistingFiles, numExistingFiles + numRuns):
- logger.info(f"Run {run} of {numRuns}")
+ for run in range(num_existing_files, num_existing_files + num_runs):
+ logger.info(f"Run {run} of {num_runs}")
# If there are current walker positions defined by runs before this one, use them.
- positionPath = model.getApplicationPath() + "/currentPos.csv"
- if path.isfile(positionPath):
- walkerInitParams = np.loadtxt(
- positionPath,
+ position_path = (
+ result_manager.get_slice_path(slice) + "/currentPos.csv"
+ )
+ if path.isfile(position_path):
+ initial_walker_positions = np.loadtxt(
+ position_path,
delimiter=",",
ndmin=2,
)
logger.info(
- f"Continue sampling from saved sampler position in {positionPath}"
+ f"Continue sampling from saved sampler position in {position_path}"
)
- else:
- logger.info("Start sampling from start")
-
- # Create a pool of worker processes.
- pool = Pool(processes=numProcesses)
-
- # define a custom move policy
- movePolicy = [
- (emcee.moves.WalkMove(), 0.1),
- (emcee.moves.StretchMove(), 0.1),
- (
- emcee.moves.GaussianMove(
- 0.00001, mode="sequential", factor=None
- ),
- 0.8,
- ),
- ]
- # movePolicy = [(emcee.moves.GaussianMove(0.00001, mode='sequential', factor=None), 1.0)]
-
- # Call the sampler for all parallel workers (possibly use arg moves = movePolicy)
- sampler = emcee.EnsembleSampler(
- numWalkers,
- model.paramDim,
- evalLogTransformedDensity,
- pool=pool,
- moves=movePolicy,
- args=[model, data, dataStdevs],
- )
-
- # Extract the final walker position and close the pool of worker processes.
- finalPos, _, _, _ = sampler.run_mcmc(
- walkerInitParams, numSteps, tune=True, progress=True
- )
- pool.close()
- pool.join()
-
- # Save the current walker positions as initial values for the next run.
- np.savetxt(
- positionPath,
- finalPos,
- delimiter=",",
+ # Run the sampler.
+ sampler_results, final_walker_positions = run_emcee_once(
+ model,
+ data,
+ data_stdevs,
+ slice,
+ initial_walker_positions,
+ num_walkers,
+ num_steps,
+ num_processes,
)
- # Should have shape (numSteps, numWalkers, paramDim+dataDim+1)
- samplerBlob = sampler.get_blobs()
-
- # Create a large container for all sampling results (sampled parameters, corresponding simulation results and parameter densities) and fill it using the emcee blob option.
- allRes = samplerBlob.reshape(numWalkers * numSteps, -1)
-
- # Save all sampling results in .csv files.
- np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/Params/"
- + str(run)
- + ".csv",
- allRes[:, 0 : model.paramDim],
- delimiter=",",
- )
- np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/SimResults/"
- + str(run)
- + ".csv",
- allRes[:, model.paramDim : model.paramDim + dataDim],
- delimiter=",",
- )
- np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/DensityEvals/"
- + str(run)
- + ".csv",
- allRes[:, -1],
- delimiter=",",
+ result_manager.save_run(
+ model, slice, run, sampler_results, final_walker_positions
)
- logger.info(
- f"The acceptance fractions of the emcee sampler per walker are: {np.round(sampler.acceptance_fraction, 2)}"
- )
- try:
- corrTimes = sampler.get_autocorr_time()
- logger.info(f"autocorrelation time: {corrTimes[0]}")
- except emcee.autocorr.AutocorrError as e:
- logger.warning(
- "The autocorrelation time could not be calculate reliable"
- )
+ (
+ overall_params,
+ overall_sim_results,
+ overall_density_evals,
+ ) = concatenate_emcee_sampling_results(model, result_manager, slice)
+ result_manager.save_overall(
+ slice, overall_params, overall_sim_results, overall_density_evals
+ )
+ return overall_params, overall_sim_results, overall_density_evals
-def concatenateEmceeSamplingResults(model: Model):
+# TODO: Make this a method of the ResultManager? It uses the ResultManager to load the results and many hard coded paths.
+def concatenate_emcee_sampling_results(
+ model: Model, result_manager: ResultManager, slice: np.ndarray
+) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Concatenate many sub runs of the emcee sampler to create 3 large files for sampled parameters, corresponding simulation results and density evaluations.
These files are later used for result visualization.
- Input: model
- Output: <none except for stored files>
- """
+ Args:
+ model (Model): The model for which the results should be concatenated
+ result_manager (ResultManager): ResultManager to load the results from
+ slice (np.ndarray): slice for which the results should be concatenated
- # Count and print how many sub runs are ready to be merged.
- numExistingFiles = countEmceeSubRuns(model)
- logger.info(f"{numExistingFiles} existing files found for concatenation")
+ Returns:
+ typing.Tuple[np.ndarray, np.ndarray, np.ndarray]: Array with all params, array with all data, array with all log probabilities
- # Load one example file and use it to extract how many samples are stored per file.
- numSamplesPerFile = np.loadtxt(
- model.getApplicationPath() + "/Params/0.csv",
- delimiter=",",
- ndmin=2,
- ).shape[0]
-
- # The overall number of sampled is the number of sub runs multiplied with the number of samples per file.
- numSamples = numExistingFiles * numSamplesPerFile
+ """
- # Create containers large enough to store all sampling information.
- overallDensityEvals = np.zeros(numSamples)
- overallSimResults = np.zeros((numSamples, model.dataDim))
- overallParams = np.zeros((numSamples, model.paramDim))
+ # Count and print how many sub runs are ready to be merged.
+ num_existing_files = result_manager.count_emcee_sub_runs(slice)
+ logger.info(f"{num_existing_files} existing files found for concatenation")
densityFiles = (
- "Applications/" + model.getModelName() + "/DensityEvals/{}.csv"
+ result_manager.get_slice_path(slice)
+ + "/DensityEvals/density_evals_{}.csv"
)
- simResultsFiles = (
- "Applications/" + model.getModelName() + "/SimResults/{}.csv"
+ sim_result_files = (
+ result_manager.get_slice_path(slice) + "/SimResults/sim_results_{}.csv"
)
- paramFiles = "Applications/" + model.getModelName() + "/Params/{}.csv"
- # Loop over all sub runs, load the respective sample files and store them at their respective places in the overall containers.
- for i in range(numExistingFiles):
- overallDensityEvals[
- i * numSamplesPerFile : (i + 1) * numSamplesPerFile
- ] = np.loadtxt(
- densityFiles.format(i),
- delimiter=",",
- )
- overallSimResults[
- i * numSamplesPerFile : (i + 1) * numSamplesPerFile, :
- ] = np.loadtxt(
- simResultsFiles.format(i),
- delimiter=",",
- ndmin=2,
- )
- overallParams[
- i * numSamplesPerFile : (i + 1) * numSamplesPerFile, :
- ] = np.loadtxt(
- paramFiles.format(i),
- delimiter=",",
- ndmin=2,
- )
+ paramFiles = result_manager.get_slice_path(slice) + "/Params/params_{}.csv"
- # Save the three just-created files.
- np.savetxt(
- model.getApplicationPath() + "/OverallDensityEvals.csv",
- overallDensityEvals,
- delimiter=",",
+ overall_params = np.vstack(
+ [
+ np.loadtxt(paramFiles.format(i), delimiter=",", ndmin=2)
+ for i in range(num_existing_files)
+ ]
)
- np.savetxt(
- model.getApplicationPath() + "/OverallSimResults.csv",
- overallSimResults,
- delimiter=",",
+ overall_sim_results = np.vstack(
+ [
+ np.loadtxt(sim_result_files.format(i), delimiter=",", ndmin=2)
+ for i in range(num_existing_files)
+ ]
)
- np.savetxt(
- model.getApplicationPath() + "/OverallParams.csv",
- overallParams,
- delimiter=",",
+ overall_density_evals = np.hstack(
+ [
+ np.loadtxt(densityFiles.format(i), delimiter=",")
+ for i in range(num_existing_files)
+ ]
)
+ return overall_params, overall_sim_results, overall_density_evals
+
-def calcWalkerAcceptance(model: Model, numWalkers: int, numBurnSamples: int):
+def calc_walker_acceptance(
+ model: Model,
+ slice: np.ndarray,
+ num_walkers: int,
+ num_burn_samples: int,
+ result_manager: ResultManager,
+):
"""Calculate the acceptance ratio for each individual walker of the emcee chain.
- This is especially important to find "zombie" walkers, that are never moving.
+ This is especially important to find "zombie" walkers, that are never moving.
+
+ Args:
+ model (Model): The model for which the acceptance ratio should be calculated
+ slice (np.ndarray): slice for which the acceptance ratio should be calculated
+ num_walkers (int): number of walkers in the emcee chain
+ num_burn_samples (int): number of samples that were ignored at the beginning of each chain
+ result_manager (ResultManager): ResultManager to load the results from
- Input: model
- numBurnSamples (integer number of ignored first samples of each chain)
- numWalkers (integer number of emcee walkers) that were used for the emcee chain which is analyzed here
+ Returns:
+ np.ndarray: Array with the acceptance ratio for each walker
- Output: acceptanceRatios (np.array of size numWalkers)
"""
# load the emcee parameter chain
params = np.loadtxt(
- model.getApplicationPath() + "/OverallParams.csv",
+ result_manager.get_slice_path(slice) + "/overall_params.csv",
delimiter=",",
ndmin=2,
- )[numBurnSamples:, :]
+ )[num_burn_samples:, :]
# calculate the number of steps each walker walked
# subtract 1 because we count the steps between the parameters
- numSteps = int(params.shape[0] / numWalkers) - 1
+ num_steps = int(params.shape[0] / num_walkers) - 1
# Unflatten the parameter chain and count the number of accepted steps for each walker
- params = params.reshape(numSteps + 1, numWalkers, model.paramDim)
+ params = params.reshape(num_steps + 1, num_walkers, model.param_dim)
# Build a boolean array that is true if the parameters of the current step are the same as the parameters of the next step and sum over it
# If the parameters are the same, the step is not accepted and we add 0 to the number of accepted steps
@@ -286,43 +322,51 @@ def calcWalkerAcceptance(model: Model, numWalkers: int, numBurnSamples: int):
)
# calculate the acceptance ratio by dividing the number of accepted steps by the overall number of steps
- acceptanceRatios = numAcceptedSteps / numSteps
+ acceptanceRatios = numAcceptedSteps / num_steps
return acceptanceRatios
-def inference(
+def inference_mcmc(
model: Model,
- dataPath: str = None,
- numRuns: int = NUM_RUNS,
- numWalkers: int = NUM_WALKERS,
- numSteps: int = NUM_STEPS,
- numProcesses: int = NUM_PROCESSES,
-):
- """Starts the parameter inference for the given model. If a data path is given, it is used to load the data for the model. Else, the default data path of the model is used.
-
-
- :param model: The model describing the mapping from parameters to data.
- :type model: Model
- :param dataPath: path to the data relative to the current working directory.
- If None, the default path defined in the Model class initializer is used, defaults to None
- :type dataPath: str, optional
- :param numRuns: Number of independent runs, defaults to NUM_RUNS
- :type numRuns: int, optional
- :param numWalkers: Number of walkers for each run, influencing each other, defaults to NUM_WALKERS
- :type numWalkers: int, optional
- :param numSteps: Number of steps each walker does in each run, defaults to NUM_STEPS
- :type numSteps: int, optional
- :param numProcesses: number of processes to use, defaults to NUM_PROCESSES
- :type numProcesses: int, optional
+ data: np.ndarray,
+ result_manager: ResultManager,
+ slices: list[np.ndarray],
+ num_processes: int,
+ num_runs: int = 2,
+ num_walkers: int = 10,
+ num_steps: int = 2500,
+ calc_walker_acceptance_bool: bool = False,
+) -> None:
+    """This function runs an MCMC sampling for the given model and data.
+
+ Args:
+ model (Model): The model describing the mapping from parameters to data.
+ data (np.ndarray): The data to be used for the inference.
+ result_manager (ResultManager): The result manager to be used for the inference.
+        slices (list[np.ndarray]): A list of slices to be used for the inference.
+ num_processes (int): The number of processes to be used for the inference.
+ num_runs (int, optional): The number of runs to be used for the inference. Defaults to 2.
+ num_walkers (int, optional): The number of walkers to be used for the inference. Defaults to 10.
+ num_steps (int, optional): The number of steps to be used for the inference. Defaults to 2500.
+ calc_walker_acceptance_bool (bool, optional): If True, the acceptance rate of the walkers is calculated and printed. Defaults to False.
+
"""
- if dataPath is not None:
- model.setDataPath(dataPath)
- else:
- logger.warning(
- f"No data path provided for this inference call. Using the data path of the model: {model.dataPath}"
+ for slice in slices:
+ run_emcee_sampling(
+ model=model,
+ data=data,
+ slice=slice,
+ result_manager=result_manager,
+ num_runs=num_runs,
+ num_walkers=num_walkers,
+ num_steps=num_steps,
+ num_processes=num_processes,
)
-
- runEmceeSampling(model, numRuns, numWalkers, numSteps, numProcesses)
- concatenateEmceeSamplingResults(model)
+ if calc_walker_acceptance_bool:
+ num_burn_in_steps = int(num_steps * 0.01)
+ acceptance = calc_walker_acceptance(
+ model, slice, num_walkers, num_burn_in_steps, result_manager
+ )
+ logger.info(f"Acceptance rate for slice {slice}: {acceptance}")
diff --git a/epi/core/sparsegrid.py b/epi/core/sparsegrid.py
index ce8e418f..2f05845f 100644
--- a/epi/core/sparsegrid.py
+++ b/epi/core/sparsegrid.py
@@ -1,35 +1,39 @@
-"""This module provides functions to handle create Sparse Grids (SGs_) and work with them.
- All pure SG functions are defined on the unit hypercube $[0,1]^d$.
+""" This module provides functions to handle create Sparse Grids (SGs_) and work with them.
+ All pure SG functions are defined on the unit hypercube $[0,1]^d$.
+ .. warning::
+
+ The inference with this class is not tested and not recommended for use!
.. _SGs: https://en.wikipedia.org/wiki/Sparse_grid
"""
+import typing
from functools import partial
from multiprocessing import Pool
import numpy as np
from epi import logger
-from epi.core.functions import evalLogTransformedDensity
+from epi.core.kde import calc_kernel_width
from epi.core.model import Model
-
-NUM_LEVELS = 5
-NUM_PROCESSES = 4
+from epi.core.result_manager import ResultManager
+from epi.core.transformations import eval_log_transformed_density
-def basis1D(
+def basis_1d(
points1D: np.ndarray, centre1D: np.double, level: int
) -> np.ndarray:
-
"""Evaluate a 1D hat function in an array of doubles. The hat is centered around centre1D
- and the hat's level defines its support. The support shrinks exponentially with growing level and a level of 0 is equivalent with full support on [0,1].
+ and the hat's level defines its support. The support shrinks exponentially with growing level and a level of 0 is equivalent with full support on [0,1].
- Input: points1D (np.1darray of 1D evaluation coordinate doubles)
- centre1D (np.double indicating the centre of the hat within the interval [0,1])
- level (int specifying the size/extend/support of the hat function)
+ Args:
+ points1D(np.ndarray): The points at which the hat function should be evaluated.
+ centre1D(np.double): The centre of the hat function.
+ level(int): The level of the hat function. The level defines the support of the hat function.
- Output: (np.1darray (size equivalent to size of points1D) of hat function evaluations)
+ Returns:
+ np.ndarray: The hat function evaluated at the given points.
"""
@@ -38,17 +42,19 @@ def basis1D(
)
-def basisnD(
+def basis_nd(
points: np.ndarray, centre: np.ndarray, levels: np.ndarray
) -> np.ndarray:
-
"""Use a tensor product to generalise the 1D basis function to arbitrarily high dimensions.
- Input: points (np.2darray of shape #Points x #Dims indicating the basis evaluation coordinates in nD)
- centre (np.1darray of shape #Dims defining the nD centre of an nD basis function)
- levels (np.1darray of type int and shape #Dims defining one basis function level per dimension)
+ Args:
+ points(np.ndarray): The points at which the basis function should be evaluated. Shape: (numPoints, numDims)
+ centre(np.ndarray): The centre of the basis function. Shape: (numDims,)
+ levels(np.ndarray): The levels of the basis function. Shape: (numDims,)
+
+ Returns:
+ np.ndarray: The basis function evaluated at the given points. Shape: (numPoints,)
- Output: basisEval (np.1darray of shape #Points returning one nD basis evaluation per specified evaluation point)
"""
# Initialize the basis evaluation of each point as 1
@@ -57,31 +63,34 @@ def basisnD(
# loop over all dimensions
for d in range(points.shape[1]):
# Multipy the current basis evaluation with the evaluation result of the current dimension
- basisEval *= basis1D(points[:, d], centre[d], levels[d])
+ basisEval *= basis_1d(points[:, d], centre[d], levels[d])
return basisEval
-def meshgrid2Matrix(meshgrid: list) -> np.ndarray:
+def meshgrid2matrix(meshgrid: list) -> np.ndarray:
"""Convert a np.meshgrid into a np.2darray of grid points.
The function is mainly used when assigning grid points to Smolnyak-Subspaces.
- Input: meshgrid (list of np.arrays returned by np.meshgrid)
+ Args:
+ meshgrid(list): A list of np.arrays returned by np.meshgrid
+
+ Returns:
+ np.ndarray: A matrix of shape #Points x #Dims defining all grid points
- Output: matrix (np.2darray of shape #Points x #Dims defining all grid points)
"""
# calculate the shape of the matrix and initialize with 0s
dim = len(meshgrid)
- nPoints = np.prod(meshgrid[0].shape)
+ n_points = np.prod(meshgrid[0].shape)
- matrix = np.zeros((nPoints, dim))
+ matrix = np.zeros((n_points, dim))
# read out the respective meshgrid entry for each matrix entry
for d in range(dim):
- linearMeshSlice = np.reshape(meshgrid[d], -1)
- for p in range(nPoints):
- matrix[p, d] = linearMeshSlice[p]
+ linear_mesh_slice = np.reshape(meshgrid[d], -1)
+ for p in range(n_points):
+ matrix[p, d] = linear_mesh_slice[p]
return matrix
@@ -90,169 +99,191 @@ class SparseGrid(object):
"""Each object of this class respresents a sparse grid.
In this implementation, a sparse grid is a list of Smolnyak-subspaces.
Each subspace is in principle a regular grid of a certain grid width but every second grid point is negelcted.
+
+ Attributes:
+ dim (int): The dimension of the sparse grid. This is the same as the dimension of the parameter space.
+ max_level_sum (int): The maximum sum of all levels of the subspaces. This is the same as the maximum level of the sparse grid.
+ subspace_list (list): A list of all subspaces that are part of the sparse grid.
+ levels2index (dict): A dictionary that maps the level combination of a subspace to its index in the subspace_list.
+ nSubspaces (int): The number of subspaces in the sparse grid.
+ n_points (int): The number of grid points in the sparse grid.
+        index_list4top_down_sparse_grid_traverse (list): A list of indices that defines an ordering of subspaces where low-level subspaces come before high-level ones.
+ allPoints (np.ndarray): A matrix of shape #Points x #Dims defining all grid points in the sparse grid.
+
"""
- def __init__(self, dim: int, maxLevelSum: int) -> None:
+ def __init__(self, dim: int, max_level_sum: int) -> None:
"""Constructor for a sparse grid.
A sparse grid is uniquely defined by its dimension and a level sum that must not be exceeded by any of the Smolnyak subspaces.
A subspace's levels define how fine the grid is resolved in each of the respective dimensions.
The position of a certain subspace within the list of subspaces can be tracked using the levels2index dictionary.
As we only limit the sum of all levels, the sparse grids implemented here are not refined in a dimension-dependent way.
- Input: dim (int specifying the (parameter) dimension)
- maxLevelSum (int specifying how refined the sparse grid shall be)
+ Args:
+ dim (int): The dimension of the sparse grid. This is the same as the dimension of the parameter space.
+ max_level_sum (int): The maximum sum of all levels of the subspaces. This is the same as the maximum level of the sparse grid.
+
"""
self.dim = dim
- self.maxLevelSum = maxLevelSum
+ self.max_level_sum = max_level_sum
# initiation of the root, list of subspaces and dictionary that maps the level-combination to the list-index
root = Subspace(np.zeros(dim, dtype="int"), self)
- self.subspaceList = [root]
+ self.subspace_list = [root]
self.levels2index = {}
self.levels2index[tuple(np.zeros(dim, dtype="int"))] = 0
- # refine root by calling the recursive function refineSubspace and count resulting subspaces and grid points
- self.refineSubspace(np.zeros(dim, dtype="int"), 0)
- self.nSubspaces = len(self.subspaceList)
- self.computeNPoints()
+ # refine root by calling the recursive function refine_subspace and count resulting subspaces and grid points
+ self.refine_subspace(np.zeros(dim, dtype="int"), 0)
+ self.nSubspaces = len(self.subspace_list)
+ self.compute_n_points()
# create an ordering of subspaces where low-level subspaces come before high-level ones
- self.computeIndexList4TopDownSparseGridTraverse()
+ self.compute_index_list4top_down_sparse_grid_traverse()
# collect all points from all subspaces
- self.computeAllPoints()
+ self.compute_all_points()
- def refineSubspace(
- self, currentLevels: np.ndarray, indexRefinedLevel: int
+ def refine_subspace(
+ self, current_levels: np.ndarray, indexRefinedLevel: int
) -> None:
"""Recursive function used to accumulate all subspaces up to a specified level sum in the form of a list
It returns the list itself together with a dictionary that maps the level-combination of each subspace onto its index inside the list.
This function only lists each subspace once.
- Input: currentLevels (np.1darray of type int and shape #Dims that specifies the subspace we are currently considering)
- indexRefinedLevel (int that stores the index that got altered to form the current subspace)
+ Args:
+ current_levels (np.ndarray): The level combination of the subspace that is currently being refined. Shape (dim,)
+ indexRefinedLevel (int): The index of the level that was refined to form the current subspace.
+
"""
# This loop makes sure that each subspace is only counted once.
# Achieved by storing the index that got altered to form the current subspace and letting the current
# ... subspace only refine level indices with similar or higher entry number in the levels array.
for i in range(indexRefinedLevel, self.dim):
-
# derive the level increment array and calculate new level
- levelsIncrement = np.zeros(self.dim, dtype="int")
- levelsIncrement[i] = 1
+ levels_increment = np.zeros(self.dim, dtype="int")
+ levels_increment[i] = 1
- newLevels = currentLevels + levelsIncrement
+ new_levels = current_levels + levels_increment
# kill-condition for recursion if max level is reached
- if np.sum(newLevels) <= self.maxLevelSum:
-
+ if np.sum(new_levels) <= self.max_level_sum:
# store refined subspace in list and dictionary
- self.levels2index[tuple(newLevels)] = len(self.subspaceList)
- self.subspaceList.append(Subspace(newLevels, self))
+ self.levels2index[tuple(new_levels)] = len(self.subspace_list)
+ self.subspace_list.append(Subspace(new_levels, self))
# recursive call to refine refined subspace
- self.refineSubspace(newLevels, i)
+ self.refine_subspace(new_levels, i)
- def computeNPoints(self):
+ def compute_n_points(self):
"""Iterates over all subspaces of the sparse grid and accumulates the total number of gridpoints."""
# initiate the counter to be 0
- self.nPoints = 0
+ self.n_points = 0
# loop over all subspaces
for s in range(self.nSubspaces):
-
# get current subspace
- currentSubspace = self.subspaceList[s]
+ current_subspace = self.subspace_list[s]
# add the number of points in the current subspace
- self.nPoints += currentSubspace.nPoints
+ self.n_points += current_subspace.n_points
- def computeAllPoints(self):
+ def compute_all_points(self):
"""Collect all SG points in one array by iterating over all subspaces."""
# allocate enough storage for all points
- self.points = np.zeros((self.nPoints, self.dim))
+ self.points = np.zeros((self.n_points, self.dim))
# initiate a counter for the number of already counted points
- numIncludedPoints = 0
+ num_included_points = 0
# loop over all subspaces of the SG
for i in range(self.nSubspaces):
# traverse the SG in a top-down manner
- currentSubspace = self.subspaceList[
- self.indexList4TopDownSparseGridTraverse[i]
+ current_subspace = self.subspace_list[
+ self.index_list4top_down_sparse_grid_traverse[i]
]
# copy the points from the subspace into the array of the SG
self.points[
- numIncludedPoints : numIncludedPoints
- + currentSubspace.nPoints,
+ num_included_points : num_included_points
+ + current_subspace.n_points,
:,
- ] = currentSubspace.points
+ ] = current_subspace.points
# increase the counter accordingly
- numIncludedPoints += currentSubspace.nPoints
+ num_included_points += current_subspace.n_points
- def computeIndexList4TopDownSparseGridTraverse(self):
+ def compute_index_list4top_down_sparse_grid_traverse(self):
"""Create an ordering of subspaces where low-level subspaces come before high-level ones."""
+
# allocate storage to count the sum of levels of each subspace
- levelSums = np.zeros(self.nSubspaces, dtype="int")
+ level_sums = np.zeros(self.nSubspaces, dtype="int")
# loop over all subspaces and sum over their levels array
for i in range(self.nSubspaces):
- levelSums[i] = np.sum(list(self.levels2index)[i])
+ level_sums[i] = np.sum(list(self.levels2index)[i])
# argument sort by the just-calculated level-sum
- self.indexList4TopDownSparseGridTraverse = np.argsort(levelSums)
+ self.index_list4top_down_sparse_grid_traverse = np.argsort(level_sums)
- def evalFunctionSG(self, function):
- """Evaluate the provided function for all subspaces of a sparse grid by using Subspace.evalFunction
+ # TODO: Shouldn't an eval function return something?
+ def eval_function_sg(self, function: typing.Callable):
+ """Evaluate the provided function for all subspaces of a sparse grid by using Subspace.eval_function
- Input: function (python function that can be evaluated in one sparse grid point)
+ Args:
+ function (typing.Callable): The function that is to be evaluated. It must be possible to evaluate the function in a single sparse grid point.
"""
# loop over all subspaces
for s in range(self.nSubspaces):
- # call evalFunction for the current subspace
- self.subspaceList[s].evalFunction(function)
+ # call eval_function for the current subspace
+ self.subspace_list[s].eval_function(function)
- def computeCoefficients(self):
+ def compute_coefficients(self):
"""When using sparse grids for function interpolation (and quadrature),
this function computes the coefficients of all basis function of the whole sparse grid.
+
+ Args:
+
+ Returns:
+
"""
# loop over all smolnyak subspaces in a low to high level order
for s in range(self.nSubspaces):
- currentSubspace = self.subspaceList[
- self.indexList4TopDownSparseGridTraverse[s]
+ current_subspace = self.subspace_list[
+ self.index_list4top_down_sparse_grid_traverse[s]
]
# calculate coefficients for the current subspace (consider contributions from "larger" basis functions)
- currentSubspace.coeffs = (
- currentSubspace.fEval - currentSubspace.lowerLevelContributions
+ current_subspace.coeffs = (
+ current_subspace.f_eval
+ - current_subspace.lower_level_contributions
)
# pass up contributions arising from the just-computed coefficients to
# ... all higher levels if there are any
- if np.sum(currentSubspace.levels) < self.maxLevelSum:
- currentSubspace.passContributions2HigherLevels()
+ if np.sum(current_subspace.levels) < self.max_level_sum:
+ current_subspace.pass_contributions2higher_levels()
- def computeIntegral(self):
- """Perform sparse grid integration over whole Sparse Grid using the computed coefficients (coeffs) and the volume of each basis function (basisFuncVol)"""
+ def compute_integral(self):
+ """Perform sparse grid integration over whole Sparse Grid using the computed coefficients (coeffs) and the volume of each basis function (basis_func_vol)"""
# initialise the integral to be 0
self.integral = 0
# loop over all subspaces
for s in range(self.nSubspaces):
# exrtact the current subspace
- currentSubspace = self.subspaceList[s]
+ current_subspace = self.subspace_list[s]
# multiply the volume of each basis function with the sum of all coefficients of this subspace and add the result to the integral
# (this implicitely uses that all basis functions of a given subspace have the same volume)
self.integral += (
- np.sum(currentSubspace.coeffs) * currentSubspace.basisFuncVol
+ np.sum(current_subspace.coeffs)
+ * current_subspace.basis_func_vol
)
@@ -262,29 +293,30 @@ class Subspace(object):
def __init__(self, levels: np.ndarray, SG: SparseGrid) -> None:
"""Initialize the subspace by assigning a level, dimension, number of points and the actual points themselves.
- Input: levels (np.1darray of type int and shape #Dims that specifies the refinement of the current subspace in each dimension)
- SG: (SparseGrid of which the current subspace is a part)
+ Args:
+ levels (np.ndarray): The level of the subspace in each dimension. Shape: (#Dims, )
+ SG (SparseGrid): The sparse grid of which the current subspace is a part.
+
"""
# fill all known information into the class variables
self.SG = SG
self.levels = levels
self.dim = levels.shape[0]
- self.basisFuncVol = np.power(0.5, np.sum(self.levels + 1))
- self.nPoints = np.prod(np.power(2, levels))
+ self.basis_func_vol = np.power(0.5, np.sum(self.levels + 1))
+ self.n_points = np.prod(np.power(2, levels))
# this variable is created without being directly filled -> Caution when using it; Check for reliable data
- self.lowerLevelContributions = np.zeros(self.nPoints)
+ self.lower_level_contributions = np.zeros(self.n_points)
# Create all points of the current subspace and fill self.points
# Start by creating an empty list to store the coordinates of all single dimensions
- singleDimPoints = []
+ single_dim_points = []
# loop over all dimensions
for d in range(self.dim):
-
# append a list of 1d coordinates for each dimension
- singleDimPoints.append(
+ single_dim_points.append(
np.linspace(
1 / np.power(2, levels[d] + 1),
1 - 1 / np.power(2, levels[d] + 1),
@@ -293,39 +325,40 @@ def __init__(self, levels: np.ndarray, SG: SparseGrid) -> None:
)
# create all possible combinations from the 1d coordinate arrays
- meshgrid = np.meshgrid(*singleDimPoints)
+ meshgrid = np.meshgrid(*single_dim_points)
- # convert the numpy meshgrid to a matrix of all points with shape (nPoints,dim)
- self.points = meshgrid2Matrix(meshgrid)
+ # convert the numpy meshgrid to a matrix of all points with shape (n_points,dim)
+ self.points = meshgrid2matrix(meshgrid)
- def evalFunction(self, function):
+ def eval_function(self, function: typing.Callable):
"""Evaluate a function in all points of the respective subspace.
- This function is typically called by SparseGrid.evalFunctionSG.
+ This function is typically called by SparseGrid.eval_function_sg.
+
+ Args:
+ function (typing.Callable): The function that is to be evaluated. It must be possible to evaluate the function in a single sparse grid point.
- Input: function (python function that can be evaluated in one sparse grid point)
"""
# create an empty array of size #Points
- self.fEval = np.zeros(self.nPoints)
+ self.f_eval = np.zeros(self.n_points)
# loop over all grid points of the subspace
- for i in range(self.nPoints):
+ for i in range(self.n_points):
# evaluate the provided function in the current grid point
- self.fEval[i] = function(self.points[i, :])
+ self.f_eval[i] = function(self.points[i, :])
- def passContributions2HigherLevels(self):
+ def pass_contributions2higher_levels(self):
"""During sparse grid interpolation, this function passes contributions to all subspaces with higher level."""
# loop over all subspaces of the SG (this can be made more efficient)
for s in range(self.SG.nSubspaces):
- higherLevelSubspace = self.SG.subspaceList[s]
+ higherLevelSubspace = self.SG.subspace_list[s]
# check if the higherLevelSubspace indeed has a higher level
if np.sum(higherLevelSubspace.levels) > np.sum(self.levels):
-
# loop over all points in the mother subspace and add contributions to lower levels
- for p in range(self.nPoints):
- higherLevelSubspace.lowerLevelContributions += (
- basisnD(
+ for p in range(self.n_points):
+ higherLevelSubspace.lower_level_contributions += (
+ basis_nd(
higherLevelSubspace.points,
self.points[p, :],
self.levels,
@@ -334,109 +367,75 @@ def passContributions2HigherLevels(self):
)
-def sparseGridInference(
+def inference_sparse_grid(
model: Model,
- dataPath: str = None,
- numLevels: int = NUM_LEVELS,
- numProcesses: int = NUM_PROCESSES,
+ data: np.ndarray,
+ result_manager: ResultManager,
+ slices: typing.List[np.ndarray],
+ num_processes: int,
+ numLevels: int = 5,
):
"""Evaluates the transformed parameter density over a set of points resembling a sparse grid, thereby attempting parameter inference. If a data path is given, it is used to load the data for the model. Else, the default data path of the model is used.
+ Args:
+ model(Model): The model describing the mapping from parameters to data.
+ data(np.ndarray): The data to be used for inference.
+ num_processes(int): number of processes to use for parallel evaluation of the model.
+ numLevels(int, optional): Maximum sparse grid level depth that mainly defines the number of points. Defaults to 5.
- :param model: The model describing the mapping from parameters to data.
- :type model: Model
- :param dataPath: path to the data relative to the current working directory.
- If None, the default path defined in the Model class initializer is used, defaults to None
- :type dataPath: str, optional
- :param numLevels: Maximum sparse grid level depth that mainly defines the number of points, defaults to NUM_LEVELS
- :type numLevels: int, optional
- :param numProcesses: number of processes to use, defaults to NUM_PROCESSES
- :type numProcesses: int, optional
"""
- # check if a data path is specified
- if dataPath is not None:
- model.setDataPath(dataPath)
- # default to the path defined in the model if no other indication is given.
- else:
- logger.warning(
- f"No data path provided for this inference call. Using the data path of the model: {model.dataPath}"
- )
+ logger.warning(
+ "The inference_sparse_grid function is not tested and not recommended for use."
+ )
# Load data, data standard deviations and model characteristics for the specified model.
- (
- dataDim,
- data,
- dataStdevs,
- ) = model.dataLoader()
+ data_stdevs = calc_kernel_width(data)
- paramDim = model.paramDim
+ for slice in slices:
+ # build the sparse grid over [0,1]^param_dim
+ grid = SparseGrid(slice.shape[0], numLevels)
- # build the sparse grid over [0,1]^paramDim
- grid = SparseGrid(paramDim, numLevels)
+ # get the model's parameter limits
+ param_limits = model.param_limits
- # get the model's parameter limits
- parameterLimits = model.getParamSamplingLimits()
+ # scale the sparse grid points from [0,1]^param_dim to the scaled parameter space
+ scaledSparseGridPoints = param_limits[slice, 0] + grid.points * (
+ param_limits[slice, 1] - param_limits[slice, 0]
+ )
- # scale the sparse grid points from [0,1]^paramDim to the scaled parameter space
- scaledSparseGridPoints = parameterLimits[:, 0] + grid.points * (
- parameterLimits[:, 1] - parameterLimits[:, 0]
- )
+ # allocate Memory for the parameters, their simulation evaluation and their probability density
+ results = np.zeros(
+ (grid.n_points, slice.shape[0] + model.data_dim + 1)
+ )
- # allocate Memory for the parameters, their simulation evaluation and their probability density
- allRes = np.zeros((grid.nPoints, paramDim + model.dataDim + 1))
-
- # Create a pool of worker processes
- pool = Pool(processes=numProcesses)
-
- # evaluate the probability density transformation for all sparse grid points in parallel
- parResults = pool.map(
- partial(
- evalLogTransformedDensity,
- model=model,
- data=data,
- dataStdevs=dataStdevs,
- ),
- scaledSparseGridPoints,
- )
+ # Create a pool of worker processes
+ pool = Pool(processes=num_processes)
+
+ # evaluate the probability density transformation for all sparse grid points in parallel
+ parResults = pool.map(
+ partial(
+ eval_log_transformed_density,
+ model=model,
+ data=data,
+ data_stdevs=data_stdevs,
+ slice=slice,
+ ),
+ scaledSparseGridPoints,
+ )
- # close the worker pool
- pool.close()
- pool.join()
-
- # extract the parameter, simulation result and transformed density evaluation
- for i in range(grid.nPoints):
- allRes[i, :] = parResults[i][1]
-
- # Save all sparse grid evaluation results in separate .csv files that also indicate the sparse grid level.
- np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/Params/SG"
- + str(numLevels)
- + "Levels.csv",
- allRes[:, 0:paramDim],
- delimiter=",",
- )
- np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/SimResults/SG"
- + str(numLevels)
- + "Levels.csv",
- allRes[:, paramDim : paramDim + dataDim],
- delimiter=",",
- )
- np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/DensityEvals/SG"
- + str(numLevels)
- + "Levels.csv",
- allRes[:, -1],
- delimiter=",",
- )
+ # close the worker pool
+ pool.close()
+ pool.join()
+ # extract the parameter, simulation result and transformed density evaluation
+ for i in range(grid.n_points):
+ results[i, :] = parResults[i][1]
-# TODO: Use maybe only one function for storing allRes
-# TODO: Plotting for general dimension
+ # save the results
+ result_manager.save_overall(
+ slice,
+ results[:, 0 : data.shape[1]],
+ results[:, data.shape[1] : data.shape[1] + slice.shape[0]],
+ results[:, data.shape[1] + slice.shape[0] :],
+ )
diff --git a/epi/core/transformations.py b/epi/core/transformations.py
new file mode 100644
index 00000000..e3dc2000
--- /dev/null
+++ b/epi/core/transformations.py
@@ -0,0 +1,144 @@
+from typing import Tuple
+
+import jax.numpy as jnp
+import numpy as np
+from jax import jit
+
+from epi import logger
+from epi.core.kde import eval_kde_gauss
+from epi.core.model import Model
+
+
+def evaluate_density(
+ param: np.ndarray,
+ model: Model,
+ data: np.ndarray,
+ data_stdevs: np.ndarray,
+ slice: np.ndarray,
+) -> Tuple[np.double, np.ndarray]:
+ """Given a simulation model, its derivative and corresponding data, evaluate the parameter density that is the backtransformed data distribution.
+
+ Args:
+ param (np.ndarray): parameter for which the transformed density shall be evaluated
+ model (Model): model to be evaluated
+ data (np.ndarray): data for the model. 2D array with shape (#num_data_points, #data_dim)
+ data_stdevs (np.ndarray): array of suitable kernel width for each data dimension
+ slice (np.ndarray): slice of the parameter vector that is to be evaluated
+
+ Returns:
+ Tuple[np.double, np.ndarray]:
+ : parameter density at the point param
+ : vector containing the parameter, the simulation result and the density
+ """
+
+ limits = model.param_limits
+
+ # Build the full parameter vector for evaluation based on the passed param slice and the constant central points
+ fullParam = model.central_param
+ fullParam[slice] = param
+
+ # Check if the tried parameter is within the just-defined bounds and return the lowest possible density if not.
+ if np.any((param < limits[slice, 0]) | (param > limits[slice, 1])):
+ logger.info(
+ "Parameters outside of predefined range"
+ ) # Slows down the sampling to much? -> Change logger level to warning or even error
+ return 0, np.zeros(slice.shape[0] + model.data_dim + 1)
+
+ # If the parameter is within the valid ranges...
+ else:
+ # Evaluating the model and the jacobian for the specified parameter simultaneously provide a little speedup over calculating it separately in some cases.
+ sim_res, jac = model.forward_and_jacobian(fullParam)
+
+ # Evaluate the data density in the simulation result.
+ densityEvaluation = eval_kde_gauss(data, sim_res, data_stdevs)
+
+ # Calculate the simulation model's pseudo-determinant in the parameter point (also called the correction factor).
+ correction = calc_gram_determinant(jac)
+
+ # Multiply data density and correction factor.
+ trafo_density_evaluation = densityEvaluation * correction
+
+ # Store the current parameter, its simulation result as well as its density in a large vector that is stored separately by emcee.
+ evaluation_results = np.concatenate(
+ (param, sim_res, np.array([trafo_density_evaluation]))
+ )
+
+ return trafo_density_evaluation, evaluation_results
+
+
+def eval_log_transformed_density(
+ param: np.ndarray,
+ model: Model,
+ data: np.ndarray,
+ data_stdevs: np.ndarray,
+ slice: np.ndarray,
+) -> Tuple[np.double, np.ndarray]:
+ """Given a simulation model, its derivative and corresponding data, evaluate the natural log of the parameter density that is the backtransformed data distribution.
+ This function is intended to be used with the emcee sampler and can be implemented more efficiently at some points.
+
+ Args:
+ param (np.ndarray): parameter for which the transformed density shall be evaluated
+ model (Model): model to be evaluated
+ data (np.ndarray): data for the model. 2D array with shape (#num_data_points, #data_dim)
+ data_stdevs (np.ndarray): array of suitable kernel width for each data dimension
+ slice (np.ndarray): slice of the parameter vector that is to be evaluated
+
+ Returns:
+ Tuple[np.double, np.ndarray]:
+ : natural log of the parameter density at the point param
+ : sampler_results (array concatenation of parameters, simulation results and evaluated density, stored as "blob" by the emcee sampler)
+
+ """
+ trafo_density_evaluation, evaluation_results = evaluate_density(
+ param, model, data, data_stdevs, slice
+ )
+ if trafo_density_evaluation == 0:
+ return -np.inf, evaluation_results
+ return np.log(trafo_density_evaluation), evaluation_results
+
+
+def calc_gram_determinant(jac: jnp.ndarray) -> jnp.double:
+ """Evaluate the pseudo-determinant of the jacobian (that serves as a correction term) in one specific parameter point.
+ Returns 0 if the correction factor is not finite.
+
+ Args:
+ jac(jnp.ndarray): The jacobian for which the pseudo determinant shall be calculated
+
+ Returns:
+ jnp.double: The pseudo-determinant of the jacobian
+
+ """
+ correction = _calc_gram_determinant(jac)
+ # If the correction factor is not finite, return 0 instead to not affect the sampling.
+ if not jnp.isfinite(correction):
+ correction = 0.0
+ logger.warning("Invalid value encountered for correction factor")
+ return correction
+
+
+@jit
+def _calc_gram_determinant(jac: jnp.ndarray) -> jnp.double:
+ """Jitted calculation of the pseudo-determinant of the jacobian. This function is called by calc_gram_determinant() and should not be called directly.
+ It does not check if the correction factor is finite.
+
+ Not much faster than a similar numpy version. However it can run on gpu and is maybe a bit faster because we can jit compile the sequence of operations.
+
+ Args:
+ jac (jnp.ndarray): The jacobian for which the pseudo determinant shall be calculated
+
+ Returns:
+ jnp.double: The pseudo-determinant of the jacobian
+
+ """
+
+ jac = jnp.atleast_2d(jac)
+
+ if jac.shape[0] == jac.shape[1]:
+ return jnp.abs(jnp.linalg.det(jac))
+ else:
+ jacT = jnp.transpose(jac)
+ # The pseudo-determinant is calculated as the square root of the determinant of the matrix-product of the Jacobian and its transpose.
+ # For numerical reasons, one can regularize the matrix product by adding a diagonal matrix of ones before calculating the determinant.
+ # correction = np.sqrt(np.linalg.det(np.matmul(jacT,jac) + np.eye(param.shape[0])))
+ correction = jnp.sqrt(jnp.linalg.det(jnp.matmul(jacT, jac)))
+ return correction
diff --git a/epi/examples/corona/corona.py b/epi/examples/corona/corona.py
index b7e0ab66..2b9a9b9e 100644
--- a/epi/examples/corona/corona.py
+++ b/epi/examples/corona/corona.py
@@ -1,44 +1,40 @@
-import importlib.resources
+from typing import Optional
import diffrax as dx
import jax.numpy as jnp
import numpy as np
-from jax import vmap
from epi import logger
-from epi.core.model import (
- ArtificialModelInterface,
- JaxModel,
- VisualizationModelInterface,
-)
+from epi.core.model import ArtificialModelInterface, JaxModel
-class Corona(JaxModel, VisualizationModelInterface):
+class Corona(JaxModel):
+ """ """
- paramDim = 3
- dataDim = 4
+ param_dim = 3
+ data_dim = 4
- def __init__(self, delete=False, create=False):
- super().__init__(delete, create)
- self.dataPath = importlib.resources.path(
- "epi.examples.corona", "CoronaData.csv"
- )
+ PARAM_LIMITS = np.array([[-4.5, 0.5], [-2.0, 3.0], [-2.0, 3.0]])
+ CENTRAL_PARAM = np.array([-1.8, 0.0, 0.7])
- def getDataBounds(self):
- return np.array([[0.0, 4.0], [0.0, 40.0], [0.0, 80.0], [0.0, 3.5]])
-
- def getParamBounds(self):
- return np.array([[-4.0, 0.0], [-2.0, 2.0], [-1.0, 3.0]])
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(central_param, param_limits, name=name, **kwargs)
- def getParamSamplingLimits(self):
- return np.array([[-4.5, 0.5], [-2.0, 3.0], [-2.0, 3.0]])
+ def get_data_bounds(self):
+ return np.array([[0.0, 4.0], [0.0, 40.0], [0.0, 80.0], [0.0, 3.5]])
- def getCentralParam(self):
- return np.array([-1.8, 0.0, 0.7])
+ def get_param_bounds(self):
+ return np.array([[-3.5, -0.5], [-1.0, 2.0], [-1.0, 2.0]])
@classmethod
- def forward(cls, logParam):
- param = jnp.power(10, logParam)
+ def forward(cls, log_param):
+ param = jnp.power(10, log_param)
xInit = jnp.array([999.0, 0.0, 1.0, 0.0])
def rhs(t, x, param):
@@ -57,7 +53,7 @@ def rhs(t, x, param):
stepsize_controller = dx.PIDController(rtol=1e-5, atol=1e-5)
try:
- odeSol = dx.diffeqsolve(
+ ode_sol = dx.diffeqsolve(
term,
solver,
t0=0.0,
@@ -68,7 +64,7 @@ def rhs(t, x, param):
saveat=saveat,
stepsize_controller=stepsize_controller,
)
- return odeSol.ys[1:5, 2]
+ return ode_sol.ys[1:5, 2]
except Exception as e:
logger.warning("ODE solution not possible!", exc_info=e)
@@ -76,28 +72,14 @@ def rhs(t, x, param):
class CoronaArtificial(Corona, ArtificialModelInterface):
- def generateArtificialData(
- self, numSamples=ArtificialModelInterface.NUM_ARTIFICIAL_SAMPLES
- ):
- lowerBound = np.array([-1.9, -0.1, 0.6])
- upperBound = np.array([-1.7, 0.1, 0.8])
-
- trueParamSample = lowerBound + (
- upperBound - lowerBound
- ) * np.random.rand(numSamples, 3)
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
-
- def getParamSamplingLimits(self):
- return np.array([[-2.5, -1.0], [-0.75, 0.75], [0.0, 1.5]])
+ param_limits = np.array([[-2.5, -1.0], [-0.75, 0.75], [0.0, 1.5]])
+
+ def generate_artificial_params(self, num_samples):
+ lower_bound = np.array([-1.9, -0.1, 0.6])
+ upper_bound = np.array([-1.7, 0.1, 0.8])
+
+ true_param_sample = lower_bound + (
+ upper_bound - lower_bound
+ ) * np.random.rand(num_samples, 3)
+
+ return true_param_sample
diff --git a/epi/examples/cpp/__init__.py b/epi/examples/cpp/__init__.py
index 27c31675..e69de29b 100644
--- a/epi/examples/cpp/__init__.py
+++ b/epi/examples/cpp/__init__.py
@@ -1,3 +0,0 @@
-from .cpp_plant import CppPlant as CppPlant
-from .python_reference_plants import ExternalPlant as ExternalPlant
-from .python_reference_plants import JaxPlant as JaxPlant
diff --git a/epi/examples/cpp/cpp_plant.py b/epi/examples/cpp/cpp_plant.py
index 1aaaf79e..8a9cd264 100644
--- a/epi/examples/cpp/cpp_plant.py
+++ b/epi/examples/cpp/cpp_plant.py
@@ -11,10 +11,28 @@ class CppPlant(Model, ArtificialModelInterface):
Data0: Size [0,2] # the more water and sun the better
Data1: Health [0,1], to much water is not good, too much sun is not good
Data2: Sciarid ;)
+
"""
- paramDim = 2
- dataDim = 3
+ param_dim = 2
+ data_dim = 3
+
+ PARAM_LIMITS = np.array([[0, 1], [0, 1]])
+ CENTRAL_PARAM = np.array([0.5, 0.5])
+
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: str = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(
+ central_param,
+ param_limits,
+ name,
+ **kwargs,
+ )
def forward(self, param):
return cpp_model.forward(param)
@@ -22,28 +40,5 @@ def forward(self, param):
def jacobian(self, param):
return cpp_model.jacobian(param)
- def getCentralParam(self) -> np.ndarray:
- return np.array([0.5, 0.5])
-
- def getParamSamplingLimits(self) -> np.ndarray:
- return np.array([[0.0, 1.0], [0.0, 1.0]])
-
- def generateArtificialData(
- self, numSamples=ArtificialModelInterface.NUM_ARTIFICIAL_SAMPLES
- ):
- # randomly create true parameters in [0,1]x[0,1]
- trueParamSample = np.random.rand(numSamples, 2)
-
- artificialData = np.zeros((trueParamSample.shape[0], 3))
- for i in range(trueParamSample.shape[0]):
- artificialData[i, :] = self.forward(trueParamSample[i, :])
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
+ def generate_artificial_params(self, num_samples: int):
+ return np.random.rand(num_samples, 2)
diff --git a/epi/examples/cpp/python_reference_plants.py b/epi/examples/cpp/python_reference_plants.py
index 097e572b..89ff3386 100644
--- a/epi/examples/cpp/python_reference_plants.py
+++ b/epi/examples/cpp/python_reference_plants.py
@@ -1,6 +1,6 @@
import jax.numpy as jnp
import numpy as np
-from jax import jacrev, jit, vmap
+from jax import jacrev, jit
from epi.core.model import ArtificialModelInterface, JaxModel, Model
@@ -12,10 +12,28 @@ class JaxPlant(JaxModel, ArtificialModelInterface):
Data0: Size [0,2] # the more water and sun the better
Data1: Health [0,1], to much water is not good, too much sun is not good
Data2: Sciarid :P
+
"""
- paramDim = 2
- dataDim = 3
+ param_dim = 2
+ data_dim = 3
+
+ PARAM_LIMITS = np.array([[0, 1], [0, 1]])
+ CENTRAL_PARAM = np.array([0.5, 0.5])
+
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: str = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(
+ central_param,
+ param_limits,
+ name,
+ **kwargs,
+ )
@classmethod
def forward(cls, param):
@@ -27,28 +45,8 @@ def forward(cls, param):
]
)
- def getCentralParam(self) -> np.ndarray:
- return np.array([0.5, 0.5])
-
- def getParamSamplingLimits(self) -> np.ndarray:
- return np.array([[0.0, 1.0], [0.0, 1.0]])
-
- def generateArtificialData(self, numSamples=1000):
- # randomly create true parameters in [0,1]x[0,1]
- trueParamSample = np.random.rand(numSamples, 2)
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
+ def generate_artificial_params(self, num_samples: int):
+ return np.random.rand(num_samples, 2)
@jit
@@ -79,8 +77,23 @@ class ExternalPlant(Model, ArtificialModelInterface):
Data2: Trauerfliegen :P
"""
- paramDim = 2
- dataDim = 3
+ param_dim = 2
+ data_dim = 3
+
+ PARAM_LIMITS = np.array([[0, 1], [0, 1]])
+ CENTRAL_PARAM = np.array([0.5, 0.5])
+
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: str = None,
+ ) -> None:
+ super().__init__(
+ central_param,
+ param_limits,
+ name,
+ )
def forward(self, param):
return fw(param)
@@ -88,27 +101,5 @@ def forward(self, param):
def jacobian(self, param):
return bw(param)
- def getCentralParam(self) -> np.ndarray:
- return np.array([0.5, 0.5])
-
- def getParamSamplingLimits(self) -> np.ndarray:
- return np.array([[0.0, 1.0], [0.0, 1.0]])
-
- def generateArtificialData(
- self, numSamples=ArtificialModelInterface.NUM_ARTIFICIAL_SAMPLES
- ):
- # randomly create true parameters in [0,1]x[0,1]
- trueParamSample = np.random.rand(numSamples, 2)
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
+ def generate_artificial_params(self, num_samples: int):
+ return np.random.rand(num_samples, 2)
diff --git a/epi/examples/sbml/Caffeine_2Wks_Exponential_decay.xml b/epi/examples/sbml/Caffeine_2Wks_Exponential_decay.xml
new file mode 100644
index 00000000..0fc46bc5
--- /dev/null
+++ b/epi/examples/sbml/Caffeine_2Wks_Exponential_decay.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<sbml xmlns="http://www.sbml.org/sbml/level3/version1/core" level="3" version="1">
+ <model id="Caffeine_2Wks_Exponential_decay" name="Caffeine_2Wks_Exponential_decay">
+ <listOfCompartments>
+ <compartment id="Compartment" size="1" constant="true"/>
+ </listOfCompartments>
+ <listOfSpecies>
+ <species id="y" compartment="Compartment" initialAmount="0" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"/>
+ </listOfSpecies>
+ <listOfParameters>
+ <parameter id="t" name="t" constant="false"/>
+ <parameter id="A" name="A" value="1" constant="true"/>
+ <parameter id="B" name="B" value="1" constant="true"/>
+ <parameter id="sd_y_obs" name="sd_y_obs" value="0.1" constant="true"/>
+ </listOfParameters>
+ <listOfRules>
+ <rateRule variable="y">
+ <math xmlns="http://www.w3.org/1998/Math/MathML">
+ <apply>
+ <times/>
+ <apply>
+ <minus/>
+ <cn type="integer"> 1 </cn>
+ <apply>
+ <times/>
+ <ci> A </ci>
+ <ci> t </ci>
+ </apply>
+ </apply>
+ <apply>
+ <exp/>
+ <apply>
+ <minus/>
+ <ci> B </ci>
+ <apply>
+ <times/>
+ <ci> A </ci>
+ <ci> t </ci>
+ </apply>
+ </apply>
+ </apply>
+ </apply>
+ </math>
+ </rateRule>
+ <assignmentRule variable="t">
+ <math xmlns="http://www.w3.org/1998/Math/MathML">
+ <csymbol encoding="text" definitionURL="http://www.sbml.org/sbml/symbols/time"> time </csymbol>
+ </math>
+ </assignmentRule>
+ </listOfRules>
+ </model>
+</sbml>
diff --git a/epi/examples/sbml/sbml_caffeine_model.py b/epi/examples/sbml/sbml_caffeine_model.py
new file mode 100644
index 00000000..5c625eea
--- /dev/null
+++ b/epi/examples/sbml/sbml_caffeine_model.py
@@ -0,0 +1,48 @@
+import importlib
+from typing import Optional
+
+import numpy as np
+
+from epi.core.model import ArtificialModelInterface, SBMLModel
+
+
+class CaffeineSBMLModel(SBMLModel, ArtificialModelInterface):
+
+ param_dim = 2
+ data_dim = 1
+
+ CENTRAL_PARAM = np.array([1.0, 1.0])
+ PARAM_LIMITS = np.array([[0.0, 2.0], [0.0, 2.0]])
+
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ sbml_file = importlib.resources.path(
+ "epi.examples.sbml", "Caffeine_2Wks_Exponential_decay.xml"
+ )
+ param_names = ["A", "B"]
+ super().__init__(
+ sbml_file,
+ central_param,
+ param_limits,
+ param_names,
+ 1.0,
+ False,
+ name,
+ **kwargs,
+ )
+
+ def generate_artificial_params(self, num_samples: int) -> np.ndarray:
+ diff0 = 0.2
+ diff1 = 0.2
+ params = np.random.rand(num_samples, self.param_dim)
+ params[:, 0] *= diff0
+ params[:, 0] += 1.0 - diff0 / 2.0
+
+ params[:, 1] *= diff1
+ params[:, 1] += 1.0 - diff1 / 2.0
+ return params
diff --git a/epi/examples/sbml/sbml_menten_model.py b/epi/examples/sbml/sbml_menten_model.py
new file mode 100644
index 00000000..8e337a81
--- /dev/null
+++ b/epi/examples/sbml/sbml_menten_model.py
@@ -0,0 +1,56 @@
+import importlib
+from typing import Optional
+
+import numpy as np
+
+from epi.core.model import ArtificialModelInterface, SBMLModel
+
+
+class MentenSBMLModel(SBMLModel, ArtificialModelInterface):
+
+ CENTRAL_PARAM = np.array([50.0, 1.0])
+ PARAM_LIMITS = np.array([[0.0, 100.0], [0.0, 2.0]])
+
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ sbml_file = importlib.resources.path(
+ "epi.examples.sbml", "sbml_menten_model.xml"
+ )
+ param_names = ["Km", "kcat"]
+ super().__init__(
+ sbml_file,
+ central_param,
+ param_limits,
+ param_names,
+ 1.0,
+ False,
+ name,
+ **kwargs,
+ )
+
+ # Overwrite the forward, jacobian, and forward_and_jacobian methods to remove the first variable which is not dependent on the parameters
+ def forward(self, params) -> np.ndarray:
+ return super().forward(params)[1:]
+
+ def jacobian(self, params) -> np.ndarray:
+ return super().jacobian(params)[1:, :]
+
+ def forward_and_jacobian(self, params) -> np.ndarray:
+ val, jac = super().forward_and_jacobian(params)
+ return val[1:], jac[1:, :]
+
+ def generate_artificial_params(self, num_samples: int) -> np.ndarray:
+ diff0 = 5.0
+ diff1 = 0.2
+ params = np.random.rand(num_samples, self.param_dim)
+ params[:, 0] *= diff0
+ params[:, 0] += 50.0 - diff0 / 2.0
+
+ params[:, 1] *= diff1
+ params[:, 1] += 1.0 - diff1 / 2.0
+ return params
diff --git a/epi/examples/sbml/sbml_menten_model.xml b/epi/examples/sbml/sbml_menten_model.xml
new file mode 100644
index 00000000..38859edd
--- /dev/null
+++ b/epi/examples/sbml/sbml_menten_model.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<sbml xmlns="http://www.sbml.org/sbml/level3/version2/core" level="3" version="2">
+ <model id="Simple_Menten" name="Simple_Menten">
+ <listOfUnitDefinitions>
+ <unitDefinition metaid="METAID_U0" id="u0" name="ml">
+ <listOfUnits>
+ <unit kind="litre" exponent="1" scale="-3" multiplier="1"/>
+ </listOfUnits>
+ </unitDefinition>
+ <unitDefinition metaid="METAID_U1" id="u1" name="mmole / l">
+ <listOfUnits>
+ <unit kind="litre" exponent="-1" scale="1" multiplier="1"/>
+ <unit kind="mole" exponent="1" scale="-3" multiplier="1"/>
+ </listOfUnits>
+ </unitDefinition>
+ <unitDefinition metaid="METAID_U2" id="u2" name="umole / l">
+ <listOfUnits>
+ <unit kind="litre" exponent="-1" scale="1" multiplier="1"/>
+ <unit kind="mole" exponent="1" scale="-6" multiplier="1"/>
+ </listOfUnits>
+ </unitDefinition>
+ <unitDefinition metaid="METAID_U3" id="u3" name="1 / s">
+ <listOfUnits>
+ <unit kind="second" exponent="-1" scale="1" multiplier="1"/>
+ </listOfUnits>
+ </unitDefinition>
+ <unitDefinition metaid="METAID_U4" id="u4" name="s">
+ <listOfUnits>
+ <unit kind="second" exponent="1" scale="1" multiplier="1"/>
+ </listOfUnits>
+ </unitDefinition>
+ </listOfUnitDefinitions>
+ <listOfCompartments>
+ <compartment id="v0" name="Tube" spatialDimensions="3" size="10" units="u0" constant="true"/>
+ </listOfCompartments>
+ <listOfSpecies>
+ <species metaid="METAID_P0" sboTerm="SBO:0000252" id="p0" name="Protein" compartment="v0" initialConcentration="100" substanceUnits="u2" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="true"/>
+ <species metaid="METAID_S0" sboTerm="SBO:0000247" id="s0" name="Substrate" compartment="v0" initialConcentration="10" substanceUnits="u1" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"/>
+ <species metaid="METAID_S1" sboTerm="SBO:0000247" id="s1" name="Product" compartment="v0" initialConcentration="0" substanceUnits="u1" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"/>
+ </listOfSpecies>
+ <listOfParameters>
+ <parameter id="Km" value="50" units="u1" constant="false"/>
+ <parameter id="kcat" value="1" units="u3" constant="false"/>
+ </listOfParameters>
+ <listOfReactions>
+ <annotation>
+ <enzymeml:data xmlns:enzymeml="http://sbml.org/enzymeml/version2">
+ <enzymeml:formats>
+ <enzymeml:format id="format0">
+ <enzymeml:column type="time" unit="u4" index="0"/>
+ <enzymeml:column replica="simu_simulation1.yaml_s0_25.0" species="s0" type="conc" unit="u1" index="1" isCalculated="True"/>
+ <enzymeml:column replica="simu_simulation1.yaml_s1_0.0" species="s1" type="conc" unit="u1" index="2" isCalculated="True"/>
+ </enzymeml:format>
+ <enzymeml:format id="format1">
+ <enzymeml:column type="time" unit="u4" index="0"/>
+ <enzymeml:column replica="simu_simulation2.yaml_s0_100.0" species="s0" type="conc" unit="u1" index="1" isCalculated="True"/>
+ <enzymeml:column replica="simu_simulation2.yaml_s1_0.0" species="s1" type="conc" unit="u1" index="2" isCalculated="True"/>
+ </enzymeml:format>
+ </enzymeml:formats>
+ <enzymeml:listOfMeasurements>
+ <enzymeml:measurement file="file0" id="m0" name="simulation1.yaml">
+ <enzymeml:initConc reactant="s0" value="25.0" unit="u1"/>
+ <enzymeml:initConc reactant="s1" value="0.0" unit="u1"/>
+ <enzymeml:initConc reactant="p0" value="1.0" unit="u2"/>
+ </enzymeml:measurement>
+ <enzymeml:measurement file="file1" id="m1" name="simulation2.yaml">
+ <enzymeml:initConc reactant="s0" value="100.0" unit="u1"/>
+ <enzymeml:initConc reactant="s1" value="0.0" unit="u1"/>
+ <enzymeml:initConc reactant="p0" value="1.0" unit="u2"/>
+ </enzymeml:measurement>
+ </enzymeml:listOfMeasurements>
+ <enzymeml:files>
+ <enzymeml:file file="./data/m0.csv" format="format0" id="file0"/>
+ <enzymeml:file file="./data/m1.csv" format="format1" id="file1"/>
+ </enzymeml:files>
+ </enzymeml:data>
+ </annotation>
+ <reaction metaid="METAID_R0" sboTerm="SBO:0000176" id="r0" name="Reaction" reversible="false">
+ <listOfReactants>
+ <speciesReference sboTerm="SBO:0000015" species="s0" stoichiometry="1" constant="false"/>
+ </listOfReactants>
+ <listOfProducts>
+ <speciesReference sboTerm="SBO:0000011" species="s1" stoichiometry="1" constant="false"/>
+ </listOfProducts>
+ <listOfModifiers>
+ <modifierSpeciesReference sboTerm="SBO:0000013" species="p0"/>
+ </listOfModifiers>
+ <kineticLaw name="Menten-Model">
+ <math xmlns="http://www.w3.org/1998/Math/MathML">
+ <apply>
+ <divide/>
+ <apply>
+ <times/>
+ <ci> kcat </ci>
+ <ci> p0 </ci>
+ <ci> s0 </ci>
+ </apply>
+ <apply>
+ <plus/>
+ <ci> Km </ci>
+ <ci> s0 </ci>
+ </apply>
+ </apply>
+ </math>
+ </kineticLaw>
+ </reaction>
+ </listOfReactions>
+ </model>
+</sbml>
diff --git a/epi/examples/simple_models.py b/epi/examples/simple_models.py
index 2a8651c6..2566592a 100644
--- a/epi/examples/simple_models.py
+++ b/epi/examples/simple_models.py
@@ -1,86 +1,73 @@
+from typing import Optional
+
import jax.numpy as jnp
import numpy as np
-from jax import vmap
-from epi.core.model import (
- ArtificialModelInterface,
- JaxModel,
- VisualizationModelInterface,
-)
+from epi.core.model import ArtificialModelInterface, JaxModel
+
+class Linear(JaxModel, ArtificialModelInterface):
+ param_dim = 2
+ data_dim = 2
-class Linear(JaxModel, ArtificialModelInterface, VisualizationModelInterface):
+ PARAM_LIMITS = np.array([[-0.2, 1.2], [-0.2, 1.2]])
+ CENTRAL_PARAM = np.array([0.5, 0.5])
- paramDim = 2
- dataDim = 2
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(central_param, param_limits, name=name, **kwargs)
@classmethod
def forward(cls, param):
return jnp.array([param[0] * 10, (-2.0) * param[1] - 2.0])
- def getCentralParam(self):
- return np.array([0.5, 0.5])
-
- def getParamSamplingLimits(self):
- return np.array([[-10.0, 11.0], [-10.0, 11.0]])
-
- def generateArtificialData(
- self, numSamples=ArtificialModelInterface.NUM_ARTIFICIAL_SAMPLES
- ):
-
- # randomly create true parameters in [0,1]^2
- trueParamSample = np.random.rand(numSamples, 2)
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
-
- def getParamBounds(self, scale=1.0) -> np.ndarray:
- return np.array([[-0.2, 1.2], [-0.2, 1.2]])
+ def generate_artificial_params(self, num_samples: int):
+ return np.random.rand(num_samples, self.param_dim)
- def getDataBounds(self, scale=1.0) -> np.ndarray:
- return np.array([[-2.0, 12.0], [-4.4, -1.6]])
+class Exponential(JaxModel):
+ param_dim = 2
+ data_dim = 2
-class Exponential(JaxModel, VisualizationModelInterface):
+ PARAM_LIMITS = np.array([[1.0, 2.0], [1.0, 2.0]])
+ CENTRAL_PARAM = np.array([1.0, 1.0])
- paramDim = 2
- dataDim = 2
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(central_param, param_limits, name=name, **kwargs)
@classmethod
def forward(cls, param):
return jnp.array([param[0] * jnp.exp(1), jnp.exp(param[1])])
- def getCentralParam(self) -> np.ndarray:
- return np.array([1.0, 1.0])
- def getParamSamplingLimits(self) -> np.ndarray:
- return np.array([[1.0, 2.0], [1.0, 2.0]])
-
- def getParamBounds(self) -> np.ndarray:
- return np.array([0.8, 2.2], [0.8, 2.2])
+class LinearODE(JaxModel, ArtificialModelInterface):
+ param_dim = 2
+ data_dim = 2
- # TODO: ???
- # KDExGrid = np.linspace(0.8 * np.exp(1), 2.2 * np.exp(1), KDEresolution)
- # KDEyGrid = np.linspace(np.exp(0.8), np.exp(2.2), KDEresolution)
- # KDExMesh, KDEyMesh = np.meshgrid(KDExGrid, KDEyGrid)
- def getDataBounds(self) -> np.ndarray:
- return self.forward(self.getParamBounds())
+ PARAM_LIMITS = np.array([[-2.0, 4.0], [-2.0, 4.0]])
+ CENTRAL_PARAM = np.array([1.5, 1.5])
+ TRUE_PARAM_LIMITS = np.array([[1.0, 2.0], [1.0, 2.0]])
-class LinearODE(JaxModel, ArtificialModelInterface):
-
- paramDim = 2
- dataDim = 2
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(central_param, param_limits, name=name, **kwargs)
@classmethod
def forward(cls, param):
@@ -91,25 +78,5 @@ def forward(cls, param):
]
)
- def getParamSamplingLimits(self):
- return np.array([[-10.0, 23.0], [-10.0, 23.0]])
-
- def getCentralParam(self):
- return np.array([1.5, 1.5])
-
- def generateArtificialData(self, numSamples=1000):
- # randomly create true parameters in [1,2]^2
- trueParamSample = np.random.rand(numSamples, 2) + 1
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
+ def generate_artificial_params(self, num_samples: int):
+ return np.random.rand(num_samples, 2) + 1
diff --git a/epi/examples/stock/stock.py b/epi/examples/stock/stock.py
index c3ac60f8..3d2b560c 100644
--- a/epi/examples/stock/stock.py
+++ b/epi/examples/stock/stock.py
@@ -1,18 +1,11 @@
-import importlib
-import os
+from typing import Optional
import jax.numpy as jnp
import numpy as np
-import pandas as pd
import yfinance as yf
-from jax import vmap
from epi import logger
-from epi.core.model import (
- ArtificialModelInterface,
- JaxModel,
- VisualizationModelInterface,
-)
+from epi.core.model import ArtificialModelInterface, JaxModel
# Ticker source: https://investexcel.net/all-yahoo-finance-stock-tickers/#google_vignette, Date:27.10.2022
TICKERS = [
@@ -27,91 +20,51 @@
]
-class Stock(JaxModel, VisualizationModelInterface):
+class Stock(JaxModel):
"""Model simulating stock data."""
- dataDim = 19
- paramDim = 6
+ data_dim = 19
+ param_dim = 6
+
+ PARAM_LIMITS = np.array([[-10.0, 10.0] for _ in range(param_dim)])
+ CENTRAL_PARAM = np.array(
+ [
+ 0.41406223,
+ 1.04680993,
+ 1.21173553,
+ 0.8078955,
+ 1.07772437,
+ 0.64869251,
+ ]
+ )
def __init__(
- self, delete: bool = False, create: bool = True, ticker="ETF50"
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
) -> None:
- """Initialize the model and set a ticker. Can be chosen from the list of available tickers TICKERS.
- Possibly outdated list: [ETF, Index1, Index2, Mutual, Stocks1, Stocks2, Stocks3]
+ super().__init__(central_param, param_limits, name=name, **kwargs)
- :param ticker: The ticker from which the data should be used, defaults to "ETF"
- :type ticker: str, optional
- """
- super().__init__(delete, create)
- self.dataPath = f"Data/{self.getModelName()}/{ticker}Data.csv"
-
- # Check if data for the given ticker exists
- if not os.path.isfile(self.dataPath):
- logger.warning("Ticker data not found. Downloading data...")
- tickerPath = importlib.resources.path(
- "epi.examples.stock", f"{ticker}.csv"
- )
- self.downloadData(tickerPath)
-
- def getDataBounds(self):
- return np.array([[-7.5, 7.5] * self.dataDim])
-
- def getParamBounds(self):
- return np.array([[-2.0, 2.0] * self.paramDim])
-
- def getParamSamplingLimits(self):
- return np.array([[-10.0, 10.0] * self.paramDim])
+ def download_data(self, ticker_list_path: str):
+ """Download stock data for a ticker list from yahoo finance.
- def getCentralParam(self):
- return np.array(
- [
- 0.41406223,
- 1.04680993,
- 1.21173553,
- 0.8078955,
- 1.07772437,
- 0.64869251,
- ]
- )
+ Args:
+ ticker_list_path(str): path to the ticker list csv file
- def downloadData(self, tickerListPath: str):
- """Download stock data for a ticker list from yahoo finance.
+ Returns:
+ stock_data, stock_ids and the name of the tickerList
- :param tickerListPath: path to the ticker list csv file
- :type tickerListPath: str
"""
logger.info("Downloading stock data...")
start = "2022-01-31"
end = "2022-03-01"
- dates = np.array(
- [
- "2022-01-31",
- "2022-02-01",
- "2022-02-02",
- "2022-02-03",
- "2022-02-04",
- "2022-02-07",
- "2022-02-08",
- "2022-02-09",
- "2022-02-10",
- "2022-02-11",
- "2022-02-14",
- "2022-02-15",
- "2022-02-16",
- "2022-02-17",
- "2022-02-18",
- "2022-02-22",
- "2022-02-23",
- "2022-02-24",
- "2022-02-25",
- "2022-02-28",
- ]
- )
- stocks = np.loadtxt(tickerListPath, dtype="str")
+ stocks = np.loadtxt(ticker_list_path, dtype="str")
try:
- df: pd.DataFrame = yf.download(
+ df = yf.download(
stocks.tolist(), start, end, interval="1d", repair=True
)
except Exception as e:
@@ -137,29 +90,17 @@ def downloadData(self, tickerListPath: str):
# Drop the row of the first day
df = df.iloc[1:, :]
- # get the remaining stockIDs and create a numpy array from the dataframe
- stockIDs = df.columns.get_level_values(1) # .unique()
- stockData = df.to_numpy()
+ # get the remaining stock_ids and create a numpy array from the dataframe
+ stock_ids = df.columns.get_level_values(1) # .unique()
+ stock_data = df.to_numpy()
# get the name of the tickerList
- if type(tickerListPath) != str:
- tickerListName = tickerListPath.name.split(".")[0]
+ if type(ticker_list_path) != str:
+ ticker_list_name = ticker_list_path.name.split(".")[0]
else:
- tickerListName = tickerListPath.split("/")[-1].split(".")[0]
-
- # save all time points except for the first
- os.makedirs(f"Data/{self.getModelName()}", exist_ok=True)
- np.savetxt(
- f"Data/{self.getModelName()}/{tickerListName}Data.csv",
- stockData.T,
- delimiter=",",
- )
- np.savetxt(
- f"Data/{self.getModelName()}/{tickerListName}IDs.csv",
- stockIDs,
- delimiter=",",
- fmt="% s",
- )
+ ticker_list_name = ticker_list_path.split("/")[-1].split(".")[0]
+
+ return stock_data.T, stock_ids, ticker_list_name
@classmethod
def forward(cls, param):
@@ -173,72 +114,36 @@ def iteration(x, param):
]
)
- def repetition(x, param, numRepetitions):
- for i in range(numRepetitions):
+ def repetition(x, param, num_repetitions):
+ for i in range(num_repetitions):
x = iteration(x, param)
-
return x
- x0 = jnp.zeros(2)
- x1 = repetition(x0, param, 1)
- x2 = repetition(x1, param, 1)
- x3 = repetition(x2, param, 1)
- x4 = repetition(x3, param, 1)
- x5 = repetition(x4, param, 3)
- x6 = repetition(x5, param, 1)
- x7 = repetition(x6, param, 1)
- x8 = repetition(x7, param, 1)
- x9 = repetition(x8, param, 1)
- x10 = repetition(x9, param, 3)
- x11 = repetition(x10, param, 1)
- x12 = repetition(x11, param, 1)
- x13 = repetition(x12, param, 1)
- x14 = repetition(x13, param, 1)
- x15 = repetition(x14, param, 4)
- x16 = repetition(x15, param, 1)
- x17 = repetition(x16, param, 1)
- x18 = repetition(x17, param, 1)
- x19 = repetition(x18, param, 3)
-
- timeCourse = jnp.array(
- [
- x1,
- x2,
- x3,
- x4,
- x5,
- x6,
- x7,
- x8,
- x9,
- x10,
- x11,
- x12,
- x13,
- x14,
- x15,
- x16,
- x17,
- x18,
- x19,
- ]
- )
-
- return timeCourse[:, 0]
+ x = jnp.zeros(2)
+ time_course = [
+ repetition(x, param, n)
+ for n in [1, 1, 1, 1, 3, 1, 1, 1, 1, 3, 1, 1, 1, 1, 4, 1, 1, 1, 3]
+ ]
+ return jnp.array([x[0] for x in time_course])
class StockArtificial(Stock, ArtificialModelInterface):
- def __init__(self, *args, **kwargs):
- super(Stock, self).__init__(*args, **kwargs)
-
- def generateArtificialData(
- self, numSamples=ArtificialModelInterface.NUM_ARTIFICIAL_SAMPLES
- ):
- logger.info(
- f"Generating {numSamples} data samples by evaluating the model. "
- "This might take a very long time!"
+ """ """
+
+ PARAM_LIMITS = np.array([[-1.0, 3.0] for _ in range(Stock.param_dim)])
+
+ def __init__(
+ self,
+ central_param: np.ndarray = Stock.CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super(Stock, self).__init__(
+ central_param, param_limits, name=name, **kwargs
)
+ def generate_artificial_params(self, num_samples):
mean = np.array(
[
0.41406223,
@@ -251,23 +156,8 @@ def generateArtificialData(
)
stdevs = np.array([0.005, 0.01, 0.05, 0.005, 0.01, 0.05])
- trueParamSample = np.random.randn(numSamples, self.paramDim)
- trueParamSample *= stdevs
- trueParamSample += mean
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
-
- np.savetxt(
- f"Data/{self.getModelName()}Params.csv",
- trueParamSample,
- delimiter=",",
- )
+ true_param_sample = np.random.randn(num_samples, self.param_dim)
+ true_param_sample *= stdevs
+ true_param_sample += mean
- def getParamSamplingLimits(self):
- return np.array([[-1.0, 3.0] * self.paramDim])
+ return true_param_sample
diff --git a/epi/examples/temperature/temperature.py b/epi/examples/temperature/temperature.py
index 7c9dc738..221607cb 100644
--- a/epi/examples/temperature/temperature.py
+++ b/epi/examples/temperature/temperature.py
@@ -1,8 +1,8 @@
import importlib
+from typing import Optional
import jax.numpy as jnp
import numpy as np
-from jax import vmap
from epi.core.model import ArtificialModelInterface, Model
@@ -11,63 +11,67 @@
class Temperature(Model):
+ """ """
- paramDim = 1
- dataDim = 1
+ param_dim = 1
+ data_dim = 1
- def __init__(self, delete: bool = False, create: bool = True) -> None:
- super().__init__(delete, create)
+ PARAM_LIMITS = np.array([[0, np.pi / 2]])
+ CENTRAL_PARAM = np.array([np.pi / 4.0])
- self.dataPath = importlib.resources.path(
- "epi.examples.temperature", "TemperatureData.csv"
- )
+ def __init__(
+ self,
+ central_param: np.ndarray = CENTRAL_PARAM,
+ param_limits: np.ndarray = PARAM_LIMITS,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(central_param, param_limits, name=name, **kwargs)
def forward(self, param):
- lowT = -30.0
- highT = 30.0
- res = jnp.array([lowT + (highT - lowT) * jnp.cos(jnp.abs(param[0]))])
+ low_T = -30.0
+ high_T = 30.0
+ res = jnp.array(
+ [low_T + (high_T - low_T) * jnp.cos(jnp.abs(param[0]))]
+ )
return res
def jacobian(self, param):
return jnp.array([60.0 * jnp.sin(jnp.abs(param[0]))])
- def getCentralParam(self):
- return np.array([np.pi / 4.0])
-
- def getParamSamplingLimits(self):
- return np.array([[0, np.pi / 2]])
-
class TemperatureArtificial(Temperature, ArtificialModelInterface):
- def generateArtificialData(self):
+ def generate_artificial_params(self, num_data_points: int = -1):
paramPath = importlib.resources.path(
"epi.examples.temperature", "TemperatureArtificialParams.csv"
)
- trueParamSample = np.loadtxt(paramPath, delimiter=",", ndmin=2)
-
- artificialData = vmap(self.forward, in_axes=0)(trueParamSample)
-
- np.savetxt(
- f"Data/{self.getModelName()}Data.csv",
- artificialData,
- delimiter=",",
- )
+ true_param_sample = np.loadtxt(paramPath, delimiter=",", ndmin=2)
+ return true_param_sample
class TemperatureWithFixedParams(Temperature):
- def __init__(self, delete: bool = False, create: bool = True) -> None:
- super().__init__(delete, create)
- self.lowT = -30.0
- self.highT = 30.0
+ def __init__(
+ self,
+ low_T: np.double = -30.0,
+ high_T: np.double = 30.0,
+ name: Optional[str] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(name=name, **kwargs)
+ self.low_T = low_T
+ self.high_T = high_T
def forward(self, param):
- return self.calcForward(param, self.highT, self.lowT)
+ return self.calc_forward(param, self.high_T, self.low_T)
- def calcForward(self, param, highT, lowT):
- res = jnp.array([lowT + (highT - lowT) * jnp.cos(jnp.abs(param[0]))])
+ def calc_forward(self, param, high_T, low_T):
+ res = jnp.array(
+ [low_T + (high_T - low_T) * jnp.cos(jnp.abs(param[0]))]
+ )
return res
def jacobian(self, param):
- return jnp.array(
- [(self.highT - self.lowT) * jnp.sin(jnp.abs(param[0]))]
- )
+ return self.calc_jacobian(param, self.high_T, self.low_T)
+
+ def calc_jacobian(self, param, high_T, low_T):
+ return jnp.array([(high_T - low_T) * jnp.sin(jnp.abs(param[0]))])
diff --git a/epi/jax_extension.py b/epi/jax_extension.py
index 84a103fc..e70d2d27 100644
--- a/epi/jax_extension.py
+++ b/epi/jax_extension.py
@@ -5,14 +5,31 @@
import jax.numpy as jnp
-def value_and_jacfwd(fun: Callable[[jnp.ndarray], jnp.ndarray]):
+def value_and_jacfwd(
+ fun: Callable[[jnp.ndarray], jnp.ndarray]
+) -> Callable[[jnp.ndarray], Callable[[jnp.ndarray], jnp.ndarray]]:
"""Returns a function that computes the value and the jacobian of the passed function using forward mode AD.
- :param fun: The function to supplement with the jacobian
- :type fun: Callable[[jnp.ndarray], jnp.ndarray]
+ Args:
+ fun(Callable[[jnp.ndarray], jnp.ndarray]): The function to supplement with the jacobian
+
+ Returns:
+ typing.Callable[[jnp.ndarray], typing.Tuple[jnp.ndarray, jnp.ndarray]]: A function that computes the value and the jacobian of the passed function using forward mode AD.
+
"""
- def value_and_jacfwd_fun(x: jnp.ndarray):
+ def value_and_jacfwd_fun(
+ x: jnp.ndarray,
+ ) -> Callable[[jnp.ndarray], jnp.ndarray]:
+ """
+
+ Args:
+ x(jnp.ndarray): The input to the function
+
+ Returns:
+ typing.Tuple[jnp.ndarray, jnp.ndarray]: The value and the jacobian of the passed function using forward mode AD.
+
+ """
pushfwd = partial(jax.jvp, fun, (x,))
y, jac = jax.vmap(pushfwd, out_axes=(None, -1))(
(jnp.eye(x.shape[0], dtype=x.dtype),)
@@ -22,14 +39,29 @@ def value_and_jacfwd_fun(x: jnp.ndarray):
return value_and_jacfwd_fun
-def value_and_jacrev(fun: Callable[..., jnp.ndarray]):
+def value_and_jacrev(
+ fun: Callable[..., jnp.ndarray]
+) -> Callable[[jnp.ndarray], Callable[[jnp.ndarray], jnp.ndarray]]:
"""Returns a function that computes the value and the jacobian of the passed function using reverse mode AD.
- :param fun: The function to supplement with the jacobian
- :type fun: Callable[..., jnp.ndarray]
+ Args:
+ fun(Callable[..., jnp.ndarray]): The function to supplement with the jacobian
+
+ Returns:
+ typing.Callable[[jnp.ndarray], typing.Tuple[jnp.ndarray, jnp.ndarray]]: A function that computes the value and the jacobian of the passed function using reverse mode AD.
+
"""
def value_and_jacrev_fun(x):
+ """
+
+ Args:
+ x(jnp.ndarray): The input to the function
+
+ Returns:
+ typing.Tuple[jnp.ndarray, jnp.ndarray]: The value and the jacobian of the passed function using reverse mode AD.
+
+ """
y, pullback = jax.vjp(fun, x)
jac = jax.vmap(pullback)(jnp.eye(y.shape[0], dtype=y.dtype))[0]
return y, jac
diff --git a/epi/plotting/__init__.py b/epi/plotting/__init__.py
index 392e687a..2d93c6b6 100644
--- a/epi/plotting/__init__.py
+++ b/epi/plotting/__init__.py
@@ -1,7 +1,7 @@
# In the whole plotting module:
# TODO: Extract duplicate code into functions
# TODO: Provide some "goto" plotting functions, which are applicable to all models / choose an adequate plotting
-# function based on model attributes like dataDim, ...
+# function based on model attributes like data_dim, ...
# TODO: Set default values for resolution everywhere or nowhere
# TODO: If accessing original data: respect model.dataPath attribute. E.g. by using model.dataLoader() to load the data
# or change the implementation of model
diff --git a/epi/plotting/marginals.py b/epi/plotting/marginals.py
index c04ee4d1..1a205246 100644
--- a/epi/plotting/marginals.py
+++ b/epi/plotting/marginals.py
@@ -1,6 +1,6 @@
import numpy as np
-from epi.core.kde import calcKernelWidth, evalKDEGauss
+from epi.core.kde import calc_kernel_width, eval_kde_gauss
from epi.core.model import Model
@@ -8,152 +8,173 @@ def calcDataMarginals(model: Model, resolution: int) -> None:
"""Evaluate the one-dimensional marginals of the original data over equi-distant grids.
The stored evaluations can then be used for result visualization.
- :parameter model: The model which manages the data and provides the visualization grid
- :parameter resolution: defines the number of grid points for each marginal evaluation is directly proportional to the runtime
- :return: None, but stores results as files
+ Args:
+        model: The model which manages the data and provides the visualization grid
+        resolution: defines the number of grid points for each marginal evaluation; the runtime is directly proportional to it
+ model: Model:
+ resolution: int:
+
+ Returns:
+ None, but stores results as files
+
"""
# Load data, data standard deviations and model characteristics for the specified model.
(
- dataDim,
+ data_dim,
data,
- dataStdevs,
+ data_stdevs,
) = model.dataLoader()
# Create containers for the data marginal evaluations.
- trueDataMarginals = np.zeros((resolution, dataDim))
+ trueDataMarginals = np.zeros((resolution, data_dim))
# Load the grid over which the data marginal will be evaluated
dataGrid, _ = model.generateVisualizationGrid(resolution)
# Loop over each simulation result dimension and marginalize over the rest.
- for dim in range(dataDim):
+ for dim in range(data_dim):
# The 1D-arrays of true data have to be casted to 2D-arrays, as this format is obligatory for kernel density estimation.
marginalData = np.zeros((data.shape[0], 1))
marginalData[:, 0] = data[:, dim]
# Loop over all grid points and evaluate the 1D kernel marginal density estimation of the data sample.
for i in range(resolution):
- trueDataMarginals[i, dim] = evalKDEGauss(
+ trueDataMarginals[i, dim] = eval_kde_gauss(
marginalData,
np.array([dataGrid[i, dim]]),
- np.array([dataStdevs[dim]]),
+ np.array([data_stdevs[dim]]),
)
# Store the marginal KDE approximation of the data
np.savetxt(
- model.getApplicationPath() + "/Plots/trueDataMarginals.csv",
+ model.get_application_path() + "/Plots/trueDataMarginals.csv",
trueDataMarginals,
delimiter=",",
)
def calcEmceeSimResultsMarginals(
- model: Model, numBurnSamples: int, occurrence: int, resolution: int
+ model: Model, num_burn_samples: int, occurrence: int, resolution: int
) -> None:
"""Evaluate the one-dimensional marginals of the emcee sampling simulation results over equi-distant grids.
The stores evaluations can then be used for result visualization.
- :param model:
- :param numBurnSamples: (Number of ignored first samples of each chain), defaults to 20% of all samples
- :param occurence: (step of sampling from chains), defaults to numWalkers+1 (ensures that the chosen samples are nearly uncorrelated)
- :param resolution: (defines the number of grid points for each marginal evaluation is directly proportional to the runtime), defaults to 100
- :return: None, except for stored files
+ Args:
+        num_burn_samples: Number of ignored first samples of each chain, defaults to 20% of all samples
+        occurrence: Step of sampling from chains, defaults to num_walkers+1 (ensures that the chosen samples are nearly uncorrelated)
+        resolution: Defines the number of grid points for each marginal evaluation, defaults to 100; the runtime is directly proportional to it
+ model: Model:
+ num_burn_samples: int:
+ occurrence: int:
+ resolution: int:
+
+ Returns:
+ None, except for stored files
+
"""
# Load the emcee simulation results chain
- simResults = np.loadtxt(
- model.getApplicationPath() + "/OverallSimResults.csv",
+ sim_results = np.loadtxt(
+ model.get_application_path() + "/OverallSimResults.csv",
delimiter=",",
ndmin=2,
- )[numBurnSamples::occurrence, :]
+ )[num_burn_samples::occurrence, :]
# Load data, data standard deviations and model characteristics for the specified model.
(
- dataDim,
+ data_dim,
data,
- dataStdevs,
+ data_stdevs,
) = model.dataLoader()
# Create containers for the simulation results marginal evaluations.
- inferredDataMarginals = np.zeros((resolution, dataDim))
+ inferredDataMarginals = np.zeros((resolution, data_dim))
# Load the grid over which the simulation results marginal will be evaluated
dataGrid, _ = model.generateVisualizationGrid(resolution)
# Loop over each data dimension and marginalize over the rest.
- for dim in range(dataDim):
+ for dim in range(data_dim):
# The 1D-arrays of simulation resultshave to be casted to 2D-arrays, as this format is obligatory for kernel density estimation.
- marginalSimResults = np.zeros((simResults.shape[0], 1))
- marginalSimResults[:, 0] = simResults[:, dim]
+ marginalSimResults = np.zeros((sim_results.shape[0], 1))
+ marginalSimResults[:, 0] = sim_results[:, dim]
# Loop over all grid points and evaluate the 1D kernel marginal density estimation of the emcee simulation results.
for i in range(resolution):
- inferredDataMarginals[i, dim] = evalKDEGauss(
+ inferredDataMarginals[i, dim] = eval_kde_gauss(
marginalSimResults,
np.array([dataGrid[i, dim]]),
- np.array([dataStdevs[dim]]),
+ np.array([data_stdevs[dim]]),
)
# Store the marginal KDE approximation of the simulation results emcee sample
np.savetxt(
- model.getApplicationPath() + "/Plots/inferredDataMarginals.csv",
+ model.get_application_path() + "/Plots/inferredDataMarginals.csv",
inferredDataMarginals,
delimiter=",",
)
def calcParamMarginals(
- model: Model, numBurnSamples: int, occurrence: int, resolution: int
+ model: Model, num_burn_samples: int, occurrence: int, resolution: int
) -> None:
"""Evaluate the one-dimensional marginals of the emcee sampling parameters (and potentially true parameters) over equi-distant grids.
The stores evaluations can then be used for result visualization.
- :param model:(model ID)
- :param numBurnSamples: (Number of ignored first samples of each chain), defaults to 20% of all samples
- :param occurrence: (step of sampling from chains), defaults to numWalkers+1 (ensures that the chosen samples are nearly uncorrelated)
- :param resolution: (defines the number of grid points for each marginal evaluation is directly proportional to the runtime), defaults to 100
- :return: None, except for stored files
+ Args:
+        model: The model (model ID)
+        num_burn_samples: Number of ignored first samples of each chain, defaults to 20% of all samples
+        occurrence: Step of sampling from chains, defaults to num_walkers+1 (ensures that the chosen samples are nearly uncorrelated)
+        resolution: Defines the number of grid points for each marginal evaluation, defaults to 100; the runtime is directly proportional to it
+ model: Model:
+ num_burn_samples: int:
+ occurrence: int:
+ resolution: int:
+
+ Returns:
+ None, except for stored files
+
"""
# If the model name indicates an artificial setting, indicate that true parameter information is available
- artificialModel = model.isArtificial()
+ artificialModel = model.is_artificial()
# Load the emcee parameter chain
- paramChain = np.loadtxt(
- model.getApplicationPath() + "/OverallParams.csv",
+ param_chain = np.loadtxt(
+ model.get_application_path() + "/OverallParams.csv",
delimiter=",",
ndmin=2,
- )[numBurnSamples::occurrence, :]
+ )[num_burn_samples::occurrence, :]
- paramDim = model.paramDim
+ param_dim = model.param_dim
# Define the standard deviation for plotting the parameters based on the sampled parameters and not the true ones.
- paramStdevs = calcKernelWidth(paramChain)
+ paramStdevs = calc_kernel_width(param_chain)
# Create containers for the parameter marginal evaluations and the underlying grid.
_, paramGrid = model.generateVisualizationGrid(resolution)
- inferredParamMarginals = np.zeros((resolution, paramDim))
+ inferredParamMarginals = np.zeros((resolution, param_dim))
# If there are true parameter values available, load them and allocate storage similar to the just-defined one.
if artificialModel:
- trueParamSample, _ = model.paramLoader()
- trueParamMarginals = np.zeros((resolution, paramDim))
+ true_param_sample, _ = model.paramLoader()
+ trueParamMarginals = np.zeros((resolution, param_dim))
# Loop over each parameter dimension and marginalize over the rest.
- for dim in range(paramDim):
+ for dim in range(param_dim):
# As the kernel density estimators only work for 2D-arrays of data, we have to cast the column of parameter samples into a 1-column matrix (or 2D-array).
- marginalParamChain = np.zeros((paramChain.shape[0], 1))
- marginalParamChain[:, 0] = paramChain[:, dim]
+ marginalParamChain = np.zeros((param_chain.shape[0], 1))
+ marginalParamChain[:, 0] = param_chain[:, dim]
# If there is true parameter information available, we have to do the same type cast for the true parameter samples.
if artificialModel:
- trueMarginalParamSample = np.zeros((trueParamSample.shape[0], 1))
- trueMarginalParamSample[:, 0] = trueParamSample[:, dim]
+ trueMarginalParamSample = np.zeros((true_param_sample.shape[0], 1))
+ trueMarginalParamSample[:, 0] = true_param_sample[:, dim]
# Loop over all grid points and evaluate the 1D kernel density estimation of the reconstructed marginal parameter distribution.
for i in range(resolution):
- inferredParamMarginals[i, dim] = evalKDEGauss(
+ inferredParamMarginals[i, dim] = eval_kde_gauss(
marginalParamChain,
np.array([paramGrid[i, dim]]),
np.array(paramStdevs[dim]),
@@ -161,7 +182,7 @@ def calcParamMarginals(
# If true parameter information is available, evaluate a similar 1D marginal distribution based on the true parameter samples.
if artificialModel:
- trueParamMarginals[i, dim] = evalKDEGauss(
+ trueParamMarginals[i, dim] = eval_kde_gauss(
trueMarginalParamSample,
np.array([paramGrid[i, dim]]),
np.array(paramStdevs[dim]),
@@ -169,16 +190,14 @@ def calcParamMarginals(
# Store the (potentially 2) marginal distribution(s) for later plotting
np.savetxt(
- model.getApplicationPath() + "/Plots/inferredParamMarginals.csv",
+ model.get_application_path() + "/Plots/inferredParamMarginals.csv",
inferredParamMarginals,
delimiter=",",
)
if artificialModel:
np.savetxt(
- "Applications/"
- + model.getModelName()
- + "/Plots/trueParamMarginals.csv",
+ "Applications/" + model.name + "/Plots/trueParamMarginals.csv",
trueParamMarginals,
delimiter=",",
)
diff --git a/epi/plotting/plots.py b/epi/plotting/plots.py
index 5f224c60..4d51086d 100644
--- a/epi/plotting/plots.py
+++ b/epi/plotting/plots.py
@@ -2,9 +2,9 @@
import numpy as np
from matplotlib import cm
-from epi.core.functions import evalLogTransformedDensity
-from epi.core.kde import calcKernelWidth, evalKDEGauss
+from epi.core.kde import calc_kernel_width, eval_kde_gauss
from epi.core.model import Model
+from epi.core.transformations import eval_log_transformed_density
colorQ = np.array([255.0, 147.0, 79.0]) / 255.0
colorQApprox = np.array([204.0, 45.0, 53.0]) / 255.0
@@ -17,44 +17,47 @@
# TODO: Fix np.loadtxt(..., ndmin=2) in all functions
-def plotEmceeResults(model: Model, numBurnSamples, occurrence, resolution=100):
+def plotEmceeResults(
+ model: Model, num_burn_samples, occurrence, resolution=100
+):
"""Plot sampling results in comparison to true results
- :param model: Model from which the results will be plotted
- :type model: Model
- :param numBurnSamples: Ignore the first samples of each chain
- :type numBurnSamples: _type_
- :param occurrence: step of sampling from chains
- :type occurrence: _type_
- :param resolution: number of points on the plotting grid?, defaults to 100
- :type resolution: int, optional
+ Args:
+ model(Model): Model from which the results will be plotted
+ num_burn_samples(_type_): Ignore the first samples of each chain
+ occurrence(_type_): step of sampling from chains
+        resolution(int, optional): number of points on the plotting grid, defaults to 100
+ model: Model:
+
+ Returns:
+
"""
- artificialModel = model.isArtificial()
+ artificialModel = model.is_artificial()
(
- dataDim,
+ data_dim,
data,
- dataStdevs,
+ data_stdevs,
) = model.dataLoader()
- densityEvals, simResults, paramChain = model.loadSimResults(
- numBurnSamples, occurrence
+ density_evals, sim_results, param_chain = model.load_sim_results(
+ num_burn_samples, occurrence
)
if artificialModel:
trueParams, paramStdevs = model.paramLoader()
else:
- paramStdevs = calcKernelWidth(paramChain)
+ paramStdevs = calc_kernel_width(param_chain)
# plot sampled parameters in comparison to true ones
- for dim in range(model.paramDim):
+ for dim in range(model.param_dim):
evaluations = np.zeros(resolution)
- singleParamChain = np.zeros((paramChain.shape[0], 1))
- singleParamChain[:, 0] = paramChain[:, dim]
+ singleParamChain = np.zeros((param_chain.shape[0], 1))
+ singleParamChain[:, 0] = param_chain[:, dim]
paramGrid = np.linspace(
- np.amin(paramChain[:, dim]),
- np.amax(paramChain[:, dim]),
+ np.amin(param_chain[:, dim]),
+ np.amax(param_chain[:, dim]),
resolution,
)
@@ -64,14 +67,14 @@ def plotEmceeResults(model: Model, numBurnSamples, occurrence, resolution=100):
trueSingleParamSample[:, 0] = trueParams[:, dim]
for i in range(resolution):
- evaluations[i] = evalKDEGauss(
+ evaluations[i] = eval_kde_gauss(
singleParamChain,
np.array([paramGrid[i]]),
np.array(paramStdevs[dim]),
)
if artificialModel:
- trueEvaluations[i] = evalKDEGauss(
+ trueEvaluations[i] = eval_kde_gauss(
trueSingleParamSample,
np.array([paramGrid[i]]),
np.array(paramStdevs[dim]),
@@ -94,12 +97,12 @@ def plotEmceeResults(model: Model, numBurnSamples, occurrence, resolution=100):
plt.plot()
# plot sampled simulation results in comparison to original data
- for dim in range(dataDim):
+ for dim in range(data_dim):
simResEvaluations = np.zeros(resolution)
dataEvaluations = np.zeros(resolution)
- singleSimResults = np.zeros((simResults.shape[0], 1))
- singleSimResults[:, 0] = simResults[:, dim]
+ singleSimResults = np.zeros((sim_results.shape[0], 1))
+ singleSimResults[:, 0] = sim_results[:, dim]
singleData = np.zeros((data.shape[0], 1))
singleData[:, 0] = data[:, dim]
@@ -113,15 +116,15 @@ def plotEmceeResults(model: Model, numBurnSamples, occurrence, resolution=100):
evalGrid = np.linspace(globalMin, globalMax, resolution)
for i in range(resolution):
- simResEvaluations[i] = evalKDEGauss(
+ simResEvaluations[i] = eval_kde_gauss(
singleSimResults,
np.array([evalGrid[i]]),
- np.array([dataStdevs[dim]]),
+ np.array([data_stdevs[dim]]),
)
- dataEvaluations[i] = evalKDEGauss(
+ dataEvaluations[i] = eval_kde_gauss(
singleData,
np.array([evalGrid[i]]),
- np.array([dataStdevs[dim]]),
+ np.array([data_stdevs[dim]]),
)
plt.figure()
@@ -137,25 +140,33 @@ def plotEmceeResults(model: Model, numBurnSamples, occurrence, resolution=100):
def plotDataMarginals(model: Model):
+ """
+
+ Args:
+ model: Model:
+
+ Returns:
+
+ """
(
- paramDim,
- dataDim,
- numDataPoints,
- centralParam,
+ param_dim,
+ data_dim,
+ num_data_points,
+ central_param,
data,
- dataStdevs,
+ data_stdevs,
) = model.loadData()
dataGrid = np.loadtxt(
- model.getApplicationPath() + "/Plots/dataGrid.csv",
+ model.get_application_path() + "/Plots/dataGrid.csv",
delimiter=",",
)
trueDataMarginals = np.loadtxt(
- model.getApplicationPath() + "/Plots/trueDataMarginals.csv",
+ model.get_application_path() + "/Plots/trueDataMarginals.csv",
delimiter=",",
)
- for dim in range(dataDim):
+ for dim in range(data_dim):
plt.figure()
plt.plot(
dataGrid[:, dim],
@@ -174,50 +185,58 @@ def plotDataMarginals(model: Model):
plt.show()
-def plotMarginals(model: Model, numBurnSamples, occurrence):
- artificialModel = model.isArtificial()
+def plotMarginals(model: Model, num_burn_samples, occurrence):
+ """
+
+ Args:
+ model: Model:
+ num_burn_samples:
+ occurrence:
+
+ Returns:
+
+ """
+ artificialModel = model.is_artificial()
- simResults = np.loadtxt(
- model.getApplicationPath() + "/OverallSimResults.csv",
+ sim_results = np.loadtxt(
+ model.get_application_path() + "/OverallSimResults.csv",
delimiter=",",
- )[numBurnSamples::occurrence, :]
- paramChain = np.loadtxt(
- model.getApplicationPath() + "/OverallParams.csv",
+ )[num_burn_samples::occurrence, :]
+ param_chain = np.loadtxt(
+ model.get_application_path() + "/OverallParams.csv",
delimiter=",",
- )[numBurnSamples::occurrence, :]
+ )[num_burn_samples::occurrence, :]
paramGrid = np.loadtxt(
- model.getApplicationPath() + "/Plots/paramGrid.csv",
+ model.get_application_path() + "/Plots/paramGrid.csv",
delimiter=",",
)
inferredParamMarginals = np.loadtxt(
- model.getApplicationPath() + "/Plots/inferredParamMarginals.csv",
+ model.get_application_path() + "/Plots/inferredParamMarginals.csv",
delimiter=",",
)
if artificialModel:
trueParams, paramStdevs = model.paramLoader()
trueParamMarginals = np.loadtxt(
- "Applications/"
- + model.getModelName()
- + "/Plots/trueParamMarginals.csv",
+ "Applications/" + model.name + "/Plots/trueParamMarginals.csv",
delimiter=",",
)
dataGrid = np.loadtxt(
- model.getApplicationPath() + "/Plots/dataGrid.csv",
+ model.get_application_path() + "/Plots/dataGrid.csv",
delimiter=",",
)
trueDataMarginals = np.loadtxt(
- model.getApplicationPath() + "/Plots/trueDataMarginals.csv",
+ model.get_application_path() + "/Plots/trueDataMarginals.csv",
delimiter=",",
)
inferredDataMarginals = np.loadtxt(
- model.getApplicationPath() + "/Plots/inferredDataMarginals.csv",
+ model.get_application_path() + "/Plots/inferredDataMarginals.csv",
delimiter=",",
)
- for dim in range(model.paramDim):
+ for dim in range(model.param_dim):
plt.figure()
plt.plot(
paramGrid[:, dim],
@@ -225,7 +244,7 @@ def plotMarginals(model: Model, numBurnSamples, occurrence):
c=colorQApprox,
label="inferred param. marg. KDE",
)
- # plt.hist(paramChain[:,dim], bins = paramGrid[:,dim], color = np.concatenate((colorQApprox, np.array([0.5]))), label = "inferred param. marg. hist.", density = True)
+ # plt.hist(param_chain[:,dim], bins = paramGrid[:,dim], color = np.concatenate((colorQApprox, np.array([0.5]))), label = "inferred param. marg. hist.", density = True)
if artificialModel:
plt.plot(
@@ -239,7 +258,7 @@ def plotMarginals(model: Model, numBurnSamples, occurrence):
plt.legend()
plt.show()
- for dim in range(model.dataDim):
+ for dim in range(model.data_dim):
plt.figure()
plt.plot(
dataGrid[:, dim],
@@ -247,7 +266,7 @@ def plotMarginals(model: Model, numBurnSamples, occurrence):
c=colorYApprox,
label="reconstr. data marg. KDE",
)
- # plt.hist(simResults[:,dim], bins = dataGrid[:,dim], color = np.concatenate((colorYApprox, np.array([0.5]))), label = "recontr. data. marg. hist.", density = True)
+ # plt.hist(sim_results[:,dim], bins = dataGrid[:,dim], color = np.concatenate((colorYApprox, np.array([0.5]))), label = "recontr. data. marg. hist.", density = True)
plt.plot(
dataGrid[:, dim],
@@ -260,48 +279,52 @@ def plotMarginals(model: Model, numBurnSamples, occurrence):
plt.show()
-def plotSpiderWebs(model: Model, numBurnSamples, occurrence):
+def plotSpiderWebs(model: Model, num_burn_samples, occurrence):
"""Draw each sample (row of the matrix) as a circle of linear interpolations of its dimensions.
Loads all necessary data and subsequently calls the method singleWeb 3 or 4 times
- :param model: (model ID)
- :param numBurnSamples: (Number of ignored first samples of each chain)
- :param occurence: (step of sampling from chains)
- :return: None, shows a plot
+ Args:
+        model: The model (model ID)
+        num_burn_samples: Number of ignored first samples of each chain
+        occurrence: Step of sampling from chains
+ model: Model:
+ occurrence:
+
+ Returns:
+ None, shows a plot
+
"""
# If the model name indicates an artificial setting, indicate that true parameter information is available
- artificialModel = model.isArtificial()
+ artificialModel = model.is_artificial()
# load emcee parameter samples and corresponding simulation results
emceeParams = np.loadtxt(
- model.getApplicationPath() + "/OverallParams.csv",
+ model.get_application_path() + "/OverallParams.csv",
delimiter=",",
- )[numBurnSamples::occurrence, :]
+ )[num_burn_samples::occurrence, :]
emceeSimResults = np.loadtxt(
- model.getApplicationPath() + "/OverallSimResults.csv",
+ model.get_application_path() + "/OverallSimResults.csv",
delimiter=",",
- )[numBurnSamples::occurrence, :]
+ )[num_burn_samples::occurrence, :]
# load underlying data
- trueData = np.loadtxt(
- "Data/" + model.getModelName() + "Data.csv", delimiter=","
- )
+ trueData = np.loadtxt("Data/" + model.name + "Data.csv", delimiter=",")
# if available, load also the true parameter values
if artificialModel:
trueParams = np.loadtxt(
- "Data/" + model.getModelName() + "Params.csv", delimiter=","
+ "Data/" + model.name + "Params.csv", delimiter=","
)
# compute the upper and lower bound of each data dimension
# this serves as the identical scaling of every data plot
- upperBoundsSimResults = np.max(emceeSimResults, 0)
- lowerBoundsSimResults = np.min(emceeSimResults, 0)
+ upper_boundsSimResults = np.max(emceeSimResults, 0)
+ lower_boundsSimResults = np.min(emceeSimResults, 0)
# do the same for the parameters
- upperBoundsParams = np.max(emceeParams, 0)
- lowerBoundsParams = np.min(emceeParams, 0)
+ upper_boundsParams = np.max(emceeParams, 0)
+ lower_boundsParams = np.min(emceeParams, 0)
# set the image quality by defining dots per inch
dpi = 1000
@@ -309,65 +332,73 @@ def plotSpiderWebs(model: Model, numBurnSamples, occurrence):
# create all web figures
emceeSimResultsWeb = singleWeb(
emceeSimResults,
- lowerBoundsSimResults,
- upperBoundsSimResults,
+ lower_boundsSimResults,
+ upper_boundsSimResults,
colorYApprox,
dpi,
)
plt.savefig(
- model.getApplicationPath() + "/SpiderWebs/emceeSimResults.png",
+ model.get_application_path() + "/SpiderWebs/emceeSimResults.png",
dpi=dpi,
)
trueDataWeb = singleWeb(
- trueData, lowerBoundsSimResults, upperBoundsSimResults, colorY, dpi
+ trueData, lower_boundsSimResults, upper_boundsSimResults, colorY, dpi
)
plt.savefig(
- model.getApplicationPath() + "/SpiderWebs/trueData.png",
+ model.get_application_path() + "/SpiderWebs/trueData.png",
dpi=dpi,
)
emceeParamsWeb = singleWeb(
- emceeParams, lowerBoundsParams, upperBoundsParams, colorQApprox, dpi
+ emceeParams, lower_boundsParams, upper_boundsParams, colorQApprox, dpi
)
plt.savefig(
- model.getApplicationPath() + "/SpiderWebs/emceeParams.png",
+ model.get_application_path() + "/SpiderWebs/emceeParams.png",
dpi=dpi,
)
if artificialModel:
trueParamsWeb = singleWeb(
- trueParams, lowerBoundsParams, upperBoundsParams, colorQ, dpi
+ trueParams, lower_boundsParams, upper_boundsParams, colorQ, dpi
)
plt.savefig(
- "Applications/"
- + model.getModelName()
- + "/SpiderWebs/trueParams.png",
+ "Applications/" + model.name + "/SpiderWebs/trueParams.png",
dpi=dpi,
)
-def singleWeb(matrix, lowerBounds, upperBounds, color, dpi):
+def singleWeb(matrix, lower_bounds, upper_bounds, color, dpi):
"""Create a single spider web plot for one data or parameter matrix and given bounds
Input: matrix (2D np.array of size #samples x #dimensions)
- lowerBounds (np.arraay of size #dimensions that defines the lower bound of all regarded values)
- upperBounds (np.arraay of size #dimensions that defines the upper bound of all regarded values)
+           lower_bounds (np.array of size #dimensions that defines the lower bound of all regarded values)
+           upper_bounds (np.array of size #dimensions that defines the upper bound of all regarded values)
color (np.array with 3 entries indicating RGB values of the plot)
dpi (int that defines the quality of the image)
Output: web (matplotlib figure representing the single spider web)
+
+ Args:
+ matrix:
+ lower_bounds:
+ upper_bounds:
+ color:
+ dpi:
+
+ Returns:
+
"""
# extract matrix dimensions
- numSamples, numDims = matrix.shape
+ num_samples, numDims = matrix.shape
# create a normalized matrix where each column aka. dimension is scaled to [0, 1]
- normMatrix = (matrix - lowerBounds) / (upperBounds - lowerBounds)
+ normMatrix = (matrix - lower_bounds) / (upper_bounds - lower_bounds)
# create an augmented matrix that is identical to the normalized matrix
# except for an additional last column that is obtained by copying the first one
- augMatrix = np.zeros((numSamples, numDims + 1))
+ augMatrix = np.zeros((num_samples, numDims + 1))
augMatrix[:, 0:numDims] = normMatrix
# copy the first column to the end
@@ -396,13 +427,13 @@ def singleWeb(matrix, lowerBounds, upperBounds, color, dpi):
plt.xticks([])
plt.yticks([])
- for i in range(numSamples):
+ for i in range(num_samples):
# modColor = color*(1 - 0.3*np.random.rand(3))
plt.plot(
xProjCoords[i, :],
yProjCoords[i, :],
color=color,
- linewidth=1000.0 / numSamples,
+ linewidth=1000.0 / num_samples,
alpha=0.05,
)
@@ -423,20 +454,24 @@ def plotTest(
"""Visualize the results of EPI applied to a model with 2 parameters and
2 output dimensions as surface plots.
- :param model: model from which the results shall be loaded
- :type model: Model
- :param dataPlotResolution: number of grid points per data dimension, defaults to 25
- :type dataPlotResolution: int, optional
- :param paramPlotResolution: number of grid points per parameter dimension
- :type dataPlotResolution: int, optional
+ Args:
+ model(Model): model from which the results shall be loaded
+ dataPlotResolution(int, optional): number of grid points per data dimension, defaults to 25
+ paramPlotResolution: number of grid points per parameter dimension
+ model: Model:
+ dataPlotResolution: int: (Default value = 25)
+ paramPlotResolution: int: (Default value = 25)
+
+ Returns:
+
"""
# ---------------------------------------------------------------------------
# First, we load and visualize the underlying data
(
- dataDim,
+ data_dim,
data,
- dataStdevs,
+ data_stdevs,
) = model.dataLoader()
# create the grid over which the data KDE will be evaluated
@@ -457,7 +492,7 @@ def plotTest(
# define the evaluation grid point
dataEvalPoint = np.array([dataxMesh[i, j], datayMesh[i, j]])
# evaluate the data density at the defined grid point
- dataEvals[i, j] = evalKDEGauss(data, dataEvalPoint, dataStdevs)
+ dataEvals[i, j] = eval_kde_gauss(data, dataEvalPoint, data_stdevs)
# plot the data KDE using a surface plot
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
@@ -475,13 +510,13 @@ def plotTest(
# ---------------------------------------------------------------------------
# Second, we load the emcee parameter sampling results and als visualize them
- paramChain = np.loadtxt(
- model.getApplicationPath() + "/OverallParams.csv",
+ param_chain = np.loadtxt(
+ model.get_application_path() + "/OverallParams.csv",
delimiter=",",
)
# calculate reasonable standard deviations for the KDE
- paramChainStdevs = calcKernelWidth(paramChain)
+ paramChainStdevs = calc_kernel_width(param_chain)
# define the grid over which the inferred parameter density will be evaluated
paramxGrid = np.linspace(0.8, 2.2, paramPlotResolution)
@@ -498,8 +533,8 @@ def plotTest(
# define the evaluation parameter point
paramEvalPoint = np.array([paramxMesh[i, j], paramyMesh[i, j]])
# evaluate the parameter KDE at the defined grid point
- paramEvals[i, j] = evalKDEGauss(
- paramChain, paramEvalPoint, paramChainStdevs
+ paramEvals[i, j] = eval_kde_gauss(
+ param_chain, paramEvalPoint, paramChainStdevs
)
# plot the inferred parameter distribution
@@ -525,12 +560,12 @@ def plotTest(
# Load the MCMC simulation results
simResultsChain = np.loadtxt(
- model.getApplicationPath() + "/OverallSimResults.csv",
+ model.get_application_path() + "/OverallSimResults.csv",
delimiter=",",
)
# calculate reasonable standard deviations for the KDE
- simResultsChainStdevs = calcKernelWidth(simResultsChain)
+ simResultsChainStdevs = calc_kernel_width(simResultsChain)
# allocate storage for the data density evaluation
simResultsEvals = np.zeros((dataPlotResolution, dataPlotResolution))
@@ -541,7 +576,7 @@ def plotTest(
# define the evaluation grid point
simResultsEvalPoint = np.array([dataxMesh[i, j], datayMesh[i, j]])
# evaluate the data density at the defined grid point
- simResultsEvals[i, j] = evalKDEGauss(
+ simResultsEvals[i, j] = eval_kde_gauss(
simResultsChain, simResultsEvalPoint, simResultsChainStdevs
)
@@ -563,12 +598,16 @@ def plotTest(
def plotGridResults(model: Model) -> None:
"""The temperature model for artificial and real data is evaluated over a grid and plotted
- :param model: _description_
- :type model: Model
+ Args:
+ model(Model): _description_
+ model: Model:
+
+ Returns:
+
"""
(
- dataDim,
+ data_dim,
data,
stdevs,
) = model.dataLoader()
@@ -577,7 +616,7 @@ def plotGridResults(model: Model) -> None:
# TODO: Fix var names etc.
trueParams = paramStdevs = None
- artificialModel = model.isArtificial()
+ artificialModel = model.is_artificial()
if artificialModel:
trueParams, paramStdevs = model.paramLoader()
@@ -585,7 +624,7 @@ def plotGridResults(model: Model) -> None:
"Applications/Temperature/Latitudes.csv", delimiter=","
)
trueLatitudes = rawTrueLatitudes[..., np.newaxis]
- trueLatitudesStdevs = calcKernelWidth(trueLatitudes)
+ trueLatitudesStdevs = calc_kernel_width(trueLatitudes)
resolution = -1
if artificialModel:
@@ -598,35 +637,35 @@ def plotGridResults(model: Model) -> None:
trueDensity = np.zeros(resolution)
trafoDensity = np.zeros(resolution)
- if model.getModelName() == "TemperatureArtificial":
+ if model.name == "TemperatureArtificial":
simulatedTemperatures = np.zeros(resolution)
for i in range(resolution):
- trueDensity[i] = evalKDEGauss(
+ trueDensity[i] = eval_kde_gauss(
trueLatitudes,
np.array([latitudesGrid[i]]),
trueLatitudesStdevs,
)
- trafoDensity[i], _ = evalLogTransformedDensity(
+ trafoDensity[i], _ = eval_log_transformed_density(
model, np.array([latitudesGrid[i]]), data, stdevs
)
- simulatedTemperatures[i] = evalKDEGauss(
+ simulatedTemperatures[i] = eval_kde_gauss(
data, np.array([temperaturesGrid[i]]), stdevs
)
- elif model.getModelName() == "Temperature":
+ elif model.name == "Temperature":
measuredTemperatures = np.zeros(resolution)
for i in range(resolution):
- trueDensity[i] = evalKDEGauss(
+ trueDensity[i] = eval_kde_gauss(
trueLatitudes,
np.array([latitudesGrid[i]]),
trueLatitudesStdevs,
)
- trafoDensity[i], _ = evalLogTransformedDensity(
+ trafoDensity[i], _ = eval_log_transformed_density(
model, np.array([latitudesGrid[i]]), data, stdevs
)
- measuredTemperatures[i] = evalKDEGauss(
+ measuredTemperatures[i] = eval_kde_gauss(
data, np.array([temperaturesGrid[i]]), stdevs
)
@@ -646,7 +685,7 @@ def plotGridResults(model: Model) -> None:
inferredTemperatures = np.zeros(resolution)
for i in range(resolution):
- inferredTemperatures[i] = evalKDEGauss(
+ inferredTemperatures[i] = eval_kde_gauss(
inferredTemperaturesSample,
np.array([temperaturesGrid[i]]),
stdevs,
@@ -671,7 +710,7 @@ def plotGridResults(model: Model) -> None:
)
plt.legend()
plt.savefig(
- "Images/" + model.getModelName() + "/TrueLatSample.svg",
+ "Images/" + model.name + "/TrueLatSample.svg",
bbox_inches="tight",
)
plt.show()
@@ -689,9 +728,7 @@ def plotGridResults(model: Model) -> None:
label="True Latitudes (KDE)",
)
plt.legend()
- plt.savefig(
- "Images/model.getModelName()/TrueLatKDE.svg", bbox_inches="tight"
- )
+ plt.savefig("Images/model.name/TrueLatKDE.svg", bbox_inches="tight")
plt.show()
sim_measure_temp = (
@@ -718,7 +755,7 @@ def plotGridResults(model: Model) -> None:
)
plt.legend()
plt.savefig(
- "Images/" + model.getModelName() + "/" + name + "TempSample.svg",
+ "Images/" + model.name + "/" + name + "TempSample.svg",
bbox_inches="tight",
)
plt.show()
@@ -737,7 +774,7 @@ def plotGridResults(model: Model) -> None:
plt.legend()
plt.savefig(
- "Images/" + model.getModelName() + "/" + name + "TempKDE.svg",
+ "Images/" + model.name + "/" + name + "TempKDE.svg",
bbox_inches="tight",
)
plt.show()
@@ -763,7 +800,7 @@ def plotGridResults(model: Model) -> None:
)
plt.legend()
plt.savefig(
- "Images/" + model.getModelName() + "/TrueLatVsITAKDE.svg",
+ "Images/" + model.name + "/TrueLatVsITAKDE.svg",
bbox_inches="tight",
)
plt.show()
@@ -825,7 +862,7 @@ def plotGridResults(model: Model) -> None:
)
plt.show()
- if model.getModelName() == "Temperature":
+ if model.name == "Temperature":
# plot ITA inferred latitude density alone
plt.figure(figsize=(6, 4))
plt.axis([-0.05, np.pi / 2.0 + 0.05, 0.0, 1.6])
diff --git a/epi/plotting/plotter.py b/epi/plotting/plotter.py
index 2072e8ab..32825ca9 100644
--- a/epi/plotting/plotter.py
+++ b/epi/plotting/plotter.py
@@ -8,7 +8,7 @@
import matplotlib.pyplot as plt
import numpy as np
-from epi.core.kde import evalKDEGauss
+from epi.core.kde import eval_kde_gauss
from epi.core.model import Model
# Colors
@@ -26,12 +26,16 @@ def plotKDEoverGrid(
) -> None:
"""Plot the 1D kernel density estimation of different data sets over a grid
- :param data: array of array of samples
- :type data: np.ndarray
- :param stdevs: one kernel standard deviation for each dimension
- :type stdevs: np.ndarray
- :param resolution: _description_, defaults to 101
- :type resolution: int, optional
+ Args:
+ data(np.ndarray): array of array of samples
+ stdevs(np.ndarray): one kernel standard deviation for each dimension
+ resolution(int, optional): _description_, defaults to 101
+ data: np.ndarray:
+ stdevs: np.ndarray:
+ resolution: int: (Default value = 101)
+
+ Returns:
+
"""
for dim in range(data.shape[1]):
@@ -45,7 +49,7 @@ def plotKDEoverGrid(
evaluations = np.zeros(resolution)
for i in range(resolution):
- evaluations[i] = evalKDEGauss(
+ evaluations[i] = eval_kde_gauss(
np.transpose(np.array([data[:, dim]])),
np.array([evalPoints[i]]),
np.array([stdevs[dim]]),
@@ -58,6 +62,8 @@ def plotKDEoverGrid(
class DataParamEnum(Enum):
+ """ """
+
Data = (0,)
Params = 1
@@ -66,11 +72,15 @@ class DataParamEnum(Enum):
def plotDataSamples(model: Model):
"""Scatter plot of data samples?
- :param model: _description_
- :type model: Model
+ Args:
+ model(Model): _description_
+ model: Model:
+
+ Returns:
+
"""
- artificialModel = model.isArtificial()
+ artificialModel = model.is_artificial()
name = "Sim" if artificialModel else "Meas"
sim_measure_label = (
"Simulation data" if artificialModel else "Measured data"
@@ -92,10 +102,10 @@ def plotDataSamples(model: Model):
# if not artificial: plot inferred data samples from result param samples
# if not artificialModel:
# # Second, we load the emcee parameter sampling results and als visualize them
- # simResults = model.loadSimResults(0,1)[1] # Picking simResults
+ # sim_results = model.load_sim_results(0,1)[1] # Picking sim_results
# plt.scatter(
- # simResults[:,dim],
- # np.zeros(simResults.shape[0]),
+ # sim_results[:,dim],
+ # np.zeros(sim_results.shape[0]),
# color=colorYApprox,
# alpha=0.1,
# label="Inferred Data samples"
@@ -106,8 +116,12 @@ def plotDataSamples(model: Model):
def plotDataKDE(model: Model):
"""Continuos plot of data kde?
- :param model: _description_
- :type model: Model
+ Args:
+ model(Model): _description_
+ model: Model:
+
+ Returns:
+
"""
# plot data kde
# if not artificial: plot inferred data kde from inferred data samples
@@ -117,8 +131,12 @@ def plotDataKDE(model: Model):
def plotParamSamples(model: Model):
"""Scatter plot of param samples?
- :param model: _description_
- :type model: Model
+ Args:
+ model(Model): _description_
+ model: Model:
+
+ Returns:
+
"""
# if artificial: plot param samples from file
# else: sample from inferred param distr to get samples
@@ -128,11 +146,15 @@ def plotParamSamples(model: Model):
def plotParamKDE(model: Model):
"""Continuos plot of param kde?
- :param model: _description_
- :type model: Model
+ Args:
+ model(Model): _description_
+ model: Model:
+
+ Returns:
+
"""
# if artificial: plot param kde from param samples file
- # plot inferred param kde from paramChain sim results
+ # plot inferred param kde from param_chain sim results
pass
@@ -140,9 +162,13 @@ def sampleFromResults(model: Model, sampleSize: int = 1000):
"""Samples from the calculated param distribution
and calculates the data points for these parameters
- :param model: The model from which the results shall be loaded
- :type model: Model
- :param sampleSize: number of drawn samples, defaults to 1000
- :type sampleSize: int, optional
+ Args:
+ model(Model): The model from which the results shall be loaded
+ sampleSize(int, optional): number of drawn samples, defaults to 1000
+ model: Model:
+ sampleSize: int: (Default value = 1000)
+
+ Returns:
+
"""
pass
diff --git a/poetry.lock b/poetry.lock
index 854e82cf..8f0731e9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,5 +1,20 @@
# This file is automatically @generated by Poetry and should not be changed by hand.
+[[package]]
+name = "accessible-pygments"
+version = "0.0.3"
+description = "A collection of accessible pygments styles"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "accessible-pygments-0.0.3.tar.gz", hash = "sha256:0917b507646a6b3393eb49091a62506f6b2a3935f5d95e4d1249dc505f8a4eae"},
+ {file = "accessible_pygments-0.0.3-py2.py3-none-any.whl", hash = "sha256:6a643a2979b1f14fead79a8bbf51a6ae09e9df70d5ed60bb61a6eb9092cdcb81"},
+]
+
+[package.dependencies]
+pygments = ">=1.5"
+
[[package]]
name = "alabaster"
version = "0.7.13"
@@ -12,6 +27,38 @@ files = [
{file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
]
+[[package]]
+name = "amici"
+version = "0.16.1"
+description = "Advanced multi-language Interface to CVODES and IDAS"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "amici-0.16.1.tar.gz", hash = "sha256:aec799184cc96fcbda075ab2a4599890ab42474145e064cc57da995e38a49b28"},
+]
+
+[package.dependencies]
+h5py = "*"
+mpmath = "*"
+numpy = [
+ {version = ">=1.17.5", markers = "python_version == \"3.8\""},
+ {version = ">=1.19.3", markers = "python_version == \"3.9\""},
+ {version = ">=1.21.4", markers = "python_version >= \"3.10\""},
+]
+pandas = "*"
+pkgconfig = "*"
+python-libsbml = "*"
+setuptools = ">=48"
+sympy = ">=1.9"
+toposort = "*"
+wurlitzer = "*"
+
+[package.extras]
+petab = ["petab (>=0.1.27)"]
+pysb = ["pysb (>=1.13.1)"]
+test = ["coverage", "pytest", "pytest-cov", "pytest-rerunfailures", "shyaml"]
+
[[package]]
name = "appdirs"
version = "1.4.4"
@@ -24,6 +71,36 @@ files = [
{file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
]
+[[package]]
+name = "appnope"
+version = "0.1.3"
+description = "Disable App Nap on macOS >= 10.9"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
+ {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
+]
+
+[[package]]
+name = "asttokens"
+version = "2.2.1"
+description = "Annotate AST trees with source code positions"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"},
+ {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"},
+]
+
+[package.dependencies]
+six = "*"
+
+[package.extras]
+test = ["astroid", "pytest"]
+
[[package]]
name = "attrs"
version = "22.2.0"
@@ -45,18 +122,49 @@ tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy
[[package]]
name = "babel"
-version = "2.11.0"
+version = "2.12.1"
description = "Internationalization utilities"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "Babel-2.11.0-py3-none-any.whl", hash = "sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe"},
- {file = "Babel-2.11.0.tar.gz", hash = "sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6"},
+ {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"},
+ {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"},
]
[package.dependencies]
-pytz = ">=2015.7"
+pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
+
+[[package]]
+name = "backcall"
+version = "0.2.0"
+description = "Specifications for callback functions passed in to an API"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
+ {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
+]
+
+[[package]]
+name = "beautifulsoup4"
+version = "4.11.2"
+description = "Screen-scraping library"
+category = "dev"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.11.2-py3-none-any.whl", hash = "sha256:0e79446b10b3ecb499c1556f7e228a53e64a2bfcebd455f370d8927cb5b59e39"},
+ {file = "beautifulsoup4-4.11.2.tar.gz", hash = "sha256:bc4bdda6717de5a2987436fb8d72f45dc90dd856bdfd512a1314ce90349a0106"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
[[package]]
name = "black"
@@ -197,100 +305,87 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.0.1"
+version = "3.1.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
-python-versions = "*"
-files = [
- {file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"},
- {file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"},
- {file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"},
- {file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"},
- {file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"},
- {file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"},
- {file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"},
- {file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"},
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"},
+ {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"},
]
[[package]]
@@ -320,6 +415,24 @@ files = [
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+[[package]]
+name = "comm"
+version = "0.1.2"
+description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "comm-0.1.2-py3-none-any.whl", hash = "sha256:9f3abf3515112fa7c55a42a6a5ab358735c9dccc8b5910a9d8e3ef5998130666"},
+ {file = "comm-0.1.2.tar.gz", hash = "sha256:3e2f5826578e683999b93716285b3b1f344f157bf75fa9ce0a797564e742f062"},
+]
+
+[package.dependencies]
+traitlets = ">=5.3"
+
+[package.extras]
+test = ["pytest"]
+
[[package]]
name = "contourpy"
version = "1.0.7"
@@ -397,63 +510,63 @@ test-no-images = ["pytest"]
[[package]]
name = "coverage"
-version = "7.1.0"
+version = "7.2.1"
description = "Code coverage measurement for Python"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "coverage-7.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3b946bbcd5a8231383450b195cfb58cb01cbe7f8949f5758566b881df4b33baf"},
- {file = "coverage-7.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec8e767f13be637d056f7e07e61d089e555f719b387a7070154ad80a0ff31801"},
- {file = "coverage-7.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4a5a5879a939cb84959d86869132b00176197ca561c664fc21478c1eee60d75"},
- {file = "coverage-7.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b643cb30821e7570c0aaf54feaf0bfb630b79059f85741843e9dc23f33aaca2c"},
- {file = "coverage-7.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32df215215f3af2c1617a55dbdfb403b772d463d54d219985ac7cd3bf124cada"},
- {file = "coverage-7.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:33d1ae9d4079e05ac4cc1ef9e20c648f5afabf1a92adfaf2ccf509c50b85717f"},
- {file = "coverage-7.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:29571503c37f2ef2138a306d23e7270687c0efb9cab4bd8038d609b5c2393a3a"},
- {file = "coverage-7.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:63ffd21aa133ff48c4dff7adcc46b7ec8b565491bfc371212122dd999812ea1c"},
- {file = "coverage-7.1.0-cp310-cp310-win32.whl", hash = "sha256:4b14d5e09c656de5038a3f9bfe5228f53439282abcab87317c9f7f1acb280352"},
- {file = "coverage-7.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:8361be1c2c073919500b6601220a6f2f98ea0b6d2fec5014c1d9cfa23dd07038"},
- {file = "coverage-7.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:da9b41d4539eefd408c46725fb76ecba3a50a3367cafb7dea5f250d0653c1040"},
- {file = "coverage-7.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5b15ed7644ae4bee0ecf74fee95808dcc34ba6ace87e8dfbf5cb0dc20eab45a"},
- {file = "coverage-7.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d12d076582507ea460ea2a89a8c85cb558f83406c8a41dd641d7be9a32e1274f"},
- {file = "coverage-7.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2617759031dae1bf183c16cef8fcfb3de7617f394c813fa5e8e46e9b82d4222"},
- {file = "coverage-7.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4e4881fa9e9667afcc742f0c244d9364d197490fbc91d12ac3b5de0bf2df146"},
- {file = "coverage-7.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9d58885215094ab4a86a6aef044e42994a2bd76a446dc59b352622655ba6621b"},
- {file = "coverage-7.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ffeeb38ee4a80a30a6877c5c4c359e5498eec095878f1581453202bfacc8fbc2"},
- {file = "coverage-7.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3baf5f126f30781b5e93dbefcc8271cb2491647f8283f20ac54d12161dff080e"},
- {file = "coverage-7.1.0-cp311-cp311-win32.whl", hash = "sha256:ded59300d6330be27bc6cf0b74b89ada58069ced87c48eaf9344e5e84b0072f7"},
- {file = "coverage-7.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:6a43c7823cd7427b4ed763aa7fb63901ca8288591323b58c9cd6ec31ad910f3c"},
- {file = "coverage-7.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7a726d742816cb3a8973c8c9a97539c734b3a309345236cd533c4883dda05b8d"},
- {file = "coverage-7.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc7c85a150501286f8b56bd8ed3aa4093f4b88fb68c0843d21ff9656f0009d6a"},
- {file = "coverage-7.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5b4198d85a3755d27e64c52f8c95d6333119e49fd001ae5798dac872c95e0f8"},
- {file = "coverage-7.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddb726cb861c3117a553f940372a495fe1078249ff5f8a5478c0576c7be12050"},
- {file = "coverage-7.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:51b236e764840a6df0661b67e50697aaa0e7d4124ca95e5058fa3d7cbc240b7c"},
- {file = "coverage-7.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7ee5c9bb51695f80878faaa5598040dd6c9e172ddcf490382e8aedb8ec3fec8d"},
- {file = "coverage-7.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c31b75ae466c053a98bf26843563b3b3517b8f37da4d47b1c582fdc703112bc3"},
- {file = "coverage-7.1.0-cp37-cp37m-win32.whl", hash = "sha256:3b155caf3760408d1cb903b21e6a97ad4e2bdad43cbc265e3ce0afb8e0057e73"},
- {file = "coverage-7.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2a60d6513781e87047c3e630b33b4d1e89f39836dac6e069ffee28c4786715f5"},
- {file = "coverage-7.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2cba5c6db29ce991029b5e4ac51eb36774458f0a3b8d3137241b32d1bb91f06"},
- {file = "coverage-7.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:beeb129cacea34490ffd4d6153af70509aa3cda20fdda2ea1a2be870dfec8d52"},
- {file = "coverage-7.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c45948f613d5d18c9ec5eaa203ce06a653334cf1bd47c783a12d0dd4fd9c851"},
- {file = "coverage-7.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef382417db92ba23dfb5864a3fc9be27ea4894e86620d342a116b243ade5d35d"},
- {file = "coverage-7.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c7c0d0827e853315c9bbd43c1162c006dd808dbbe297db7ae66cd17b07830f0"},
- {file = "coverage-7.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e5cdbb5cafcedea04924568d990e20ce7f1945a1dd54b560f879ee2d57226912"},
- {file = "coverage-7.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9817733f0d3ea91bea80de0f79ef971ae94f81ca52f9b66500c6a2fea8e4b4f8"},
- {file = "coverage-7.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:218fe982371ac7387304153ecd51205f14e9d731b34fb0568181abaf7b443ba0"},
- {file = "coverage-7.1.0-cp38-cp38-win32.whl", hash = "sha256:04481245ef966fbd24ae9b9e537ce899ae584d521dfbe78f89cad003c38ca2ab"},
- {file = "coverage-7.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8ae125d1134bf236acba8b83e74c603d1b30e207266121e76484562bc816344c"},
- {file = "coverage-7.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2bf1d5f2084c3932b56b962a683074a3692bce7cabd3aa023c987a2a8e7612f6"},
- {file = "coverage-7.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:98b85dd86514d889a2e3dd22ab3c18c9d0019e696478391d86708b805f4ea0fa"},
- {file = "coverage-7.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38da2db80cc505a611938d8624801158e409928b136c8916cd2e203970dde4dc"},
- {file = "coverage-7.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3164d31078fa9efe406e198aecd2a02d32a62fecbdef74f76dad6a46c7e48311"},
- {file = "coverage-7.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db61a79c07331e88b9a9974815c075fbd812bc9dbc4dc44b366b5368a2936063"},
- {file = "coverage-7.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ccb092c9ede70b2517a57382a601619d20981f56f440eae7e4d7eaafd1d1d09"},
- {file = "coverage-7.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:33ff26d0f6cc3ca8de13d14fde1ff8efe1456b53e3f0273e63cc8b3c84a063d8"},
- {file = "coverage-7.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d47dd659a4ee952e90dc56c97d78132573dc5c7b09d61b416a9deef4ebe01a0c"},
- {file = "coverage-7.1.0-cp39-cp39-win32.whl", hash = "sha256:d248cd4a92065a4d4543b8331660121b31c4148dd00a691bfb7a5cdc7483cfa4"},
- {file = "coverage-7.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:7ed681b0f8e8bcbbffa58ba26fcf5dbc8f79e7997595bf071ed5430d8c08d6f3"},
- {file = "coverage-7.1.0-pp37.pp38.pp39-none-any.whl", hash = "sha256:755e89e32376c850f826c425ece2c35a4fc266c081490eb0a841e7c1cb0d3bda"},
- {file = "coverage-7.1.0.tar.gz", hash = "sha256:10188fe543560ec4874f974b5305cd1a8bdcfa885ee00ea3a03733464c4ca265"},
+ {file = "coverage-7.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49567ec91fc5e0b15356da07a2feabb421d62f52a9fff4b1ec40e9e19772f5f8"},
+ {file = "coverage-7.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2ef6cae70168815ed91388948b5f4fcc69681480a0061114db737f957719f03"},
+ {file = "coverage-7.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3004765bca3acd9e015794e5c2f0c9a05587f5e698127ff95e9cfba0d3f29339"},
+ {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cca7c0b7f5881dfe0291ef09ba7bb1582cb92ab0aeffd8afb00c700bf692415a"},
+ {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2167d116309f564af56f9aa5e75ef710ef871c5f9b313a83050035097b56820"},
+ {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cb5f152fb14857cbe7f3e8c9a5d98979c4c66319a33cad6e617f0067c9accdc4"},
+ {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:87dc37f16fb5e3a28429e094145bf7c1753e32bb50f662722e378c5851f7fdc6"},
+ {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e191a63a05851f8bce77bc875e75457f9b01d42843f8bd7feed2fc26bbe60833"},
+ {file = "coverage-7.2.1-cp310-cp310-win32.whl", hash = "sha256:e3ea04b23b114572b98a88c85379e9e9ae031272ba1fb9b532aa934c621626d4"},
+ {file = "coverage-7.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:0cf557827be7eca1c38a2480484d706693e7bb1929e129785fe59ec155a59de6"},
+ {file = "coverage-7.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570c21a29493b350f591a4b04c158ce1601e8d18bdcd21db136fbb135d75efa6"},
+ {file = "coverage-7.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e872b082b32065ac2834149dc0adc2a2e6d8203080501e1e3c3c77851b466f9"},
+ {file = "coverage-7.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac6343bae03b176e9b58104a9810df3cdccd5cfed19f99adfa807ffbf43cf9b"},
+ {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abacd0a738e71b20e224861bc87e819ef46fedba2fb01bc1af83dfd122e9c319"},
+ {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9256d4c60c4bbfec92721b51579c50f9e5062c21c12bec56b55292464873508"},
+ {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80559eaf6c15ce3da10edb7977a1548b393db36cbc6cf417633eca05d84dd1ed"},
+ {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bd7e628f6c3ec4e7d2d24ec0e50aae4e5ae95ea644e849d92ae4805650b4c4e"},
+ {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09643fb0df8e29f7417adc3f40aaf379d071ee8f0350ab290517c7004f05360b"},
+ {file = "coverage-7.2.1-cp311-cp311-win32.whl", hash = "sha256:1b7fb13850ecb29b62a447ac3516c777b0e7a09ecb0f4bb6718a8654c87dfc80"},
+ {file = "coverage-7.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:617a94ada56bbfe547aa8d1b1a2b8299e2ec1ba14aac1d4b26a9f7d6158e1273"},
+ {file = "coverage-7.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8649371570551d2fd7dee22cfbf0b61f1747cdfb2b7587bb551e4beaaa44cb97"},
+ {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d2b9b5e70a21474c105a133ba227c61bc95f2ac3b66861143ce39a5ea4b3f84"},
+ {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82c988954722fa07ec5045c57b6d55bc1a0890defb57cf4a712ced65b26ddd"},
+ {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:861cc85dfbf55a7a768443d90a07e0ac5207704a9f97a8eb753292a7fcbdfcfc"},
+ {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0339dc3237c0d31c3b574f19c57985fcbe494280153bbcad33f2cdf469f4ac3e"},
+ {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5928b85416a388dd557ddc006425b0c37e8468bd1c3dc118c1a3de42f59e2a54"},
+ {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d3843ca645f62c426c3d272902b9de90558e9886f15ddf5efe757b12dd376f5"},
+ {file = "coverage-7.2.1-cp37-cp37m-win32.whl", hash = "sha256:6a034480e9ebd4e83d1aa0453fd78986414b5d237aea89a8fdc35d330aa13bae"},
+ {file = "coverage-7.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6fce673f79a0e017a4dc35e18dc7bb90bf6d307c67a11ad5e61ca8d42b87cbff"},
+ {file = "coverage-7.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f099da6958ddfa2ed84bddea7515cb248583292e16bb9231d151cd528eab657"},
+ {file = "coverage-7.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:97a3189e019d27e914ecf5c5247ea9f13261d22c3bb0cfcfd2a9b179bb36f8b1"},
+ {file = "coverage-7.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a81dbcf6c6c877986083d00b834ac1e84b375220207a059ad45d12f6e518a4e3"},
+ {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d2c3dde4c0b9be4b02067185136b7ee4681978228ad5ec1278fa74f5ca3e99"},
+ {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a209d512d157379cc9ab697cbdbb4cfd18daa3e7eebaa84c3d20b6af0037384"},
+ {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f3d07edb912a978915576a776756069dede66d012baa503022d3a0adba1b6afa"},
+ {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8dca3c1706670297851bca1acff9618455122246bdae623be31eca744ade05ec"},
+ {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b1991a6d64231a3e5bbe3099fb0dd7c9aeaa4275ad0e0aeff4cb9ef885c62ba2"},
+ {file = "coverage-7.2.1-cp38-cp38-win32.whl", hash = "sha256:22c308bc508372576ffa3d2dbc4824bb70d28eeb4fcd79d4d1aed663a06630d0"},
+ {file = "coverage-7.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:b0c0d46de5dd97f6c2d1b560bf0fcf0215658097b604f1840365296302a9d1fb"},
+ {file = "coverage-7.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4dd34a935de268a133e4741827ae951283a28c0125ddcdbcbba41c4b98f2dfef"},
+ {file = "coverage-7.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f8318ed0f3c376cfad8d3520f496946977abde080439d6689d7799791457454"},
+ {file = "coverage-7.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:834c2172edff5a08d78e2f53cf5e7164aacabeb66b369f76e7bb367ca4e2d993"},
+ {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4d70c853f0546855f027890b77854508bdb4d6a81242a9d804482e667fff6e6"},
+ {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a6450da4c7afc4534305b2b7d8650131e130610cea448ff240b6ab73d7eab63"},
+ {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:99f4dd81b2bb8fc67c3da68b1f5ee1650aca06faa585cbc6818dbf67893c6d58"},
+ {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bdd3f2f285ddcf2e75174248b2406189261a79e7fedee2ceeadc76219b6faa0e"},
+ {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f29351393eb05e6326f044a7b45ed8e38cb4dcc38570d12791f271399dc41431"},
+ {file = "coverage-7.2.1-cp39-cp39-win32.whl", hash = "sha256:e2b50ebc2b6121edf352336d503357321b9d8738bb7a72d06fc56153fd3f4cd8"},
+ {file = "coverage-7.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd5a12239c0006252244f94863f1c518ac256160cd316ea5c47fb1a11b25889a"},
+ {file = "coverage-7.2.1-pp37.pp38.pp39-none-any.whl", hash = "sha256:436313d129db7cf5b4ac355dd2bd3f7c7e5294af077b090b85de75f8458b8616"},
+ {file = "coverage-7.2.1.tar.gz", hash = "sha256:c77f2a9093ccf329dd523a9b2b3c854c20d2a3d968b6def3b820272ca6732242"},
]
[package.extras]
@@ -461,35 +574,35 @@ toml = ["tomli"]
[[package]]
name = "cryptography"
-version = "39.0.1"
+version = "39.0.2"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
python-versions = ">=3.6"
files = [
- {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"},
- {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"},
- {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"},
- {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"},
- {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"},
- {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"},
- {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"},
+ {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:2725672bb53bb92dc7b4150d233cd4b8c59615cd8288d495eaa86db00d4e5c06"},
+ {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:23df8ca3f24699167daf3e23e51f7ba7334d504af63a94af468f468b975b7dd7"},
+ {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:eb40fe69cfc6f5cdab9a5ebd022131ba21453cf7b8a7fd3631f45bbf52bed612"},
+ {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc0521cce2c1d541634b19f3ac661d7a64f9555135e9d8af3980965be717fd4a"},
+ {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd394c7896ed7821a6d13b24657c6a34b6e2650bd84ae063cf11ccffa4f1a97"},
+ {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:e8a0772016feeb106efd28d4a328e77dc2edae84dfbac06061319fdb669ff828"},
+ {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8f35c17bd4faed2bc7797d2a66cbb4f986242ce2e30340ab832e5d99ae60e011"},
+ {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b49a88ff802e1993b7f749b1eeb31134f03c8d5c956e3c125c75558955cda536"},
+ {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c682e736513db7d04349b4f6693690170f95aac449c56f97415c6980edef5"},
+ {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d7d84a512a59f4412ca8549b01f94be4161c94efc598bf09d027d67826beddc0"},
+ {file = "cryptography-39.0.2-cp36-abi3-win32.whl", hash = "sha256:c43ac224aabcbf83a947eeb8b17eaf1547bce3767ee2d70093b461f31729a480"},
+ {file = "cryptography-39.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:788b3921d763ee35dfdb04248d0e3de11e3ca8eb22e2e48fef880c42e1f3c8f9"},
+ {file = "cryptography-39.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d15809e0dbdad486f4ad0979753518f47980020b7a34e9fc56e8be4f60702fac"},
+ {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50cadb9b2f961757e712a9737ef33d89b8190c3ea34d0fb6675e00edbe35d074"},
+ {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:103e8f7155f3ce2ffa0049fe60169878d47a4364b277906386f8de21c9234aa1"},
+ {file = "cryptography-39.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6236a9610c912b129610eb1a274bdc1350b5df834d124fa84729ebeaf7da42c3"},
+ {file = "cryptography-39.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e944fe07b6f229f4c1a06a7ef906a19652bdd9fd54c761b0ff87e83ae7a30354"},
+ {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:35d658536b0a4117c885728d1a7032bdc9a5974722ae298d6c533755a6ee3915"},
+ {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30b1d1bfd00f6fc80d11300a29f1d8ab2b8d9febb6ed4a38a76880ec564fae84"},
+ {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e029b844c21116564b8b61216befabca4b500e6816fa9f0ba49527653cae2108"},
+ {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fa507318e427169ade4e9eccef39e9011cdc19534f55ca2f36ec3f388c1f70f3"},
+ {file = "cryptography-39.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8bc0008ef798231fac03fe7d26e82d601d15bd16f3afaad1c6113771566570f3"},
+ {file = "cryptography-39.0.2.tar.gz", hash = "sha256:bc5b871e977c8ee5a1bbc42fa8d19bcc08baf0c51cbf1586b0e87a2694dde42f"},
]
[package.dependencies]
@@ -517,6 +630,45 @@ files = [
{file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"},
]
+[[package]]
+name = "debugpy"
+version = "1.6.6"
+description = "An implementation of the Debug Adapter Protocol for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "debugpy-1.6.6-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:0ea1011e94416e90fb3598cc3ef5e08b0a4dd6ce6b9b33ccd436c1dffc8cd664"},
+ {file = "debugpy-1.6.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dff595686178b0e75580c24d316aa45a8f4d56e2418063865c114eef651a982e"},
+ {file = "debugpy-1.6.6-cp310-cp310-win32.whl", hash = "sha256:87755e173fcf2ec45f584bb9d61aa7686bb665d861b81faa366d59808bbd3494"},
+ {file = "debugpy-1.6.6-cp310-cp310-win_amd64.whl", hash = "sha256:72687b62a54d9d9e3fb85e7a37ea67f0e803aaa31be700e61d2f3742a5683917"},
+ {file = "debugpy-1.6.6-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:78739f77c58048ec006e2b3eb2e0cd5a06d5f48c915e2fc7911a337354508110"},
+ {file = "debugpy-1.6.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23c29e40e39ad7d869d408ded414f6d46d82f8a93b5857ac3ac1e915893139ca"},
+ {file = "debugpy-1.6.6-cp37-cp37m-win32.whl", hash = "sha256:7aa7e103610e5867d19a7d069e02e72eb2b3045b124d051cfd1538f1d8832d1b"},
+ {file = "debugpy-1.6.6-cp37-cp37m-win_amd64.whl", hash = "sha256:f6383c29e796203a0bba74a250615ad262c4279d398e89d895a69d3069498305"},
+ {file = "debugpy-1.6.6-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:23363e6d2a04d726bbc1400bd4e9898d54419b36b2cdf7020e3e215e1dcd0f8e"},
+ {file = "debugpy-1.6.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b5d1b13d7c7bf5d7cf700e33c0b8ddb7baf030fcf502f76fc061ddd9405d16c"},
+ {file = "debugpy-1.6.6-cp38-cp38-win32.whl", hash = "sha256:70ab53918fd907a3ade01909b3ed783287ede362c80c75f41e79596d5ccacd32"},
+ {file = "debugpy-1.6.6-cp38-cp38-win_amd64.whl", hash = "sha256:c05349890804d846eca32ce0623ab66c06f8800db881af7a876dc073ac1c2225"},
+ {file = "debugpy-1.6.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a771739902b1ae22a120dbbb6bd91b2cae6696c0e318b5007c5348519a4211c6"},
+ {file = "debugpy-1.6.6-cp39-cp39-win32.whl", hash = "sha256:549ae0cb2d34fc09d1675f9b01942499751d174381b6082279cf19cdb3c47cbe"},
+ {file = "debugpy-1.6.6-cp39-cp39-win_amd64.whl", hash = "sha256:de4a045fbf388e120bb6ec66501458d3134f4729faed26ff95de52a754abddb1"},
+ {file = "debugpy-1.6.6-py2.py3-none-any.whl", hash = "sha256:be596b44448aac14eb3614248c91586e2bc1728e020e82ef3197189aae556115"},
+ {file = "debugpy-1.6.6.zip", hash = "sha256:b9c2130e1c632540fbf9c2c88341493797ddf58016e7cba02e311de9b0a96b67"},
+]
+
+[[package]]
+name = "decorator"
+version = "5.1.1"
+description = "Decorators for Humans"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
+ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
+]
+
[[package]]
name = "diffrax"
version = "0.2.2"
@@ -607,6 +759,21 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "executing"
+version = "1.2.0"
+description = "Get the currently executing AST node of a frame, and other information"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"},
+ {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"},
+]
+
+[package.extras]
+tests = ["asttokens", "littleutils", "pytest", "rich"]
+
[[package]]
name = "filelock"
version = "3.9.0"
@@ -642,18 +809,18 @@ pyflakes = ">=3.0.0,<3.1.0"
[[package]]
name = "fonttools"
-version = "4.38.0"
+version = "4.39.0"
description = "Tools to manipulate font files"
category = "main"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "fonttools-4.38.0-py3-none-any.whl", hash = "sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb"},
- {file = "fonttools-4.38.0.zip", hash = "sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1"},
+ {file = "fonttools-4.39.0-py3-none-any.whl", hash = "sha256:f5e764e1fd6ad54dfc201ff32af0ba111bcfbe0d05b24540af74c63db4ed6390"},
+ {file = "fonttools-4.39.0.zip", hash = "sha256:909c104558835eac27faeb56be5a4c32694192dca123d073bf746ce9254054af"},
]
[package.extras]
-all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=14.0.0)", "xattr", "zopfli (>=0.1.4)"]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
interpolatable = ["munkres", "scipy"]
lxml = ["lxml (>=4.0,<5)"]
@@ -663,19 +830,52 @@ repacker = ["uharfbuzz (>=0.23.0)"]
symfont = ["sympy"]
type1 = ["xattr"]
ufo = ["fs (>=2.2.0,<3)"]
-unicode = ["unicodedata2 (>=14.0.0)"]
+unicode = ["unicodedata2 (>=15.0.0)"]
woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+[[package]]
+name = "h5py"
+version = "3.8.0"
+description = "Read and write HDF5 files from Python"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h5py-3.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:533d7dad466ddb7e3b30af274b630eb7c1a6e4ddf01d1c373a0334dc2152110a"},
+ {file = "h5py-3.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c873ba9fd4fa875ad62ce0e4891725e257a8fe7f5abdbc17e51a5d54819be55c"},
+ {file = "h5py-3.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3389b63222b1c7a158bb7fe69d11ca00066740ec5574596d47a2fe5317f563a"},
+ {file = "h5py-3.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:7f3350fc0a8407d668b13247861c2acd23f7f5fe7d060a3ad9b0820f5fcbcae0"},
+ {file = "h5py-3.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db03e3f2c716205fbdabb34d0848459840585225eb97b4f08998c743821ca323"},
+ {file = "h5py-3.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:36761693efbe53df179627a775476dcbc37727d6e920958277a7efbc18f1fb73"},
+ {file = "h5py-3.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33b15aae79e9147aebe1d0e54099cbcde8d65e3e227cd5b59e49b1272aa0e09d"},
+ {file = "h5py-3.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:9f6f6ffadd6bfa9b2c5b334805eb4b19ca0a5620433659d8f7fb86692c40a359"},
+ {file = "h5py-3.8.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8f55d9c6c84d7d09c79fb85979e97b81ec6071cc776a97eb6b96f8f6ec767323"},
+ {file = "h5py-3.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:377865821fe80ad984d003723d6f8890bd54ceeb5981b43c0313b9df95411b30"},
+ {file = "h5py-3.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0fef76e10b9216657fa37e7edff6d8be0709b25bd5066474c229b56cf0098df9"},
+ {file = "h5py-3.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:26ffc344ec9984d2cd3ca0265007299a8bac8d85c1ad48f4639d8d3aed2af171"},
+ {file = "h5py-3.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bacaa1c16810dd2b3e4417f8e730971b7c4d53d234de61fe4a918db78e80e1e4"},
+ {file = "h5py-3.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f47f757d1b76f0ecb8aa0508ec8d1b390df67a8b67ee2515dc1b046f3a1596ea"},
+ {file = "h5py-3.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f891b17e3a3e974e93f9e34e7cca9f530806543571ce078998676a555837d91d"},
+ {file = "h5py-3.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:290e00fa2de74a10688d1bac98d5a9cdd43f14f58e562c580b5b3dfbd358ecae"},
+ {file = "h5py-3.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:03890b1c123d024fb0239a3279737d5432498c1901c354f8b10d8221d1d16235"},
+ {file = "h5py-3.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49bc857635f935fa30e92e61ac1e87496df8f260a6945a3235e43a9890426866"},
+ {file = "h5py-3.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:5fd2252d1fc364ba0e93dd0b7089f4906b66805cb4e6aca7fa8874ac08649647"},
+ {file = "h5py-3.8.0.tar.gz", hash = "sha256:6fead82f0c4000cf38d53f9c030780d81bfa0220218aee13b90b7701c937d95f"},
+]
+
+[package.dependencies]
+numpy = ">=1.14.5"
+
[[package]]
name = "identify"
-version = "2.5.18"
+version = "2.5.19"
description = "File identification library for Python"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "identify-2.5.18-py2.py3-none-any.whl", hash = "sha256:93aac7ecf2f6abf879b8f29a8002d3c6de7086b8c28d88e1ad15045a15ab63f9"},
- {file = "identify-2.5.18.tar.gz", hash = "sha256:89e144fa560cc4cffb6ef2ab5e9fb18ed9f9b3cb054384bab4b95c12f6c309fe"},
+ {file = "identify-2.5.19-py2.py3-none-any.whl", hash = "sha256:3ee3533e7f6f5023157fbebbd5687bb4b698ce6f305259e0d24b2d7d9efb72bc"},
+ {file = "identify-2.5.19.tar.gz", hash = "sha256:4102ecd051f6884449e7359e55b38ba6cd7aafb6ef27b8e2b38495a5723ea106"},
]
[package.extras]
@@ -756,6 +956,79 @@ files = [
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
+[[package]]
+name = "ipykernel"
+version = "6.21.3"
+description = "IPython Kernel for Jupyter"
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "ipykernel-6.21.3-py3-none-any.whl", hash = "sha256:24ebd9715e317c185e37156ab3a87382410185230dde7aeffce389d6c7d4428a"},
+ {file = "ipykernel-6.21.3.tar.gz", hash = "sha256:c8ff581905d70e7299bc1473a2f7c113bec1744fb3746d58e5b4b93bd8ee7001"},
+]
+
+[package.dependencies]
+appnope = {version = "*", markers = "platform_system == \"Darwin\""}
+comm = ">=0.1.1"
+debugpy = ">=1.6.5"
+ipython = ">=7.23.1"
+jupyter-client = ">=6.1.12"
+jupyter-core = ">=4.12,<5.0.0 || >=5.1.0"
+matplotlib-inline = ">=0.1"
+nest-asyncio = "*"
+packaging = "*"
+psutil = "*"
+pyzmq = ">=20"
+tornado = ">=6.1"
+traitlets = ">=5.4.0"
+
+[package.extras]
+cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"]
+docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"]
+pyqt5 = ["pyqt5"]
+pyside6 = ["pyside6"]
+test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "ipython"
+version = "8.11.0"
+description = "IPython: Productive Interactive Computing"
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "ipython-8.11.0-py3-none-any.whl", hash = "sha256:5b54478e459155a326bf5f42ee4f29df76258c0279c36f21d71ddb560f88b156"},
+ {file = "ipython-8.11.0.tar.gz", hash = "sha256:735cede4099dbc903ee540307b9171fbfef4aa75cfcacc5a273b2cda2f02be04"},
+]
+
+[package.dependencies]
+appnope = {version = "*", markers = "sys_platform == \"darwin\""}
+backcall = "*"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+decorator = "*"
+jedi = ">=0.16"
+matplotlib-inline = "*"
+pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
+pickleshare = "*"
+prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0"
+pygments = ">=2.4.0"
+stack-data = "*"
+traitlets = ">=5"
+
+[package.extras]
+all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
+black = ["black"]
+doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
+kernel = ["ipykernel"]
+nbconvert = ["nbconvert"]
+nbformat = ["nbformat"]
+notebook = ["ipywidgets", "notebook"]
+parallel = ["ipyparallel"]
+qtconsole = ["qtconsole"]
+test = ["pytest (<7.1)", "pytest-asyncio", "testpath"]
+test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]
+
[[package]]
name = "isort"
version = "5.12.0"
@@ -831,14 +1104,14 @@ scipy = ">=1.5"
[[package]]
name = "jaxtyping"
-version = "0.2.12"
+version = "0.2.14"
description = "Type annotations and runtime checking for shape and dtype of JAX arrays, and PyTrees."
category = "main"
optional = false
-python-versions = "~=3.7"
+python-versions = "~=3.8"
files = [
- {file = "jaxtyping-0.2.12-py3-none-any.whl", hash = "sha256:ae17bb386d3f0b7af8306d910bec85be793bdad427e28b4ea7a23d409488fb70"},
- {file = "jaxtyping-0.2.12.tar.gz", hash = "sha256:7d3bca2bdbcb2f74d2f54d18d1957a2836a11dff09e44c2c023da254ddaf20e8"},
+ {file = "jaxtyping-0.2.14-py3-none-any.whl", hash = "sha256:91de0ef79a7ee497c4b5c08f096edbcfc7627bec71e865caaf6748e8175e8627"},
+ {file = "jaxtyping-0.2.14.tar.gz", hash = "sha256:5b78abbba18e0ca5b4c0870c965e204e0cdfadc1d385c24a0e1fcc15d6b3680d"},
]
[package.dependencies]
@@ -846,6 +1119,26 @@ numpy = ">=1.20.0"
typeguard = ">=2.13.3"
typing-extensions = ">=3.7.4.1"
+[[package]]
+name = "jedi"
+version = "0.18.2"
+description = "An autocompletion tool for Python that can be used for text editors."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"},
+ {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"},
+]
+
+[package.dependencies]
+parso = ">=0.8.0,<0.9.0"
+
+[package.extras]
+docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+
[[package]]
name = "jinja2"
version = "3.1.2"
@@ -864,6 +1157,51 @@ MarkupSafe = ">=2.0"
[package.extras]
i18n = ["Babel (>=2.7)"]
+[[package]]
+name = "jupyter-client"
+version = "8.0.3"
+description = "Jupyter protocol implementation and client libraries"
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jupyter_client-8.0.3-py3-none-any.whl", hash = "sha256:be48ac6bd659cbbddb7a674cf06b3b8afbf53f228253cf58bde604c03bd487b0"},
+ {file = "jupyter_client-8.0.3.tar.gz", hash = "sha256:ed65498bea6d876ef9d8da3e0db3dd33c5d129f5b2645f56ae03993782966bd0"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
+jupyter-core = ">=4.12,<5.0.0 || >=5.1.0"
+python-dateutil = ">=2.8.2"
+pyzmq = ">=23.0"
+tornado = ">=6.2"
+traitlets = ">=5.3"
+
+[package.extras]
+docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
+test = ["codecov", "coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"]
+
+[[package]]
+name = "jupyter-core"
+version = "5.2.0"
+description = "Jupyter core package. A base package on which Jupyter projects rely."
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jupyter_core-5.2.0-py3-none-any.whl", hash = "sha256:4bdc2928c37f6917130c667d8b8708f20aee539d8283c6be72aabd2a4b4c83b0"},
+ {file = "jupyter_core-5.2.0.tar.gz", hash = "sha256:1407cdb4c79ee467696c04b76633fc1884015fa109323365a6372c8e890cc83f"},
+]
+
+[package.dependencies]
+platformdirs = ">=2.5"
+pywin32 = {version = ">=1.0", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""}
+traitlets = ">=5.3"
+
+[package.extras]
+docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"]
+test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"]
+
[[package]]
name = "kiwisolver"
version = "1.4.4"
@@ -1122,53 +1460,53 @@ files = [
[[package]]
name = "matplotlib"
-version = "3.7.0"
+version = "3.7.1"
description = "Python plotting package"
category = "main"
optional = false
python-versions = ">=3.8"
files = [
- {file = "matplotlib-3.7.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:3da8b9618188346239e51f1ea6c0f8f05c6e218cfcc30b399dd7dd7f52e8bceb"},
- {file = "matplotlib-3.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c0592ba57217c22987b7322df10f75ef95bc44dce781692b4b7524085de66019"},
- {file = "matplotlib-3.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:21269450243d6928da81a9bed201f0909432a74e7d0d65db5545b9fa8a0d0223"},
- {file = "matplotlib-3.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb2e76cd429058d8954121c334dddfcd11a6186c6975bca61f3f248c99031b05"},
- {file = "matplotlib-3.7.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de20eb1247725a2f889173d391a6d9e7e0f2540feda24030748283108b0478ec"},
- {file = "matplotlib-3.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5465735eaaafd1cfaec3fed60aee776aeb3fd3992aa2e49f4635339c931d443"},
- {file = "matplotlib-3.7.0-cp310-cp310-win32.whl", hash = "sha256:092e6abc80cdf8a95f7d1813e16c0e99ceda8d5b195a3ab859c680f3487b80a2"},
- {file = "matplotlib-3.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:4f640534ec2760e270801056bc0d8a10777c48b30966eef78a7c35d8590915ba"},
- {file = "matplotlib-3.7.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f336e7014889c38c59029ebacc35c59236a852e4b23836708cfd3f43d1eaeed5"},
- {file = "matplotlib-3.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a10428d4f8d1a478ceabd652e61a175b2fdeed4175ab48da4a7b8deb561e3fa"},
- {file = "matplotlib-3.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46ca923e980f76d34c1c633343a72bb042d6ba690ecc649aababf5317997171d"},
- {file = "matplotlib-3.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c849aa94ff2a70fb71f318f48a61076d1205c6013b9d3885ade7f992093ac434"},
- {file = "matplotlib-3.7.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:827e78239292e561cfb70abf356a9d7eaf5bf6a85c97877f254009f20b892f89"},
- {file = "matplotlib-3.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:691ef1f15360e439886186d0db77b5345b24da12cbc4fc57b26c4826db4d6cab"},
- {file = "matplotlib-3.7.0-cp311-cp311-win32.whl", hash = "sha256:21a8aeac39b4a795e697265d800ce52ab59bdeb6bb23082e2d971f3041074f02"},
- {file = "matplotlib-3.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:01681566e95b9423021b49dea6a2395c16fa054604eacb87f0f4c439750f9114"},
- {file = "matplotlib-3.7.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cf119eee4e57389fba5ac8b816934e95c256535e55f0b21628b4205737d1de85"},
- {file = "matplotlib-3.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:21bd4033c40b95abd5b8453f036ed5aa70856e56ecbd887705c37dce007a4c21"},
- {file = "matplotlib-3.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:111ef351f28fd823ed7177632070a6badd6f475607122bc9002a526f2502a0b5"},
- {file = "matplotlib-3.7.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f91d35b3ef51d29d9c661069b9e4ba431ce283ffc533b981506889e144b5b40e"},
- {file = "matplotlib-3.7.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0a776462a4a63c0bfc9df106c15a0897aa2dbab6795c693aa366e8e283958854"},
- {file = "matplotlib-3.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dfd4a0cbd151f6439e6d7f8dca5292839ca311e7e650596d073774847ca2e4f"},
- {file = "matplotlib-3.7.0-cp38-cp38-win32.whl", hash = "sha256:56b7b79488209041a9bf7ddc34f1b069274489ce69e34dc63ae241d0d6b4b736"},
- {file = "matplotlib-3.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:8665855f3919c80551f377bc16df618ceabf3ef65270bc14b60302dce88ca9ab"},
- {file = "matplotlib-3.7.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:f910d924da8b9fb066b5beae0b85e34ed1b6293014892baadcf2a51da1c65807"},
- {file = "matplotlib-3.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf6346644e8fe234dc847e6232145dac199a650d3d8025b3ef65107221584ba4"},
- {file = "matplotlib-3.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d1e52365d8d5af699f04581ca191112e1d1220a9ce4386b57d807124d8b55e6"},
- {file = "matplotlib-3.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c869b646489c6a94375714032e5cec08e3aa8d3f7d4e8ef2b0fb50a52b317ce6"},
- {file = "matplotlib-3.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4ddac5f59e78d04b20469bc43853a8e619bb6505c7eac8ffb343ff2c516d72f"},
- {file = "matplotlib-3.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb0304c1cd802e9a25743414c887e8a7cd51d96c9ec96d388625d2cd1c137ae3"},
- {file = "matplotlib-3.7.0-cp39-cp39-win32.whl", hash = "sha256:a06a6c9822e80f323549c6bc9da96d4f233178212ad9a5f4ab87fd153077a507"},
- {file = "matplotlib-3.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:cb52aa97b92acdee090edfb65d1cb84ea60ab38e871ba8321a10bbcebc2a3540"},
- {file = "matplotlib-3.7.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3493b48e56468c39bd9c1532566dff3b8062952721b7521e1f394eb6791495f4"},
- {file = "matplotlib-3.7.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d0dcd1a0bf8d56551e8617d6dc3881d8a1c7fb37d14e5ec12cbb293f3e6170a"},
- {file = "matplotlib-3.7.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51fb664c37714cbaac69c16d6b3719f517a13c96c3f76f4caadd5a0aa7ed0329"},
- {file = "matplotlib-3.7.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4497d88c559b76da320b7759d64db442178beeea06a52dc0c629086982082dcd"},
- {file = "matplotlib-3.7.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9d85355c48ef8b9994293eb7c00f44aa8a43cad7a297fbf0770a25cdb2244b91"},
- {file = "matplotlib-3.7.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:03eb2c8ff8d85da679b71e14c7c95d16d014c48e0c0bfa14db85f6cdc5c92aad"},
- {file = "matplotlib-3.7.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71b751d06b2ed1fd017de512d7439c0259822864ea16731522b251a27c0b2ede"},
- {file = "matplotlib-3.7.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b51ab8a5d5d3bbd4527af633a638325f492e09e45e78afdf816ef55217a09664"},
- {file = "matplotlib-3.7.0.tar.gz", hash = "sha256:8f6efd313430d7ef70a38a3276281cb2e8646b3a22b3b21eb227da20e15e6813"},
+ {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:95cbc13c1fc6844ab8812a525bbc237fa1470863ff3dace7352e910519e194b1"},
+ {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:08308bae9e91aca1ec6fd6dda66237eef9f6294ddb17f0d0b3c863169bf82353"},
+ {file = "matplotlib-3.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:544764ba51900da4639c0f983b323d288f94f65f4024dc40ecb1542d74dc0500"},
+ {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56d94989191de3fcc4e002f93f7f1be5da476385dde410ddafbb70686acf00ea"},
+ {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99bc9e65901bb9a7ce5e7bb24af03675cbd7c70b30ac670aa263240635999a4"},
+ {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb7d248c34a341cd4c31a06fd34d64306624c8cd8d0def7abb08792a5abfd556"},
+ {file = "matplotlib-3.7.1-cp310-cp310-win32.whl", hash = "sha256:ce463ce590f3825b52e9fe5c19a3c6a69fd7675a39d589e8b5fbe772272b3a24"},
+ {file = "matplotlib-3.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d7bc90727351fb841e4d8ae620d2d86d8ed92b50473cd2b42ce9186104ecbba"},
+ {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:770a205966d641627fd5cf9d3cb4b6280a716522cd36b8b284a8eb1581310f61"},
+ {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f67bfdb83a8232cb7a92b869f9355d677bce24485c460b19d01970b64b2ed476"},
+ {file = "matplotlib-3.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2bf092f9210e105f414a043b92af583c98f50050559616930d884387d0772aba"},
+ {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89768d84187f31717349c6bfadc0e0d8c321e8eb34522acec8a67b1236a66332"},
+ {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83111e6388dec67822e2534e13b243cc644c7494a4bb60584edbff91585a83c6"},
+ {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a867bf73a7eb808ef2afbca03bcdb785dae09595fbe550e1bab0cd023eba3de0"},
+ {file = "matplotlib-3.7.1-cp311-cp311-win32.whl", hash = "sha256:fbdeeb58c0cf0595efe89c05c224e0a502d1aa6a8696e68a73c3efc6bc354304"},
+ {file = "matplotlib-3.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:c0bd19c72ae53e6ab979f0ac6a3fafceb02d2ecafa023c5cca47acd934d10be7"},
+ {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6eb88d87cb2c49af00d3bbc33a003f89fd9f78d318848da029383bfc08ecfbfb"},
+ {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:cf0e4f727534b7b1457898c4f4ae838af1ef87c359b76dcd5330fa31893a3ac7"},
+ {file = "matplotlib-3.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:46a561d23b91f30bccfd25429c3c706afe7d73a5cc64ef2dfaf2b2ac47c1a5dc"},
+ {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8704726d33e9aa8a6d5215044b8d00804561971163563e6e6591f9dcf64340cc"},
+ {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4cf327e98ecf08fcbb82685acaf1939d3338548620ab8dfa02828706402c34de"},
+ {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:617f14ae9d53292ece33f45cba8503494ee199a75b44de7717964f70637a36aa"},
+ {file = "matplotlib-3.7.1-cp38-cp38-win32.whl", hash = "sha256:7c9a4b2da6fac77bcc41b1ea95fadb314e92508bf5493ceff058e727e7ecf5b0"},
+ {file = "matplotlib-3.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:14645aad967684e92fc349493fa10c08a6da514b3d03a5931a1bac26e6792bd1"},
+ {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:81a6b377ea444336538638d31fdb39af6be1a043ca5e343fe18d0f17e098770b"},
+ {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:28506a03bd7f3fe59cd3cd4ceb2a8d8a2b1db41afede01f66c42561b9be7b4b7"},
+ {file = "matplotlib-3.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c587963b85ce41e0a8af53b9b2de8dddbf5ece4c34553f7bd9d066148dc719c"},
+ {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bf26ade3ff0f27668989d98c8435ce9327d24cffb7f07d24ef609e33d582439"},
+ {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:def58098f96a05f90af7e92fd127d21a287068202aa43b2a93476170ebd99e87"},
+ {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f883a22a56a84dba3b588696a2b8a1ab0d2c3d41be53264115c71b0a942d8fdb"},
+ {file = "matplotlib-3.7.1-cp39-cp39-win32.whl", hash = "sha256:4f99e1b234c30c1e9714610eb0c6d2f11809c9c78c984a613ae539ea2ad2eb4b"},
+ {file = "matplotlib-3.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:3ba2af245e36990facf67fde840a760128ddd71210b2ab6406e640188d69d136"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3032884084f541163f295db8a6536e0abb0db464008fadca6c98aaf84ccf4717"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a2cb34336110e0ed8bb4f650e817eed61fa064acbefeb3591f1b33e3a84fd96"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b867e2f952ed592237a1828f027d332d8ee219ad722345b79a001f49df0936eb"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:57bfb8c8ea253be947ccb2bc2d1bb3862c2bccc662ad1b4626e1f5e004557042"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:438196cdf5dc8d39b50a45cb6e3f6274edbcf2254f85fa9b895bf85851c3a613"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e9cff1a58d42e74d01153360de92b326708fb205250150018a52c70f43c290"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d4725d70b7c03e082bbb8a34639ede17f333d7247f56caceb3801cb6ff703d"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:97cc368a7268141afb5690760921765ed34867ffb9655dd325ed207af85c7529"},
+ {file = "matplotlib-3.7.1.tar.gz", hash = "sha256:7b73305f25eab4541bd7ee0b96d87e53ae9c9f1823be5659b806cd85786fe882"},
]
[package.dependencies]
@@ -1183,6 +1521,21 @@ pillow = ">=6.2.0"
pyparsing = ">=2.3.1"
python-dateutil = ">=2.7"
+[[package]]
+name = "matplotlib-inline"
+version = "0.1.6"
+description = "Inline Matplotlib backend for Jupyter"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"},
+ {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"},
+]
+
+[package.dependencies]
+traitlets = "*"
+
[[package]]
name = "mccabe"
version = "0.7.0"
@@ -1197,14 +1550,14 @@ files = [
[[package]]
name = "mdit-py-plugins"
-version = "0.3.4"
+version = "0.3.5"
description = "Collection of plugins for markdown-it-py"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "mdit-py-plugins-0.3.4.tar.gz", hash = "sha256:3278aab2e2b692539082f05e1243f24742194ffd92481f48844f057b51971283"},
- {file = "mdit_py_plugins-0.3.4-py3-none-any.whl", hash = "sha256:4f1441264ac5cb39fa40a5901921c2acf314ea098d75629750c138f80d552cdf"},
+ {file = "mdit-py-plugins-0.3.5.tar.gz", hash = "sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a"},
+ {file = "mdit_py_plugins-0.3.5-py3-none-any.whl", hash = "sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e"},
]
[package.dependencies]
@@ -1227,6 +1580,24 @@ files = [
{file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
]
+[[package]]
+name = "mpmath"
+version = "1.3.0"
+description = "Python library for arbitrary-precision floating-point arithmetic"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
+ {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
+]
+
+[package.extras]
+develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"]
+docs = ["sphinx"]
+gmpy = ["gmpy2 (>=2.1.0a4)"]
+tests = ["pytest (>=4.6)"]
+
[[package]]
name = "multitasking"
version = "0.0.11"
@@ -1280,20 +1651,32 @@ testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=6,<7)", "pytest-cov",
[[package]]
name = "natsort"
-version = "8.2.0"
+version = "8.3.1"
description = "Simple yet flexible natural sorting in Python."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "natsort-8.2.0-py3-none-any.whl", hash = "sha256:04fe18fdd2b9e5957f19f687eb117f102ef8dde6b574764e536e91194bed4f5f"},
- {file = "natsort-8.2.0.tar.gz", hash = "sha256:57f85b72c688b09e053cdac302dd5b5b53df5f73ae20b4874fcbffd8bf783d11"},
+ {file = "natsort-8.3.1-py3-none-any.whl", hash = "sha256:d583bc9050dd10538de36297c960b93f873f0cd01671a3c50df5bd86dd391dcb"},
+ {file = "natsort-8.3.1.tar.gz", hash = "sha256:517595492dde570a4fd6b6a76f644440c1ba51e2338c8a671d7f0475fda8f9fd"},
]
[package.extras]
fast = ["fastnumbers (>=2.0.0)"]
icu = ["PyICU (>=1.0.0)"]
+[[package]]
+name = "nest-asyncio"
+version = "1.5.6"
+description = "Patch asyncio to allow nested event loops"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "nest_asyncio-1.5.6-py3-none-any.whl", hash = "sha256:b9a953fb40dceaa587d109609098db21900182b16440652454a146cffb06e8b8"},
+ {file = "nest_asyncio-1.5.6.tar.gz", hash = "sha256:d267cc1ff794403f7df692964d1d2a3fa9418ffea2a3f6859a439ff482fef290"},
+]
+
[[package]]
name = "nodeenv"
version = "1.7.0"
@@ -1309,19 +1692,6 @@ files = [
[package.dependencies]
setuptools = "*"
-[[package]]
-name = "nose"
-version = "1.3.7"
-description = "nose extends unittest to make testing easier"
-category = "main"
-optional = false
-python-versions = "*"
-files = [
- {file = "nose-1.3.7-py2-none-any.whl", hash = "sha256:dadcddc0aefbf99eea214e0f1232b94f2fa9bd98fa8353711dacb112bfcbbb2a"},
- {file = "nose-1.3.7-py3-none-any.whl", hash = "sha256:9ff7c6cc443f8c51994b34a667bbcf45afd6d945be7477b52e97516fd17c53ac"},
- {file = "nose-1.3.7.tar.gz", hash = "sha256:f1bffef9cbc82628f6e7d7b40d7e255aefaa1adb6a1b1d26c69a8b79e6208a98"},
-]
-
[[package]]
name = "numpy"
version = "1.24.2"
@@ -1440,6 +1810,22 @@ pytz = ">=2020.1"
[package.extras]
test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"]
+[[package]]
+name = "parso"
+version = "0.8.3"
+description = "A Python Parser"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"},
+ {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"},
+]
+
+[package.extras]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["docopt", "pytest (<6.0.0)"]
+
[[package]]
name = "pathspec"
version = "0.11.0"
@@ -1452,6 +1838,33 @@ files = [
{file = "pathspec-0.11.0.tar.gz", hash = "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"},
]
+[[package]]
+name = "pexpect"
+version = "4.8.0"
+description = "Pexpect allows easy control of interactive console applications."
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
+ {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
+]
+
+[package.dependencies]
+ptyprocess = ">=0.5"
+
+[[package]]
+name = "pickleshare"
+version = "0.7.5"
+description = "Tiny 'shelve'-like database with concurrency support"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"},
+ {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"},
+]
+
[[package]]
name = "pillow"
version = "9.4.0"
@@ -1536,16 +1949,28 @@ files = [
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+[[package]]
+name = "pkgconfig"
+version = "1.5.5"
+description = "Interface Python with pkg-config"
+category = "main"
+optional = false
+python-versions = ">=3.3,<4.0"
+files = [
+ {file = "pkgconfig-1.5.5-py3-none-any.whl", hash = "sha256:d20023bbeb42ee6d428a0fac6e0904631f545985a10cdd71a20aa58bc47a4209"},
+ {file = "pkgconfig-1.5.5.tar.gz", hash = "sha256:deb4163ef11f75b520d822d9505c1f462761b4309b1bb713d08689759ea8b899"},
+]
+
[[package]]
name = "platformdirs"
-version = "3.0.0"
+version = "3.1.1"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "platformdirs-3.0.0-py3-none-any.whl", hash = "sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567"},
- {file = "platformdirs-3.0.0.tar.gz", hash = "sha256:8a1228abb1ef82d788f74139988b137e78692984ec7b08eaa6c65f1723af28f9"},
+ {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"},
+ {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
]
[package.extras]
@@ -1587,6 +2012,75 @@ nodeenv = ">=0.11.1"
pyyaml = ">=5.1"
virtualenv = ">=20.10.0"
+[[package]]
+name = "prompt-toolkit"
+version = "3.0.38"
+description = "Library for building powerful interactive command lines in Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "prompt_toolkit-3.0.38-py3-none-any.whl", hash = "sha256:45ea77a2f7c60418850331366c81cf6b5b9cf4c7fd34616f733c5427e6abbb1f"},
+ {file = "prompt_toolkit-3.0.38.tar.gz", hash = "sha256:23ac5d50538a9a38c8bde05fecb47d0b403ecd0662857a86f886f798563d5b9b"},
+]
+
+[package.dependencies]
+wcwidth = "*"
+
+[[package]]
+name = "psutil"
+version = "5.9.4"
+description = "Cross-platform lib for process and system monitoring in Python."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "psutil-5.9.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c1ca331af862803a42677c120aff8a814a804e09832f166f226bfd22b56feee8"},
+ {file = "psutil-5.9.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:68908971daf802203f3d37e78d3f8831b6d1014864d7a85937941bb35f09aefe"},
+ {file = "psutil-5.9.4-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:3ff89f9b835100a825b14c2808a106b6fdcc4b15483141482a12c725e7f78549"},
+ {file = "psutil-5.9.4-cp27-cp27m-win32.whl", hash = "sha256:852dd5d9f8a47169fe62fd4a971aa07859476c2ba22c2254d4a1baa4e10b95ad"},
+ {file = "psutil-5.9.4-cp27-cp27m-win_amd64.whl", hash = "sha256:9120cd39dca5c5e1c54b59a41d205023d436799b1c8c4d3ff71af18535728e94"},
+ {file = "psutil-5.9.4-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6b92c532979bafc2df23ddc785ed116fced1f492ad90a6830cf24f4d1ea27d24"},
+ {file = "psutil-5.9.4-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:efeae04f9516907be44904cc7ce08defb6b665128992a56957abc9b61dca94b7"},
+ {file = "psutil-5.9.4-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:54d5b184728298f2ca8567bf83c422b706200bcbbfafdc06718264f9393cfeb7"},
+ {file = "psutil-5.9.4-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16653106f3b59386ffe10e0bad3bb6299e169d5327d3f187614b1cb8f24cf2e1"},
+ {file = "psutil-5.9.4-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54c0d3d8e0078b7666984e11b12b88af2db11d11249a8ac8920dd5ef68a66e08"},
+ {file = "psutil-5.9.4-cp36-abi3-win32.whl", hash = "sha256:149555f59a69b33f056ba1c4eb22bb7bf24332ce631c44a319cec09f876aaeff"},
+ {file = "psutil-5.9.4-cp36-abi3-win_amd64.whl", hash = "sha256:fd8522436a6ada7b4aad6638662966de0d61d241cb821239b2ae7013d41a43d4"},
+ {file = "psutil-5.9.4-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:6001c809253a29599bc0dfd5179d9f8a5779f9dffea1da0f13c53ee568115e1e"},
+ {file = "psutil-5.9.4.tar.gz", hash = "sha256:3d7f9739eb435d4b1338944abe23f49584bde5395f27487d2ee25ad9a8774a62"},
+]
+
+[package.extras]
+test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
+
+[[package]]
+name = "ptyprocess"
+version = "0.7.0"
+description = "Run a subprocess in a pseudo terminal"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
+ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
+]
+
+[[package]]
+name = "pure-eval"
+version = "0.2.2"
+description = "Safely evaluate AST nodes without side effects"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"},
+ {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
[[package]]
name = "py"
version = "1.11.0"
@@ -1623,6 +2117,32 @@ files = [
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
]
+[[package]]
+name = "pydata-sphinx-theme"
+version = "0.13.1"
+description = "Bootstrap-based Sphinx theme from the PyData community"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pydata_sphinx_theme-0.13.1-py3-none-any.whl", hash = "sha256:ce29c1de7961d616dfa25f4c3a9619818d4bb2d4a9985ed078367af294fbccc2"},
+ {file = "pydata_sphinx_theme-0.13.1.tar.gz", hash = "sha256:a37d967207c2d787cfe5cf74abfc6b3bcaf0606cac6b283a3876e603d67d6a71"},
+]
+
+[package.dependencies]
+accessible-pygments = "*"
+Babel = "*"
+beautifulsoup4 = "*"
+docutils = "!=0.17.0"
+packaging = "*"
+pygments = ">=2.7"
+sphinx = ">=4.2"
+
+[package.extras]
+dev = ["nox", "pre-commit", "pydata-sphinx-theme[doc,test]", "pyyaml"]
+doc = ["ablog (>=0.11.0rc2)", "colorama", "ipyleaflet", "jupyter_sphinx", "linkify-it-py", "matplotlib", "myst-nb", "nbsphinx", "numpy", "numpydoc", "pandas", "plotly", "rich", "sphinx-copybutton", "sphinx-design", "sphinx-favicon (>=1.0.1)", "sphinx-sitemap", "sphinx-togglebutton", "sphinxcontrib-youtube", "sphinxext-rediraffe", "xarray"]
+test = ["codecov", "pytest", "pytest-cov", "pytest-regressions"]
+
[[package]]
name = "pyflakes"
version = "3.0.1"
@@ -1681,49 +2201,16 @@ files = [
flake8 = "6.0.0"
tomli = {version = "*", markers = "python_version < \"3.11\""}
-[[package]]
-name = "pysces"
-version = "1.0.3"
-description = "The Python Simulator for Cellular Systems - simulation and analysis tools for modelling biological systems"
-category = "main"
-optional = false
-python-versions = "*"
-files = [
- {file = "pysces-1.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a785d2c0f558bdf414cda3375a996f921f2a912422346530560b663cbc793fd3"},
- {file = "pysces-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42daf24b75db2efbce46d062d20cf5b53860e00607cce5094a0ddf7e2a7e0c1"},
- {file = "pysces-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:0903cd136c378ec84dee800fdf29be7619615f86fdc4b16be201955e3f5d692c"},
- {file = "pysces-1.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cbdaef41bdcc60038ea93815bc3fe456f7fa482d5761699ad76732f265185cc3"},
- {file = "pysces-1.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e15f486ceb1e43cce58f96197e77933513672d1167e1c92989f0685c4d5b6bb9"},
- {file = "pysces-1.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:d067e051b8733572cbbe0243eb623a441d78d604a1dcb85a18b5cc681f2d6135"},
- {file = "pysces-1.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:36f7da6e353ae700bc74ab548d5686d8da2824826d0cacdd930ca3feee6be2ba"},
- {file = "pysces-1.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d171525222a02b51783e9c754b4e7c53e4d49ada4d0dd171cc39288bb2ae80b3"},
- {file = "pysces-1.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:a46064b9a54f145c93ad93e7605d88dc28e1b1467f4cc10d86351cda7b938292"},
- {file = "pysces-1.0.3.tar.gz", hash = "sha256:19bb2da5a02b9e5b3e4ef77924d4eaa62505e6ac8c4373f9f3704f2118db0059"},
-]
-
-[package.dependencies]
-matplotlib = "*"
-nose = "*"
-numpy = "*"
-python-libsbml = {version = "*", optional = true, markers = "extra == \"sbml\""}
-scipy = "*"
-
-[package.extras]
-all = ["assimulo", "ipyparallel", "python-libsbml"]
-cvode = ["assimulo"]
-parscan = ["ipyparallel"]
-sbml = ["python-libsbml"]
-
[[package]]
name = "pytest"
-version = "7.2.1"
+version = "7.2.2"
description = "pytest: simple powerful testing with Python"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pytest-7.2.1-py3-none-any.whl", hash = "sha256:c7c6ca206e93355074ae32f7403e8ea12163b1163c976fee7d4d84027c162be5"},
- {file = "pytest-7.2.1.tar.gz", hash = "sha256:d45e0952f3727241918b8fd0f376f5ff6b301cc0777c6f9a556935c92d8a7d42"},
+ {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"},
+ {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"},
]
[package.dependencies]
@@ -1840,6 +2327,30 @@ files = [
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
]
+[[package]]
+name = "pywin32"
+version = "305"
+description = "Python for Window Extensions"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pywin32-305-cp310-cp310-win32.whl", hash = "sha256:421f6cd86e84bbb696d54563c48014b12a23ef95a14e0bdba526be756d89f116"},
+ {file = "pywin32-305-cp310-cp310-win_amd64.whl", hash = "sha256:73e819c6bed89f44ff1d690498c0a811948f73777e5f97c494c152b850fad478"},
+ {file = "pywin32-305-cp310-cp310-win_arm64.whl", hash = "sha256:742eb905ce2187133a29365b428e6c3b9001d79accdc30aa8969afba1d8470f4"},
+ {file = "pywin32-305-cp311-cp311-win32.whl", hash = "sha256:19ca459cd2e66c0e2cc9a09d589f71d827f26d47fe4a9d09175f6aa0256b51c2"},
+ {file = "pywin32-305-cp311-cp311-win_amd64.whl", hash = "sha256:326f42ab4cfff56e77e3e595aeaf6c216712bbdd91e464d167c6434b28d65990"},
+ {file = "pywin32-305-cp311-cp311-win_arm64.whl", hash = "sha256:4ecd404b2c6eceaca52f8b2e3e91b2187850a1ad3f8b746d0796a98b4cea04db"},
+ {file = "pywin32-305-cp36-cp36m-win32.whl", hash = "sha256:48d8b1659284f3c17b68587af047d110d8c44837736b8932c034091683e05863"},
+ {file = "pywin32-305-cp36-cp36m-win_amd64.whl", hash = "sha256:13362cc5aa93c2beaf489c9c9017c793722aeb56d3e5166dadd5ef82da021fe1"},
+ {file = "pywin32-305-cp37-cp37m-win32.whl", hash = "sha256:a55db448124d1c1484df22fa8bbcbc45c64da5e6eae74ab095b9ea62e6d00496"},
+ {file = "pywin32-305-cp37-cp37m-win_amd64.whl", hash = "sha256:109f98980bfb27e78f4df8a51a8198e10b0f347257d1e265bb1a32993d0c973d"},
+ {file = "pywin32-305-cp38-cp38-win32.whl", hash = "sha256:9dd98384da775afa009bc04863426cb30596fd78c6f8e4e2e5bbf4edf8029504"},
+ {file = "pywin32-305-cp38-cp38-win_amd64.whl", hash = "sha256:56d7a9c6e1a6835f521788f53b5af7912090674bb84ef5611663ee1595860fc7"},
+ {file = "pywin32-305-cp39-cp39-win32.whl", hash = "sha256:9d968c677ac4d5cbdaa62fd3014ab241718e619d8e36ef8e11fb930515a1e918"},
+ {file = "pywin32-305-cp39-cp39-win_amd64.whl", hash = "sha256:50768c6b7c3f0b38b7fb14dd4104da93ebced5f1a50dc0e834594bff6fbe1271"},
+]
+
[[package]]
name = "pyyaml"
version = "6.0"
@@ -1890,6 +2401,96 @@ files = [
{file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
+[[package]]
+name = "pyzmq"
+version = "25.0.0"
+description = "Python bindings for 0MQ"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pyzmq-25.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2d05d904f03ddf1e0d83d97341354dfe52244a619b5a1440a5f47a5b3451e84e"},
+ {file = "pyzmq-25.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a154ef810d44f9d28868be04641f837374a64e7449df98d9208e76c260c7ef1"},
+ {file = "pyzmq-25.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:487305c2a011fdcf3db1f24e8814bb76d23bc4d2f46e145bc80316a59a9aa07d"},
+ {file = "pyzmq-25.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e7b87638ee30ab13230e37ce5331b3e730b1e0dda30120b9eeec3540ed292c8"},
+ {file = "pyzmq-25.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75243e422e85a62f0ab7953dc315452a56b2c6a7e7d1a3c3109ac3cc57ed6b47"},
+ {file = "pyzmq-25.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:31e523d067ce44a04e876bed3ff9ea1ff8d1b6636d16e5fcace9d22f8c564369"},
+ {file = "pyzmq-25.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8539216173135e9e89f6b1cc392e74e6b935b91e8c76106cf50e7a02ab02efe5"},
+ {file = "pyzmq-25.0.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2754fa68da08a854f4816e05160137fa938a2347276471103d31e04bcee5365c"},
+ {file = "pyzmq-25.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1bc30f0c18444d51e9b0d0dd39e3a4e7c53ee74190bebef238cd58de577ea9"},
+ {file = "pyzmq-25.0.0-cp310-cp310-win32.whl", hash = "sha256:01d53958c787cfea34091fcb8ef36003dbb7913b8e9f8f62a0715234ebc98b70"},
+ {file = "pyzmq-25.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:58fc3ad5e1cfd2e6d24741fbb1e216b388115d31b0ca6670f894187f280b6ba6"},
+ {file = "pyzmq-25.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:e4bba04ea779a3d7ef25a821bb63fd0939142c88e7813e5bd9c6265a20c523a2"},
+ {file = "pyzmq-25.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:af1fbfb7ad6ac0009ccee33c90a1d303431c7fb594335eb97760988727a37577"},
+ {file = "pyzmq-25.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85456f0d8f3268eecd63dede3b99d5bd8d3b306310c37d4c15141111d22baeaf"},
+ {file = "pyzmq-25.0.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0645b5a2d2a06fd8eb738018490c514907f7488bf9359c6ee9d92f62e844b76f"},
+ {file = "pyzmq-25.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f72ea279b2941a5203e935a4588b9ba8a48aeb9a926d9dfa1986278bd362cb8"},
+ {file = "pyzmq-25.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:4e295f7928a31ae0f657e848c5045ba6d693fe8921205f408ca3804b1b236968"},
+ {file = "pyzmq-25.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ac97e7d647d5519bcef48dd8d3d331f72975afa5c4496c95f6e854686f45e2d9"},
+ {file = "pyzmq-25.0.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:656281d496aaf9ca4fd4cea84e6d893e3361057c4707bd38618f7e811759103c"},
+ {file = "pyzmq-25.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1f6116991568aac48b94d6d8aaed6157d407942ea385335a6ed313692777fb9d"},
+ {file = "pyzmq-25.0.0-cp311-cp311-win32.whl", hash = "sha256:0282bba9aee6e0346aa27d6c69b5f7df72b5a964c91958fc9e0c62dcae5fdcdc"},
+ {file = "pyzmq-25.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:526f884a27e8bba62fe1f4e07c62be2cfe492b6d432a8fdc4210397f8cf15331"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ccb3e1a863222afdbda42b7ca8ac8569959593d7abd44f5a709177d6fa27d266"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4046d03100aca266e70d54a35694cb35d6654cfbef633e848b3c4a8d64b9d187"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3100dddcada66ec5940ed6391ebf9d003cc3ede3d320748b2737553019f58230"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7877264aa851c19404b1bb9dbe6eed21ea0c13698be1eda3784aab3036d1c861"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:5049e75cc99db65754a3da5f079230fb8889230cf09462ec972d884d1704a3ed"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:81f99fb1224d36eb91557afec8cdc2264e856f3464500b55749020ce4c848ef2"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a1cd4a95f176cdc0ee0a82d49d5830f13ae6015d89decbf834c273bc33eeb3d3"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-win32.whl", hash = "sha256:926236ca003aec70574754f39703528947211a406f5c6c8b3e50eca04a9e87fc"},
+ {file = "pyzmq-25.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:94f0a7289d0f5c80807c37ebb404205e7deb737e8763eb176f4770839ee2a287"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f3f96d452e9580cb961ece2e5a788e64abaecb1232a80e61deffb28e105ff84a"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:930e6ad4f2eaac31a3d0c2130619d25db754b267487ebc186c6ad18af2a74018"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1081d7030a1229c8ff90120346fb7599b54f552e98fcea5170544e7c6725aab"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:531866c491aee5a1e967c286cfa470dffac1e2a203b1afda52d62b58782651e9"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:fc7c1421c5b1c916acf3128bf3cc7ea7f5018b58c69a6866d70c14190e600ce9"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9a2d5e419bd39a1edb6cdd326d831f0120ddb9b1ff397e7d73541bf393294973"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:183e18742be3621acf8908903f689ec520aee3f08449bfd29f583010ca33022b"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-win32.whl", hash = "sha256:02f5cb60a7da1edd5591a15efa654ffe2303297a41e1b40c3c8942f8f11fc17c"},
+ {file = "pyzmq-25.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:cac602e02341eaaf4edfd3e29bd3fdef672e61d4e6dfe5c1d065172aee00acee"},
+ {file = "pyzmq-25.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:e14df47c1265356715d3d66e90282a645ebc077b70b3806cf47efcb7d1d630cb"},
+ {file = "pyzmq-25.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:293a7c2128690f496057f1f1eb6074f8746058d13588389981089ec45d8fdc77"},
+ {file = "pyzmq-25.0.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:731b208bc9412deeb553c9519dca47136b5a01ca66667cafd8733211941b17e4"},
+ {file = "pyzmq-25.0.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b055a1cddf8035966ad13aa51edae5dc8f1bba0b5d5e06f7a843d8b83dc9b66b"},
+ {file = "pyzmq-25.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17e1cb97d573ea84d7cd97188b42ca6f611ab3ee600f6a75041294ede58e3d20"},
+ {file = "pyzmq-25.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:60ecbfe7669d3808ffa8a7dd1487d6eb8a4015b07235e3b723d4b2a2d4de7203"},
+ {file = "pyzmq-25.0.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4c25c95416133942280faaf068d0fddfd642b927fb28aaf4ab201a738e597c1e"},
+ {file = "pyzmq-25.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:be05504af0619d1cffa500af1e0ede69fb683f301003851f5993b5247cc2c576"},
+ {file = "pyzmq-25.0.0-cp38-cp38-win32.whl", hash = "sha256:6bf3842af37af43fa953e96074ebbb5315f6a297198f805d019d788a1021dbc8"},
+ {file = "pyzmq-25.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b90bb8dfbbd138558f1f284fecfe328f7653616ff9a972433a00711d9475d1a9"},
+ {file = "pyzmq-25.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:62b9e80890c0d2408eb42d5d7e1fc62a5ce71be3288684788f74cf3e59ffd6e2"},
+ {file = "pyzmq-25.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484c2c4ee02c1edc07039f42130bd16e804b1fe81c4f428e0042e03967f40c20"},
+ {file = "pyzmq-25.0.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9ca6db34b26c4d3e9b0728841ec9aa39484eee272caa97972ec8c8e231b20c7e"},
+ {file = "pyzmq-25.0.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:610d2d112acd4e5501fac31010064a6c6efd716ceb968e443cae0059eb7b86de"},
+ {file = "pyzmq-25.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3594c0ff604e685d7e907860b61d0e10e46c74a9ffca168f6e9e50ea934ee440"},
+ {file = "pyzmq-25.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c21a5f4e54a807df5afdef52b6d24ec1580153a6bcf0607f70a6e1d9fa74c5c3"},
+ {file = "pyzmq-25.0.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4725412e27612f0d7d7c2f794d89807ad0227c2fc01dd6146b39ada49c748ef9"},
+ {file = "pyzmq-25.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4d3d604fe0a67afd1aff906e54da557a5203368a99dcc50a70eef374f1d2abef"},
+ {file = "pyzmq-25.0.0-cp39-cp39-win32.whl", hash = "sha256:3670e8c5644768f214a3b598fe46378a4a6f096d5fb82a67dfd3440028460565"},
+ {file = "pyzmq-25.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:e99629a976809fe102ef73e856cf4b2660acd82a412a51e80ba2215e523dfd0a"},
+ {file = "pyzmq-25.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:66509c48f7446b640eeae24b60c9c1461799a27b1b0754e438582e36b5af3315"},
+ {file = "pyzmq-25.0.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9c464cc508177c09a5a6122b67f978f20e2954a21362bf095a0da4647e3e908"},
+ {file = "pyzmq-25.0.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:28bcb2e66224a7ac2843eb632e4109d6b161479e7a2baf24e37210461485b4f1"},
+ {file = "pyzmq-25.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0e7ef9ac807db50b4eb6f534c5dcc22f998f5dae920cc28873d2c1d080a4fc9"},
+ {file = "pyzmq-25.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:5050f5c50b58a6e38ccaf9263a356f74ef1040f5ca4030225d1cb1a858c5b7b6"},
+ {file = "pyzmq-25.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2a73af6504e0d2805e926abf136ebf536735a13c22f709be7113c2ec65b4bec3"},
+ {file = "pyzmq-25.0.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0e8d00228db627ddd1b418c7afd81820b38575f237128c9650365f2dd6ac3443"},
+ {file = "pyzmq-25.0.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5605621f2181f20b71f13f698944deb26a0a71af4aaf435b34dd90146092d530"},
+ {file = "pyzmq-25.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6136bfb0e5a9cf8c60c6ac763eb21f82940a77e6758ea53516c8c7074f4ff948"},
+ {file = "pyzmq-25.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0a90b2480a26aef7c13cff18703ba8d68e181facb40f78873df79e6d42c1facc"},
+ {file = "pyzmq-25.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00c94fd4c9dd3c95aace0c629a7fa713627a5c80c1819326b642adf6c4b8e2a2"},
+ {file = "pyzmq-25.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20638121b0bdc80777ce0ec8c1f14f1ffec0697a1f88f0b564fa4a23078791c4"},
+ {file = "pyzmq-25.0.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f75b4b8574f3a8a0d6b4b52606fc75b82cb4391471be48ab0b8677c82f9ed4"},
+ {file = "pyzmq-25.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cbb885f347eba7ab7681c450dee5b14aed9f153eec224ec0c3f299273d9241f"},
+ {file = "pyzmq-25.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c48f257da280b3be6c94e05bd575eddb1373419dbb1a72c3ce64e88f29d1cd6d"},
+ {file = "pyzmq-25.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:866eabf7c1315ef2e93e34230db7cbf672e0d7c626b37c11f7e870c8612c3dcc"},
+ {file = "pyzmq-25.0.0.tar.gz", hash = "sha256:f330a1a2c7f89fd4b0aa4dcb7bf50243bf1c8da9a2f1efc31daf57a2046b31f2"},
+]
+
+[package.dependencies]
+cffi = {version = "*", markers = "implementation_name == \"pypy\""}
+
[[package]]
name = "requests"
version = "2.28.2"
@@ -1913,20 +2514,19 @@ socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
-name = "sbmltoodepy"
-version = "1.0.4"
-description = "A package that creates a python implementation of an SBML model."
+name = "schwimmbad"
+version = "0.3.2"
+description = "A common interface for parallel processing pools."
category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
files = [
- {file = "sbmltoodepy-1.0.4.tar.gz", hash = "sha256:8c212f630580c882dab60b5d548fa331848412d3ae8e0a626319342c7a69dbe0"},
+ {file = "schwimmbad-0.3.2.tar.gz", hash = "sha256:d453b735fcd0d4a5c5a6e18af2f72f678358f7614db3b78b6cd38d6b07debc9f"},
]
-[package.dependencies]
-numpy = "*"
-python-libsbml = "*"
-scipy = "*"
+[package.extras]
+docs = ["alabaster", "emcee"]
+test = ["dill", "joblib", "mpi4py", "numpy", "pytest", "pytest-astropy", "pytest-cov"]
[[package]]
name = "scipy"
@@ -1987,14 +2587,14 @@ emoji = ["emoji"]
[[package]]
name = "setuptools"
-version = "67.4.0"
+version = "67.6.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
-category = "dev"
+category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "setuptools-67.4.0-py3-none-any.whl", hash = "sha256:f106dee1b506dee5102cc3f3e9e68137bbad6d47b616be7991714b0c62204251"},
- {file = "setuptools-67.4.0.tar.gz", hash = "sha256:e5fd0a713141a4a105412233c63dc4e17ba0090c8e8334594ac790ec97792330"},
+ {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"},
+ {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"},
]
[package.extras]
@@ -2026,6 +2626,18 @@ files = [
{file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
]
+[[package]]
+name = "soupsieve"
+version = "2.4"
+description = "A modern CSS selector implementation for Beautiful Soup."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"},
+ {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"},
+]
+
[[package]]
name = "sphinx"
version = "5.3.0"
@@ -2062,6 +2674,27 @@ docs = ["sphinxcontrib-websupport"]
lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-bugbear", "flake8-comprehensions", "flake8-simplify", "isort", "mypy (>=0.981)", "sphinx-lint", "types-requests", "types-typed-ast"]
test = ["cython", "html5lib", "pytest (>=4.6)", "typed_ast"]
+[[package]]
+name = "sphinx-book-theme"
+version = "1.0.0"
+description = "A clean book theme for scientific explanations and documentation with Sphinx"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sphinx_book_theme-1.0.0-py3-none-any.whl", hash = "sha256:f6baba7888d5a63328a210e9960a7a6e951e9632d74edb6fce48c9d5d8c28768"},
+ {file = "sphinx_book_theme-1.0.0.tar.gz", hash = "sha256:7c76ea51347e55d801504bfb1b2433cba49e7212a25fbebd0ce44b3c098ea946"},
+]
+
+[package.dependencies]
+pydata-sphinx-theme = ">=0.13.0"
+sphinx = ">=4,<7"
+
+[package.extras]
+code-style = ["pre-commit"]
+doc = ["ablog", "docutils (==0.17.1)", "folium", "ipywidgets", "matplotlib", "myst-nb", "nbclient", "numpy", "numpydoc", "pandas", "plotly", "sphinx-copybutton", "sphinx-design", "sphinx-examples", "sphinx-tabs (<=3.4.0)", "sphinx-thebe", "sphinx-togglebutton", "sphinxcontrib-bibtex", "sphinxcontrib-youtube", "sphinxext-opengraph"]
+test = ["beautifulsoup4", "coverage", "myst-nb", "pytest", "pytest-cov", "pytest-regressions", "sphinx_thebe"]
+
[[package]]
name = "sphinx-copybutton"
version = "0.5.1"
@@ -2232,6 +2865,41 @@ files = [
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]
+[[package]]
+name = "stack-data"
+version = "0.6.2"
+description = "Extract data from python stack frames and tracebacks for informative displays"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"},
+ {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"},
+]
+
+[package.dependencies]
+asttokens = ">=2.1.0"
+executing = ">=1.2.0"
+pure-eval = "*"
+
+[package.extras]
+tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
+
+[[package]]
+name = "sympy"
+version = "1.11.1"
+description = "Computer algebra system (CAS) in Python"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "sympy-1.11.1-py3-none-any.whl", hash = "sha256:938f984ee2b1e8eae8a07b884c8b7a1146010040fccddc6539c54f401c8f6fcf"},
+ {file = "sympy-1.11.1.tar.gz", hash = "sha256:e32380dce63cb7c0108ed525570092fd45168bdae2faa17e528221ef72e88658"},
+]
+
+[package.dependencies]
+mpmath = ">=0.19"
+
[[package]]
name = "tomli"
version = "2.0.1"
@@ -2244,16 +2912,49 @@ files = [
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
+[[package]]
+name = "toposort"
+version = "1.10"
+description = "Implements a topological sort algorithm."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "toposort-1.10-py3-none-any.whl", hash = "sha256:cbdbc0d0bee4d2695ab2ceec97fe0679e9c10eab4b2a87a9372b929e70563a87"},
+ {file = "toposort-1.10.tar.gz", hash = "sha256:bfbb479c53d0a696ea7402601f4e693c97b0367837c8898bc6471adfca37a6bd"},
+]
+
+[[package]]
+name = "tornado"
+version = "6.2"
+description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
+category = "dev"
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"},
+ {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"},
+ {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"},
+ {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"},
+ {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"},
+]
+
[[package]]
name = "tqdm"
-version = "4.64.1"
+version = "4.65.0"
description = "Fast, Extensible Progress Meter"
category = "main"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+python-versions = ">=3.7"
files = [
- {file = "tqdm-4.64.1-py2.py3-none-any.whl", hash = "sha256:6fee160d6ffcd1b1c68c65f14c829c22832bc401726335ce92c52d395944a6a1"},
- {file = "tqdm-4.64.1.tar.gz", hash = "sha256:5f4f682a004951c1b450bc753c710e9280c5746ce6ffedee253ddbcbf54cf1e4"},
+ {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"},
+ {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"},
]
[package.dependencies]
@@ -2265,6 +2966,22 @@ notebook = ["ipywidgets (>=6)"]
slack = ["slack-sdk"]
telegram = ["requests"]
+[[package]]
+name = "traitlets"
+version = "5.9.0"
+description = "Traitlets Python configuration system"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"},
+ {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"},
+]
+
+[package.extras]
+docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
+test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
+
[[package]]
name = "typeguard"
version = "2.13.3"
@@ -2312,14 +3029,14 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "virtualenv"
-version = "20.19.0"
+version = "20.20.0"
description = "Virtual Python Environment builder"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "virtualenv-20.19.0-py3-none-any.whl", hash = "sha256:54eb59e7352b573aa04d53f80fc9736ed0ad5143af445a1e539aada6eb947dd1"},
- {file = "virtualenv-20.19.0.tar.gz", hash = "sha256:37a640ba82ed40b226599c522d411e4be5edb339a0c0de030c0dc7b646d61590"},
+ {file = "virtualenv-20.20.0-py3-none-any.whl", hash = "sha256:3c22fa5a7c7aa106ced59934d2c20a2ecb7f49b4130b8bf444178a16b880fa45"},
+ {file = "virtualenv-20.20.0.tar.gz", hash = "sha256:a8a4b8ca1e28f864b7514a253f98c1d62b64e31e77325ba279248c65fb4fcef4"},
]
[package.dependencies]
@@ -2331,6 +3048,18 @@ platformdirs = ">=2.4,<4"
docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"]
test = ["covdefaults (>=2.2.2)", "coverage (>=7.1)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23)", "pytest (>=7.2.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)"]
+[[package]]
+name = "wcwidth"
+version = "0.2.6"
+description = "Measures the displayed width of unicode strings in a terminal"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"},
+ {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"},
+]
+
[[package]]
name = "wheel"
version = "0.38.4"
@@ -2346,6 +3075,18 @@ files = [
[package.extras]
test = ["pytest (>=3.0.0)"]
+[[package]]
+name = "wurlitzer"
+version = "3.0.3"
+description = "Capture C-level output in context managers"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "wurlitzer-3.0.3-py3-none-any.whl", hash = "sha256:ffcc584109f5ecd5244abfde4534f22140f8735a4890ce1abd90b4e503f5f427"},
+ {file = "wurlitzer-3.0.3.tar.gz", hash = "sha256:224f5fe70618be3872c05dfddc8c457191ec1870654596279fcc1edadebe3e5b"},
+]
+
[[package]]
name = "yfinance"
version = "0.1.96"
@@ -2369,21 +3110,21 @@ requests = ">=2.26"
[[package]]
name = "zipp"
-version = "3.14.0"
+version = "3.15.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "zipp-3.14.0-py3-none-any.whl", hash = "sha256:188834565033387710d046e3fe96acfc9b5e86cbca7f39ff69cf21a4128198b7"},
- {file = "zipp-3.14.0.tar.gz", hash = "sha256:9e5421e176ef5ab4c0ad896624e87a7b2f07aca746c9b2aa305952800cb8eecb"},
+ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
+ {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.1"
-content-hash = "c20cfe4630bff95aba10b218c7af027da20d25c7766599f092174006027914dc"
+content-hash = "91cfccfd68a4f9f761b002293b7c8950c3deb709f39dc3e8f95a7d9b7f685088"
diff --git a/pyproject.toml b/pyproject.toml
index ad8090be..d96c0628 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,34 +20,32 @@ include = []
python = "^3.8.1"
jax = "^0.3.25"
numpy = "^1.23.5"
-scipy = "^1.9.3"
-pandas = "^1.5.2"
matplotlib = "^3.6.2"
diffrax = "^0.2.2"
emcee = "^3.1.3"
jaxlib = "^0.3.25"
tqdm = "^4.64.1"
-python-libsbml = "^5.19.7"
seedir = "^0.4.2"
-sbmltoodepy = "^1.0.4"
-pysces = {extras = ["sbml"], version = "^1.0.3"}
yfinance = "^0.1.87"
-
+amici = "^0.16.1"
+schwimmbad = "^0.3.2"
[tool.poetry.group.dev.dependencies]
pre-commit = "^2.20.0"
black = "^22.10.0"
flake8 = "^6.0.0"
-isort = "^5.10.1"
+isort = "^5.12.0"
pytest = "^7.2.0"
pytest-datafiles = "^2.0.1"
pyproject-flake8 = "^6.0.0"
sphinx-copybutton = "^0.5.1"
sphinx = "^5"
-sphinx-rtd-theme = "^1.2.0"
myst-parser = "^0.18.1"
coverage = "^7.0.5"
sphinx-togglebutton = "^0.3.2"
+ipykernel = "^6.21.2"
+sphinx-book-theme = "^1.0.0"
+sphinx-rtd-theme = "^1.2.0"
[build-system]
requires = ["poetry-core"]
@@ -61,6 +59,7 @@ profile = "black"
skip_gitignore = "true"
float_to_top = "true"
line_length = 79
+known_first_party = "amici"
[tool.flake8]
extend-ignore = ["E501", "E203", "F841"] # Length of lines already handled by black, spacing, unused variables
@@ -70,10 +69,15 @@ per-file-ignores = ["__init__.py: F401"]
testpaths = [
"tests",
]
+norecursedirs = [
+ "swig",
+ "generated_sbml_model",
+ "Data",
+ "Applications",
+ "build",
+]
addopts = [
"--import-mode=importlib",
- "--ignore=tests/test_pysces.py",
- "--ignore=tests/test_sbml_model.py",
]
pythonpath = ["epi"]
filterwarnings = [
| Erroneous definition of bound and limit arrays in stock.py
There is a minor bug in the definition of the dataBounds, paramBounds and paramSamplingLimits for the stock example.
For example for the limits, the code in EPIC/epi/examples/stock/stock.py currently states
def getParamSamplingLimits(self):
return np.array([[-10.0, 10.0] * self.paramDim])
However, we would probably like to have an array definition like
def getParamSamplingLimits(self):
return np.array([[-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0]])
We implicitly assume the parameter dimension to be 6 but this is similar for the other examples.
I checked the other examples and from my understanding, this is the only example with this problem.
It therefore should be done by simply replacing the three array definitions.
Thanks in advance,
Vincent
Erroneous definition of bound and limit arrays in stock.py
There is a minor bug in the definition of the dataBounds, paramBounds and paramSamplingLimits for the stock example.
For example for the limits, the code in EPIC/epi/examples/stock/stock.py currently states
def getParamSamplingLimits(self):
return np.array([[-10.0, 10.0] * self.paramDim])
However, we would probably like to have an array definition like
def getParamSamplingLimits(self):
return np.array([[-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0]])
We implicitly assume the parameter dimension to be 6 but this is similar for the other examples.
I checked the other examples and from my understanding, this is the only example with this problem.
It therefore should be done by simply replacing the three array definitions.
Thanks in advance,
Vincent
Better sampling limits for corona example
The corona example does currently not pass its test due to ODE solver troubles.
This can be solved by replacing the parameter sampling limits in file EPIC/epi/examples/corona/corona.py with the following, new limits:
def getParamSamplingLimits(self):
return np.array([[-3.5, -0.5], [-1.0, 2.0], [-1.0, 2.0]])
Using these limits, the test passes with up to 50.000 samples.
Thank you in advance,
Vincent
| 2023-03-09T11:37:19 | 0.0 | [] | [] |
|||
bigmlcom/bigmler | bigmlcom__bigmler-260 | acf1d947ea6f9a6e58f888f8f009a5bf074b6efe | diff --git a/HISTORY.rst b/HISTORY.rst
index 83c03602..ff0648a6 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -3,6 +3,11 @@
History
-------
+5.8.1 (2024-05-31)
+~~~~~~~~~~~~~~~~~~
+
+- Fixing Pascal VOC to BigML-COCO translator.
+
5.8.0 (2024-04-05)
~~~~~~~~~~~~~~~~~~
diff --git a/bigmler/__init__.py b/bigmler/__init__.py
index 5411dd58..18a66c5c 100644
--- a/bigmler/__init__.py
+++ b/bigmler/__init__.py
@@ -1,2 +1,2 @@
# -*- coding: utf-8 -*-
-__version__ = '5.8.0'
+__version__ = '5.8.1'
diff --git a/bigmler/processing/annotations.py b/bigmler/processing/annotations.py
index a00b3195..fa1ddc1a 100644
--- a/bigmler/processing/annotations.py
+++ b/bigmler/processing/annotations.py
@@ -173,10 +173,9 @@ def bigml_coco_file(args, session_file):
def get_image_info(annotation_root):
"""Returns the basic image descriptors"""
+ filename = annotation_root.findtext('filename')
path = annotation_root.findtext('path')
- if path is None:
- filename = annotation_root.findtext('filename')
- else:
+ if filename is None and path is not None:
filename = os.path.basename(path)
img_name = os.path.basename(filename)
img_id = os.path.splitext(img_name)[0]
@@ -530,6 +529,7 @@ def voc_to_cocojson(voc_dir, args, session_file):
if args.images_dir is not None:
base_dir = os.path.join(base_dir, args.images_dir)
image_filename_base = os.path.join(folder, img_info['filename'])
+ filename = image_filename_base
if args.images_dir is not None:
filename = os.path.join(base_dir, image_filename_base)
diff --git a/check_files/evaluation_iris_dn.json b/check_files/evaluation_iris_dn.json
index 647c7d22..b228fc4d 100644
--- a/check_files/evaluation_iris_dn.json
+++ b/check_files/evaluation_iris_dn.json
@@ -1,1 +1,1 @@
-{"class_names": ["Iris-setosa", "Iris-versicolor", "Iris-virginica"], "mode": {"accuracy": 0.43333, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.5, "average_f_measure": 0.20155, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0, "average_precision": 0.14444, "average_recall": 0.33333, "average_spearmans_rho": 0, "confusion_matrix": [[13, 0, 0], [8, 0, 0], [9, 0, 0]], "per_class_statistics": [{"accuracy": 0.43333, "balanced_accuracy": 0.5, "class_name": "Iris-setosa", "f_measure": 0.60465, "phi_coefficient": 0, "precision": 0.43333, "present_in_test_data": true, "recall": 1}, {"accuracy": 0.73333, "balanced_accuracy": 0.5, "class_name": "Iris-versicolor", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}, {"accuracy": 0.7, "balanced_accuracy": 0.5, "class_name": "Iris-virginica", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}]}, "model": {"accuracy": 0.93333, "average_area_under_pr_curve": 0.99117, "average_area_under_roc_curve": 0.99634, "average_balanced_accuracy": 0.94781, "average_f_measure": 0.9213, "average_kendalls_tau_b": 0.76919, "average_ks_statistic": 0.96898, "average_max_phi": 0.94898, "average_phi": 0.8985, "average_precision": 0.93333, "average_recall": 0.92593, "average_spearmans_rho": 0.85377, "confusion_matrix": [[13, 0, 0], [0, 8, 0], [0, 2, 7]], "per_class_statistics": [{"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-setosa", "f_measure": 1, "gain_curve": [[0, 0, 1], [0.13333, 0.30769, 0.99999], [0.33333, 0.76923, 0.99998], [0.4, 0.92308, 0.99996], [0.43333, 1, 0.00021], [0.46667, 1, 1e-05], [0.5, 1, 0], [1, 1, null]], "kendalls_tau_b": 0.84707, "ks_statistic": [1, 0.00021], "lift_curve": [[0, 0, 1], [0.13333, 2.30769, 0.99999], [0.33333, 2.30769, 0.99998], [0.4, 2.30769, 0.99996], 
[0.43333, 2.30769, 0.00021], [0.46667, 2.14286, 1e-05], [0.5, 2, 0], [1, 1, null]], "max_phi": [1, 0.00021], "negative_cdf": [[0, 0, 1], [0.13333, 0, 0.99999], [0.33333, 0, 0.99998], [0.4, 0, 0.99996], [0.43333, 0, 0.00021], [0.46667, 0.05882, 1e-05], [0.5, 0.11765, 0], [1, 1, null]], "per_threshold_confusion_matrices": [[[13, 17, 0, 0], null], [[13, 2, 15, 0], 0], [[13, 1, 16, 0], 1e-05], [[13, 0, 17, 0], 0.00021], [[12, 0, 17, 1], 0.99996], [[10, 0, 17, 3], 0.99998], [[4, 0, 17, 9], 0.99999], [[0, 0, 17, 13], 1]], "phi_coefficient": 1, "pr_curve": [[0, 1, 1], [0.30769, 1, 0.99999], [0.76923, 1, 0.99998], [0.92308, 1, 0.99996], [1, 1, 0.00021], [1, 0.92857, 1e-05], [1, 0.86667, 0], [1, 0.43333, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 1], [0, 0.30769, 0.99999], [0, 0.76923, 0.99998], [0, 0.92308, 0.99996], [0, 1, 0.00021], [0.05882, 1, 1e-05], [0.11765, 1, 0], [1, 1, null]], "spearmans_rho": 0.92326}, {"accuracy": 0.93333, "area_under_pr_curve": 0.98524, "area_under_roc_curve": 0.99432, "balanced_accuracy": 0.95455, "class_name": "Iris-versicolor", "f_measure": 0.88889, "gain_curve": [[0, 0, 1], [0.1, 0.375, 0.99997], [0.13333, 0.5, 0.99996], [0.16667, 0.625, 0.99983], [0.2, 0.75, 0.99979], [0.23333, 0.875, 0.87021], [0.26667, 0.875, 0.80792], [0.3, 1, 0.71352], [0.33333, 1, 0.01229], [0.36667, 1, 0.00363], [0.4, 1, 0.00017], [0.43333, 1, 4e-05], [0.46667, 1, 2e-05], [0.53333, 1, 1e-05], [0.73333, 1, 0], [1, 1, null]], "kendalls_tau_b": 0.66585, "ks_statistic": [0.95455, 0.71352], "lift_curve": [[0, 0, 1], [0.1, 3.75, 0.99997], [0.13333, 3.75, 0.99996], [0.16667, 3.75, 0.99983], [0.2, 3.75, 0.99979], [0.23333, 3.75, 0.87021], [0.26667, 3.28125, 0.80792], [0.3, 3.33333, 0.71352], [0.33333, 3, 0.01229], [0.36667, 2.72727, 0.00363], [0.4, 2.5, 0.00017], [0.43333, 2.30769, 4e-05], [0.46667, 2.14286, 2e-05], [0.53333, 1.875, 1e-05], [0.73333, 1.36364, 0], [1, 1, null]], "max_phi": [0.92113, 0.71352], "negative_cdf": [[0, 
0, 1], [0.1, 0, 0.99997], [0.13333, 0, 0.99996], [0.16667, 0, 0.99983], [0.2, 0, 0.99979], [0.23333, 0, 0.87021], [0.26667, 0.04545, 0.80792], [0.3, 0.04545, 0.71352], [0.33333, 0.09091, 0.01229], [0.36667, 0.13636, 0.00363], [0.4, 0.18182, 0.00017], [0.43333, 0.22727, 4e-05], [0.46667, 0.27273, 2e-05], [0.53333, 0.36364, 1e-05], [0.73333, 0.63636, 0], [1, 1, null]], "per_threshold_confusion_matrices": [[[8, 22, 0, 0], null], [[8, 14, 8, 0], 0], [[8, 8, 14, 0], 1e-05], [[8, 6, 16, 0], 2e-05], [[8, 5, 17, 0], 4e-05], [[8, 4, 18, 0], 0.00017], [[8, 3, 19, 0], 0.00363], [[8, 2, 20, 0], 0.01229], [[8, 1, 21, 0], 0.71352], [[7, 1, 21, 1], 0.80792], [[7, 0, 22, 1], 0.87021], [[6, 0, 22, 2], 0.99979], [[5, 0, 22, 3], 0.99983], [[4, 0, 22, 4], 0.99996], [[3, 0, 22, 5], 0.99997], [[0, 0, 22, 8], 1]], "phi_coefficient": 0.8528, "pr_curve": [[0, 1, 1], [0.375, 1, 0.99997], [0.5, 1, 0.99996], [0.625, 1, 0.99983], [0.75, 1, 0.99979], [0.875, 1, 0.87021], [0.875, 0.875, 0.80792], [1, 0.88889, 0.71352], [1, 0.8, 0.01229], [1, 0.72727, 0.00363], [1, 0.66667, 0.00017], [1, 0.61538, 4e-05], [1, 0.57143, 2e-05], [1, 0.5, 1e-05], [1, 0.36364, 0], [1, 0.26667, null]], "precision": 0.8, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 1], [0, 0.375, 0.99997], [0, 0.5, 0.99996], [0, 0.625, 0.99983], [0, 0.75, 0.99979], [0, 0.875, 0.87021], [0.04545, 0.875, 0.80792], [0.04545, 1, 0.71352], [0.09091, 1, 0.01229], [0.13636, 1, 0.00363], [0.18182, 1, 0.00017], [0.22727, 1, 4e-05], [0.27273, 1, 2e-05], [0.36364, 1, 1e-05], [0.63636, 1, 0], [1, 1, null]], "spearmans_rho": 0.76833}, {"accuracy": 0.93333, "area_under_pr_curve": 0.98827, "area_under_roc_curve": 0.99471, "balanced_accuracy": 0.88889, "class_name": "Iris-virginica", "f_measure": 0.875, "gain_curve": [[0, 0, 1], [0.13333, 0.44444, 0.99983], [0.16667, 0.55556, 0.99637], [0.2, 0.66667, 0.98771], [0.23333, 0.77778, 0.28648], [0.26667, 0.88889, 0.19208], [0.3, 0.88889, 0.12979], [0.33333, 1, 0.00017], [0.36667, 1, 4e-05], 
[0.4, 1, 2e-05], [0.43333, 1, 0], [1, 1, null]], "kendalls_tau_b": 0.79465, "ks_statistic": [0.95238, 0.00017], "lift_curve": [[0, 0, 1], [0.13333, 3.33333, 0.99983], [0.16667, 3.33333, 0.99637], [0.2, 3.33333, 0.98771], [0.23333, 3.33333, 0.28648], [0.26667, 3.33333, 0.19208], [0.3, 2.96296, 0.12979], [0.33333, 3, 0.00017], [0.36667, 2.72727, 4e-05], [0.4, 2.5, 2e-05], [0.43333, 2.30769, 0], [1, 1, null]], "max_phi": [0.92582, 0.00017], "negative_cdf": [[0, 0, 1], [0.13333, 0, 0.99983], [0.16667, 0, 0.99637], [0.2, 0, 0.98771], [0.23333, 0, 0.28648], [0.26667, 0, 0.19208], [0.3, 0.04762, 0.12979], [0.33333, 0.04762, 0.00017], [0.36667, 0.09524, 4e-05], [0.4, 0.14286, 2e-05], [0.43333, 0.19048, 0], [1, 1, null]], "per_threshold_confusion_matrices": [[[9, 21, 0, 0], null], [[9, 4, 17, 0], 0], [[9, 3, 18, 0], 2e-05], [[9, 2, 19, 0], 4e-05], [[9, 1, 20, 0], 0.00017], [[8, 1, 20, 1], 0.12979], [[8, 0, 21, 1], 0.19208], [[7, 0, 21, 2], 0.28648], [[6, 0, 21, 3], 0.98771], [[5, 0, 21, 4], 0.99637], [[4, 0, 21, 5], 0.99983], [[0, 0, 21, 9], 1]], "phi_coefficient": 0.8427, "pr_curve": [[0, 1, 1], [0.44444, 1, 0.99983], [0.55556, 1, 0.99637], [0.66667, 1, 0.98771], [0.77778, 1, 0.28648], [0.88889, 1, 0.19208], [0.88889, 0.88889, 0.12979], [1, 0.9, 0.00017], [1, 0.81818, 4e-05], [1, 0.75, 2e-05], [1, 0.69231, 0], [1, 0.3, null]], "precision": 1, "present_in_test_data": true, "recall": 0.77778, "roc_curve": [[0, 0, 1], [0, 0.44444, 0.99983], [0, 0.55556, 0.99637], [0, 0.66667, 0.98771], [0, 0.77778, 0.28648], [0, 0.88889, 0.19208], [0.04762, 0.88889, 0.12979], [0.04762, 1, 0.00017], [0.09524, 1, 4e-05], [0.14286, 1, 2e-05], [0.19048, 1, 0], [1, 1, null]], "spearmans_rho": 0.86973}]}, "random": {"accuracy": 0.36667, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.52447, "average_f_measure": 0.35892, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0.05351, "average_precision": 
0.37306, "average_recall": 0.35969, "average_spearmans_rho": 0, "confusion_matrix": [[5, 4, 4], [3, 2, 3], [1, 4, 4]], "per_class_statistics": [{"accuracy": 0.6, "balanced_accuracy": 0.57466, "class_name": "Iris-setosa", "f_measure": 0.45455, "phi_coefficient": 0.16147, "precision": 0.55556, "present_in_test_data": true, "recall": 0.38462}, {"accuracy": 0.53333, "balanced_accuracy": 0.44318, "class_name": "Iris-versicolor", "f_measure": 0.22222, "phi_coefficient": -0.1066, "precision": 0.2, "present_in_test_data": true, "recall": 0.25}, {"accuracy": 0.6, "balanced_accuracy": 0.55556, "class_name": "Iris-virginica", "f_measure": 0.4, "phi_coefficient": 0.10566, "precision": 0.36364, "present_in_test_data": true, "recall": 0.44444}]}}
+{"class_names": ["Iris-setosa", "Iris-versicolor", "Iris-virginica"], "mode": {"accuracy": 0.43333, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.5, "average_f_measure": 0.20155, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0, "average_precision": 0.14444, "average_recall": 0.33333, "average_spearmans_rho": 0, "confusion_matrix": [[13, 0, 0], [8, 0, 0], [9, 0, 0]], "per_class_statistics": [{"accuracy": 0.43333, "balanced_accuracy": 0.5, "class_name": "Iris-setosa", "f_measure": 0.60465, "phi_coefficient": 0, "precision": 0.43333, "present_in_test_data": true, "recall": 1}, {"accuracy": 0.73333, "balanced_accuracy": 0.5, "class_name": "Iris-versicolor", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}, {"accuracy": 0.7, "balanced_accuracy": 0.5, "class_name": "Iris-virginica", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}]}, "model": {"accuracy": 1, "average_area_under_pr_curve": 1, "average_area_under_roc_curve": 1, "average_balanced_accuracy": 1, "average_f_measure": 1, "average_kendalls_tau_b": 0.79062, "average_ks_statistic": 1, "average_max_phi": 1, "average_phi": 1, "average_precision": 1, "average_recall": 1, "average_spearmans_rho": 0.86887, "confusion_matrix": [[13, 0, 0], [0, 8, 0], [0, 0, 9]], "per_class_statistics": [{"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-setosa", "f_measure": 1, "gain_curve": [[0, 0, 1], [0.43333, 1, 0.00021], [0.46667, 1, 0.00015], [0.5, 1, 5e-05], [0.53333, 1, 4e-05], [0.56667, 1, 1e-05], [0.63333, 1, 0], [1, 1, null]], "kendalls_tau_b": 0.85687, "ks_statistic": [1, 0.00021], "lift_curve": [[0, 0, 1], [0.43333, 2.30769, 0.00021], [0.46667, 2.14286, 0.00015], [0.5, 2, 5e-05], [0.53333, 1.875, 4e-05], [0.56667, 1.76471, 1e-05], [0.63333, 1.57895, 0], [1, 1, null]], 
"max_phi": [1, 0.00021], "negative_cdf": [[0, 0, 1], [0.43333, 0, 0.00021], [0.46667, 0.05882, 0.00015], [0.5, 0.11765, 5e-05], [0.53333, 0.17647, 4e-05], [0.56667, 0.23529, 1e-05], [0.63333, 0.35294, 0], [1, 1, null]], "per_threshold_confusion_matrices": [[[13, 17, 0, 0], null], [[13, 6, 11, 0], 0], [[13, 4, 13, 0], 1e-05], [[13, 3, 14, 0], 4e-05], [[13, 2, 15, 0], 5e-05], [[13, 1, 16, 0], 0.00015], [[13, 0, 17, 0], 0.00021], [[0, 0, 17, 13], 1]], "phi_coefficient": 1, "pr_curve": [[0, 1, 1], [1, 1, 0.00021], [1, 0.92857, 0.00015], [1, 0.86667, 5e-05], [1, 0.8125, 4e-05], [1, 0.76471, 1e-05], [1, 0.68421, 0], [1, 0.43333, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 1], [0, 1, 0.00021], [0.05882, 1, 0.00015], [0.11765, 1, 5e-05], [0.17647, 1, 4e-05], [0.23529, 1, 1e-05], [0.35294, 1, 0], [1, 1, null]], "spearmans_rho": 0.92077}, {"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-versicolor", "f_measure": 1, "gain_curve": [[0, 0, 0.99997], [0.03333, 0.125, 0.99996], [0.06667, 0.25, 0.99989], [0.1, 0.375, 0.99979], [0.13333, 0.5, 0.99972], [0.16667, 0.625, 0.9992], [0.2, 0.75, 0.99878], [0.23333, 0.875, 0.69706], [0.26667, 1, 0.17107], [0.3, 1, 0.04435], [0.33333, 1, 0.02242], [0.36667, 1, 0.00553], [0.4, 1, 0.00082], [0.43333, 1, 0], [1, 1, null]], "kendalls_tau_b": 0.76722, "ks_statistic": [1, 0.17107], "lift_curve": [[0, 0, 0.99997], [0.03333, 3.75, 0.99996], [0.06667, 3.75, 0.99989], [0.1, 3.75, 0.99979], [0.13333, 3.75, 0.99972], [0.16667, 3.75, 0.9992], [0.2, 3.75, 0.99878], [0.23333, 3.75, 0.69706], [0.26667, 3.75, 0.17107], [0.3, 3.33333, 0.04435], [0.33333, 3, 0.02242], [0.36667, 2.72727, 0.00553], [0.4, 2.5, 0.00082], [0.43333, 2.30769, 0], [1, 1, null]], "max_phi": [1, 0.17107], "negative_cdf": [[0, 0, 0.99997], [0.03333, 0, 0.99996], [0.06667, 0, 0.99989], [0.1, 0, 0.99979], [0.13333, 0, 0.99972], [0.16667, 0, 0.9992], [0.2, 0, 0.99878], [0.23333, 0, 
0.69706], [0.26667, 0, 0.17107], [0.3, 0.04545, 0.04435], [0.33333, 0.09091, 0.02242], [0.36667, 0.13636, 0.00553], [0.4, 0.18182, 0.00082], [0.43333, 0.22727, 0], [1, 1, null]], "per_threshold_confusion_matrices": [[[8, 22, 0, 0], null], [[8, 5, 17, 0], 0], [[8, 4, 18, 0], 0.00082], [[8, 3, 19, 0], 0.00553], [[8, 2, 20, 0], 0.02242], [[8, 1, 21, 0], 0.04435], [[8, 0, 22, 0], 0.17107], [[7, 0, 22, 1], 0.69706], [[6, 0, 22, 2], 0.99878], [[5, 0, 22, 3], 0.9992], [[4, 0, 22, 4], 0.99972], [[3, 0, 22, 5], 0.99979], [[2, 0, 22, 6], 0.99989], [[1, 0, 22, 7], 0.99996], [[0, 0, 22, 8], 0.99997]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99997], [0.125, 1, 0.99996], [0.25, 1, 0.99989], [0.375, 1, 0.99979], [0.5, 1, 0.99972], [0.625, 1, 0.9992], [0.75, 1, 0.99878], [0.875, 1, 0.69706], [1, 1, 0.17107], [1, 0.88889, 0.04435], [1, 0.8, 0.02242], [1, 0.72727, 0.00553], [1, 0.66667, 0.00082], [1, 0.61538, 0], [1, 0.26667, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99997], [0, 0.125, 0.99996], [0, 0.25, 0.99989], [0, 0.375, 0.99979], [0, 0.5, 0.99972], [0, 0.625, 0.9992], [0, 0.75, 0.99878], [0, 0.875, 0.69706], [0, 1, 0.17107], [0.04545, 1, 0.04435], [0.09091, 1, 0.02242], [0.13636, 1, 0.00553], [0.18182, 1, 0.00082], [0.22727, 1, 0], [1, 1, null]], "spearmans_rho": 0.8471}, {"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-virginica", "f_measure": 1, "gain_curve": [[0, 0, 1], [0.13333, 0.44444, 0.99918], [0.16667, 0.55556, 0.99447], [0.2, 0.66667, 0.97758], [0.23333, 0.77778, 0.95565], [0.26667, 0.88889, 0.82893], [0.3, 1, 0.30294], [0.33333, 1, 0.00117], [0.36667, 1, 0.00079], [0.4, 1, 0.00013], [0.43333, 1, 7e-05], [0.46667, 1, 4e-05], [0.5, 1, 2e-05], [0.53333, 1, 0], [1, 1, null]], "kendalls_tau_b": 0.74778, "ks_statistic": [1, 0.30294], "lift_curve": [[0, 0, 1], [0.13333, 3.33333, 0.99918], [0.16667, 3.33333, 0.99447], [0.2, 3.33333, 0.97758], [0.23333, 
3.33333, 0.95565], [0.26667, 3.33333, 0.82893], [0.3, 3.33333, 0.30294], [0.33333, 3, 0.00117], [0.36667, 2.72727, 0.00079], [0.4, 2.5, 0.00013], [0.43333, 2.30769, 7e-05], [0.46667, 2.14286, 4e-05], [0.5, 2, 2e-05], [0.53333, 1.875, 0], [1, 1, null]], "max_phi": [1, 0.30294], "negative_cdf": [[0, 0, 1], [0.13333, 0, 0.99918], [0.16667, 0, 0.99447], [0.2, 0, 0.97758], [0.23333, 0, 0.95565], [0.26667, 0, 0.82893], [0.3, 0, 0.30294], [0.33333, 0.04762, 0.00117], [0.36667, 0.09524, 0.00079], [0.4, 0.14286, 0.00013], [0.43333, 0.19048, 7e-05], [0.46667, 0.2381, 4e-05], [0.5, 0.28571, 2e-05], [0.53333, 0.33333, 0], [1, 1, null]], "per_threshold_confusion_matrices": [[[9, 21, 0, 0], null], [[9, 7, 14, 0], 0], [[9, 6, 15, 0], 2e-05], [[9, 5, 16, 0], 4e-05], [[9, 4, 17, 0], 7e-05], [[9, 3, 18, 0], 0.00013], [[9, 2, 19, 0], 0.00079], [[9, 1, 20, 0], 0.00117], [[9, 0, 21, 0], 0.30294], [[8, 0, 21, 1], 0.82893], [[7, 0, 21, 2], 0.95565], [[6, 0, 21, 3], 0.97758], [[5, 0, 21, 4], 0.99447], [[4, 0, 21, 5], 0.99918], [[0, 0, 21, 9], 1]], "phi_coefficient": 1, "pr_curve": [[0, 1, 1], [0.44444, 1, 0.99918], [0.55556, 1, 0.99447], [0.66667, 1, 0.97758], [0.77778, 1, 0.95565], [0.88889, 1, 0.82893], [1, 1, 0.30294], [1, 0.9, 0.00117], [1, 0.81818, 0.00079], [1, 0.75, 0.00013], [1, 0.69231, 7e-05], [1, 0.64286, 4e-05], [1, 0.6, 2e-05], [1, 0.5625, 0], [1, 0.3, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 1], [0, 0.44444, 0.99918], [0, 0.55556, 0.99447], [0, 0.66667, 0.97758], [0, 0.77778, 0.95565], [0, 0.88889, 0.82893], [0, 1, 0.30294], [0.04762, 1, 0.00117], [0.09524, 1, 0.00079], [0.14286, 1, 0.00013], [0.19048, 1, 7e-05], [0.2381, 1, 4e-05], [0.28571, 1, 2e-05], [0.33333, 1, 0], [1, 1, null]], "spearmans_rho": 0.83873}]}, "random": {"accuracy": 0.33333, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.50398, "average_f_measure": 0.32715, "average_kendalls_tau_b": 0, 
"average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0.01973, "average_precision": 0.35967, "average_recall": 0.32942, "average_spearmans_rho": 0, "confusion_matrix": [[4, 7, 2], [2, 1, 5], [1, 3, 5]], "per_class_statistics": [{"accuracy": 0.6, "balanced_accuracy": 0.56561, "class_name": "Iris-setosa", "f_measure": 0.4, "phi_coefficient": 0.15374, "precision": 0.57143, "present_in_test_data": true, "recall": 0.30769}, {"accuracy": 0.43333, "balanced_accuracy": 0.33523, "class_name": "Iris-versicolor", "f_measure": 0.10526, "phi_coefficient": -0.30241, "precision": 0.09091, "present_in_test_data": true, "recall": 0.125}, {"accuracy": 0.63333, "balanced_accuracy": 0.61111, "class_name": "Iris-virginica", "f_measure": 0.47619, "phi_coefficient": 0.20787, "precision": 0.41667, "present_in_test_data": true, "recall": 0.55556}]}}
\ No newline at end of file
diff --git a/check_files/evaluation_iris_fs.json b/check_files/evaluation_iris_fs.json
index 0e77e055..3b28d016 100644
--- a/check_files/evaluation_iris_fs.json
+++ b/check_files/evaluation_iris_fs.json
@@ -1,1 +1,1 @@
-{"class_names": ["Iris-setosa", "Iris-versicolor", "Iris-virginica"], "mode": {"accuracy": 0.43333, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.5, "average_f_measure": 0.20155, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0, "average_precision": 0.14444, "average_recall": 0.33333, "average_spearmans_rho": 0, "confusion_matrix": [[13, 0, 0], [8, 0, 0], [9, 0, 0]], "per_class_statistics": [{"accuracy": 0.43333, "balanced_accuracy": 0.5, "class_name": "Iris-setosa", "f_measure": 0.60465, "phi_coefficient": 0, "precision": 0.43333, "present_in_test_data": true, "recall": 1}, {"accuracy": 0.73333, "balanced_accuracy": 0.5, "class_name": "Iris-versicolor", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}, {"accuracy": 0.7, "balanced_accuracy": 0.5, "class_name": "Iris-virginica", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}]}, "model": {"accuracy": 1, "average_area_under_pr_curve": 1, "average_area_under_roc_curve": 1, "average_balanced_accuracy": 1, "average_f_measure": 1, "average_kendalls_tau_b": 0.75578, "average_ks_statistic": 1, "average_max_phi": 1, "average_phi": 1, "average_precision": 1, "average_recall": 1, "average_spearmans_rho": 0.84513, "confusion_matrix": [[13, 0, 0], [0, 8, 0], [0, 0, 9]], "per_class_statistics": [{"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-setosa", "f_measure": 1, "gain_curve": [[0, 0, 0.99346], [0.43333, 1, 0.04167], [0.46667, 1, 0.00463], [0.5, 1, 0.00379], [0.76667, 1, 0.00353], [0.8, 1, 0.00349], [0.86667, 1, 0.00348], [0.93333, 1, 0.00347], [1, 1, null]], "kendalls_tau_b": 0.82336, "ks_statistic": [1, 0.04167], "lift_curve": [[0, 0, 0.99346], [0.43333, 2.30769, 0.04167], [0.46667, 2.14286, 0.00463], [0.5, 2, 0.00379], [0.76667, 1.30435, 0.00353], [0.8, 1.25, 
0.00349], [0.86667, 1.15385, 0.00348], [0.93333, 1.07143, 0.00347], [1, 1, null]], "max_phi": [1, 0.04167], "negative_cdf": [[0, 0, 0.99346], [0.43333, 0, 0.04167], [0.46667, 0.05882, 0.00463], [0.5, 0.11765, 0.00379], [0.76667, 0.58824, 0.00353], [0.8, 0.64706, 0.00349], [0.86667, 0.76471, 0.00348], [0.93333, 0.88235, 0.00347], [1, 1, null]], "per_threshold_confusion_matrices": [[[13, 17, 0, 0], null], [[13, 15, 2, 0], 0.00347], [[13, 13, 4, 0], 0.00348], [[13, 11, 6, 0], 0.00349], [[13, 10, 7, 0], 0.00353], [[13, 2, 15, 0], 0.00379], [[13, 1, 16, 0], 0.00463], [[13, 0, 17, 0], 0.04167], [[0, 0, 17, 13], 0.99346]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99346], [1, 1, 0.04167], [1, 0.92857, 0.00463], [1, 0.86667, 0.00379], [1, 0.56522, 0.00353], [1, 0.54167, 0.00349], [1, 0.5, 0.00348], [1, 0.46429, 0.00347], [1, 0.43333, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99346], [0, 1, 0.04167], [0.05882, 1, 0.00463], [0.11765, 1, 0.00379], [0.58824, 1, 0.00353], [0.64706, 1, 0.00349], [0.76471, 1, 0.00348], [0.88235, 1, 0.00347], [1, 1, null]], "spearmans_rho": 0.90539}, {"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-versicolor", "f_measure": 1, "gain_curve": [[0, 0, 0.99302], [0.06667, 0.25, 0.99298], [0.1, 0.375, 0.99291], [0.13333, 0.5, 0.99267], [0.16667, 0.625, 0.99249], [0.2, 0.75, 0.99188], [0.23333, 0.875, 0.91661], [0.26667, 1, 0.09012], [0.3, 1, 0.02647], [0.33333, 1, 0.01361], [0.36667, 1, 0.00928], [0.4, 1, 0.00399], [0.43333, 1, 0.00379], [0.56667, 1, 0.00327], [1, 1, null]], "kendalls_tau_b": 0.70912, "ks_statistic": [1, 0.09012], "lift_curve": [[0, 0, 0.99302], [0.06667, 3.75, 0.99298], [0.1, 3.75, 0.99291], [0.13333, 3.75, 0.99267], [0.16667, 3.75, 0.99249], [0.2, 3.75, 0.99188], [0.23333, 3.75, 0.91661], [0.26667, 3.75, 0.09012], [0.3, 3.33333, 0.02647], [0.33333, 3, 0.01361], [0.36667, 2.72727, 0.00928], [0.4, 2.5, 0.00399], [0.43333, 
2.30769, 0.00379], [0.56667, 1.76471, 0.00327], [1, 1, null]], "max_phi": [1, 0.09012], "negative_cdf": [[0, 0, 0.99302], [0.06667, 0, 0.99298], [0.1, 0, 0.99291], [0.13333, 0, 0.99267], [0.16667, 0, 0.99249], [0.2, 0, 0.99188], [0.23333, 0, 0.91661], [0.26667, 0, 0.09012], [0.3, 0.04545, 0.02647], [0.33333, 0.09091, 0.01361], [0.36667, 0.13636, 0.00928], [0.4, 0.18182, 0.00399], [0.43333, 0.22727, 0.00379], [0.56667, 0.40909, 0.00327], [1, 1, null]], "per_threshold_confusion_matrices": [[[8, 22, 0, 0], null], [[8, 9, 13, 0], 0.00327], [[8, 5, 17, 0], 0.00379], [[8, 4, 18, 0], 0.00399], [[8, 3, 19, 0], 0.00928], [[8, 2, 20, 0], 0.01361], [[8, 1, 21, 0], 0.02647], [[8, 0, 22, 0], 0.09012], [[7, 0, 22, 1], 0.91661], [[6, 0, 22, 2], 0.99188], [[5, 0, 22, 3], 0.99249], [[4, 0, 22, 4], 0.99267], [[3, 0, 22, 5], 0.99291], [[2, 0, 22, 6], 0.99298], [[0, 0, 22, 8], 0.99302]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99302], [0.25, 1, 0.99298], [0.375, 1, 0.99291], [0.5, 1, 0.99267], [0.625, 1, 0.99249], [0.75, 1, 0.99188], [0.875, 1, 0.91661], [1, 1, 0.09012], [1, 0.88889, 0.02647], [1, 0.8, 0.01361], [1, 0.72727, 0.00928], [1, 0.66667, 0.00399], [1, 0.61538, 0.00379], [1, 0.47059, 0.00327], [1, 0.26667, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99302], [0, 0.25, 0.99298], [0, 0.375, 0.99291], [0, 0.5, 0.99267], [0, 0.625, 0.99249], [0, 0.75, 0.99188], [0, 0.875, 0.91661], [0, 1, 0.09012], [0.04545, 1, 0.02647], [0.09091, 1, 0.01361], [0.13636, 1, 0.00928], [0.18182, 1, 0.00399], [0.22727, 1, 0.00379], [0.40909, 1, 0.00327], [1, 1, null]], "spearmans_rho": 0.80049}, {"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-virginica", "f_measure": 1, "gain_curve": [[0, 0, 0.99242], [0.13333, 0.44444, 0.99222], [0.16667, 0.55556, 0.98693], [0.2, 0.66667, 0.9826], [0.23333, 0.77778, 0.96974], [0.26667, 0.88889, 0.86822], [0.3, 1, 0.07992], [0.33333, 1, 0.00402], 
[0.36667, 1, 0.00386], [0.4, 1, 0.00357], [0.43333, 1, 0.00354], [0.46667, 1, 0.00351], [0.5, 1, 0.00349], [0.56667, 1, 0.00327], [1, 1, null]], "kendalls_tau_b": 0.73485, "ks_statistic": [1, 0.07992], "lift_curve": [[0, 0, 0.99242], [0.13333, 3.33333, 0.99222], [0.16667, 3.33333, 0.98693], [0.2, 3.33333, 0.9826], [0.23333, 3.33333, 0.96974], [0.26667, 3.33333, 0.86822], [0.3, 3.33333, 0.07992], [0.33333, 3, 0.00402], [0.36667, 2.72727, 0.00386], [0.4, 2.5, 0.00357], [0.43333, 2.30769, 0.00354], [0.46667, 2.14286, 0.00351], [0.5, 2, 0.00349], [0.56667, 1.76471, 0.00327], [1, 1, null]], "max_phi": [1, 0.07992], "negative_cdf": [[0, 0, 0.99242], [0.13333, 0, 0.99222], [0.16667, 0, 0.98693], [0.2, 0, 0.9826], [0.23333, 0, 0.96974], [0.26667, 0, 0.86822], [0.3, 0, 0.07992], [0.33333, 0.04762, 0.00402], [0.36667, 0.09524, 0.00386], [0.4, 0.14286, 0.00357], [0.43333, 0.19048, 0.00354], [0.46667, 0.2381, 0.00351], [0.5, 0.28571, 0.00349], [0.56667, 0.38095, 0.00327], [1, 1, null]], "per_threshold_confusion_matrices": [[[9, 21, 0, 0], null], [[9, 8, 13, 0], 0.00327], [[9, 6, 15, 0], 0.00349], [[9, 5, 16, 0], 0.00351], [[9, 4, 17, 0], 0.00354], [[9, 3, 18, 0], 0.00357], [[9, 2, 19, 0], 0.00386], [[9, 1, 20, 0], 0.00402], [[9, 0, 21, 0], 0.07992], [[8, 0, 21, 1], 0.86822], [[7, 0, 21, 2], 0.96974], [[6, 0, 21, 3], 0.9826], [[5, 0, 21, 4], 0.98693], [[4, 0, 21, 5], 0.99222], [[0, 0, 21, 9], 0.99242]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99242], [0.44444, 1, 0.99222], [0.55556, 1, 0.98693], [0.66667, 1, 0.9826], [0.77778, 1, 0.96974], [0.88889, 1, 0.86822], [1, 1, 0.07992], [1, 0.9, 0.00402], [1, 0.81818, 0.00386], [1, 0.75, 0.00357], [1, 0.69231, 0.00354], [1, 0.64286, 0.00351], [1, 0.6, 0.00349], [1, 0.52941, 0.00327], [1, 0.3, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99242], [0, 0.44444, 0.99222], [0, 0.55556, 0.98693], [0, 0.66667, 0.9826], [0, 0.77778, 0.96974], [0, 0.88889, 0.86822], [0, 1, 0.07992], [0.04762, 1, 
0.00402], [0.09524, 1, 0.00386], [0.14286, 1, 0.00357], [0.19048, 1, 0.00354], [0.2381, 1, 0.00351], [0.28571, 1, 0.00349], [0.38095, 1, 0.00327], [1, 1, null]], "spearmans_rho": 0.82952}]}, "random": {"accuracy": 0.4, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.5536, "average_f_measure": 0.39966, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0.11235, "average_precision": 0.41667, "average_recall": 0.40135, "average_spearmans_rho": 0, "confusion_matrix": [[5, 6, 2], [2, 3, 3], [2, 3, 4]], "per_class_statistics": [{"accuracy": 0.6, "balanced_accuracy": 0.57466, "class_name": "Iris-setosa", "f_measure": 0.45455, "phi_coefficient": 0.16147, "precision": 0.55556, "present_in_test_data": true, "recall": 0.38462}, {"accuracy": 0.53333, "balanced_accuracy": 0.48295, "class_name": "Iris-versicolor", "f_measure": 0.3, "phi_coefficient": -0.03077, "precision": 0.25, "present_in_test_data": true, "recall": 0.375}, {"accuracy": 0.66667, "balanced_accuracy": 0.60317, "class_name": "Iris-virginica", "f_measure": 0.44444, "phi_coefficient": 0.20635, "precision": 0.44444, "present_in_test_data": true, "recall": 0.44444}]}}
+{"class_names": ["Iris-setosa", "Iris-versicolor", "Iris-virginica"], "mode": {"accuracy": 0.43333, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.5, "average_f_measure": 0.20155, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": 0, "average_precision": 0.14444, "average_recall": 0.33333, "average_spearmans_rho": 0, "confusion_matrix": [[13, 0, 0], [8, 0, 0], [9, 0, 0]], "per_class_statistics": [{"accuracy": 0.43333, "balanced_accuracy": 0.5, "class_name": "Iris-setosa", "f_measure": 0.60465, "phi_coefficient": 0, "precision": 0.43333, "present_in_test_data": true, "recall": 1}, {"accuracy": 0.73333, "balanced_accuracy": 0.5, "class_name": "Iris-versicolor", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}, {"accuracy": 0.7, "balanced_accuracy": 0.5, "class_name": "Iris-virginica", "f_measure": 0, "phi_coefficient": 0, "precision": 0, "present_in_test_data": true, "recall": 0}]}, "model": {"accuracy": 1, "average_area_under_pr_curve": 1, "average_area_under_roc_curve": 1, "average_balanced_accuracy": 1, "average_f_measure": 1, "average_kendalls_tau_b": 0.71889, "average_ks_statistic": 1, "average_max_phi": 1, "average_phi": 1, "average_precision": 1, "average_recall": 1, "average_spearmans_rho": 0.82438, "confusion_matrix": [[13, 0, 0], [0, 8, 0], [0, 0, 9]], "per_class_statistics": [{"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-setosa", "f_measure": 1, "gain_curve": [[0, 0, 0.99346], [0.13333, 0.30769, 0.99346], [0.33333, 0.76923, 0.99345], [0.43333, 1, 0.04167], [0.46667, 1, 0.00432], [0.5, 1, 0.00379], [0.76667, 1, 0.00356], [0.8, 1, 0.0035], [0.83333, 1, 0.00349], [0.86667, 1, 0.00348], [0.96667, 1, 0.00347], [1, 1, null]], "kendalls_tau_b": 0.76261, "ks_statistic": [1, 0.04167], "lift_curve": [[0, 0, 0.99346], [0.13333, 2.30769, 0.99346], [0.33333, 
2.30769, 0.99345], [0.43333, 2.30769, 0.04167], [0.46667, 2.14286, 0.00432], [0.5, 2, 0.00379], [0.76667, 1.30435, 0.00356], [0.8, 1.25, 0.0035], [0.83333, 1.2, 0.00349], [0.86667, 1.15385, 0.00348], [0.96667, 1.03448, 0.00347], [1, 1, null]], "max_phi": [1, 0.04167], "negative_cdf": [[0, 0, 0.99346], [0.13333, 0, 0.99346], [0.33333, 0, 0.99345], [0.43333, 0, 0.04167], [0.46667, 0.05882, 0.00432], [0.5, 0.11765, 0.00379], [0.76667, 0.58824, 0.00356], [0.8, 0.64706, 0.0035], [0.83333, 0.70588, 0.00349], [0.86667, 0.76471, 0.00348], [0.96667, 0.94118, 0.00347], [1, 1, null]], "per_threshold_confusion_matrices": [[[13, 17, 0, 0], null], [[13, 16, 1, 0], 0.00347], [[13, 13, 4, 0], 0.00348], [[13, 12, 5, 0], 0.00349], [[13, 11, 6, 0], 0.0035], [[13, 10, 7, 0], 0.00356], [[13, 2, 15, 0], 0.00379], [[13, 1, 16, 0], 0.00432], [[13, 0, 17, 0], 0.04167], [[10, 0, 17, 3], 0.99345], [[4, 0, 17, 9], 0.99346], [[0, 0, 17, 13], 0.99346]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99346], [0.30769, 1, 0.99346], [0.76923, 1, 0.99345], [1, 1, 0.04167], [1, 0.92857, 0.00432], [1, 0.86667, 0.00379], [1, 0.56522, 0.00356], [1, 0.54167, 0.0035], [1, 0.52, 0.00349], [1, 0.5, 0.00348], [1, 0.44828, 0.00347], [1, 0.43333, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99346], [0, 0.30769, 0.99346], [0, 0.76923, 0.99345], [0, 1, 0.04167], [0.05882, 1, 0.00432], [0.11765, 1, 0.00379], [0.58824, 1, 0.00356], [0.64706, 1, 0.0035], [0.70588, 1, 0.00349], [0.76471, 1, 0.00348], [0.94118, 1, 0.00347], [1, 1, null]], "spearmans_rho": 0.87216}, {"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-versicolor", "f_measure": 1, "gain_curve": [[0, 0, 0.99305], [0.03333, 0.125, 0.99303], [0.06667, 0.25, 0.99295], [0.1, 0.375, 0.9928], [0.13333, 0.5, 0.99244], [0.16667, 0.625, 0.99207], [0.2, 0.75, 0.9915], [0.23333, 0.875, 0.94401], [0.26667, 1, 0.06915], [0.3, 1, 0.01385], [0.33333, 1, 0.01097], 
[0.36667, 1, 0.00617], [0.4, 1, 0.00413], [0.43333, 1, 0.00379], [0.56667, 1, 0.00328], [0.66667, 1, 0.00328], [0.86667, 1, 0.00327], [1, 1, null]], "kendalls_tau_b": 0.65922, "ks_statistic": [1, 0.06915], "lift_curve": [[0, 0, 0.99305], [0.03333, 3.75, 0.99303], [0.06667, 3.75, 0.99295], [0.1, 3.75, 0.9928], [0.13333, 3.75, 0.99244], [0.16667, 3.75, 0.99207], [0.2, 3.75, 0.9915], [0.23333, 3.75, 0.94401], [0.26667, 3.75, 0.06915], [0.3, 3.33333, 0.01385], [0.33333, 3, 0.01097], [0.36667, 2.72727, 0.00617], [0.4, 2.5, 0.00413], [0.43333, 2.30769, 0.00379], [0.56667, 1.76471, 0.00328], [0.66667, 1.5, 0.00328], [0.86667, 1.15385, 0.00327], [1, 1, null]], "max_phi": [1, 0.06915], "negative_cdf": [[0, 0, 0.99305], [0.03333, 0, 0.99303], [0.06667, 0, 0.99295], [0.1, 0, 0.9928], [0.13333, 0, 0.99244], [0.16667, 0, 0.99207], [0.2, 0, 0.9915], [0.23333, 0, 0.94401], [0.26667, 0, 0.06915], [0.3, 0.04545, 0.01385], [0.33333, 0.09091, 0.01097], [0.36667, 0.13636, 0.00617], [0.4, 0.18182, 0.00413], [0.43333, 0.22727, 0.00379], [0.56667, 0.40909, 0.00328], [0.66667, 0.54545, 0.00328], [0.86667, 0.81818, 0.00327], [1, 1, null]], "per_threshold_confusion_matrices": [[[8, 22, 0, 0], null], [[8, 18, 4, 0], 0.00327], [[8, 12, 10, 0], 0.00328], [[8, 9, 13, 0], 0.00328], [[8, 5, 17, 0], 0.00379], [[8, 4, 18, 0], 0.00413], [[8, 3, 19, 0], 0.00617], [[8, 2, 20, 0], 0.01097], [[8, 1, 21, 0], 0.01385], [[8, 0, 22, 0], 0.06915], [[7, 0, 22, 1], 0.94401], [[6, 0, 22, 2], 0.9915], [[5, 0, 22, 3], 0.99207], [[4, 0, 22, 4], 0.99244], [[3, 0, 22, 5], 0.9928], [[2, 0, 22, 6], 0.99295], [[1, 0, 22, 7], 0.99303], [[0, 0, 22, 8], 0.99305]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99305], [0.125, 1, 0.99303], [0.25, 1, 0.99295], [0.375, 1, 0.9928], [0.5, 1, 0.99244], [0.625, 1, 0.99207], [0.75, 1, 0.9915], [0.875, 1, 0.94401], [1, 1, 0.06915], [1, 0.88889, 0.01385], [1, 0.8, 0.01097], [1, 0.72727, 0.00617], [1, 0.66667, 0.00413], [1, 0.61538, 0.00379], [1, 0.47059, 0.00328], [1, 0.4, 0.00328], 
[1, 0.30769, 0.00327], [1, 0.26667, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99305], [0, 0.125, 0.99303], [0, 0.25, 0.99295], [0, 0.375, 0.9928], [0, 0.5, 0.99244], [0, 0.625, 0.99207], [0, 0.75, 0.9915], [0, 0.875, 0.94401], [0, 1, 0.06915], [0.04545, 1, 0.01385], [0.09091, 1, 0.01097], [0.13636, 1, 0.00617], [0.18182, 1, 0.00413], [0.22727, 1, 0.00379], [0.40909, 1, 0.00328], [0.54545, 1, 0.00328], [0.81818, 1, 0.00327], [1, 1, null]], "spearmans_rho": 0.77145}, {"accuracy": 1, "area_under_pr_curve": 1, "area_under_roc_curve": 1, "balanced_accuracy": 1, "class_name": "Iris-virginica", "f_measure": 1, "gain_curve": [[0, 0, 0.99242], [0.13333, 0.44444, 0.99208], [0.16667, 0.55556, 0.99004], [0.2, 0.66667, 0.98524], [0.23333, 0.77778, 0.98236], [0.26667, 0.88889, 0.88919], [0.3, 1, 0.05252], [0.33333, 1, 0.00502], [0.36667, 1, 0.00409], [0.4, 1, 0.0037], [0.43333, 1, 0.00361], [0.46667, 1, 0.0035], [0.5, 1, 0.00348], [0.56667, 1, 0.00327], [1, 1, null]], "kendalls_tau_b": 0.73485, "ks_statistic": [1, 0.05252], "lift_curve": [[0, 0, 0.99242], [0.13333, 3.33333, 0.99208], [0.16667, 3.33333, 0.99004], [0.2, 3.33333, 0.98524], [0.23333, 3.33333, 0.98236], [0.26667, 3.33333, 0.88919], [0.3, 3.33333, 0.05252], [0.33333, 3, 0.00502], [0.36667, 2.72727, 0.00409], [0.4, 2.5, 0.0037], [0.43333, 2.30769, 0.00361], [0.46667, 2.14286, 0.0035], [0.5, 2, 0.00348], [0.56667, 1.76471, 0.00327], [1, 1, null]], "max_phi": [1, 0.05252], "negative_cdf": [[0, 0, 0.99242], [0.13333, 0, 0.99208], [0.16667, 0, 0.99004], [0.2, 0, 0.98524], [0.23333, 0, 0.98236], [0.26667, 0, 0.88919], [0.3, 0, 0.05252], [0.33333, 0.04762, 0.00502], [0.36667, 0.09524, 0.00409], [0.4, 0.14286, 0.0037], [0.43333, 0.19048, 0.00361], [0.46667, 0.2381, 0.0035], [0.5, 0.28571, 0.00348], [0.56667, 0.38095, 0.00327], [1, 1, null]], "per_threshold_confusion_matrices": [[[9, 21, 0, 0], null], [[9, 8, 13, 0], 0.00327], [[9, 6, 15, 0], 0.00348], [[9, 5, 16, 0], 0.0035], 
[[9, 4, 17, 0], 0.00361], [[9, 3, 18, 0], 0.0037], [[9, 2, 19, 0], 0.00409], [[9, 1, 20, 0], 0.00502], [[9, 0, 21, 0], 0.05252], [[8, 0, 21, 1], 0.88919], [[7, 0, 21, 2], 0.98236], [[6, 0, 21, 3], 0.98524], [[5, 0, 21, 4], 0.99004], [[4, 0, 21, 5], 0.99208], [[0, 0, 21, 9], 0.99242]], "phi_coefficient": 1, "pr_curve": [[0, 1, 0.99242], [0.44444, 1, 0.99208], [0.55556, 1, 0.99004], [0.66667, 1, 0.98524], [0.77778, 1, 0.98236], [0.88889, 1, 0.88919], [1, 1, 0.05252], [1, 0.9, 0.00502], [1, 0.81818, 0.00409], [1, 0.75, 0.0037], [1, 0.69231, 0.00361], [1, 0.64286, 0.0035], [1, 0.6, 0.00348], [1, 0.52941, 0.00327], [1, 0.3, null]], "precision": 1, "present_in_test_data": true, "recall": 1, "roc_curve": [[0, 0, 0.99242], [0, 0.44444, 0.99208], [0, 0.55556, 0.99004], [0, 0.66667, 0.98524], [0, 0.77778, 0.98236], [0, 0.88889, 0.88919], [0, 1, 0.05252], [0.04762, 1, 0.00502], [0.09524, 1, 0.00409], [0.14286, 1, 0.0037], [0.19048, 1, 0.00361], [0.2381, 1, 0.0035], [0.28571, 1, 0.00348], [0.38095, 1, 0.00327], [1, 1, null]], "spearmans_rho": 0.82952}]}, "random": {"accuracy": 0.26667, "average_area_under_pr_curve": 0, "average_area_under_roc_curve": 0, "average_balanced_accuracy": 0.43104, "average_f_measure": 0.23018, "average_kendalls_tau_b": 0, "average_ks_statistic": 0, "average_max_phi": 0, "average_phi": -0.13603, "average_precision": 0.23333, "average_recall": 0.22792, "average_spearmans_rho": 0, "confusion_matrix": [[6, 2, 5], [5, 0, 3], [1, 6, 2]], "per_class_statistics": [{"accuracy": 0.56667, "balanced_accuracy": 0.5543, "class_name": "Iris-setosa", "f_measure": 0.48, "phi_coefficient": 0.10985, "precision": 0.5, "present_in_test_data": true, "recall": 0.46154}, {"accuracy": 0.46667, "balanced_accuracy": 0.31818, "class_name": "Iris-versicolor", "f_measure": 0, "phi_coefficient": -0.36364, "precision": 0, "present_in_test_data": true, "recall": 0}, {"accuracy": 0.5, "balanced_accuracy": 0.42063, "class_name": "Iris-virginica", "f_measure": 0.21053, 
"phi_coefficient": -0.1543, "precision": 0.2, "present_in_test_data": true, "recall": 0.22222}]}}
\ No newline at end of file
diff --git a/check_files/predictions_iris_fs.csv b/check_files/predictions_iris_fs.csv
index 796ac2bf..d1c04868 100644
--- a/check_files/predictions_iris_fs.csv
+++ b/check_files/predictions_iris_fs.csv
@@ -1,30 +1,30 @@
-Iris-versicolor,0.6641
+Iris-versicolor,0.43695
Iris-setosa,0.99346
-Iris-setosa,0.66653
+Iris-setosa,0.66624
Iris-setosa,0.99346
+Iris-setosa,0.99345
Iris-setosa,0.99346
+Iris-setosa,0.99345
Iris-setosa,0.99346
Iris-setosa,0.99346
Iris-setosa,0.99346
-Iris-setosa,0.99346
-Iris-setosa,0.99346
-Iris-versicolor,0.55547
-Iris-versicolor,0.99296
-Iris-versicolor,0.99124
-Iris-versicolor,0.99294
-Iris-versicolor,0.9891
-Iris-virginica,0.85968
-Iris-versicolor,0.99302
+Iris-versicolor,0.57099
+Iris-versicolor,0.99303
+Iris-versicolor,0.9922
Iris-versicolor,0.9929
+Iris-versicolor,0.97285
+Iris-virginica,0.86632
+Iris-versicolor,0.99304
+Iris-versicolor,0.99286
Iris-versicolor,0.99296
-Iris-versicolor,0.99246
+Iris-versicolor,0.99267
Iris-virginica,0.99242
-Iris-virginica,0.99238
+Iris-virginica,0.9924
Iris-virginica,0.99242
-Iris-virginica,0.99229
+Iris-virginica,0.99237
Iris-virginica,0.99242
Iris-virginica,0.99242
-Iris-virginica,0.82592
-Iris-versicolor,0.4819
-Iris-versicolor,0.51524
+Iris-virginica,0.83058
+Iris-virginica,0.59938
+Iris-virginica,0.54864
Iris-virginica,0.99242
diff --git a/check_files/predictions_iris_fs2.csv b/check_files/predictions_iris_fs2.csv
new file mode 100644
index 00000000..298b0d0e
--- /dev/null
+++ b/check_files/predictions_iris_fs2.csv
@@ -0,0 +1,30 @@
+Iris-versicolor,0.43695
+Iris-setosa,0.99346
+Iris-setosa,0.66624
+Iris-setosa,0.993455
+Iris-setosa,0.99345
+Iris-setosa,0.993455
+Iris-setosa,0.99345
+Iris-setosa,0.99346
+Iris-setosa,0.99346
+Iris-setosa,0.993455
+Iris-versicolor,0.57099
+Iris-versicolor,0.99303
+Iris-versicolor,0.9922
+Iris-versicolor,0.9929
+Iris-versicolor,0.97285
+Iris-virginica,0.86633
+Iris-versicolor,0.99304
+Iris-versicolor,0.99286
+Iris-versicolor,0.99296
+Iris-versicolor,0.99267
+Iris-virginica,0.99242
+Iris-virginica,0.9924
+Iris-virginica,0.99242
+Iris-virginica,0.99237
+Iris-virginica,0.99242
+Iris-virginica,0.99242
+Iris-virginica,0.83058
+Iris-virginica,0.59938
+Iris-virginica,0.54864
+Iris-virginica,0.99242
diff --git a/check_files/predictions_iris_fs_prob.csv b/check_files/predictions_iris_fs_prob.csv
index fade423e..494b8726 100644
--- a/check_files/predictions_iris_fs_prob.csv
+++ b/check_files/predictions_iris_fs_prob.csv
@@ -1,30 +1,30 @@
-4.1,2.4,,,Iris-versicolor,0.6641,0.16698,0.6641,0.16892
+4.1,2.4,,,Iris-versicolor,0.43695,0.16805,0.43695,0.39501
5.0,3.7,1.3,0.2,Iris-setosa,0.99346,0.99346,0.00327,0.00327
-4.5,,,0.2,Iris-setosa,0.66653,0.66653,0.1668,0.16667
-4.9,3.2,1.3,0.2,Iris-setosa,0.99346,0.99346,0.00327,0.00327
-5.0,3.5,1.6,0.6,Iris-setosa,0.99346,0.99346,0.00327,0.00327
-5.1,3.8,1.9,0.4,Iris-setosa,0.99346,0.99346,0.00327,0.00327
-4.8,3.0,1.4,0.2,Iris-setosa,0.99346,0.99346,0.00327,0.00327
+4.5,,,0.2,Iris-setosa,0.66624,0.66624,0.16709,0.16667
+4.9,3.2,1.3,0.2,Iris-setosa,0.993455,0.993455,0.003275,0.00327
+5.0,3.5,1.6,0.6,Iris-setosa,0.99345,0.99345,0.00328,0.00327
+5.1,3.8,1.9,0.4,Iris-setosa,0.993455,0.993455,0.003275,0.00327
+4.8,3.0,1.4,0.2,Iris-setosa,0.99345,0.99345,0.00328,0.00327
5.1,3.8,1.6,0.2,Iris-setosa,0.99346,0.99346,0.00327,0.00327
4.6,3.2,1.4,0.2,Iris-setosa,0.99346,0.99346,0.00327,0.00327
-5.3,3.7,1.5,0.2,Iris-setosa,0.99346,0.99346,0.00327,0.00327
-6.7,3.1,4.7,1.7,Iris-versicolor,0.55547,0.08334,0.55547,0.36119
-6.3,2.3,4.4,1.2,Iris-versicolor,0.99296,0.00347,0.99296,0.00357
-5.6,3.0,4.1,1.2,Iris-versicolor,0.99124,0.00528,0.99124,0.00349
-5.5,2.5,4.0,1.2,Iris-versicolor,0.99294,0.00348,0.99294,0.00359
-5.5,2.6,4.9,1.2,Iris-versicolor,0.9891,0.00347,0.9891,0.00743
-6.1,3.0,4.6,1.9,Iris-virginica,0.85968,0.05556,0.08477,0.85968
-5.8,2.6,4.0,1.2,Iris-versicolor,0.99302,0.00348,0.99302,0.00351
-5.0,2.3,3.3,1.0,Iris-versicolor,0.9929,0.00362,0.9929,0.00349
-5.6,2.7,4.2,1.2,Iris-versicolor,0.99296,0.00349,0.99296,0.00356
-5.7,3.0,4.2,1.2,Iris-versicolor,0.99246,0.00405,0.99246,0.00349
+5.3,3.7,1.5,0.2,Iris-setosa,0.993455,0.993455,0.003275,0.00327
+6.7,3.1,4.7,1.7,Iris-versicolor,0.57099,0.08333,0.57099,0.34568
+6.3,2.3,4.4,1.2,Iris-versicolor,0.99303,0.00347,0.99303,0.0035
+5.6,3.0,4.1,1.2,Iris-versicolor,0.9922,0.00433,0.9922,0.00347
+5.5,2.5,4.0,1.2,Iris-versicolor,0.9929,0.00348,0.9929,0.00362
+5.5,2.6,4.9,1.2,Iris-versicolor,0.97285,0.00348,0.97285,0.02367
+6.1,3.0,4.6,1.9,Iris-virginica,0.86633,0.05556,0.07812,0.86633
+5.8,2.6,4.0,1.2,Iris-versicolor,0.99304,0.00348,0.99304,0.00349
+5.0,2.3,3.3,1.0,Iris-versicolor,0.99286,0.00356,0.99286,0.00358
+5.6,2.7,4.2,1.2,Iris-versicolor,0.99296,0.0035,0.99296,0.00355
+5.7,3.0,4.2,1.2,Iris-versicolor,0.99267,0.00386,0.99267,0.00348
6.3,3.3,6.0,2.7,Iris-virginica,0.99242,0.00379,0.00379,0.99242
-5.1,2.7,5.1,1.9,Iris-virginica,0.99238,0.00379,0.00383,0.99238
+5.1,2.7,5.1,1.9,Iris-virginica,0.9924,0.00379,0.00381,0.9924
7.1,3.0,5.9,2.1,Iris-virginica,0.99242,0.00379,0.00379,0.99242
-6.3,2.9,5.6,1.8,Iris-virginica,0.99229,0.00379,0.00392,0.99229
+6.3,2.9,5.6,1.8,Iris-virginica,0.99237,0.00379,0.00384,0.99237
6.5,3.0,5.8,2.2,Iris-virginica,0.99242,0.00379,0.00379,0.99242
7.6,3.0,6.6,2.1,Iris-virginica,0.99242,0.00379,0.00379,0.99242
-4.9,2.7,4.7,1.7,Iris-virginica,0.82592,0.08333,0.09075,0.82592
-7.3,2.9,6.3,1.1,Iris-versicolor,0.4819,0.04167,0.4819,0.47644
-6.7,2.5,5.8,1.1,Iris-versicolor,0.51524,0.04167,0.51524,0.4431
+4.9,2.7,4.7,1.7,Iris-virginica,0.83058,0.08333,0.08609,0.83058
+7.3,2.9,6.3,1.1,Iris-virginica,0.59938,0.04167,0.35896,0.59938
+6.7,2.5,5.8,1.1,Iris-virginica,0.54864,0.04167,0.4097,0.54864
7.2,3.6,6.1,2.5,Iris-virginica,0.99242,0.00379,0.00379,0.99242
diff --git a/data/images/VOC_annotations/000004.xml b/data/images/VOC_annotations/000004.xml
index ee0047f7..5bf30011 100644
--- a/data/images/VOC_annotations/000004.xml
+++ b/data/images/VOC_annotations/000004.xml
@@ -1,6 +1,7 @@
<annotation>
<folder>f1</folder>
<filename>fruits1b.png</filename>
+ <path>C:\fruits1b.png</path>
<source>
<database>The VOC2007 Database</database>
<annotation>PASCAL VOC2007</annotation>
| Changing to ntpath support both Windows and Unix style paths in the XML
| I wonder if using pathlib would make more sense | 2024-05-31T21:37:18 | 0.0 | [] | [] |
||
cloudcomponents/cdk-constructs | cloudcomponents__cdk-constructs-144 | dfbf1fc66bac0a9f6658a70eccdfb68f2b096140 | diff --git a/packages/cdk-codepipeline-slack/src/slack-approval-action.ts b/packages/cdk-codepipeline-slack/src/slack-approval-action.ts
index d07d513b1..8a7be83cf 100644
--- a/packages/cdk-codepipeline-slack/src/slack-approval-action.ts
+++ b/packages/cdk-codepipeline-slack/src/slack-approval-action.ts
@@ -63,7 +63,7 @@ export class SlackApprovalAction extends Action {
}
const approvalRequester = new Function(scope, 'SlackApprovalRequesterFunction', {
- runtime: Runtime.NODEJS_10_X,
+ runtime: Runtime.NODEJS_14_X,
handler: 'index.handler',
code: Code.fromAsset(path.join(__dirname, 'lambdas', 'approval-requester')),
environment,
@@ -74,7 +74,7 @@ export class SlackApprovalAction extends Action {
topic.addSubscription(new LambdaSubscription(approvalRequester));
const approvalHandler = new Function(scope, 'SlackApprovalHandlerFunction', {
- runtime: Runtime.NODEJS_10_X,
+ runtime: Runtime.NODEJS_14_X,
handler: 'index.handler',
code: Code.fromAsset(path.join(__dirname, 'lambdas', 'approval-handler')),
environment,
diff --git a/packages/cdk-codepipeline-slack/src/slack-notifier.ts b/packages/cdk-codepipeline-slack/src/slack-notifier.ts
index 395558f6b..3cc39e8b2 100644
--- a/packages/cdk-codepipeline-slack/src/slack-notifier.ts
+++ b/packages/cdk-codepipeline-slack/src/slack-notifier.ts
@@ -46,7 +46,7 @@ export class SlackNotifier extends Construct {
}
const notifier = new Function(scope, 'SlackNotifierFunction', {
- runtime: Runtime.NODEJS_12_X,
+ runtime: Runtime.NODEJS_14_X,
handler: 'index.handler',
code: Code.fromAsset(path.join(__dirname, 'lambdas', 'notifier')),
environment: this.environment,
diff --git a/packages/cdk-contentful-webhook/lambda-file-sizes.json b/packages/cdk-contentful-webhook/lambda-file-sizes.json
index 2c273fcca..4db5f10f2 100644
--- a/packages/cdk-contentful-webhook/lambda-file-sizes.json
+++ b/packages/cdk-contentful-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1637517394655,"files":[{"filename":"contentful-webhook/index.js","previous":225899,"size":225902,"diff":3}]},{"timestamp":1633809671663,"files":[{"filename":"contentful-webhook/index.js","previous":231384,"size":225899,"diff":-5485}]},{"timestamp":1633807456780,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231384,"diff":3}]},{"timestamp":1620565790484,"files":[{"filename":"contentful-webhook/index.js","previous":231292,"size":231381,"diff":89}]},{"timestamp":1620265629961,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231292,"diff":-89}]},{"timestamp":1615923664936,"files":[{"filename":"contentful-webhook/index.js","previous":1050356,"size":231381,"diff":-818975}]},{"timestamp":1615923376037,"files":[{"filename":"contentful-webhook/index.js","previous":1047464,"size":1050356,"diff":2892}]},{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"tim
estamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
+[{"timestamp":1639158550913,"files":[{"filename":"contentful-webhook/index.js","previous":225902,"size":225836,"diff":-66}]},{"timestamp":1637517394655,"files":[{"filename":"contentful-webhook/index.js","previous":225899,"size":225902,"diff":3}]},{"timestamp":1633809671663,"files":[{"filename":"contentful-webhook/index.js","previous":231384,"size":225899,"diff":-5485}]},{"timestamp":1633807456780,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231384,"diff":3}]},{"timestamp":1620565790484,"files":[{"filename":"contentful-webhook/index.js","previous":231292,"size":231381,"diff":89}]},{"timestamp":1620265629961,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231292,"diff":-89}]},{"timestamp":1615923664936,"files":[{"filename":"contentful-webhook/index.js","previous":1050356,"size":231381,"diff":-818975}]},{"timestamp":1615923376037,"files":[{"filename":"contentful-webhook/index.js","previous":1047464,"size":1050356,"diff":2892}]},{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"times
tamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
diff --git a/packages/cdk-github-webhook/lambda-file-sizes.json b/packages/cdk-github-webhook/lambda-file-sizes.json
index a167385c8..e221d8744 100644
--- a/packages/cdk-github-webhook/lambda-file-sizes.json
+++ b/packages/cdk-github-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1637517397296,"files":[{"filename":"github-webhook/index.js","previous":262039,"size":262040,"diff":1}]},{"timestamp":1633809689104,"files":[{"filename":"github-webhook/index.js","previous":193368,"size":262039,"diff":68671}]},{"timestamp":1633809137901,"files":[{"filename":"github-webhook/index.js","previous":191869,"size":193368,"diff":1499}]},{"timestamp":1622147586657,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191869,"diff":77}]},{"timestamp":1620565795835,"files":[{"filename":"github-webhook/index.js","previous":191427,"size":191792,"diff":365}]},{"timestamp":1620265643898,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191427,"diff":-365}]},{"timestamp":1619038419269,"files":[{"filename":"github-webhook/index.js","previous":191797,"size":191792,"diff":-5}]},{"timestamp":1617394285426,"files":[{"filename":"github-webhook/index.js","previous":191760,"size":191797,"diff":37}]},{"timestamp":1616697789259,"files":[{"filename":"github-webhook/index.js","previous":191665,"size":191760,"diff":95}]},{"timestamp":1615923673723,"files":[{"filename":"github-webhook/index.js","previous":1011459,"size":191665,"diff":-819794}]},{"timestamp":1615923385141,"files":[{"filename":"github-webhook/index.js","previous":1008634,"size":1011459,"diff":2825}]},{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.
js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.js","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
+[{"timestamp":1639158552522,"files":[{"filename":"github-webhook/index.js","previous":262040,"size":262001,"diff":-39}]},{"timestamp":1637517397296,"files":[{"filename":"github-webhook/index.js","previous":262039,"size":262040,"diff":1}]},{"timestamp":1633809689104,"files":[{"filename":"github-webhook/index.js","previous":193368,"size":262039,"diff":68671}]},{"timestamp":1633809137901,"files":[{"filename":"github-webhook/index.js","previous":191869,"size":193368,"diff":1499}]},{"timestamp":1622147586657,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191869,"diff":77}]},{"timestamp":1620565795835,"files":[{"filename":"github-webhook/index.js","previous":191427,"size":191792,"diff":365}]},{"timestamp":1620265643898,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191427,"diff":-365}]},{"timestamp":1619038419269,"files":[{"filename":"github-webhook/index.js","previous":191797,"size":191792,"diff":-5}]},{"timestamp":1617394285426,"files":[{"filename":"github-webhook/index.js","previous":191760,"size":191797,"diff":37}]},{"timestamp":1616697789259,"files":[{"filename":"github-webhook/index.js","previous":191665,"size":191760,"diff":95}]},{"timestamp":1615923673723,"files":[{"filename":"github-webhook/index.js","previous":1011459,"size":191665,"diff":-819794}]},{"timestamp":1615923385141,"files":[{"filename":"github-webhook/index.js","previous":1008634,"size":1011459,"diff":2825}]},{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index
.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.js","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
| `SlackApprovalAction` fails due to outdated node runtime in Lambda function
I'm trying to use the `SlackApprovalAction` from `@cloudcomponents/cdk-codepipeline-slack`, but I receive the following error when I run it:
```
Resource handler returned message: "The runtime parameter of nodejs10.x is no longer supported for
creating or updating AWS Lambda functions. We recommend you use the new runtime (nodejs14.x) while
creating or updating functions.
```
Is there any reason that lambda needs to use nodejs10.x? None of the lambdas are doing anything particularly complex so it *appears* to be safe to upgrade.
| There is no reason for that. It's just a mistake. | 2021-12-08T05:21:48 | 0.0 | [] | [] |
||
cloudcomponents/cdk-constructs | cloudcomponents__cdk-constructs-129 | 0ff07cbce1fe25b5ea6d57ce08553f4b1fff90c8 | diff --git a/examples/blue-green-container-deployment-example/src/blue-green-container-deployment-stack.ts b/examples/blue-green-container-deployment-example/src/blue-green-container-deployment-stack.ts
index ec32b5b93..47be2d583 100644
--- a/examples/blue-green-container-deployment-example/src/blue-green-container-deployment-stack.ts
+++ b/examples/blue-green-container-deployment-example/src/blue-green-container-deployment-stack.ts
@@ -4,7 +4,7 @@ import { CodeBuildAction, CodeCommitSourceAction, CodeDeployEcsDeployAction } fr
import { Vpc, Port } from '@aws-cdk/aws-ec2';
import { Cluster } from '@aws-cdk/aws-ecs';
import { ApplicationLoadBalancer, ApplicationTargetGroup, TargetType } from '@aws-cdk/aws-elasticloadbalancingv2';
-import { Construct, Stack, StackProps } from '@aws-cdk/core';
+import { Construct, Duration, Stack, StackProps } from '@aws-cdk/core';
import { EcsService, DummyTaskDefinition, EcsDeploymentGroup, PushImageProject } from '@cloudcomponents/cdk-blue-green-container-deployment';
import { ImageRepository } from '@cloudcomponents/cdk-container-registry';
@@ -67,6 +67,7 @@ export class BlueGreenContainerDeploymentStack extends Stack {
desiredCount: 2,
taskDefinition,
prodTargetGroup,
+ testTargetGroup,
});
ecsService.connections.allowFrom(loadBalancer, Port.tcp(80));
@@ -76,10 +77,10 @@ export class BlueGreenContainerDeploymentStack extends Stack {
applicationName: 'blue-green-application',
deploymentGroupName: 'blue-green-deployment-group',
ecsServices: [ecsService],
- targetGroupNames: [prodTargetGroup.targetGroupName, testTargetGroup.targetGroupName],
+ targetGroups: [prodTargetGroup, testTargetGroup],
prodTrafficListener: prodListener,
testTrafficListener: testListener,
- terminationWaitTimeInMinutes: 100,
+ terminationWaitTime: Duration.minutes(100),
});
// @see files: ./blue-green-repository for example content
diff --git a/packages/cdk-blue-green-container-deployment/API.md b/packages/cdk-blue-green-container-deployment/API.md
index 12aa7a197..054040491 100644
--- a/packages/cdk-blue-green-container-deployment/API.md
+++ b/packages/cdk-blue-green-container-deployment/API.md
@@ -177,12 +177,12 @@ new EcsDeploymentGroup(scope: Construct, id: string, props: EcsDeploymentGroupPr
* **deploymentGroupName** (<code>string</code>) *No description*
* **ecsServices** (<code>Array<[IEcsService](#cloudcomponents-cdk-blue-green-container-deployment-iecsservice)></code>) *No description*
* **prodTrafficListener** (<code>[TrafficListener](#cloudcomponents-cdk-blue-green-container-deployment-trafficlistener)</code>) *No description*
- * **targetGroupNames** (<code>Array<string></code>) *No description*
+ * **targetGroups** (<code>Array<[ApplicationTargetGroup](#aws-cdk-aws-elasticloadbalancingv2-applicationtargetgroup)></code>) *No description*
* **testTrafficListener** (<code>[TrafficListener](#cloudcomponents-cdk-blue-green-container-deployment-trafficlistener)</code>) *No description*
* **applicationName** (<code>string</code>) *No description* __*Optional*__
* **autoRollbackOnEvents** (<code>Array<[RollbackEvent](#cloudcomponents-cdk-blue-green-container-deployment-rollbackevent)></code>) The event type or types that trigger a rollback. __*Optional*__
* **deploymentConfig** (<code>[IEcsDeploymentConfig](#cloudcomponents-cdk-blue-green-container-deployment-iecsdeploymentconfig)</code>) *No description* __*Optional*__
- * **terminationWaitTimeInMinutes** (<code>number</code>) the number of minutes before deleting the original (blue) task set. __*Default*__: 60
+ * **terminationWaitTime** (<code>[Duration](#aws-cdk-core-duration)</code>) the number of minutes before deleting the original (blue) task set. __*Default*__: 60 minutes
@@ -221,6 +221,7 @@ new EcsService(scope: Construct, id: string, props: EcsServiceProps)
* **prodTargetGroup** (<code>[ITargetGroup](#aws-cdk-aws-elasticloadbalancingv2-itargetgroup)</code>) *No description*
* **serviceName** (<code>string</code>) *No description*
* **taskDefinition** (<code>[DummyTaskDefinition](#cloudcomponents-cdk-blue-green-container-deployment-dummytaskdefinition)</code>) *No description*
+ * **testTargetGroup** (<code>[ITargetGroup](#aws-cdk-aws-elasticloadbalancingv2-itargetgroup)</code>) *No description*
* **circuitBreaker** (<code>[DeploymentCircuitBreaker](#aws-cdk-aws-ecs-deploymentcircuitbreaker)</code>) Whether to enable the deployment circuit breaker. __*Default*__: disabled
* **containerPort** (<code>number</code>) *No description* __*Optional*__
* **desiredCount** (<code>number</code>) *No description* __*Optional*__
@@ -317,12 +318,12 @@ Name | Type | Description
**deploymentGroupName** | <code>string</code> | <span></span>
**ecsServices** | <code>Array<[IEcsService](#cloudcomponents-cdk-blue-green-container-deployment-iecsservice)></code> | <span></span>
**prodTrafficListener** | <code>[TrafficListener](#cloudcomponents-cdk-blue-green-container-deployment-trafficlistener)</code> | <span></span>
-**targetGroupNames** | <code>Array<string></code> | <span></span>
+**targetGroups** | <code>Array<[ApplicationTargetGroup](#aws-cdk-aws-elasticloadbalancingv2-applicationtargetgroup)></code> | <span></span>
**testTrafficListener** | <code>[TrafficListener](#cloudcomponents-cdk-blue-green-container-deployment-trafficlistener)</code> | <span></span>
**applicationName**? | <code>string</code> | __*Optional*__
**autoRollbackOnEvents**? | <code>Array<[RollbackEvent](#cloudcomponents-cdk-blue-green-container-deployment-rollbackevent)></code> | The event type or types that trigger a rollback.<br/>__*Optional*__
**deploymentConfig**? | <code>[IEcsDeploymentConfig](#cloudcomponents-cdk-blue-green-container-deployment-iecsdeploymentconfig)</code> | __*Optional*__
-**terminationWaitTimeInMinutes**? | <code>number</code> | the number of minutes before deleting the original (blue) task set.<br/>__*Default*__: 60
+**terminationWaitTime**? | <code>[Duration](#aws-cdk-core-duration)</code> | the number of minutes before deleting the original (blue) task set.<br/>__*Default*__: 60 minutes
@@ -339,6 +340,7 @@ Name | Type | Description
**prodTargetGroup** | <code>[ITargetGroup](#aws-cdk-aws-elasticloadbalancingv2-itargetgroup)</code> | <span></span>
**serviceName** | <code>string</code> | <span></span>
**taskDefinition** | <code>[DummyTaskDefinition](#cloudcomponents-cdk-blue-green-container-deployment-dummytaskdefinition)</code> | <span></span>
+**testTargetGroup** | <code>[ITargetGroup](#aws-cdk-aws-elasticloadbalancingv2-itargetgroup)</code> | <span></span>
**circuitBreaker**? | <code>[DeploymentCircuitBreaker](#aws-cdk-aws-ecs-deploymentcircuitbreaker)</code> | Whether to enable the deployment circuit breaker.<br/>__*Default*__: disabled
**containerPort**? | <code>number</code> | __*Optional*__
**desiredCount**? | <code>number</code> | __*Optional*__
diff --git a/packages/cdk-blue-green-container-deployment/lambda-file-sizes.json b/packages/cdk-blue-green-container-deployment/lambda-file-sizes.json
index 6e7d33e9b..5fc34da0a 100644
--- a/packages/cdk-blue-green-container-deployment/lambda-file-sizes.json
+++ b/packages/cdk-blue-green-container-deployment/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1631683651094,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":1728,"diff":0},{"filename":"ecs-deployment-group/index.js","previous":2182,"size":2182,"diff":0},{"filename":"ecs-service/index.js","previous":2152,"size":2138,"diff":-14}]},{"timestamp":1629825530770,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":1728,"diff":0},{"filename":"ecs-deployment-group/index.js","previous":2171,"size":2182,"diff":11},{"filename":"ecs-service/index.js","previous":2152,"size":2152,"diff":0}]},{"timestamp":1629823812165,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":1728,"diff":0},{"filename":"ecs-deployment-group/index.js","previous":2123,"size":2171,"diff":48},{"filename":"ecs-service/index.js","previous":2152,"size":2152,"diff":0}]},{"timestamp":1627652613714,"files":[{"filename":"dummy-task-definition/index.js","previous":1726,"size":1728,"diff":2},{"filename":"ecs-deployment-group/index.js","previous":2123,"size":2123,"diff":0},{"filename":"ecs-service/index.js","previous":2081,"size":2152,"diff":71}]},{"timestamp":1609276390870,"files":[{"filename":"dummy-task-definition/index.js","previous":1712,"size":1726,"diff":14},{"filename":"ecs-deployment-group/index.js","previous":2123,"size":2123,"diff":0},{"filename":"ecs-service/index.js","previous":2081,"size":2081,"diff":0}]},{"timestamp":1606329521054,"files":[{"filename":"dummy-task-definition/index.js","previous":1706,"size":1712,"diff":6},{"filename":"ecs-deployment-group/index.js","previous":2116,"size":2123,"diff":7},{"filename":"ecs-service/index.js","previous":2073,"size":2081,"diff":8}]},{"timestamp":1596457247342,"files":[{"filename":"dummy-task-definition/index.js","previous":1756,"size":1706,"diff":-50},{"filename":"ecs-deployment-group/index.js","previous":2116,"size":2116,"diff":0},{"filename":"ecs-service/index.js","previous":2073,"size":2073,"diff":0}]},{"timestamp":1596454924871,"files":[{"filename":"dummy-tas
k-definition/index.js","previous":4964,"size":1756,"diff":-3208},{"filename":"ecs-deployment-group/index.js","previous":6103,"size":2116,"diff":-3987},{"filename":"ecs-service/index.js","previous":6141,"size":2073,"diff":-4068}]},{"timestamp":1596407637937,"files":[{"filename":"blue-green-service/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"blue-green-service/index.js","previous":3368,"size":0,"diff":-3368},{"filename":"dummy-task-definition/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"dummy-task-definition/index.js","previous":1963,"size":4964,"diff":3001},{"filename":"ecs-deployment-group/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"ecs-deployment-group/index.js","previous":2292,"size":6103,"diff":3811},{"filename":"ecs-service/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"ecs-service/index.js","previous":2312,"size":6141,"diff":3829}]}]
+[{"timestamp":1634467601943,"files":[{"filename":"ecs-deployment-group/index.js","previous":144792,"size":144825,"diff":33},{"filename":"ecs-service/index.js","previous":2133,"size":144797,"diff":142664}]},{"timestamp":1634411334117,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":0,"diff":-1728},{"filename":"ecs-deployment-group/index.js","previous":2182,"size":144792,"diff":142610},{"filename":"ecs-service/index.js","previous":2138,"size":2133,"diff":-5}]},{"timestamp":1631683651094,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":1728,"diff":0},{"filename":"ecs-deployment-group/index.js","previous":2182,"size":2182,"diff":0},{"filename":"ecs-service/index.js","previous":2152,"size":2138,"diff":-14}]},{"timestamp":1629825530770,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":1728,"diff":0},{"filename":"ecs-deployment-group/index.js","previous":2171,"size":2182,"diff":11},{"filename":"ecs-service/index.js","previous":2152,"size":2152,"diff":0}]},{"timestamp":1629823812165,"files":[{"filename":"dummy-task-definition/index.js","previous":1728,"size":1728,"diff":0},{"filename":"ecs-deployment-group/index.js","previous":2123,"size":2171,"diff":48},{"filename":"ecs-service/index.js","previous":2152,"size":2152,"diff":0}]},{"timestamp":1627652613714,"files":[{"filename":"dummy-task-definition/index.js","previous":1726,"size":1728,"diff":2},{"filename":"ecs-deployment-group/index.js","previous":2123,"size":2123,"diff":0},{"filename":"ecs-service/index.js","previous":2081,"size":2152,"diff":71}]},{"timestamp":1609276390870,"files":[{"filename":"dummy-task-definition/index.js","previous":1712,"size":1726,"diff":14},{"filename":"ecs-deployment-group/index.js","previous":2123,"size":2123,"diff":0},{"filename":"ecs-service/index.js","previous":2081,"size":2081,"diff":0}]},{"timestamp":1606329521054,"files":[{"filename":"dummy-task-definition/index.js","previous":1706,"size":1712,"diff":6},{"f
ilename":"ecs-deployment-group/index.js","previous":2116,"size":2123,"diff":7},{"filename":"ecs-service/index.js","previous":2073,"size":2081,"diff":8}]},{"timestamp":1596457247342,"files":[{"filename":"dummy-task-definition/index.js","previous":1756,"size":1706,"diff":-50},{"filename":"ecs-deployment-group/index.js","previous":2116,"size":2116,"diff":0},{"filename":"ecs-service/index.js","previous":2073,"size":2073,"diff":0}]},{"timestamp":1596454924871,"files":[{"filename":"dummy-task-definition/index.js","previous":4964,"size":1756,"diff":-3208},{"filename":"ecs-deployment-group/index.js","previous":6103,"size":2116,"diff":-3987},{"filename":"ecs-service/index.js","previous":6141,"size":2073,"diff":-4068}]},{"timestamp":1596407637937,"files":[{"filename":"blue-green-service/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"blue-green-service/index.js","previous":3368,"size":0,"diff":-3368},{"filename":"dummy-task-definition/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"dummy-task-definition/index.js","previous":1963,"size":4964,"diff":3001},{"filename":"ecs-deployment-group/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"ecs-deployment-group/index.js","previous":2292,"size":6103,"diff":3811},{"filename":"ecs-service/__entrypoint__.js","previous":6760,"size":0,"diff":-6760},{"filename":"ecs-service/index.js","previous":2312,"size":6141,"diff":3829}]}]
diff --git a/packages/cdk-blue-green-container-deployment/package.json b/packages/cdk-blue-green-container-deployment/package.json
index 52702c0cf..ef3a838cf 100644
--- a/packages/cdk-blue-green-container-deployment/package.json
+++ b/packages/cdk-blue-green-container-deployment/package.json
@@ -63,27 +63,32 @@
"peerDependencies": {
"@aws-cdk/aws-codebuild": "^1.127.0",
"@aws-cdk/aws-codedeploy": "^1.127.0",
+ "@aws-cdk/custom-resources": "^1.127.0",
"@aws-cdk/aws-ec2": "^1.127.0",
"@aws-cdk/aws-ecr": "^1.127.0",
"@aws-cdk/aws-ecs": "^1.127.0",
"@aws-cdk/aws-elasticloadbalancingv2": "^1.127.0",
"@aws-cdk/aws-iam": "^1.127.0",
+ "@aws-cdk/aws-lambda": "^1.127.0",
"@aws-cdk/core": "^1.127.0",
"constructs": "^3.2.0"
},
"dependencies": {
"@aws-cdk/aws-codebuild": "^1.127.0",
"@aws-cdk/aws-codedeploy": "^1.127.0",
+ "@aws-cdk/custom-resources": "^1.127.0",
"@aws-cdk/aws-ec2": "^1.127.0",
"@aws-cdk/aws-ecr": "^1.127.0",
"@aws-cdk/aws-ecs": "^1.127.0",
"@aws-cdk/aws-elasticloadbalancingv2": "^1.127.0",
"@aws-cdk/aws-iam": "^1.127.0",
+ "@aws-cdk/aws-lambda": "^1.127.0",
"@aws-cdk/core": "^1.127.0"
},
"devDependencies": {
"@aws-cdk/assert": "^1.127.0",
"aws-sdk": "^2.1004.0",
+ "custom-resource-helper": "^1.0.15",
"jest-cdk-snapshot": "^1.4.2"
},
"externals": [
diff --git a/packages/cdk-blue-green-container-deployment/src/dummy-task-definition.ts b/packages/cdk-blue-green-container-deployment/src/dummy-task-definition.ts
index ede7cb0d5..f5397a639 100644
--- a/packages/cdk-blue-green-container-deployment/src/dummy-task-definition.ts
+++ b/packages/cdk-blue-green-container-deployment/src/dummy-task-definition.ts
@@ -1,8 +1,7 @@
-import * as path from 'path';
-
import { NetworkMode } from '@aws-cdk/aws-ecs';
import { Role, ServicePrincipal, ManagedPolicy, PolicyStatement, Effect, IRole } from '@aws-cdk/aws-iam';
-import { Construct, CustomResource, CustomResourceProvider, CustomResourceProviderRuntime } from '@aws-cdk/core';
+import { Construct } from '@aws-cdk/core';
+import { AwsCustomResource, AwsCustomResourcePolicy, PhysicalResourceId, PhysicalResourceIdReference } from '@aws-cdk/custom-resources';
export interface IDummyTaskDefinition {
readonly executionRole: IRole;
@@ -60,41 +59,60 @@ export class DummyTaskDefinition extends Construct implements IDummyTaskDefiniti
managedPolicies: [ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonECSTaskExecutionRolePolicy')],
});
- const serviceToken = CustomResourceProvider.getOrCreate(this, 'Custom::DummyTaskDefinition', {
- codeDirectory: path.join(__dirname, 'lambdas', 'dummy-task-definition'),
- runtime: CustomResourceProviderRuntime.NODEJS_12_X,
- policyStatements: [
- {
- Effect: Effect.ALLOW,
- Action: ['ecs:RegisterTaskDefinition', 'ecs:DeregisterTaskDefinition'],
- Resource: '*',
- },
- {
- Effect: Effect.ALLOW,
- Action: ['iam:PassRole'],
- Resource: this.executionRole.roleArn,
- },
- ],
- });
-
this.family = props.family ?? this.node.addr;
this.containerName = props.containerName ?? 'sample-website';
this.containerPort = props.containerPort ?? 80;
- const taskDefinition = new CustomResource(this, 'CustomResource', {
- serviceToken,
+ const taskDefinition = new AwsCustomResource(this, 'DummyTaskDefinition', {
resourceType: 'Custom::DummyTaskDefinition',
- properties: {
- Family: this.family,
- Image: props.image,
- ExecutionRoleArn: this.executionRole.roleArn,
- NetworkMode: NetworkMode.AWS_VPC,
- ContainerName: this.containerName,
- ContainerPort: this.containerPort,
+ onCreate: {
+ service: 'ECS',
+ action: 'registerTaskDefinition',
+ parameters: {
+ requiresCompatibilities: ['FARGATE'],
+ family: this.family,
+ executionRoleArn: this.executionRole.roleArn,
+ networkMode: NetworkMode.AWS_VPC,
+ cpu: '256',
+ memory: '512',
+ containerDefinitions: [
+ {
+ name: this.containerName,
+ image: props.image,
+ portMappings: [
+ {
+ hostPort: this.containerPort,
+ protocol: 'tcp',
+ containerPort: this.containerPort,
+ },
+ ],
+ },
+ ],
+ },
+ physicalResourceId: PhysicalResourceId.fromResponse('taskDefinition.taskDefinitionArn'),
+ },
+ onDelete: {
+ service: 'ECS',
+ action: 'deregisterTaskDefinition',
+ parameters: {
+ taskDefinition: new PhysicalResourceIdReference(),
+ },
},
+ policy: AwsCustomResourcePolicy.fromStatements([
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['ecs:RegisterTaskDefinition', 'ecs:DeregisterTaskDefinition'],
+ resources: ['*'],
+ }),
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['iam:PassRole'],
+ resources: [this.executionRole.roleArn],
+ }),
+ ]),
});
- this.taskDefinitionArn = taskDefinition.ref;
+ this.taskDefinitionArn = taskDefinition.getResponseField('taskDefinition.taskDefinitionArn');
}
/**
diff --git a/packages/cdk-blue-green-container-deployment/src/ecs-deployment-group.ts b/packages/cdk-blue-green-container-deployment/src/ecs-deployment-group.ts
index cf1b5af9d..6a4591088 100644
--- a/packages/cdk-blue-green-container-deployment/src/ecs-deployment-group.ts
+++ b/packages/cdk-blue-green-container-deployment/src/ecs-deployment-group.ts
@@ -1,7 +1,9 @@
import * as path from 'path';
import { EcsApplication, IEcsApplication } from '@aws-cdk/aws-codedeploy';
-import { Role, ServicePrincipal, ManagedPolicy, Effect } from '@aws-cdk/aws-iam';
-import { Aws, Construct, Resource, IResource, CustomResource, CustomResourceProvider, CustomResourceProviderRuntime } from '@aws-cdk/core';
+import { ApplicationTargetGroup } from '@aws-cdk/aws-elasticloadbalancingv2';
+import { Role, ServicePrincipal, ManagedPolicy, Effect, PolicyStatement } from '@aws-cdk/aws-iam';
+import { Function, Runtime, Code } from '@aws-cdk/aws-lambda';
+import { Aws, Construct, Resource, IResource, CustomResource, Duration } from '@aws-cdk/core';
import { EcsDeploymentConfig, IEcsDeploymentConfig } from './ecs-deployment-config';
import { IEcsService } from './ecs-service';
@@ -48,7 +50,7 @@ export interface EcsDeploymentGroupProps {
readonly ecsServices: IEcsService[];
- readonly targetGroupNames: string[];
+ readonly targetGroups: ApplicationTargetGroup[];
readonly prodTrafficListener: TrafficListener;
@@ -61,9 +63,9 @@ export interface EcsDeploymentGroupProps {
*
* The maximum setting is 2880 minutes (2 days).
*
- * @default 60
+ * @default 60 minutes
*/
- readonly terminationWaitTimeInMinutes?: number;
+ readonly terminationWaitTime?: Duration;
/**
* The event type or types that trigger a rollback.
@@ -85,18 +87,18 @@ export class EcsDeploymentGroup extends Resource implements IEcsDeploymentGroup
deploymentGroupName,
deploymentConfig,
ecsServices,
- targetGroupNames,
+ targetGroups,
prodTrafficListener,
testTrafficListener,
- terminationWaitTimeInMinutes = 60,
+ terminationWaitTime = Duration.minutes(60),
autoRollbackOnEvents,
} = props;
- if (terminationWaitTimeInMinutes > 2880) {
+ if (terminationWaitTime.toMinutes() > 2880) {
throw new Error('Invalid TerminationWaitTimeInMinutes: The maximum setting is 2880 minutes (2 days).');
}
- const codeDeployEcsRole = new Role(this, 'EcsCodeDeployRole', {
+ const codeDeployEcsRole = new Role(this, 'Role', {
assumedBy: new ServicePrincipal('codedeploy.amazonaws.com'),
managedPolicies: [ManagedPolicy.fromAwsManagedPolicyName('AWSCodeDeployRoleForECS')],
});
@@ -105,44 +107,51 @@ export class EcsDeploymentGroup extends Resource implements IEcsDeploymentGroup
applicationName,
});
- const serviceToken = CustomResourceProvider.getOrCreate(this, 'Custom::EcsDeploymentGroup', {
- codeDirectory: path.join(__dirname, 'lambdas', 'ecs-deployment-group'),
- runtime: CustomResourceProviderRuntime.NODEJS_12_X,
- policyStatements: [
- {
- Effect: Effect.ALLOW,
- Action: ['codeDeploy:CreateDeploymentGroup', 'codeDeploy:UpdateDeploymentGroup', 'codeDeploy:DeleteDeploymentGroup'],
- Resource: '*',
- },
- {
- Effect: Effect.ALLOW,
- Action: ['iam:PassRole'],
- Resource: codeDeployEcsRole.roleArn,
- },
- ],
+ const serviceToken = new Function(this, 'Function', {
+ runtime: Runtime.NODEJS_12_X,
+ code: Code.fromAsset(path.join(__dirname, 'lambdas', 'ecs-deployment-group')),
+ handler: 'index.handler',
+ timeout: Duration.minutes(15),
});
+ serviceToken.addToRolePolicy(
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['codeDeploy:CreateDeploymentGroup', 'codeDeploy:UpdateDeploymentGroup', 'codeDeploy:DeleteDeploymentGroup'],
+ resources: ['*'],
+ }),
+ );
+
+ serviceToken.addToRolePolicy(
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['iam:PassRole'],
+ resources: [codeDeployEcsRole.roleArn],
+ }),
+ );
+
this.deploymentConfig = deploymentConfig || EcsDeploymentConfig.ALL_AT_ONCE;
if (Construct.isConstruct(props.deploymentConfig)) {
this.node.addDependency(props.deploymentConfig);
}
+ this.node.addDependency(...ecsServices);
const ecsDeploymentGroup = new CustomResource(this, 'CustomResource', {
- serviceToken,
+ serviceToken: serviceToken.functionArn,
resourceType: 'Custom::EcsDeploymentGroup',
properties: {
ApplicationName: this.application.applicationName,
DeploymentGroupName: deploymentGroupName,
ServiceRoleArn: codeDeployEcsRole.roleArn,
- TargetGroupNames: targetGroupNames,
+ TargetGroupNames: targetGroups.map((tg) => tg.targetGroupName),
EcsServices: ecsServices.map((service) => ({
ClusterName: service.clusterName,
ServiceName: service.serviceName,
})),
ProdTrafficListenerArn: prodTrafficListener.listenerArn,
TestTrafficListenerArn: testTrafficListener.listenerArn,
- TerminationWaitTimeInMinutes: terminationWaitTimeInMinutes,
+ TerminationWaitTimeInMinutes: terminationWaitTime.toMinutes(),
AutoRollbackOnEvents: autoRollbackOnEvents,
DeploymentConfigName: this.deploymentConfig.deploymentConfigName,
},
diff --git a/packages/cdk-blue-green-container-deployment/src/ecs-service.ts b/packages/cdk-blue-green-container-deployment/src/ecs-service.ts
index b8955e437..cf5b4ff18 100644
--- a/packages/cdk-blue-green-container-deployment/src/ecs-service.ts
+++ b/packages/cdk-blue-green-container-deployment/src/ecs-service.ts
@@ -2,8 +2,9 @@ import * as path from 'path';
import { IConnectable, Connections, SecurityGroup, Port } from '@aws-cdk/aws-ec2';
import { ICluster, LaunchType, DeploymentCircuitBreaker } from '@aws-cdk/aws-ecs';
import { ITargetGroup } from '@aws-cdk/aws-elasticloadbalancingv2';
-import { Effect } from '@aws-cdk/aws-iam';
-import { Duration, Construct, CustomResource, CustomResourceProvider, CustomResourceProviderRuntime } from '@aws-cdk/core';
+import { Effect, PolicyStatement } from '@aws-cdk/aws-iam';
+import { Function, Runtime, Code } from '@aws-cdk/aws-lambda';
+import { Duration, Construct, CustomResource } from '@aws-cdk/core';
import { DummyTaskDefinition } from './dummy-task-definition';
@@ -21,6 +22,7 @@ export interface EcsServiceProps {
readonly desiredCount?: number;
readonly containerPort?: number;
readonly prodTargetGroup: ITargetGroup;
+ readonly testTargetGroup: ITargetGroup;
readonly taskDefinition: DummyTaskDefinition;
/**
@@ -72,6 +74,7 @@ export class EcsService extends Construct implements IConnectable, IEcsService {
platformVersion = '1.4.0',
desiredCount = 1,
prodTargetGroup,
+ testTargetGroup,
taskDefinition,
healthCheckGracePeriod = Duration.seconds(60),
} = props;
@@ -80,6 +83,8 @@ export class EcsService extends Construct implements IConnectable, IEcsService {
const { vpc } = cluster;
+ this.node.addDependency(prodTargetGroup, testTargetGroup);
+
const securityGroups = props.securityGroups || [
new SecurityGroup(this, 'SecurityGroup', {
description: `Security group for ${this.node.id} service`,
@@ -87,25 +92,31 @@ export class EcsService extends Construct implements IConnectable, IEcsService {
}),
];
- const serviceToken = CustomResourceProvider.getOrCreate(this, 'Custom::BlueGreenService', {
- codeDirectory: path.join(__dirname, 'lambdas', 'ecs-service'),
- runtime: CustomResourceProviderRuntime.NODEJS_12_X,
- policyStatements: [
- {
- Effect: Effect.ALLOW,
- Action: ['ecs:CreateService', 'ecs:UpdateService', 'ecs:DeleteService', 'ecs:DescribeServices'],
- Resource: '*',
- },
- {
- Effect: Effect.ALLOW,
- Action: ['iam:PassRole'],
- Resource: taskDefinition.executionRole.roleArn,
- },
- ],
+ const serviceToken = new Function(this, 'Function', {
+ runtime: Runtime.NODEJS_12_X,
+ code: Code.fromAsset(path.join(__dirname, 'lambdas', 'ecs-service')),
+ handler: 'index.handler',
+ timeout: Duration.minutes(15),
});
+ serviceToken.addToRolePolicy(
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['ecs:CreateService', 'ecs:UpdateService', 'ecs:DeleteService', 'ecs:DescribeServices'],
+ resources: ['*'],
+ }),
+ );
+
+ serviceToken.addToRolePolicy(
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['iam:PassRole'],
+ resources: [taskDefinition.executionRole.roleArn],
+ }),
+ );
+
const service = new CustomResource(this, 'CustomResource', {
- serviceToken,
+ serviceToken: serviceToken.functionArn,
resourceType: 'Custom::BlueGreenService',
properties: {
Cluster: cluster.clusterName,
diff --git a/packages/cdk-blue-green-container-deployment/src/lambdas/dummy-task-definition/index.ts b/packages/cdk-blue-green-container-deployment/src/lambdas/dummy-task-definition/index.ts
deleted file mode 100644
index b134cbd09..000000000
--- a/packages/cdk-blue-green-container-deployment/src/lambdas/dummy-task-definition/index.ts
+++ /dev/null
@@ -1,86 +0,0 @@
-import type { CloudFormationCustomResourceEvent, CloudFormationCustomResourceCreateEvent, CloudFormationCustomResourceDeleteEvent } from 'aws-lambda';
-import { ECS } from 'aws-sdk';
-
-interface HandlerReturn {
- PhysicalResourceId: string;
-}
-
-export interface EcsTaskDefinitionProps {
- family: string;
- image: string;
- executionRoleArn: string;
- networkMode: string;
- containerName: string;
- containerPort: number;
-}
-
-const ecs = new ECS();
-
-const getProperties = (props: CloudFormationCustomResourceEvent['ResourceProperties']): EcsTaskDefinitionProps => ({
- family: props.Family,
- image: props.Image,
- executionRoleArn: props.ExecutionRoleArn,
- networkMode: props.NetworkMode,
- containerName: props.ContainerName,
- containerPort: props.ContainerPort,
-});
-
-const onCreate = async (event: CloudFormationCustomResourceCreateEvent): Promise<HandlerReturn> => {
- const { family, image, executionRoleArn, networkMode, containerName, containerPort } = getProperties(event.ResourceProperties);
-
- const { taskDefinition } = await ecs
- .registerTaskDefinition({
- requiresCompatibilities: ['FARGATE'],
- family,
- executionRoleArn,
- networkMode,
- cpu: '256',
- memory: '512',
- containerDefinitions: [
- {
- name: containerName,
- image,
- portMappings: [
- {
- hostPort: containerPort,
- protocol: 'tcp',
- containerPort: containerPort,
- },
- ],
- },
- ],
- })
- .promise();
-
- if (!taskDefinition) throw Error('Taskdefinition could not be registerd');
-
- return {
- PhysicalResourceId: taskDefinition.taskDefinitionArn as string,
- };
-};
-
-const onDelete = async (event: CloudFormationCustomResourceDeleteEvent): Promise<void> => {
- const taskDefinition = event.PhysicalResourceId;
-
- await ecs
- .deregisterTaskDefinition({
- taskDefinition,
- })
- .promise();
-};
-
-export const handler = async (event: CloudFormationCustomResourceEvent): Promise<HandlerReturn | void> => {
- const requestType = event.RequestType;
-
- switch (requestType) {
- case 'Create':
- return onCreate(event);
- case 'Update':
- // CodeDeploy is responsible for updates on the TaskDefinition
- return;
- case 'Delete':
- return onDelete(event);
- default:
- throw new Error(`Invalid request type: ${requestType}`);
- }
-};
diff --git a/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-deployment-group/index.ts b/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-deployment-group/index.ts
index 8db3d49a2..fe47f7bf9 100644
--- a/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-deployment-group/index.ts
+++ b/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-deployment-group/index.ts
@@ -1,10 +1,13 @@
-import type {
- CloudFormationCustomResourceEvent,
- CloudFormationCustomResourceCreateEvent,
- CloudFormationCustomResourceUpdateEvent,
- CloudFormationCustomResourceDeleteEvent,
-} from 'aws-lambda';
+import type { CloudFormationCustomResourceEvent, CloudFormationCustomResourceUpdateEvent } from 'aws-lambda';
import { CodeDeploy } from 'aws-sdk';
+import {
+ customResourceHelper,
+ OnCreateHandler,
+ OnUpdateHandler,
+ OnDeleteHandler,
+ ResourceHandler,
+ ResourceHandlerReturn,
+} from 'custom-resource-helper';
enum RollbackEvent {
DEPLOYMENT_FAILURE = 'DEPLOYMENT_FAILURE',
@@ -12,10 +15,6 @@ enum RollbackEvent {
DEPLOYMENT_STOP_ON_REQUEST = 'DEPLOYMENT_STOP_ON_REQUEST',
}
-interface HandlerReturn {
- PhysicalResourceId: string;
-}
-
export interface EcsDeploymentGroupProps {
applicationName: string;
deploymentGroupName: string;
@@ -49,7 +48,7 @@ const getProperties = (
deploymentConfigName: props.DeploymentConfigName,
});
-const onCreate = async (event: CloudFormationCustomResourceCreateEvent): Promise<HandlerReturn> => {
+const handleCreate: OnCreateHandler = async (event): Promise<ResourceHandlerReturn> => {
const {
applicationName,
deploymentGroupName,
@@ -106,11 +105,11 @@ const onCreate = async (event: CloudFormationCustomResourceCreateEvent): Promise
.promise();
return {
- PhysicalResourceId: deploymentGroupName,
+ physicalResourceId: deploymentGroupName,
};
};
-const onUpdate = async (event: CloudFormationCustomResourceUpdateEvent): Promise<HandlerReturn> => {
+const handleUpdate: OnUpdateHandler = async (event): Promise<ResourceHandlerReturn> => {
const newProps = getProperties(event.ResourceProperties);
const oldProps = getProperties(event.OldResourceProperties);
@@ -153,11 +152,11 @@ const onUpdate = async (event: CloudFormationCustomResourceUpdateEvent): Promise
.promise();
return {
- PhysicalResourceId: newProps.deploymentGroupName,
+ physicalResourceId: newProps.deploymentGroupName,
};
};
-const onDelete = async (event: CloudFormationCustomResourceDeleteEvent): Promise<void> => {
+const handleDelete: OnDeleteHandler = async (event): Promise<void> => {
const { applicationName, deploymentGroupName } = getProperties(event.ResourceProperties);
await codeDeploy
@@ -168,17 +167,10 @@ const onDelete = async (event: CloudFormationCustomResourceDeleteEvent): Promise
.promise();
};
-export const handler = async (event: CloudFormationCustomResourceEvent): Promise<HandlerReturn | void> => {
- const requestType = event.RequestType;
-
- switch (requestType) {
- case 'Create':
- return onCreate(event);
- case 'Update':
- return onUpdate(event);
- case 'Delete':
- return onDelete(event);
- default:
- throw new Error(`Invalid request type: ${requestType}`);
- }
-};
+export const handler = customResourceHelper(
+ (): ResourceHandler => ({
+ onCreate: handleCreate,
+ onUpdate: handleUpdate,
+ onDelete: handleDelete,
+ }),
+);
diff --git a/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-service/index.ts b/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-service/index.ts
index 2d34ed836..3f0e6f8e2 100644
--- a/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-service/index.ts
+++ b/packages/cdk-blue-green-container-deployment/src/lambdas/ecs-service/index.ts
@@ -1,17 +1,13 @@
-import type {
- CloudFormationCustomResourceEvent,
- CloudFormationCustomResourceCreateEvent,
- CloudFormationCustomResourceUpdateEvent,
- CloudFormationCustomResourceDeleteEvent,
-} from 'aws-lambda';
+import type { CloudFormationCustomResourceEvent } from 'aws-lambda';
import { ECS } from 'aws-sdk';
-
-interface HandlerReturn {
- PhysicalResourceId: string;
- Data: {
- ServiceName: string;
- };
-}
+import {
+ customResourceHelper,
+ OnCreateHandler,
+ OnUpdateHandler,
+ OnDeleteHandler,
+ ResourceHandler,
+ ResourceHandlerReturn,
+} from 'custom-resource-helper';
export interface BlueGreenServiceProps {
cluster: string;
@@ -49,7 +45,7 @@ const getProperties = (props: CloudFormationCustomResourceEvent['ResourcePropert
deploymentConfiguration: props.DeploymentConfiguration,
});
-const onCreate = async (event: CloudFormationCustomResourceCreateEvent): Promise<HandlerReturn> => {
+const handleCreate: OnCreateHandler = async (event): Promise<ResourceHandlerReturn> => {
const {
cluster,
serviceName,
@@ -100,8 +96,8 @@ const onCreate = async (event: CloudFormationCustomResourceCreateEvent): Promise
if (!service) throw Error('Service could not be created');
return {
- PhysicalResourceId: service.serviceArn as string,
- Data: {
+ physicalResourceId: service.serviceArn as string,
+ responseData: {
ServiceName: service.serviceName as string,
},
};
@@ -115,7 +111,7 @@ const onCreate = async (event: CloudFormationCustomResourceCreateEvent): Promise
* updated, a new AWS CodeDeploy deployment should be created.
* For more information, see CreateDeployment in the AWS CodeDeploy API Reference.
*/
-const onUpdate = async (event: CloudFormationCustomResourceUpdateEvent): Promise<HandlerReturn> => {
+const handleUpdate: OnUpdateHandler = async (event): Promise<ResourceHandlerReturn> => {
const { cluster, serviceName, desiredCount, deploymentConfiguration, healthCheckGracePeriodSeconds } = getProperties(event.ResourceProperties);
const { service } = await ecs
@@ -131,14 +127,14 @@ const onUpdate = async (event: CloudFormationCustomResourceUpdateEvent): Promise
if (!service) throw Error('Service could not be updated');
return {
- PhysicalResourceId: service.serviceArn as string,
- Data: {
+ physicalResourceId: service.serviceArn as string,
+ responseData: {
ServiceName: service.serviceName as string,
},
};
};
-const onDelete = async (event: CloudFormationCustomResourceDeleteEvent): Promise<void> => {
+const handleDelete: OnDeleteHandler = async (event): Promise<void> => {
const { cluster, serviceName } = getProperties(event.ResourceProperties);
await ecs
@@ -157,17 +153,10 @@ const onDelete = async (event: CloudFormationCustomResourceDeleteEvent): Promise
.promise();
};
-export const handler = async (event: CloudFormationCustomResourceEvent): Promise<HandlerReturn | void> => {
- const requestType = event.RequestType;
-
- switch (requestType) {
- case 'Create':
- return onCreate(event);
- case 'Update':
- return onUpdate(event);
- case 'Delete':
- return onDelete(event);
- default:
- throw new Error(`Invalid request type: ${requestType}`);
- }
-};
+export const handler = customResourceHelper(
+ (): ResourceHandler => ({
+ onCreate: handleCreate,
+ onUpdate: handleUpdate,
+ onDelete: handleDelete,
+ }),
+);
diff --git a/packages/cdk-s3-antivirus/src/layers/clamav/Dockerfile b/packages/cdk-s3-antivirus/src/layers/clamav/Dockerfile
index 8db588e9d..8cdeaa218 100644
--- a/packages/cdk-s3-antivirus/src/layers/clamav/Dockerfile
+++ b/packages/cdk-s3-antivirus/src/layers/clamav/Dockerfile
@@ -1,7 +1,7 @@
FROM public.ecr.aws/lambda/provided:al2.2021.09.13.11
ARG asset_name=layer
-ARG CLAMAV_VERSION=0.103.3-5.el7
+ARG CLAMAV_VERSION=0.103.3-8.el7
USER root
RUN mkdir -p /opt/{lib,clamav}
@@ -10,8 +10,7 @@ RUN mkdir -p /opt/{lib,clamav}
# tools
#
RUN yum install -y zip yum-utils amazon-linux-extras
-RUN amazon-linux-extras install epel -y
-RUN yum-config-manager --enable epel
+RUN amazon-linux-extras install epel -y && yum-config-manager --enable epel
#
# layer
| The Cloudcomponent cdk-construct does not support multiple pipelines(e.g. FE and BE) within the same stack deployment
I have raised a PR [here ](https://github.com/cloudcomponents/cdk-constructs/pull/118) to fix this problem and would like to get it reviewed/approved or an alternative provided.
At this point in time, we had to fork this project and fix it ourselves but going forward we would like to have this feature included to avoid maintaining yet another package.
Thanks
| 2021-10-17T13:24:12 | 0.0 | [] | [] |
|||
cloudcomponents/cdk-constructs | cloudcomponents__cdk-constructs-101 | 1ad4b6cdef7968d8ece8969989b7e8c2617ae443 | diff --git a/packages/cdk-contentful-webhook/lambda-file-sizes.json b/packages/cdk-contentful-webhook/lambda-file-sizes.json
index 7bce275c3..5165ba196 100644
--- a/packages/cdk-contentful-webhook/lambda-file-sizes.json
+++ b/packages/cdk-contentful-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1620265629961,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231292,"diff":-89}]},{"timestamp":1615923664936,"files":[{"filename":"contentful-webhook/index.js","previous":1050356,"size":231381,"diff":-818975}]},{"timestamp":1615923376037,"files":[{"filename":"contentful-webhook/index.js","previous":1047464,"size":1050356,"diff":2892}]},{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]
},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
+[{"timestamp":1620565790484,"files":[{"filename":"contentful-webhook/index.js","previous":231292,"size":231381,"diff":89}]},{"timestamp":1620265629961,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231292,"diff":-89}]},{"timestamp":1615923664936,"files":[{"filename":"contentful-webhook/index.js","previous":1050356,"size":231381,"diff":-818975}]},{"timestamp":1615923376037,"files":[{"filename":"contentful-webhook/index.js","previous":1047464,"size":1050356,"diff":2892}]},{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"t
imestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
diff --git a/packages/cdk-github-webhook/lambda-file-sizes.json b/packages/cdk-github-webhook/lambda-file-sizes.json
index 852cf043c..a523a2465 100644
--- a/packages/cdk-github-webhook/lambda-file-sizes.json
+++ b/packages/cdk-github-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1620265643898,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191427,"diff":-365}]},{"timestamp":1619038419269,"files":[{"filename":"github-webhook/index.js","previous":191797,"size":191792,"diff":-5}]},{"timestamp":1617394285426,"files":[{"filename":"github-webhook/index.js","previous":191760,"size":191797,"diff":37}]},{"timestamp":1616697789259,"files":[{"filename":"github-webhook/index.js","previous":191665,"size":191760,"diff":95}]},{"timestamp":1615923673723,"files":[{"filename":"github-webhook/index.js","previous":1011459,"size":191665,"diff":-819794}]},{"timestamp":1615923385141,"files":[{"filename":"github-webhook/index.js","previous":1008634,"size":1011459,"diff":2825}]},{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.j
s","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
+[{"timestamp":1620565795835,"files":[{"filename":"github-webhook/index.js","previous":191427,"size":191792,"diff":365}]},{"timestamp":1620265643898,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191427,"diff":-365}]},{"timestamp":1619038419269,"files":[{"filename":"github-webhook/index.js","previous":191797,"size":191792,"diff":-5}]},{"timestamp":1617394285426,"files":[{"filename":"github-webhook/index.js","previous":191760,"size":191797,"diff":37}]},{"timestamp":1616697789259,"files":[{"filename":"github-webhook/index.js","previous":191665,"size":191760,"diff":95}]},{"timestamp":1615923673723,"files":[{"filename":"github-webhook/index.js","previous":1011459,"size":191665,"diff":-819794}]},{"timestamp":1615923385141,"files":[{"filename":"github-webhook/index.js","previous":1008634,"size":1011459,"diff":2825}]},{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.
js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.js","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
diff --git a/packages/cdk-pull-request-approval-rule/src/approval-rule-template-repositroy-association.ts b/packages/cdk-pull-request-approval-rule/src/approval-rule-template-repositroy-association.ts
index 73db5fe64..041da6c82 100644
--- a/packages/cdk-pull-request-approval-rule/src/approval-rule-template-repositroy-association.ts
+++ b/packages/cdk-pull-request-approval-rule/src/approval-rule-template-repositroy-association.ts
@@ -33,7 +33,7 @@ export class ApprovalRuleTemplateRepositoryAssociation extends Construct {
{
Effect: 'Allow',
Action: ['codecommit:AssociateApprovalRuleTemplateWithRepository', 'codecommit:DisassociateApprovalRuleTemplateFromRepository'],
- Resource: this.repository.repositoryArn,
+ Resource: '*',
},
],
});
| Bug: `ApprovalRuleTemplateRepositoryAssociation` creates only one `CustomResourceProviderRole`
```
"@aws-cdk/core": "1.102.0",
"@cloudcomponents/cdk-pull-request-approval-rule": "1.35.0",
```
Bug: `ApprovalRuleTemplateRepositoryAssociation` creates only one `CustomResourceProviderRole`. while it should create one separate for each instance.
**Reproduction steps:**
I created a construct called `CodeCommitPRApprovers` that creates the approval rule template and its association:
```
export class CodeCommitPRApprovers extends Construct {
constructor(scope: Construct, id: string, { repo }: CodeCommitPRApproversProps) {
super(scope, id);
// At the moment Esen will be required to approve all the pull requests.
const userARN = User.fromUserName(this, 'user', 'user').userArn;
const { approvalRuleTemplateName } = new ApprovalRuleTemplate(this, `${id}ApprovalRuleTemplate`, {
approvalRuleTemplateName: `master-branch-required-approvers-for-${id}`,
template: {
approvers: {
approvalPoolMembers: [userARN],
numberOfApprovalsNeeded: 1
},
branches: ['master']
}
});
new ApprovalRuleTemplateRepositoryAssociation(this, `${id}ApprovalRuleTemplateRepositoryAssociation`, {
approvalRuleTemplateName,
repository: repo,
});
}
}
```
Then in a stack I instantiate this construct twice, one for each repository as follows:
```
export class DevStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
super(scope, id, props);
const infrastructureRepository = new Repository(this, 'InfrastructureRepository', {
repositoryName: 'infrastructure',
description: 'The CodeCommit repository for the infrastructure code.',
});
new CodeCommitPRApprovers(this, 'InfrastructurePRApprovers', {
repo: infrastructureRepository
});
const websiteRepo = new Repository(this, 'WebRepository', {
repositoryName: 'web',
description: "The CodeCommit repository for the Web application code.",
});
new CodeCommitPRApprovers(this, 'WebsitePRApprovers', {
repo: websiteRepo
});
```
The generated CloudFormation template includes only one `CustomResourceProviderRole`:
```
"CustomApprovalRuleTemplateRepositoryAssociationCustomResourceProviderRoleD1B94887": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}
]
},
"ManagedPolicyArns": [
{
"Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
}
],
"Policies": [
{
"PolicyName": "Inline",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"codecommit:AssociateApprovalRuleTemplateWithRepository",
"codecommit:DisassociateApprovalRuleTemplateFromRepository"
],
"Resource": {
"Fn::GetAtt": [
"WebRepository0EB245C6",
"Arn"
]
}
}
]
}
}
]
},
"Metadata": {
"aws:cdk:path": "DevStack/Custom::ApprovalRuleTemplateRepositoryAssociationCustomResourceProvider/Role"
}
},
```
The same `CustomResourceProviderRole` is not created for the infrastructure CodeCommit repository which means Lambda will have permissions to perform operations only to the specified Web repository.
Am I using this correctly?
| 2021-05-09T13:18:28 | 0.0 | [] | [] |
|||
cloudcomponents/cdk-constructs | cloudcomponents__cdk-constructs-99 | ffe815eab6d1989957dce9f7af5feb25ded8e1a9 | diff --git a/packages/cdk-contentful-webhook/lambda-file-sizes.json b/packages/cdk-contentful-webhook/lambda-file-sizes.json
index 36e0f090a..7bce275c3 100644
--- a/packages/cdk-contentful-webhook/lambda-file-sizes.json
+++ b/packages/cdk-contentful-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1615923664936,"files":[{"filename":"contentful-webhook/index.js","previous":1050356,"size":231381,"diff":-818975}]},{"timestamp":1615923376037,"files":[{"filename":"contentful-webhook/index.js","previous":1047464,"size":1050356,"diff":2892}]},{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}
]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
+[{"timestamp":1620265629961,"files":[{"filename":"contentful-webhook/index.js","previous":231381,"size":231292,"diff":-89}]},{"timestamp":1615923664936,"files":[{"filename":"contentful-webhook/index.js","previous":1050356,"size":231381,"diff":-818975}]},{"timestamp":1615923376037,"files":[{"filename":"contentful-webhook/index.js","previous":1047464,"size":1050356,"diff":2892}]},{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]
},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
diff --git a/packages/cdk-dynamodb-seeder/src/dynamodb-seeder.ts b/packages/cdk-dynamodb-seeder/src/dynamodb-seeder.ts
index 8603b7f2d..c0c196493 100644
--- a/packages/cdk-dynamodb-seeder/src/dynamodb-seeder.ts
+++ b/packages/cdk-dynamodb-seeder/src/dynamodb-seeder.ts
@@ -45,6 +45,16 @@ export class DynamoDBSeeder extends Construct {
}),
);
+ if (props.table.encryptionKey) {
+ handler.addToRolePolicy(
+ new PolicyStatement({
+ effect: Effect.ALLOW,
+ actions: ['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*', 'kms:GenerateDataKey*', 'kms:DescribeKey', 'kms:CreateGrant'],
+ resources: [props.table.encryptionKey.keyArn],
+ }),
+ );
+ }
+
if (seedsBucket) {
const objectKey = seeds.s3Location?.objectKey ?? '*';
diff --git a/packages/cdk-github-webhook/lambda-file-sizes.json b/packages/cdk-github-webhook/lambda-file-sizes.json
index a4c00677e..852cf043c 100644
--- a/packages/cdk-github-webhook/lambda-file-sizes.json
+++ b/packages/cdk-github-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1619038419269,"files":[{"filename":"github-webhook/index.js","previous":191797,"size":191792,"diff":-5}]},{"timestamp":1617394285426,"files":[{"filename":"github-webhook/index.js","previous":191760,"size":191797,"diff":37}]},{"timestamp":1616697789259,"files":[{"filename":"github-webhook/index.js","previous":191665,"size":191760,"diff":95}]},{"timestamp":1615923673723,"files":[{"filename":"github-webhook/index.js","previous":1011459,"size":191665,"diff":-819794}]},{"timestamp":1615923385141,"files":[{"filename":"github-webhook/index.js","previous":1008634,"size":1011459,"diff":2825}]},{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.js","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js"
,"previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
+[{"timestamp":1620265643898,"files":[{"filename":"github-webhook/index.js","previous":191792,"size":191427,"diff":-365}]},{"timestamp":1619038419269,"files":[{"filename":"github-webhook/index.js","previous":191797,"size":191792,"diff":-5}]},{"timestamp":1617394285426,"files":[{"filename":"github-webhook/index.js","previous":191760,"size":191797,"diff":37}]},{"timestamp":1616697789259,"files":[{"filename":"github-webhook/index.js","previous":191665,"size":191760,"diff":95}]},{"timestamp":1615923673723,"files":[{"filename":"github-webhook/index.js","previous":1011459,"size":191665,"diff":-819794}]},{"timestamp":1615923385141,"files":[{"filename":"github-webhook/index.js","previous":1008634,"size":1011459,"diff":2825}]},{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.j
s","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
| fix(dynamodb-seeder): Grant `encryptionKey` access
When a table has an `encryptionKey`, the `SingletonFunction` does not have the proper access.
The manual policy entry, should be replaced by `grantWriteData`, which gives the appropriate access to the KMS key if it exists.
```
props.table.grantWriteData(handler);
```
I can, and may put a PR in for this myself, but I ran into some test failures that I didn't have time to research at the moment, and wanted to get this documented in the very least.
| 2021-05-06T02:04:03 | 0.0 | [] | [] |
|||
cloudcomponents/cdk-constructs | cloudcomponents__cdk-constructs-96 | 2d538277c50cb43dc14c26fcc83289ae515d62a8 | diff --git a/README.md b/README.md
index bc8ae703d..e0d794de1 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ This repository is a monorepo managed with [Lerna](https://github.com/lerna/lern
| [cdk-temp-stack](/packages/cdk-temp-stack) | A stack that destroys itself after a given time (ttl) |  |
| [cdk-lambda-at-edge-pattern](/packages/cdk-lambda-at-edge-pattern) | CDK Constructs for Lambda@Edge pattern: HttpHeaders |  |
| [cdk-cloudfront-authorization](/packages/cdk-cloudfront-authorization) | CloudFront with Cognito authentication using Lambda@Edge |  |
-
+| [cdk-secret-key](/packages/cdk-secret-key) | Provide secret keys to lambdas |  |
## Contributing
We welcome community contributions and pull requests.
diff --git a/examples/contentful-webhook-example/package.json b/examples/contentful-webhook-example/package.json
index 58f8d2099..ce56e4d86 100644
--- a/examples/contentful-webhook-example/package.json
+++ b/examples/contentful-webhook-example/package.json
@@ -11,6 +11,7 @@
"dependencies": {
"@aws-cdk/aws-apigateway": "^1.93.0",
"@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0",
"@cloudcomponents/cdk-contentful-webhook": "^1.28.0",
"source-map-support": "^0.5.19"
},
diff --git a/examples/contentful-webhook-example/src/contentful-webhook-stack.ts b/examples/contentful-webhook-example/src/contentful-webhook-stack.ts
index 75259d3e1..f14b1317a 100644
--- a/examples/contentful-webhook-example/src/contentful-webhook-stack.ts
+++ b/examples/contentful-webhook-example/src/contentful-webhook-stack.ts
@@ -1,6 +1,7 @@
import { RestApi } from '@aws-cdk/aws-apigateway';
import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { ContentfulWebhook } from '@cloudcomponents/cdk-contentful-webhook';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export class ContentfulWebhookStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
@@ -9,7 +10,7 @@ export class ContentfulWebhookStack extends Stack {
const api = new RestApi(this, 'Endpoint');
api.root.addMethod('POST');
- const accessToken = process.env.ACCESS_TOKEN as string;
+ const accessToken = SecretKey.fromPlainText(process.env.ACCESS_TOKEN as string);
const spaceId = process.env.SPACE_ID as string;
diff --git a/examples/github-webhook-example/package.json b/examples/github-webhook-example/package.json
index 96e808765..3ae480a44 100644
--- a/examples/github-webhook-example/package.json
+++ b/examples/github-webhook-example/package.json
@@ -11,6 +11,7 @@
"dependencies": {
"@aws-cdk/aws-apigateway": "^1.93.0",
"@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0",
"@cloudcomponents/cdk-github-webhook": "^1.28.0",
"source-map-support": "^0.5.19"
},
diff --git a/examples/github-webhook-example/src/github-webhook-stack.ts b/examples/github-webhook-example/src/github-webhook-stack.ts
index 506f41b09..2b7b95877 100644
--- a/examples/github-webhook-example/src/github-webhook-stack.ts
+++ b/examples/github-webhook-example/src/github-webhook-stack.ts
@@ -1,6 +1,7 @@
import { RestApi } from '@aws-cdk/aws-apigateway';
import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { GithubWebhook } from '@cloudcomponents/cdk-github-webhook';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export class GithubWebhookStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
@@ -9,7 +10,7 @@ export class GithubWebhookStack extends Stack {
const api = new RestApi(this, 'github-webhook');
api.root.addMethod('POST');
- const githubApiToken = process.env.API_TOKEN as string;
+ const githubApiToken = SecretKey.fromPlainText(process.env.API_TOKEN as string);
// @example https://github.com/cloudcomponents/cdk-constructs
const githubRepoUrl = process.env.REPO_URL as string;
diff --git a/examples/stripe-webhook-example/package.json b/examples/stripe-webhook-example/package.json
index 77005ee4d..a39466b27 100644
--- a/examples/stripe-webhook-example/package.json
+++ b/examples/stripe-webhook-example/package.json
@@ -11,6 +11,7 @@
"dependencies": {
"@aws-cdk/aws-apigateway": "^1.93.0",
"@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0",
"@cloudcomponents/cdk-stripe-webhook": "^1.28.0",
"source-map-support": "^0.5.19"
},
diff --git a/examples/stripe-webhook-example/src/stripe-webhook-stack.ts b/examples/stripe-webhook-example/src/stripe-webhook-stack.ts
index cdf4234b4..f1e60a348 100644
--- a/examples/stripe-webhook-example/src/stripe-webhook-stack.ts
+++ b/examples/stripe-webhook-example/src/stripe-webhook-stack.ts
@@ -1,7 +1,7 @@
import { RestApi } from '@aws-cdk/aws-apigateway';
import { Construct, Stack, StackProps } from '@aws-cdk/core';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
import { StripeWebhook } from '@cloudcomponents/cdk-stripe-webhook';
-
export class StripeWebhookStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
super(scope, id, props);
@@ -9,7 +9,7 @@ export class StripeWebhookStack extends Stack {
const api = new RestApi(this, 'Endpoint');
api.root.addMethod('POST');
- const secretKey = process.env.SECRET_KEY as string;
+ const secretKey = SecretKey.fromPlainText(process.env.SECRET_KEY as string);
const events = ['charge.failed', 'charge.succeeded'];
diff --git a/packages/cdk-contentful-webhook/API.md b/packages/cdk-contentful-webhook/API.md
index 2a5a058e5..5f3b4b43f 100644
--- a/packages/cdk-contentful-webhook/API.md
+++ b/packages/cdk-contentful-webhook/API.md
@@ -34,7 +34,7 @@ new ContentfulWebhook(scope: Construct, id: string, props: ContentfulWebhookProp
* **scope** (<code>[Construct](#aws-cdk-core-construct)</code>) *No description*
* **id** (<code>string</code>) *No description*
* **props** (<code>[ContentfulWebhookProps](#cloudcomponents-cdk-contentful-webhook-contentfulwebhookprops)</code>) *No description*
- * **accessToken** (<code>string</code>) *No description*
+ * **accessToken** (<code>string | [SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code>) *No description*
* **name** (<code>string</code>) *No description*
* **spaceId** (<code>string</code>) *No description*
* **topics** (<code>Array<string></code>) *No description*
@@ -53,7 +53,7 @@ new ContentfulWebhook(scope: Construct, id: string, props: ContentfulWebhookProp
Name | Type | Description
-----|------|-------------
-**accessToken** | <code>string</code> | <span></span>
+**accessToken** | <code>string | [SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code> | <span></span>
**name** | <code>string</code> | <span></span>
**spaceId** | <code>string</code> | <span></span>
**topics** | <code>Array<string></code> | <span></span>
diff --git a/packages/cdk-contentful-webhook/README.md b/packages/cdk-contentful-webhook/README.md
index 60cca6e98..91ef5c7b6 100644
--- a/packages/cdk-contentful-webhook/README.md
+++ b/packages/cdk-contentful-webhook/README.md
@@ -24,9 +24,10 @@ pip install cloudcomponents.cdk-contentful-webhook
## How to use
```typescript
-import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { RestApi } from '@aws-cdk/aws-apigateway';
+import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { ContentfulWebhook } from '@cloudcomponents/cdk-contentful-webhook';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export class ContentfulWebhookStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
@@ -35,7 +36,7 @@ export class ContentfulWebhookStack extends Stack {
const api = new RestApi(this, 'Endpoint');
api.root.addMethod('POST');
- const accessToken = process.env.ACCESS_TOKEN as string;
+ const accessToken = SecretKey.fromPlainText(process.env.ACCESS_TOKEN as string);
const spaceId = process.env.SPACE_ID as string;
diff --git a/packages/cdk-contentful-webhook/lambda-file-sizes.json b/packages/cdk-contentful-webhook/lambda-file-sizes.json
index 280660b72..faa4fe28a 100644
--- a/packages/cdk-contentful-webhook/lambda-file-sizes.json
+++ b/packages/cdk-contentful-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
+[{"timestamp":1615809010241,"files":[{"filename":"contentful-webhook/index.js","previous":221347,"size":1047464,"diff":826117}]},{"timestamp":1615022511778,"files":[{"filename":"contentful-webhook/index.js","previous":221344,"size":221347,"diff":3}]},{"timestamp":1609970894222,"files":[{"filename":"contentful-webhook/index.js","previous":217379,"size":221344,"diff":3965}]},{"timestamp":1609970375754,"files":[{"filename":"contentful-webhook/index.js","previous":203415,"size":217379,"diff":13964}]},{"timestamp":1607360020128,"files":[{"filename":"contentful-webhook/index.js","previous":203418,"size":203415,"diff":-3}]},{"timestamp":1600966253111,"files":[{"filename":"contentful-webhook/index.js","previous":203440,"size":203418,"diff":-22}]},{"timestamp":1599246565057,"files":[{"filename":"contentful-webhook/index.js","previous":202993,"size":203440,"diff":447}]},{"timestamp":1598552652514,"files":[{"filename":"contentful-webhook/index.js","previous":202992,"size":202993,"diff":1}]},{"timestamp":1596542526761,"files":[{"filename":"contentful-webhook/index.js","previous":207102,"size":202992,"diff":-4110}]},{"timestamp":1596540034638,"files":[{"filename":"contentful-webhook/index.js","previous":206593,"size":207102,"diff":509}]},{"timestamp":1596457456615,"files":[{"filename":"contentful-webhook/index.js","previous":206357,"size":206593,"diff":236}]},{"timestamp":1596440551356,"files":[{"filename":"contentful-webhook/index.js","previous":206424,"size":206357,"diff":-67}]},{"timestamp":1596437759972,"files":[{"filename":"contentful-webhook/index.js","previous":541950,"size":206424,"diff":-335526}]},{"timestamp":1596413723535,"files":[{"filename":"contentful-webhook/index.js","previous":542071,"size":541950,"diff":-121}]},{"timestamp":1596407623809,"files":[{"filename":"contentful-webhook/index.js","previous":191169,"size":542071,"diff":350902}]}]
diff --git a/packages/cdk-contentful-webhook/package.json b/packages/cdk-contentful-webhook/package.json
index cf6bca562..db499d3d9 100644
--- a/packages/cdk-contentful-webhook/package.json
+++ b/packages/cdk-contentful-webhook/package.json
@@ -60,14 +60,17 @@
"peerDependencies": {
"@aws-cdk/aws-lambda": "^1.93.0",
"@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0",
"constructs": "^3.2.0"
},
"dependencies": {
"@aws-cdk/aws-lambda": "^1.93.0",
- "@aws-cdk/core": "^1.93.0"
+ "@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0"
},
"devDependencies": {
"@aws-cdk/assert": "^1.93.0",
+ "@cloudcomponents/lambda-utils": "^0.1.1",
"contentful-management": "^6.1.1",
"custom-resource-helper": "^1.0.15",
"jest-cdk-snapshot": "^1.4.1"
diff --git a/packages/cdk-contentful-webhook/src/contentful-webhook.ts b/packages/cdk-contentful-webhook/src/contentful-webhook.ts
index afc22d252..8b0ac6cf2 100644
--- a/packages/cdk-contentful-webhook/src/contentful-webhook.ts
+++ b/packages/cdk-contentful-webhook/src/contentful-webhook.ts
@@ -1,9 +1,10 @@
import * as path from 'path';
import { SingletonFunction, Runtime, Code } from '@aws-cdk/aws-lambda';
import { Construct, Duration, CustomResource } from '@aws-cdk/core';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export interface ContentfulWebhookProps {
- readonly accessToken: string;
+ readonly accessToken: string | SecretKey;
readonly spaceId: string;
readonly name: string;
readonly url: string;
@@ -15,6 +16,8 @@ export class ContentfulWebhook extends Construct {
public constructor(scope: Construct, id: string, props: ContentfulWebhookProps) {
super(scope, id);
+ const accessToken = typeof props.accessToken === 'string' ? SecretKey.fromPlainText(props.accessToken) : props.accessToken;
+
const handler = new SingletonFunction(this, 'CustomResourceHandler', {
uuid: '91f2075f-b950-4743-a66b-ee0f6febf50d',
runtime: Runtime.NODEJS_12_X,
@@ -24,12 +27,21 @@ export class ContentfulWebhook extends Construct {
timeout: Duration.minutes(15),
});
+ if (accessToken.grantRead) {
+ accessToken.grantRead(handler);
+ }
+
new CustomResource(this, 'CustomResource', {
serviceToken: handler.functionArn,
resourceType: 'Custom::ContentfulWebhook',
pascalCaseProperties: true,
properties: {
- ...props,
+ accessTokenString: accessToken.serialize(),
+ spaceId: props.spaceId,
+ name: props.name,
+ url: props.url,
+ topics: props.topics,
+ logLevel: props.logLevel,
},
});
}
diff --git a/packages/cdk-contentful-webhook/src/lambdas/contentful-webhook/index.ts b/packages/cdk-contentful-webhook/src/lambdas/contentful-webhook/index.ts
index 0029eb78d..61894aa79 100644
--- a/packages/cdk-contentful-webhook/src/lambdas/contentful-webhook/index.ts
+++ b/packages/cdk-contentful-webhook/src/lambdas/contentful-webhook/index.ts
@@ -1,3 +1,4 @@
+import { SecretKey } from '@cloudcomponents/lambda-utils';
import type { CloudFormationCustomResourceEventCommon } from 'aws-lambda';
import * as contentful from 'contentful-management';
@@ -11,8 +12,10 @@ import {
camelizeKeys,
} from 'custom-resource-helper';
+const secretKey = new SecretKey();
+
export interface WebhookProps {
- accessToken: string;
+ accessTokenString: string;
spaceId: string;
name: string;
url: string;
@@ -28,10 +31,12 @@ const getSpace = async (accessToken: string, spaceId: string) => {
};
const handleCreate: OnCreateHandler = async (event): Promise<ResourceHandlerReturn> => {
- const { accessToken, spaceId, ...props } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
+ const { accessTokenString, spaceId, ...props } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
event.ResourceProperties,
);
+ const accessToken = await secretKey.getValue(accessTokenString);
+
const space = await getSpace(accessToken, spaceId);
const res = await space.createWebhook({
@@ -47,10 +52,12 @@ const handleCreate: OnCreateHandler = async (event): Promise<ResourceHandlerRetu
};
const handleUpdate: OnUpdateHandler = async (event): Promise<ResourceHandlerReturn> => {
- const { accessToken, spaceId, ...props } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
+ const { accessTokenString, spaceId, ...props } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
event.ResourceProperties,
);
+ const accessToken = await secretKey.getValue(accessTokenString);
+
const webhookId = event.PhysicalResourceId;
const space = await getSpace(accessToken, spaceId);
@@ -72,10 +79,12 @@ const handleUpdate: OnUpdateHandler = async (event): Promise<ResourceHandlerRetu
};
const handleDelete: OnDeleteHandler = async (event): Promise<void> => {
- const { accessToken, spaceId } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
+ const { accessTokenString, spaceId } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
event.ResourceProperties,
);
+ const accessToken = await secretKey.getValue(accessTokenString);
+
const webhookId = event.PhysicalResourceId;
const space = await getSpace(accessToken, spaceId);
diff --git a/packages/cdk-github-webhook/API.md b/packages/cdk-github-webhook/API.md
index 53c6fe565..d33ca4237 100644
--- a/packages/cdk-github-webhook/API.md
+++ b/packages/cdk-github-webhook/API.md
@@ -35,7 +35,7 @@ new GithubWebhook(scope: Construct, id: string, props: GithubWebhookProps)
* **id** (<code>string</code>) *No description*
* **props** (<code>[GithubWebhookProps](#cloudcomponents-cdk-github-webhook-githubwebhookprops)</code>) *No description*
* **events** (<code>Array<string></code>) Determines what events the hook is triggered for.
- * **githubApiToken** (<code>string</code>) The OAuth access token.
+ * **githubApiToken** (<code>string | [SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code>) The OAuth access token.
* **githubRepoUrl** (<code>string</code>) The Github repo url.
* **payloadUrl** (<code>string</code>) The URL to which the payloads will be delivered.
* **logLevel** (<code>string</code>) *No description* __*Optional*__
@@ -53,7 +53,7 @@ new GithubWebhook(scope: Construct, id: string, props: GithubWebhookProps)
Name | Type | Description
-----|------|-------------
**events** | <code>Array<string></code> | Determines what events the hook is triggered for.
-**githubApiToken** | <code>string</code> | The OAuth access token.
+**githubApiToken** | <code>string | [SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code> | The OAuth access token.
**githubRepoUrl** | <code>string</code> | The Github repo url.
**payloadUrl** | <code>string</code> | The URL to which the payloads will be delivered.
**logLevel**? | <code>string</code> | __*Optional*__
diff --git a/packages/cdk-github-webhook/README.md b/packages/cdk-github-webhook/README.md
index cb10972a6..10eb4438c 100644
--- a/packages/cdk-github-webhook/README.md
+++ b/packages/cdk-github-webhook/README.md
@@ -26,9 +26,10 @@ pip install cloudcomponents.cdk-github-webhook
## How to use
```typescript
-import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { RestApi } from '@aws-cdk/aws-apigateway';
+import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { GithubWebhook } from '@cloudcomponents/cdk-github-webhook';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export class GithubWebhookStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
@@ -37,7 +38,7 @@ export class GithubWebhookStack extends Stack {
const api = new RestApi(this, 'github-webhook');
api.root.addMethod('POST');
- const githubApiToken = process.env.API_TOKEN as string;
+ const githubApiToken = SecretKey.fromPlainText(process.env.API_TOKEN as string);
// @example https://github.com/cloudcomponents/cdk-constructs
const githubRepoUrl = process.env.REPO_URL as string;
diff --git a/packages/cdk-github-webhook/lambda-file-sizes.json b/packages/cdk-github-webhook/lambda-file-sizes.json
index e4fe64180..5331eae01 100644
--- a/packages/cdk-github-webhook/lambda-file-sizes.json
+++ b/packages/cdk-github-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.js","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","previous":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","pr
evious":332785,"size":473647,"diff":140862}]}]
+[{"timestamp":1615809019316,"files":[{"filename":"github-webhook/index.js","previous":182432,"size":1008634,"diff":826202}]},{"timestamp":1615023549795,"files":[{"filename":"github-webhook/index.js","previous":182255,"size":182432,"diff":177}]},{"timestamp":1615022566740,"files":[{"filename":"github-webhook/index.js","previous":181954,"size":182255,"diff":301}]},{"timestamp":1614104217580,"files":[{"filename":"github-webhook/index.js","previous":181904,"size":181954,"diff":50}]},{"timestamp":1612464188131,"files":[{"filename":"github-webhook/index.js","previous":181855,"size":181904,"diff":49}]},{"timestamp":1611769329263,"files":[{"filename":"github-webhook/index.js","previous":181805,"size":181855,"diff":50}]},{"timestamp":1609970949480,"files":[{"filename":"github-webhook/index.js","previous":179020,"size":181805,"diff":2785}]},{"timestamp":1607360063925,"files":[{"filename":"github-webhook/index.js","previous":178533,"size":179020,"diff":487}]},{"timestamp":1606896280100,"files":[{"filename":"github-webhook/index.js","previous":177980,"size":178533,"diff":553}]},{"timestamp":1606329557431,"files":[{"filename":"github-webhook/index.js","previous":177960,"size":177980,"diff":20}]},{"timestamp":1604259061964,"files":[{"filename":"github-webhook/index.js","previous":177961,"size":177960,"diff":-1}]},{"timestamp":1600966304194,"files":[{"filename":"github-webhook/index.js","previous":177682,"size":177961,"diff":279}]},{"timestamp":1599246609676,"files":[{"filename":"github-webhook/index.js","previous":177721,"size":177682,"diff":-39}]},{"timestamp":1596542584192,"files":[{"filename":"github-webhook/index.js","previous":181657,"size":177721,"diff":-3936}]},{"timestamp":1596540416823,"files":[{"filename":"github-webhook/index.js","previous":181110,"size":181657,"diff":547}]},{"timestamp":1596457756702,"files":[{"filename":"github-webhook/index.js","previous":181095,"size":181110,"diff":15}]},{"timestamp":1596455025723,"files":[{"filename":"github-webhook/index.js","pr
evious":473647,"size":181095,"diff":-292552}]},{"timestamp":1596407673941,"files":[{"filename":"github-webhook/index.js","previous":332785,"size":473647,"diff":140862}]}]
diff --git a/packages/cdk-github-webhook/package.json b/packages/cdk-github-webhook/package.json
index 72f666bf8..9430c5f0d 100644
--- a/packages/cdk-github-webhook/package.json
+++ b/packages/cdk-github-webhook/package.json
@@ -60,14 +60,17 @@
"peerDependencies": {
"@aws-cdk/aws-lambda": "^1.93.0",
"@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0",
"constructs": "^3.2.0"
},
"dependencies": {
"@aws-cdk/aws-lambda": "^1.93.0",
- "@aws-cdk/core": "^1.93.0"
+ "@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0"
},
"devDependencies": {
"@aws-cdk/assert": "^1.93.0",
+ "@cloudcomponents/lambda-utils": "^0.1.1",
"@octokit/rest": "^18.3.5",
"@types/parse-github-url": "^1.0.0",
"axios": "^0.21.1",
diff --git a/packages/cdk-github-webhook/src/github-webhook.ts b/packages/cdk-github-webhook/src/github-webhook.ts
index 20119f478..f59c9ab31 100644
--- a/packages/cdk-github-webhook/src/github-webhook.ts
+++ b/packages/cdk-github-webhook/src/github-webhook.ts
@@ -1,12 +1,13 @@
import * as path from 'path';
import { SingletonFunction, Runtime, Code } from '@aws-cdk/aws-lambda';
import { Construct, Duration, CustomResource } from '@aws-cdk/core';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export interface GithubWebhookProps {
/**
* The OAuth access token
*/
- readonly githubApiToken: string;
+ readonly githubApiToken: string | SecretKey;
/**
* The Github repo url
@@ -31,6 +32,8 @@ export class GithubWebhook extends Construct {
constructor(scope: Construct, id: string, props: GithubWebhookProps) {
super(scope, id);
+ const githubApiToken = typeof props.githubApiToken === 'string' ? SecretKey.fromPlainText(props.githubApiToken) : props.githubApiToken;
+
const handler = new SingletonFunction(this, 'CustomResourceHandler', {
uuid: '83CBF3EB-7B62-44F2-8C67-8441E4C1232E',
runtime: Runtime.NODEJS_12_X,
@@ -40,17 +43,20 @@ export class GithubWebhook extends Construct {
timeout: Duration.minutes(15),
});
- const { githubApiToken, githubRepoUrl, payloadUrl, events, logLevel } = props;
+ if (githubApiToken.grantRead) {
+ githubApiToken.grantRead(handler);
+ }
new CustomResource(this, 'CustomResource', {
serviceToken: handler.functionArn,
resourceType: 'Custom::GithubWebhook',
+ pascalCaseProperties: true,
properties: {
- GithubApiToken: githubApiToken,
- GithubRepoUrl: githubRepoUrl,
- PayloadUrl: payloadUrl,
- Events: events,
- LogLevel: logLevel,
+ githubApiTokenString: githubApiToken.serialize(),
+ githubRepoUrl: props.githubRepoUrl,
+ payloadUrl: props.payloadUrl,
+ events: props.events,
+ logLevel: props.logLevel,
},
});
}
diff --git a/packages/cdk-github-webhook/src/lambdas/github-webhook/index.ts b/packages/cdk-github-webhook/src/lambdas/github-webhook/index.ts
index 609d79f97..257238a3d 100644
--- a/packages/cdk-github-webhook/src/lambdas/github-webhook/index.ts
+++ b/packages/cdk-github-webhook/src/lambdas/github-webhook/index.ts
@@ -1,3 +1,4 @@
+import { SecretKey } from '@cloudcomponents/lambda-utils';
import type { CloudFormationCustomResourceEventCommon } from 'aws-lambda';
import {
customResourceHelper,
@@ -11,19 +12,23 @@ import {
import { createWebhook, updateWebhook, deleteWebhook } from './webhook-api';
+const secretKey = new SecretKey();
+
export interface WebhookProps {
- githubApiToken: string;
+ githubApiTokenString: string;
githubRepoUrl: string;
payloadUrl: string;
events: string[];
}
const handleCreate: OnCreateHandler = async (event): Promise<ResourceHandlerReturn> => {
- const { githubApiToken, githubRepoUrl, payloadUrl, events } = camelizeKeys<
+ const { githubApiTokenString, githubRepoUrl, payloadUrl, events } = camelizeKeys<
WebhookProps,
CloudFormationCustomResourceEventCommon['ResourceProperties']
>(event.ResourceProperties);
+ const githubApiToken = await secretKey.getValue(githubApiTokenString);
+
const { data } = await createWebhook(githubApiToken, githubRepoUrl, payloadUrl, events);
const physicalResourceId = data.id.toString();
@@ -37,11 +42,13 @@ const handleCreate: OnCreateHandler = async (event): Promise<ResourceHandlerRetu
};
const handleUpdate: OnUpdateHandler = async (event): Promise<ResourceHandlerReturn> => {
- const { githubApiToken, githubRepoUrl, payloadUrl, events } = camelizeKeys<
+ const { githubApiTokenString, githubRepoUrl, payloadUrl, events } = camelizeKeys<
WebhookProps,
CloudFormationCustomResourceEventCommon['ResourceProperties']
>(event.ResourceProperties);
+ const githubApiToken = await secretKey.getValue(githubApiTokenString);
+
const hookId = event.PhysicalResourceId;
const { data } = await updateWebhook(githubApiToken, githubRepoUrl, payloadUrl, events, parseInt(hookId, 10));
@@ -57,10 +64,12 @@ const handleUpdate: OnUpdateHandler = async (event): Promise<ResourceHandlerRetu
};
const handleDelete: OnDeleteHandler = async (event): Promise<void> => {
- const { githubApiToken, githubRepoUrl } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
+ const { githubApiTokenString, githubRepoUrl } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
event.ResourceProperties,
);
+ const githubApiToken = await secretKey.getValue(githubApiTokenString);
+
const hookId = event.PhysicalResourceId;
await deleteWebhook(githubApiToken, githubRepoUrl, parseInt(hookId, 10));
diff --git a/packages/cdk-pull-request-check/API.md b/packages/cdk-pull-request-check/API.md
index 4ee426b18..c740b6bf4 100644
--- a/packages/cdk-pull-request-check/API.md
+++ b/packages/cdk-pull-request-check/API.md
@@ -36,13 +36,17 @@ new PullRequestCheck(scope: Construct, id: string, props: PullRequestCheckProps)
* **props** (<code>[PullRequestCheckProps](#cloudcomponents-cdk-pull-request-check-pullrequestcheckprops)</code>) *No description*
* **buildSpec** (<code>[BuildSpec](#aws-cdk-aws-codebuild-buildspec)</code>) Filename or contents of buildspec in JSON format.
* **repository** (<code>[IRepository](#aws-cdk-aws-codecommit-irepository)</code>) The CodeCommit repository.
+ * **allowAllOutbound** (<code>boolean</code>) Whether to allow the CodeBuild to send all network traffic. __*Default*__: true
* **buildImage** (<code>[IBuildImage](#aws-cdk-aws-codebuild-ibuildimage)</code>) Build environment to use for the build. __*Default*__: BuildEnvironment.LinuxBuildImage.STANDARD_2_0
* **computeType** (<code>[ComputeType](#aws-cdk-aws-codebuild-computetype)</code>) The type of compute to use for this build. __*Default*__: taken from {@link #buildImage#defaultComputeType}
* **postComment** (<code>boolean</code>) Specifies whether comments should be written in the request. __*Default*__: true
* **privileged** (<code>boolean</code>) Indicates how the project builds Docker images. __*Default*__: false
* **projectName** (<code>string</code>) The human-visible name of this PullRequest-Project. __*Optional*__
* **role** (<code>[IRole](#aws-cdk-aws-iam-irole)</code>) The IAM service Role of the Project. __*Optional*__
+ * **securityGroups** (<code>Array<[ISecurityGroup](#aws-cdk-aws-ec2-isecuritygroup)></code>) What security group to associate with the codebuild project's network interfaces. __*Default*__: Security group will be automatically created
+ * **subnetSelection** (<code>[SubnetSelection](#aws-cdk-aws-ec2-subnetselection)</code>) Where to place the network interfaces within the VPC. __*Default*__: All private subnets
* **updateApprovalState** (<code>boolean</code>) Indicates whether the approval state [APPROVE, REVOKE] should be updated. __*Default*__: true
+ * **vpc** (<code>[IVpc](#aws-cdk-aws-ec2-ivpc)</code>) VPC network to place codebuild network interfaces. __*Default*__: No VPC is specified
### Methods
@@ -128,13 +132,17 @@ Name | Type | Description
-----|------|-------------
**buildSpec** | <code>[BuildSpec](#aws-cdk-aws-codebuild-buildspec)</code> | Filename or contents of buildspec in JSON format.
**repository** | <code>[IRepository](#aws-cdk-aws-codecommit-irepository)</code> | The CodeCommit repository.
+**allowAllOutbound**? | <code>boolean</code> | Whether to allow the CodeBuild to send all network traffic.<br/>__*Default*__: true
**buildImage**? | <code>[IBuildImage](#aws-cdk-aws-codebuild-ibuildimage)</code> | Build environment to use for the build.<br/>__*Default*__: BuildEnvironment.LinuxBuildImage.STANDARD_2_0
**computeType**? | <code>[ComputeType](#aws-cdk-aws-codebuild-computetype)</code> | The type of compute to use for this build.<br/>__*Default*__: taken from {@link #buildImage#defaultComputeType}
**postComment**? | <code>boolean</code> | Specifies whether comments should be written in the request.<br/>__*Default*__: true
**privileged**? | <code>boolean</code> | Indicates how the project builds Docker images.<br/>__*Default*__: false
**projectName**? | <code>string</code> | The human-visible name of this PullRequest-Project.<br/>__*Optional*__
**role**? | <code>[IRole](#aws-cdk-aws-iam-irole)</code> | The IAM service Role of the Project.<br/>__*Optional*__
+**securityGroups**? | <code>Array<[ISecurityGroup](#aws-cdk-aws-ec2-isecuritygroup)></code> | What security group to associate with the codebuild project's network interfaces.<br/>__*Default*__: Security group will be automatically created
+**subnetSelection**? | <code>[SubnetSelection](#aws-cdk-aws-ec2-subnetselection)</code> | Where to place the network interfaces within the VPC.<br/>__*Default*__: All private subnets
**updateApprovalState**? | <code>boolean</code> | Indicates whether the approval state [APPROVE, REVOKE] should be updated.<br/>__*Default*__: true
+**vpc**? | <code>[IVpc](#aws-cdk-aws-ec2-ivpc)</code> | VPC network to place codebuild network interfaces.<br/>__*Default*__: No VPC is specified
diff --git a/packages/cdk-pull-request-check/src/pull-request-check.ts b/packages/cdk-pull-request-check/src/pull-request-check.ts
index 85e7f7c48..3554703bc 100644
--- a/packages/cdk-pull-request-check/src/pull-request-check.ts
+++ b/packages/cdk-pull-request-check/src/pull-request-check.ts
@@ -141,16 +141,16 @@ export class PullRequestCheck extends Construct {
},
buildSpec,
role,
- vpc: vpc,
- subnetSelection: subnetSelection,
- securityGroups: securityGroups,
- allowAllOutbound: allowAllOutbound,
+ vpc,
+ subnetSelection,
+ securityGroups,
+ allowAllOutbound,
});
if (updateApprovalState || postComment) {
const codeBuildResultFunction = new Function(this, 'CodeBuildResultFunction', {
runtime: Runtime.NODEJS_12_X,
- code: Code.asset(path.join(__dirname, 'lambdas', 'code-build-result')),
+ code: Code.fromAsset(path.join(__dirname, 'lambdas', 'code-build-result')),
handler: 'index.handler',
environment: {
UPDATE_APPROVAL_STATE: updateApprovalState ? 'TRUE' : 'FALSE',
diff --git a/packages/cdk-secret-key/API.md b/packages/cdk-secret-key/API.md
new file mode 100644
index 000000000..188f9003e
--- /dev/null
+++ b/packages/cdk-secret-key/API.md
@@ -0,0 +1,205 @@
+# API Reference
+
+**Classes**
+
+Name|Description
+----|-----------
+[SecretKey](#cloudcomponents-cdk-secret-key-secretkey)|*No description*
+[SecretKeyStore](#cloudcomponents-cdk-secret-key-secretkeystore)|*No description*
+
+
+**Enums**
+
+Name|Description
+----|-----------
+[KeyType](#cloudcomponents-cdk-secret-key-keytype)|*No description*
+
+
+
+## class SecretKey <a id="cloudcomponents-cdk-secret-key-secretkey"></a>
+
+
+
+
+### Initializer
+
+
+
+
+```ts
+new SecretKey(secretKeyType: KeyType)
+```
+
+* **secretKeyType** (<code>[KeyType](#cloudcomponents-cdk-secret-key-keytype)</code>) *No description*
+
+
+
+### Properties
+
+
+Name | Type | Description
+-----|------|-------------
+**secretKeyType** | <code>[KeyType](#cloudcomponents-cdk-secret-key-keytype)</code> | <span></span>
+
+### Methods
+
+
+#### grantRead(grantee) <a id="cloudcomponents-cdk-secret-key-secretkey-grantread"></a>
+
+
+
+```ts
+grantRead(grantee: IGrantable): Grant
+```
+
+* **grantee** (<code>[IGrantable](#aws-cdk-aws-iam-igrantable)</code>) *No description*
+
+__Returns__:
+* <code>[Grant](#aws-cdk-aws-iam-grant)</code>
+
+#### serialize() <a id="cloudcomponents-cdk-secret-key-secretkey-serialize"></a>
+
+
+
+```ts
+serialize(): string
+```
+
+
+__Returns__:
+* <code>string</code>
+
+#### *static* fromPlainText(value) <a id="cloudcomponents-cdk-secret-key-secretkey-fromplaintext"></a>
+
+
+
+```ts
+static fromPlainText(value: string): SecretKey
+```
+
+* **value** (<code>string</code>) *No description*
+
+__Returns__:
+* <code>[SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code>
+
+#### *static* fromSSMParameter(secretKeyParameter) <a id="cloudcomponents-cdk-secret-key-secretkey-fromssmparameter"></a>
+
+
+
+```ts
+static fromSSMParameter(secretKeyParameter: IParameter): SecretKey
+```
+
+* **secretKeyParameter** (<code>[IParameter](#aws-cdk-aws-ssm-iparameter)</code>) *No description*
+
+__Returns__:
+* <code>[SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code>
+
+#### *static* fromSecretsManager(secretKeySecret, fieldName?) <a id="cloudcomponents-cdk-secret-key-secretkey-fromsecretsmanager"></a>
+
+
+
+```ts
+static fromSecretsManager(secretKeySecret: ISecret, fieldName?: string): SecretKey
+```
+
+* **secretKeySecret** (<code>[ISecret](#aws-cdk-aws-secretsmanager-isecret)</code>) *No description*
+* **fieldName** (<code>string</code>) *No description*
+
+__Returns__:
+* <code>[SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code>
+
+
+
+## class SecretKeyStore <a id="cloudcomponents-cdk-secret-key-secretkeystore"></a>
+
+
+
+
+### Initializer
+
+
+
+
+```ts
+new SecretKeyStore(secretKeyType: KeyType)
+```
+
+* **secretKeyType** (<code>[KeyType](#cloudcomponents-cdk-secret-key-keytype)</code>) *No description*
+
+
+
+### Properties
+
+
+Name | Type | Description
+-----|------|-------------
+**secretKeyType** | <code>[KeyType](#cloudcomponents-cdk-secret-key-keytype)</code> | <span></span>
+
+### Methods
+
+
+#### grantWrite(grantee) <a id="cloudcomponents-cdk-secret-key-secretkeystore-grantwrite"></a>
+
+
+
+```ts
+grantWrite(grantee: IGrantable): Grant
+```
+
+* **grantee** (<code>[IGrantable](#aws-cdk-aws-iam-igrantable)</code>) *No description*
+
+__Returns__:
+* <code>[Grant](#aws-cdk-aws-iam-grant)</code>
+
+#### serialize() <a id="cloudcomponents-cdk-secret-key-secretkeystore-serialize"></a>
+
+
+
+```ts
+serialize(): string
+```
+
+
+__Returns__:
+* <code>string</code>
+
+#### *static* fromSSMParameter(secretKeyParameter) <a id="cloudcomponents-cdk-secret-key-secretkeystore-fromssmparameter"></a>
+
+
+
+```ts
+static fromSSMParameter(secretKeyParameter: IParameter): SecretKeyStore
+```
+
+* **secretKeyParameter** (<code>[IParameter](#aws-cdk-aws-ssm-iparameter)</code>) *No description*
+
+__Returns__:
+* <code>[SecretKeyStore](#cloudcomponents-cdk-secret-key-secretkeystore)</code>
+
+#### *static* fromSecretsManager(secretKeySecret) <a id="cloudcomponents-cdk-secret-key-secretkeystore-fromsecretsmanager"></a>
+
+
+
+```ts
+static fromSecretsManager(secretKeySecret: ISecret): SecretKeyStore
+```
+
+* **secretKeySecret** (<code>[ISecret](#aws-cdk-aws-secretsmanager-isecret)</code>) *No description*
+
+__Returns__:
+* <code>[SecretKeyStore](#cloudcomponents-cdk-secret-key-secretkeystore)</code>
+
+
+
+## enum KeyType <a id="cloudcomponents-cdk-secret-key-keytype"></a>
+
+
+
+Name | Description
+-----|-----
+**SECRETS_MANAGER** |
+**SSM_PARAMETER** |
+**PLAIN_TEXT** |
+
+
diff --git a/packages/cdk-secret-key/LICENSE b/packages/cdk-secret-key/LICENSE
new file mode 100644
index 000000000..50db47fc0
--- /dev/null
+++ b/packages/cdk-secret-key/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 cloudcomponents
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/packages/cdk-secret-key/README.md b/packages/cdk-secret-key/README.md
new file mode 100644
index 000000000..7df8af121
--- /dev/null
+++ b/packages/cdk-secret-key/README.md
@@ -0,0 +1,47 @@
+[](https://github.com/cloudcomponents/cdk-constructs)
+
+# @cloudcomponents/cdk-secret-key
+
+[](https://github.com/cloudcomponents/cdk-constructs/actions?query=workflow=Build)
+[](https://github.com/hupe1980/cdkdx)
+[](https://www.npmjs.com/package/@cloudcomponents/cdk-secret-key)
+[](https://pypi.org/project/cloudcomponents.cdk-secret-key/)
+
+> Provide secret keys to lambdas
+
+## Install
+TypeScript/JavaScript:
+
+```bash
+npm i @cloudcomponents/cdk-secret-key
+```
+
+Python:
+
+```bash
+pip install cloudcomponents.cdk-secret-key
+```
+
+## How to use
+
+```typescript
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
+
+const secretKey = SecretKey.fromPlainText(process.env.SECRET_KEY as string);
+const secretKeyString = secretKey.serialize();
+
+```
+
+See [cloudcomponents/lambda-utils-nodejs](https://github.com/cloudcomponents/lambda-utils-nodejs) for the counterpart in lambda functions
+
+## API Reference
+
+See [API.md](https://github.com/cloudcomponents/cdk-constructs/tree/master/packages/cdk-secret-key/API.md).
+
+## Example
+
+See more complete [examples](https://github.com/cloudcomponents/cdk-constructs/tree/master/examples).
+
+## License
+
+[MIT](https://github.com/cloudcomponents/cdk-constructs/tree/master/packages/cdk-secret-key/LICENSE)
diff --git a/packages/cdk-secret-key/package.json b/packages/cdk-secret-key/package.json
new file mode 100644
index 000000000..08d872133
--- /dev/null
+++ b/packages/cdk-secret-key/package.json
@@ -0,0 +1,75 @@
+{
+ "name": "@cloudcomponents/cdk-secret-key",
+ "version": "1.0.0",
+ "description": "Provide secret keys to lambdas",
+ "license": "MIT",
+ "author": {
+ "name": "hupe1980",
+ "url": "https://github.com/hupe1980"
+ },
+ "awscdkio": {
+ "twitter": "hupe1980"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/cloudcomponents/cdk-constructs.git",
+ "directory": "packages/cdk-secret-key"
+ },
+ "homepage": "https://github.com/cloudcomponents/cdk-constructs",
+ "keywords": [
+ "aws",
+ "cdk",
+ "secrets",
+ "@cloudcomponents"
+ ],
+ "main": "lib/index.js",
+ "types": "lib/index.d.ts",
+ "files": [
+ "lib",
+ ".jsii"
+ ],
+ "scripts": {
+ "build": "cdkdx build",
+ "watch": "cdkdx build -w",
+ "test": "cdkdx test",
+ "lint": "cdkdx lint",
+ "upgrade:cdk": "cdkdx upgrade-cdk",
+ "package": "cdkdx package",
+ "docgen": "cdkdx docgen",
+ "release:npm": "cdkdx release npm",
+ "release:pypi": "cdkdx release pypi"
+ },
+ "jsii": {
+ "outdir": "dist",
+ "tsc": {
+ "outDir": "lib",
+ "rootDir": "src"
+ },
+ "excludeTypescript": [
+ "src/lambdas",
+ "src/**/__tests__"
+ ],
+ "targets": {
+ "python": {
+ "distName": "cloudcomponents.cdk-secret-key",
+ "module": "cloudcomponents.cdk_secret_key"
+ }
+ }
+ },
+ "peerDependencies": {
+ "@aws-cdk/aws-iam": "^1.93.0",
+ "@aws-cdk/aws-ssm": "^1.93.0",
+ "@aws-cdk/aws-secretsmanager": "^1.93.0",
+ "@aws-cdk/core": "^1.93.0",
+ "constructs": "^3.2.0"
+ },
+ "dependencies": {
+ "@aws-cdk/aws-iam": "^1.93.0",
+ "@aws-cdk/aws-ssm": "^1.93.0",
+ "@aws-cdk/aws-secretsmanager": "^1.93.0",
+ "@aws-cdk/core": "^1.93.0"
+ },
+ "publishConfig": {
+ "access": "public"
+ }
+}
diff --git a/packages/cdk-secret-key/src/index.ts b/packages/cdk-secret-key/src/index.ts
new file mode 100644
index 000000000..29ef29457
--- /dev/null
+++ b/packages/cdk-secret-key/src/index.ts
@@ -0,0 +1,3 @@
+export * from './key-type';
+export * from './secret-key-store';
+export * from './secret-key';
diff --git a/packages/cdk-secret-key/src/key-type.ts b/packages/cdk-secret-key/src/key-type.ts
new file mode 100644
index 000000000..2f6b196a5
--- /dev/null
+++ b/packages/cdk-secret-key/src/key-type.ts
@@ -0,0 +1,5 @@
+export enum KeyType {
+ SECRETS_MANAGER = 'SECRETS_MANAGER',
+ SSM_PARAMETER = 'SSM_PARAMETER',
+ PLAIN_TEXT = 'PLAIN_TEXT',
+}
diff --git a/packages/cdk-secret-key/src/secret-key-store.ts b/packages/cdk-secret-key/src/secret-key-store.ts
new file mode 100644
index 000000000..df55dc24d
--- /dev/null
+++ b/packages/cdk-secret-key/src/secret-key-store.ts
@@ -0,0 +1,55 @@
+import { Grant, IGrantable } from '@aws-cdk/aws-iam';
+import { ISecret } from '@aws-cdk/aws-secretsmanager';
+import { IParameter } from '@aws-cdk/aws-ssm';
+
+import { KeyType } from './key-type';
+
+export abstract class SecretKeyStore {
+ public static fromSSMParameter(secretKeyParameter: IParameter): SecretKeyStore {
+ return new SSMParameterSecretKeyStore(secretKeyParameter);
+ }
+
+ public static fromSecretsManager(secretKeySecret: ISecret): SecretKeyStore {
+ return new SecretsManagerSecretKeyStore(secretKeySecret);
+ }
+
+ constructor(public readonly secretKeyType: KeyType) {}
+
+ public abstract grantWrite?(grantee: IGrantable): Grant;
+
+ public abstract serialize(): string;
+}
+
+class SecretsManagerSecretKeyStore extends SecretKeyStore {
+ constructor(public readonly secretKeySecret: ISecret) {
+ super(KeyType.SECRETS_MANAGER);
+ }
+
+ public grantWrite(grantee: IGrantable): Grant {
+ return this.secretKeySecret.grantWrite(grantee);
+ }
+
+ public serialize(): string {
+ return JSON.stringify({
+ secretKeyType: this.secretKeyType,
+ secretArn: this.secretKeySecret.secretArn,
+ });
+ }
+}
+
+class SSMParameterSecretKeyStore extends SecretKeyStore {
+ constructor(public readonly secretKeyParameter: IParameter) {
+ super(KeyType.SSM_PARAMETER);
+ }
+
+ public grantWrite(grantee: IGrantable): Grant {
+ return this.secretKeyParameter.grantWrite(grantee);
+ }
+
+ public serialize(): string {
+ return JSON.stringify({
+ secretKeyType: this.secretKeyType,
+ parameterName: this.secretKeyParameter.parameterName,
+ });
+ }
+}
diff --git a/packages/cdk-secret-key/src/secret-key.ts b/packages/cdk-secret-key/src/secret-key.ts
new file mode 100644
index 000000000..e0c1a7104
--- /dev/null
+++ b/packages/cdk-secret-key/src/secret-key.ts
@@ -0,0 +1,75 @@
+import { Grant, IGrantable } from '@aws-cdk/aws-iam';
+import { ISecret } from '@aws-cdk/aws-secretsmanager';
+import { IParameter } from '@aws-cdk/aws-ssm';
+
+import { KeyType } from './key-type';
+
+export abstract class SecretKey {
+ public static fromPlainText(value: string): SecretKey {
+ return new PlainTextSecretKey(value);
+ }
+
+ public static fromSSMParameter(secretKeyParameter: IParameter): SecretKey {
+ return new SSMParameterSecretKey(secretKeyParameter);
+ }
+
+ public static fromSecretsManager(secretKeySecret: ISecret, fieldName?: string): SecretKey {
+ return new SecretsManagerSecretKey(secretKeySecret, fieldName);
+ }
+
+ constructor(public readonly secretKeyType: KeyType) {}
+
+ public abstract grantRead?(grantee: IGrantable): Grant;
+
+ public abstract serialize(): string;
+}
+
+class PlainTextSecretKey extends SecretKey {
+ public grantRead: undefined;
+
+ constructor(public readonly value: string) {
+ super(KeyType.PLAIN_TEXT);
+ }
+
+ public serialize(): string {
+ return JSON.stringify({
+ secretKeyType: this.secretKeyType,
+ value: this.value,
+ });
+ }
+}
+
+class SecretsManagerSecretKey extends SecretKey {
+ constructor(public readonly secretKeySecret: ISecret, public readonly fieldName?: string) {
+ super(KeyType.SECRETS_MANAGER);
+ }
+
+ public grantRead(grantee: IGrantable): Grant {
+ return this.secretKeySecret.grantRead(grantee);
+ }
+
+ public serialize(): string {
+ return JSON.stringify({
+ secretKeyType: this.secretKeyType,
+ secretId: this.secretKeySecret.secretArn,
+ fieldName: this.fieldName,
+ });
+ }
+}
+
+class SSMParameterSecretKey extends SecretKey {
+ constructor(public readonly secretKeyParameter: IParameter) {
+ super(KeyType.SSM_PARAMETER);
+ }
+
+ public grantRead(grantee: IGrantable): Grant {
+ return this.secretKeyParameter.grantRead(grantee);
+ }
+
+ public serialize(): string {
+ return JSON.stringify({
+ secretKeyType: this.secretKeyType,
+ parameterName: this.secretKeyParameter.parameterName,
+ });
+ }
+}
diff --git a/packages/cdk-stripe-webhook/API.md b/packages/cdk-stripe-webhook/API.md
index 0cb1cc437..0c28fe7d5 100644
--- a/packages/cdk-stripe-webhook/API.md
+++ b/packages/cdk-stripe-webhook/API.md
@@ -35,7 +35,7 @@ new StripeWebhook(scope: Construct, id: string, props: StripeWebhookProps)
* **id** (<code>string</code>) *No description*
* **props** (<code>[StripeWebhookProps](#cloudcomponents-cdk-stripe-webhook-stripewebhookprops)</code>) *No description*
* **events** (<code>Array<string></code>) *No description*
- * **secretKey** (<code>string</code>) *No description*
+ * **secretKey** (<code>string | [SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code>) *No description*
* **url** (<code>string</code>) *No description*
* **description** (<code>string</code>) *No description* __*Optional*__
* **logLevel** (<code>string</code>) *No description* __*Optional*__
@@ -61,7 +61,7 @@ Name | Type | Description
Name | Type | Description
-----|------|-------------
**events** | <code>Array<string></code> | <span></span>
-**secretKey** | <code>string</code> | <span></span>
+**secretKey** | <code>string | [SecretKey](#cloudcomponents-cdk-secret-key-secretkey)</code> | <span></span>
**url** | <code>string</code> | <span></span>
**description**? | <code>string</code> | __*Optional*__
**logLevel**? | <code>string</code> | __*Optional*__
diff --git a/packages/cdk-stripe-webhook/README.md b/packages/cdk-stripe-webhook/README.md
index 68ef0201c..842e06251 100644
--- a/packages/cdk-stripe-webhook/README.md
+++ b/packages/cdk-stripe-webhook/README.md
@@ -25,10 +25,10 @@ pip install cloudcomponents.cdk-stripe-webhook
## How to use
```typescript
-import { Construct, Stack, StackProps } from '@aws-cdk/core';
import { RestApi } from '@aws-cdk/aws-apigateway';
+import { Construct, Stack, StackProps } from '@aws-cdk/core';
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
import { StripeWebhook } from '@cloudcomponents/cdk-stripe-webhook';
-
export class StripeWebhookStack extends Stack {
constructor(scope: Construct, id: string, props?: StackProps) {
super(scope, id, props);
@@ -36,7 +36,7 @@ export class StripeWebhookStack extends Stack {
const api = new RestApi(this, 'Endpoint');
api.root.addMethod('POST');
- const secretKey = process.env.SECRET_KEY as string;
+ const secretKey = SecretKey.fromPlainText(process.env.SECRET_KEY as string);
const events = ['charge.failed', 'charge.succeeded'];
@@ -48,6 +48,7 @@ export class StripeWebhookStack extends Stack {
});
}
}
+
```
## API Reference
diff --git a/packages/cdk-stripe-webhook/lambda-file-sizes.json b/packages/cdk-stripe-webhook/lambda-file-sizes.json
index 8e28251a1..2047754d6 100644
--- a/packages/cdk-stripe-webhook/lambda-file-sizes.json
+++ b/packages/cdk-stripe-webhook/lambda-file-sizes.json
@@ -1,1 +1,1 @@
-[{"timestamp":1615535054925,"files":[{"filename":"stripe-webhook/index.js","previous":178288,"size":178311,"diff":23}]},{"timestamp":1612894907890,"files":[{"filename":"stripe-webhook/index.js","previous":178247,"size":178288,"diff":41}]},{"timestamp":1611251026819,"files":[{"filename":"stripe-webhook/index.js","previous":178239,"size":178247,"diff":8}]},{"timestamp":1610572412633,"files":[{"filename":"stripe-webhook/index.js","previous":178125,"size":178239,"diff":114}]},{"timestamp":1609970972468,"files":[{"filename":"stripe-webhook/index.js","previous":175300,"size":178125,"diff":2825}]},{"timestamp":1607360080295,"files":[{"filename":"stripe-webhook/index.js","previous":175302,"size":175300,"diff":-2}]},{"timestamp":1606332128770,"files":[{"filename":"stripe-webhook/index.js","previous":175297,"size":175302,"diff":5}]},{"timestamp":1606331402732,"files":[{"filename":"stripe-webhook/index.js","previous":173633,"size":175297,"diff":1664}]},{"timestamp":1600966323131,"files":[{"filename":"stripe-webhook/index.js","previous":173622,"size":173633,"diff":11}]},{"timestamp":1596542600035,"files":[{"filename":"stripe-webhook/index.js","previous":177655,"size":173622,"diff":-4033}]},{"timestamp":1596540606102,"files":[{"filename":"stripe-webhook/index.js","previous":177121,"size":177655,"diff":534}]},{"timestamp":1596458220846,"files":[{"filename":"stripe-webhook/index.js","previous":177110,"size":177121,"diff":11}]},{"timestamp":1596457299467,"files":[{"filename":"stripe-webhook/index.js","previous":463445,"size":177110,"diff":-286335}]},{"timestamp":1596407687567,"files":[{"filename":"stripe-webhook/index.js","previous":164597,"size":463445,"diff":298848}]}]
+[{"timestamp":1615805520640,"files":[{"filename":"stripe-webhook/index.js","previous":178329,"size":1004493,"diff":826164}]},{"timestamp":1615673334493,"files":[{"filename":"stripe-webhook/index.js","previous":178311,"size":178329,"diff":18}]},{"timestamp":1615535054925,"files":[{"filename":"stripe-webhook/index.js","previous":178288,"size":178311,"diff":23}]},{"timestamp":1612894907890,"files":[{"filename":"stripe-webhook/index.js","previous":178247,"size":178288,"diff":41}]},{"timestamp":1611251026819,"files":[{"filename":"stripe-webhook/index.js","previous":178239,"size":178247,"diff":8}]},{"timestamp":1610572412633,"files":[{"filename":"stripe-webhook/index.js","previous":178125,"size":178239,"diff":114}]},{"timestamp":1609970972468,"files":[{"filename":"stripe-webhook/index.js","previous":175300,"size":178125,"diff":2825}]},{"timestamp":1607360080295,"files":[{"filename":"stripe-webhook/index.js","previous":175302,"size":175300,"diff":-2}]},{"timestamp":1606332128770,"files":[{"filename":"stripe-webhook/index.js","previous":175297,"size":175302,"diff":5}]},{"timestamp":1606331402732,"files":[{"filename":"stripe-webhook/index.js","previous":173633,"size":175297,"diff":1664}]},{"timestamp":1600966323131,"files":[{"filename":"stripe-webhook/index.js","previous":173622,"size":173633,"diff":11}]},{"timestamp":1596542600035,"files":[{"filename":"stripe-webhook/index.js","previous":177655,"size":173622,"diff":-4033}]},{"timestamp":1596540606102,"files":[{"filename":"stripe-webhook/index.js","previous":177121,"size":177655,"diff":534}]},{"timestamp":1596458220846,"files":[{"filename":"stripe-webhook/index.js","previous":177110,"size":177121,"diff":11}]},{"timestamp":1596457299467,"files":[{"filename":"stripe-webhook/index.js","previous":463445,"size":177110,"diff":-286335}]},{"timestamp":1596407687567,"files":[{"filename":"stripe-webhook/index.js","previous":164597,"size":463445,"diff":298848}]}]
diff --git a/packages/cdk-stripe-webhook/package.json b/packages/cdk-stripe-webhook/package.json
index 84dcd1d4d..096213e6e 100644
--- a/packages/cdk-stripe-webhook/package.json
+++ b/packages/cdk-stripe-webhook/package.json
@@ -60,14 +60,17 @@
"peerDependencies": {
"@aws-cdk/aws-lambda": "^1.93.0",
"@aws-cdk/core": "^1.93.0",
- "constructs": "^3.2.0"
+ "constructs": "^3.2.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0"
},
"dependencies": {
"@aws-cdk/aws-lambda": "^1.93.0",
- "@aws-cdk/core": "^1.93.0"
+ "@aws-cdk/core": "^1.93.0",
+ "@cloudcomponents/cdk-secret-key": "^1.0.0"
},
"devDependencies": {
"@aws-cdk/assert": "^1.93.0",
+ "@cloudcomponents/lambda-utils": "^0.1.1",
"@types/stripe": "^8.0.417",
"custom-resource-helper": "^1.0.15",
"jest-cdk-snapshot": "^1.4.1",
diff --git a/packages/cdk-stripe-webhook/src/lambdas/stripe-webhook/index.ts b/packages/cdk-stripe-webhook/src/lambdas/stripe-webhook/index.ts
index 2de192c35..d44cea35d 100644
--- a/packages/cdk-stripe-webhook/src/lambdas/stripe-webhook/index.ts
+++ b/packages/cdk-stripe-webhook/src/lambdas/stripe-webhook/index.ts
@@ -1,3 +1,4 @@
+import { SecretKey } from '@cloudcomponents/lambda-utils';
import type { CloudFormationCustomResourceEventCommon } from 'aws-lambda';
import {
camelizeKeys,
@@ -10,19 +11,22 @@ import {
} from 'custom-resource-helper';
import Stripe from 'stripe';
+const secretKey = new SecretKey();
export interface WebhookProps {
- secretKey: string;
+ secretKeyString: string;
url: string;
description?: string;
events: Stripe.WebhookEndpointCreateParams.EnabledEvent[];
}
const handleCreate: OnCreateHandler = async (event, _): Promise<ResourceHandlerReturn> => {
- const { secretKey, url, events, description } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
+ const { secretKeyString, url, events, description } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
event.ResourceProperties,
);
- const stripe = new Stripe(secretKey, { apiVersion: '2020-08-27' });
+ const value = await secretKey.getValue(secretKeyString);
+
+ const stripe = new Stripe(value, { apiVersion: '2020-08-27' });
const data = await stripe.webhookEndpoints.create({
url,
@@ -41,13 +45,15 @@ const handleCreate: OnCreateHandler = async (event, _): Promise<ResourceHandlerR
};
const handleUpdate: OnUpdateHandler = async (event, _): Promise<ResourceHandlerReturn> => {
- const { secretKey, url, events, description } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
+ const { secretKeyString, url, events, description } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(
event.ResourceProperties,
);
+ const value = await secretKey.getValue(secretKeyString);
+
const webhookId = event.PhysicalResourceId;
- const stripe = new Stripe(secretKey, { apiVersion: '2020-08-27' });
+ const stripe = new Stripe(value, { apiVersion: '2020-08-27' });
const data = await stripe.webhookEndpoints.update(webhookId, {
url,
@@ -66,11 +72,13 @@ const handleUpdate: OnUpdateHandler = async (event, _): Promise<ResourceHandlerR
};
const handleDelete: OnDeleteHandler = async (event, _): Promise<void> => {
- const { secretKey } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(event.ResourceProperties);
+ const { secretKeyString } = camelizeKeys<WebhookProps, CloudFormationCustomResourceEventCommon['ResourceProperties']>(event.ResourceProperties);
+
+ const value = await secretKey.getValue(secretKeyString);
const webhookId = event.PhysicalResourceId;
- const stripe = new Stripe(secretKey, { apiVersion: '2020-08-27' });
+ const stripe = new Stripe(value, { apiVersion: '2020-08-27' });
await stripe.webhookEndpoints.del(webhookId);
};
diff --git a/packages/cdk-stripe-webhook/src/stripe-webhook.ts b/packages/cdk-stripe-webhook/src/stripe-webhook.ts
index 1a924463b..b0282020f 100644
--- a/packages/cdk-stripe-webhook/src/stripe-webhook.ts
+++ b/packages/cdk-stripe-webhook/src/stripe-webhook.ts
@@ -1,9 +1,9 @@
import * as path from 'path';
import { SingletonFunction, Runtime, Code } from '@aws-cdk/aws-lambda';
import { Construct, Duration, CustomResource } from '@aws-cdk/core';
-
+import { SecretKey } from '@cloudcomponents/cdk-secret-key';
export interface StripeWebhookProps {
- readonly secretKey: string;
+ readonly secretKey: SecretKey | string;
readonly url: string;
readonly description?: string;
readonly events: string[];
@@ -16,6 +16,8 @@ export class StripeWebhook extends Construct {
constructor(scope: Construct, id: string, props: StripeWebhookProps) {
super(scope, id);
+ const secretKey = typeof props.secretKey === 'string' ? SecretKey.fromPlainText(props.secretKey) : props.secretKey;
+
const handler = new SingletonFunction(this, 'CustomResourceHandler', {
uuid: 'e9db3870-d793-4cd2-96a9-efe2e318ebbc',
runtime: Runtime.NODEJS_12_X,
@@ -25,12 +27,20 @@ export class StripeWebhook extends Construct {
timeout: Duration.minutes(15),
});
+ if (secretKey.grantRead) {
+ secretKey.grantRead(handler);
+ }
+
const cr = new CustomResource(this, 'CustomResource', {
serviceToken: handler.functionArn,
resourceType: 'Custom::StripeWebhook',
pascalCaseProperties: true,
properties: {
- ...props,
+ url: props.url,
+ description: props.description,
+ events: props.events,
+ logLevel: props.logLevel,
+ secretKeyString: secretKey.serialize(),
},
});
diff --git a/yarn.lock b/yarn.lock
index 1a945e6d0..ec41cc10b 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -662,7 +662,7 @@
"@aws-cdk/core" "1.93.0"
constructs "^3.2.0"
-"@aws-cdk/[email protected]":
+"@aws-cdk/[email protected]", "@aws-cdk/aws-secretsmanager@^1.93.0":
version "1.93.0"
resolved "https://registry.yarnpkg.com/@aws-cdk/aws-secretsmanager/-/aws-secretsmanager-1.93.0.tgz#e45d0c0d00838cd7343cf505fd47871d9701e9bb"
integrity sha512-Vyl2k66J0Ag256W8YBcdS7QM9cr6/aBZuEq5kyFT9GMwhpQ5RpDT+OUg31Pu3mLizlU70YlYueegnUQYEjxTbQ==
@@ -1127,6 +1127,13 @@
resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39"
integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==
+"@cloudcomponents/lambda-utils@^0.1.1":
+ version "0.1.1"
+ resolved "https://registry.yarnpkg.com/@cloudcomponents/lambda-utils/-/lambda-utils-0.1.1.tgz#244aa2641c98ae773cc43c1f0c23b37bb6ae5c07"
+ integrity sha512-TTnT1yRGeL28EBFRhDXBMiUKn/mK95ZXKY+cOduG/MYU5U8hjC3/H+t0efkUPQJTnAyqR2jzvY8tsUkqFQmf+Q==
+ dependencies:
+ aws-parameter-cache "^1.2.0"
+
"@cnakazawa/watch@^1.0.3":
version "1.0.4"
resolved "https://registry.yarnpkg.com/@cnakazawa/watch/-/watch-1.0.4.tgz#f864ae85004d0fcab6f50be9141c4da368d1656a"
@@ -3561,6 +3568,11 @@ aws-local-testing-library@^0.0.7:
lodash.merge "^4.6.2"
uuid "^8.3.0"
+aws-parameter-cache@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/aws-parameter-cache/-/aws-parameter-cache-1.2.0.tgz#b202fcaace4e066a9b6f9a00e7eb29daab900643"
+ integrity sha512-GoMfOSEYJNXyUqkK2YBwyR6cKUEv8Gs6LrTx5pkZhLKCnU7finPgLHhS76SppPyrBJ9CuzRBi8FH4mvVf0eZ9w==
+
aws-sdk@^2.848.0:
version "2.849.0"
resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.849.0.tgz#7fc9863e04f9e97c1f81f204fca84dfb2e4175dd"
| Stripe Webhook does not work with secrets vended by SecretsManager
What I'm trying to do:
```ts
...
const secretKey = SecretValue.secretsManager(stripeKeySecretName)
...
new StripeWebhook(this, 'StripeWebhook', {
secretKey: secretKey.toString(),
url: api.url,
events,
logLevel: 'debug',
})
```
That is to read stripe secret key value from SecretsManager vs providing it in code which is problematic.
I get the following error when trying to deploy this:
```
6:44:59 PM | CREATE_FAILED | Custom::StripeWebhook | StripeWebhookCustomResource8D1A2FEA
Failed to create resource. Invalid API Key provided: {{resolv********************************************************
*************************************************::}}
```
**Expected outcome:**
The key can be resolved from secrets manager.
---
Ideally I think this interface would actually accept the `SecretValue` instead of a string (and you can build that either directly from text or from SecretsManager/SSM/etc
| It's been on my to-do list for a long time. But unfortunately cloudformation doesn't support that yet:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
_Dynamic references for secure values, such as ssm-secure and secretsmanager, are not currently supported in custom resources._
One possible solution would be pass the ARN as a parameter and read the secret in the lambda
>One possible solution would be pass the ARN as a parameter and read the secret in the lambda
I think that'd be the best approach given current limitations.
a very basic level support can be just resolving secret into env variables in lambda (which is not idea but cfn-supported) | 2021-03-15T11:23:52 | 0.0 | [] | [] |
||
MarketSquare/robotframework-openapidriver | MarketSquare__robotframework-openapidriver-18 | 3467ff5ef2643999fc4863e13a2b0c5e54ace0a5 | diff --git a/poetry.lock b/poetry.lock
index 9cd0bca..1fe01b2 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,6 +1,6 @@
[[package]]
name = "anyio"
-version = "3.3.4"
+version = "3.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
category = "dev"
optional = false
@@ -12,7 +12,7 @@ sniffio = ">=1.1"
[package.extras]
doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"]
-test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=6.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"]
+test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=6.0)", "pytest-mock (>=3.6.1)", "trustme", "contextlib2", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"]
trio = ["trio (>=0.16)"]
[[package]]
@@ -28,7 +28,7 @@ tests = ["pytest", "pytest-asyncio", "mypy (>=0.800)"]
[[package]]
name = "astroid"
-version = "2.8.6"
+version = "2.9.0"
description = "An abstract syntax tree for Python with inference support."
category = "dev"
optional = false
@@ -55,7 +55,7 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>
[[package]]
name = "black"
-version = "21.11b1"
+version = "21.12b0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
@@ -66,7 +66,6 @@ click = ">=7.1.2"
mypy-extensions = ">=0.4.3"
pathspec = ">=0.9.0,<1"
platformdirs = ">=2"
-regex = ">=2021.4.4"
tomli = ">=0.2.6,<2.0.0"
typing-extensions = [
{version = ">=3.10.0.0", markers = "python_version < \"3.10\""},
@@ -98,7 +97,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
name = "charset-normalizer"
-version = "2.0.7"
+version = "2.0.9"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
@@ -150,7 +149,7 @@ python-versions = ">= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.*, != 3.4.*"
[[package]]
name = "docutils"
-version = "0.18"
+version = "0.18.1"
description = "Docutils -- Python Documentation Utilities"
category = "main"
optional = false
@@ -273,7 +272,7 @@ python-versions = "*"
[[package]]
name = "more-itertools"
-version = "8.11.0"
+version = "8.12.0"
description = "More routines for operating on iterables, beyond itertools"
category = "main"
optional = false
@@ -441,19 +440,19 @@ python-versions = ">=3.5"
[[package]]
name = "pylint"
-version = "2.11.1"
+version = "2.12.2"
description = "python code static checker"
category = "dev"
optional = false
-python-versions = "~=3.6"
+python-versions = ">=3.6.2"
[package.dependencies]
-astroid = ">=2.8.0,<2.9"
+astroid = ">=2.9.0,<2.10"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
isort = ">=4.2.5,<6"
mccabe = ">=0.6,<0.7"
platformdirs = ">=2.2.0"
-toml = ">=0.7.1"
+toml = ">=0.9.2"
typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
[[package]]
@@ -472,14 +471,6 @@ category = "main"
optional = false
python-versions = ">=3.6"
-[[package]]
-name = "regex"
-version = "2021.11.10"
-description = "Alternative regular expression module, to replace re."
-category = "dev"
-optional = false
-python-versions = "*"
-
[[package]]
name = "requests"
version = "2.26.0"
@@ -522,14 +513,6 @@ robotframework = ">=3.1"
[package.extras]
xls = ["pandas", "xlrd (>=1.2.0)", "openpyxl"]
-[[package]]
-name = "robotframework-pythonlibcore"
-version = "3.0.0"
-description = "Tools to ease creating larger test libraries for Robot Framework using Python."
-category = "main"
-optional = false
-python-versions = ">=3.6, <4"
-
[[package]]
name = "robotframework-stacktrace"
version = "0.4.1"
@@ -635,7 +618,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-requests"
-version = "2.26.0"
+version = "2.26.1"
description = "Typing stubs for requests"
category = "dev"
optional = false
@@ -643,7 +626,7 @@ python-versions = "*"
[[package]]
name = "typing-extensions"
-version = "4.0.0"
+version = "4.0.1"
description = "Backported and Experimental Type Hints for Python 3.6+"
category = "dev"
optional = false
@@ -664,7 +647,7 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "uvicorn"
-version = "0.15.0"
+version = "0.16.0"
description = "The lightning-fast ASGI server."
category = "dev"
optional = false
@@ -676,7 +659,7 @@ click = ">=7.0"
h11 = ">=0.8"
[package.extras]
-standard = ["websockets (>=9.1)", "httptools (>=0.2.0,<0.3.0)", "watchgod (>=0.6)", "python-dotenv (>=0.13)", "PyYAML (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "colorama (>=0.4)"]
+standard = ["httptools (>=0.2.0,<0.4.0)", "watchgod (>=0.6)", "python-dotenv (>=0.13)", "PyYAML (>=5.1)", "websockets (>=9.1)", "websockets (>=10.0)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "colorama (>=0.4)"]
[[package]]
name = "werkzeug"
@@ -712,28 +695,28 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytes
[metadata]
lock-version = "1.1"
python-versions = "^3.8"
-content-hash = "a69a3e7dd1e36c45a0599d2b906c24333a604a9aa032c877966fbbfa4689fcb6"
+content-hash = "79a37fff4941466e9150cfb247e061eea26e855c567ab1fa56f8673ecb8784c9"
[metadata.files]
anyio = [
- {file = "anyio-3.3.4-py3-none-any.whl", hash = "sha256:4fd09a25ab7fa01d34512b7249e366cd10358cdafc95022c7ff8c8f8a5026d66"},
- {file = "anyio-3.3.4.tar.gz", hash = "sha256:67da67b5b21f96b9d3d65daa6ea99f5d5282cb09f50eb4456f8fb51dffefc3ff"},
+ {file = "anyio-3.4.0-py3-none-any.whl", hash = "sha256:2855a9423524abcdd652d942f8932fda1735210f77a6b392eafd9ff34d3fe020"},
+ {file = "anyio-3.4.0.tar.gz", hash = "sha256:24adc69309fb5779bc1e06158e143e0b6d2c56b302a3ac3de3083c705a6ed39d"},
]
asgiref = [
{file = "asgiref-3.4.1-py3-none-any.whl", hash = "sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214"},
{file = "asgiref-3.4.1.tar.gz", hash = "sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9"},
]
astroid = [
- {file = "astroid-2.8.6-py3-none-any.whl", hash = "sha256:cd8326b424c971e7d87678609cf6275d22028afd37d6ac59c16d47f1245882f6"},
- {file = "astroid-2.8.6.tar.gz", hash = "sha256:5f6f75e45f15290e73b56f9dfde95b4bf96382284cde406ef4203e928335a495"},
+ {file = "astroid-2.9.0-py3-none-any.whl", hash = "sha256:776ca0b748b4ad69c00bfe0fff38fa2d21c338e12c84aa9715ee0d473c422778"},
+ {file = "astroid-2.9.0.tar.gz", hash = "sha256:5939cf55de24b92bda00345d4d0659d01b3c7dafb5055165c330bc7c568ba273"},
]
attrs = [
{file = "attrs-21.2.0-py2.py3-none-any.whl", hash = "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1"},
{file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"},
]
black = [
- {file = "black-21.11b1-py3-none-any.whl", hash = "sha256:802c6c30b637b28645b7fde282ed2569c0cd777dbe493a41b6a03c1d903f99ac"},
- {file = "black-21.11b1.tar.gz", hash = "sha256:a042adbb18b3262faad5aff4e834ff186bb893f95ba3a8013f09de1e5569def2"},
+ {file = "black-21.12b0-py3-none-any.whl", hash = "sha256:a615e69ae185e08fdd73e4715e260e2479c861b5740057fde6e8b4e3b7dd589f"},
+ {file = "black-21.12b0.tar.gz", hash = "sha256:77b80f693a569e2e527958459634f18df9b0ba2625ba4e0c2d5da5be42e6f2b3"},
]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
@@ -744,8 +727,8 @@ chardet = [
{file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"},
]
charset-normalizer = [
- {file = "charset-normalizer-2.0.7.tar.gz", hash = "sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0"},
- {file = "charset_normalizer-2.0.7-py3-none-any.whl", hash = "sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b"},
+ {file = "charset-normalizer-2.0.9.tar.gz", hash = "sha256:b0b883e8e874edfdece9c28f314e3dd5badf067342e42fb162203335ae61aa2c"},
+ {file = "charset_normalizer-2.0.9-py3-none-any.whl", hash = "sha256:1eecaa09422db5be9e29d7fc65664e6c33bd06f9ced7838578ba40d58bdf3721"},
]
click = [
{file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"},
@@ -815,8 +798,8 @@ dictpath = [
{file = "dictpath-0.1.3.tar.gz", hash = "sha256:751cde3b76b176d25f961b90c423a11a4d5ede9bd09ab0d64a85abb738c190d8"},
]
docutils = [
- {file = "docutils-0.18-py2.py3-none-any.whl", hash = "sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc"},
- {file = "docutils-0.18.tar.gz", hash = "sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb"},
+ {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"},
+ {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"},
]
fastapi = [
{file = "fastapi-0.70.0-py3-none-any.whl", hash = "sha256:a36d5f2fad931aa3575c07a3472c784e81f3e664e3bb5c8b9c88d0ec1104f59c"},
@@ -880,8 +863,8 @@ mccabe = [
{file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
]
more-itertools = [
- {file = "more-itertools-8.11.0.tar.gz", hash = "sha256:0a2fd25d343c08d7e7212071820e7e7ea2f41d8fb45d6bc8a00cd6ce3b7aab88"},
- {file = "more_itertools-8.11.0-py3-none-any.whl", hash = "sha256:88afff98d83d08fe5e4049b81e2b54c06ebb6b3871a600040865c7a592061cbb"},
+ {file = "more-itertools-8.12.0.tar.gz", hash = "sha256:7dc6ad46f05f545f900dd59e8dfb4e84a4827b97b3cfecb175ea0c7d247f6064"},
+ {file = "more_itertools-8.12.0-py3-none-any.whl", hash = "sha256:43e6dd9942dffd72661a2c4ef383ad7da1e6a3e968a927ad7a6083ab410a688b"},
]
mypy = [
{file = "mypy-0.910-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457"},
@@ -971,8 +954,8 @@ pygments = [
{file = "Pygments-2.10.0.tar.gz", hash = "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"},
]
pylint = [
- {file = "pylint-2.11.1-py3-none-any.whl", hash = "sha256:0f358e221c45cbd4dad2a1e4b883e75d28acdcccd29d40c76eb72b307269b126"},
- {file = "pylint-2.11.1.tar.gz", hash = "sha256:2c9843fff1a88ca0ad98a256806c82c5a8f86086e7ccbdb93297d86c3f90c436"},
+ {file = "pylint-2.12.2-py3-none-any.whl", hash = "sha256:daabda3f7ed9d1c60f52d563b1b854632fd90035bcf01443e234d3dc794e3b74"},
+ {file = "pylint-2.12.2.tar.gz", hash = "sha256:9d945a73640e1fec07ee34b42f5669b770c759acd536ec7b16d7e4b87a9c9ff9"},
]
pyrsistent = [
{file = "pyrsistent-0.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"},
@@ -1032,57 +1015,6 @@ pyyaml = [
{file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
{file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
-regex = [
- {file = "regex-2021.11.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9345b6f7ee578bad8e475129ed40123d265464c4cfead6c261fd60fc9de00bcf"},
- {file = "regex-2021.11.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:416c5f1a188c91e3eb41e9c8787288e707f7d2ebe66e0a6563af280d9b68478f"},
- {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0538c43565ee6e703d3a7c3bdfe4037a5209250e8502c98f20fea6f5fdf2965"},
- {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee1227cf08b6716c85504aebc49ac827eb88fcc6e51564f010f11a406c0a667"},
- {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6650f16365f1924d6014d2ea770bde8555b4a39dc9576abb95e3cd1ff0263b36"},
- {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ab804ea73972049b7a2a5c62d97687d69b5a60a67adca07eb73a0ddbc9e29f"},
- {file = "regex-2021.11.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68a067c11463de2a37157930d8b153005085e42bcb7ad9ca562d77ba7d1404e0"},
- {file = "regex-2021.11.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:162abfd74e88001d20cb73ceaffbfe601469923e875caf9118333b1a4aaafdc4"},
- {file = "regex-2021.11.10-cp310-cp310-win32.whl", hash = "sha256:98ba568e8ae26beb726aeea2273053c717641933836568c2a0278a84987b2a1a"},
- {file = "regex-2021.11.10-cp310-cp310-win_amd64.whl", hash = "sha256:780b48456a0f0ba4d390e8b5f7c661fdd218934388cde1a974010a965e200e12"},
- {file = "regex-2021.11.10-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dba70f30fd81f8ce6d32ddeef37d91c8948e5d5a4c63242d16a2b2df8143aafc"},
- {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1f54b9b4b6c53369f40028d2dd07a8c374583417ee6ec0ea304e710a20f80a0"},
- {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fbb9dc00e39f3e6c0ef48edee202f9520dafb233e8b51b06b8428cfcb92abd30"},
- {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666abff54e474d28ff42756d94544cdfd42e2ee97065857413b72e8a2d6a6345"},
- {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5537f71b6d646f7f5f340562ec4c77b6e1c915f8baae822ea0b7e46c1f09b733"},
- {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2e07c6a26ed4bea91b897ee2b0835c21716d9a469a96c3e878dc5f8c55bb23"},
- {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca5f18a75e1256ce07494e245cdb146f5a9267d3c702ebf9b65c7f8bd843431e"},
- {file = "regex-2021.11.10-cp36-cp36m-win32.whl", hash = "sha256:93a5051fcf5fad72de73b96f07d30bc29665697fb8ecdfbc474f3452c78adcf4"},
- {file = "regex-2021.11.10-cp36-cp36m-win_amd64.whl", hash = "sha256:b483c9d00a565633c87abd0aaf27eb5016de23fed952e054ecc19ce32f6a9e7e"},
- {file = "regex-2021.11.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fff55f3ce50a3ff63ec8e2a8d3dd924f1941b250b0aac3d3d42b687eeff07a8e"},
- {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32d2a2b02ccbef10145df9135751abea1f9f076e67a4e261b05f24b94219e36"},
- {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53db2c6be8a2710b359bfd3d3aa17ba38f8aa72a82309a12ae99d3c0c3dcd74d"},
- {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2207ae4f64ad3af399e2d30dde66f0b36ae5c3129b52885f1bffc2f05ec505c8"},
- {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5ca078bb666c4a9d1287a379fe617a6dccd18c3e8a7e6c7e1eb8974330c626a"},
- {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd33eb9bdcfbabab3459c9ee651d94c842bc8a05fabc95edf4ee0c15a072495e"},
- {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05b7d6d7e64efe309972adab77fc2af8907bb93217ec60aa9fe12a0dad35874f"},
- {file = "regex-2021.11.10-cp37-cp37m-win32.whl", hash = "sha256:e71255ba42567d34a13c03968736c5d39bb4a97ce98188fafb27ce981115beec"},
- {file = "regex-2021.11.10-cp37-cp37m-win_amd64.whl", hash = "sha256:07856afef5ffcc052e7eccf3213317fbb94e4a5cd8177a2caa69c980657b3cb4"},
- {file = "regex-2021.11.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba05430e819e58544e840a68b03b28b6d328aff2e41579037e8bab7653b37d83"},
- {file = "regex-2021.11.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7f301b11b9d214f83ddaf689181051e7f48905568b0c7017c04c06dfd065e244"},
- {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aaa4e0705ef2b73dd8e36eeb4c868f80f8393f5f4d855e94025ce7ad8525f50"},
- {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:788aef3549f1924d5c38263104dae7395bf020a42776d5ec5ea2b0d3d85d6646"},
- {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f8af619e3be812a2059b212064ea7a640aff0568d972cd1b9e920837469eb3cb"},
- {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85bfa6a5413be0ee6c5c4a663668a2cad2cbecdee367630d097d7823041bdeec"},
- {file = "regex-2021.11.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f23222527b307970e383433daec128d769ff778d9b29343fb3496472dc20dabe"},
- {file = "regex-2021.11.10-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:da1a90c1ddb7531b1d5ff1e171b4ee61f6345119be7351104b67ff413843fe94"},
- {file = "regex-2021.11.10-cp38-cp38-win32.whl", hash = "sha256:0617383e2fe465732af4509e61648b77cbe3aee68b6ac8c0b6fe934db90be5cc"},
- {file = "regex-2021.11.10-cp38-cp38-win_amd64.whl", hash = "sha256:a3feefd5e95871872673b08636f96b61ebef62971eab044f5124fb4dea39919d"},
- {file = "regex-2021.11.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7f325be2804246a75a4f45c72d4ce80d2443ab815063cdf70ee8fb2ca59ee1b"},
- {file = "regex-2021.11.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:537ca6a3586931b16a85ac38c08cc48f10fc870a5b25e51794c74df843e9966d"},
- {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef2afb0fd1747f33f1ee3e209bce1ed582d1896b240ccc5e2697e3275f037c7"},
- {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:432bd15d40ed835a51617521d60d0125867f7b88acf653e4ed994a1f8e4995dc"},
- {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b43c2b8a330a490daaef5a47ab114935002b13b3f9dc5da56d5322ff218eeadb"},
- {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:962b9a917dd7ceacbe5cd424556914cb0d636001e393b43dc886ba31d2a1e449"},
- {file = "regex-2021.11.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa8c626d6441e2d04b6ee703ef2d1e17608ad44c7cb75258c09dd42bacdfc64b"},
- {file = "regex-2021.11.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3c5fb32cc6077abad3bbf0323067636d93307c9fa93e072771cf9a64d1c0f3ef"},
- {file = "regex-2021.11.10-cp39-cp39-win32.whl", hash = "sha256:3b5df18db1fccd66de15aa59c41e4f853b5df7550723d26aa6cb7f40e5d9da5a"},
- {file = "regex-2021.11.10-cp39-cp39-win_amd64.whl", hash = "sha256:83ee89483672b11f8952b158640d0c0ff02dc43d9cb1b70c1564b49abe92ce29"},
- {file = "regex-2021.11.10.tar.gz", hash = "sha256:f341ee2df0999bfdf7a95e448075effe0db212a59387de1a70690e4acb03d4c6"},
-]
requests = [
{file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"},
{file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"},
@@ -1095,10 +1027,6 @@ robotframework-datadriver = [
{file = "robotframework-datadriver-1.5.0.tar.gz", hash = "sha256:67e14bf8d7c4d370ecaca35879cbf786c3f30382d3ceaf50c6ed81820e8166cf"},
{file = "robotframework_datadriver-1.5.0-py3-none-any.whl", hash = "sha256:58f5009fa2d91fba5cfdefe72927523538e10ecee2657808b927f39b63ef47b9"},
]
-robotframework-pythonlibcore = [
- {file = "robotframework-pythonlibcore-3.0.0.tar.gz", hash = "sha256:1bce3b8dfcb7519789ee3a89320f6402e126f6d0a02794184a1ab8cee0e46b5d"},
- {file = "robotframework_pythonlibcore-3.0.0-py2.py3-none-any.whl", hash = "sha256:af10c2403cd38834988c4ce68ffb6ec6f9b14bd2cd39ecf836d443377c59b7c4"},
-]
robotframework-stacktrace = [
{file = "robotframework-stacktrace-0.4.1.tar.gz", hash = "sha256:e96cb36e7e9ab55104c1f7d3606249a109e0a4c3bb6a0e294bff07d54ee6f6a5"},
{file = "robotframework_stacktrace-0.4.1-py3-none-any.whl", hash = "sha256:018d7a55b99733e64e3cc0b134771b61a47de61de23609ed35c7bf0a53e9290e"},
@@ -1159,19 +1087,20 @@ tomli = [
{file = "tomli-1.2.2.tar.gz", hash = "sha256:c6ce0015eb38820eaf32b5db832dbc26deb3dd427bd5f6556cf0acac2c214fee"},
]
types-requests = [
- {file = "types-requests-2.26.0.tar.gz", hash = "sha256:df5ec8c34b413a42ebb38e4f96bdeb68090b875bdfcc5138dc82989c95445883"},
- {file = "types_requests-2.26.0-py3-none-any.whl", hash = "sha256:809b5dcd3c408ac39d11d593835b6aff32420b3e7ddb79c7f3e823330f040466"},
+ {file = "types-requests-2.26.1.tar.gz", hash = "sha256:0893e112e1510bbb67f537941c92192de7472e51bf7f236e0e583866f0ed933e"},
+ {file = "types_requests-2.26.1-py3-none-any.whl", hash = "sha256:853571b3accc188976c0f4feffcaebf6cdfc170082b5e43f3358aa78de61f531"},
]
typing-extensions = [
- {file = "typing_extensions-4.0.0-py3-none-any.whl", hash = "sha256:829704698b22e13ec9eaf959122315eabb370b0884400e9818334d8b677023d9"},
+ {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"},
+ {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"},
]
urllib3 = [
{file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"},
{file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"},
]
uvicorn = [
- {file = "uvicorn-0.15.0-py3-none-any.whl", hash = "sha256:17f898c64c71a2640514d4089da2689e5db1ce5d4086c2d53699bf99513421c1"},
- {file = "uvicorn-0.15.0.tar.gz", hash = "sha256:d9a3c0dd1ca86728d3e235182683b4cf94cd53a867c288eaeca80ee781b2caff"},
+ {file = "uvicorn-0.16.0-py3-none-any.whl", hash = "sha256:d8c839231f270adaa6d338d525e2652a0b4a5f4c2430b5c4ef6ae4d11776b0d2"},
+ {file = "uvicorn-0.16.0.tar.gz", hash = "sha256:eacb66afa65e0648fcbce5e746b135d09722231ffffc61883d4fac2b62fbea8d"},
]
werkzeug = [
{file = "Werkzeug-2.0.2-py3-none-any.whl", hash = "sha256:63d3dc1cf60e7b7e35e97fa9861f7397283b75d765afcaefd993d6046899de8f"},
diff --git a/pyproject.toml b/pyproject.toml
index a6d6966..37f0d82 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,6 @@ packages = [
[tool.poetry.dependencies]
python = "^3.8"
robotframework = ">=4"
-robotframework-pythonlibcore = ">=3"
robotframework-datadriver = ">=1.5"
requests = "*"
prance = "*"
@@ -94,3 +93,6 @@ disable = ["W1203"]
[tool.pylint.'FORMAT CHECKER']
max-line-length=120
+
+[tool.pylint.'SIMILARITIES CHECKER']
+ignore-imports="yes"
diff --git a/src/OpenApiDriver/dto_base.py b/src/OpenApiDriver/dto_base.py
index 9a00f59..7873627 100644
--- a/src/OpenApiDriver/dto_base.py
+++ b/src/OpenApiDriver/dto_base.py
@@ -17,7 +17,7 @@
NOT_SET = object()
-class ResourceRelation(ABC):
+class ResourceRelation(ABC): # pylint: disable=too-few-public-methods
"""ABC for all resource relations or restrictions within the API."""
property_name: str
@@ -81,7 +81,8 @@ class UniquePropertyValueConstraint(ResourceRelation):
]
-class DtoBase(ABC):
+@dataclass
+class Dto(ABC):
"""Base class for the Dto class."""
@staticmethod
@@ -183,12 +184,3 @@ def get_invalidated_data(
)
return properties
return properties # pragma: no cover
-
-
-@dataclass
-class DataClassMixin:
- """Mixin to add dataclass functionality to an ABC."""
-
-
-class Dto(DataClassMixin, DtoBase):
- """Abstract base class to support custom mappings of resource dependencies."""
diff --git a/src/OpenApiDriver/openapi_executors.py b/src/OpenApiDriver/openapi_executors.py
index 3c51d24..456bfd0 100644
--- a/src/OpenApiDriver/openapi_executors.py
+++ b/src/OpenApiDriver/openapi_executors.py
@@ -1,7 +1,6 @@
"""Module containing the classes to perform automatic OpenAPI contract validation."""
import json as _json
-import sys
from dataclasses import asdict
from enum import Enum
from logging import getLogger
@@ -9,22 +8,23 @@
from random import choice
from typing import Any, Dict, List, Optional, Union
-from openapi_core import create_spec
from openapi_core.contrib.requests import (
RequestsOpenAPIRequest,
RequestsOpenAPIResponse,
)
from openapi_core.templating.paths.exceptions import ServerNotFound
-from openapi_core.validation.response.validators import ResponseValidator
from requests import Response
-from requests.auth import AuthBase, HTTPBasicAuth
+from requests.auth import AuthBase
from robot.api import SkipExecution
from robot.api.deco import keyword, library
from robot.libraries.BuiltIn import BuiltIn
-from robotlibcore import DynamicCore
-from OpenApiDriver.dto_utils import get_dto_class
-from OpenApiDriver.openapi_libcore import OpenApiLibCore, RequestData, resolve_schema
+from OpenApiDriver.openapi_libcore import (
+ OpenApiLibCore,
+ RequestData,
+ RequestValues,
+ resolve_schema,
+)
run_keyword = BuiltIn().run_keyword
@@ -42,13 +42,13 @@ class ValidationLevel(str, Enum):
@library
-class OpenApiExecutors(DynamicCore): # pylint: disable=too-many-instance-attributes
+class OpenApiExecutors(OpenApiLibCore): # pylint: disable=too-many-instance-attributes
"""Main class providing the keywords and core logic to perform endpoint validations."""
ROBOT_LIBRARY_DOC_FORMAT = "ROBOT"
ROBOT_LIBRARY_SCOPE = "TEST SUITE"
- def __init__( # pylint: disable=too-many-arguments, too-many-locals
+ def __init__( # pylint: disable=too-many-arguments
self,
openapi_specification: Dict[str, Any],
origin: str = "",
@@ -63,42 +63,7 @@ def __init__( # pylint: disable=too-many-arguments, too-many-locals
require_body_for_invalid_url: bool = False,
invalid_property_default_response: int = 422,
) -> None:
- self.openapi_spec: Dict[str, Any] = openapi_specification
- validation_spec = create_spec(self.openapi_spec)
- self.response_validator = ResponseValidator(
- spec=validation_spec,
- base_url=base_path,
- )
- self.origin = origin
- self.base_url = f"{self.origin}{base_path}"
- # only username and password, security_token or auth object should be provided
- # if multiple are provided, username and password take precendence
- self.security_token = security_token
- self.auth = auth
- if username and password:
- self.auth = HTTPBasicAuth(username, password)
- self.response_validation = response_validation
- self.disable_server_validation = disable_server_validation
- self.require_body_for_invalid_url = require_body_for_invalid_url
- self.invalid_property_default_response = invalid_property_default_response
- if mappings_path and str(mappings_path) != ".":
- mappings_path = Path(mappings_path)
- if not mappings_path.is_file():
- logger.warning(
- f"mappings_path '{mappings_path}' is not a Python module."
- )
- # intermediate variable to ensure path.append is possible so we'll never
- # path.pop a location that we didn't append
- mappings_folder = str(mappings_path.parent)
- sys.path.append(mappings_folder)
- mappings_module_name = mappings_path.stem
- self.get_dto_class = get_dto_class(
- mappings_module_name=mappings_module_name
- )
- sys.path.pop()
- else:
- self.get_dto_class = get_dto_class(mappings_module_name="no_mapping")
- openapi_libcore = OpenApiLibCore(
+ super().__init__(
openapi_specification=openapi_specification,
origin=origin,
base_path=base_path,
@@ -108,7 +73,10 @@ def __init__( # pylint: disable=too-many-arguments, too-many-locals
security_token=security_token,
auth=auth,
)
- DynamicCore.__init__(self, [openapi_libcore])
+ self.response_validation = response_validation
+ self.disable_server_validation = disable_server_validation
+ self.require_body_for_invalid_url = require_body_for_invalid_url
+ self.invalid_property_default_response = invalid_property_default_response
@keyword
def test_unauthorized(self, endpoint: str, method: str) -> None:
@@ -191,18 +159,17 @@ def test_endpoint(self, endpoint: str, method: str, status_code: int) -> None:
request_data: RequestData = self.get_request_data(
method=method, endpoint=endpoint
)
- dto = request_data.dto
params = request_data.params
headers = request_data.headers
- json_data = asdict(dto)
+ json_data = asdict(request_data.dto)
# when patching, get the original data to check only patched data has changed
if method == "PATCH":
- original_data = self.get_original_data(endpoint=endpoint, url=url)
+ original_data = self.get_original_data(url=url)
# in case of a status code indicating an error, ensure the error occurs
if status_code >= 400:
- data_relations = dto.get_relations_for_error_code(status_code)
- parameter_relations = dto.get_parameter_relations_for_error_code(
- status_code
+ data_relations = request_data.dto.get_relations_for_error_code(status_code)
+ parameter_relations = (
+ request_data.dto.get_parameter_relations_for_error_code(status_code)
)
invalidation_keyword_data = {
"get_invalid_json_data": [
@@ -211,7 +178,7 @@ def test_endpoint(self, endpoint: str, method: str, status_code: int) -> None:
request_data.dto_schema,
url,
method,
- dto,
+ request_data.dto,
status_code,
],
"invalidate_parameters": [
@@ -252,51 +219,56 @@ def test_endpoint(self, endpoint: str, method: str, status_code: int) -> None:
logger.error(
f"No Dto mapping found to cause status_code {status_code}."
)
+ run_keyword(
+ "perform_validated_request",
+ endpoint,
+ status_code,
+ RequestValues(
+ url=url,
+ method=method,
+ params=params,
+ headers=headers,
+ json_data=json_data,
+ ),
+ original_data,
+ )
if status_code < 300 and (
request_data.has_optional_properties
or request_data.has_optional_params
or request_data.has_optional_headers
):
logger.info("Performing request without optional properties and parameters")
- url_ = run_keyword("get_valid_url", endpoint, method)
+ url = run_keyword("get_valid_url", endpoint, method)
request_data = self.get_request_data(method=method, endpoint=endpoint)
- params_ = request_data.get_required_params()
- headers_ = request_data.get_required_headers()
- json_data_ = request_data.get_required_properties_dict()
+ params = request_data.get_required_params()
+ headers = request_data.get_required_headers()
+ json_data = request_data.get_required_properties_dict()
if method == "PATCH":
- original_data_ = self.get_original_data(endpoint=endpoint, url=url_)
+ original_data = self.get_original_data(url=url)
else:
- original_data_ = None
+ original_data = None
run_keyword(
"perform_validated_request",
endpoint,
- method,
status_code,
- url_,
- params_,
- headers_,
- json_data_,
- original_data_,
+ RequestValues(
+ url=url,
+ method=method,
+ params=params,
+ headers=headers,
+ json_data=json_data,
+ ),
+ original_data,
)
- run_keyword(
- "perform_validated_request",
- endpoint,
- method,
- status_code,
- url,
- params,
- headers,
- json_data,
- original_data,
- )
- def get_original_data(self, endpoint: str, url: str) -> Optional[Dict[str, Any]]:
+ def get_original_data(self, url: str) -> Optional[Dict[str, Any]]:
"""
Attempt to GET the current data for the given url and return it.
If the GET request fails, None is returned.
"""
original_data = None
+ endpoint = self.get_parameterized_endpoint_from_url(url)
get_request_data = self.get_request_data(endpoint=endpoint, method="GET")
get_params = get_request_data.params
get_headers = get_request_data.headers
@@ -311,12 +283,8 @@ def get_original_data(self, endpoint: str, url: str) -> Optional[Dict[str, Any]]
def perform_validated_request(
self,
endpoint: str,
- method: str,
status_code: int,
- url: str,
- params: Dict[str, Any],
- headers: Dict[str, str],
- json_data: Dict[str, Any],
+ request_values: RequestValues,
original_data: Optional[Dict[str, Any]] = None,
) -> None:
"""
@@ -325,7 +293,12 @@ def perform_validated_request(
the target resource was indeed deleted (OK response) or not (error responses).
"""
response = run_keyword(
- "authorized_request", url, method, params, headers, json_data
+ "authorized_request",
+ request_values.url,
+ request_values.method,
+ request_values.params,
+ request_values.headers,
+ request_values.json_data,
)
if response.status_code != status_code:
if not response.ok:
@@ -340,37 +313,37 @@ def perform_validated_request(
logger.error(f"Failed to get json body from response: {exception}")
response_json = {}
logger.info(
- f"\nSend: {_json.dumps(json_data, indent=4, sort_keys=True)}"
+ f"\nSend: {_json.dumps(request_values.json_data, indent=4, sort_keys=True)}"
f"\nGot: {_json.dumps(response_json, indent=4, sort_keys=True)}"
)
raise AssertionError(
f"Response status_code {response.status_code} was not {status_code}"
)
run_keyword("validate_response", endpoint, response, original_data)
- if method == "DELETE":
+ if request_values.method == "DELETE":
get_request_data = self.get_request_data(endpoint=endpoint, method="GET")
get_params = get_request_data.params
get_headers = get_request_data.headers
get_response = run_keyword(
- "authorized_request", url, "GET", get_params, get_headers
+ "authorized_request", request_values.url, "GET", get_params, get_headers
)
if response.ok:
if get_response.ok:
raise AssertionError(
- f"Resource still exists after deletion. Url was {url}"
+ f"Resource still exists after deletion. Url was {request_values.url}"
)
# if the endpoint supports GET, 404 is expected, if not 405 is expected
if get_response.status_code not in [404, 405]:
logger.warning(
f"Unexpected response after deleting resource: Status_code "
- f"{get_response.status_code} was received after trying to get {url} "
+ f"{get_response.status_code} was received after trying to get {request_values.url} "
f"after sucessfully deleting it."
)
else:
if not get_response.ok:
raise AssertionError(
f"Resource could not be retrieved after failed deletion. "
- f"Url was {url}, status_code was {get_response.status_code}"
+ f"Url was {request_values.url}, status_code was {get_response.status_code}"
)
@keyword
@@ -394,24 +367,7 @@ def validate_response(
assert not response.content
return None
# validate the response against the schema
- openapi_request = RequestsOpenAPIRequest(response.request)
- openapi_response = RequestsOpenAPIResponse(response)
- validation_result = self.response_validator.validate(
- request=openapi_request,
- response=openapi_response,
- )
- if self.disable_server_validation:
- validation_result.errors = [
- e for e in validation_result.errors if not isinstance(e, ServerNotFound)
- ]
- if self.response_validation == ValidationLevel.STRICT:
- validation_result.raise_for_errors()
- if self.response_validation in [ValidationLevel.WARN, ValidationLevel.INFO]:
- for validation_error in validation_result.errors:
- if self.response_validation == ValidationLevel.WARN:
- logger.warning(validation_error)
- else:
- logger.info(validation_error)
+ self._validate_response_against_spec(response)
request_method = response.request.method
if request_method is None:
@@ -420,6 +376,7 @@ def validate_response(
f"on the request property of the provided response."
)
return None
+
response_spec = self._get_response_spec(
endpoint=endpoint,
method=request_method,
@@ -435,11 +392,12 @@ def validate_response(
f"Content-Type '{response.headers['Content-Type']}' of the response "
f"is not '{content_type}' as specified in the OpenAPI document."
)
- json_response = response.json()
- response_schema = response_spec["content"][content_type]["schema"]
- resolved_schema = resolve_schema(response_schema)
- if list_item_schema := resolved_schema.get("items"):
+ json_response = response.json()
+ response_schema = resolve_schema(
+ response_spec["content"][content_type]["schema"]
+ )
+ if list_item_schema := response_schema.get("items"):
if not isinstance(json_response, list):
raise AssertionError(
f"Response schema violation: the schema specifies an array as "
@@ -461,27 +419,46 @@ def validate_response(
return None
run_keyword(
- "validate_resource_properties", json_response, resolved_schema["properties"]
+ "validate_resource_properties", json_response, response_schema["properties"]
)
# ensure the href is valid if present in the response
if href := json_response.get("href"):
- url = f"{self.origin}{href}"
- endpoint = url.replace(self.base_url, "")
- request_data = self.get_request_data(endpoint=endpoint, method="GET")
- params = request_data.params
- headers = request_data.headers
- get_response = self.authorized_request(
- url=url, method="GET", params=params, headers=headers
- )
- assert (
- get_response.json() == json_response
- ), f"{get_response.json()} not equal to original {json_response}"
+ self._assert_href_is_valid(href, json_response)
# every property that was sucessfully send and that is in the response
# schema must have the value that was send
if response.ok and response.request.method in ["POST", "PUT", "PATCH"]:
run_keyword("validate_send_response", response, original_data)
return None
+ def _assert_href_is_valid(self, href: str, json_response: Dict[str, Any]):
+ url = f"{self.origin}{href}"
+ endpoint = url.replace(self.base_url, "")
+ request_data = self.get_request_data(endpoint=endpoint, method="GET")
+ params = request_data.params
+ headers = request_data.headers
+ get_response = run_keyword("authorized_request", url, "GET", params, headers)
+ assert (
+ get_response.json() == json_response
+ ), f"{get_response.json()} not equal to original {json_response}"
+
+ def _validate_response_against_spec(self, response: Response):
+ validation_result = self.response_validator.validate(
+ request=RequestsOpenAPIRequest(response.request),
+ response=RequestsOpenAPIResponse(response),
+ )
+ if self.disable_server_validation:
+ validation_result.errors = [
+ e for e in validation_result.errors if not isinstance(e, ServerNotFound)
+ ]
+ if self.response_validation == ValidationLevel.STRICT:
+ validation_result.raise_for_errors()
+ if self.response_validation in [ValidationLevel.WARN, ValidationLevel.INFO]:
+ for validation_error in validation_result.errors:
+ if self.response_validation == ValidationLevel.WARN:
+ logger.warning(validation_error)
+ else:
+ logger.info(validation_error)
+
@staticmethod
@keyword
def validate_resource_properties(
diff --git a/src/OpenApiDriver/openapi_libcore.py b/src/OpenApiDriver/openapi_libcore.py
index f89258f..8d43353 100644
--- a/src/OpenApiDriver/openapi_libcore.py
+++ b/src/OpenApiDriver/openapi_libcore.py
@@ -3,7 +3,7 @@
import json as _json
import sys
from copy import deepcopy
-from dataclasses import asdict, dataclass, field, Field, make_dataclass
+from dataclasses import Field, asdict, dataclass, field, make_dataclass
from itertools import zip_longest
from logging import getLogger
from pathlib import Path
@@ -70,6 +70,17 @@ def merge_schemas(first: Dict[str, Any], second: Dict[str, Any]) -> Dict[str, An
return merged_schema
+@dataclass
+class RequestValues:
+ """Helper class to hold parameter values needed to make a request."""
+
+ url: str
+ method: str
+ params: Optional[Dict[str, Any]]
+ headers: Optional[Dict[str, str]]
+ json_data: Optional[Dict[str, Any]]
+
+
@dataclass
class RequestData:
"""Helper class to manage parameters used when making requests."""
@@ -82,12 +93,16 @@ class RequestData:
@property
def has_optional_properties(self) -> bool:
+ """Whether or not the dto data (json data) contains optional properties."""
properties = asdict(self.dto).keys()
- in_required_func: Callable[[str], bool] = lambda x: x in self.dto_schema.get("required", [])
+ in_required_func: Callable[[str], bool] = lambda x: x in self.dto_schema.get(
+ "required", []
+ )
return not all(map(in_required_func, properties))
@property
def has_optional_params(self) -> bool:
+ """Whether or not any of the query parameters are optional."""
optional_params = [
p.get("name")
for p in self.parameters
@@ -98,6 +113,7 @@ def has_optional_params(self) -> bool:
@property
def has_optional_headers(self) -> bool:
+ """Whether or not any of the headers are optional."""
optional_headers = [
p.get("name")
for p in self.parameters
@@ -107,6 +123,7 @@ def has_optional_headers(self) -> bool:
return any(map(in_optional_headers, self.headers))
def get_required_properties_dict(self) -> Dict[str, Any]:
+ """Get the json-compatible dto data containing only the required properties."""
required_properties = self.dto_schema.get("required", [])
required_properties_dict: Dict[str, Any] = {}
for key, value in asdict(self.dto).items():
@@ -115,12 +132,14 @@ def get_required_properties_dict(self) -> Dict[str, Any]:
return required_properties_dict
def get_required_params(self) -> Dict[str, str]:
+ """Get the params dict containing only the required query parameters."""
required_parameters = [
p.get("name") for p in self.parameters if p.get("required")
]
return {k: v for k, v in self.params.items() if k in required_parameters}
def get_required_headers(self) -> Dict[str, str]:
+ """Get the headers dict containing only the required headers."""
required_parameters = [
p.get("name") for p in self.parameters if p.get("required")
]
@@ -225,42 +244,20 @@ def get_valid_id_for_endpoint(self, endpoint: str, method: str) -> str:
# Try to create a new resource to prevent conflicts caused by
# operations performed on the same resource by other test cases
request_data = self.get_request_data(endpoint=endpoint, method="POST")
- params = request_data.params
- headers = request_data.headers
- dto = request_data.dto
- try:
- json_data = asdict(dto)
- response: Response = run_keyword(
- "authorized_request", url, "POST", params, headers, json_data
- )
- except NotImplementedError as exception:
- logger.debug(f"get_valid_id_for_endpoint POST failed: {exception}")
+
+ response: Response = run_keyword(
+ "authorized_request",
+ url,
+ "POST",
+ request_data.get_required_params(),
+ request_data.get_required_headers(),
+ request_data.get_required_properties_dict(),
+ )
+ if response.status_code == 405:
# For endpoints that do no support POST, try to get an existing id using GET
try:
- request_data = self.get_request_data(endpoint=endpoint, method="GET")
- params = request_data.params
- headers = request_data.headers
- response = run_keyword(
- "authorized_request", url, "GET", params, headers
- )
- assert response.ok
- response_data: Union[
- Dict[str, Any], List[Dict[str, Any]]
- ] = response.json()
- if isinstance(response_data, list):
- valid_ids: List[str] = [item["id"] for item in response_data]
- logger.debug(
- f"get_valid_id_for_endpoint: returning choice from list {valid_ids}"
- )
- return choice(valid_ids)
- if valid_id := response_data.get("id"):
- logger.debug(f"get_valid_id_for_endpoint: returning {valid_id}")
- return valid_id
- valid_ids = [item["id"] for item in response_data["items"]]
- logger.debug(
- f"get_valid_id_for_endpoint: returning choice from items {valid_ids}"
- )
- return choice(valid_ids)
+ valid_id = choice(run_keyword("get_ids_for_endpoint", url))
+ return valid_id
except Exception as exception:
logger.debug(
f"Failed to get a valid id using GET on {url}"
@@ -301,6 +298,31 @@ def get_valid_id_for_endpoint(self, endpoint: str, method: str) -> str:
valid_id = response_data["id"]
return valid_id
+ @keyword
+ def get_ids_for_endpoint(self, url: str) -> List[str]:
+ """
+ Perform a GET request on the `url` and return the list of resource
+ `ids` from the response.
+ """
+ endpoint = self.get_parameterized_endpoint_from_url(url)
+ request_data = self.get_request_data(endpoint=endpoint, method="GET")
+ response = run_keyword(
+ "authorized_request",
+ url,
+ "GET",
+ request_data.get_required_params(),
+ request_data.get_required_headers(),
+ )
+ assert response.ok
+ response_data: Union[Dict[str, Any], List[Dict[str, Any]]] = response.json()
+ if isinstance(response_data, list):
+ valid_ids: List[str] = [item["id"] for item in response_data]
+ return valid_ids
+ if valid_id := response_data.get("id"):
+ return [valid_id]
+ valid_ids = [item["id"] for item in response_data["items"]]
+ return valid_ids
+
@keyword
def get_request_data(self, endpoint: str, method: str) -> RequestData:
"""Return an object with valid request data for body, headers and query params."""
@@ -308,21 +330,18 @@ def get_request_data(self, endpoint: str, method: str) -> RequestData:
# The endpoint can contain already resolved Ids that have to be matched
# against the parametrized endpoints in the paths section.
spec_endpoint = self.get_parametrized_endpoint(endpoint)
+ dto_class = self.get_dto_class(endpoint=spec_endpoint, method=method)
try:
method_spec = self.openapi_spec["paths"][spec_endpoint][method]
- except KeyError as exception:
- raise NotImplementedError(
- f"method '{method}' not suported on '{spec_endpoint}"
- ) from exception
- dto_class = self.get_dto_class(endpoint=spec_endpoint, method=method)
-
- parameters = method_spec.get("parameters", [])
- parameter_relations = dto_class.get_parameter_relations()
- query_params = [p for p in parameters if p.get("in") == "query"]
- header_params = [p for p in parameters if p.get("in") == "header"]
- params = self.get_parameter_data(query_params, parameter_relations)
- headers = self.get_parameter_data(header_params, parameter_relations)
+ except KeyError:
+ logger.warning(
+ f"method '{method}' not suported on '{spec_endpoint}, using empty spec."
+ )
+ method_spec = {}
+ parameters, params, headers = self.get_request_parameters(
+ dto_class=dto_class, method_spec=method_spec
+ )
if (body_spec := method_spec.get("requestBody", None)) is None:
if dto_class == DefaultDto:
dto_instance: Dto = DefaultDto()
@@ -339,29 +358,16 @@ def get_request_data(self, endpoint: str, method: str) -> RequestData:
params=params,
headers=headers,
)
- # Content should be a single key/value entry, so use tuple assignment
- (content_type,) = body_spec["content"].keys()
- if content_type != "application/json":
- # At present no supported for other types.
- raise NotImplementedError(f"content_type '{content_type}' not supported")
- content_schema = body_spec["content"][content_type]["schema"]
- # TODO: is resolve_schema still needed?
- resolved_schema: Dict[str, Any] = resolve_schema(content_schema)
+ content_schema = self.get_content_schema(body_spec)
dto_data = self.get_json_data_for_dto_class(
- schema=resolved_schema,
+ schema=content_schema,
dto_class=dto_class,
operation_id=method_spec.get("operationId"),
)
if dto_data is None:
dto_instance = DefaultDto()
else:
- fields: List[Union[str, Tuple[str, type], Tuple[str, type, Optional[Field[Any]]]]] = []
- for key, value in dto_data.items():
- required_properties = resolved_schema.get("required", [])
- if key in required_properties:
- fields.append((key, type(value)))
- else:
- fields.append((key, type(value), field(default=None)))
+ fields = self.get_fields_from_dto_data(content_schema, dto_data)
dto_class = make_dataclass(
cls_name=method_spec["operationId"],
fields=fields,
@@ -370,12 +376,51 @@ def get_request_data(self, endpoint: str, method: str) -> RequestData:
dto_instance = dto_class(**dto_data) # type: ignore[call-arg]
return RequestData(
dto=dto_instance,
- dto_schema=resolved_schema,
+ dto_schema=content_schema,
parameters=parameters,
params=params,
headers=headers,
)
+ @staticmethod
+ def get_fields_from_dto_data(
+ content_schema: Dict[str, Any], dto_data: Dict[str, Any]
+ ):
+ """Get a dataclasses fields list based on the content_schema and dto_data."""
+ fields: List[Union[str, Tuple[str, type], Tuple[str, type, Field[Any]]]] = []
+ for key, value in dto_data.items():
+ required_properties = content_schema.get("required", [])
+ if key in required_properties:
+ fields.append((key, type(value)))
+ else:
+ fields.append((key, type(value), field(default=None))) # type: ignore[arg-type]
+ return fields
+
+ def get_request_parameters(
+ self, dto_class: Union[Dto, Type[Dto]], method_spec: Dict[str, Any]
+ ) -> Tuple[List[Dict[str, Any]], Dict[str, Any], Dict[str, str]]:
+ """Get the methods parameter spec and params and headers with valid data."""
+ parameters = method_spec.get("parameters", [])
+ parameter_relations = dto_class.get_parameter_relations()
+ query_params = [p for p in parameters if p.get("in") == "query"]
+ header_params = [p for p in parameters if p.get("in") == "header"]
+ params = self.get_parameter_data(query_params, parameter_relations)
+ headers = self.get_parameter_data(header_params, parameter_relations)
+ return parameters, params, headers
+
+ @staticmethod
+ def get_content_schema(body_spec: Dict[str, Any]):
+ """Get the content schema from the requestBody spec."""
+ # Content should be a single key/value entry, so use tuple assignment
+ (content_type,) = body_spec["content"].keys()
+ if content_type != "application/json":
+ # At present no supported for other types.
+ raise NotImplementedError(f"content_type '{content_type}' not supported")
+ content_schema = body_spec["content"][content_type]["schema"]
+ # TODO: is resolve_schema still needed?
+ resolved_schema: Dict[str, Any] = resolve_schema(content_schema)
+ return resolved_schema
+
def get_parametrized_endpoint(self, endpoint: str) -> str:
"""
Get the parametrized endpoint as found in the `paths` section of the openapi
@@ -511,11 +556,7 @@ def get_invalidated_url(self, valid_url: str) -> Optional[str]:
Return an url with all the path parameters in the `valid_url` replaced by a
random UUID. If the `valid_url` does not contain any parameters, None is returned.
"""
- endpoint = valid_url.replace(self.base_url, "")
- endpoint_parts = endpoint.split("/")
- # first part will be '' since an endpoint starts with /
- endpoint_parts.pop(0)
- parameterized_endpoint = self.get_parametrized_endpoint(endpoint=endpoint)
+ parameterized_endpoint = self.get_parameterized_endpoint_from_url(valid_url)
parameterized_url = self.base_url + parameterized_endpoint
valid_url_parts = list(reversed(valid_url.split("/")))
parameterized_parts = reversed(parameterized_url.split("/"))
@@ -530,6 +571,18 @@ def get_invalidated_url(self, valid_url: str) -> Optional[str]:
# TODO: add support for header / query parameters that can be invalidated
return None
+ @keyword
+ def get_parameterized_endpoint_from_url(self, url: str):
+ """
+ Return the endpoint as found in the `paths` section based on the given `url`.
+ """
+ endpoint = url.replace(self.base_url, "")
+ endpoint_parts = endpoint.split("/")
+ # first part will be '' since an endpoint starts with /
+ endpoint_parts.pop(0)
+ parameterized_endpoint = self.get_parametrized_endpoint(endpoint=endpoint)
+ return parameterized_endpoint
+
@keyword
def get_invalid_json_data(
self,
@@ -652,16 +705,20 @@ def ensure_in_use(self, url: str, resource_relation: IdReference) -> None:
resource_id = endpoint_parts[-1]
else:
resource_id = ""
- post_endpoint = resource_relation.post_path
- property_name = resource_relation.property_name
- request_data = self.get_request_data(method="POST", endpoint=post_endpoint)
+ request_data = self.get_request_data(
+ method="POST", endpoint=resource_relation.post_path
+ )
params = request_data.params
headers = request_data.headers
dto = request_data.dto
json_data = asdict(dto)
if resource_id:
- json_data[property_name] = resource_id
- post_url: str = run_keyword("get_valid_url", post_endpoint, "POST")
+ json_data[resource_relation.property_name] = resource_id
+ post_url: str = run_keyword(
+ "get_valid_url",
+ resource_relation.post_path,
+ "POST",
+ )
response: Response = run_keyword(
"authorized_request", post_url, "POST", params, headers, json_data
)
@@ -705,10 +762,13 @@ def get_json_data_with_conflict(
post_json = json_data
endpoint = post_url.replace(self.base_url, "")
request_data = self.get_request_data(endpoint=endpoint, method="POST")
- params = request_data.params
- headers = request_data.headers
response: Response = run_keyword(
- "authorized_request", post_url, "POST", params, headers, post_json
+ "authorized_request",
+ post_url,
+ "POST",
+ request_data.params,
+ request_data.headers,
+ post_json,
)
# conflicting resource may already exist
assert (
@@ -725,8 +785,8 @@ def authorized_request(
url: str,
method: str,
params: Optional[Dict[str, Any]] = None,
- headers: Optional[Dict[str, Any]] = None,
- json: Optional[Dict[str, Any]] = None,
+ headers: Optional[Dict[str, str]] = None,
+ json_data: Optional[Dict[str, Any]] = None,
) -> Response:
"""
Perform a request using the security token or authentication set in the library.
@@ -744,7 +804,7 @@ def authorized_request(
method=method,
params=params,
headers=headers,
- json=json,
+ json=json_data,
auth=self.auth,
verify=False,
)
diff --git a/src/OpenApiDriver/openapidriver.py b/src/OpenApiDriver/openapidriver.py
index be13ad2..63729f2 100644
--- a/src/OpenApiDriver/openapidriver.py
+++ b/src/OpenApiDriver/openapidriver.py
@@ -137,7 +137,6 @@
from requests.auth import AuthBase
from robot.api.deco import library
from robot.libraries.BuiltIn import BuiltIn
-from robotlibcore import DynamicCore
from OpenApiDriver.openapi_executors import OpenApiExecutors, ValidationLevel
from OpenApiDriver.openapi_reader import OpenApiReader
@@ -149,7 +148,7 @@
@library
-class OpenApiDriver(DataDriver, DynamicCore):
+class OpenApiDriver(DataDriver, OpenApiExecutors):
# region: docstring
"""
Visit the [https://github.com/MarketSquare/robotframework-openapidriver | library page]
@@ -171,7 +170,7 @@ class OpenApiDriver(DataDriver, DynamicCore):
ROBOT_LIBRARY_DOC_FORMAT = "ROBOT"
ROBOT_LIBRARY_SCOPE = "TEST SUITE"
- def __init__( # pylint: disable=too-many-arguments
+ def __init__( # pylint: disable=too-many-arguments, too-many-locals
self,
source: str,
ignored_endpoints: Optional[List[str]] = None,
@@ -293,7 +292,8 @@ def __init__( # pylint: disable=too-many-arguments
)
mappings_path = Path(mappings_path).as_posix()
- openapi_executors = OpenApiExecutors(
+ OpenApiExecutors.__init__(
+ self,
openapi_specification=openapi_spec,
origin=origin,
base_path=base_path,
@@ -307,7 +307,6 @@ def __init__( # pylint: disable=too-many-arguments
require_body_for_invalid_url=require_body_for_invalid_url,
invalid_property_default_response=invalid_property_default_response,
)
- DynamicCore.__init__(self, [openapi_executors])
# FIXME: Hack to allow directly loading the OpenApiReader - remove when DataDriver
# accepts an AbstractReaderClass subclass as reader_class argument
| Investigate: DynamicCore is most likely not needed
Due to @keyword decorator usage, direct subclassing will probably be a more direct solution.
| 2021-12-13T10:16:23 | 0.0 | [] | [] |
|||
nvidia-riva/riva-asrlib-decoder | nvidia-riva__riva-asrlib-decoder-11 | 22d917b25ddaca8ecac3a003d3dab1508552f6ef | diff --git a/Dockerfile b/Dockerfile
index b4a01ba..95dafe6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,6 +13,7 @@ FROM ${FROM_IMAGE_NAME}
ENV PATH=/usr/local/cuda/bin:${PATH}
ENV NVIDIA_DRIVER_CAPABILITIES=video,compute,utility
+ENV NVIDIA_VISIBLE_DEVICES=all
# CUDA
COPY --from=cuda /usr/local/cuda /usr/local/cuda
diff --git a/include/riva/asrlib/decoder/batched-mapped-decoder-cuda.h b/include/riva/asrlib/decoder/batched-mapped-decoder-cuda.h
index 338a61e..5a4f60b 100644
--- a/include/riva/asrlib/decoder/batched-mapped-decoder-cuda.h
+++ b/include/riva/asrlib/decoder/batched-mapped-decoder-cuda.h
@@ -48,9 +48,7 @@ class BatchedMappedDecoderCuda {
std::size_t logits_n_input_frames_valid,
// this should not be copied by reference. It should be passed as an
// lvalue or an rvalue because it is always copied.
- const std::function<void(std::tuple<
- std::optional<kaldi::CompactLattice>,
- std::optional<kaldi::cuda_decoder::CTMResult>>&)>& callback);
+ const BatchedMappedOnlineDecoderCuda::LatticeCallback& callback);
void ComputeTasks();
void AcquireTasks();
@@ -96,10 +94,7 @@ class BatchedMappedDecoderCuda {
std::size_t logits_frame_stride;
std::size_t logits_n_input_frames_valid;
CorrelationID corr_id;
- std::function<void(
- std::tuple<
- std::optional<kaldi::CompactLattice>, std::optional<kaldi::cuda_decoder::CTMResult>>&)>
- callback;
+ BatchedMappedOnlineDecoderCuda::LatticeCallback callback;
std::size_t loglikes_time_offset = std::size_t(0);
};
diff --git a/include/riva/asrlib/decoder/batched-mapped-online-decoder-cuda.h b/include/riva/asrlib/decoder/batched-mapped-online-decoder-cuda.h
index cf0812f..964d6c9 100644
--- a/include/riva/asrlib/decoder/batched-mapped-online-decoder-cuda.h
+++ b/include/riva/asrlib/decoder/batched-mapped-online-decoder-cuda.h
@@ -20,6 +20,7 @@
#include <cudadecoder/cuda-decoder-common.h>
#include <cudadecoder/cuda-decoder.h>
#include <cudadecoder/lattice-postprocessor.h>
+#include <optional>
#include <fstext/lattice-utils.h>
#include <itf/options-itf.h>
#include <itf/transition-information.h>
@@ -117,9 +118,11 @@ struct BatchedMappedOnlineDecoderCudaConfig {
class BatchedMappedOnlineDecoderCuda {
public:
using CorrelationID = uint64_t;
- using LatticeCallback = std::function<void(
- std::tuple<
- std::optional<kaldi::CompactLattice>, std::optional<kaldi::cuda_decoder::CTMResult>>&)>;
+ using ReturnType =
+ std::tuple<std::optional<kaldi::CompactLattice>,
+ std::optional<kaldi::cuda_decoder::CTMResult>,
+ std::optional<std::vector<kaldi::cuda_decoder::NBestResult>>>;
+ using LatticeCallback = std::function<void(ReturnType&)>;
BatchedMappedOnlineDecoderCuda(
const BatchedMappedOnlineDecoderCudaConfig& config, const fst::Fst<fst::StdArc>& decode_fst,
std::unique_ptr<kaldi::TransitionInformation>&& trans_information)
@@ -160,11 +163,10 @@ class BatchedMappedOnlineDecoderCuda {
if (num_worker_threads > 0) {
thread_pool_ = std::make_unique<kaldi::futures_thread_pool>(num_worker_threads);
}
- // TODO: Uncomment this
- // if (config_.num_decoder_copy_threads > 0) {
- // cuda_decoder_->SetThreadPoolAndStartCPUWorkers(
- // thread_pool_.get(), config_.num_decoder_copy_threads);
- // }
+ if (config_.num_decoder_copy_threads > 0) {
+ cuda_decoder_->SetThreadPoolAndStartCPUWorkers(
+ thread_pool_.get(), config_.num_decoder_copy_threads);
+ }
}
~BatchedMappedOnlineDecoderCuda()
@@ -196,7 +198,6 @@ class BatchedMappedOnlineDecoderCuda {
channels = &channels_;
}
ListIChannelsInBatch(corr_ids, &channels_);
- // KALDI_LOG << "GALVEZ:CHANNELS:" << channels_[0];
std::vector<int> list_channels_first_chunk;
for (std::size_t i = 0; i < is_first_chunk.size(); ++i) {
@@ -348,6 +349,7 @@ class BatchedMappedOnlineDecoderCuda {
private:
+ // could use this as an opportunity to clear saved variables for the channel
void FinalizeDecoding(int32 ichannel, const LatticeCallback* callback)
{
kaldi::Lattice lat;
@@ -373,17 +375,17 @@ class BatchedMappedOnlineDecoderCuda {
// this is wasteful, since GetCTM calls GetPostprocessedLattice
// but doesn't return the result.
- // okay, so I am basically doing both here...
- std::tuple<std::optional<kaldi::CompactLattice>, std::optional<kaldi::cuda_decoder::CTMResult>>
- result;
+ ReturnType result;
if (config_.use_lattice_postprocessor) {
kaldi::CompactLattice clat;
lattice_postprocessor_->GetPostprocessedLattice(dlat, &clat);
kaldi::cuda_decoder::CTMResult ctm;
+ // is this okay? I am modifying the refernece twice...
lattice_postprocessor_->GetCTM(dlat, &ctm);
- result = {std::make_optional(clat), std::make_optional(ctm)};
+ std::vector<kaldi::cuda_decoder::NBestResult> nbest = lattice_postprocessor_->GetNBestList(dlat);
+ result = {std::make_optional(clat), std::make_optional(ctm), std::make_optional(nbest)};
} else {
- result = {std::make_optional(dlat), std::nullopt};
+ result = {std::make_optional(dlat), std::nullopt, std::nullopt};
}
// if ptr set and if callback func callable
@@ -395,14 +397,6 @@ class BatchedMappedOnlineDecoderCuda {
n_lattice_callbacks_not_done_.fetch_sub(1, std::memory_order_release);
}
- // static wrapper for thread pool
- static void FinalizeDecodingWrapper(void* obj, uint64_t ichannel64, void* callback_ptr)
- {
- int32 ichannel = static_cast<int32>(ichannel64);
- const LatticeCallback* callback = static_cast<const LatticeCallback*>(callback_ptr);
- static_cast<BatchedMappedOnlineDecoderCuda*>(obj)->FinalizeDecoding(ichannel, callback);
- }
-
void ListIChannelsInBatch(const std::vector<CorrelationID>& corr_ids, std::vector<int>* channels)
{
channels->clear();
diff --git a/pyproject.toml b/pyproject.toml
index 174cc43..27127fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,7 @@ environment = 'RIVA_ASRLIB_DECODER_CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release" CUDAA
# it uses setup.py, not pyproject.toml
before-test = "pip install Cython"
test-command = "pytest {project}/src/riva/asrlib/decoder/test_decoder.py"
-test-requires = ["pytest", "kaldi-io", "more-itertools", "nemo_toolkit[asr]"]
-# fails because onnx doesn't provide python3.6 wheels: https://pypi.org/project/onnx/#files
-test-skip = "cp36-manylinux_x86_64"
+test-requires = ["pytest", "kaldi-io", "more-itertools", "nemo_toolkit[asr]", "torchaudio"]
+# 3.6 fails because onnx doesn't provide python3.6 wheels: https://pypi.org/project/onnx/#files
+# 3.11 fails becasue of numba: Cannot install on Python version 3.11.1; only versions >=3.7,<3.11 are supported
+test-skip = "{cp36-manylinux_x86_64,cp311-manylinux_x86_64}"
diff --git a/setup.py b/setup.py
index 11b42af..b24ad40 100644
--- a/setup.py
+++ b/setup.py
@@ -95,7 +95,7 @@ def build_extension(self, ext: setuptools.extension.Extension):
setuptools.setup(
python_requires='>=3.7',
name='riva-asrlib-decoder',
- version='0.1.0',
+ version='0.2.0',
author='NVIDIA',
author_email='[email protected]',
keywords='ASR, CUDA, WFST, Decoder',
@@ -132,6 +132,7 @@ def build_extension(self, ext: setuptools.extension.Extension):
"kaldi-io",
"more-itertools",
"nemo_toolkit[asr]",
+ "torchaudio",
]
},
zip_safe=False,
diff --git a/src/riva/asrlib/decoder/batched-mapped-decoder-cuda.cc b/src/riva/asrlib/decoder/batched-mapped-decoder-cuda.cc
index f1b1871..d6bdc04 100644
--- a/src/riva/asrlib/decoder/batched-mapped-decoder-cuda.cc
+++ b/src/riva/asrlib/decoder/batched-mapped-decoder-cuda.cc
@@ -43,9 +43,7 @@ BatchedMappedDecoderCuda::~BatchedMappedDecoderCuda()
void
BatchedMappedDecoderCuda::DecodeWithCallback(
const float* d_logits, std::size_t logits_frame_stride, std::size_t logits_n_input_frames_valid,
- const std::function<void(std::tuple<
- std::optional<kaldi::CompactLattice>,
- std::optional<kaldi::cuda_decoder::CTMResult>>&)>& callback)
+ const BatchedMappedOnlineDecoderCuda::LatticeCallback& callback)
{
UtteranceTask task;
// at 5000 files/s, expected to overflow in ~116 million years
@@ -81,9 +79,7 @@ BatchedMappedDecoderCuda::AcquireTasks()
auto& callback = task.callback;
cuda_online_pipeline_.SetLatticeCallback(
- task.corr_id, [this, callback](std::tuple<
- std::optional<kaldi::CompactLattice>,
- std::optional<kaldi::cuda_decoder::CTMResult>>& result) {
+ task.corr_id, [this, callback](BatchedMappedOnlineDecoderCuda::ReturnType& result) {
if (callback)
callback(result);
n_tasks_not_done_.fetch_sub(1, std::memory_order_release);
diff --git a/src/riva/asrlib/decoder/bin/offline-cuda-decode-binary.cc b/src/riva/asrlib/decoder/bin/offline-cuda-decode-binary.cc
index 6c46b55..fd38e22 100644
--- a/src/riva/asrlib/decoder/bin/offline-cuda-decode-binary.cc
+++ b/src/riva/asrlib/decoder/bin/offline-cuda-decode-binary.cc
@@ -150,9 +150,7 @@ main(int argc, char** argv)
// must copy the captured variables. Otherwise, they will be lost.
auto callback = [key, d_loglikes, &output_writer_m, &clat_writer, &ctm_output_path,
&word_syms = cuda_pipeline.GetSymbolTable()](
- std::tuple<
- std::optional<kaldi::CompactLattice>,
- std::optional<kaldi::cuda_decoder::CTMResult>>& result) {
+ BatchedMappedOnlineDecoderCuda::ReturnType& result) {
CU_SAFE_CALL(cudaFree(d_loglikes));
if (std::get<0>(result).has_value()) {
diff --git a/src/riva/asrlib/decoder/python_decoder.cc b/src/riva/asrlib/decoder/python_decoder.cc
index d66b635..96ed8ec 100644
--- a/src/riva/asrlib/decoder/python_decoder.cc
+++ b/src/riva/asrlib/decoder/python_decoder.cc
@@ -261,9 +261,7 @@ PybindBatchedMappedDecoderCuda(py::module& m)
// stride of each row is stride. Always greater than number of cols
auto write_results =
[i, &clat_writer, &keys](
- std::tuple<
- std::optional<kaldi::CompactLattice>,
- std::optional<kaldi::cuda_decoder::CTMResult>>& asr_results) {
+ riva::asrlib::BatchedMappedOnlineDecoderCuda::ReturnType& asr_results) {
const kaldi::CompactLattice& lattice = std::get<0>(asr_results).value();
clat_writer.Write(keys[i], lattice);
};
@@ -278,10 +276,10 @@ PybindBatchedMappedDecoderCuda(py::module& m)
pyclass.def(
- "decode",
+ "decode_mbr",
[](PyClass& cuda_pipeline, const DLManagedTensor* managed_logits,
const DLManagedTensor* managed_logits_lengths)
- -> std::vector<std::vector<std::tuple<std::string, float, float>>> {
+ -> std::vector<std::vector<std::tuple<std::string, float, float, float>>> {
// contiguousness might not mean what I think it means. It may just mean
// stride has no padding.
@@ -313,7 +311,7 @@ PybindBatchedMappedDecoderCuda(py::module& m)
}
// logits should be batch x time x logits
int64_t batch_size = logits_lengths.shape[0];
- std::vector<std::vector<std::tuple<std::string, float, float>>> results(batch_size);
+ std::vector<std::vector<std::tuple<std::string, float, float, float>>> results(batch_size);
for (int64_t i = 0; i < batch_size; ++i) {
int64_t valid_time_steps = index<int64_t>(logits_lengths, i);
@@ -324,15 +322,14 @@ PybindBatchedMappedDecoderCuda(py::module& m)
// stride of each row is stride. Always greater than number of cols
auto place_results =
[i, &results, &word_syms = cuda_pipeline.GetSymbolTable()](
- std::tuple<
- std::optional<kaldi::CompactLattice>,
- std::optional<kaldi::cuda_decoder::CTMResult>>& asr_results) {
+ riva::asrlib::BatchedMappedOnlineDecoderCuda::ReturnType& asr_results) {
const kaldi::cuda_decoder::CTMResult& ctm_result = std::get<1>(asr_results).value();
for (size_t iword = 0; iword < ctm_result.times_seconds.size(); ++iword) {
results[i].emplace_back(
word_syms.Find(ctm_result.words[iword]),
ctm_result.times_seconds[iword].first,
- ctm_result.times_seconds[iword].second);
+ ctm_result.times_seconds[iword].second,
+ ctm_result.conf[iword]);
}
};
cuda_pipeline.DecodeWithCallback(
@@ -347,6 +344,75 @@ PybindBatchedMappedDecoderCuda(py::module& m)
cuda_pipeline.WaitForAllTasks();
return results;
});
+
+ pyclass.def("decode_map",
+ [](PyClass& cuda_pipeline, const DLManagedTensor* managed_logits,
+ const DLManagedTensor* managed_logits_lengths, size_t n)
+ -> std::vector<std::vector<std::tuple<std::string, float, float, float>>>
+ {
+ const DLTensor& logits = managed_logits->dl_tensor;
+ const DLTensor& logits_lengths = managed_logits_lengths->dl_tensor;
+ if (logits.ndim != 3) {
+ throw std::invalid_argument("Expected a 3D logits tensor");
+ }
+ if (logits.dtype.code != kDLFloat || logits.dtype.bits != 32 || logits.dtype.lanes != 1) {
+ throw std::invalid_argument("Expected a float32 logits tensor");
+ }
+ // TODO: Consider setting device id based on this... Could use RAII like
+ // in K2 for that.
+ if (logits.device.device_type != kDLCUDA) {
+ throw std::invalid_argument("Expected logits tensor to be on GPU");
+ }
+ if (logits_lengths.ndim != 1) {
+ throw std::invalid_argument("Expected a 1D logits lengths tensor");
+ }
+ if (logits_lengths.dtype.code != kDLInt || logits_lengths.dtype.bits != 64 ||
+ logits_lengths.dtype.lanes != 1) {
+ throw std::invalid_argument("Expected a 64-bit signed integer logits lengths tensor");
+ }
+ if (logits_lengths.device.device_type != kDLCPU) {
+ throw std::invalid_argument("Expected lengths tensor to be on CPU");
+ }
+ // logits should be batch x time x logits
+ int64_t batch_size = logits_lengths.shape[0];
+ std::vector<std::vector<std::tuple<std::string, float, float, float>>> results(batch_size);
+ for (int64_t i = 0; i < batch_size; ++i) {
+ int64_t valid_time_steps = index<int64_t>(logits_lengths, i);
+
+ // this may not be right... Yes, it seems quite wrong...
+ const float* single_sample_logits_start = address<float>(logits, i, 0);
+ // number of rows is number of frames
+ // number of cols is number of logits
+ // stride of each row is stride. Always greater than number of cols
+ auto place_results =
+ [i, &results, &word_syms = cuda_pipeline.GetSymbolTable()](
+ riva::asrlib::BatchedMappedOnlineDecoderCuda::ReturnType& asr_results) {
+ const std::vector<kaldi::cuda_decoder::NBestResult>& nbest_results = std::get<2>(asr_results).value();
+ std::vector<
+ std::tuple<float,
+ std::vector<std::string>,
+ std::vector<std::pair<float, float>>
+ >> result_this_utt;
+ for (const kaldi::cuda_decoder::NBestResult& nbest_result: nbest_results) {
+ std::vector<std::string> words; words.reserve(nbest_result.words.size());
+ for (auto&& word_id: nbest_result.words) {
+ words.emplace_back(word_syms.Find(word_id));
+ }
+ result_this_utt.emplace_back(nbest_result.score, words, std::move(nbest_result.times_seconds));
+ }
+ };
+ cuda_pipeline.DecodeWithCallback(
+ single_sample_logits_start,
+ // single_sample_logits.data_ptr<float>(),
+ stride(logits, 1),
+ // single_sample_logits.stride(1),
+ index<int64_t>(logits_lengths, i),
+ // size(0)
+ place_results);
+ }
+ cuda_pipeline.WaitForAllTasks();
+ return results;
+});
}
} // anonymous namespace
| Error building from source.
Hi @galv, I have the intention to plug the `k2` HLG decoding graph into your amazing decoding algorithm. So I tried to build `riva-asrlib-decoder` from source and here is my environment detected during configuration:
```
$ cmake -DRIVA_ASRLIB_BUILD_PYTHON_BINDINGS=NO ..
-- The CUDA compiler identification is NVIDIA 11.7.99
-- The CXX compiler identification is GNU 11.3.0
-- Detecting CUDA compiler ABI info
-- Detecting CUDA compiler ABI info - done
-- Check for working CUDA compiler: /usr/local/cuda/bin/nvcc - skipped
-- Detecting CUDA compile features
-- Detecting CUDA compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE
-- Found CUDAToolkit: /usr/local/cuda/include (found version "11.7.99")
-- Configuring done
-- Generating done
-- Build files have been written to: /home/english_k2/riva-asrlib-decoder/build
```
Now the building process was successful until it reached `batched-mapped-decoder-cuda` compilation, where it failed with errors. The stack trace is long, so I attach the log file.
[build_log.txt](https://github.com/nvidia-riva/riva-asrlib-decoder/files/10326635/build_log.txt)
Thanks in advance for the help.
| 2023-01-05T00:52:17 | 0.0 | [] | [] |
|||
nvidia-riva/riva-asrlib-decoder | nvidia-riva__riva-asrlib-decoder-4 | 77f08769736aafabd8422a05e6692b81ffb5a618 | diff --git a/.gitmodules b/.gitmodules
index 251d074..d727864 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
[submodule "third_party/kaldi"]
path = third_party/kaldi
- url = https://gitlab-master.nvidia.com/belgarten/kaldi.git
+ url = https://github.com/galv/kaldi.git
[submodule "third_party/pybind11"]
path = third_party/pybind11
url = https://github.com/pybind/pybind11.git
diff --git a/third_party/kaldi b/third_party/kaldi
index 6e633c9..b3163d3 160000
--- a/third_party/kaldi
+++ b/third_party/kaldi
@@ -1,1 +1,1 @@
-Subproject commit 6e633c9a3885be849970f76ae0f032a30e6effe5
+Subproject commit b3163d3a2e40287c4fc39fbabc9c05c87e974f4c
| Accuracy difference
Hey Hi,
I am trying to use Riva decoder as a substitution for k2 decoder. With riva decoder I am not getting the same accuracy as that of k2 decoder. There are lot more deletions, at least 12%. Experimented with different hypereparamters like acoustic_scale, max_active_states, but result seems to be not changing much. I have tried with the different topologies as well(eesen, compact), its the same case with all of them. Can you please help in this regard.
| @kbramhendra Thanks for the report. I've passed this on to our team internally.
@kbramhendra what value have you set for "max_expand"?
We have it set to 10 here:
https://github.com/nvidia-riva/riva-asrlib-decoder/blob/77f08769736aafabd8422a05e6692b81ffb5a618/src/riva/asrlib/decoder/test_graph_construction.py#L170
I have noticed that disabling this (setting it to 0) does improve WER. However, I am not certain where it is your issue based on what you have told me.
This is to account for an explosion in the state space of a depth first search in "word alignment" algorithm that can happen in rare circumstances.
Now, word alignment isn't necessary, strictly speaking, so I could consider disabling it, but I am still looking into what the "right" solution is here. | 2022-10-19T23:16:15 | 0.0 | [] | [] |
||
iamDecode/sklearn-pmml-model | iamDecode__sklearn-pmml-model-51 | 5eba881ca454a4d798f495eed8a3ae028fdc6061 | diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 5f74118..c439ebb 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -29,7 +29,7 @@ jobs:
python-version: '3.8'
- name: Build wheels
- uses: pypa/[email protected]
+ uses: pypa/[email protected]
env:
CIBW_ARCHS: ${{ matrix.arch }}
diff --git a/README.md b/README.md
index 542fcc4..3b3a9ee 100644
--- a/README.md
+++ b/README.md
@@ -50,8 +50,9 @@ from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn_pmml_model.ensemble import PMMLForestClassifier
+from sklearn_pmml_model.auto_detect import auto_detect_estimator
-# Prepare data
+# Prepare the data
iris = load_iris()
X = pd.DataFrame(iris.data)
X.columns = np.array(iris.feature_names)
@@ -59,7 +60,13 @@ y = pd.Series(np.array(iris.target_names)[iris.target])
y.name = "Class"
Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.33, random_state=123)
-clf = PMMLForestClassifier(pmml="models/randomForest.pmml")
+# Specify the model type for the least overhead...
+#clf = PMMLForestClassifier(pmml="models/randomForest.pmml")
+
+# ...or simply let the library auto-detect the model type
+clf = auto_detect_estimator(pmml="models/randomForest.pmml")
+
+# Use the model as any other scikit-learn model
clf.predict(Xte)
clf.score(Xte, yte)
```
diff --git a/models/nn-pima-regression.pmml b/models/nn-pima-regression.pmml
new file mode 100644
index 0000000..039ac0e
--- /dev/null
+++ b/models/nn-pima-regression.pmml
@@ -0,0 +1,1510 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<PMML xmlns="http://www.dmg.org/PMML-4_4" xmlns:data="http://jpmml.org/jpmml-model/InlineTable" version="4.4">
+ <Header>
+ <Application name="SkLearn2PMML package" version="0.92.0"/>
+ <Timestamp>2023-10-31T15:25:46Z</Timestamp>
+ </Header>
+ <MiningBuildTask>
+ <Extension name="repr">PMMLPipeline(steps=[('regressor', MLPRegressor())])</Extension>
+ </MiningBuildTask>
+ <DataDictionary>
+ <DataField name="type" optype="continuous" dataType="double"/>
+ <DataField name="npreg" optype="continuous" dataType="double"/>
+ <DataField name="glu" optype="continuous" dataType="double"/>
+ <DataField name="bp" optype="continuous" dataType="double"/>
+ <DataField name="skin" optype="continuous" dataType="double"/>
+ <DataField name="bmi" optype="continuous" dataType="double"/>
+ <DataField name="ped" optype="continuous" dataType="double"/>
+ <DataField name="age(20,30]" optype="continuous" dataType="double"/>
+ <DataField name="age(30,40]" optype="continuous" dataType="double"/>
+ <DataField name="age(40,50]" optype="continuous" dataType="double"/>
+ <DataField name="age(50,60]" optype="continuous" dataType="double"/>
+ <DataField name="age(60,70]" optype="continuous" dataType="double"/>
+ </DataDictionary>
+ <NeuralNetwork functionName="regression" algorithmName="sklearn.neural_network._multilayer_perceptron.MLPRegressor" activationFunction="rectifier">
+ <MiningSchema>
+ <MiningField name="type" usageType="target"/>
+ <MiningField name="npreg"/>
+ <MiningField name="glu"/>
+ <MiningField name="bp"/>
+ <MiningField name="skin"/>
+ <MiningField name="bmi"/>
+ <MiningField name="ped"/>
+ <MiningField name="age(20,30]"/>
+ <MiningField name="age(30,40]"/>
+ <MiningField name="age(40,50]"/>
+ <MiningField name="age(50,60]"/>
+ <MiningField name="age(60,70]"/>
+ </MiningSchema>
+ <NeuralInputs>
+ <NeuralInput id="input/1">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="npreg"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/2">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="glu"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/3">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="bp"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/4">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="skin"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/5">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="bmi"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/6">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="ped"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/7">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="age(20,30]"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/8">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="age(30,40]"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/9">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="age(40,50]"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/10">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="age(50,60]"/>
+ </DerivedField>
+ </NeuralInput>
+ <NeuralInput id="input/11">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="age(60,70]"/>
+ </DerivedField>
+ </NeuralInput>
+ </NeuralInputs>
+ <NeuralLayer>
+ <Neuron id="1/1" bias="0.00265891011911612">
+ <Con from="input/1" weight="1.8118638013714125E-7"/>
+ <Con from="input/2" weight="-7.028517769019742E-4"/>
+ <Con from="input/3" weight="0.0015090262886212598"/>
+ <Con from="input/4" weight="-0.0011070809542408377"/>
+ <Con from="input/5" weight="-0.062281374983865795"/>
+ <Con from="input/6" weight="-0.026272661149706653"/>
+ <Con from="input/7" weight="0.05782812853048599"/>
+ <Con from="input/8" weight="0.019273804606863953"/>
+ <Con from="input/9" weight="1.1255112778132209E-4"/>
+ <Con from="input/10" weight="0.034936183419060275"/>
+ <Con from="input/11" weight="0.008646204364837408"/>
+ </Neuron>
+ <Neuron id="1/2" bias="0.02612234301441615">
+ <Con from="input/1" weight="-0.1642454623220169"/>
+ <Con from="input/2" weight="0.12597735281597822"/>
+ <Con from="input/3" weight="0.1222249190672387"/>
+ <Con from="input/4" weight="0.1024538134545668"/>
+ <Con from="input/5" weight="-0.19372036138346413"/>
+ <Con from="input/6" weight="0.06048384708197899"/>
+ <Con from="input/7" weight="0.12117347402922408"/>
+ <Con from="input/8" weight="-0.12945203456396875"/>
+ <Con from="input/9" weight="0.2508614192471043"/>
+ <Con from="input/10" weight="-0.0827354796032743"/>
+ <Con from="input/11" weight="0.04628799571724669"/>
+ </Neuron>
+ <Neuron id="1/3" bias="-0.15171862593642854">
+ <Con from="input/1" weight="-0.08865759657549953"/>
+ <Con from="input/2" weight="-0.10731239326805937"/>
+ <Con from="input/3" weight="0.011822498001876725"/>
+ <Con from="input/4" weight="0.029637269743059223"/>
+ <Con from="input/5" weight="0.22785504523085082"/>
+ <Con from="input/6" weight="0.09825510886185819"/>
+ <Con from="input/7" weight="0.043668907106465975"/>
+ <Con from="input/8" weight="-0.02270543401757993"/>
+ <Con from="input/9" weight="0.17869903907849188"/>
+ <Con from="input/10" weight="-0.010298619585091748"/>
+ <Con from="input/11" weight="9.306024989434273E-7"/>
+ </Neuron>
+ <Neuron id="1/4" bias="0.008431065660907677">
+ <Con from="input/1" weight="-0.03955395435760275"/>
+ <Con from="input/2" weight="-6.434987514145467E-4"/>
+ <Con from="input/3" weight="-0.0036523240579218732"/>
+ <Con from="input/4" weight="-0.0018662650218135052"/>
+ <Con from="input/5" weight="-0.023875099985382994"/>
+ <Con from="input/6" weight="-8.816129422915128E-4"/>
+ <Con from="input/7" weight="0.08380689477454731"/>
+ <Con from="input/8" weight="-0.016744905970132035"/>
+ <Con from="input/9" weight="0.018986577596777868"/>
+ <Con from="input/10" weight="-0.008752887258550765"/>
+ <Con from="input/11" weight="0.07460039484935461"/>
+ </Neuron>
+ <Neuron id="1/5" bias="0.18331781921756982">
+ <Con from="input/1" weight="0.0657402269635581"/>
+ <Con from="input/2" weight="0.13330437392303524"/>
+ <Con from="input/3" weight="0.09617270581867053"/>
+ <Con from="input/4" weight="0.15598809813285772"/>
+ <Con from="input/5" weight="-0.16942375433206086"/>
+ <Con from="input/6" weight="-0.1358097695940833"/>
+ <Con from="input/7" weight="-0.2262249320083651"/>
+ <Con from="input/8" weight="0.17686410835554697"/>
+ <Con from="input/9" weight="0.016759959780329268"/>
+ <Con from="input/10" weight="0.14639822804185965"/>
+ <Con from="input/11" weight="-0.0903571983108445"/>
+ </Neuron>
+ <Neuron id="1/6" bias="0.11192302492891218">
+ <Con from="input/1" weight="0.011279266553650528"/>
+ <Con from="input/2" weight="-0.04588939426009265"/>
+ <Con from="input/3" weight="-7.441741725354109E-7"/>
+ <Con from="input/4" weight="-0.03989203724958411"/>
+ <Con from="input/5" weight="-0.019317921354923173"/>
+ <Con from="input/6" weight="0.008746526771694879"/>
+ <Con from="input/7" weight="0.08408286358359222"/>
+ <Con from="input/8" weight="-0.0077930035540997155"/>
+ <Con from="input/9" weight="-0.021942339177948127"/>
+ <Con from="input/10" weight="0.042481692793532394"/>
+ <Con from="input/11" weight="0.01424786267000579"/>
+ </Neuron>
+ <Neuron id="1/7" bias="0.05292217780668152">
+ <Con from="input/1" weight="0.1491413184083045"/>
+ <Con from="input/2" weight="0.1692252698226989"/>
+ <Con from="input/3" weight="0.13241351713190946"/>
+ <Con from="input/4" weight="-0.14383778806500774"/>
+ <Con from="input/5" weight="-0.04683357558182852"/>
+ <Con from="input/6" weight="-0.17676399524758"/>
+ <Con from="input/7" weight="0.07327840443923929"/>
+ <Con from="input/8" weight="-0.05016489577984695"/>
+ <Con from="input/9" weight="0.16099090310982125"/>
+ <Con from="input/10" weight="-0.024379789192842213"/>
+ <Con from="input/11" weight="-0.16383634995458218"/>
+ </Neuron>
+ <Neuron id="1/8" bias="0.13898730257327088">
+ <Con from="input/1" weight="0.07325351312606214"/>
+ <Con from="input/2" weight="-0.17294267162877835"/>
+ <Con from="input/3" weight="0.1665294171811274"/>
+ <Con from="input/4" weight="0.21484508296304064"/>
+ <Con from="input/5" weight="0.191800669943735"/>
+ <Con from="input/6" weight="0.06503747469837437"/>
+ <Con from="input/7" weight="0.013994182740511084"/>
+ <Con from="input/8" weight="0.06618548258995689"/>
+ <Con from="input/9" weight="0.05783525305697089"/>
+ <Con from="input/10" weight="-0.17006988865795902"/>
+ <Con from="input/11" weight="-0.14722119104025064"/>
+ </Neuron>
+ <Neuron id="1/9" bias="-0.11345953382221485">
+ <Con from="input/1" weight="-0.1827855902348362"/>
+ <Con from="input/2" weight="0.15246450936233166"/>
+ <Con from="input/3" weight="-0.024440433788136887"/>
+ <Con from="input/4" weight="0.09681688103216667"/>
+ <Con from="input/5" weight="-0.01681974992027676"/>
+ <Con from="input/6" weight="0.11838954207864126"/>
+ <Con from="input/7" weight="-0.18122295216487472"/>
+ <Con from="input/8" weight="-0.20300153704894347"/>
+ <Con from="input/9" weight="0.02241618638734302"/>
+ <Con from="input/10" weight="0.19992758977785785"/>
+ <Con from="input/11" weight="0.05718609571745748"/>
+ </Neuron>
+ <Neuron id="1/10" bias="-0.07904053855068999">
+ <Con from="input/1" weight="0.011519607517324034"/>
+ <Con from="input/2" weight="-0.10255485595206476"/>
+ <Con from="input/3" weight="0.2017840893058596"/>
+ <Con from="input/4" weight="-0.13907686826934343"/>
+ <Con from="input/5" weight="0.029216667027531944"/>
+ <Con from="input/6" weight="-0.14447465520797312"/>
+ <Con from="input/7" weight="0.20110082041503893"/>
+ <Con from="input/8" weight="0.010611442252075571"/>
+ <Con from="input/9" weight="0.15232911426848034"/>
+ <Con from="input/10" weight="-0.05407564041311819"/>
+ <Con from="input/11" weight="-0.19738453590609292"/>
+ </Neuron>
+ <Neuron id="1/11" bias="-0.13264348179419952">
+ <Con from="input/1" weight="-0.03723375973719748"/>
+ <Con from="input/2" weight="-0.03580804469044255"/>
+ <Con from="input/3" weight="-3.534307430430517E-7"/>
+ <Con from="input/4" weight="-1.486297677591271E-7"/>
+ <Con from="input/5" weight="-0.03787997203962979"/>
+ <Con from="input/6" weight="0.027795067810747476"/>
+ <Con from="input/7" weight="-9.939960476352345E-4"/>
+ <Con from="input/8" weight="-0.002399462675631708"/>
+ <Con from="input/9" weight="-1.4576652291777706E-5"/>
+ <Con from="input/10" weight="2.8241420493041307E-7"/>
+ <Con from="input/11" weight="0.06089989129872425"/>
+ </Neuron>
+ <Neuron id="1/12" bias="0.2093984260925891">
+ <Con from="input/1" weight="-0.002011173825103898"/>
+ <Con from="input/2" weight="-0.06992292117075972"/>
+ <Con from="input/3" weight="0.044430305233003384"/>
+ <Con from="input/4" weight="-9.394699436725945E-6"/>
+ <Con from="input/5" weight="-0.07556832382165875"/>
+ <Con from="input/6" weight="0.05198106046711025"/>
+ <Con from="input/7" weight="-0.05439450775189672"/>
+ <Con from="input/8" weight="-8.300859113046713E-4"/>
+ <Con from="input/9" weight="0.007899548098431304"/>
+ <Con from="input/10" weight="-0.004531300672859034"/>
+ <Con from="input/11" weight="3.5904257581170083E-6"/>
+ </Neuron>
+ <Neuron id="1/13" bias="0.12861787841847533">
+ <Con from="input/1" weight="0.11240580868193839"/>
+ <Con from="input/2" weight="0.029274691215399593"/>
+ <Con from="input/3" weight="0.053613877654463994"/>
+ <Con from="input/4" weight="-0.17721751802456429"/>
+ <Con from="input/5" weight="0.17739546313396046"/>
+ <Con from="input/6" weight="0.14240256791185743"/>
+ <Con from="input/7" weight="0.09380402953270517"/>
+ <Con from="input/8" weight="0.027126853681312867"/>
+ <Con from="input/9" weight="0.03177237181578323"/>
+ <Con from="input/10" weight="0.062459047800570676"/>
+ <Con from="input/11" weight="0.0322381051012958"/>
+ </Neuron>
+ <Neuron id="1/14" bias="0.1407890469915643">
+ <Con from="input/1" weight="-0.053656246252465664"/>
+ <Con from="input/2" weight="-0.076036710605903"/>
+ <Con from="input/3" weight="0.12475956801860673"/>
+ <Con from="input/4" weight="0.016074241247598618"/>
+ <Con from="input/5" weight="-0.09435187766967733"/>
+ <Con from="input/6" weight="-0.11704703795979453"/>
+ <Con from="input/7" weight="0.14571008781046677"/>
+ <Con from="input/8" weight="0.04842513921767673"/>
+ <Con from="input/9" weight="0.136336753054262"/>
+ <Con from="input/10" weight="0.04599397932536757"/>
+ <Con from="input/11" weight="0.02900002064724381"/>
+ </Neuron>
+ <Neuron id="1/15" bias="-0.21042736916059435">
+ <Con from="input/1" weight="2.606621339506302E-4"/>
+ <Con from="input/2" weight="-0.08594664484394972"/>
+ <Con from="input/3" weight="0.011536011084124838"/>
+ <Con from="input/4" weight="0.0763651219660663"/>
+ <Con from="input/5" weight="-0.015249421814662295"/>
+ <Con from="input/6" weight="-0.005424704992291749"/>
+ <Con from="input/7" weight="0.024006538266562516"/>
+ <Con from="input/8" weight="0.02869969242899469"/>
+ <Con from="input/9" weight="5.84606578376406E-7"/>
+ <Con from="input/10" weight="-2.669203127364709E-4"/>
+ <Con from="input/11" weight="0.001042288204437937"/>
+ </Neuron>
+ <Neuron id="1/16" bias="0.09622882004694495">
+ <Con from="input/1" weight="0.2233485978388461"/>
+ <Con from="input/2" weight="0.1118585591951811"/>
+ <Con from="input/3" weight="-0.05522395633526845"/>
+ <Con from="input/4" weight="0.21724374178718905"/>
+ <Con from="input/5" weight="-0.06672906942664548"/>
+ <Con from="input/6" weight="-0.1965163025771502"/>
+ <Con from="input/7" weight="-0.026178983286066743"/>
+ <Con from="input/8" weight="0.06381471678307474"/>
+ <Con from="input/9" weight="-0.03604546271071288"/>
+ <Con from="input/10" weight="-0.13089369438860654"/>
+ <Con from="input/11" weight="-0.15764061636045681"/>
+ </Neuron>
+ <Neuron id="1/17" bias="0.05048493774934995">
+ <Con from="input/1" weight="-1.2358938220135445E-4"/>
+ <Con from="input/2" weight="-0.08158328727391766"/>
+ <Con from="input/3" weight="-0.05582662583957741"/>
+ <Con from="input/4" weight="0.06485111453339866"/>
+ <Con from="input/5" weight="1.2936551318506044E-4"/>
+ <Con from="input/6" weight="3.105131710992821E-6"/>
+ <Con from="input/7" weight="1.6960643024849716E-5"/>
+ <Con from="input/8" weight="0.012860544224956697"/>
+ <Con from="input/9" weight="-0.014422724853066357"/>
+ <Con from="input/10" weight="-0.07594499077255756"/>
+ <Con from="input/11" weight="-0.08224579423508707"/>
+ </Neuron>
+ <Neuron id="1/18" bias="-0.029727170124587282">
+ <Con from="input/1" weight="-0.10802413344925843"/>
+ <Con from="input/2" weight="0.12301164680290329"/>
+ <Con from="input/3" weight="-0.10750424859886304"/>
+ <Con from="input/4" weight="-0.2297208687867011"/>
+ <Con from="input/5" weight="-0.19180063103331757"/>
+ <Con from="input/6" weight="0.21859767152252085"/>
+ <Con from="input/7" weight="-0.03914599957380683"/>
+ <Con from="input/8" weight="-0.009029697155894183"/>
+ <Con from="input/9" weight="0.018454237329376105"/>
+ <Con from="input/10" weight="-7.86009076373428E-6"/>
+ <Con from="input/11" weight="0.04079237408504789"/>
+ </Neuron>
+ <Neuron id="1/19" bias="0.1495925921467923">
+ <Con from="input/1" weight="0.059125996761878824"/>
+ <Con from="input/2" weight="0.21059129737518612"/>
+ <Con from="input/3" weight="-0.12420504455293595"/>
+ <Con from="input/4" weight="0.10065175474096988"/>
+ <Con from="input/5" weight="0.01034166366314762"/>
+ <Con from="input/6" weight="0.09150876002390043"/>
+ <Con from="input/7" weight="0.20304534551561568"/>
+ <Con from="input/8" weight="-0.09284350432240324"/>
+ <Con from="input/9" weight="-0.12713504651688673"/>
+ <Con from="input/10" weight="0.16994980229685464"/>
+ <Con from="input/11" weight="0.05987429086008883"/>
+ </Neuron>
+ <Neuron id="1/20" bias="-0.19140843493016557">
+ <Con from="input/1" weight="0.05122275093827667"/>
+ <Con from="input/2" weight="-0.01880969317377156"/>
+ <Con from="input/3" weight="0.0334375004855692"/>
+ <Con from="input/4" weight="-0.07286535659421346"/>
+ <Con from="input/5" weight="-0.08397485174668051"/>
+ <Con from="input/6" weight="0.08214176768147301"/>
+ <Con from="input/7" weight="-0.007802774204103769"/>
+ <Con from="input/8" weight="0.01479811730704628"/>
+ <Con from="input/9" weight="0.06031293045077707"/>
+ <Con from="input/10" weight="0.014673502459694522"/>
+ <Con from="input/11" weight="4.84592480088334E-4"/>
+ </Neuron>
+ <Neuron id="1/21" bias="0.18694295605439185">
+ <Con from="input/1" weight="-0.021350506581250317"/>
+ <Con from="input/2" weight="-0.005755264209639761"/>
+ <Con from="input/3" weight="-0.061499747467076905"/>
+ <Con from="input/4" weight="-0.0623719432282052"/>
+ <Con from="input/5" weight="0.021930413203432853"/>
+ <Con from="input/6" weight="0.08181292196047918"/>
+ <Con from="input/7" weight="0.022198243177371242"/>
+ <Con from="input/8" weight="0.025178412646371284"/>
+ <Con from="input/9" weight="7.732688333504336E-4"/>
+ <Con from="input/10" weight="-0.08311094563998099"/>
+ <Con from="input/11" weight="7.793096582706583E-6"/>
+ </Neuron>
+ <Neuron id="1/22" bias="0.04121525440125413">
+ <Con from="input/1" weight="0.12467185359115429"/>
+ <Con from="input/2" weight="0.15427448751827488"/>
+ <Con from="input/3" weight="-0.031740398529933336"/>
+ <Con from="input/4" weight="-0.0029679677257977138"/>
+ <Con from="input/5" weight="-0.14713458472784408"/>
+ <Con from="input/6" weight="0.08891290404080633"/>
+ <Con from="input/7" weight="-0.034186971768268654"/>
+ <Con from="input/8" weight="0.20318932029617462"/>
+ <Con from="input/9" weight="-0.08672653814773593"/>
+ <Con from="input/10" weight="0.009614434267752008"/>
+ <Con from="input/11" weight="0.016788575913343522"/>
+ </Neuron>
+ <Neuron id="1/23" bias="-0.1150875022143237">
+ <Con from="input/1" weight="-0.07443203490499159"/>
+ <Con from="input/2" weight="-5.285835307510086E-7"/>
+ <Con from="input/3" weight="-0.031733195567180815"/>
+ <Con from="input/4" weight="-0.08271934041805339"/>
+ <Con from="input/5" weight="-0.06284379824460196"/>
+ <Con from="input/6" weight="3.5787306123490255E-4"/>
+ <Con from="input/7" weight="-0.00229811863451435"/>
+ <Con from="input/8" weight="3.506983325407396E-4"/>
+ <Con from="input/9" weight="0.0566384482025502"/>
+ <Con from="input/10" weight="5.063369642884269E-4"/>
+ <Con from="input/11" weight="0.03999201424378799"/>
+ </Neuron>
+ <Neuron id="1/24" bias="-0.2207929097131104">
+ <Con from="input/1" weight="-0.02493139686438293"/>
+ <Con from="input/2" weight="1.3655346990134E-4"/>
+ <Con from="input/3" weight="-3.303735434967655E-4"/>
+ <Con from="input/4" weight="-5.336264652597448E-4"/>
+ <Con from="input/5" weight="-0.06739081810372632"/>
+ <Con from="input/6" weight="-7.390652597384036E-6"/>
+ <Con from="input/7" weight="0.002262812284406575"/>
+ <Con from="input/8" weight="-0.06491229104639726"/>
+ <Con from="input/9" weight="-8.200282758625663E-6"/>
+ <Con from="input/10" weight="-0.005681572540037902"/>
+ <Con from="input/11" weight="-0.0016186342382720595"/>
+ </Neuron>
+ <Neuron id="1/25" bias="-0.22457437506764732">
+ <Con from="input/1" weight="-0.15306152379991816"/>
+ <Con from="input/2" weight="0.08647772828479618"/>
+ <Con from="input/3" weight="0.026316421787528122"/>
+ <Con from="input/4" weight="0.08079464335529118"/>
+ <Con from="input/5" weight="-0.10030068880214663"/>
+ <Con from="input/6" weight="0.010094841918788894"/>
+ <Con from="input/7" weight="-0.16715748893627438"/>
+ <Con from="input/8" weight="-0.13686719808918568"/>
+ <Con from="input/9" weight="-0.026554603777290513"/>
+ <Con from="input/10" weight="-0.15701832593945644"/>
+ <Con from="input/11" weight="0.15610705334310382"/>
+ </Neuron>
+ <Neuron id="1/26" bias="0.06065900086718071">
+ <Con from="input/1" weight="0.023301678864102546"/>
+ <Con from="input/2" weight="-0.02440644576137243"/>
+ <Con from="input/3" weight="1.468105281876273E-6"/>
+ <Con from="input/4" weight="-0.07757337020228203"/>
+ <Con from="input/5" weight="-0.012419191268328831"/>
+ <Con from="input/6" weight="-0.0024619734095135486"/>
+ <Con from="input/7" weight="-0.036133095486309136"/>
+ <Con from="input/8" weight="1.0659209827194382E-7"/>
+ <Con from="input/9" weight="-0.0029479510346678327"/>
+ <Con from="input/10" weight="-0.03156047325576637"/>
+ <Con from="input/11" weight="-0.0022867723296917763"/>
+ </Neuron>
+ <Neuron id="1/27" bias="-0.10208548263122774">
+ <Con from="input/1" weight="-1.4766436900908348E-6"/>
+ <Con from="input/2" weight="-0.01377882353610863"/>
+ <Con from="input/3" weight="-0.009959174876287927"/>
+ <Con from="input/4" weight="3.1533190615503597E-7"/>
+ <Con from="input/5" weight="2.6570865450606547E-4"/>
+ <Con from="input/6" weight="5.352363841931812E-6"/>
+ <Con from="input/7" weight="0.0157885240451424"/>
+ <Con from="input/8" weight="-3.8477468070514355E-7"/>
+ <Con from="input/9" weight="-1.2438477997597587E-5"/>
+ <Con from="input/10" weight="0.02733455836667517"/>
+ <Con from="input/11" weight="0.002198367192222496"/>
+ </Neuron>
+ <Neuron id="1/28" bias="-0.15842645254038193">
+ <Con from="input/1" weight="0.09277537232650711"/>
+ <Con from="input/2" weight="-0.1418581267966331"/>
+ <Con from="input/3" weight="-0.0017452900946857914"/>
+ <Con from="input/4" weight="0.10341745581812879"/>
+ <Con from="input/5" weight="0.18957605115370496"/>
+ <Con from="input/6" weight="-0.1769373426279225"/>
+ <Con from="input/7" weight="0.029823369742811346"/>
+ <Con from="input/8" weight="-0.15342694778979463"/>
+ <Con from="input/9" weight="-0.015858154530509087"/>
+ <Con from="input/10" weight="-1.1919358826988075E-5"/>
+ <Con from="input/11" weight="-0.007680450012961898"/>
+ </Neuron>
+ <Neuron id="1/29" bias="-0.165382017055262">
+ <Con from="input/1" weight="-0.24856984738068108"/>
+ <Con from="input/2" weight="0.06217769657346213"/>
+ <Con from="input/3" weight="0.18731607862235655"/>
+ <Con from="input/4" weight="0.21207973452018541"/>
+ <Con from="input/5" weight="0.043639236084350594"/>
+ <Con from="input/6" weight="0.17928614368184045"/>
+ <Con from="input/7" weight="-0.0673944226899035"/>
+ <Con from="input/8" weight="0.167173016702692"/>
+ <Con from="input/9" weight="-0.1932678421077979"/>
+ <Con from="input/10" weight="-0.08439195282004475"/>
+ <Con from="input/11" weight="-0.06708548740624957"/>
+ </Neuron>
+ <Neuron id="1/30" bias="-0.16107807227682952">
+ <Con from="input/1" weight="0.10609987594189269"/>
+ <Con from="input/2" weight="0.028952513471000425"/>
+ <Con from="input/3" weight="0.1714590224646558"/>
+ <Con from="input/4" weight="-0.1274067303879318"/>
+ <Con from="input/5" weight="0.0737417327903896"/>
+ <Con from="input/6" weight="0.19300700443193078"/>
+ <Con from="input/7" weight="-0.11799416932298674"/>
+ <Con from="input/8" weight="-0.07035289391135763"/>
+ <Con from="input/9" weight="0.1544570743701765"/>
+ <Con from="input/10" weight="-0.10701697821219446"/>
+ <Con from="input/11" weight="0.1569525872745163"/>
+ </Neuron>
+ <Neuron id="1/31" bias="0.16321754745734793">
+ <Con from="input/1" weight="-0.14736167670259584"/>
+ <Con from="input/2" weight="-0.009885286259267797"/>
+ <Con from="input/3" weight="0.21844908066367516"/>
+ <Con from="input/4" weight="-0.11204249146204501"/>
+ <Con from="input/5" weight="0.197211055207723"/>
+ <Con from="input/6" weight="-0.08865000422568736"/>
+ <Con from="input/7" weight="0.1460407655691322"/>
+ <Con from="input/8" weight="0.0445138483602739"/>
+ <Con from="input/9" weight="-0.24435082736290614"/>
+ <Con from="input/10" weight="0.20578968634388883"/>
+ <Con from="input/11" weight="0.009870529644698221"/>
+ </Neuron>
+ <Neuron id="1/32" bias="-0.22041333441016212">
+ <Con from="input/1" weight="0.007644375500396112"/>
+ <Con from="input/2" weight="-0.05120195133738646"/>
+ <Con from="input/3" weight="-0.08884844061497196"/>
+ <Con from="input/4" weight="0.24653572725943265"/>
+ <Con from="input/5" weight="0.13623682460867215"/>
+ <Con from="input/6" weight="-0.23934978934612533"/>
+ <Con from="input/7" weight="0.19673058679776598"/>
+ <Con from="input/8" weight="-0.0703655812821117"/>
+ <Con from="input/9" weight="-0.008944188172214792"/>
+ <Con from="input/10" weight="-0.01862880364930727"/>
+ <Con from="input/11" weight="3.8341616066151565E-7"/>
+ </Neuron>
+ <Neuron id="1/33" bias="-0.14646231719643177">
+ <Con from="input/1" weight="0.01892906273206611"/>
+ <Con from="input/2" weight="0.03424081564854529"/>
+ <Con from="input/3" weight="0.16590755265152551"/>
+ <Con from="input/4" weight="-0.11999005665946641"/>
+ <Con from="input/5" weight="0.033753876534838606"/>
+ <Con from="input/6" weight="-0.22168401701591844"/>
+ <Con from="input/7" weight="-0.0800499121023605"/>
+ <Con from="input/8" weight="-0.11424784431125522"/>
+ <Con from="input/9" weight="-0.08298287463477971"/>
+ <Con from="input/10" weight="0.04260726338720988"/>
+ <Con from="input/11" weight="-0.16548684465174265"/>
+ </Neuron>
+ <Neuron id="1/34" bias="-0.21209918750006043">
+ <Con from="input/1" weight="0.14052656124910173"/>
+ <Con from="input/2" weight="0.12771872646589388"/>
+ <Con from="input/3" weight="-0.1651199304387588"/>
+ <Con from="input/4" weight="-0.024304693392713506"/>
+ <Con from="input/5" weight="0.11370084594347118"/>
+ <Con from="input/6" weight="0.19718944666123456"/>
+ <Con from="input/7" weight="-0.1130453135334852"/>
+ <Con from="input/8" weight="0.002660675294261097"/>
+ <Con from="input/9" weight="0.10777214254404704"/>
+ <Con from="input/10" weight="0.0791607641478749"/>
+ <Con from="input/11" weight="-0.18759137745890506"/>
+ </Neuron>
+ <Neuron id="1/35" bias="0.1849943775829245">
+ <Con from="input/1" weight="-0.010175762971526198"/>
+ <Con from="input/2" weight="-0.06386351649895576"/>
+ <Con from="input/3" weight="0.008646588189523248"/>
+ <Con from="input/4" weight="-0.07269455801777852"/>
+ <Con from="input/5" weight="0.02912350818610208"/>
+ <Con from="input/6" weight="0.004393846041006971"/>
+ <Con from="input/7" weight="0.040210989241948256"/>
+ <Con from="input/8" weight="0.08601338645042901"/>
+ <Con from="input/9" weight="1.7464100787089105E-6"/>
+ <Con from="input/10" weight="0.04796023847772494"/>
+ <Con from="input/11" weight="3.680113480754596E-7"/>
+ </Neuron>
+ <Neuron id="1/36" bias="0.018528848397164354">
+ <Con from="input/1" weight="0.18163801473471583"/>
+ <Con from="input/2" weight="0.021337376439477664"/>
+ <Con from="input/3" weight="0.11446363106670941"/>
+ <Con from="input/4" weight="-0.1675039315200177"/>
+ <Con from="input/5" weight="-0.154747059261019"/>
+ <Con from="input/6" weight="0.09379555389771317"/>
+ <Con from="input/7" weight="-0.19327110764105676"/>
+ <Con from="input/8" weight="0.16697828291906522"/>
+ <Con from="input/9" weight="0.07312495512380561"/>
+ <Con from="input/10" weight="0.07920120295310992"/>
+ <Con from="input/11" weight="-0.2211537460326459"/>
+ </Neuron>
+ <Neuron id="1/37" bias="0.0065416367253528385">
+ <Con from="input/1" weight="0.18526822668834067"/>
+ <Con from="input/2" weight="-0.013598996408137042"/>
+ <Con from="input/3" weight="-0.012099912614735898"/>
+ <Con from="input/4" weight="0.11968669798387466"/>
+ <Con from="input/5" weight="-0.03892107700066021"/>
+ <Con from="input/6" weight="0.10882122042536244"/>
+ <Con from="input/7" weight="0.21151041670236154"/>
+ <Con from="input/8" weight="-0.16418611991135793"/>
+ <Con from="input/9" weight="-0.10484297749000199"/>
+ <Con from="input/10" weight="-0.09208843632447543"/>
+ <Con from="input/11" weight="-0.030988867729463"/>
+ </Neuron>
+ <Neuron id="1/38" bias="0.17948111246708434">
+ <Con from="input/1" weight="-0.006382451683939106"/>
+ <Con from="input/2" weight="-0.02680738740907699"/>
+ <Con from="input/3" weight="-0.057899208077485095"/>
+ <Con from="input/4" weight="-4.4598504548005615E-7"/>
+ <Con from="input/5" weight="1.60735871174599E-6"/>
+ <Con from="input/6" weight="0.07452312585063232"/>
+ <Con from="input/7" weight="1.0857339168887824E-6"/>
+ <Con from="input/8" weight="0.02690622712958157"/>
+ <Con from="input/9" weight="3.305011902676456E-4"/>
+ <Con from="input/10" weight="0.0027383962666477583"/>
+ <Con from="input/11" weight="0.040703207155620855"/>
+ </Neuron>
+ <Neuron id="1/39" bias="0.007589865011343372">
+ <Con from="input/1" weight="-0.0383830845986773"/>
+ <Con from="input/2" weight="-1.1280609136993967E-4"/>
+ <Con from="input/3" weight="-0.028805406414633687"/>
+ <Con from="input/4" weight="-0.02427010059344259"/>
+ <Con from="input/5" weight="-0.06494203152456297"/>
+ <Con from="input/6" weight="2.612296384673143E-7"/>
+ <Con from="input/7" weight="-0.06908239294071779"/>
+ <Con from="input/8" weight="-0.05318731295477062"/>
+ <Con from="input/9" weight="-0.002930857509425447"/>
+ <Con from="input/10" weight="0.0591729505118686"/>
+ <Con from="input/11" weight="1.0897316838540399E-5"/>
+ </Neuron>
+ <Neuron id="1/40" bias="-0.11017040895628849">
+ <Con from="input/1" weight="-0.05180802495151109"/>
+ <Con from="input/2" weight="-0.07977693292964043"/>
+ <Con from="input/3" weight="0.05375291679781633"/>
+ <Con from="input/4" weight="0.09391264545205047"/>
+ <Con from="input/5" weight="-0.08560052954605968"/>
+ <Con from="input/6" weight="-0.14501927044163873"/>
+ <Con from="input/7" weight="0.06586141844105628"/>
+ <Con from="input/8" weight="-0.11262502224990736"/>
+ <Con from="input/9" weight="-0.08888857608435252"/>
+ <Con from="input/10" weight="1.1830393840422021E-5"/>
+ <Con from="input/11" weight="-0.006967533656826953"/>
+ </Neuron>
+ <Neuron id="1/41" bias="-0.02193187382832826">
+ <Con from="input/1" weight="0.0687291928467525"/>
+ <Con from="input/2" weight="-0.09067420308509268"/>
+ <Con from="input/3" weight="0.1620592819871901"/>
+ <Con from="input/4" weight="0.1920407598279434"/>
+ <Con from="input/5" weight="0.01347614207680161"/>
+ <Con from="input/6" weight="-0.1616800317131121"/>
+ <Con from="input/7" weight="0.15579683997076402"/>
+ <Con from="input/8" weight="0.2352828194983866"/>
+ <Con from="input/9" weight="-0.10995780372349878"/>
+ <Con from="input/10" weight="-0.1251063956045225"/>
+ <Con from="input/11" weight="0.050232652653777604"/>
+ </Neuron>
+ <Neuron id="1/42" bias="0.1157888019798368">
+ <Con from="input/1" weight="0.036228214194749454"/>
+ <Con from="input/2" weight="0.18182270869214867"/>
+ <Con from="input/3" weight="0.060381164668165864"/>
+ <Con from="input/4" weight="0.08133378221725812"/>
+ <Con from="input/5" weight="-0.1267301344116167"/>
+ <Con from="input/6" weight="-0.0286071759346827"/>
+ <Con from="input/7" weight="-0.17569172693064367"/>
+ <Con from="input/8" weight="-0.1241314159331323"/>
+ <Con from="input/9" weight="0.20953657516239735"/>
+ <Con from="input/10" weight="-0.03441072294754117"/>
+ <Con from="input/11" weight="-0.2399257729148798"/>
+ </Neuron>
+ <Neuron id="1/43" bias="-0.12723998518552407">
+ <Con from="input/1" weight="-8.020118584888395E-6"/>
+ <Con from="input/2" weight="-0.052481679160909525"/>
+ <Con from="input/3" weight="-1.975725941377608E-4"/>
+ <Con from="input/4" weight="0.04769720896210699"/>
+ <Con from="input/5" weight="0.03310353351728317"/>
+ <Con from="input/6" weight="-0.027402716979106845"/>
+ <Con from="input/7" weight="1.0371996883541142E-5"/>
+ <Con from="input/8" weight="0.08002341606925534"/>
+ <Con from="input/9" weight="-0.026727775661911566"/>
+ <Con from="input/10" weight="-0.08353164557969872"/>
+ <Con from="input/11" weight="-0.02278245854515151"/>
+ </Neuron>
+ <Neuron id="1/44" bias="-0.07113703913427527">
+ <Con from="input/1" weight="-0.17815824080784226"/>
+ <Con from="input/2" weight="0.17162248928777663"/>
+ <Con from="input/3" weight="-0.14434626111201956"/>
+ <Con from="input/4" weight="-0.2109705311717116"/>
+ <Con from="input/5" weight="-0.1259911149159952"/>
+ <Con from="input/6" weight="-0.14274080807189599"/>
+ <Con from="input/7" weight="0.13074383854557728"/>
+ <Con from="input/8" weight="-0.21987950896858335"/>
+ <Con from="input/9" weight="0.1538375333518014"/>
+ <Con from="input/10" weight="-0.2107473252212336"/>
+ <Con from="input/11" weight="0.04997908022820057"/>
+ </Neuron>
+ <Neuron id="1/45" bias="-0.12074831618935764">
+ <Con from="input/1" weight="0.11278328001246074"/>
+ <Con from="input/2" weight="0.12458668752645143"/>
+ <Con from="input/3" weight="-0.04911564411007584"/>
+ <Con from="input/4" weight="0.004771354868232131"/>
+ <Con from="input/5" weight="0.04912502940840506"/>
+ <Con from="input/6" weight="0.20202878829880855"/>
+ <Con from="input/7" weight="-0.14206828224305118"/>
+ <Con from="input/8" weight="0.13277899847997007"/>
+ <Con from="input/9" weight="0.05005268393764696"/>
+ <Con from="input/10" weight="-0.02556617655505319"/>
+ <Con from="input/11" weight="0.10256723944881259"/>
+ </Neuron>
+ <Neuron id="1/46" bias="0.09818454423762518">
+ <Con from="input/1" weight="0.20248799618213603"/>
+ <Con from="input/2" weight="-0.04714012274119774"/>
+ <Con from="input/3" weight="0.07262031065233308"/>
+ <Con from="input/4" weight="0.19458971563275312"/>
+ <Con from="input/5" weight="0.10774266766703545"/>
+ <Con from="input/6" weight="0.028259044856138674"/>
+ <Con from="input/7" weight="0.09063180198360617"/>
+ <Con from="input/8" weight="-0.04592472244655645"/>
+ <Con from="input/9" weight="-0.07663899539358629"/>
+ <Con from="input/10" weight="0.04830310303078345"/>
+ <Con from="input/11" weight="-0.01166999894713483"/>
+ </Neuron>
+ <Neuron id="1/47" bias="-0.05161444606142245">
+ <Con from="input/1" weight="-0.15010479778219563"/>
+ <Con from="input/2" weight="0.004914877749787603"/>
+ <Con from="input/3" weight="-0.04188055386961394"/>
+ <Con from="input/4" weight="0.12050843839625455"/>
+ <Con from="input/5" weight="-0.0392673615812059"/>
+ <Con from="input/6" weight="-0.07236696970887307"/>
+ <Con from="input/7" weight="0.11057526701405553"/>
+ <Con from="input/8" weight="-0.05841009941455235"/>
+ <Con from="input/9" weight="-0.12345524916846745"/>
+ <Con from="input/10" weight="-5.064273823022453E-5"/>
+ <Con from="input/11" weight="-0.023975535121102527"/>
+ </Neuron>
+ <Neuron id="1/48" bias="-0.11868115253883688">
+ <Con from="input/1" weight="-0.017489071540542977"/>
+ <Con from="input/2" weight="-0.006677087217012159"/>
+ <Con from="input/3" weight="-0.0820158116145378"/>
+ <Con from="input/4" weight="0.06076838216092109"/>
+ <Con from="input/5" weight="-0.03132499479809804"/>
+ <Con from="input/6" weight="2.093249779807425E-5"/>
+ <Con from="input/7" weight="0.04553506462250285"/>
+ <Con from="input/8" weight="2.3781519324313766E-4"/>
+ <Con from="input/9" weight="-0.041156537451661354"/>
+ <Con from="input/10" weight="-2.464917221803537E-4"/>
+ <Con from="input/11" weight="0.0051505653736290055"/>
+ </Neuron>
+ <Neuron id="1/49" bias="-0.22921763013642665">
+ <Con from="input/1" weight="0.06341837478132832"/>
+ <Con from="input/2" weight="-6.194407452337829E-4"/>
+ <Con from="input/3" weight="7.377328788883732E-6"/>
+ <Con from="input/4" weight="-3.0338306502103925E-7"/>
+ <Con from="input/5" weight="-0.0768048909711716"/>
+ <Con from="input/6" weight="-1.482283779112577E-7"/>
+ <Con from="input/7" weight="-0.05438380611133201"/>
+ <Con from="input/8" weight="0.0075603220148998205"/>
+ <Con from="input/9" weight="9.901095488218113E-5"/>
+ <Con from="input/10" weight="-4.848920235163743E-6"/>
+ <Con from="input/11" weight="2.051966066124726E-6"/>
+ </Neuron>
+ <Neuron id="1/50" bias="-0.2095780143703725">
+ <Con from="input/1" weight="0.19200238885942933"/>
+ <Con from="input/2" weight="-0.019387304789985703"/>
+ <Con from="input/3" weight="0.024503690131856458"/>
+ <Con from="input/4" weight="0.19568691972002003"/>
+ <Con from="input/5" weight="-0.09220532998265046"/>
+ <Con from="input/6" weight="-0.10946317187548223"/>
+ <Con from="input/7" weight="-0.1992027900575847"/>
+ <Con from="input/8" weight="-0.06841091862366296"/>
+ <Con from="input/9" weight="-0.2698104473959012"/>
+ <Con from="input/10" weight="0.20117016155861736"/>
+ <Con from="input/11" weight="-0.029825430937472693"/>
+ </Neuron>
+ <Neuron id="1/51" bias="-0.21986509480621735">
+ <Con from="input/1" weight="-0.12722118890512532"/>
+ <Con from="input/2" weight="0.2020190824630309"/>
+ <Con from="input/3" weight="0.14100909173290602"/>
+ <Con from="input/4" weight="-0.05886281780545626"/>
+ <Con from="input/5" weight="-0.05042376450467732"/>
+ <Con from="input/6" weight="0.15115561011403905"/>
+ <Con from="input/7" weight="0.15574522999015855"/>
+ <Con from="input/8" weight="-0.06396279742662153"/>
+ <Con from="input/9" weight="-0.1322823415543513"/>
+ <Con from="input/10" weight="0.06902245493185465"/>
+ <Con from="input/11" weight="0.1416860864449508"/>
+ </Neuron>
+ <Neuron id="1/52" bias="0.08139875824160828">
+ <Con from="input/1" weight="0.059032430039972945"/>
+ <Con from="input/2" weight="-0.06592344578777849"/>
+ <Con from="input/3" weight="-0.010277045303192567"/>
+ <Con from="input/4" weight="0.05764677434515153"/>
+ <Con from="input/5" weight="-0.018126997409326988"/>
+ <Con from="input/6" weight="6.26053797181097E-4"/>
+ <Con from="input/7" weight="0.04112990939138228"/>
+ <Con from="input/8" weight="0.0014990911105503381"/>
+ <Con from="input/9" weight="2.4391686812632334E-6"/>
+ <Con from="input/10" weight="-0.0034742760074927474"/>
+ <Con from="input/11" weight="0.001599229653252463"/>
+ </Neuron>
+ <Neuron id="1/53" bias="0.04992039310388038">
+ <Con from="input/1" weight="-0.20365696365957658"/>
+ <Con from="input/2" weight="0.08361206079477125"/>
+ <Con from="input/3" weight="-0.10112667464897877"/>
+ <Con from="input/4" weight="0.13514085868937284"/>
+ <Con from="input/5" weight="0.07687960978811569"/>
+ <Con from="input/6" weight="-0.17195166995786826"/>
+ <Con from="input/7" weight="-0.08367742886616643"/>
+ <Con from="input/8" weight="0.06086294350167601"/>
+ <Con from="input/9" weight="0.25473941988429327"/>
+ <Con from="input/10" weight="-0.22224721581384202"/>
+ <Con from="input/11" weight="-0.1672660894402233"/>
+ </Neuron>
+ <Neuron id="1/54" bias="0.05077915290190573">
+ <Con from="input/1" weight="0.13214082032034444"/>
+ <Con from="input/2" weight="-0.11839043429226433"/>
+ <Con from="input/3" weight="0.23075863771136718"/>
+ <Con from="input/4" weight="-0.1272140011075098"/>
+ <Con from="input/5" weight="-0.10890262277659513"/>
+ <Con from="input/6" weight="0.2741724017201871"/>
+ <Con from="input/7" weight="-0.056166678909410725"/>
+ <Con from="input/8" weight="-0.19908375956485502"/>
+ <Con from="input/9" weight="-0.059973525659164806"/>
+ <Con from="input/10" weight="0.015302338705167532"/>
+ <Con from="input/11" weight="-0.32999587104227873"/>
+ </Neuron>
+ <Neuron id="1/55" bias="0.07077848115527213">
+ <Con from="input/1" weight="-0.01388458025809869"/>
+ <Con from="input/2" weight="-0.12293684922057926"/>
+ <Con from="input/3" weight="0.15090591745629914"/>
+ <Con from="input/4" weight="-0.21182396933179168"/>
+ <Con from="input/5" weight="0.014100541818019658"/>
+ <Con from="input/6" weight="-0.09762527568142022"/>
+ <Con from="input/7" weight="-0.01338385136263488"/>
+ <Con from="input/8" weight="0.008815236565660705"/>
+ <Con from="input/9" weight="-0.02183439047813788"/>
+ <Con from="input/10" weight="-6.099282457224889E-7"/>
+ <Con from="input/11" weight="-8.307812541573677E-4"/>
+ </Neuron>
+ <Neuron id="1/56" bias="-0.20974318355904853">
+ <Con from="input/1" weight="0.06293482316843194"/>
+ <Con from="input/2" weight="-0.09664797505798098"/>
+ <Con from="input/3" weight="0.12124637437557664"/>
+ <Con from="input/4" weight="0.18924450881756616"/>
+ <Con from="input/5" weight="0.17487181339390548"/>
+ <Con from="input/6" weight="0.02680642346609525"/>
+ <Con from="input/7" weight="-0.004383323094016607"/>
+ <Con from="input/8" weight="-0.09229784784185434"/>
+ <Con from="input/9" weight="-0.002383697662003324"/>
+ <Con from="input/10" weight="0.022959613061436422"/>
+ <Con from="input/11" weight="0.2395195611464611"/>
+ </Neuron>
+ <Neuron id="1/57" bias="0.2045926855427585">
+ <Con from="input/1" weight="0.04912781765900063"/>
+ <Con from="input/2" weight="0.13931059063275675"/>
+ <Con from="input/3" weight="-0.01933348129289806"/>
+ <Con from="input/4" weight="-0.07401856294277755"/>
+ <Con from="input/5" weight="0.08834601463735767"/>
+ <Con from="input/6" weight="0.08586506369934449"/>
+ <Con from="input/7" weight="-0.23210145753417114"/>
+ <Con from="input/8" weight="0.1147130675615461"/>
+ <Con from="input/9" weight="-0.0670228184433728"/>
+ <Con from="input/10" weight="-0.2108110618547677"/>
+ <Con from="input/11" weight="0.09016744202078211"/>
+ </Neuron>
+ <Neuron id="1/58" bias="-0.019100938047092758">
+ <Con from="input/1" weight="6.410035832901865E-4"/>
+ <Con from="input/2" weight="-0.07041010100916488"/>
+ <Con from="input/3" weight="-0.08034127453063158"/>
+ <Con from="input/4" weight="-0.003440319616623197"/>
+ <Con from="input/5" weight="-0.001300077776691976"/>
+ <Con from="input/6" weight="-0.08372410918968998"/>
+ <Con from="input/7" weight="-0.01730082336661642"/>
+ <Con from="input/8" weight="-0.012833301576977173"/>
+ <Con from="input/9" weight="0.035591122515135076"/>
+ <Con from="input/10" weight="-0.005175762442888744"/>
+ <Con from="input/11" weight="-0.03333746485552988"/>
+ </Neuron>
+ <Neuron id="1/59" bias="0.11242116104514052">
+ <Con from="input/1" weight="-0.0025039462686323186"/>
+ <Con from="input/2" weight="-6.716143250037276E-7"/>
+ <Con from="input/3" weight="-0.06597785219469417"/>
+ <Con from="input/4" weight="1.8736642362925578E-6"/>
+ <Con from="input/5" weight="1.1307823582651954E-6"/>
+ <Con from="input/6" weight="-1.9478797369119027E-6"/>
+ <Con from="input/7" weight="-0.05020418468269377"/>
+ <Con from="input/8" weight="0.0816282949014115"/>
+ <Con from="input/9" weight="-0.002777688424337804"/>
+ <Con from="input/10" weight="0.042367283326037536"/>
+ <Con from="input/11" weight="-7.830179858779044E-4"/>
+ </Neuron>
+ <Neuron id="1/60" bias="-0.16755236616180033">
+ <Con from="input/1" weight="-7.713933193926721E-4"/>
+ <Con from="input/2" weight="5.4831475154791E-4"/>
+ <Con from="input/3" weight="-0.08510046238721018"/>
+ <Con from="input/4" weight="-0.032963578966228355"/>
+ <Con from="input/5" weight="0.0061509538809763255"/>
+ <Con from="input/6" weight="0.0036773113014747522"/>
+ <Con from="input/7" weight="0.039537665549406076"/>
+ <Con from="input/8" weight="-0.001783320370928473"/>
+ <Con from="input/9" weight="-0.004989108803485892"/>
+ <Con from="input/10" weight="-1.6726773187314903E-8"/>
+ <Con from="input/11" weight="-3.1106100402852343E-7"/>
+ </Neuron>
+ <Neuron id="1/61" bias="0.19549732087849872">
+ <Con from="input/1" weight="1.0184147854808128E-5"/>
+ <Con from="input/2" weight="-1.123946228085676E-5"/>
+ <Con from="input/3" weight="-0.07154795086485946"/>
+ <Con from="input/4" weight="-0.012814557187740527"/>
+ <Con from="input/5" weight="-0.05438403702076232"/>
+ <Con from="input/6" weight="0.04958508451221472"/>
+ <Con from="input/7" weight="0.014749762803037964"/>
+ <Con from="input/8" weight="-0.004454366349130764"/>
+ <Con from="input/9" weight="0.0594796659148971"/>
+ <Con from="input/10" weight="0.004065856216558457"/>
+ <Con from="input/11" weight="-1.6998434682550647E-4"/>
+ </Neuron>
+ <Neuron id="1/62" bias="-0.16037141513952222">
+ <Con from="input/1" weight="-0.13131326966501383"/>
+ <Con from="input/2" weight="0.03822550745365675"/>
+ <Con from="input/3" weight="0.12492062999368043"/>
+ <Con from="input/4" weight="-0.07133970370371047"/>
+ <Con from="input/5" weight="0.20211930444816492"/>
+ <Con from="input/6" weight="-0.21635036413703695"/>
+ <Con from="input/7" weight="0.1437321565789146"/>
+ <Con from="input/8" weight="-0.048864711606459964"/>
+ <Con from="input/9" weight="-0.022250032838976662"/>
+ <Con from="input/10" weight="-0.16426294467930275"/>
+ <Con from="input/11" weight="-0.010871259737985433"/>
+ </Neuron>
+ <Neuron id="1/63" bias="-0.09444686599011277">
+ <Con from="input/1" weight="-0.09018275499213996"/>
+ <Con from="input/2" weight="3.7619029708244294E-4"/>
+ <Con from="input/3" weight="0.21725191547995368"/>
+ <Con from="input/4" weight="-0.07200636602681201"/>
+ <Con from="input/5" weight="0.19027003704724363"/>
+ <Con from="input/6" weight="-0.218354955817112"/>
+ <Con from="input/7" weight="-0.12214390171860519"/>
+ <Con from="input/8" weight="0.15423833444088683"/>
+ <Con from="input/9" weight="0.013658081321491131"/>
+ <Con from="input/10" weight="-0.120782080099004"/>
+ <Con from="input/11" weight="0.11039277558999204"/>
+ </Neuron>
+ <Neuron id="1/64" bias="0.08196297290722009">
+ <Con from="input/1" weight="-0.17599735122775081"/>
+ <Con from="input/2" weight="-0.02967397539285234"/>
+ <Con from="input/3" weight="-0.05540052626732067"/>
+ <Con from="input/4" weight="0.11480381543201157"/>
+ <Con from="input/5" weight="0.14315711548872664"/>
+ <Con from="input/6" weight="-0.13072159088759497"/>
+ <Con from="input/7" weight="0.06694818977997986"/>
+ <Con from="input/8" weight="-0.06357744280199756"/>
+ <Con from="input/9" weight="-0.12920728956464142"/>
+ <Con from="input/10" weight="-0.2137666285529882"/>
+ <Con from="input/11" weight="0.017189994829831898"/>
+ </Neuron>
+ <Neuron id="1/65" bias="-0.1027165865345076">
+ <Con from="input/1" weight="0.14253096180662406"/>
+ <Con from="input/2" weight="-0.20849156906259816"/>
+ <Con from="input/3" weight="0.22452233166130567"/>
+ <Con from="input/4" weight="-0.08835625880874896"/>
+ <Con from="input/5" weight="-0.12271475960977174"/>
+ <Con from="input/6" weight="0.21040272462888363"/>
+ <Con from="input/7" weight="-9.672119519246348E-7"/>
+ <Con from="input/8" weight="0.010770661230711834"/>
+ <Con from="input/9" weight="-0.14520316922114399"/>
+ <Con from="input/10" weight="-0.00651603856312897"/>
+ <Con from="input/11" weight="-0.047329211909582614"/>
+ </Neuron>
+ <Neuron id="1/66" bias="-0.05580846973711595">
+ <Con from="input/1" weight="-0.22978035109730943"/>
+ <Con from="input/2" weight="0.14192676507569743"/>
+ <Con from="input/3" weight="0.05689805368903757"/>
+ <Con from="input/4" weight="0.23756495715292325"/>
+ <Con from="input/5" weight="0.2300236318958282"/>
+ <Con from="input/6" weight="0.1578041707806837"/>
+ <Con from="input/7" weight="0.10813056947717274"/>
+ <Con from="input/8" weight="-0.11104024134382645"/>
+ <Con from="input/9" weight="0.18264144278198288"/>
+ <Con from="input/10" weight="0.20233130002579852"/>
+ <Con from="input/11" weight="-0.1769360109427225"/>
+ </Neuron>
+ <Neuron id="1/67" bias="0.09360556197001788">
+ <Con from="input/1" weight="-0.12656902238058576"/>
+ <Con from="input/2" weight="-0.011547510026253777"/>
+ <Con from="input/3" weight="0.21001722094541941"/>
+ <Con from="input/4" weight="0.002935050606708585"/>
+ <Con from="input/5" weight="-0.0024423189017688785"/>
+ <Con from="input/6" weight="-0.07872448219282137"/>
+ <Con from="input/7" weight="-0.05158331197456656"/>
+ <Con from="input/8" weight="-0.13815633920431786"/>
+ <Con from="input/9" weight="0.2625347040043431"/>
+ <Con from="input/10" weight="-0.06259281163833988"/>
+ <Con from="input/11" weight="0.15309777814517708"/>
+ </Neuron>
+ <Neuron id="1/68" bias="0.10804186156726688">
+ <Con from="input/1" weight="-0.0076381348851719665"/>
+ <Con from="input/2" weight="0.15255177389218144"/>
+ <Con from="input/3" weight="0.2087707237569208"/>
+ <Con from="input/4" weight="0.05805805580846562"/>
+ <Con from="input/5" weight="0.1644259041363125"/>
+ <Con from="input/6" weight="0.13995268288491983"/>
+ <Con from="input/7" weight="0.2012091118968204"/>
+ <Con from="input/8" weight="-0.011999082116037887"/>
+ <Con from="input/9" weight="0.07624588542921282"/>
+ <Con from="input/10" weight="0.022414597488753903"/>
+ <Con from="input/11" weight="-0.1664879293374052"/>
+ </Neuron>
+ <Neuron id="1/69" bias="-0.13653790999591245">
+ <Con from="input/1" weight="0.003011316114537129"/>
+ <Con from="input/2" weight="0.11359542166675754"/>
+ <Con from="input/3" weight="-0.08122607865208775"/>
+ <Con from="input/4" weight="0.18022788161386274"/>
+ <Con from="input/5" weight="0.09959528398514518"/>
+ <Con from="input/6" weight="0.22147771933820504"/>
+ <Con from="input/7" weight="-0.03439733279604396"/>
+ <Con from="input/8" weight="0.07220014677510243"/>
+ <Con from="input/9" weight="0.15325452714962653"/>
+ <Con from="input/10" weight="-0.14226107748281164"/>
+ <Con from="input/11" weight="0.045886761704833905"/>
+ </Neuron>
+ <Neuron id="1/70" bias="-0.10965073332198724">
+ <Con from="input/1" weight="-0.03687916838618512"/>
+ <Con from="input/2" weight="3.560990020551165E-6"/>
+ <Con from="input/3" weight="-0.021087309023892723"/>
+ <Con from="input/4" weight="-2.0733467297341833E-7"/>
+ <Con from="input/5" weight="-0.002241385795861413"/>
+ <Con from="input/6" weight="0.05012703606714386"/>
+ <Con from="input/7" weight="0.027571859322559462"/>
+ <Con from="input/8" weight="0.044540546529254224"/>
+ <Con from="input/9" weight="-0.07972703579966586"/>
+ <Con from="input/10" weight="-3.664019095280996E-7"/>
+ <Con from="input/11" weight="7.772160828195268E-4"/>
+ </Neuron>
+ <Neuron id="1/71" bias="0.10938717682200279">
+ <Con from="input/1" weight="0.14757959404738646"/>
+ <Con from="input/2" weight="0.15764228653171494"/>
+ <Con from="input/3" weight="-0.03747444266827266"/>
+ <Con from="input/4" weight="-0.15394332882310158"/>
+ <Con from="input/5" weight="-0.04089418562957983"/>
+ <Con from="input/6" weight="0.02622316438359032"/>
+ <Con from="input/7" weight="0.07689857668938334"/>
+ <Con from="input/8" weight="0.006327585139241369"/>
+ <Con from="input/9" weight="-0.18281278645974916"/>
+ <Con from="input/10" weight="-0.21576400381059294"/>
+ <Con from="input/11" weight="-0.03196520978891572"/>
+ </Neuron>
+ <Neuron id="1/72" bias="0.009371296890607814">
+ <Con from="input/1" weight="0.12783055723570555"/>
+ <Con from="input/2" weight="0.06731936923966957"/>
+ <Con from="input/3" weight="-0.05779509498987038"/>
+ <Con from="input/4" weight="0.10514977804424334"/>
+ <Con from="input/5" weight="-0.1601496108205719"/>
+ <Con from="input/6" weight="-0.12501696047666452"/>
+ <Con from="input/7" weight="-0.005803605688495266"/>
+ <Con from="input/8" weight="0.14662009480915156"/>
+ <Con from="input/9" weight="0.08211864104401617"/>
+ <Con from="input/10" weight="-0.20300630015193147"/>
+ <Con from="input/11" weight="0.046503662558161456"/>
+ </Neuron>
+ <Neuron id="1/73" bias="0.11856669569073816">
+ <Con from="input/1" weight="0.11876793400071708"/>
+ <Con from="input/2" weight="0.11411519074404204"/>
+ <Con from="input/3" weight="0.12068242161903327"/>
+ <Con from="input/4" weight="-0.09698375735993395"/>
+ <Con from="input/5" weight="-0.20947686936310317"/>
+ <Con from="input/6" weight="0.1685826892890504"/>
+ <Con from="input/7" weight="-0.1568995222075782"/>
+ <Con from="input/8" weight="-0.18731629353101484"/>
+ <Con from="input/9" weight="0.09126439383149777"/>
+ <Con from="input/10" weight="-0.0799757628683236"/>
+ <Con from="input/11" weight="0.1340569959034099"/>
+ </Neuron>
+ <Neuron id="1/74" bias="0.03424972483690177">
+ <Con from="input/1" weight="0.04336827470149121"/>
+ <Con from="input/2" weight="0.021745016082210692"/>
+ <Con from="input/3" weight="0.029963988082354422"/>
+ <Con from="input/4" weight="0.17059766465458265"/>
+ <Con from="input/5" weight="-0.0887525103675521"/>
+ <Con from="input/6" weight="-0.21399823298646684"/>
+ <Con from="input/7" weight="-0.08619706359762791"/>
+ <Con from="input/8" weight="-0.22160860975956342"/>
+ <Con from="input/9" weight="-0.16292116005739168"/>
+ <Con from="input/10" weight="0.11636667363393163"/>
+ <Con from="input/11" weight="0.11813380629056107"/>
+ </Neuron>
+ <Neuron id="1/75" bias="0.055354875153892175">
+ <Con from="input/1" weight="0.00744274958112183"/>
+ <Con from="input/2" weight="-0.04198799957306165"/>
+ <Con from="input/3" weight="-0.0111542594426618"/>
+ <Con from="input/4" weight="0.11032773594946416"/>
+ <Con from="input/5" weight="-0.09734230619426168"/>
+ <Con from="input/6" weight="-0.13171427834962926"/>
+ <Con from="input/7" weight="-3.0554994735299994E-6"/>
+ <Con from="input/8" weight="-0.12230777523960032"/>
+ <Con from="input/9" weight="-0.012311187688438033"/>
+ <Con from="input/10" weight="0.009748279068877756"/>
+ <Con from="input/11" weight="0.0826973152002792"/>
+ </Neuron>
+ <Neuron id="1/76" bias="0.11906738130340451">
+ <Con from="input/1" weight="-0.055450780197335806"/>
+ <Con from="input/2" weight="0.0830059984314774"/>
+ <Con from="input/3" weight="0.09448368184341996"/>
+ <Con from="input/4" weight="0.11840378670165415"/>
+ <Con from="input/5" weight="-0.09845581539568435"/>
+ <Con from="input/6" weight="-0.037011691109654334"/>
+ <Con from="input/7" weight="0.22729388834225484"/>
+ <Con from="input/8" weight="-0.16237095730192397"/>
+ <Con from="input/9" weight="-0.1651517830198004"/>
+ <Con from="input/10" weight="0.1558838607042648"/>
+ <Con from="input/11" weight="-0.17911959225416496"/>
+ </Neuron>
+ <Neuron id="1/77" bias="0.09351903665796954">
+ <Con from="input/1" weight="-0.16053815627825013"/>
+ <Con from="input/2" weight="0.15922500878454238"/>
+ <Con from="input/3" weight="0.17138499901367865"/>
+ <Con from="input/4" weight="-0.11865652583576908"/>
+ <Con from="input/5" weight="-0.20212610857068988"/>
+ <Con from="input/6" weight="0.14093996647028587"/>
+ <Con from="input/7" weight="0.0266264810287466"/>
+ <Con from="input/8" weight="0.22238879917952747"/>
+ <Con from="input/9" weight="-0.10815687697059266"/>
+ <Con from="input/10" weight="0.014513921851242955"/>
+ <Con from="input/11" weight="-0.2486090445871652"/>
+ </Neuron>
+ <Neuron id="1/78" bias="-0.030369292224361244">
+ <Con from="input/1" weight="-0.027562282508851177"/>
+ <Con from="input/2" weight="9.740048928652661E-5"/>
+ <Con from="input/3" weight="-0.0066266402909006"/>
+ <Con from="input/4" weight="-0.006824030031397785"/>
+ <Con from="input/5" weight="-2.8163894895941084E-5"/>
+ <Con from="input/6" weight="-5.317699083378196E-6"/>
+ <Con from="input/7" weight="-0.038564798813609545"/>
+ <Con from="input/8" weight="0.007858984183009359"/>
+ <Con from="input/9" weight="-0.04449366259311753"/>
+ <Con from="input/10" weight="0.0014168797675743895"/>
+ <Con from="input/11" weight="-8.726298830596743E-6"/>
+ </Neuron>
+ <Neuron id="1/79" bias="-0.09792859706543669">
+ <Con from="input/1" weight="-0.05400898119875725"/>
+ <Con from="input/2" weight="-0.05243076587684924"/>
+ <Con from="input/3" weight="-0.07581103785612991"/>
+ <Con from="input/4" weight="-0.020044603970219975"/>
+ <Con from="input/5" weight="0.007847079468766331"/>
+ <Con from="input/6" weight="0.006027055797630304"/>
+ <Con from="input/7" weight="-0.02370465650190648"/>
+ <Con from="input/8" weight="0.029868609407784224"/>
+ <Con from="input/9" weight="0.042351310411914124"/>
+ <Con from="input/10" weight="-0.021689001585869613"/>
+ <Con from="input/11" weight="-0.07407839374214437"/>
+ </Neuron>
+ <Neuron id="1/80" bias="0.1379965141538459">
+ <Con from="input/1" weight="-0.12257090186793412"/>
+ <Con from="input/2" weight="0.216874791818019"/>
+ <Con from="input/3" weight="-0.16612192719769145"/>
+ <Con from="input/4" weight="0.010548096427103216"/>
+ <Con from="input/5" weight="0.06905174569890025"/>
+ <Con from="input/6" weight="-0.19963583113399494"/>
+ <Con from="input/7" weight="-0.1477098873911362"/>
+ <Con from="input/8" weight="0.03356654239698111"/>
+ <Con from="input/9" weight="0.07731131506006209"/>
+ <Con from="input/10" weight="0.17119855921861407"/>
+ <Con from="input/11" weight="-0.051058785178881225"/>
+ </Neuron>
+ <Neuron id="1/81" bias="-0.17703347063633296">
+ <Con from="input/1" weight="0.24832936919312384"/>
+ <Con from="input/2" weight="-0.028596647272747612"/>
+ <Con from="input/3" weight="-0.017985722449303854"/>
+ <Con from="input/4" weight="-0.05626788728420815"/>
+ <Con from="input/5" weight="0.26273605425848"/>
+ <Con from="input/6" weight="0.18538551717434013"/>
+ <Con from="input/7" weight="-0.13739574497137366"/>
+ <Con from="input/8" weight="0.08968374217700668"/>
+ <Con from="input/9" weight="0.15526743659271836"/>
+ <Con from="input/10" weight="0.018346329580683267"/>
+ <Con from="input/11" weight="-0.22182997661997322"/>
+ </Neuron>
+ <Neuron id="1/82" bias="-0.13195863842863692">
+ <Con from="input/1" weight="0.05037250479946206"/>
+ <Con from="input/2" weight="-0.025810062370987274"/>
+ <Con from="input/3" weight="0.009557553336407511"/>
+ <Con from="input/4" weight="0.08554241802403599"/>
+ <Con from="input/5" weight="-0.010428726907312055"/>
+ <Con from="input/6" weight="-0.0018197838504670283"/>
+ <Con from="input/7" weight="0.044579141242593184"/>
+ <Con from="input/8" weight="4.2886714153621513E-4"/>
+ <Con from="input/9" weight="-0.05106164680131359"/>
+ <Con from="input/10" weight="1.300461695421815E-4"/>
+ <Con from="input/11" weight="0.030740040956793252"/>
+ </Neuron>
+ <Neuron id="1/83" bias="-0.05244769624787343">
+ <Con from="input/1" weight="0.2002075120213289"/>
+ <Con from="input/2" weight="-0.0014687989241005202"/>
+ <Con from="input/3" weight="0.1366215709573972"/>
+ <Con from="input/4" weight="0.20088911158329548"/>
+ <Con from="input/5" weight="-0.04183565276680294"/>
+ <Con from="input/6" weight="0.2060035835484278"/>
+ <Con from="input/7" weight="-0.04334979823140568"/>
+ <Con from="input/8" weight="-0.09887552256220915"/>
+ <Con from="input/9" weight="0.25150133893832993"/>
+ <Con from="input/10" weight="-0.07071850035582786"/>
+ <Con from="input/11" weight="0.15198057355064842"/>
+ </Neuron>
+ <Neuron id="1/84" bias="-0.025199045626818628">
+ <Con from="input/1" weight="-0.011291635687581233"/>
+ <Con from="input/2" weight="-0.010063985656897883"/>
+ <Con from="input/3" weight="-0.017587126321971375"/>
+ <Con from="input/4" weight="0.016008111484596847"/>
+ <Con from="input/5" weight="0.028095078070615575"/>
+ <Con from="input/6" weight="-0.05475728281290597"/>
+ <Con from="input/7" weight="-0.04269258319837894"/>
+ <Con from="input/8" weight="3.6157062389980226E-5"/>
+ <Con from="input/9" weight="-0.010319438172539446"/>
+ <Con from="input/10" weight="1.6902383912790342E-6"/>
+ <Con from="input/11" weight="-0.06884340597171905"/>
+ </Neuron>
+ <Neuron id="1/85" bias="-0.12043038987997573">
+ <Con from="input/1" weight="-0.011438463632882633"/>
+ <Con from="input/2" weight="-0.0787964757963858"/>
+ <Con from="input/3" weight="-1.2486770352806875E-5"/>
+ <Con from="input/4" weight="-0.0067249172283823915"/>
+ <Con from="input/5" weight="-0.02456402020025156"/>
+ <Con from="input/6" weight="-2.628033865915508E-6"/>
+ <Con from="input/7" weight="-0.003531506559112116"/>
+ <Con from="input/8" weight="0.07949354708584061"/>
+ <Con from="input/9" weight="0.003651216229989388"/>
+ <Con from="input/10" weight="-0.054999060384849134"/>
+ <Con from="input/11" weight="-0.0784358303277118"/>
+ </Neuron>
+ <Neuron id="1/86" bias="-0.10845927201585003">
+ <Con from="input/1" weight="-0.0031958293724514706"/>
+ <Con from="input/2" weight="-0.028793155468756625"/>
+ <Con from="input/3" weight="2.0934994547929318E-8"/>
+ <Con from="input/4" weight="-6.572094797044436E-7"/>
+ <Con from="input/5" weight="0.020966060185655028"/>
+ <Con from="input/6" weight="0.05013060792222194"/>
+ <Con from="input/7" weight="-0.07065431129518684"/>
+ <Con from="input/8" weight="0.05821904225494201"/>
+ <Con from="input/9" weight="-0.06408882436026482"/>
+ <Con from="input/10" weight="-0.07573506344798622"/>
+ <Con from="input/11" weight="-0.042370590744185505"/>
+ </Neuron>
+ <Neuron id="1/87" bias="0.10559422124768346">
+ <Con from="input/1" weight="-0.20871481855786056"/>
+ <Con from="input/2" weight="0.12558319876831375"/>
+ <Con from="input/3" weight="0.03541749599206384"/>
+ <Con from="input/4" weight="-0.11990186314269961"/>
+ <Con from="input/5" weight="-0.042518852635359046"/>
+ <Con from="input/6" weight="-0.20737316263361685"/>
+ <Con from="input/7" weight="0.04941394629558378"/>
+ <Con from="input/8" weight="-0.0030235167860510875"/>
+ <Con from="input/9" weight="0.00280841879491218"/>
+ <Con from="input/10" weight="0.18232366249312107"/>
+ <Con from="input/11" weight="-0.16596546234740928"/>
+ </Neuron>
+ <Neuron id="1/88" bias="-0.02920019495582405">
+ <Con from="input/1" weight="0.1932667839146258"/>
+ <Con from="input/2" weight="-0.011942938532602764"/>
+ <Con from="input/3" weight="-0.030720904991261577"/>
+ <Con from="input/4" weight="0.16596911022221897"/>
+ <Con from="input/5" weight="-0.1548934411036336"/>
+ <Con from="input/6" weight="0.1448784827271793"/>
+ <Con from="input/7" weight="0.20215204514479165"/>
+ <Con from="input/8" weight="-0.1034721209934974"/>
+ <Con from="input/9" weight="-0.006014464483069626"/>
+ <Con from="input/10" weight="-0.07300352600656095"/>
+ <Con from="input/11" weight="-0.004307998165468835"/>
+ </Neuron>
+ <Neuron id="1/89" bias="0.19624238394737767">
+ <Con from="input/1" weight="-0.018950108652298608"/>
+ <Con from="input/2" weight="0.21062721255912548"/>
+ <Con from="input/3" weight="-0.12702669755430895"/>
+ <Con from="input/4" weight="-0.08085343133547532"/>
+ <Con from="input/5" weight="0.1617036290299089"/>
+ <Con from="input/6" weight="0.09398272668763837"/>
+ <Con from="input/7" weight="0.0759563165348135"/>
+ <Con from="input/8" weight="-0.1502441585407914"/>
+ <Con from="input/9" weight="0.015700442419623935"/>
+ <Con from="input/10" weight="0.025115906667137932"/>
+ <Con from="input/11" weight="0.22596035368145723"/>
+ </Neuron>
+ <Neuron id="1/90" bias="-0.06737587107501875">
+ <Con from="input/1" weight="-0.0017648856225168652"/>
+ <Con from="input/2" weight="0.2212293017593189"/>
+ <Con from="input/3" weight="-0.06969558576900857"/>
+ <Con from="input/4" weight="0.04502967714370593"/>
+ <Con from="input/5" weight="0.2252630475166762"/>
+ <Con from="input/6" weight="-0.10719111846128467"/>
+ <Con from="input/7" weight="0.04786329877958647"/>
+ <Con from="input/8" weight="0.19176316913671945"/>
+ <Con from="input/9" weight="0.09300603086477512"/>
+ <Con from="input/10" weight="0.055208109310108305"/>
+ <Con from="input/11" weight="0.1930911268487541"/>
+ </Neuron>
+ <Neuron id="1/91" bias="0.20562452636701742">
+ <Con from="input/1" weight="-0.14596804584494705"/>
+ <Con from="input/2" weight="0.14884617419034574"/>
+ <Con from="input/3" weight="-0.10828465766239591"/>
+ <Con from="input/4" weight="-0.07806270998331752"/>
+ <Con from="input/5" weight="-0.11810294682124699"/>
+ <Con from="input/6" weight="-0.17828777887716682"/>
+ <Con from="input/7" weight="-0.16822267337242736"/>
+ <Con from="input/8" weight="0.1613257533796261"/>
+ <Con from="input/9" weight="-0.22982472798036588"/>
+ <Con from="input/10" weight="0.16795551654551003"/>
+ <Con from="input/11" weight="0.11306389088635437"/>
+ </Neuron>
+ <Neuron id="1/92" bias="-0.013780696176762284">
+ <Con from="input/1" weight="0.20239357424843338"/>
+ <Con from="input/2" weight="0.15075883093042455"/>
+ <Con from="input/3" weight="0.17715204608232127"/>
+ <Con from="input/4" weight="-0.06313720580103765"/>
+ <Con from="input/5" weight="0.016991451777145974"/>
+ <Con from="input/6" weight="-0.14648985959421446"/>
+ <Con from="input/7" weight="-0.18579684580391875"/>
+ <Con from="input/8" weight="-0.13468774523014465"/>
+ <Con from="input/9" weight="0.1346102660436579"/>
+ <Con from="input/10" weight="0.123067672478522"/>
+ <Con from="input/11" weight="-0.13703691264921514"/>
+ </Neuron>
+ <Neuron id="1/93" bias="0.07320445727830797">
+ <Con from="input/1" weight="0.1600234176565746"/>
+ <Con from="input/2" weight="0.1777379765882867"/>
+ <Con from="input/3" weight="-0.04079396100847461"/>
+ <Con from="input/4" weight="-0.11620144675353439"/>
+ <Con from="input/5" weight="0.2271229071803269"/>
+ <Con from="input/6" weight="0.11415151382420734"/>
+ <Con from="input/7" weight="0.2501343641303166"/>
+ <Con from="input/8" weight="0.21745852249906955"/>
+ <Con from="input/9" weight="0.14499605793510092"/>
+ <Con from="input/10" weight="-0.05394394301699278"/>
+ <Con from="input/11" weight="0.012219157242233793"/>
+ </Neuron>
+ <Neuron id="1/94" bias="0.020091375290149795">
+ <Con from="input/1" weight="-0.007890226011169822"/>
+ <Con from="input/2" weight="-0.05878144710097515"/>
+ <Con from="input/3" weight="0.09228541969415127"/>
+ <Con from="input/4" weight="-0.2283927822941047"/>
+ <Con from="input/5" weight="0.06764249771611484"/>
+ <Con from="input/6" weight="-0.16242555086071012"/>
+ <Con from="input/7" weight="0.13758197097690766"/>
+ <Con from="input/8" weight="0.22017225901995788"/>
+ <Con from="input/9" weight="-0.06710908281200109"/>
+ <Con from="input/10" weight="-0.06989965793782968"/>
+ <Con from="input/11" weight="-0.12233603067017973"/>
+ </Neuron>
+ <Neuron id="1/95" bias="0.0664583555229913">
+ <Con from="input/1" weight="-0.07965718620137546"/>
+ <Con from="input/2" weight="0.17770493634697915"/>
+ <Con from="input/3" weight="0.0850887700351891"/>
+ <Con from="input/4" weight="0.043595349594743996"/>
+ <Con from="input/5" weight="0.22466218212404518"/>
+ <Con from="input/6" weight="-0.16523375534503307"/>
+ <Con from="input/7" weight="-0.23492072981457648"/>
+ <Con from="input/8" weight="-0.11117091671040394"/>
+ <Con from="input/9" weight="0.14679211088399635"/>
+ <Con from="input/10" weight="-0.025228304057926778"/>
+ <Con from="input/11" weight="-0.024318576228706243"/>
+ </Neuron>
+ <Neuron id="1/96" bias="0.17994991094023322">
+ <Con from="input/1" weight="-0.1847449487727458"/>
+ <Con from="input/2" weight="-0.12853878768455448"/>
+ <Con from="input/3" weight="0.06691084321141394"/>
+ <Con from="input/4" weight="0.20930345410543952"/>
+ <Con from="input/5" weight="0.031387803332787914"/>
+ <Con from="input/6" weight="0.17082553707402964"/>
+ <Con from="input/7" weight="0.1458537900999949"/>
+ <Con from="input/8" weight="-0.09244003794835416"/>
+ <Con from="input/9" weight="0.026263842760627768"/>
+ <Con from="input/10" weight="0.16468834922401684"/>
+ <Con from="input/11" weight="-0.03967180834483082"/>
+ </Neuron>
+ <Neuron id="1/97" bias="-0.07997542970049423">
+ <Con from="input/1" weight="0.03440342878381039"/>
+ <Con from="input/2" weight="-2.109680455327634E-6"/>
+ <Con from="input/3" weight="-0.03919886334183453"/>
+ <Con from="input/4" weight="-0.06687230885543183"/>
+ <Con from="input/5" weight="-0.0010268471758763554"/>
+ <Con from="input/6" weight="0.04079968119336743"/>
+ <Con from="input/7" weight="0.0757291736233379"/>
+ <Con from="input/8" weight="4.902945724137978E-6"/>
+ <Con from="input/9" weight="3.9626957972596224E-7"/>
+ <Con from="input/10" weight="-2.61976680198548E-7"/>
+ <Con from="input/11" weight="-0.012825254542909635"/>
+ </Neuron>
+ <Neuron id="1/98" bias="-0.22906904267158573">
+ <Con from="input/1" weight="-0.04781766857801519"/>
+ <Con from="input/2" weight="-1.2569755213856385E-5"/>
+ <Con from="input/3" weight="1.246969662992824E-5"/>
+ <Con from="input/4" weight="-0.07928327185984907"/>
+ <Con from="input/5" weight="-6.464890009189099E-6"/>
+ <Con from="input/6" weight="-1.199903692230622E-7"/>
+ <Con from="input/7" weight="-0.04195390794332405"/>
+ <Con from="input/8" weight="0.0035174420069694523"/>
+ <Con from="input/9" weight="-0.05330010621383221"/>
+ <Con from="input/10" weight="0.004762965590637927"/>
+ <Con from="input/11" weight="2.6178690597832173E-4"/>
+ </Neuron>
+ <Neuron id="1/99" bias="0.018378574919933867">
+ <Con from="input/1" weight="-0.00114982780274558"/>
+ <Con from="input/2" weight="0.14382020734565418"/>
+ <Con from="input/3" weight="0.01959055178797041"/>
+ <Con from="input/4" weight="0.2269186961602934"/>
+ <Con from="input/5" weight="-0.02083014920870308"/>
+ <Con from="input/6" weight="-0.08182572917217269"/>
+ <Con from="input/7" weight="0.19294561231095464"/>
+ <Con from="input/8" weight="-0.21134998821099812"/>
+ <Con from="input/9" weight="-0.03648198217766381"/>
+ <Con from="input/10" weight="-0.1925521467086333"/>
+ <Con from="input/11" weight="-0.16136430669432078"/>
+ </Neuron>
+ <Neuron id="1/100" bias="0.057806229724191666">
+ <Con from="input/1" weight="-0.008200293801031861"/>
+ <Con from="input/2" weight="-0.01432474084334179"/>
+ <Con from="input/3" weight="8.364556495280334E-7"/>
+ <Con from="input/4" weight="-0.007356194935543671"/>
+ <Con from="input/5" weight="-0.07746675361426816"/>
+ <Con from="input/6" weight="0.05583501485619308"/>
+ <Con from="input/7" weight="-0.026577335974367757"/>
+ <Con from="input/8" weight="0.052402181409854025"/>
+ <Con from="input/9" weight="0.012740343285237432"/>
+ <Con from="input/10" weight="0.005587410058911182"/>
+ <Con from="input/11" weight="4.012854364869521E-4"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralLayer activationFunction="identity">
+ <Neuron id="2/1" bias="0.06537861438715241">
+ <Con from="1/1" weight="0.06089625351537137"/>
+ <Con from="1/2" weight="0.19247513945161993"/>
+ <Con from="1/3" weight="-0.15201529299746477"/>
+ <Con from="1/4" weight="-0.054455082942376415"/>
+ <Con from="1/5" weight="0.05582520336140908"/>
+ <Con from="1/6" weight="0.01554829293604816"/>
+ <Con from="1/7" weight="-0.22324731804628958"/>
+ <Con from="1/8" weight="0.05005416693008956"/>
+ <Con from="1/9" weight="0.2310570484168023"/>
+ <Con from="1/10" weight="0.09256091024072056"/>
+ <Con from="1/11" weight="-0.0019905702444812526"/>
+ <Con from="1/12" weight="-1.1396287075830049E-6"/>
+ <Con from="1/13" weight="-0.12724342389301407"/>
+ <Con from="1/14" weight="0.034544631309046635"/>
+ <Con from="1/15" weight="-1.6338991901311967E-6"/>
+ <Con from="1/16" weight="0.010395025279564032"/>
+ <Con from="1/17" weight="-0.09293455030659095"/>
+ <Con from="1/18" weight="0.08622369994167331"/>
+ <Con from="1/19" weight="0.07113834919650444"/>
+ <Con from="1/20" weight="0.007552660217511228"/>
+ <Con from="1/21" weight="-0.06874952402307052"/>
+ <Con from="1/22" weight="0.23271571905762956"/>
+ <Con from="1/23" weight="-0.0511731744804373"/>
+ <Con from="1/24" weight="0.036737510528877984"/>
+ <Con from="1/25" weight="0.07348581762548412"/>
+ <Con from="1/26" weight="6.726020470924027E-5"/>
+ <Con from="1/27" weight="-1.1260768404627E-5"/>
+ <Con from="1/28" weight="0.13149029200708964"/>
+ <Con from="1/29" weight="-0.036519325287438745"/>
+ <Con from="1/30" weight="0.11125587323515009"/>
+ <Con from="1/31" weight="-0.07436546202906673"/>
+ <Con from="1/32" weight="-0.1868472646611207"/>
+ <Con from="1/33" weight="-0.16500915577746642"/>
+ <Con from="1/34" weight="0.23806094945419415"/>
+ <Con from="1/35" weight="-0.09169389633162621"/>
+ <Con from="1/36" weight="0.11465274746833047"/>
+ <Con from="1/37" weight="-0.06439255212241753"/>
+ <Con from="1/38" weight="0.06473236957038989"/>
+ <Con from="1/39" weight="-0.0014858261510840832"/>
+ <Con from="1/40" weight="0.10847704690833314"/>
+ <Con from="1/41" weight="0.009459912467408786"/>
+ <Con from="1/42" weight="0.1253891339805427"/>
+ <Con from="1/43" weight="1.0988770370545215E-5"/>
+ <Con from="1/44" weight="-0.024288415651270753"/>
+ <Con from="1/45" weight="0.04290519751724819"/>
+ <Con from="1/46" weight="0.0030575083732025483"/>
+ <Con from="1/47" weight="-0.15912742046959608"/>
+ <Con from="1/48" weight="2.444708167752965E-6"/>
+ <Con from="1/49" weight="0.0836463727824661"/>
+ <Con from="1/50" weight="-0.06873369767148853"/>
+ <Con from="1/51" weight="0.0835449541524573"/>
+ <Con from="1/52" weight="0.026685382404797803"/>
+ <Con from="1/53" weight="0.016683005607570668"/>
+ <Con from="1/54" weight="0.06568354851966444"/>
+ <Con from="1/55" weight="-0.07839894009675233"/>
+ <Con from="1/56" weight="-0.04914934370287086"/>
+ <Con from="1/57" weight="0.12543068507605742"/>
+ <Con from="1/58" weight="0.0016910227686411318"/>
+ <Con from="1/59" weight="2.0420419524608114E-4"/>
+ <Con from="1/60" weight="-1.7969154171017242E-6"/>
+ <Con from="1/61" weight="0.0035981512405719577"/>
+ <Con from="1/62" weight="-0.13183035978351673"/>
+ <Con from="1/63" weight="-0.05768346661994895"/>
+ <Con from="1/64" weight="0.09889598130383635"/>
+ <Con from="1/65" weight="-0.04658112364097498"/>
+ <Con from="1/66" weight="-0.18236440109962315"/>
+ <Con from="1/67" weight="0.19745995602882435"/>
+ <Con from="1/68" weight="0.1312026440616039"/>
+ <Con from="1/69" weight="-0.12405339504943769"/>
+ <Con from="1/70" weight="5.2451124015065845E-6"/>
+ <Con from="1/71" weight="-0.0242136538395989"/>
+ <Con from="1/72" weight="-0.1189175514978303"/>
+ <Con from="1/73" weight="-0.19130795519890503"/>
+ <Con from="1/74" weight="-0.08438169751660321"/>
+ <Con from="1/75" weight="0.1191727912192244"/>
+ <Con from="1/76" weight="-0.17294320756964043"/>
+ <Con from="1/77" weight="0.1510873737411365"/>
+ <Con from="1/78" weight="0.09141106591925753"/>
+ <Con from="1/79" weight="-0.023537516428296563"/>
+ <Con from="1/80" weight="-0.23519812999716014"/>
+ <Con from="1/81" weight="0.34785841174297516"/>
+ <Con from="1/82" weight="-0.04694013187919052"/>
+ <Con from="1/83" weight="0.021582472964538184"/>
+ <Con from="1/84" weight="-0.003067411599820214"/>
+ <Con from="1/85" weight="0.0907844413433516"/>
+ <Con from="1/86" weight="0.05961869533428206"/>
+ <Con from="1/87" weight="0.06265028097731315"/>
+ <Con from="1/88" weight="0.16951004634119118"/>
+ <Con from="1/89" weight="-0.18912767659969856"/>
+ <Con from="1/90" weight="0.1751942050398166"/>
+ <Con from="1/91" weight="-0.1315313949047535"/>
+ <Con from="1/92" weight="-0.22617013378283202"/>
+ <Con from="1/93" weight="-0.09809448617933665"/>
+ <Con from="1/94" weight="-0.19986854410019297"/>
+ <Con from="1/95" weight="0.0892278654562924"/>
+ <Con from="1/96" weight="0.09789509297115"/>
+ <Con from="1/97" weight="-0.04904814162293805"/>
+ <Con from="1/98" weight="0.08343937082644237"/>
+ <Con from="1/99" weight="-0.0284396391527597"/>
+ <Con from="1/100" weight="-0.094162213762689"/>
+ </Neuron>
+ </NeuralLayer>
+ <NeuralOutputs>
+ <NeuralOutput outputNeuron="2/1">
+ <DerivedField optype="continuous" dataType="double">
+ <FieldRef field="type"/>
+ </DerivedField>
+ </NeuralOutput>
+ </NeuralOutputs>
+ </NeuralNetwork>
+</PMML>
diff --git a/pyproject.toml b/pyproject.toml
index e845a45..f100ce0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,9 +4,4 @@ build-backend = "setuptools.build_meta:__legacy__"
[tool.cibuildwheel]
before-build = "python -m pip install cython numpy"
-
-[tool.cibuildwheel.macos]
skip = "pp*"
-
-[tool.cibuildwheel.windows]
-skip = "pp*"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 0852b3e..827b2b8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,7 +5,7 @@ cached-property==1.5.2
pytest==7.3.1
pandas==2.0.1
pytest-cov==4.0.0
-Cython==0.29.34
+Cython==3.0.5
sphinx==6.2.0
numpydoc==1.5.0
sphinx-autoapi==2.1.0
diff --git a/setup.py b/setup.py
index afd5a27..bdb5564 100644
--- a/setup.py
+++ b/setup.py
@@ -162,7 +162,7 @@ def declare_cython_extension(extName, use_math=False, use_openmp=False, include_
#
# Note that my_ext_modules is just a list of Extension objects. We could add any C sources (not coming from Cython modules) here if needed.
# cythonize() just performs the Cython-level processing, and returns a list of Extension objects.
-my_ext_modules = cythonize(cython_ext_modules, include_path=my_include_dirs, gdb_debug=my_debug)
+my_ext_modules = cythonize(cython_ext_modules, include_path=my_include_dirs, gdb_debug=my_debug, compiler_directives={'legacy_implicit_noexcept': True})
#########################################################
diff --git a/sklearn_pmml_model/auto_detect/__init__.py b/sklearn_pmml_model/auto_detect/__init__.py
new file mode 100644
index 0000000..1f3c1d4
--- /dev/null
+++ b/sklearn_pmml_model/auto_detect/__init__.py
@@ -0,0 +1,14 @@
+"""
+The :mod:`sklearn.auto_detect` module implements methods to automatically
+detect the type of model from a PMML file.
+"""
+
+# License: BSD 2-Clause
+
+from .base import auto_detect_estimator, auto_detect_classifier, auto_detect_regressor
+
+__all__ = [
+ 'auto_detect_estimator',
+ 'auto_detect_classifier',
+ 'auto_detect_regressor',
+]
diff --git a/sklearn_pmml_model/auto_detect/base.py b/sklearn_pmml_model/auto_detect/base.py
new file mode 100644
index 0000000..592bb5a
--- /dev/null
+++ b/sklearn_pmml_model/auto_detect/base.py
@@ -0,0 +1,177 @@
+from sklearn_pmml_model.base import PMMLBaseEstimator
+from sklearn_pmml_model.datatypes import Category
+from sklearn_pmml_model.tree import PMMLTreeClassifier, PMMLTreeRegressor
+from sklearn_pmml_model.ensemble import PMMLForestClassifier, PMMLForestRegressor, PMMLGradientBoostingClassifier, \
+ PMMLGradientBoostingRegressor
+from sklearn_pmml_model.neural_network import PMMLMLPClassifier, PMMLMLPRegressor
+from sklearn_pmml_model.svm import PMMLSVC, PMMLSVR
+from sklearn_pmml_model.naive_bayes import PMMLGaussianNB
+from sklearn_pmml_model.linear_model import PMMLLogisticRegression, PMMLLinearRegression, PMMLRidgeClassifier, PMMLRidge
+from sklearn_pmml_model.neighbors import PMMLKNeighborsClassifier, PMMLKNeighborsRegressor
+
+
+def auto_detect_estimator(pmml, **kwargs):
+ """
+ Automatically detect and return the described estimator from PMML file.
+
+ Parameters
+ ----------
+ pmml : str, object
+ Filename or file object containing PMML data.
+
+ """
+ base = PMMLBaseEstimator(pmml=pmml)
+ target_field_name = base.target_field.attrib['name']
+ target_field_type = base.field_mapping[target_field_name][1]
+
+ if isinstance(target_field_type, Category) or target_field_type is str:
+ return auto_detect_classifier(pmml, **kwargs)
+ else:
+ return auto_detect_regressor(pmml, **kwargs)
+
+
+def auto_detect_classifier(pmml, **kwargs):
+ """
+ Automatically detect and return the described classifier from PMML file.
+
+ Parameters
+ ----------
+ pmml : str, object
+ Filename or file object containing PMML data.
+
+ """
+ if isinstance(pmml, str):
+ file = open(pmml, 'r')
+ else:
+ pmml.seek(0)
+ file = pmml
+
+ for line in file:
+ if '<Segmentation' in line:
+ clfs = [x for x in (detect_classifier(line) for line in file) if x is not None]
+ file.close()
+
+ if all(clf is PMMLTreeClassifier or clf is PMMLLogisticRegression for clf in clfs):
+ if 'multipleModelMethod="majorityVote"' in line or 'multipleModelMethod="average"' in line:
+ return PMMLForestClassifier(pmml=pmml, **kwargs)
+ if 'multipleModelMethod="modelChain"' in line:
+ return PMMLGradientBoostingClassifier(pmml=pmml, **kwargs)
+
+ raise Exception('Unsupported PMML classifier: invalid segmentation.')
+
+ clf = detect_classifier(line)
+ if clf:
+ file.close()
+ return clf(pmml, **kwargs)
+
+ file.close()
+ raise Exception('Unsupported PMML classifier.')
+
+
+def auto_detect_regressor(pmml, **kwargs):
+ """
+ Automatically detect and return the described regressor from PMML file.
+
+ Parameters
+ ----------
+ pmml : str, object
+ Filename or file object containing PMML data.
+
+ """
+ if isinstance(pmml, str):
+ file = open(pmml, 'r')
+ else:
+ pmml.seek(0)
+ file = pmml
+
+ for line in file:
+ if '<Segmentation' in line:
+ regs = [x for x in (detect_regressor(line) for line in file) if x is not None]
+ file.close()
+
+ if all(reg is PMMLTreeRegressor or reg is PMMLLinearRegression for reg in regs):
+ if 'multipleModelMethod="majorityVote"' in line or 'multipleModelMethod="average"' in line:
+ return PMMLForestRegressor(pmml=pmml, **kwargs)
+ if 'multipleModelMethod="sum"' in line:
+ return PMMLGradientBoostingRegressor(pmml=pmml, **kwargs)
+
+ raise Exception('Unsupported PMML regressor: invalid segmentation.')
+
+ reg = detect_regressor(line)
+ if reg:
+ file.close()
+ return reg(pmml, **kwargs)
+
+ file.close()
+ raise Exception('Unsupported PMML regressor.')
+
+
+def detect_classifier(line):
+ """
+ Detect the type of classifier in line if present.
+
+ Parameters
+ ----------
+ line : str
+ Line of a PMML file as a string.
+
+ pmml : str, object
+ Filename or file object containing PMML data.
+
+ """
+ if '<TreeModel' in line:
+ return PMMLTreeClassifier
+
+ if '<NeuralNetwork' in line:
+ return PMMLMLPClassifier
+
+ if '<SupportVectorMachineModel' in line:
+ return PMMLSVC
+
+ if '<NaiveBayesModel' in line:
+ return PMMLGaussianNB
+
+ if '<GeneralRegressionModel' in line:
+ return PMMLRidgeClassifier
+
+ if '<RegressionModel' in line:
+ return PMMLLogisticRegression
+
+ if '<NearestNeighborModel' in line:
+ return PMMLKNeighborsClassifier
+
+ return None
+
+
+def detect_regressor(line):
+ """
+ Detect the type of regressor in line if present.
+
+ Parameters
+ ----------
+ line : str
+ Line of a PMML file as a string.
+
+ pmml : str, object
+ Filename or file object containing PMML data.
+
+ """
+ if '<TreeModel' in line:
+ return PMMLTreeRegressor
+
+ if '<NeuralNetwork' in line:
+ return PMMLMLPRegressor
+
+ if '<SupportVectorMachineModel' in line:
+ return PMMLSVR
+
+ if '<GeneralRegressionModel' in line:
+ return PMMLRidge
+
+ if '<RegressionModel' in line:
+ return PMMLLinearRegression
+
+ if '<NearestNeighborModel' in line:
+ return PMMLKNeighborsRegressor
+
+ return None
diff --git a/sklearn_pmml_model/tree/_criterion.pxd b/sklearn_pmml_model/tree/_criterion.pxd
index 7d28024..7e8e06c 100644
--- a/sklearn_pmml_model/tree/_criterion.pxd
+++ b/sklearn_pmml_model/tree/_criterion.pxd
@@ -57,10 +57,10 @@ cdef class Criterion:
# Methods
cdef int init(self, DOUBLE_t* y, SIZE_t y_stride, DOUBLE_t* sample_weight,
double weighted_n_samples, SIZE_t* samples, SIZE_t start,
- SIZE_t end) nogil except -1
- cdef int reset(self) nogil except -1
- cdef int reverse_reset(self) nogil except -1
- cdef int update(self, SIZE_t new_pos) nogil except -1
+ SIZE_t end) except -1 nogil
+ cdef int reset(self) except -1 nogil
+ cdef int reverse_reset(self) except -1 nogil
+ cdef int update(self, SIZE_t new_pos) except -1 nogil
cdef double node_impurity(self) nogil
cdef void children_impurity(self, double* impurity_left,
double* impurity_right) nogil
diff --git a/sklearn_pmml_model/tree/_criterion.pyx b/sklearn_pmml_model/tree/_criterion.pyx
index cceb358..4b7d4ee 100644
--- a/sklearn_pmml_model/tree/_criterion.pyx
+++ b/sklearn_pmml_model/tree/_criterion.pyx
@@ -54,7 +54,7 @@ cdef class Criterion:
cdef int init(self, DOUBLE_t* y, SIZE_t y_stride, DOUBLE_t* sample_weight,
double weighted_n_samples, SIZE_t* samples, SIZE_t start,
- SIZE_t end) nogil except -1:
+ SIZE_t end) except -1 nogil:
"""Placeholder for a method which will initialize the criterion.
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -83,7 +83,7 @@ cdef class Criterion:
pass
- cdef int reset(self) nogil except -1:
+ cdef int reset(self) except -1 nogil:
"""Reset the criterion at pos=start.
This method must be implemented by the subclass.
@@ -91,14 +91,14 @@ cdef class Criterion:
pass
- cdef int reverse_reset(self) nogil except -1:
+ cdef int reverse_reset(self) except -1 nogil:
"""Reset the criterion at pos=end.
This method must be implemented by the subclass.
"""
pass
- cdef int update(self, SIZE_t new_pos) nogil except -1:
+ cdef int update(self, SIZE_t new_pos) except -1 nogil:
"""Updated statistics by moving samples[pos:new_pos] to the left child.
This updates the collected statistics by moving samples[pos:new_pos]
@@ -284,7 +284,7 @@ cdef class ClassificationCriterion(Criterion):
cdef int init(self, DOUBLE_t* y, SIZE_t y_stride,
DOUBLE_t* sample_weight, double weighted_n_samples,
- SIZE_t* samples, SIZE_t start, SIZE_t end) nogil except -1:
+ SIZE_t* samples, SIZE_t start, SIZE_t end) except -1 nogil:
"""Initialize the criterion at node samples[start:end] and
children samples[start:start] and samples[start:end].
@@ -353,7 +353,7 @@ cdef class ClassificationCriterion(Criterion):
self.reset()
return 0
- cdef int reset(self) nogil except -1:
+ cdef int reset(self) except -1 nogil:
"""Reset the criterion at pos=start
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -380,7 +380,7 @@ cdef class ClassificationCriterion(Criterion):
sum_right += self.sum_stride
return 0
- cdef int reverse_reset(self) nogil except -1:
+ cdef int reverse_reset(self) except -1 nogil:
"""Reset the criterion at pos=end
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -407,7 +407,7 @@ cdef class ClassificationCriterion(Criterion):
sum_right += self.sum_stride
return 0
- cdef int update(self, SIZE_t new_pos) nogil except -1:
+ cdef int update(self, SIZE_t new_pos) except -1 nogil:
"""Updated statistics by moving samples[pos:new_pos] to the left child.
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -754,7 +754,7 @@ cdef class RegressionCriterion(Criterion):
cdef int init(self, DOUBLE_t* y, SIZE_t y_stride, DOUBLE_t* sample_weight,
double weighted_n_samples, SIZE_t* samples, SIZE_t start,
- SIZE_t end) nogil except -1:
+ SIZE_t end) except -1 nogil:
"""Initialize the criterion at node samples[start:end] and
children samples[start:start] and samples[start:end]."""
# Initialize fields
@@ -796,7 +796,7 @@ cdef class RegressionCriterion(Criterion):
self.reset()
return 0
- cdef int reset(self) nogil except -1:
+ cdef int reset(self) except -1 nogil:
"""Reset the criterion at pos=start."""
cdef SIZE_t n_bytes = self.n_outputs * sizeof(double)
memset(self.sum_left, 0, n_bytes)
@@ -807,7 +807,7 @@ cdef class RegressionCriterion(Criterion):
self.pos = self.start
return 0
- cdef int reverse_reset(self) nogil except -1:
+ cdef int reverse_reset(self) except -1 nogil:
"""Reset the criterion at pos=end."""
cdef SIZE_t n_bytes = self.n_outputs * sizeof(double)
memset(self.sum_right, 0, n_bytes)
@@ -818,7 +818,7 @@ cdef class RegressionCriterion(Criterion):
self.pos = self.end
return 0
- cdef int update(self, SIZE_t new_pos) nogil except -1:
+ cdef int update(self, SIZE_t new_pos) except -1 nogil:
"""Updated statistics by moving samples[pos:new_pos] to the left."""
cdef double* sum_left = self.sum_left
@@ -1047,7 +1047,7 @@ cdef class MAE(RegressionCriterion):
cdef int init(self, DOUBLE_t* y, SIZE_t y_stride, DOUBLE_t* sample_weight,
double weighted_n_samples, SIZE_t* samples, SIZE_t start,
- SIZE_t end) nogil except -1:
+ SIZE_t end) except -1 nogil:
"""Initialize the criterion at node samples[start:end] and
children samples[start:start] and samples[start:end]."""
@@ -1099,7 +1099,7 @@ cdef class MAE(RegressionCriterion):
self.reset()
return 0
- cdef int reset(self) nogil except -1:
+ cdef int reset(self) except -1 nogil:
"""Reset the criterion at pos=start
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -1131,7 +1131,7 @@ cdef class MAE(RegressionCriterion):
weight)
return 0
- cdef int reverse_reset(self) nogil except -1:
+ cdef int reverse_reset(self) except -1 nogil:
"""Reset the criterion at pos=end
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -1160,7 +1160,7 @@ cdef class MAE(RegressionCriterion):
weight)
return 0
- cdef int update(self, SIZE_t new_pos) nogil except -1:
+ cdef int update(self, SIZE_t new_pos) except -1 nogil:
"""Updated statistics by moving samples[pos:new_pos] to the left
Returns -1 in case of failure to allocate memory (and raise MemoryError)
diff --git a/sklearn_pmml_model/tree/_splitter.pxd b/sklearn_pmml_model/tree/_splitter.pxd
index 928fc93..12fc47f 100644
--- a/sklearn_pmml_model/tree/_splitter.pxd
+++ b/sklearn_pmml_model/tree/_splitter.pxd
@@ -86,12 +86,12 @@ cdef class Splitter:
np.ndarray X_idx_sorted=*) except -1
cdef int node_reset(self, SIZE_t start, SIZE_t end,
- double* weighted_n_node_samples) nogil except -1
+ double* weighted_n_node_samples) except -1 nogil
cdef int node_split(self,
double impurity, # Impurity of the node
SplitRecord* split,
- SIZE_t* n_constant_features) nogil except -1
+ SIZE_t* n_constant_features) except -1 nogil
cdef void node_value(self, double* dest) nogil
diff --git a/sklearn_pmml_model/tree/_splitter.pyx b/sklearn_pmml_model/tree/_splitter.pyx
index 6a1535f..5d06747 100644
--- a/sklearn_pmml_model/tree/_splitter.pyx
+++ b/sklearn_pmml_model/tree/_splitter.pyx
@@ -46,7 +46,7 @@ cdef DTYPE_t FEATURE_THRESHOLD = 1e-7
# in SparseSplitter
cdef DTYPE_t EXTRACT_NNZ_SWITCH = 0.1
-cdef inline void _init_split(SplitRecord* self, SIZE_t start_pos) nogil:
+cdef inline void _init_split(SplitRecord* self, SIZE_t start_pos) noexcept nogil:
self.impurity_left = INFINITY
self.impurity_right = INFINITY
self.pos = start_pos
@@ -213,7 +213,7 @@ cdef class Splitter:
return 0
cdef int node_reset(self, SIZE_t start, SIZE_t end,
- double* weighted_n_node_samples) nogil except -1:
+ double* weighted_n_node_samples) except -1 nogil:
"""Reset splitter on node samples[start:end].
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -244,7 +244,7 @@ cdef class Splitter:
return 0
cdef int node_split(self, double impurity, SplitRecord* split,
- SIZE_t* n_constant_features) nogil except -1:
+ SIZE_t* n_constant_features) except -1 nogil:
"""Find the best split on node samples[start:end].
This is a placeholder method. The majority of computation will be done
@@ -383,7 +383,7 @@ cdef class BestSplitter(BaseDenseSplitter):
cdef int node_split(self, double impurity, SplitRecord* split,
- SIZE_t* n_constant_features) nogil except -1:
+ SIZE_t* n_constant_features) except -1 nogil:
"""Find the best split on node samples[start:end]
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -679,7 +679,7 @@ cdef class BestSplitter(BaseDenseSplitter):
# Sort n-element arrays pointed to by Xf and samples, simultaneously,
# by the values in Xf. Algorithm: Introsort (Musser, SP&E, 1997).
-cdef inline void sort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
+cdef inline void sort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) noexcept nogil:
if n == 0:
return
cdef int maxd = 2 * <int>log(n)
@@ -687,7 +687,7 @@ cdef inline void sort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
cdef inline void swap(DTYPE_t* Xf, SIZE_t* samples,
- SIZE_t i, SIZE_t j) nogil:
+ SIZE_t i, SIZE_t j) noexcept nogil:
# Helper for sort
Xf[i], Xf[j] = Xf[j], Xf[i]
samples[i], samples[j] = samples[j], samples[i]
@@ -716,7 +716,7 @@ cdef inline DTYPE_t median3(DTYPE_t* Xf, SIZE_t n) nogil:
# Introsort with median of 3 pivot selection and 3-way partition function
# (robust to repeated elements, e.g. lots of zero features).
cdef void introsort(DTYPE_t* Xf, SIZE_t *samples,
- SIZE_t n, int maxd) nogil:
+ SIZE_t n, int maxd) noexcept nogil:
cdef DTYPE_t pivot
cdef SIZE_t i, l, r
@@ -749,7 +749,7 @@ cdef void introsort(DTYPE_t* Xf, SIZE_t *samples,
cdef inline void sift_down(DTYPE_t* Xf, SIZE_t* samples,
- SIZE_t start, SIZE_t end) nogil:
+ SIZE_t start, SIZE_t end) noexcept nogil:
# Restore heap order in Xf[start:end] by moving the max element to start.
cdef SIZE_t child, maxind, root
@@ -771,7 +771,7 @@ cdef inline void sift_down(DTYPE_t* Xf, SIZE_t* samples,
root = maxind
-cdef void heapsort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) nogil:
+cdef void heapsort(DTYPE_t* Xf, SIZE_t* samples, SIZE_t n) noexcept nogil:
cdef SIZE_t start, end
# heapify
@@ -802,7 +802,7 @@ cdef class RandomSplitter(BaseDenseSplitter):
self.presort), self.__getstate__())
cdef int node_split(self, double impurity, SplitRecord* split,
- SIZE_t* n_constant_features) nogil except -1:
+ SIZE_t* n_constant_features) except -1 nogil:
"""Find the best random split on node samples[start:end]
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -1209,7 +1209,7 @@ cdef int compare_SIZE_t(const void* a, const void* b) nogil:
cdef inline void binary_search(INT32_t* sorted_array,
INT32_t start, INT32_t end,
SIZE_t value, SIZE_t* index,
- INT32_t* new_start) nogil:
+ INT32_t* new_start) noexcept nogil:
"""Return the index of value in the sorted array.
If not found, return -1. new_start is the last pivot + 1
@@ -1241,7 +1241,7 @@ cdef inline void extract_nnz_index_to_samples(INT32_t* X_indices,
SIZE_t* index_to_samples,
DTYPE_t* Xf,
SIZE_t* end_negative,
- SIZE_t* start_positive) nogil:
+ SIZE_t* start_positive) noexcept nogil:
"""Extract and partition values for a feature using index_to_samples.
Complexity is O(indptr_end - indptr_start).
@@ -1283,7 +1283,7 @@ cdef inline void extract_nnz_binary_search(INT32_t* X_indices,
SIZE_t* end_negative,
SIZE_t* start_positive,
SIZE_t* sorted_samples,
- bint* is_samples_sorted) nogil:
+ bint* is_samples_sorted) noexcept nogil:
"""Extract and partition values for a given feature using binary search.
If n_samples = end - start and n_indices = indptr_end - indptr_start,
@@ -1344,7 +1344,7 @@ cdef inline void extract_nnz_binary_search(INT32_t* X_indices,
cdef inline void sparse_swap(SIZE_t* index_to_samples, SIZE_t* samples,
- SIZE_t pos_1, SIZE_t pos_2) nogil:
+ SIZE_t pos_1, SIZE_t pos_2) noexcept nogil:
"""Swap sample pos_1 and pos_2 preserving sparse invariant."""
samples[pos_1], samples[pos_2] = samples[pos_2], samples[pos_1]
index_to_samples[samples[pos_1]] = pos_1
@@ -1363,7 +1363,7 @@ cdef class BestSparseSplitter(BaseSparseSplitter):
self.presort), self.__getstate__())
cdef int node_split(self, double impurity, SplitRecord* split,
- SIZE_t* n_constant_features) nogil except -1:
+ SIZE_t* n_constant_features) except -1 nogil:
"""Find the best split on node samples[start:end], using sparse features
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -1595,7 +1595,7 @@ cdef class RandomSparseSplitter(BaseSparseSplitter):
self.presort), self.__getstate__())
cdef int node_split(self, double impurity, SplitRecord* split,
- SIZE_t* n_constant_features) nogil except -1:
+ SIZE_t* n_constant_features) except -1 nogil:
"""Find a random split on node samples[start:end], using sparse features
Returns -1 in case of failure to allocate memory (and raise MemoryError)
diff --git a/sklearn_pmml_model/tree/_tree.pxd b/sklearn_pmml_model/tree/_tree.pxd
index 08ac5f3..21e7d05 100644
--- a/sklearn_pmml_model/tree/_tree.pxd
+++ b/sklearn_pmml_model/tree/_tree.pxd
@@ -62,9 +62,9 @@ cdef class Tree:
cdef SIZE_t _add_node(self, SIZE_t parent, bint is_left, bint is_leaf,
SIZE_t feature, SplitValue split_value, double impurity,
SIZE_t n_node_samples,
- double weighted_n_samples) nogil except -1
- cdef int _resize(self, SIZE_t capacity) nogil except -1
- cdef int _resize_c(self, SIZE_t capacity=*) nogil except -1
+ double weighted_n_samples) except -1 nogil
+ cdef int _resize(self, SIZE_t capacity) except -1 nogil
+ cdef int _resize_c(self, SIZE_t capacity=*) except -1 nogil
cdef np.ndarray _get_value_ndarray(self)
cdef np.ndarray _get_node_ndarray(self)
diff --git a/sklearn_pmml_model/tree/_tree.pyx b/sklearn_pmml_model/tree/_tree.pyx
index d49536c..ca1be98 100644
--- a/sklearn_pmml_model/tree/_tree.pyx
+++ b/sklearn_pmml_model/tree/_tree.pyx
@@ -47,6 +47,7 @@ cdef extern from "numpy/arrayobject.h":
int nd, np.npy_intp* dims,
np.npy_intp* strides,
void* data, int flags, object obj)
+ int PyArray_SetBaseObject(np.ndarray arr, PyObject* obj)
# =============================================================================
# Types and constants
@@ -179,7 +180,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder):
cdef int init_capacity
if tree.max_depth <= 10:
- init_capacity = (2 ** (tree.max_depth + 1)) - 1
+ init_capacity = <int> (2 ** (tree.max_depth + 1)) - 1
else:
init_capacity = 2047
@@ -301,7 +302,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder):
# Best first builder ----------------------------------------------------------
cdef inline int _add_to_frontier(PriorityHeapRecord* rec,
- PriorityHeap frontier) nogil except -1:
+ PriorityHeap frontier) except -1 nogil:
"""Adds record ``rec`` to the priority queue ``frontier``
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -456,7 +457,7 @@ cdef class BestFirstTreeBuilder(TreeBuilder):
SIZE_t start, SIZE_t end, double impurity,
bint is_first, bint is_left, Node* parent,
SIZE_t depth,
- PriorityHeapRecord* res) nogil except -1:
+ PriorityHeapRecord* res) except -1 nogil:
"""Adds node w/ partition ``[start, end)`` to the frontier. """
cdef SplitRecord split
cdef SIZE_t node_id
@@ -794,7 +795,7 @@ cdef class Tree:
value = memcpy(self.value, (<np.ndarray> value_ndarray).data,
self.capacity * self.value_stride * sizeof(double))
- cdef int _resize(self, SIZE_t capacity) nogil except -1:
+ cdef int _resize(self, SIZE_t capacity) except -1 nogil:
"""Resize all inner arrays to `capacity`, if `capacity` == -1, then
double the size of the inner arrays.
@@ -808,7 +809,7 @@ cdef class Tree:
# XXX using (size_t)(-1) is ugly, but SIZE_MAX is not available in C89
# (i.e., older MSVC).
- cdef int _resize_c(self, SIZE_t capacity=<SIZE_t>(-1)) nogil except -1:
+ cdef int _resize_c(self, SIZE_t capacity=<SIZE_t>(-1)) except -1 nogil:
"""Guts of _resize
Returns -1 in case of failure to allocate memory (and raise MemoryError)
@@ -842,7 +843,7 @@ cdef class Tree:
cdef SIZE_t _add_node(self, SIZE_t parent, bint is_left, bint is_leaf,
SIZE_t feature, SplitValue split_value, double impurity,
SIZE_t n_node_samples,
- double weighted_n_node_samples) nogil except -1:
+ double weighted_n_node_samples) except -1 nogil:
"""Add a node to the tree.
The new node registers itself as the child of its parent.
@@ -1253,7 +1254,8 @@ cdef class Tree:
cdef np.ndarray arr
arr = np.PyArray_SimpleNewFromData(3, shape, np.NPY_DOUBLE, self.value)
Py_INCREF(self)
- arr.base = <PyObject*> self
+ if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
+ raise ValueError("Can't initialize array.")
return arr
cdef np.ndarray _get_node_ndarray(self):
@@ -1273,5 +1275,6 @@ cdef class Tree:
strides, <void*> self.nodes,
np.NPY_DEFAULT, None)
Py_INCREF(self)
- arr.base = <PyObject*> self
+ if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
+ raise ValueError("Can't initialize array.")
return arr
diff --git a/sklearn_pmml_model/tree/_utils.pxd b/sklearn_pmml_model/tree/_utils.pxd
index 6c641e9..5862a4a 100644
--- a/sklearn_pmml_model/tree/_utils.pxd
+++ b/sklearn_pmml_model/tree/_utils.pxd
@@ -98,7 +98,7 @@ ctypedef fused realloc_ptr:
(INT32_t*)
(UINT32_t*)
-cdef realloc_ptr safe_realloc(realloc_ptr* p, size_t nelems, size_t elem_bytes) nogil except *
+cdef realloc_ptr safe_realloc(realloc_ptr* p, size_t nelems, size_t elem_bytes) except * nogil
cdef np.ndarray sizet_ptr_to_ndarray(SIZE_t* data, SIZE_t size)
@@ -113,7 +113,7 @@ cdef double rand_uniform(double low, double high,
UINT32_t* random_state) nogil
-cdef double log(double x) nogil
+cdef double log(double x) noexcept nogil
cdef void setup_cat_cache(UINT32_t* cachebits, UINT64_t cat_split,
@@ -146,7 +146,7 @@ cdef class Stack:
cdef bint is_empty(self) nogil
cdef int push(self, SIZE_t start, SIZE_t end, SIZE_t depth, SIZE_t parent,
bint is_left, double impurity,
- SIZE_t n_constant_features) nogil except -1
+ SIZE_t n_constant_features) except -1 nogil
cdef int pop(self, StackRecord* res) nogil
@@ -178,7 +178,7 @@ cdef class PriorityHeap:
cdef int push(self, SIZE_t node_id, SIZE_t start, SIZE_t end, SIZE_t pos,
SIZE_t depth, bint is_leaf, double improvement,
double impurity, double impurity_left,
- double impurity_right) nogil except -1
+ double impurity_right) except -1 nogil
cdef int pop(self, PriorityHeapRecord* res) nogil
# =============================================================================
@@ -196,9 +196,9 @@ cdef class WeightedPQueue:
cdef WeightedPQueueRecord* array_
cdef bint is_empty(self) nogil
- cdef int reset(self) nogil except -1
+ cdef int reset(self) except -1 nogil
cdef SIZE_t size(self) nogil
- cdef int push(self, DOUBLE_t data, DOUBLE_t weight) nogil except -1
+ cdef int push(self, DOUBLE_t data, DOUBLE_t weight) except -1 nogil
cdef int remove(self, DOUBLE_t data, DOUBLE_t weight) nogil
cdef int pop(self, DOUBLE_t* data, DOUBLE_t* weight) nogil
cdef int peek(self, DOUBLE_t* data, DOUBLE_t* weight) nogil
@@ -219,8 +219,8 @@ cdef class WeightedMedianCalculator:
# = w[0] + w[1] + ... + w[k-1]
cdef SIZE_t size(self) nogil
- cdef int push(self, DOUBLE_t data, DOUBLE_t weight) nogil except -1
- cdef int reset(self) nogil except -1
+ cdef int push(self, DOUBLE_t data, DOUBLE_t weight) except -1 nogil
+ cdef int reset(self) except -1 nogil
cdef int update_median_parameters_post_push(
self, DOUBLE_t data, DOUBLE_t weight,
DOUBLE_t original_median) nogil
@@ -229,4 +229,4 @@ cdef class WeightedMedianCalculator:
cdef int update_median_parameters_post_remove(
self, DOUBLE_t data, DOUBLE_t weight,
DOUBLE_t original_median) nogil
- cdef DOUBLE_t get_median(self) nogil
+ cdef DOUBLE_t get_median(self) noexcept nogil
diff --git a/sklearn_pmml_model/tree/_utils.pyx b/sklearn_pmml_model/tree/_utils.pyx
index e0e246c..4b95658 100644
--- a/sklearn_pmml_model/tree/_utils.pyx
+++ b/sklearn_pmml_model/tree/_utils.pyx
@@ -25,7 +25,7 @@ np.import_array()
# Helper functions
# =============================================================================
-cdef realloc_ptr safe_realloc(realloc_ptr* p, size_t nelems, size_t nbytes_elem) nogil except *:
+cdef realloc_ptr safe_realloc(realloc_ptr* p, size_t nelems, size_t nbytes_elem) except * nogil:
# sizeof(realloc_ptr[0]) would be more like idiomatic C, but causes Cython
# 0.20.1 to crash.
cdef size_t nbytes = nelems * nbytes_elem
@@ -170,7 +170,7 @@ cdef class Stack:
cdef int push(self, SIZE_t start, SIZE_t end, SIZE_t depth, SIZE_t parent,
bint is_left, double impurity,
- SIZE_t n_constant_features) nogil except -1:
+ SIZE_t n_constant_features) except -1 nogil:
"""Push a new element onto the stack.
Return -1 in case of failure to allocate memory (and raise MemoryError)
@@ -286,7 +286,7 @@ cdef class PriorityHeap:
cdef int push(self, SIZE_t node_id, SIZE_t start, SIZE_t end, SIZE_t pos,
SIZE_t depth, bint is_leaf, double improvement,
double impurity, double impurity_left,
- double impurity_right) nogil except -1:
+ double impurity_right) except -1 nogil:
"""Push record on the priority heap.
Return -1 in case of failure to allocate memory (and raise MemoryError)
@@ -374,7 +374,7 @@ cdef class WeightedPQueue:
def __dealloc__(self):
free(self.array_)
- cdef int reset(self) nogil except -1:
+ cdef int reset(self) except -1 nogil:
"""Reset the WeightedPQueue to its state at construction
Return -1 in case of failure to allocate memory (and raise MemoryError)
@@ -391,7 +391,7 @@ cdef class WeightedPQueue:
cdef SIZE_t size(self) nogil:
return self.array_ptr
- cdef int push(self, DOUBLE_t data, DOUBLE_t weight) nogil except -1:
+ cdef int push(self, DOUBLE_t data, DOUBLE_t weight) except -1 nogil:
"""Push record on the array.
Return -1 in case of failure to allocate memory (and raise MemoryError)
@@ -549,7 +549,7 @@ cdef class WeightedMedianCalculator:
WeightedMedianCalculator"""
return self.samples.size()
- cdef int reset(self) nogil except -1:
+ cdef int reset(self) except -1 nogil:
"""Reset the WeightedMedianCalculator to its state at construction
Return -1 in case of failure to allocate memory (and raise MemoryError)
@@ -563,7 +563,7 @@ cdef class WeightedMedianCalculator:
self.sum_w_0_k = 0
return 0
- cdef int push(self, DOUBLE_t data, DOUBLE_t weight) nogil except -1:
+ cdef int push(self, DOUBLE_t data, DOUBLE_t weight) except -1 nogil:
"""Push a value and its associated weight to the WeightedMedianCalculator
Return -1 in case of failure to allocate memory (and raise MemoryError)
diff --git a/sklearn_pmml_model/tree/quad_tree.pxd b/sklearn_pmml_model/tree/quad_tree.pxd
index 7c146ac..89ffcd2 100644
--- a/sklearn_pmml_model/tree/quad_tree.pxd
+++ b/sklearn_pmml_model/tree/quad_tree.pxd
@@ -72,7 +72,7 @@ cdef class _QuadTree:
# Point insertion methods
cdef int insert_point(self, DTYPE_t[3] point, SIZE_t point_index,
- SIZE_t cell_id=*) nogil except -1
+ SIZE_t cell_id=*) except -1 nogil
cdef SIZE_t _insert_point_in_new_child(self, DTYPE_t[3] point, Cell* cell,
SIZE_t point_index, SIZE_t size=*
) nogil
@@ -81,8 +81,8 @@ cdef class _QuadTree:
# Create a summary of the Tree compare to a query point
cdef long summarize(self, DTYPE_t[3] point, DTYPE_t* results,
- float squared_theta=*, int cell_id=*, long idx=*
- ) nogil
+ float squared_theta=*, SIZE_t cell_id=*, long idx=*
+ ) noexcept nogil
# Internal cell initialization methods
cdef void _init_cell(self, Cell* cell, SIZE_t parent, SIZE_t depth) nogil
@@ -91,10 +91,10 @@ cdef class _QuadTree:
# Private methods
cdef int _check_point_in_cell(self, DTYPE_t[3] point, Cell* cell
- ) nogil except -1
+ ) except -1 nogil
# Private array manipulation to manage the ``cells`` array
- cdef int _resize(self, SIZE_t capacity) nogil except -1
- cdef int _resize_c(self, SIZE_t capacity=*) nogil except -1
- cdef int _get_cell(self, DTYPE_t[3] point, SIZE_t cell_id=*) nogil except -1
+ cdef int _resize(self, SIZE_t capacity) except -1 nogil
+ cdef int _resize_c(self, SIZE_t capacity=*) except -1 nogil
+ cdef int _get_cell(self, DTYPE_t[3] point, SIZE_t cell_id=*) except -1 nogil
cdef np.ndarray _get_cell_ndarray(self)
diff --git a/sklearn_pmml_model/tree/quad_tree.pyx b/sklearn_pmml_model/tree/quad_tree.pyx
index b2b8fa6..dceb43a 100644
--- a/sklearn_pmml_model/tree/quad_tree.pyx
+++ b/sklearn_pmml_model/tree/quad_tree.pyx
@@ -27,6 +27,7 @@ cdef extern from "numpy/arrayobject.h":
int nd, np.npy_intp* dims,
np.npy_intp* strides,
void* data, int flags, object obj)
+ int PyArray_SetBaseObject(np.ndarray arr, PyObject* obj)
# XXX using (size_t)(-1) is ugly, but SIZE_MAX is not available in C89
@@ -78,7 +79,7 @@ cdef class _QuadTree:
# Parameters of the tree
self.n_dimensions = n_dimensions
self.verbose = verbose
- self.n_cells_per_cell = 2 ** self.n_dimensions
+ self.n_cells_per_cell = <int> (2 ** self.n_dimensions)
# Inner structures
self.max_depth = 0
@@ -138,7 +139,7 @@ cdef class _QuadTree:
self._resize(capacity=self.cell_count)
cdef int insert_point(self, DTYPE_t[3] point, SIZE_t point_index,
- SIZE_t cell_id=0) nogil except -1:
+ SIZE_t cell_id=0) except -1 nogil:
"""Insert a point in the QuadTree."""
cdef int ax
cdef DTYPE_t n_frac
@@ -327,7 +328,7 @@ cdef class _QuadTree:
self.cell_count += 1
cdef int _check_point_in_cell(self, DTYPE_t[3] point, Cell* cell
- ) nogil except -1:
+ ) except -1 nogil:
"""Check that the given point is in the cell boundaries."""
if self.verbose >= 50:
@@ -395,7 +396,7 @@ cdef class _QuadTree:
cdef long summarize(self, DTYPE_t[3] point, DTYPE_t* results,
float squared_theta=.5, SIZE_t cell_id=0, long idx=0
- ) nogil:
+ ) noexcept nogil:
"""Summarize the tree compared to a query point.
Input arguments
@@ -486,7 +487,7 @@ cdef class _QuadTree:
return self._get_cell(query_pt, 0)
cdef int _get_cell(self, DTYPE_t[3] point, SIZE_t cell_id=0
- ) nogil except -1:
+ ) except -1 nogil:
"""guts of get_cell.
Return the id of the cell containing the query point or raise ValueError
@@ -576,10 +577,11 @@ cdef class _QuadTree:
strides, <void*> self.cells,
np.NPY_DEFAULT, None)
Py_INCREF(self)
- arr.base = <PyObject*> self
+ if PyArray_SetBaseObject(arr, <PyObject*> self) < 0:
+ raise ValueError("Can't intialize array!")
return arr
- cdef int _resize(self, SIZE_t capacity) nogil except -1:
+ cdef int _resize(self, SIZE_t capacity) except -1 nogil:
"""Resize all inner arrays to `capacity`, if `capacity` == -1, then
double the size of the inner arrays.
@@ -591,7 +593,7 @@ cdef class _QuadTree:
with gil:
raise MemoryError()
- cdef int _resize_c(self, SIZE_t capacity=DEFAULT) nogil except -1:
+ cdef int _resize_c(self, SIZE_t capacity=DEFAULT) except -1 nogil:
"""Guts of _resize
Returns -1 in case of failure to allocate memory (and raise MemoryError)
| Create generalised model which autodetects model type
I think the most common use case for this library would be to quickly load a PMML model file with minimal further configuration. This is currently not possible as it is required to choose which type of model you are running, and instantiate that model. To make the process easier, it would be great to have a `PMMLBaseEstimator.autoDetect(pmml)` method which runs some checks in order to determine if a model is supported, and if so return the parsed model in one call.
| 2023-10-31T16:33:19 | 0.0 | [] | [] |
|||
iamDecode/sklearn-pmml-model | iamDecode__sklearn-pmml-model-36 | 056fab1ed7076b513a3199af3395447252029800 | diff --git a/sklearn_pmml_model/linear_model/implementations.py b/sklearn_pmml_model/linear_model/implementations.py
index a873236..2a52ce7 100644
--- a/sklearn_pmml_model/linear_model/implementations.py
+++ b/sklearn_pmml_model/linear_model/implementations.py
@@ -87,14 +87,32 @@ def __init__(self, pmml):
# Import coefficients and intercepts
model = self.root.find('RegressionModel')
-
- if model is None:
- raise Exception('PMML model does not contain RegressionModel.')
-
- tables = [
- table for table in model.findall('RegressionTable')
- if table.find('NumericPredictor') is not None
- ]
+ mining_model = self.root.find('MiningModel')
+ tables = []
+
+ if mining_model is not None and self.n_classes_ > 2:
+ self.multi_class = 'ovr'
+ segmentation = mining_model.find('Segmentation')
+
+ if segmentation.get('multipleModelMethod') not in ['modelChain']:
+ raise Exception('PMML model for multi-class logistic regression should use modelChain method.')
+
+ # Parse segments
+ segments = segmentation.findall('Segment')
+ valid_segments = [segment for segment in segments if segment.find('True') is not None]
+ models = [segment.find('RegressionModel') for segment in valid_segments]
+
+ tables = [
+ models[i].find('RegressionTable') for i in range(self.n_classes_)
+ ]
+ elif model is not None:
+ self.multi_class = 'auto'
+ tables = [
+ table for table in model.findall('RegressionTable')
+ if table.find('NumericPredictor') is not None
+ ]
+ else:
+ raise Exception('PMML model does not contain RegressionModel or Segmentation.')
self.coef_ = [
_get_coefficients(self, table)
@@ -113,7 +131,6 @@ def __init__(self, pmml):
self.coef_ = np.array(self.coef_)
self.intercept_ = np.array(self.intercept_)
- self.multi_class = 'auto'
self.solver = 'lbfgs'
def fit(self, x, y):
| logistic
The logistic regression that I use, the linear model that I use, it says in the document that logistic regression is included, why does it show up when I predict PMML model does not contain RegressionModel.
| Thanks for your interest in `sklearn-pmml-model`! In order for me to help you find the problem, it would be great if you can stick to the issue template. Without an extract of the PMML model you are trying to convert, it is difficult for me to help you.
That being said, based on the error I think the library you used to export the PMML model has converted the original `RegressionModel` to an equivalent `GeneralRegressionModel`. If this is the case, you should be able to generate predictions using `PMMLRidgeClassifier` for classification, or `PMMLRidge` for regression.
> Thanks for your interest in `sklearn-pmml-model`! In order for me to help you find the problem, it would be great if you can stick to the issue template. Without an extract of the PMML model you are trying to convert, it is difficult for me to help you.
>
> That being said, based on the error I think the library you used to export the PMML model has converted the original `RegressionModel` to an equivalent `GeneralRegressionModel`. If this is the case, you should be able to generate predictions using `PMMLRidgeClassifier` for classification, or `PMMLRidge` for regression.
I used SVM to predict before, and I want to use logistic regression to classify, test the accuracy of the results, and use logistic regression prediction under the linear model. I mainly want to try logistic regression for classification.
> Thanks for your interest in `sklearn-pmml-model`! In order for me to help you find the problem, it would be great if you can stick to the issue template. Without an extract of the PMML model you are trying to convert, it is difficult for me to help you.
>
> That being said, based on the error I think the library you used to export the PMML model has converted the original `RegressionModel` to an equivalent `GeneralRegressionModel`. If this is the case, you should be able to generate predictions using `PMMLRidgeClassifier` for classification, or `PMMLRidge` for regression.
Well, I can use THE SVM export to PMML to make predictions, but the logical classification prediction will report an error
I suppose you are using `PMMLLogisticRegression` to make 'logical classification' predictions? In my previous comment, I recommended to use `PMMLRidgeClassifier` instead. To do that, just replace "PMMLLogisticRegression" with "PMMLRidgeClassifier". I think that should work for you.
> I suppose you are using `PMMLLogisticRegression` to make 'logical classification' predictions? In my previous comment, I recommended to use `PMMLRidgeClassifier` instead. To do that, just replace "PMMLLogisticRegression" with "PMMLRidgeClassifier". I think that should work for you.
Should I change my training to RidgeClassifier, or is there a problem with data processing? SVM can be a good test,Exception: PMML model does not contain GeneralRegressionModel.
> I suppose you are using `PMMLLogisticRegression` to make 'logical classification' predictions? In my previous comment, I recommended to use `PMMLRidgeClassifier` instead. To do that, just replace "PMMLLogisticRegression" with "PMMLRidgeClassifier". I think that should work for you.
Why is it easier for me to predict with SVM, but harder for me to predict with logistic regression? Is there any other model that can do better classification
> I suppose you are using `PMMLLogisticRegression` to make 'logical classification' predictions? In my previous comment, I recommended to use `PMMLRidgeClassifier` instead. To do that, just replace "PMMLLogisticRegression" with "PMMLRidgeClassifier". I think that should work for you.

If only classfier parameters can be predicted in PMMLPipeline, but the accuracy of the result is not high, the logistic regression parameters need to be adjusted to reach a certain precision value.
I am not entirely sure what your problem is. It would be helpful if you can provide a copy of the PMML file that you having problems with.
In your screenshot you show the method `PMMLPipeline`. Do note this method is not part of this library, but from `sklearn2pmml` instead. That library converts sklearn models *into* PMML, as opposed to `sklearn-pmml-model` creating a sklearn model *from* a PMML.
For me, `PMMLLogisticRegression` works just fine. Check out this simple example on how to use it along with `sklearn2pmml`:
```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn_pmml_model.linear_model import PMMLLogisticRegression
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml import sklearn2pmml
# Prepare data
iris = load_iris()
X = pd.DataFrame(iris.data)
X.columns = np.array(iris.feature_names)
y = pd.Series(np.array(iris.target_names)[iris.target])
y.name = "Class"
# train logistic regression
clf = LogisticRegression()
pipeline = PMMLPipeline([
("classifier", clf)
])
pipeline.fit(X, y)
# convert to PMML
sklearn2pmml(pipeline, "test.pmml", with_repr = True)
# Load from PMML and predict
clf = PMMLLogisticRegression(pmml="test.pmml")
clf.predict(X)
clf.score(X, y)
```
> I am not entirely sure what your problem is. It would be helpful if you can provide a copy of the PMML file that you having problems with.
>
> In your screenshot you show the method `PMMLPipeline`. Do note this method is not part of this library, but from `sklearn2pmml` instead. That library converts sklearn models _into_ PMML, as opposed to `sklearn-pmml-model` creating a sklearn model _from_ a PMML.
>
> For me, `PMMLLogisticRegression` works just fine. Check out this simple example on how to use it along with `sklearn2pmml`:
>
> ```python
> from sklearn.datasets import load_iris
> from sklearn.linear_model import LogisticRegression
> from sklearn.model_selection import train_test_split
> import pandas as pd
> import numpy as np
> from sklearn_pmml_model.linear_model import PMMLLogisticRegression
> from sklearn2pmml.pipeline import PMMLPipeline
> from sklearn2pmml import sklearn2pmml
>
> # Prepare data
> iris = load_iris()
> X = pd.DataFrame(iris.data)
> X.columns = np.array(iris.feature_names)
> y = pd.Series(np.array(iris.target_names)[iris.target])
> y.name = "Class"
>
> # train logistic regression
> clf = LogisticRegression()
> pipeline = PMMLPipeline([
> ("classifier", clf)
> ])
> pipeline.fit(X, y)
>
> # convert to PMML
> sklearn2pmml(pipeline, "test.pmml", with_repr = True)
>
> # Load from PMML and predict
> clf = PMMLLogisticRegression(pmml="test.pmml")
> clf.predict(X)
> clf.score(X, y)

Logistic regression can be used, but it's not very accurate, only 40% accurate.Are there other networks that do categorization?
The parameters you show don't make a lot of sense to me. `max_iter = 2` is way too low to yield any decent classification. I suggest you start with `LogisticRegression()`, so without any arguments. See if that works (it should), and then gradually add arguments to see if it improves performance. Often enough, the default parameters prove to be sufficient.
If you like to try another model, I suggest trying `RandomForestClassifier`.
> The parameters you show don't make a lot of sense to me. `max_iter = 2` is way too low to yield any decent classification. I suggest you start with `LogisticRegression()`, so without any arguments. See if that works (it should), and then gradually add arguments to see if it improves performance. Often enough, the default parameters prove to be sufficient.
>
> If you like to try another model, I suggest trying `RandomForestClassifier`.
The test accuracy of default parameters is not high, which can only reach half of SVM, and it needs to be adjusted, and it does not need too complex network model.
> The parameters you show don't make a lot of sense to me. `max_iter = 2` is way too low to yield any decent classification. I suggest you start with `LogisticRegression()`, so without any arguments. See if that works (it should), and then gradually add arguments to see if it improves performance. Often enough, the default parameters prove to be sufficient.
>
> If you like to try another model, I suggest trying `RandomForestClassifier`.
I tried the random forest,ModuleNotFoundError: No module named 'sklearn_pmml_model.tree._tree'.I use three categories
Please make sure you installed the library using `pip install sklearn-pmml-model`. This error seems to indicate the Cython code is not compiled, which is only the case if you downloaded this library and are working in that directory directly.
If you, for some reason, cannot use `pip`, running the following command will compile the Cython code inplace, and should fix the issue you have:
```
python setup.py build_ext --inplace
```
I don't recommend this, and it will require a C compiler, which is a bit of a pain to setup on windows. More information about this process can be found at https://sklearn-pmml-model.readthedocs.io/en/latest/install.html#from-source.
> Please make sure you installed the library using `pip install sklearn-pmml-model`. This error seems to indicate the Cython code is not compiled, which is only the case if you downloaded this library and are working in that directory directly.
>
> If you, for some reason, cannot use `pip`, running the following command will compile the Cython code inplace, and should fix the issue you have:
>
> ```
> python setup.py build_ext --inplace
> ```
>
> I don't recommend this, and it will require a C compiler, which is a bit of a pain to setup on windows. More information about this process can be found at https://sklearn-pmml-model.readthedocs.io/en/latest/install.html#from-source.
I installed the package according to Requerment.txt
> Please make sure you installed the library using `pip install sklearn-pmml-model`. This error seems to indicate the Cython code is not compiled, which is only the case if you downloaded this library and are working in that directory directly.
>
> If you, for some reason, cannot use `pip`, running the following command will compile the Cython code inplace, and should fix the issue you have:
>
> ```
> python setup.py build_ext --inplace
> ```
>
> I don't recommend this, and it will require a C compiler, which is a bit of a pain to setup on windows. More information about this process can be found at https://sklearn-pmml-model.readthedocs.io/en/latest/install.html#from-source.
If I use logistic regression to do the tripartite model can't it predict
which is only the case if you downloaded this library and are working in that directory directly.
>
I can use PIP, how can I simply use random forest, I don't want to install c compiler.
> Please make sure you installed the library using `pip install sklearn-pmml-model`. This error seems to indicate the Cython code is not compiled, which is only the case if you downloaded this library and are working in that directory directly.
>
> If you, for some reason, cannot use `pip`, running the following command will compile the Cython code inplace, and should fix the issue you have:
>
> ```
> python setup.py build_ext --inplace
> ```
>
> I don't recommend this, and it will require a C compiler, which is a bit of a pain to setup on windows. More information about this process can be found at https://sklearn-pmml-model.readthedocs.io/en/latest/install.html#from-source.
Why do I use logistic regression to do the binary classification of such errors, the first two days can also do three classifications will report errors
ValueError: matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 2 is different from 1024)
If you use `pip` to install the library, no C compiler is necessary. More information on how to install using pip can be found in the documentation: https://sklearn-pmml-model.readthedocs.io/en/latest/install.html#pip.
`pip` is the standard package manager for Python, and is included with every Python install. The documentation includes a link to more general information about `pip` here: https://packaging.python.org/tutorials/installing-packages/#use-pip-for-installing.
> If you use `pip` to install the library, no C compiler is necessary. More information on how to install using pip can be found in the documentation: https://sklearn-pmml-model.readthedocs.io/en/latest/install.html#pip.
>
> `pip` is the standard package manager for Python, and is included with every Python install. The documentation includes a link to more general information about `pip` here: https://packaging.python.org/tutorials/installing-packages/#use-pip-for-installing.
I installed packages from Requiest with PIP. Why do I get errors with those models
> Why do I get errors with those models
You have to let me know which errors you are seeing, otherwise I cannot help you.
---
I am expecting you still installed the packages with `pip` but are still within a clone of this package. If you are working in a copy of this repository, please remove it, start fresh, do a pip install, and try out the example I provided here: https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906271001. If this works, you can proceed to try different models and datasets.
> > Why do I get errors with those models
>
> You have to let me know which errors you are seeing, otherwise I cannot help you.
>
> I am expecting you still installed the packages with `pip` but are still within a clone of this package. If you are working in a copy of this repository, please remove it, start fresh, do a pip install, and try out the example I provided here: [#35 (comment)](https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906271001). If this works, you can proceed to try different models and datasets.
ValueError: matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 2 is different from 1024)

The error occurred when I used logistic regression or ridge regression, it is ok to carry out binary classification before logistic regression, can triple classification be used? I mainly use it to test binary classification and triple classification. If it is triple classification, do I need to make any modifications。
> > Why do I get errors with those models
>
> You have to let me know which errors you are seeing, otherwise I cannot help you.
>
> I am expecting you still installed the packages with `pip` but are still within a clone of this package. If you are working in a copy of this repository, please remove it, start fresh, do a pip install, and try out the example I provided here: [#35 (comment)](https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906271001). If this works, you can proceed to try different models and datasets.
Well, use the package version, but don't use it directly in your project.
> > Why do I get errors with those models
>
> You have to let me know which errors you are seeing, otherwise I cannot help you.
>
> I am expecting you still installed the packages with `pip` but are still within a clone of this package. If you are working in a copy of this repository, please remove it, start fresh, do a pip install, and try out the example I provided here: [#35 (comment)](https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906271001). If this works, you can proceed to try different models and datasets.

I used logistic to classify them into three categories and found Exception: PMML model does not contain RegressionModel. Reinstalled the package, the dichotomies can be predicted, ridge regression is also such a problem.
> > Why do I get errors with those models
>
> You have to let me know which errors you are seeing, otherwise I cannot help you.
>
> I am expecting you still installed the packages with `pip` but are still within a clone of this package. If you are working in a copy of this repository, please remove it, start fresh, do a pip install, and try out the example I provided here: [#35 (comment)](https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906271001). If this works, you can proceed to try different models and datasets.
ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject,
Do I need to do some configuration when I use GBDT classification.
> I used logistic to classify them into three categories and found Exception: PMML model does not contain RegressionModel. Reinstalled the package, the dichotomies can be predicted, ridge regression is also such a problem.
Ok I think I understand now. You seem to be using the `multi_class='ovr'` parameter on your `LogisticRegression` class (from https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906867774). This means one-versus-rest regression. This type is not explicitly supported by the library yet, but I am working on adding it right now.
To get it working in the mean time, you can use the default parameter `multi_class='auto'` or specifically select `multi_class='multinomial'` instead. This type of regression should work!
> > I used logistic to classify them into three categories and found Exception: PMML model does not contain RegressionModel. Reinstalled the package, the dichotomies can be predicted, ridge regression is also such a problem.
>
> Ok I think I understand now. You seem to be using the `multi_class='ovr'` parameter on your `LogisticRegression` class (from [#35 (comment)](https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906867774)). This means one-versus-rest regression. This type is not explicitly supported by the library yet, but I am working on adding it right now.
>
> To get it working in the mean time, you can use the default parameter `multi_class='auto'` or specifically select `multi_class='multinomial'` instead. This type of regression should work!

Well, I had a logistic triage error,Exception: PMML model does not contain RegressionModel.
> > I used logistic to classify them into three categories and found Exception: PMML model does not contain RegressionModel. Reinstalled the package, the dichotomies can be predicted, ridge regression is also such a problem.
>
> Ok I think I understand now. You seem to be using the `multi_class='ovr'` parameter on your `LogisticRegression` class (from [#35 (comment)](https://github.com/iamDecode/sklearn-pmml-model/issues/35#issuecomment-906867774)). This means one-versus-rest regression. This type is not explicitly supported by the library yet, but I am working on adding it right now.
>
> To get it working in the mean time, you can use the default parameter `multi_class='auto'` or specifically select `multi_class='multinomial'` instead. This type of regression should work!

soga,Three categories running, ha ha | 2021-09-07T13:25:04 | 0.0 | [] | [] |
||
Ecogenomics/GTDBTk | Ecogenomics__GTDBTk-504 | e41c38dd771c81677f1a492a1a98734998724ccd | diff --git a/.github/workflows/master-push.yml b/.github/workflows/master-push.yml
index 43f538a0..ccfc2a3d 100644
--- a/.github/workflows/master-push.yml
+++ b/.github/workflows/master-push.yml
@@ -33,13 +33,6 @@ jobs:
python -m pip install --upgrade pip
python -m pip install -r requirements.txt
- - name: Extract config values
- working-directory: ${{ github.workspace }}/master/gtdbtk/config
- run: |
- grep AF_THRESHOLD config.py > config2.py
- grep PPLACER_MIN_RAM_BAC_FULL config.py >> config2.py
- mv config2.py config.py
-
- name: Build documentation
working-directory: ${{ github.workspace }}/master/docs
run: make html
diff --git a/README.md b/README.md
index b6346f72..4d49df3e 100644
--- a/README.md
+++ b/README.md
@@ -37,13 +37,9 @@ Documentation for GTDB-Tk can be found [here](https://ecogenomics.github.io/GTDB
## ✨ New Features
-GTDB-Tk v2.2.0+ includes the following new features:
-- GTDB-TK `classify` and `classify_wf` have changed in version 2.2.0+. There is now an ANI classification stage (`ANI screen`) that precedes classification by placement in a reference tree.
- - **This is now the default behavior for `classify` and `classify_wf`.**
- - In `classify`, user genomes are first compared against a Mash database comprised of all GTDB representative genomes and genome pairs of sufficient similarity processed by FastANI. User genomes classified to a GTDB representative based on FastANI results are not run through pplacer.
- - In the `classify_wf` workflow, genomes are classified using Mash and FastANI before executing the identify step. User genomes classified with FastANI are not run through the remainder of the pipeline (identify, align, classify).
- - `classify_wf` and `classify` have now **an extra mutually exclusive required argument**: You can either pick `--skip_ani_screen` (to skip the ani_screening step to classify genomes using mash and FastANI) or `--mash_db` path to save/read (if exists) the Mash reference sketch database.
- - To classify genomes without the additional `ani_screen` step, use the `--skip_ani_screen` flag.
+GTDB-Tk v2.3.0+ includes the following new features:
+- New functionality ``convert_to_species`` function to convert GTDB genome IDs to GTDB species names
+
## 📈 Performance
Using ANI screen "can" reduce computation by >50%, although it depends on the set of input genomes. A set of input genomes consisting primarily of new species will not benefit from ANI screen as much as a set of genomes that are largely assigned to GTDB species clusters. In the latter case, the ANI screen will reduce the number of genomes that need to be classified by pplacer which reduces computation time substantially (between 25% and 60% in our testing).
diff --git a/docs/src/announcements.rst b/docs/src/announcements.rst
index 8de8008b..2a6bfdf8 100644
--- a/docs/src/announcements.rst
+++ b/docs/src/announcements.rst
@@ -1,6 +1,25 @@
Announcements
=============
+GTDB-Tk 2.3.0 available
+-----------------------
+
+*May 09, 2023*
+
+* GTDB-Tk version ``2.3.0`` is now available.
+* This version of GTDB-Tk **does not** require a new version of the GTDB-Tk reference package.
+
+
+GTDB R214 available
+-------------------
+
+*April 31, 2021*
+
+* GTDB Release 214 is now available and will be used from version ``2.3.0`` and up.
+* This version of GTDB-Tk is compatible with both release207 and release214 of the GTDB-Tk reference package.
+ `gtdbtk_r214_data.tar.gz <https://data.gtdb.ecogenomic.org/releases/release214/214.0/auxillary_files/>`_.
+
+
GTDB-Tk 2.2.0 available
-----------------------
diff --git a/docs/src/changelog.rst b/docs/src/changelog.rst
index 07c5db60..a059b5b3 100644
--- a/docs/src/changelog.rst
+++ b/docs/src/changelog.rst
@@ -2,6 +2,21 @@
Change log
==========
+2.3.0
+-----
+
+Bug Fixes:
+
+* (`#508 <https://github.com/Ecogenomics/GTDBTk/issues/508>`_) (`#509 <https://github.com/Ecogenomics/GTDBTk/issues/509>`_) If **ALL** genomes for a specific domain are either filtered out or classified with ANI they are now reported in the summary file.
+
+Minor changes:
+
+* (`#491 <https://github.com/Ecogenomics/GTDBTk/issues/491>`_) (`#498 <https://github.com/Ecogenomics/GTDBTk/issues/498>`_) Allow GTDB-Tk to show ``--help`` and ``-v`` without ``GTDBTK_DATA_PATH`` being set.
+ * WARNING: This is a breaking change if you are importing GTDB-Tk as a library and importing values from ``gtdbtk.config.config``, instead you need to import as ``from gtdbtk.config.common import CONFIG`` then access values via ``CONFIG.<var>``
+* (`#508 <https://github.com/Ecogenomics/GTDBTk/issues/508>`_) Mash distance is changed from 0.1 to 0.15 . This is will increase the number of FastANI comparisons but will cover cases wheere genomes have a larger Mash distance but a small ANI.
+* (`#497 <https://github.com/Ecogenomics/GTDBTk/issues/497>`_) Add a ``convert_to_species`` function is GTDB-Tk to replace GCA/GCF ids with their GTDB species name
+* Add ``--db_version`` flag to ``check_install`` to check the version of previous GTDB-Tk packages.
+
2.2.6
-----
diff --git a/docs/src/installing/index.rst b/docs/src/installing/index.rst
index c9d15f14..78f0079b 100644
--- a/docs/src/installing/index.rst
+++ b/docs/src/installing/index.rst
@@ -33,12 +33,12 @@ Hardware requirements
- Storage
- Time
* - Archaea
- - ~34 GB
- - ~65 GB
+ - ~45 GB
+ - ~85 GB
- ~1 hour / 1,000 genomes @ 64 CPUs
* - Bacteria
- - ~55GB (320 GB when using --full_tree)
- - ~65 GB
+ - ~65GB (410 GB when using --full_tree)
+ - ~85 GB
- ~1 hour / 1,000 genomes @ 64 CPUs
.. note::
@@ -117,13 +117,13 @@ Please cite these tools if you use GTDB-Tk in your work.
GTDB-Tk reference data
----------------------
-GTDB-Tk requires ~66G of external data that needs to be downloaded and unarchived:
+GTDB-Tk requires ~84G of external data that needs to be downloaded and unarchived:
.. code-block:: bash
- wget https://data.gtdb.ecogenomic.org/releases/latest/auxillary_files/gtdbtk_v2_data.tar.gz
- wget https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/auxillary_files/gtdbtk_v2_data.tar.gz (or, mirror)
- tar xvzf gtdbtk_v2_data.tar.gz
+ wget https://data.gtdb.ecogenomic.org/releases/latest/auxillary_files/gtdbtk_data.tar.gz
+ wget https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/auxillary_files/gtdbtk_data.tar.gz (or, mirror)
+ tar xvzf gtdbtk_data.tar.gz
.. note:: Note that different versions of the GTDB release data may not run on all versions of GTDB-Tk, check the supported versions!
@@ -137,6 +137,10 @@ GTDB-Tk requires ~66G of external data that needs to be downloaded and unarchive
- Minimum version
- Maximum version
- MD5
+ * - `R214 <https://data.gtdb.ecogenomic.org/releases/release214/214.0/auxillary_files/gtdbtk_r214_data.tar.gz>`_
+ - 2.1.0
+ - Current
+ - 630745840850c532546996b22da14c27
* - `R207_v2 <https://data.gtdb.ecogenomic.org/releases/release207/207.0/auxillary_files/gtdbtk_r207_v2_data.tar.gz>`_
- 2.1.0
- Current
diff --git a/gtdbtk/__init__.py b/gtdbtk/__init__.py
index a8a6d5ec..027a3026 100644
--- a/gtdbtk/__init__.py
+++ b/gtdbtk/__init__.py
@@ -29,4 +29,4 @@
__status__ = 'Production'
__title__ = 'GTDB-Tk'
__url__ = 'https://github.com/Ecogenomics/GTDBTk'
-__version__ = '2.2.6'
+__version__ = '2.3.0'
diff --git a/gtdbtk/__main__.py b/gtdbtk/__main__.py
index 8f531885..0afea986 100644
--- a/gtdbtk/__main__.py
+++ b/gtdbtk/__main__.py
@@ -49,12 +49,13 @@ def print_help():
decorate -> Decorate tree with GTDB taxonomy
Tools:
- infer_ranks -> Establish taxonomic ranks of internal nodes using RED
- ani_rep -> Calculates ANI to GTDB representative genomes
- trim_msa -> Trim an untrimmed MSA file based on a mask
- export_msa -> Export the untrimmed archaeal or bacterial MSA file
- remove_labels -> Remove labels (bootstrap values, node labels) from an Newick tree
- convert_to_itol -> Convert a GTDB-Tk Newick tree to an iTOL tree
+ infer_ranks -> Establish taxonomic ranks of internal nodes using RED
+ ani_rep -> Calculates ANI to GTDB representative genomes
+ trim_msa -> Trim an untrimmed MSA file based on a mask
+ export_msa -> Export the untrimmed archaeal or bacterial MSA file
+ remove_labels -> Remove labels (bootstrap values, node labels) from an Newick tree
+ convert_to_itol -> Convert a GTDB-Tk Newick tree to an iTOL tree
+ convert_to_species -> Convert GTDB genome IDs to GTDB species names
Testing:
diff --git a/gtdbtk/ani_rep.py b/gtdbtk/ani_rep.py
index d947ca0e..9d807075 100644
--- a/gtdbtk/ani_rep.py
+++ b/gtdbtk/ani_rep.py
@@ -6,8 +6,7 @@
from gtdbtk.biolib_lite.common import canonical_gid
from gtdbtk.biolib_lite.execute import check_dependencies
from gtdbtk.biolib_lite.taxonomy import Taxonomy
-from gtdbtk.config.config import (TAXONOMY_FILE,
- AF_THRESHOLD)
+from gtdbtk.config.common import CONFIG
from gtdbtk.config.output import DIR_ANI_REP_INT_MASH
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.external.fastani import FastANI
@@ -76,7 +75,7 @@ def run(self, genomes, no_mash, mash_d, out_dir, prefix, mash_k, mash_v, mash_s,
prefix, mash_k, mash_v,
mash_s, max_mash_dist, mash_db=mash_db)
- taxonomy = Taxonomy().read(TAXONOMY_FILE, canonical_ids=True)
+ taxonomy = Taxonomy().read(CONFIG.TAXONOMY_FILE, canonical_ids=True)
ani_summary_file = ANISummaryFile(out_dir, prefix, fastani_results, taxonomy)
ani_summary_file.write()
ANIClosestFile(out_dir,
@@ -269,7 +268,7 @@ def _write(self):
fh.write(f'{gid}\t{ref_gid}')
fh.write(f'\t{closest_ani}\t{closest_af}')
fh.write(f'\t{taxonomy_str}')
- fh.write(f'\t{closest_ani >= gtdb_ani_radius and closest_af >= AF_THRESHOLD}\n')
+ fh.write(f'\t{closest_ani >= gtdb_ani_radius and closest_af >= CONFIG.AF_THRESHOLD}\n')
else:
fh.write(f'{gid}\tno result\tno result\tno result\tno result\tno result\n')
else:
diff --git a/gtdbtk/ani_screen.py b/gtdbtk/ani_screen.py
index a2240b4f..4b461a5d 100644
--- a/gtdbtk/ani_screen.py
+++ b/gtdbtk/ani_screen.py
@@ -8,9 +8,8 @@
from gtdbtk.classify import Classify
from gtdbtk.config.output import DIR_ANISCREEN
-from gtdbtk.config.config import (TAXONOMY_FILE, MASH_SKETCH_FILE, AF_THRESHOLD)
from gtdbtk.files.gtdb_radii import GTDBRadiiFile
-
+from gtdbtk.config.common import CONFIG
class ANIScreener(object):
"""Computes a list of genomes to a list of representatives."""
@@ -39,7 +38,7 @@ def run_aniscreen(self,genomes, no_mash,out_dir,prefix, mash_k, mash_v, mash_s,
if mash_db.endswith('/'):
make_sure_path_exists(mash_db)
if os.path.isdir(mash_db):
- mash_db = os.path.join(mash_db, MASH_SKETCH_FILE)
+ mash_db = os.path.join(mash_db, CONFIG.MASH_SKETCH_FILE)
#we set mash_d == mash_max_dist to avoid user to run mash with impossible values
mash_d = mash_max_dist
@@ -49,7 +48,7 @@ def run_aniscreen(self,genomes, no_mash,out_dir,prefix, mash_k, mash_v, mash_s,
fastani_results = ani_rep.run_mash_fastani(genomes, no_mash, mash_d, os.path.join(out_dir, DIR_ANISCREEN),
prefix, mash_k, mash_v, mash_s, mash_max_dist, mash_db)
- taxonomy = Taxonomy().read(TAXONOMY_FILE, canonical_ids=True)
+ taxonomy = Taxonomy().read(CONFIG.TAXONOMY_FILE, canonical_ids=True)
mash_classified_user_genomes = self.sort_fastani_ani_screen(
fastani_results,taxonomy)
@@ -88,7 +87,7 @@ def sort_fastani_ani_screen(self,fastani_results,taxonomy,bac_ar_diff=None):
# sort the dictionary by ani then af
for gid in fastani_results.keys():
thresh_results = [(ref_gid, hit) for (ref_gid, hit) in fastani_results[gid].items() if
- hit['af'] >= AF_THRESHOLD and hit['ani'] >= self.gtdb_radii.get_rep_ani(
+ hit['af'] >= CONFIG.AF_THRESHOLD and hit['ani'] >= self.gtdb_radii.get_rep_ani(
canonical_gid(ref_gid))]
all_results = [(ref_gid, hit) for (ref_gid, hit) in fastani_results[gid].items()]
closest = sorted(thresh_results, key=lambda x: (-x[1]['ani'], -x[1]['af']))
diff --git a/gtdbtk/biolib_lite/logger.py b/gtdbtk/biolib_lite/logger.py
index e15ef146..593c4650 100644
--- a/gtdbtk/biolib_lite/logger.py
+++ b/gtdbtk/biolib_lite/logger.py
@@ -23,7 +23,7 @@
from tqdm import tqdm
-from gtdbtk.config.config import LOG_TASK
+from gtdbtk.config.common import CONFIG
from .common import make_sure_path_exists
@@ -128,7 +128,7 @@ class SpecialFormatter(logging.Formatter):
datefmt="%Y-%m-%d %H:%M:%S")
def format(self, record):
- if record.levelno == LOG_TASK:
+ if record.levelno == CONFIG.LOG_TASK:
return self.task_fmt.format(record)
if record.levelno >= logging.ERROR:
return self.err_fmt.format(record)
@@ -162,7 +162,7 @@ class ColourlessFormatter(SpecialFormatter):
def format(self, record):
record.msg = self.ansi_escape.sub('', record.msg)
- if record.levelno == LOG_TASK:
+ if record.levelno == CONFIG.LOG_TASK:
return self.task_fmt.format(record)
if record.levelno >= logging.ERROR:
return self.err_fmt.format(record)
diff --git a/gtdbtk/classify.py b/gtdbtk/classify.py
index ad6a915e..4301912e 100644
--- a/gtdbtk/classify.py
+++ b/gtdbtk/classify.py
@@ -28,7 +28,7 @@
from numpy import median as np_median
-import gtdbtk.config.config as Config
+from gtdbtk.config.common import CONFIG
from gtdbtk.ani_rep import ANIRep, ANISummaryFile
from gtdbtk.biolib_lite.common import make_sure_path_exists,canonical_gid
from gtdbtk.biolib_lite.execute import check_dependencies
@@ -69,8 +69,8 @@ def __init__(self, cpus=1, pplacer_cpus=None, af_threshold=None,skip_pplacer=Fal
self.skip_pplacer = skip_pplacer
- self.taxonomy_file = Config.TAXONOMY_FILE
- self.af_threshold = af_threshold if af_threshold else Config.AF_THRESHOLD
+ self.taxonomy_file = CONFIG.TAXONOMY_FILE
+ self.af_threshold = af_threshold if af_threshold else CONFIG.AF_THRESHOLD
self.gtdb_taxonomy = Taxonomy().read(self.taxonomy_file)
self.order_rank = ["d__", "p__", "c__", "o__", 'f__', 'g__', 's__']
@@ -110,7 +110,7 @@ def __init__(self, cpus=1, pplacer_cpus=None, af_threshold=None,skip_pplacer=Fal
@staticmethod
def parse_radius_file():
results = {}
- with open(Config.RADII_FILE) as f:
+ with open(CONFIG.RADII_FILE) as f:
for line in f:
infos = line.strip().split('\t')
gid = infos[1]
@@ -159,16 +159,16 @@ def place_genomes(self,
mem_gb = get_memory_gb()
if mem_gb is not None:
mem_total = mem_gb['MemTotal']
- if marker_set_id == 'bac120' and levelopt is None and mem_total < Config.PPLACER_MIN_RAM_BAC_FULL:
- self.logger.warning(mem_warning.format(req_gb=Config.PPLACER_MIN_RAM_BAC_FULL,
+ if marker_set_id == 'bac120' and levelopt is None and mem_total < CONFIG.PPLACER_MIN_RAM_BAC_FULL:
+ self.logger.warning(mem_warning.format(req_gb=CONFIG.PPLACER_MIN_RAM_BAC_FULL,
domain='bacterial',
cur_gb=mem_total))
- if marker_set_id == 'bac120' and levelopt == 'high' and mem_total < Config.PPLACER_MIN_RAM_BAC_SPLIT:
- self.logger.warning(mem_warning.format(req_gb=Config.PPLACER_MIN_RAM_BAC_SPLIT,
+ if marker_set_id == 'bac120' and levelopt == 'high' and mem_total < CONFIG.PPLACER_MIN_RAM_BAC_SPLIT:
+ self.logger.warning(mem_warning.format(req_gb=CONFIG.PPLACER_MIN_RAM_BAC_SPLIT,
domain='bacterial',
cur_gb=mem_total))
- elif marker_set_id == 'ar53' and mem_total < Config.PPLACER_MIN_RAM_ARC:
- self.logger.warning(mem_warning.format(req_gb=Config.PPLACER_MIN_RAM_ARC,
+ elif marker_set_id == 'ar53' and mem_total < CONFIG.PPLACER_MIN_RAM_ARC:
+ self.logger.warning(mem_warning.format(req_gb=CONFIG.PPLACER_MIN_RAM_ARC,
domain='archaeal',
cur_gb=mem_total))
@@ -200,35 +200,35 @@ def place_genomes(self,
pplacer_ref_pkg = None
if marker_set_id == 'bac120':
if levelopt is None:
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
f'Placing {num_genomes:,} bacterial genomes '
f'into reference tree with pplacer using '
f'{self.pplacer_cpus} CPUs (be patient).')
- pplacer_ref_pkg = os.path.join(Config.PPLACER_DIR,
- Config.PPLACER_BAC120_REF_PKG)
+ pplacer_ref_pkg = os.path.join(CONFIG.PPLACER_DIR,
+ CONFIG.PPLACER_BAC120_REF_PKG)
elif levelopt == 'high':
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
f'Placing {num_genomes:,} bacterial genomes '
f'into backbone reference tree with pplacer using '
f'{self.pplacer_cpus} CPUs (be patient).')
- pplacer_ref_pkg = os.path.join(Config.BACKBONE_PPLACER_DIR,
- Config.BACKBONE_PPLACER_REF_PKG)
+ pplacer_ref_pkg = os.path.join(CONFIG.BACKBONE_PPLACER_DIR,
+ CONFIG.BACKBONE_PPLACER_REF_PKG)
elif levelopt == 'low':
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
f'Placing {num_genomes:,} bacterial genomes '
f'into class-level reference tree {tree_iter} ({idx_tree}/{number_low_trees}) with '
f'pplacer using {self.pplacer_cpus} CPUs '
f'(be patient).')
- pplacer_ref_pkg = os.path.join(Config.CLASS_LEVEL_PPLACER_DIR,
- Config.CLASS_LEVEL_PPLACER_REF_PKG.format(iter=tree_iter))
+ pplacer_ref_pkg = os.path.join(CONFIG.CLASS_LEVEL_PPLACER_DIR,
+ CONFIG.CLASS_LEVEL_PPLACER_REF_PKG.format(iter=tree_iter))
elif marker_set_id == 'ar53':
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
f'Placing {num_genomes:,} archaeal genomes into '
f'reference tree with pplacer using '
f'{self.pplacer_cpus} CPUs (be patient).')
- pplacer_ref_pkg = os.path.join(Config.PPLACER_DIR,
- Config.PPLACER_AR53_REF_PKG)
+ pplacer_ref_pkg = os.path.join(CONFIG.PPLACER_DIR,
+ CONFIG.PPLACER_AR53_REF_PKG)
else:
raise GenomeMarkerSetUnknown(f'Unknown marker set: {marker_set_id}')
@@ -318,7 +318,7 @@ def parser_marker_summary_file(self, marker_summary_fh):
for gid, marker_dict in marker_summary_fh.genomes.items():
multi_hits_percent = (100 * len(marker_dict['mul'])) / \
len(marker_summary_fh.marker_names)
- if multi_hits_percent >= Config.DEFAULT_MULTIHIT_THRESHOLD:
+ if multi_hits_percent >= CONFIG.DEFAULT_MULTIHIT_THRESHOLD:
results[gid] = round(multi_hits_percent, 1)
return results
@@ -333,10 +333,10 @@ def run(self,
skip_ani_screen=False,
genes=False,
no_mash=False,
- mash_k=Config.MASH_K_VALUE,
- mash_v=Config.MASH_V_VALUE,
- mash_s=Config.MASH_S_VALUE,
- mash_max_dist=Config.MASH_MAX_DISTANCE,
+ mash_k=CONFIG.MASH_K_VALUE,
+ mash_v=CONFIG.MASH_V_VALUE,
+ mash_s=CONFIG.MASH_S_VALUE,
+ mash_max_dist=CONFIG.MASH_MAX_DISTANCE,
mash_db=None,
ani_summary_files=None,
all_classified_ani=False):
@@ -360,7 +360,7 @@ def run(self,
if mash_db.endswith('/'):
make_sure_path_exists(mash_db)
if os.path.isdir(mash_db):
- mash_db = os.path.join(mash_db, Config.MASH_SKETCH_FILE)
+ mash_db = os.path.join(mash_db, CONFIG.MASH_SKETCH_FILE)
# we set mash_d == mash_max_dist to avoid user to run mash with impossible values
mash_d = mash_max_dist
@@ -455,23 +455,45 @@ def run(self,
if (not os.path.exists(user_msa_file)) or (os.path.getsize(user_msa_file) < 30):
# file will not exist if there are no User genomes from a given domain
#
+ if marker_set_id == 'ar53':
+ # we still add the filtered genomes to the summary file
+ # add filtered genomes to the summary file
+ warning_counter = self.add_filtered_genomes_to_summary(align_dir, warning_counter, summary_file,
+ marker_set_id, prefix)
+
# But if there is Unclassified genomes without domain,
# they still have to be written in the bac120 summary file:
- if marker_set_id == 'bac120':
+ elif marker_set_id == 'bac120':
# Add failed genomes from prodigal and genomes with no markers in the bac120 summary file
# This is an executive direction: failed prodigal and genomes with no markers are not bacterial or archaeal
# but they need to be included in one of the summary file
prodigal_failed_counter = self.add_failed_genomes_to_summary(align_dir, summary_file, prefix)
- if summary_file.has_row():
- summary_file.write()
- output_files.setdefault(marker_set_id, []).append(summary_file.path)
- # Symlink to the summary file from the root
+ # we also add the filtered genomes to the summary file
+ # add filtered genomes to the summary file
+ warning_counter = self.add_filtered_genomes_to_summary(align_dir, warning_counter, summary_file,
+ marker_set_id, prefix)
+
+ # we add all genomes classified with ANI
+ if mash_classified_user_genomes and marker_set_id in mash_classified_user_genomes:
+ list_summary_rows = mash_classified_user_genomes.get(marker_set_id)
+ for row in list_summary_rows:
+ summary_file.add_row(row)
+
+ if summary_file.has_row():
+ summary_file.write()
+ output_files.setdefault(marker_set_id, []).append(summary_file.path)
+ # Symlink to the summary file from the root
+ if marker_set_id == 'ar53':
+ symlink_f(PATH_AR53_SUMMARY_OUT.format(prefix=prefix),
+ os.path.join(out_dir, os.path.basename(PATH_AR53_SUMMARY_OUT.format(prefix=prefix))))
+ elif marker_set_id == 'bac120':
symlink_f(PATH_BAC120_SUMMARY_OUT.format(prefix=prefix),
os.path.join(out_dir, os.path.basename(PATH_BAC120_SUMMARY_OUT.format(prefix=prefix))))
- if prodigal_failed_counter > 0:
- self.logger.warning(f"{prodigal_failed_counter} of {len(genomes)} "
- f"genome{'' if prodigal_failed_counter == 1 else 's'} "
- f"ha{'s' if prodigal_failed_counter == 1 else 've'} been labeled as 'Unclassified'.")
+ if prodigal_failed_counter > 0:
+ self.logger.warning(f"{prodigal_failed_counter} of {len(genomes)} "
+ f"genome{'' if prodigal_failed_counter == 1 else 's'} "
+ f"ha{'s' if prodigal_failed_counter == 1 else 've'} been labeled as 'Unclassified'.")
+
continue
@@ -560,7 +582,7 @@ def run(self,
tree_mapping_dict = {}
tree_mapping_dict_reverse = {}
- with open(Config.CLASS_LEVEL_TREE_MAPPING_FILE) as ltmf:
+ with open(CONFIG.CLASS_LEVEL_TREE_MAPPING_FILE) as ltmf:
for line in ltmf:
k, v = line.strip().split()
tree_mapping_dict[k] = v
@@ -1163,7 +1185,7 @@ def _parse_tree(self, tree, genomes, msa_dict, percent_multihit_dict,genes, tran
# Persist descendant information for efficient traversal.
tt = TreeTraversal()
- self.logger.log(Config.LOG_TASK, 'Traversing tree to determine classification method.')
+ self.logger.log(CONFIG.LOG_TASK, 'Traversing tree to determine classification method.')
if genes:
fastani_verification = {}
else:
@@ -1179,7 +1201,7 @@ def _parse_tree(self, tree, genomes, msa_dict, percent_multihit_dict,genes, tran
fastani = FastANI(cpus=self.cpus, force_single=True)
d_ani_compare, d_paths = self._get_fastani_genome_path(
fastani_verification, genomes)
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
f'Calculating average nucleotide identity using '
f'FastANI (v{fastani.version}).')
all_fastani_dict = fastani.run(d_ani_compare, d_paths)
@@ -1247,13 +1269,13 @@ def _assign_mrca_red(self, input_tree, marker_set_id, levelopt=None, tree_iter=N
tt = TreeTraversal()
if levelopt is None:
- red_file = Config.MRCA_RED_BAC120
+ red_file = CONFIG.MRCA_RED_BAC120
elif levelopt == 'high':
- red_file = Config.BACKBONE_RED_FILE
+ red_file = CONFIG.BACKBONE_RED_FILE
elif levelopt == 'low':
- red_file = Config.CLASS_LEVEL_RED_FILE.format(iter=tree_iter)
+ red_file = CONFIG.CLASS_LEVEL_RED_FILE.format(iter=tree_iter)
if marker_set_id == 'ar53':
- red_file = Config.MRCA_RED_AR53
+ red_file = CONFIG.MRCA_RED_AR53
# create map from leave labels to tree nodes
leaf_node_map = {}
@@ -1367,16 +1389,17 @@ def add_filtered_genomes_to_summary(self, align_dir,warning_counter, summary_fil
else:
filtered_file = os.path.join(align_dir,PATH_AR53_FILTERED_GENOMES.format(prefix=prefix))
domain = 'Archaea'
-
- with open(filtered_file) as fin:
- for line in fin:
- infos = line.strip().split('\t')
- summary_row = ClassifySummaryFileRow()
- summary_row.gid = infos[0]
- summary_row.classification = f'Unclassified {domain}'
- summary_row.warnings = infos[1]
- summary_file.add_row(summary_row)
- warning_counter += 1
+ # if file exists:
+ if os.path.exists(filtered_file):
+ with open(filtered_file) as fin:
+ for line in fin:
+ infos = line.strip().split('\t')
+ summary_row = ClassifySummaryFileRow()
+ summary_row.gid = infos[0]
+ summary_row.classification = f'Unclassified {domain}'
+ summary_row.warnings = infos[1]
+ summary_file.add_row(summary_row)
+ warning_counter += 1
return warning_counter
def add_failed_genomes_to_summary(self, align_dir, summary_file, prefix):
@@ -1442,7 +1465,7 @@ def _sort_fastani_results_pre_pplacer(self,fastani_results,bac_ar_diff):
# sort the dictionary by ani then af
for gid in fastani_results.keys():
thresh_results = [(ref_gid, hit) for (ref_gid, hit) in fastani_results[gid].items() if
- hit['af'] >= Config.AF_THRESHOLD and hit['ani'] >= self.gtdb_radii.get_rep_ani(
+ hit['af'] >= CONFIG.AF_THRESHOLD and hit['ani'] >= self.gtdb_radii.get_rep_ani(
canonical_gid(ref_gid))]
closest = sorted(thresh_results, key=lambda x: (-x[1]['ani'], -x[1]['af']))
if len(closest) > 0:
@@ -1807,15 +1830,15 @@ def _calculate_red_distances(self, input_tree, out_dir):
preserve_underscores=True)
self.logger.info('Reading taxonomy from file.')
- taxonomy = Taxonomy().read(Config.TAXONOMY_FILE)
+ taxonomy = Taxonomy().read(CONFIG.TAXONOMY_FILE)
# determine taxa to be used for inferring distribution
trusted_taxa = None
taxa_for_dist_inference = self._filter_taxa_for_dist_inference(tree,
taxonomy,
trusted_taxa,
- Config.RED_MIN_CHILDREN,
- Config.RED_MIN_SUPPORT)
+ CONFIG.RED_MIN_CHILDREN,
+ CONFIG.RED_MIN_SUPPORT)
phylum_rel_dists, rel_node_dists = self.median_rd_over_phyla(tree,
taxa_for_dist_inference,
@@ -2153,14 +2176,14 @@ def _get_fastani_genome_path(self, fastani_verification, genomes):
# TODEL UBA genomes
if shortleaf.startswith("UBA"):
ref_path = os.path.join(
- Config.FASTANI_GENOMES,
+ CONFIG.FASTANI_GENOMES,
'UBA',
- shortleaf + Config.FASTANI_GENOMES_EXT)
+ shortleaf + CONFIG.FASTANI_GENOMES_EXT)
else:
ref_path = os.path.join(
- Config.FASTANI_GENOMES,
+ CONFIG.FASTANI_GENOMES,
self.parse_leaf_to_dir_path(shortleaf),
- shortleaf + Config.FASTANI_GENOMES_EXT)
+ shortleaf + CONFIG.FASTANI_GENOMES_EXT)
if not os.path.isfile(ref_path):
raise GTDBTkExit(f'Reference genome missing from FastANI database: {ref_path}')
diff --git a/gtdbtk/cli.py b/gtdbtk/cli.py
index 0b9e94f1..739e1160 100644
--- a/gtdbtk/cli.py
+++ b/gtdbtk/cli.py
@@ -4,8 +4,7 @@
from gtdbtk.biolib_lite.custom_help_formatter import ChangeTempAction
from gtdbtk.biolib_lite.custom_help_formatter import CustomHelpFormatter
-from gtdbtk.config.config import AF_THRESHOLD, PPLACER_MIN_RAM_BAC_FULL, MASH_K_VALUE, MASH_S_VALUE, MASH_D_VALUE, \
- MASH_V_VALUE, MASH_MAX_DISTANCE
+from gtdbtk.config.common import CONFIG
@contextmanager
@@ -63,7 +62,7 @@ def __bacteria(group):
def __outgroup_taxon(group, required):
group.add_argument('--outgroup_taxon', type=str, default=None, required=required,
help="taxon to use as outgroup (e.g., "
- "``p__Patescibacteria`` or ``p__Altarchaeota``)")
+ "``p__Patescibacteria`` or ``p__Altiarchaeota``)")
def __out_dir(group, required):
@@ -205,7 +204,7 @@ def __scratch_dir(group):
def __full_tree(group):
group.add_argument('-f', '--full_tree', default=False, action='store_true',
help='use the unsplit bacterial tree for the classify step; this is the original GTDB-Tk '
- f'approach (version < 2) and requires more than {PPLACER_MIN_RAM_BAC_FULL} GB of RAM to load the reference tree')
+ f'approach (version < 2) and requires more than {CONFIG.PPLACER_MIN_RAM_BAC_FULL} GB of RAM to load the reference tree')
def __identify_dir(group, required):
@@ -255,26 +254,26 @@ def __no_mash(group):
def __mash_k(group):
- group.add_argument('--mash_k', default=MASH_K_VALUE, type=int,
+ group.add_argument('--mash_k', default=CONFIG.MASH_K_VALUE, type=int,
help='k-mer size [1-32]')
def __mash_s(group):
- group.add_argument('--mash_s', default=MASH_S_VALUE, type=int,
+ group.add_argument('--mash_s', default=CONFIG.MASH_S_VALUE, type=int,
help='maximum number of non-redundant hashes')
def __mash_d(group):
- group.add_argument('--mash_d', default=MASH_D_VALUE, type=float,
+ group.add_argument('--mash_d', default=CONFIG.MASH_D_VALUE, type=float,
help='maximum distance to keep [0-1]')
def __mash_v(group):
- group.add_argument('--mash_v', default=MASH_V_VALUE, type=float,
+ group.add_argument('--mash_v', default=CONFIG.MASH_V_VALUE, type=float,
help='maximum p-value to keep [0-1]')
def __mash_max_distance(group):
- group.add_argument('--mash_max_distance', default=MASH_MAX_DISTANCE, type=float,
+ group.add_argument('--mash_max_distance', default=CONFIG.MASH_MAX_DISTANCE, type=float,
help='Maximum Mash distance to select a potential GTDB genome as representative '
'of a user genome.')
@@ -284,7 +283,7 @@ def __mash_db(group):
def __min_af(group):
- group.add_argument('--min_af', type=float, default=AF_THRESHOLD,
+ group.add_argument('--min_af', type=float, default=CONFIG.AF_THRESHOLD,
help='minimum alignment fraction to assign genome to a species cluster')
@@ -318,6 +317,14 @@ def __domain(group, required):
group.add_argument('--domain', required=required, choices=['arc', 'bac'],
help="domain to export")
+def __all_ranks(group):
+ group.add_argument('--all_ranks', default=False, action='store_true',
+ help='add all missing ranks to the leaf nodes if they are present in the reference tree.')
+
+def __db_version(group):
+ group.add_argument('--db_version', type = int, default = None,
+ help="GTDB-Tk version package to test for compatibility.")
+
def __write_single_copy_genes(group):
group.add_argument('--write_single_copy_genes', default=False, action='store_true',
@@ -586,6 +593,17 @@ def get_main_parser():
__debug(grp)
__help(grp)
+ # Convert genome ids to species names.
+ with subparser(sub_parsers, 'convert_to_species', 'Replace GTDB genomes ids with GTDB Species name.') as parser:
+ with arg_group(parser, 'required named arguments') as grp:
+ __input_tree(grp, required=True)
+ __output_tree(grp, required=True)
+ with arg_group(parser, 'optional arguments') as grp:
+ __custom_taxonomy_file(grp)
+ __all_ranks(grp)
+ __debug(grp)
+ __help(grp)
+
# Export MSA.
with subparser(sub_parsers, 'export_msa', 'Export the untrimmed archaeal or bacterial MSA file.') as parser:
with arg_group(parser, 'required named arguments') as grp:
@@ -599,6 +617,7 @@ def get_main_parser():
with subparser(sub_parsers, 'check_install', 'Verify third party programs and '
'GTDB reference package.') as parser:
with arg_group(parser, 'optional arguments') as grp:
+ __db_version(grp)
__debug(grp)
__help(grp)
diff --git a/gtdbtk/config/common.py b/gtdbtk/config/common.py
new file mode 100644
index 00000000..8a18e90f
--- /dev/null
+++ b/gtdbtk/config/common.py
@@ -0,0 +1,370 @@
+import json
+import os
+import sys
+from functools import lru_cache
+
+
+class __GTDBTkCommonConfig:
+ """
+ This class encapsulates all configuration options. It will protect against
+ importing code that depends on a specific value throwing an exception
+ that requires the setting of the GTDB-Tk reference data path.
+ """
+
+ MIN_REF_DATA_VERSION = 'r207'
+ COMPATIBLE_REF_DATA_VERSIONS = ['r207','r214']
+
+ BACKBONE_PPLACER_REF_PKG = 'gtdbtk_package_backbone.refpkg'
+ CLASS_LEVEL_PPLACER_REF_PKG = 'gtdbtk.package.{iter}.refpkg'
+
+ # Relative Evolution Distance
+ RED_INTERVAL = 0.1
+ RED_MIN_SUPPORT = 0.0
+ RED_MIN_CHILDREN = 2
+
+ # Marker information
+ BAC120_MARKERS = {"PFAM": ["PF00380.20.hmm", "PF00410.20.hmm", "PF00466.21.hmm",
+ "PF01025.20.hmm", "PF02576.18.hmm", "PF03726.15.hmm"],
+ "TIGRFAM": ["TIGR00006.HMM", "TIGR00019.HMM", "TIGR00020.HMM",
+ "TIGR00029.HMM", "TIGR00043.HMM", "TIGR00054.HMM",
+ "TIGR00059.HMM", "TIGR00061.HMM", "TIGR00064.HMM",
+ "TIGR00065.HMM", "TIGR00082.HMM", "TIGR00083.HMM",
+ "TIGR00084.HMM", "TIGR00086.HMM", "TIGR00088.HMM",
+ "TIGR00090.HMM", "TIGR00092.HMM", "TIGR00095.HMM",
+ "TIGR00115.HMM", "TIGR00116.HMM", "TIGR00138.HMM",
+ "TIGR00158.HMM", "TIGR00166.HMM", "TIGR00168.HMM",
+ "TIGR00186.HMM", "TIGR00194.HMM", "TIGR00250.HMM",
+ "TIGR00337.HMM", "TIGR00344.HMM", "TIGR00362.HMM",
+ "TIGR00382.HMM", "TIGR00392.HMM", "TIGR00396.HMM",
+ "TIGR00398.HMM", "TIGR00414.HMM", "TIGR00416.HMM",
+ "TIGR00420.HMM", "TIGR00431.HMM", "TIGR00435.HMM",
+ "TIGR00436.HMM", "TIGR00442.HMM", "TIGR00445.HMM",
+ "TIGR00456.HMM", "TIGR00459.HMM", "TIGR00460.HMM",
+ "TIGR00468.HMM", "TIGR00472.HMM", "TIGR00487.HMM",
+ "TIGR00496.HMM", "TIGR00539.HMM", "TIGR00580.HMM",
+ "TIGR00593.HMM", "TIGR00615.HMM", "TIGR00631.HMM",
+ "TIGR00634.HMM", "TIGR00635.HMM", "TIGR00643.HMM",
+ "TIGR00663.HMM", "TIGR00717.HMM", "TIGR00755.HMM",
+ "TIGR00810.HMM", "TIGR00922.HMM", "TIGR00928.HMM",
+ "TIGR00959.HMM", "TIGR00963.HMM", "TIGR00964.HMM",
+ "TIGR00967.HMM", "TIGR01009.HMM", "TIGR01011.HMM",
+ "TIGR01017.HMM", "TIGR01021.HMM", "TIGR01029.HMM",
+ "TIGR01032.HMM", "TIGR01039.HMM", "TIGR01044.HMM",
+ "TIGR01059.HMM", "TIGR01063.HMM", "TIGR01066.HMM",
+ "TIGR01071.HMM", "TIGR01079.HMM", "TIGR01082.HMM",
+ "TIGR01087.HMM", "TIGR01128.HMM", "TIGR01146.HMM",
+ "TIGR01164.HMM", "TIGR01169.HMM", "TIGR01171.HMM",
+ "TIGR01302.HMM", "TIGR01391.HMM", "TIGR01393.HMM",
+ "TIGR01394.HMM", "TIGR01510.HMM", "TIGR01632.HMM",
+ "TIGR01951.HMM", "TIGR01953.HMM", "TIGR02012.HMM",
+ "TIGR02013.HMM", "TIGR02027.HMM", "TIGR02075.HMM",
+ "TIGR02191.HMM", "TIGR02273.HMM", "TIGR02350.HMM",
+ "TIGR02386.HMM", "TIGR02397.HMM", "TIGR02432.HMM",
+ "TIGR02729.HMM", "TIGR03263.HMM", "TIGR03594.HMM",
+ "TIGR03625.HMM", "TIGR03632.HMM", "TIGR03654.HMM",
+ "TIGR03723.HMM", "TIGR03725.HMM", "TIGR03953.HMM"]}
+
+ # New Version of AR53_MARKERS
+ AR53_MARKERS = {"PFAM": ["PF04919.13.hmm", "PF07541.13.hmm", "PF01000.27.hmm",
+ "PF00687.22.hmm", "PF00466.21.hmm", "PF00827.18.hmm", "PF01280.21.hmm", "PF01090.20.hmm",
+ "PF01200.19.hmm", "PF01015.19.hmm", "PF00900.21.hmm", "PF00410.20.hmm"],
+ "TIGRFAM": ["TIGR00037.HMM", "TIGR00064.HMM", "TIGR00111.HMM",
+ "TIGR00134.HMM", "TIGR00279.HMM", "TIGR00291.HMM", "TIGR00323.HMM",
+ "TIGR00335.HMM", "TIGR00373.HMM", "TIGR00405.HMM", "TIGR00448.HMM",
+ "TIGR00483.HMM", "TIGR00491.HMM", "TIGR00522.HMM", "TIGR00967.HMM",
+ "TIGR00982.HMM", "TIGR01008.HMM", "TIGR01012.HMM", "TIGR01018.HMM",
+ "TIGR01020.HMM", "TIGR01028.HMM", "TIGR01046.HMM", "TIGR01052.HMM",
+ "TIGR01171.HMM", "TIGR01213.HMM", "TIGR01952.HMM", "TIGR02236.HMM",
+ "TIGR02338.HMM", "TIGR02389.HMM", "TIGR02390.HMM", "TIGR03626.HMM",
+ "TIGR03627.HMM", "TIGR03628.HMM", "TIGR03629.HMM", "TIGR03670.HMM",
+ "TIGR03671.HMM", "TIGR03672.HMM", "TIGR03673.HMM", "TIGR03674.HMM",
+ "TIGR03676.HMM", "TIGR03680.HMM"]}
+
+ # Information for Multiple hits markers:
+ DEFAULT_MULTIHIT_THRESHOLD = 10.0
+
+ # Information for aligning genomes
+ DEFAULT_DOMAIN_THRESHOLD = 10.0
+ AR_MARKER_COUNT = 53
+ BAC_MARKER_COUNT = 120
+
+ # Information about alignment Fraction to resolve fastANI results
+ AF_THRESHOLD = 0.5
+
+ PPLACER_MIN_RAM_BAC_FULL = 320
+ PPLACER_MIN_RAM_BAC_SPLIT = 55
+ PPLACER_MIN_RAM_ARC = 40
+
+ FASTANI_SPECIES_THRESHOLD = 95.0
+ FASTANI_GENOMES_EXT = "_genomic.fna.gz"
+
+ # Mash configuration
+ MASH_SKETCH_FILE = 'gtdb_ref_sketch.msh'
+ MASH_K_VALUE = 16
+ MASH_S_VALUE = 5000
+ MASH_MAX_DISTANCE = 0.15
+ MASH_D_VALUE = MASH_MAX_DISTANCE
+ MASH_V_VALUE = 1.0
+
+ # Config values for checking GTDB-Tk on startup.
+ GTDBTK_VER_CHECK = True
+ GTDBTK_VER_TIMEOUT = 3 # seconds
+
+ # Internal settings used for logging.
+ LOG_TASK = 21
+
+ # To avoid multiple hits of parsing files
+ _generic_path = None
+ _red_dist_bac_dict = None
+ _red_dist_arc_dict = None
+ _version_data = None
+
+ @property
+ def GENERIC_PATH(self):
+ if self._generic_path is None:
+ try:
+ # expandvars is required to transform things like $HOME
+ out = os.path.expandvars(os.environ['GTDBTK_DATA_PATH'])
+ self._generic_path = out
+ except KeyError:
+ print('\n' + '=' * 80)
+ print(' ERROR '.center(80))
+ print('_' * 80 + '\n')
+ print("The 'GTDBTK_DATA_PATH' environment variable is not defined.".center(80) + '\n')
+ print('Please set this variable to your reference data package.'.center(80))
+ print('https://ecogenomics.github.io/GTDBTk/installing/index.html'.center(80))
+ print('=' * 80)
+ sys.exit(1)
+ return self._generic_path
+
+ @property
+ def MSA_FOLDER(self):
+ return os.path.join(self.GENERIC_PATH, 'msa/')
+
+ @property
+ def MASK_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'masks/')
+
+ @property
+ def PPLACER_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'pplacer/')
+
+ @property
+ def FASTANI_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'fastani/')
+
+ @property
+ def MASH_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'mash/')
+
+ @property
+ def TAX_FOLDER(self):
+ return os.path.join(self.GENERIC_PATH, 'taxonomy/')
+
+ @property
+ def RADII_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'radii/')
+
+ @property
+ def METADATA_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'metadata/')
+
+ @property
+ def RED_DIR(self):
+ return os.path.join(self.GENERIC_PATH, "mrca_red/")
+
+ @property
+ def MARKER_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'markers/')
+
+ @property
+ def TIGRFAM_HMMS(self):
+ return os.path.join(self.MARKER_DIR, 'tigrfam/tigrfam.hmm')
+
+ @property
+ def PFAM_HMM_DIR(self):
+ return os.path.join(self.MARKER_DIR, 'pfam/')
+
+ @property
+ def SPLIT_DIR(self):
+ return os.path.join(self.GENERIC_PATH, 'split')
+
+ @property
+ def BACKBONE_SPLIT_DIR(self):
+ return os.path.join(self.SPLIT_DIR, 'backbone')
+
+ @property
+ def CLASS_LEVEL_SPLIT_DIR(self):
+ return os.path.join(self.SPLIT_DIR, 'class_level')
+
+ @property
+ def BACKBONE_PPLACER_DIR(self):
+ return os.path.join(self.BACKBONE_SPLIT_DIR, 'pplacer')
+
+ @property
+ def CLASS_LEVEL_PPLACER_DIR(self):
+ return os.path.join(self.CLASS_LEVEL_SPLIT_DIR, 'pplacer')
+
+ @property
+ def BACKBONE_RED_DIR(self):
+ return os.path.join(self.BACKBONE_SPLIT_DIR, 'red')
+
+ @property
+ def CLASS_LEVEL_RED_DIR(self):
+ return os.path.join(self.CLASS_LEVEL_SPLIT_DIR, 'red')
+
+ @property
+ def CLASS_LEVEL_TREE_MAPPING_FILE(self):
+ return os.path.join(self.CLASS_LEVEL_SPLIT_DIR, 'tree_mapping.tsv')
+
+ @property
+ def BACKBONE_RED_FILE(self):
+ return os.path.join(self.BACKBONE_RED_DIR, 'backbone_red_value.tsv')
+
+ @property
+ def CLASS_LEVEL_RED_FILE(self):
+ return os.path.join(self.CLASS_LEVEL_RED_DIR, 'red_value_{iter}.tsv')
+
+ def _read_metadata_file(self):
+ if self._red_dist_bac_dict is None or self._red_dist_arc_dict is None or self._version_data is None:
+ try:
+ with open(os.path.join(self.METADATA_DIR, "metadata.txt")) as metadataData:
+ for line in metadataData:
+ try:
+ line_infos = line.strip().split('=')
+ if line_infos[0] == 'RED_DIST_BAC_DICT':
+ self._red_dist_bac_dict = json.loads(line_infos[1])
+ elif line_infos[0] == 'RED_DIST_ARC_DICT':
+ self._red_dist_arc_dict = json.loads(line_infos[1])
+ elif line_infos[0] == 'VERSION_DATA':
+ self._version_data = line_infos[1]
+ except ValueError:
+ print(f"Skipping invalid line {repr(line)}")
+ except IOError:
+ print('\n' + '=' * 80)
+ print(' ERROR '.center(80))
+ print('_' * 80 + '\n')
+ print('The GTDB-Tk reference data does not exist or is corrupted.'.center(80))
+ print(('GTDBTK_DATA_PATH=%s' % self.GENERIC_PATH).center(80) + '\n')
+ print('Please compare the checksum to those provided in the download repository.'.center(80))
+ print('https://github.com/Ecogenomics/GTDBTk#gtdb-tk-reference-data'.center(80))
+ print('=' * 80)
+ sys.exit(1)
+ return self._red_dist_bac_dict, self._red_dist_arc_dict, self._version_data
+
+ @property
+ def RED_DIST_BAC_DICT(self):
+ return self._read_metadata_file()[0]
+
+ @property
+ def RED_DIST_ARC_DICT(self):
+ return self._read_metadata_file()[1]
+
+ @property
+ def VERSION_DATA(self):
+ return self._read_metadata_file()[2]
+
+ """
+ MSA file names
+ """
+
+ @property
+ def CONCAT_BAC120(self):
+ return os.path.join(self.MSA_FOLDER, f"gtdb_{self.VERSION_DATA}_bac120.faa")
+
+ @property
+ def CONCAT_AR53(self):
+ return os.path.join(self.MSA_FOLDER, f"gtdb_{self.VERSION_DATA}_ar53.faa")
+
+ @property
+ def TAXONOMY_FILE(self):
+ return os.path.join(self.TAX_FOLDER, "gtdb_taxonomy.tsv")
+
+ @property
+ def RADII_FILE(self):
+ return os.path.join(self.RADII_DIR, "gtdb_radii.tsv")
+
+ """
+ Mask file names
+ """
+
+ @property
+ def MASK_BAC120(self):
+ return f"gtdb_{self.VERSION_DATA}_bac120.mask"
+
+ @property
+ def MASK_AR53(self):
+ return f"gtdb_{self.VERSION_DATA}_ar53.mask"
+
+ @property
+ def MASK_RPS23(self):
+ return f"gtdb_{self.VERSION_DATA}_rps23.mask"
+
+ @property
+ def PPLACER_BAC120_REF_PKG(self):
+ return f"gtdb_{self.VERSION_DATA}_bac120.refpkg"
+
+ @property
+ def PPLACER_AR53_REF_PKG(self):
+ return f"gtdb_{self.VERSION_DATA}_ar53.refpkg"
+
+ @property
+ def PPLACER_RPS23_REF_PKG(self):
+ return f"gtdb_{self.VERSION_DATA}_rps23.refpkg"
+
+ @property
+ def FASTANI_GENOMES(self):
+ return os.path.join(self.FASTANI_DIR, "database/")
+
+ @property
+ def FASTANI_GENOME_LIST(self):
+ return os.path.join(self.FASTANI_DIR, "genome_paths.tsv")
+
+ @property
+ def MRCA_RED_BAC120(self):
+ return os.path.join(self.RED_DIR, f"gtdbtk_{self.VERSION_DATA}_bac120.tsv")
+
+ @property
+ def MRCA_RED_AR53(self):
+ return os.path.join(self.RED_DIR, f"gtdbtk_{self.VERSION_DATA}_ar53.tsv")
+
+    def get_REF_HASHES(self, version=None):
+        # Use self (not the module-level CONFIG singleton) so the class works when
+        # instantiated directly, e.g. in tests; every other method uses self too.
+        compatible_versions = [int(x.replace('r', '')) for x in self.COMPATIBLE_REF_DATA_VERSIONS]
+        if version is not None and version not in compatible_versions:
+            raise ValueError(f"Version {version} is not compatible with this version of GTDB-Tk. Compatible versions are {compatible_versions}")
+        # Expected SHA-1 hashes of each reference data directory, keyed by path, per release.
+        if version is None or version == 214:
+            return {
+                self.PPLACER_DIR: '6786e9fc16b31db7d6eaaa9f8cfa87a8a4974434',
+                self.MASK_DIR: '8d5a2139feabbb70789c62155f3761d2aeed1601',
+                self.MARKER_DIR: '163f542c3f0a40f59df45d453aa235b39aa96e27',
+                self.RADII_DIR: '4753acc920001a1400788ee89cb4632900449055',
+                self.MSA_FOLDER: '75df495678a121497e14346b453caf42f4b03922',
+                self.METADATA_DIR: 'a089cc36bf79a40c7506019accc5f93e940d9fed',
+                self.TAX_FOLDER: '89b12cf8106f326887599dcb30ef94ebba142035',
+                self.FASTANI_DIR: 'e12824beccc15fe67a373e2aa8eee72feecf89c6',
+                self.RED_DIR: 'c24a2f48bb0c1df38f92a8f526aa846f596c94c6'
+            }
+        elif version == 207:
+            return {
+                self.PPLACER_DIR: '20903925a856a58b102a7b0ce160c5cbd2cf675b',
+                self.MASK_DIR: '50e414a9de18170e8cb97f990f89ff60a0fe29d5',
+                self.MARKER_DIR: '163f542c3f0a40f59df45d453aa235b39aa96e27',
+                self.RADII_DIR: '8fd13b1c5d7a7b073ba96fb628581613b293a374',
+                self.MSA_FOLDER: '24f250d7cf0eb0bc65dccd2f3c9247e553ea322f',
+                self.METADATA_DIR: '9772fbeac1311b31e10293fa610eb33aa1ec8e15',
+                self.TAX_FOLDER: '6fb0233b05633242369b40c026fd1ee53e266afa',
+                self.FASTANI_DIR: '973c456c02f55bb82908a6811c7076e207e9b206',
+                self.RED_DIR: '7b8b67b3157204b470c9eb809d3c39c4effffabc'
+            }
+
+    REF_HASHES = property(get_REF_HASHES)
+
+
+# Export the class for import by other modules
+@lru_cache(maxsize=1)
+def __get_config():
+ return __GTDBTkCommonConfig()
+
+
+CONFIG = __get_config()
diff --git a/gtdbtk/config/config.py b/gtdbtk/config/config.py
deleted file mode 100644
index ccccac9e..00000000
--- a/gtdbtk/config/config.py
+++ /dev/null
@@ -1,266 +0,0 @@
-import json
-import os
-import sys
-
-"""
-Load the reference package. This will fail if the directory doesn't exist.
-"""
-try:
- GENERIC_PATH = os.environ['GTDBTK_DATA_PATH']
- #expand the variables in the path
- GENERIC_PATH = os.path.expandvars(GENERIC_PATH)
-except KeyError:
- print('\n' + '=' * 80)
- print(' ERROR '.center(80))
- print('_' * 80 + '\n')
- print("The 'GTDBTK_DATA_PATH' environment variable is not defined.".center(80) + '\n')
- print('Please set this variable to your reference data package.'.center(80))
- print('https://ecogenomics.github.io/GTDBTk/installing/index.html'.center(80))
- print('=' * 80)
- sys.exit(1)
-
-"""
-If the reference package sub-folders still exist in GTDBTK_DATA_PATH, then there
-is no need to edit the variables below.
-"""
-MIN_REF_DATA_VERSION = 'r207'
-
-MSA_FOLDER = os.path.join(GENERIC_PATH, "msa/")
-MASK_DIR = os.path.join(GENERIC_PATH, "masks/")
-PPLACER_DIR = os.path.join(GENERIC_PATH, "pplacer/")
-FASTANI_DIR = os.path.join(GENERIC_PATH, "fastani/")
-MASH_DIR = os.path.join(GENERIC_PATH, "mash/")
-
-TAX_FOLDER = os.path.join(GENERIC_PATH, "taxonomy/")
-RADII_DIR = os.path.join(GENERIC_PATH, "radii/")
-METADATA_DIR = os.path.join(GENERIC_PATH, "metadata/")
-RED_DIR = os.path.join(GENERIC_PATH, "mrca_red/")
-MARKER_DIR = os.path.join(GENERIC_PATH, 'markers/')
-TIGRFAM_HMMS = os.path.join(MARKER_DIR, 'tigrfam/tigrfam.hmm')
-PFAM_HMM_DIR = os.path.join(MARKER_DIR, 'pfam/')
-
-SPLIT_DIR = os.path.join(GENERIC_PATH, 'split')
-BACKBONE_SPLIT_DIR = os.path.join(SPLIT_DIR, 'backbone')
-CLASS_LEVEL_SPLIT_DIR = os.path.join(SPLIT_DIR, 'class_level')
-BACKBONE_PPLACER_DIR = os.path.join(BACKBONE_SPLIT_DIR, 'pplacer')
-CLASS_LEVEL_PPLACER_DIR = os.path.join(CLASS_LEVEL_SPLIT_DIR, 'pplacer')
-BACKBONE_RED_DIR = os.path.join(BACKBONE_SPLIT_DIR, 'red')
-CLASS_LEVEL_RED_DIR = os.path.join(CLASS_LEVEL_SPLIT_DIR, 'red')
-
-CLASS_LEVEL_TREE_MAPPING_FILE = os.path.join(CLASS_LEVEL_SPLIT_DIR, 'tree_mapping.tsv')
-
-BACKBONE_PPLACER_REF_PKG = 'gtdbtk_package_backbone.refpkg'
-BACKBONE_RED_FILE = os.path.join(BACKBONE_RED_DIR, 'backbone_red_value.tsv')
-CLASS_LEVEL_PPLACER_REF_PKG = 'gtdbtk.package.{iter}.refpkg'
-CLASS_LEVEL_RED_FILE = os.path.join(CLASS_LEVEL_RED_DIR, 'red_value_{iter}.tsv')
-
-RED_DIST_BAC_DICT = ''
-RED_DIST_ARC_DICT = ''
-VERSION_DATA = ''
-try:
- with open(os.path.join(METADATA_DIR, "metadata.txt")) as metadataData:
- for line in metadataData:
- try:
- line_infos = line.strip().split('=')
- if line_infos[0] == 'RED_DIST_BAC_DICT':
- RED_DIST_BAC_DICT = json.loads(line_infos[1])
- elif line_infos[0] == 'RED_DIST_ARC_DICT':
- RED_DIST_ARC_DICT = json.loads(line_infos[1])
- elif line_infos[0] == 'VERSION_DATA':
- VERSION_DATA = line_infos[1]
- except ValueError:
- print(f"Skipping invalid line {repr(line)}")
-except IOError:
- print('\n' + '=' * 80)
- print(' ERROR '.center(80))
- print('_' * 80 + '\n')
- print('The GTDB-Tk reference data does not exist or is corrupted.'.center(80))
- print(('GTDBTK_DATA_PATH=%s' % GENERIC_PATH).center(80) + '\n')
- print('Please compare the checksum to those provided in the download repository.'.center(80))
- print('https://github.com/Ecogenomics/GTDBTk#gtdb-tk-reference-data'.center(80))
- print('=' * 80)
- sys.exit(1)
-
-# Relative Evolution Distance
-RED_INTERVAL = 0.1
-RED_MIN_SUPPORT = 0.0
-RED_MIN_CHILDREN = 2
-
-# Marker information
-BAC120_MARKERS = {"PFAM": ["PF00380.20.hmm", "PF00410.20.hmm", "PF00466.21.hmm",
- "PF01025.20.hmm", "PF02576.18.hmm", "PF03726.15.hmm"],
- "TIGRFAM": ["TIGR00006.HMM", "TIGR00019.HMM", "TIGR00020.HMM",
- "TIGR00029.HMM", "TIGR00043.HMM", "TIGR00054.HMM",
- "TIGR00059.HMM", "TIGR00061.HMM", "TIGR00064.HMM",
- "TIGR00065.HMM", "TIGR00082.HMM", "TIGR00083.HMM",
- "TIGR00084.HMM", "TIGR00086.HMM", "TIGR00088.HMM",
- "TIGR00090.HMM", "TIGR00092.HMM", "TIGR00095.HMM",
- "TIGR00115.HMM", "TIGR00116.HMM", "TIGR00138.HMM",
- "TIGR00158.HMM", "TIGR00166.HMM", "TIGR00168.HMM",
- "TIGR00186.HMM", "TIGR00194.HMM", "TIGR00250.HMM",
- "TIGR00337.HMM", "TIGR00344.HMM", "TIGR00362.HMM",
- "TIGR00382.HMM", "TIGR00392.HMM", "TIGR00396.HMM",
- "TIGR00398.HMM", "TIGR00414.HMM", "TIGR00416.HMM",
- "TIGR00420.HMM", "TIGR00431.HMM", "TIGR00435.HMM",
- "TIGR00436.HMM", "TIGR00442.HMM", "TIGR00445.HMM",
- "TIGR00456.HMM", "TIGR00459.HMM", "TIGR00460.HMM",
- "TIGR00468.HMM", "TIGR00472.HMM", "TIGR00487.HMM",
- "TIGR00496.HMM", "TIGR00539.HMM", "TIGR00580.HMM",
- "TIGR00593.HMM", "TIGR00615.HMM", "TIGR00631.HMM",
- "TIGR00634.HMM", "TIGR00635.HMM", "TIGR00643.HMM",
- "TIGR00663.HMM", "TIGR00717.HMM", "TIGR00755.HMM",
- "TIGR00810.HMM", "TIGR00922.HMM", "TIGR00928.HMM",
- "TIGR00959.HMM", "TIGR00963.HMM", "TIGR00964.HMM",
- "TIGR00967.HMM", "TIGR01009.HMM", "TIGR01011.HMM",
- "TIGR01017.HMM", "TIGR01021.HMM", "TIGR01029.HMM",
- "TIGR01032.HMM", "TIGR01039.HMM", "TIGR01044.HMM",
- "TIGR01059.HMM", "TIGR01063.HMM", "TIGR01066.HMM",
- "TIGR01071.HMM", "TIGR01079.HMM", "TIGR01082.HMM",
- "TIGR01087.HMM", "TIGR01128.HMM", "TIGR01146.HMM",
- "TIGR01164.HMM", "TIGR01169.HMM", "TIGR01171.HMM",
- "TIGR01302.HMM", "TIGR01391.HMM", "TIGR01393.HMM",
- "TIGR01394.HMM", "TIGR01510.HMM", "TIGR01632.HMM",
- "TIGR01951.HMM", "TIGR01953.HMM", "TIGR02012.HMM",
- "TIGR02013.HMM", "TIGR02027.HMM", "TIGR02075.HMM",
- "TIGR02191.HMM", "TIGR02273.HMM", "TIGR02350.HMM",
- "TIGR02386.HMM", "TIGR02397.HMM", "TIGR02432.HMM",
- "TIGR02729.HMM", "TIGR03263.HMM", "TIGR03594.HMM",
- "TIGR03625.HMM", "TIGR03632.HMM", "TIGR03654.HMM",
- "TIGR03723.HMM", "TIGR03725.HMM", "TIGR03953.HMM"]}
-
-
-
-#New Version of AR53_MARKERS
-AR53_MARKERS = {"PFAM": ["PF04919.13.hmm","PF07541.13.hmm","PF01000.27.hmm",
- "PF00687.22.hmm","PF00466.21.hmm","PF00827.18.hmm","PF01280.21.hmm","PF01090.20.hmm",
- "PF01200.19.hmm","PF01015.19.hmm","PF00900.21.hmm","PF00410.20.hmm"],
- "TIGRFAM":["TIGR00037.HMM","TIGR00064.HMM","TIGR00111.HMM",
- "TIGR00134.HMM","TIGR00279.HMM","TIGR00291.HMM","TIGR00323.HMM",
- "TIGR00335.HMM","TIGR00373.HMM","TIGR00405.HMM","TIGR00448.HMM",
- "TIGR00483.HMM","TIGR00491.HMM","TIGR00522.HMM","TIGR00967.HMM",
- "TIGR00982.HMM","TIGR01008.HMM","TIGR01012.HMM","TIGR01018.HMM",
- "TIGR01020.HMM","TIGR01028.HMM","TIGR01046.HMM","TIGR01052.HMM",
- "TIGR01171.HMM","TIGR01213.HMM","TIGR01952.HMM","TIGR02236.HMM",
- "TIGR02338.HMM","TIGR02389.HMM","TIGR02390.HMM","TIGR03626.HMM",
- "TIGR03627.HMM","TIGR03628.HMM","TIGR03629.HMM","TIGR03670.HMM",
- "TIGR03671.HMM","TIGR03672.HMM","TIGR03673.HMM","TIGR03674.HMM",
- "TIGR03676.HMM","TIGR03680.HMM"]}
-
-
-# Information for Multiple hits markers:
-DEFAULT_MULTIHIT_THRESHOLD = 10.0
-
-# Information for aligning genomes
-DEFAULT_DOMAIN_THRESHOLD = 10.0
-AR_MARKER_COUNT = 53
-BAC_MARKER_COUNT = 120
-
-# Information about alignment Fraction to resolve fastANI results
-AF_THRESHOLD = 0.5
-
-# MSA file names
-CONCAT_BAC120 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_bac120.faa")
-CONCAT_AR53 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_ar53.faa")
-
-# Taxonomy file name
-TAXONOMY_FILE = os.path.join(TAX_FOLDER, "gtdb_taxonomy.tsv")
-
-# Type Strain radii file
-RADII_FILE = os.path.join(RADII_DIR, "gtdb_radii.tsv")
-
-# Mask file names
-MASK_BAC120 = f"gtdb_{VERSION_DATA}_bac120.mask"
-MASK_AR53 = f"gtdb_{VERSION_DATA}_ar53.mask"
-MASK_RPS23 = f"gtdb_{VERSION_DATA}_rps23.mask"
-
-# Pplacer configuration
-PPLACER_BAC120_REF_PKG = f"gtdb_{VERSION_DATA}_bac120.refpkg"
-PPLACER_AR53_REF_PKG = f"gtdb_{VERSION_DATA}_ar53.refpkg"
-PPLACER_RPS23_REF_PKG = f"gtdb_{VERSION_DATA}_rps23.refpkg"
-PPLACER_MIN_RAM_BAC_FULL = 320
-PPLACER_MIN_RAM_BAC_SPLIT = 55
-PPLACER_MIN_RAM_ARC = 40
-
-# Fastani configuration
-FASTANI_SPECIES_THRESHOLD = 95.0
-FASTANI_GENOMES = os.path.join(FASTANI_DIR, "database/")
-FASTANI_GENOME_LIST = os.path.join(FASTANI_DIR, "genome_paths.tsv")
-FASTANI_GENOMES_EXT = "_genomic.fna.gz"
-
-# Mash configuration
-MASH_SKETCH_FILE = 'gtdb_ref_sketch.msh'
-MASH_K_VALUE = 16
-MASH_S_VALUE = 5000
-MASH_MAX_DISTANCE = 0.1
-MASH_D_VALUE = MASH_MAX_DISTANCE
-MASH_V_VALUE = 1.0
-
-
-# MRCA RED VALUE
-MRCA_RED_BAC120 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_bac120.tsv")
-MRCA_RED_AR53 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_ar53.tsv")
-
-# Hashing information for validating the reference package.
-REF_HASHES = {PPLACER_DIR: '20903925a856a58b102a7b0ce160c5cbd2cf675b',
- MASK_DIR: '50e414a9de18170e8cb97f990f89ff60a0fe29d5',
- MARKER_DIR: '163f542c3f0a40f59df45d453aa235b39aa96e27',
- RADII_DIR: '8fd13b1c5d7a7b073ba96fb628581613b293a374',
- MSA_FOLDER: '24f250d7cf0eb0bc65dccd2f3c9247e553ea322f',
- METADATA_DIR: '9772fbeac1311b31e10293fa610eb33aa1ec8e15',
- TAX_FOLDER: '6fb0233b05633242369b40c026fd1ee53e266afa',
- FASTANI_DIR: '973c456c02f55bb82908a6811c7076e207e9b206',
- RED_DIR: '7b8b67b3157204b470c9eb809d3c39c4effffabc'}
-
-# Config values for checking GTDB-Tk on startup.
-GTDBTK_VER_CHECK = True
-GTDBTK_VER_TIMEOUT = 3 # seconds
-
-# Internal settings used for logging.
-LOG_TASK = 21
-
-
-
-
-#
-#New Version of AR53_MARKERS
-# AR53_MARKERS = {"PFAM": ["PF01868.17.hmm", "PF01282.20.hmm", "PF01655.19.hmm",
-# "PF01092.20.hmm", "PF01000.27.hmm", "PF00368.19.hmm",
-# "PF00827.18.hmm", "PF01269.18.hmm", "PF00466.21.hmm",
-# "PF01015.19.hmm", "PF13685.7.hmm", "PF02978.20.hmm",
-# "PF04919.13.hmm", "PF01984.21.hmm", "PF04104.15.hmm",
-# "PF00410.20.hmm", "PF01798.19.hmm", "PF01864.18.hmm",
-# "PF01990.18.hmm", "PF07541.13.hmm", "PF04019.13.hmm",
-# "PF00900.21.hmm", "PF01090.20.hmm", "PF02006.17.hmm",
-# "PF01157.19.hmm", "PF01191.20.hmm", "PF01866.18.hmm",
-# "PF01198.20.hmm", "PF01496.20.hmm", "PF00687.22.hmm",
-# "PF03874.17.hmm", "PF01194.18.hmm", "PF01200.19.hmm",
-# "PF13656.7.hmm", "PF01280.21.hmm"],
-# "TIGRFAM": ["TIGR00468.HMM", "TIGR01060.HMM", "TIGR03627.HMM",
-# "TIGR01020.HMM", "TIGR02258.HMM", "TIGR00293.HMM",
-# "TIGR00389.HMM", "TIGR01012.HMM", "TIGR00490.HMM",
-# "TIGR03677.HMM", "TIGR03636.HMM", "TIGR03722.HMM",
-# "TIGR00458.HMM", "TIGR00291.HMM", "TIGR00670.HMM",
-# "TIGR00064.HMM", "TIGR03629.HMM", "TIGR00021.HMM",
-# "TIGR03672.HMM", "TIGR00111.HMM", "TIGR03684.HMM",
-# "TIGR01077.HMM", "TIGR01213.HMM", "TIGR01080.HMM",
-# "TIGR00501.HMM", "TIGR00729.HMM", "TIGR01038.HMM",
-# "TIGR00270.HMM", "TIGR03628.HMM", "TIGR01028.HMM",
-# "TIGR00521.HMM", "TIGR03671.HMM", "TIGR00240.HMM",
-# "TIGR02390.HMM", "TIGR02338.HMM", "TIGR00037.HMM",
-# "TIGR02076.HMM", "TIGR00335.HMM", "TIGR01025.HMM",
-# "TIGR00471.HMM", "TIGR00336.HMM", "TIGR00522.HMM",
-# "TIGR02153.HMM", "TIGR02651.HMM", "TIGR03674.HMM",
-# "TIGR00323.HMM", "TIGR00134.HMM", "TIGR02236.HMM",
-# "TIGR03683.HMM", "TIGR00491.HMM", "TIGR00658.HMM",
-# "TIGR03680.HMM", "TIGR00392.HMM", "TIGR00422.HMM",
-# "TIGR00279.HMM", "TIGR01052.HMM", "TIGR00442.HMM",
-# "TIGR00308.HMM", "TIGR00398.HMM", "TIGR00456.HMM",
-# "TIGR00549.HMM", "TIGR00408.HMM", "TIGR00432.HMM",
-# "TIGR00264.HMM", "TIGR00982.HMM", "TIGR00324.HMM",
-# "TIGR01952.HMM", "TIGR03626.HMM", "TIGR03670.HMM",
-# "TIGR00337.HMM", "TIGR01046.HMM", "TIGR01018.HMM",
-# "TIGR00936.HMM", "TIGR00463.HMM", "TIGR01309.HMM",
-# "TIGR03653.HMM", "TIGR00042.HMM", "TIGR02389.HMM",
-# "TIGR00307.HMM", "TIGR03673.HMM", "TIGR00373.HMM",
-# "TIGR01008.HMM", "TIGR00283.HMM", "TIGR00425.HMM",
-# "TIGR00405.HMM", "TIGR03665.HMM", "TIGR00448.HMM"]}
diff --git a/gtdbtk/external/mash.py b/gtdbtk/external/mash.py
index fa18bd7d..7ee02144 100644
--- a/gtdbtk/external/mash.py
+++ b/gtdbtk/external/mash.py
@@ -8,7 +8,7 @@
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.tools import tqdm_log
-import gtdbtk.config.config as Config
+from gtdbtk.config.common import CONFIG
class Mash(object):
@@ -271,7 +271,7 @@ def __init__(self, genomes, root, prefix, cpus, k, s):
class RefSketchFile(SketchFile):
- name = Config.MASH_SKETCH_FILE
+ name = CONFIG.MASH_SKETCH_FILE
def __init__(self, genomes, root, prefix, cpus, k, s, mash_db=None):
"""Create a query file for a given set of genomes.
diff --git a/gtdbtk/files/gtdb_radii.py b/gtdbtk/files/gtdb_radii.py
index 55086a6a..b0f3ef73 100644
--- a/gtdbtk/files/gtdb_radii.py
+++ b/gtdbtk/files/gtdb_radii.py
@@ -1,16 +1,18 @@
from gtdbtk.biolib_lite.common import canonical_gid
-from gtdbtk.config.config import RADII_FILE
-
+from gtdbtk.config.common import CONFIG
class GTDBRadiiFile(object):
"""A wrapper for the gtdb_radii.tsv file included in the reference data."""
- path = RADII_FILE
def __init__(self):
self._rep_idx = None
self._species_idx = None
self._read()
+ @property
+ def path(self):
+ return CONFIG.RADII_FILE
+
def _read(self):
"""Read the file and create any data."""
self._rep_idx, self._species_idx = dict(), dict()
diff --git a/gtdbtk/files/marker/copy_number.py b/gtdbtk/files/marker/copy_number.py
index b0323f83..8b6a3f0b 100644
--- a/gtdbtk/files/marker/copy_number.py
+++ b/gtdbtk/files/marker/copy_number.py
@@ -21,7 +21,7 @@
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.biolib_lite.seq_io import read_fasta
-from gtdbtk.config.config import BAC120_MARKERS, AR53_MARKERS
+from gtdbtk.config.common import CONFIG
from gtdbtk.config.output import PATH_BAC120_MARKER_SUMMARY, PATH_AR53_MARKER_SUMMARY
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.files.marker.tophit import TopHitPfamFile, TopHitTigrFile, Hit
@@ -159,7 +159,7 @@ class CopyNumberFileAR53(CopyNumberFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_AR53_MARKER_SUMMARY.format(prefix=prefix))
- super().__init__(path, 'ar53', AR53_MARKERS)
+ super().__init__(path, 'ar53', CONFIG.AR53_MARKERS)
class CopyNumberFileBAC120(CopyNumberFile):
@@ -167,4 +167,4 @@ class CopyNumberFileBAC120(CopyNumberFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_BAC120_MARKER_SUMMARY.format(prefix=prefix))
- super().__init__(path, 'bac120', BAC120_MARKERS)
+ super().__init__(path, 'bac120', CONFIG.BAC120_MARKERS)
diff --git a/gtdbtk/files/marker_info.py b/gtdbtk/files/marker_info.py
index b5e07809..f07d0d23 100644
--- a/gtdbtk/files/marker_info.py
+++ b/gtdbtk/files/marker_info.py
@@ -18,20 +18,24 @@
import os
from gtdbtk.biolib_lite.common import make_sure_path_exists
-from gtdbtk.config.config import AR53_MARKERS, BAC120_MARKERS, TIGRFAM_HMMS, PFAM_HMM_DIR
+from gtdbtk.config.common import CONFIG
from gtdbtk.config.output import PATH_AR53_MARKER_INFO, PATH_BAC120_MARKER_INFO
class MarkerInfoFile(object):
"""Store the GTDB-Tk RED dictionary."""
- marker_paths = {"PFAM": os.path.join(PFAM_HMM_DIR, 'individual_hmms'),
- "TIGRFAM": os.path.join(os.path.dirname(TIGRFAM_HMMS), 'individual_hmms')}
-
def __init__(self, path: str, markers: dict):
self.path = path
self.markers = self._parse_markers(markers)
+ @property
+ def marker_paths(self):
+ return {
+ "PFAM": os.path.join(CONFIG.PFAM_HMM_DIR, 'individual_hmms'),
+ "TIGRFAM": os.path.join(os.path.dirname(CONFIG.TIGRFAM_HMMS), 'individual_hmms')
+ }
+
def _parse_markers(self, markers):
out = dict()
for db_marker in sorted(markers):
@@ -71,7 +75,7 @@ class MarkerInfoFileAR53(MarkerInfoFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_AR53_MARKER_INFO.format(prefix=prefix))
- super().__init__(path, AR53_MARKERS)
+ super().__init__(path, CONFIG.AR53_MARKERS)
class MarkerInfoFileBAC120(MarkerInfoFile):
@@ -79,4 +83,4 @@ class MarkerInfoFileBAC120(MarkerInfoFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_BAC120_MARKER_INFO.format(prefix=prefix))
- super().__init__(path, BAC120_MARKERS)
+ super().__init__(path, CONFIG.BAC120_MARKERS)
diff --git a/gtdbtk/files/red_dict.py b/gtdbtk/files/red_dict.py
index 989b64ec..7bd50180 100644
--- a/gtdbtk/files/red_dict.py
+++ b/gtdbtk/files/red_dict.py
@@ -19,7 +19,7 @@
from typing import Dict
from gtdbtk.biolib_lite.common import make_sure_path_exists
-from gtdbtk.config.config import RED_DIST_ARC_DICT, RED_DIST_BAC_DICT
+from gtdbtk.config.common import CONFIG
from gtdbtk.config.output import PATH_AR53_RED_DICT, PATH_BAC120_RED_DICT
@@ -46,7 +46,7 @@ class REDDictFileAR53(REDDictFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_AR53_RED_DICT.format(prefix=prefix))
- super().__init__(path, RED_DIST_ARC_DICT)
+ super().__init__(path, CONFIG.RED_DIST_ARC_DICT)
class REDDictFileBAC120(REDDictFile):
@@ -54,4 +54,4 @@ class REDDictFileBAC120(REDDictFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_BAC120_RED_DICT.format(prefix=prefix))
- super().__init__(path, RED_DIST_BAC_DICT)
+ super().__init__(path, CONFIG.RED_DIST_BAC_DICT)
diff --git a/gtdbtk/infer_ranks.py b/gtdbtk/infer_ranks.py
index 7464ee29..a7b46a74 100644
--- a/gtdbtk/infer_ranks.py
+++ b/gtdbtk/infer_ranks.py
@@ -21,12 +21,7 @@
from gtdbtk.biolib_lite.newick import parse_label, create_label
from gtdbtk.biolib_lite.taxonomy import Taxonomy
-from gtdbtk.config.config import (TAXONOMY_FILE,
- RED_DIST_BAC_DICT,
- RED_DIST_ARC_DICT,
- MRCA_RED_BAC120,
- MRCA_RED_AR53,
- RED_INTERVAL)
+from gtdbtk.config.common import CONFIG
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.relative_distance import RelativeDistance
@@ -43,7 +38,7 @@ def _get_ingroup_domain(self, ingroup_taxon) -> str:
"""Get domain on ingroup taxon."""
# read GTDB taxonomy in order to establish domain on ingroup taxon
- gtdb_taxonomy = Taxonomy().read(TAXONOMY_FILE)
+ gtdb_taxonomy = Taxonomy().read(CONFIG.TAXONOMY_FILE)
ingroup_domain = None
for taxa in gtdb_taxonomy.values():
if ingroup_taxon in taxa:
@@ -60,9 +55,9 @@ def _get_median_reds(self, ingroup_domain: str):
# get median RED values for domain
if ingroup_domain == 'd__Bacteria':
- median_reds = RED_DIST_BAC_DICT
+ median_reds = CONFIG.RED_DIST_BAC_DICT
elif ingroup_domain == 'd__Archaea':
- median_reds = RED_DIST_ARC_DICT
+ median_reds = CONFIG.RED_DIST_ARC_DICT
else:
raise GTDBTkExit(f'Unrecognized GTDB domain: {ingroup_domain}.')
@@ -100,9 +95,9 @@ def _find_ingroup_taxon(self, ingroup_taxon, tree):
def _find_ingroup_red(self, ingroup_node, ingroup_domain, tree):
"""Find RED of the ingroup taxon."""
- red_file = MRCA_RED_BAC120
+ red_file = CONFIG.MRCA_RED_BAC120
if ingroup_domain == 'd__Archaea':
- red_file = MRCA_RED_AR53
+ red_file = CONFIG.MRCA_RED_AR53
# create map from leave labels to tree nodes
leaf_node_map = {}
@@ -132,7 +127,7 @@ def _determine_red_ranks(self, node_red, median_reds):
rank_label = Taxonomy.rank_labels[rank_idx]
abs_red_diff = abs(node_red - median_red)
- if abs_red_diff <= RED_INTERVAL:
+ if abs_red_diff <= CONFIG.RED_INTERVAL:
red_ranks[rank_label] = abs_red_diff
red_ranks_label = []
diff --git a/gtdbtk/main.py b/gtdbtk/main.py
index 9eb0db4e..bf881dd1 100644
--- a/gtdbtk/main.py
+++ b/gtdbtk/main.py
@@ -27,7 +27,7 @@
from tqdm import tqdm
-import gtdbtk.config.config as Config
+from gtdbtk.config.common import CONFIG
from gtdbtk.ani_rep import ANIRep
from gtdbtk.ani_screen import ANIScreener
from gtdbtk.biolib_lite.common import (check_dir_exists,
@@ -86,20 +86,20 @@ def __init__(self, version,output_dir=None):
self.stage_logger = StageLogger()
self.stage_logger.version=self.version
self.stage_logger.command_line=f'{prog_name} {" ".join(sys.argv[1:])}'
- self.stage_logger.database_version = Config.VERSION_DATA
- self.stage_logger.database_path=Config.GENERIC_PATH
+ self.stage_logger.database_version = CONFIG.VERSION_DATA
+ self.stage_logger.database_path=CONFIG.GENERIC_PATH
self.stage_logger.output_dir=output_dir
self.stage_logger.path = os.path.join(output_dir, "gtdbtk.json")
def _check_package_compatibility(self):
"""Check that GTDB-Tk is using the most up-to-date reference package."""
- pkg_ver = float(Config.VERSION_DATA.replace('r', ''))
- min_ver = float(Config.MIN_REF_DATA_VERSION.replace('r', ''))
+ pkg_ver = float(CONFIG.VERSION_DATA.replace('r', ''))
+ min_ver = float(CONFIG.MIN_REF_DATA_VERSION.replace('r', ''))
self.logger.info(f'Using GTDB-Tk reference data version '
- f'{Config.VERSION_DATA}: {Config.GENERIC_PATH}')
+ f'{CONFIG.VERSION_DATA}: {CONFIG.GENERIC_PATH}')
if pkg_ver < min_ver:
self.logger.warning(colour(f'You are not using the reference data '
- f'intended for this release: {Config.MIN_REF_DATA_VERSION}',
+ f'intended for this release: {CONFIG.MIN_REF_DATA_VERSION}',
['bright'], fg='yellow'))
def _verify_genome_id(self, genome_id: str) -> bool:
@@ -220,7 +220,7 @@ def _read_taxonomy_files(self, options) -> Dict[str, Tuple[str, str, str, str, s
"""Read and merge taxonomy files."""
self.logger.info('Reading GTDB taxonomy for representative genomes.')
- taxonomy = Taxonomy().read(Config.TAXONOMY_FILE)
+ taxonomy = Taxonomy().read(CONFIG.TAXONOMY_FILE)
if options.gtdbtk_classification_file:
# add and overwrite taxonomy for genomes specified in the
@@ -852,7 +852,7 @@ def decorate(self, options):
self.logger.info('Done.')
- def check_install(self):
+ def check_install(self,options):
""" Verify all GTDB-Tk data files are present.
Raises
@@ -862,7 +862,7 @@ def check_install(self):
"""
self.logger.info("Running install verification")
misc = Misc()
- misc.check_install()
+ misc.check_install(options.db_version)
self.logger.info('Done.')
def infer_ranks(self, options):
@@ -911,6 +911,20 @@ def convert_to_itol(self, options):
r.convert_to_itol(options.input_tree, options.output_tree)
self.logger.info('Done.')
+ def convert_to_species(self, options):
+ """Change GTDB genome IDs to GTDB species names in the tree.
+
+ Parameters
+ ----------
+ options : argparse.Namespace
+ The CLI arguments input by the user.
+ """
+ check_file_exists(options.input_tree)
+
+ r = Misc()
+ r.convert_to_species(options.input_tree, options.output_tree,options.custom_taxonomy_file,options.all_ranks)
+ self.logger.info('Done.')
+
def remove_labels(self, options):
"""Remove labels from tree.
@@ -1201,6 +1215,8 @@ def parse_options(self, options):
self.remove_labels(options)
elif options.subparser_name == 'convert_to_itol':
self.convert_to_itol(options)
+ elif options.subparser_name == 'convert_to_species':
+ self.convert_to_species(options)
elif options.subparser_name == 'trim_msa':
self.trim_msa(options)
elif options.subparser_name == 'export_msa':
@@ -1210,7 +1226,7 @@ def parse_options(self, options):
'fastANI'])
self.run_test(options)
elif options.subparser_name == 'check_install':
- self.check_install()
+ self.check_install(options)
else:
self.logger.error('Unknown GTDB-Tk command: "' +
options.subparser_name + '"\n')
diff --git a/gtdbtk/markers.py b/gtdbtk/markers.py
index fcb3bc1c..fd9ad6cd 100644
--- a/gtdbtk/markers.py
+++ b/gtdbtk/markers.py
@@ -25,7 +25,7 @@
import gzip
import numpy as np
-import gtdbtk.config.config as Config
+from gtdbtk.config.common import CONFIG
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.biolib_lite.execute import check_dependencies
from gtdbtk.biolib_lite.seq_io import read_fasta
@@ -65,13 +65,13 @@ def __init__(self, cpus=1, debug=False):
self.gff_file_suffix = GFF_FILE_SUFFIX
self.checksum_suffix = CHECKSUM_SUFFIX
- self.taxonomy_file = Config.TAXONOMY_FILE
+ self.taxonomy_file = CONFIG.TAXONOMY_FILE
- self.pfam_hmm_dir = Config.PFAM_HMM_DIR
+ self.pfam_hmm_dir = CONFIG.PFAM_HMM_DIR
self.pfam_suffix = PFAM_SUFFIX
self.pfam_top_hit_suffix = PFAM_TOP_HIT_SUFFIX
- self.tigrfam_hmms = Config.TIGRFAM_HMMS
+ self.tigrfam_hmms = CONFIG.TIGRFAM_HMMS
self.tigrfam_suffix = TIGRFAM_SUFFIX
self.tigrfam_top_hit_suffix = TIGRFAM_TOP_HIT_SUFFIX
@@ -123,11 +123,11 @@ def _report_identified_marker_genes(self, gene_dict, outdir, prefix,
# Iterate over each domain.
marker_doms = list()
- marker_doms.append((Config.AR53_MARKERS['PFAM'] +
- Config.AR53_MARKERS['TIGRFAM'],
+ marker_doms.append((CONFIG.AR53_MARKERS['PFAM'] +
+ CONFIG.AR53_MARKERS['TIGRFAM'],
ar53_copy_number_file, 'ar53'))
- marker_doms.append((Config.BAC120_MARKERS['PFAM'] +
- Config.BAC120_MARKERS['TIGRFAM'],
+ marker_doms.append((CONFIG.BAC120_MARKERS['PFAM'] +
+ CONFIG.BAC120_MARKERS['TIGRFAM'],
bac120_copy_number_file, 'bac120'))
for marker_names, marker_file, marker_d in marker_doms:
@@ -201,7 +201,7 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, genes, write_sin
self.gff_file_suffix,
force)
self.logger.log(
- Config.LOG_TASK, f'Running Prodigal {prodigal.version} to identify genes.')
+ CONFIG.LOG_TASK, f'Running Prodigal {prodigal.version} to identify genes.')
genome_dictionary = prodigal.run(genomes, tln_tables)
else:
@@ -222,7 +222,7 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, genes, write_sin
symlink_f(os.path.abspath(gpath), os.path.join(symlink_protein_dir,gid+self.protein_file_suffix))
# annotated genes against TIGRFAM and Pfam databases
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
'Identifying TIGRFAM protein families.')
gene_files = [(db_genome_id, genome_dictionary[db_genome_id]['aa_gene_path'])
for db_genome_id in genome_dictionary.keys()]
@@ -235,7 +235,7 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, genes, write_sin
self.marker_gene_dir)
tigr_search.run(gene_files)
- self.logger.log(Config.LOG_TASK, 'Identifying Pfam protein families.')
+ self.logger.log(CONFIG.LOG_TASK, 'Identifying Pfam protein families.')
pfam_search = PfamSearch(self.cpus,
self.pfam_hmm_dir,
self.protein_file_suffix,
@@ -247,7 +247,7 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, genes, write_sin
self.logger.info(
f'Annotations done using HMMER {tigr_search.version}.')
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
'Summarising identified marker genes.')
reports = self._report_identified_marker_genes(genome_dictionary, out_dir, prefix,
write_single_copy_genes,reports)
@@ -414,8 +414,8 @@ def genome_domain(self, identity_dir, prefix):
ar_gids = set()
bac_ar_diff = {}
for gid in bac_count:
- arc_aa_per = (ar_count[gid] * 100.0 / Config.AR_MARKER_COUNT)
- bac_aa_per = (bac_count[gid] * 100.0 / Config.BAC_MARKER_COUNT)
+ arc_aa_per = (ar_count[gid] * 100.0 / CONFIG.AR_MARKER_COUNT)
+ bac_aa_per = (bac_count[gid] * 100.0 / CONFIG.BAC_MARKER_COUNT)
if bac_aa_per >= arc_aa_per:
bac_gids.add(gid)
else:
@@ -534,8 +534,8 @@ def align(self,
self.logger.info(
f'Aligning markers in {len(genomic_files):,} genomes with {self.cpus} CPUs.')
- dom_iter = ((bac_gids, Config.CONCAT_BAC120, Config.MASK_BAC120, "bac120", 'bacterial', CopyNumberFileBAC120),
- (ar_gids, Config.CONCAT_AR53, Config.MASK_AR53, "ar53", 'archaeal', CopyNumberFileAR53))
+ dom_iter = ((bac_gids, CONFIG.CONCAT_BAC120, CONFIG.MASK_BAC120, "bac120", 'bacterial', CopyNumberFileBAC120),
+ (ar_gids, CONFIG.CONCAT_AR53, CONFIG.MASK_AR53, "ar53", 'archaeal', CopyNumberFileAR53))
# For some genomes, it is possible to have no markers.
no_marker_gids = bac_gids.union(ar_gids)
@@ -576,7 +576,7 @@ def align(self,
gtdb_taxonomy,
taxa_filter,
outgroup_taxon)
- gtdb_msa_mask = os.path.join(Config.MASK_DIR, mask_file)
+ gtdb_msa_mask = os.path.join(CONFIG.MASK_DIR, mask_file)
# Generate the user MSA.
user_msa = align.align_marker_set(
@@ -641,7 +641,7 @@ def align(self,
self.logger.info(
f'Filtered genomes include {len(filtered_user_genomes)} user submitted genomes.')
else:
- self.logger.log(Config.LOG_TASK,
+ self.logger.log(CONFIG.LOG_TASK,
f'Masking columns of {domain_str} multiple sequence alignment using canonical mask.')
trimmed_seqs, pruned_seqs = self._apply_mask(gtdb_msa,
user_msa,
diff --git a/gtdbtk/misc.py b/gtdbtk/misc.py
index 32457539..925ea0e4 100644
--- a/gtdbtk/misc.py
+++ b/gtdbtk/misc.py
@@ -22,7 +22,8 @@
import dendropy
-import gtdbtk.config.config as Config
+from gtdbtk.biolib_lite.taxonomy import Taxonomy
+from gtdbtk.config.common import CONFIG
from gtdbtk.biolib_lite.execute import check_dependencies
from gtdbtk.biolib_lite.logger import colour
from gtdbtk.biolib_lite.newick import parse_label
@@ -53,9 +54,9 @@ def trim_msa(self, untrimmed_msa, mask_type, maskid, output_file):
The path to the output trimmed MSA.
"""
if maskid == 'bac' and mask_type == 'reference':
- mask = os.path.join(Config.MASK_DIR, Config.MASK_BAC120)
+ mask = os.path.join(CONFIG.MASK_DIR, CONFIG.MASK_BAC120)
elif maskid == 'arc' and mask_type == 'reference':
- mask = os.path.join(Config.MASK_DIR, Config.MASK_AR53)
+ mask = os.path.join(CONFIG.MASK_DIR, CONFIG.MASK_AR53)
elif mask_type == 'file':
mask = maskid
else:
@@ -158,6 +159,52 @@ def convert_to_itol(self, input_file, output_file):
intree.write_to_path(output_file, schema='newick', suppress_rooting=True,unquoted_underscores=True)
+ def convert_to_species(self, input_file, output_file,custom_taxonomy_file=None,all_ranks=False):
+ """Change GTDB genome IDs to GTDB species names in the tree.
+
+ Parameters
+ ----------
+ input_file : str
+ The path to the input Newick tree.
+ output_file : str
+ The path to the output Newick tree.
+ """
+
+ self.logger.info("Convert GTDB-Tk tree...")
+ intree= dendropy.Tree.get_from_path(input_file,
+ schema='newick',
+ rooting='force-rooted',
+ preserve_underscores=True)
+
+ # get all leaves from the tree
+ leaves = intree.leaf_nodes()
+ #load the taxonomy file
+ taxonomy = Taxonomy().read(CONFIG.TAXONOMY_FILE)
+ #load the custom taxonomy file
+ if custom_taxonomy_file:
+ self.logger.info("Loading custom taxonomy file...")
+ custom_taxonomy = Taxonomy().read(custom_taxonomy_file)
+ #check intersection between custom taxonomy and taxonomy
+ intersection = set(custom_taxonomy.keys()).intersection(set(taxonomy.keys()))
+ if len(intersection) > 0:
+ self.logger.warning("{} genomes are present in both custom taxonomy and taxonomy file. The custom taxonomy will be used.".format(len(intersection)))
+ #update taxonomy with custom taxonomy
+ taxonomy.update(custom_taxonomy)
+
+
+ #get the species name for each genome
+ for leaf in leaves:
+ if leaf.taxon.label in taxonomy:
+ # get the label from parent node
+ if all_ranks:
+ leaf.taxon.label = ';'.join(taxonomy[leaf.taxon.label])
+ else:
+ leaf.taxon.label = taxonomy[leaf.taxon.label][-1]
+
+ #write the tree
+ intree.write_to_path(output_file, schema='newick', suppress_rooting=True,unquoted_underscores=True)
+
+
def remove_intermediate_files(self,output_dir,wf_name):
"""Remove intermediate files.
@@ -190,7 +237,7 @@ def remove_intermediate_files(self,output_dir,wf_name):
shutil.rmtree(intermediate_infer)
self.logger.info('Intermediate files removed.')
- def check_install(self):
+ def check_install(self,db_version):
"""Check that all reference files exist.
Returns
@@ -220,8 +267,10 @@ def check_install(self):
ok = True
# Compute the hash for each directory
- self.logger.info(f'Checking integrity of reference package: {Config.GENERIC_PATH}')
- for obj_path, expected_hash in Config.REF_HASHES.items():
+ self.logger.info(f'Checking integrity of reference package: {CONFIG.GENERIC_PATH}')
+ ref_hashes = CONFIG.get_REF_HASHES(db_version)
+
+ for obj_path, expected_hash in ref_hashes.items():
base_name = obj_path[:-1] if obj_path.endswith('/') else obj_path
base_name = base_name.split('/')[-1]
user_hash = sha1_dir(obj_path, progress=True)
diff --git a/gtdbtk/pipeline/align.py b/gtdbtk/pipeline/align.py
index e64d214a..e0457228 100644
--- a/gtdbtk/pipeline/align.py
+++ b/gtdbtk/pipeline/align.py
@@ -5,7 +5,7 @@
import tempfile
from collections import defaultdict
-from gtdbtk.config.config import LOG_TASK
+from gtdbtk.config.common import CONFIG
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.external.hmm_aligner import HmmAligner
from gtdbtk.files.marker.copy_number import CopyNumberFile
@@ -215,7 +215,7 @@ def align_marker_set(gid_dict, marker_info_file: MarkerInfoFile, copy_number_fil
"""
logger = logging.getLogger('timestamp')
- logger.log(LOG_TASK, f'Generating concatenated alignment for each marker.')
+ logger.log(CONFIG.LOG_TASK, f'Generating concatenated alignment for each marker.')
single_copy_hits = get_single_copy_hits(gid_dict, copy_number_file, cpus)
with tempfile.TemporaryDirectory(prefix='gtdbtk_tmp_') as dir_tmp:
@@ -230,7 +230,7 @@ def align_marker_set(gid_dict, marker_info_file: MarkerInfoFile, copy_number_fil
# Run hmmalign on all of the markers (in order of largest)
hmmer_v = HmmAligner.get_version()
- logger.log(LOG_TASK, f'Aligning {len(marker_paths)} identified markers using hmmalign {hmmer_v}.')
+ logger.log(CONFIG.LOG_TASK, f'Aligning {len(marker_paths)} identified markers using hmmalign {hmmer_v}.')
queue = list()
for marker_id, marker_path in sorted(marker_paths.items(),
key=lambda z: -marker_info_file.markers[z[0]]['size']):
diff --git a/gtdbtk/pipeline/export_msa.py b/gtdbtk/pipeline/export_msa.py
index 31340998..4f215c41 100644
--- a/gtdbtk/pipeline/export_msa.py
+++ b/gtdbtk/pipeline/export_msa.py
@@ -2,7 +2,7 @@
from shutil import copyfile
from gtdbtk.biolib_lite.common import make_sure_path_exists
-from gtdbtk.config.config import CONCAT_AR53, CONCAT_BAC120
+from gtdbtk.config.common import CONFIG
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.model.enum import Domain
@@ -14,9 +14,9 @@ def export_msa(domain: Domain, output_file: str):
:param output_file: The path to write the MSA.
"""
if domain is Domain.ARCHAEA:
- file_to_export = CONCAT_AR53
+ file_to_export = CONFIG.CONCAT_AR53
elif domain is Domain.BACTERIA:
- file_to_export = CONCAT_BAC120
+ file_to_export = CONFIG.CONCAT_BAC120
else:
raise GTDBTkExit(f'Unknown domain: "{domain}"')
diff --git a/gtdbtk/split.py b/gtdbtk/split.py
index ee0c90db..b92547d5 100644
--- a/gtdbtk/split.py
+++ b/gtdbtk/split.py
@@ -18,7 +18,7 @@
import logging
-import gtdbtk.config.config as Config
+from gtdbtk.config.common import CONFIG
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.biolib_lite.newick import parse_label
from gtdbtk.biolib_lite.seq_io import read_fasta
@@ -69,7 +69,7 @@ def get_high_pplacer_taxonomy(self, out_dir, marker_set_id, prefix, user_msa_fil
self.logger.error('There was an error determining the marker set.')
raise GenomeMarkerSetUnknown
- red_bac_dict = Config.RED_DIST_BAC_DICT
+ red_bac_dict = CONFIG.RED_DIST_BAC_DICT
# We get the pplacer taxonomy for comparison
user_genome_ids = set(read_fasta(user_msa_file).keys())
diff --git a/gtdbtk/tools.py b/gtdbtk/tools.py
index 9022eb5a..77f33959 100644
--- a/gtdbtk/tools.py
+++ b/gtdbtk/tools.py
@@ -14,7 +14,7 @@
import dendropy
from tqdm import tqdm
-import gtdbtk.config.config as Config
+from gtdbtk.config.common import CONFIG
from gtdbtk.config.output import CHECKSUM_SUFFIX
from gtdbtk.exceptions import GTDBTkExit
@@ -34,7 +34,7 @@ def get_reference_ids():
An immutable set with short and long accessions (e.g. GB_GCA_ and GCA_).
"""
results = set()
- with open(Config.TAXONOMY_FILE) as tf:
+ with open(CONFIG.TAXONOMY_FILE) as tf:
for line in tf:
raw_id = line.split('\t')[0]
results.add(raw_id)
@@ -53,12 +53,12 @@ def get_ref_genomes():
Dict[genome_id] = fasta_path
"""
ref_genomes = dict()
- with open(Config.FASTANI_GENOME_LIST) as g_path_file:
+ with open(CONFIG.FASTANI_GENOME_LIST) as g_path_file:
for line in g_path_file:
(full_name, path) = line.strip().split()
- if full_name.endswith(Config.FASTANI_GENOMES_EXT):
- accession = full_name.split(Config.FASTANI_GENOMES_EXT)[0]
- ref_genomes[accession] = os.path.join(Config.FASTANI_DIR, path, full_name)
+ if full_name.endswith(CONFIG.FASTANI_GENOMES_EXT):
+ accession = full_name.split(CONFIG.FASTANI_GENOMES_EXT)[0]
+ ref_genomes[accession] = os.path.join(CONFIG.FASTANI_DIR, path, full_name)
return ref_genomes
def aa_percent_msa(aa_string):
@@ -290,11 +290,11 @@ def get_proc_memory_gb(pid):
def get_gtdbtk_latest_version():
- if not Config.GTDBTK_VER_CHECK:
+ if not CONFIG.GTDBTK_VER_CHECK:
return None
try:
resp = json.loads(urllib.request.urlopen('https://pypi.org/pypi/gtdbtk/json',
- timeout=Config.GTDBTK_VER_TIMEOUT).read().decode('utf-8'))
+ timeout=CONFIG.GTDBTK_VER_TIMEOUT).read().decode('utf-8'))
return resp['info']['version']
except Exception:
return None
| Unset GTDBTK_DATA_PATH prevents even most basic operation
Hi there,
I just upgraded my installation to 2.2.5 via pip; unfortunately even most basic commands don't work anymore:
```
$ gtdbtk --version
================================================================================
ERROR
________________________________________________________________________________
The 'GTDBTK_DATA_PATH' environment variable is not defined.
Please set this variable to your reference data package.
https://github.com/Ecogenomics/GTDBTk#installation
================================================================================
```
```
$ gtdbtk --help
================================================================================
ERROR
________________________________________________________________________________
The 'GTDBTK_DATA_PATH' environment variable is not defined.
Please set this variable to your reference data package.
https://github.com/Ecogenomics/GTDBTk#installation
================================================================================
```
```
$ gtdbtk check_install
================================================================================
ERROR
________________________________________________________________________________
The 'GTDBTK_DATA_PATH' environment variable is not defined.
Please set this variable to your reference data package.
https://github.com/Ecogenomics/GTDBTk#installation
================================================================================
```
https://github.com/Ecogenomics/GTDBTk#installation doesn't exist (as an HTML anchor), and the
documentation at https://ecogenomics.github.io/GTDBTk/ has nothing on setting up `GTDBTK_DATA_PATH`.
I guess basic things like `--version` and `--help` (at least) should work without this environment variable.
Missing Archaea summary when using ANI screen
Hello,
I've tested the new ANI screen method, using the mash DB, for `classify_wf`.
I've observed that I am missing a few genome at each run as well as the summary output `gtdbtl.ar53.summary.tsv`. The missing outputs correspond to Archaean genomes that were identified during the ANI screen as I can find them in `classify/ani_screen/gtdbtk.ar53.ani_summary.tsv`.
I guess implementation of the ANI screen missed the Archaea part of the pipeline ?
I am using version 2.2.6 in a conda environment created by installing GTDB-Tk from bioconda but I guess it is installation independent. Here is my command : `gtdbtk classify_wf --mash_db ./GTDB/gtdb-tk-r207v2.msh --genome_dir ./ALL/ -x fasta --out_dir gtdbtk2_classify --cpus 18 --pplacer_cpus 18 --tmpdir ./tmp --scratch_dir ./pplacer`
As it is a bit related, I was wondering if it was possible to consolidate the Archean et Bacterial summary into an unique output for the future release ?
Thank you.
Issue509
| Please try to set the `GTDBTK_DATA_PATH` as documented [here](https://ecogenomics.github.io/GTDBTk/installing/pip.html#step-3-download-and-alias-the-gtdb-tk-reference-data).
I will change the https://github.com/Ecogenomics/GTDBTk#installation URL to https://ecogenomics.github.io/GTDBTk/installing/index.html instead.
I'll do that, but nonetheless, I'd expect basic things (`--version`, `--help`) to work anyways.
| 2023-04-03T23:17:56 | 0.0 | [] | [] |
||
Ecogenomics/GTDBTk | Ecogenomics__GTDBTk-377 | aec940aebae65a086e17e7a50f35f2af43c35e77 | diff --git a/README.md b/README.md
index 8a41c7df..036e3d9d 100644
--- a/README.md
+++ b/README.md
@@ -7,17 +7,27 @@
[](https://hub.docker.com/r/ecogenomic/gtdbtk)
[](https://hub.docker.com/r/ecogenomic/gtdbtk)
-<b>[GTDB-Tk v1.5.0](https://ecogenomics.github.io/GTDBTk/announcements.html) was released on April 23, 2021 along with new reference data for [GTDB R06-RS202](https://gtdb.ecogenomic.org/). Upgrading is recommended.</b>
-<b> Please note v1.5.0+ is not compatible with GTDB R05-RS95. </b>
+<b>[GTDB-Tk v2.0.0](https://ecogenomics.github.io/GTDBTk/announcements.html) was released on April 8, 2022 along with new reference data for [GTDB R07-RS207](https://gtdb.ecogenomic.org/). Upgrading is recommended.</b>
+<b> Please note v2.0.0+ is not compatible with GTDB R06-RS202. </b>
GTDB-Tk is a software toolkit for assigning objective taxonomic classifications to bacterial and archaeal genomes based on the Genome Database Taxonomy [GTDB](https://gtdb.ecogenomic.org/). It is designed to work with recent advances that allow hundreds or thousands of metagenome-assembled genomes (MAGs) to be obtained directly from environmental samples. It can also be applied to isolate and single-cell genomes. The GTDB-Tk is open source and released under the [GNU General Public License (Version 3)](https://www.gnu.org/licenses/gpl-3.0.en.html).
Notifications about GTDB-Tk releases will be available through the GTDB Twitter account (https://twitter.com/ace_gtdb).
-Please post questions and issues related to GTDB-Tk on the Issues section of the GitHub repository. Questions related to the [GTDB](https://gtdb.ecogenomic.org/) should be sent to the [GTDB team](https://gtdb.ecogenomic.org/about).
+Please post questions and issues related to GTDB-Tk on the Issues section of the GitHub repository. Questions related to the [GTDB](https://gtdb.ecogenomic.org/) can be posted on the [GTDB Forum](https://forum.gtdb.ecogenomic.org/) or sent to the [GTDB team](https://gtdb.ecogenomic.org/about).
+
+## New Features
+
+GTDB-Tk v2.0.0 includes the following new features:
+- GTDB-TK now uses a **divide-and-conquer** approach where the bacterial reference tree is split into multiple order-level subtrees. This reduces the memory requirements of GTDB-Tk from **320 GB** of RAM when using the full GTDB R07-RS207 reference tree to approximately **35 GB**. A manuscript describing this approach is in preparation. If you wish to continue using the full GTDB reference tree use the `--full-tree` flag.
+- Archaeal classification now uses a refined set of 53 archaeal-specific marker genes based on the recent publication by [Dombrowski et al., 2020](https://www.nature.com/articles/s41467-020-17408-w). This set of archaeal marker genes is now used by GTDB for curating the archaeal taxonomy.
+- By default, all directories containing intermediate results are **now removed** by default at the end of the `classify_wf` and `de_novo_wf` pipelines. If you wish to retain these intermediates files use the `--keep-intermediates` flag.
+- All MSA files produced by the `align` step are now compressed with gzip.
+- The classification summary and failed genomes files are now the only files linked in the root directory of `classify_wf`.
+
## Documentation
-https://ecogenomics.github.io/GTDBTk/
+Documentation for GTDB-Tk can be found [here](https://ecogenomics.github.io/GTDBTk/).
## References
diff --git a/docs/src/announcements.rst b/docs/src/announcements.rst
index f3bb23e7..446cb301 100644
--- a/docs/src/announcements.rst
+++ b/docs/src/announcements.rst
@@ -1,6 +1,27 @@
Announcements
=============
+
+GTDB R207 available
+------------------
+
+*April xx, 2022*
+
+* GTDB Release 202 is now available and will be used from version ``2.0.0`` and up.
+* This version of GTDB-Tk requires a new version of the GTDB-Tk reference package
+ `gtdbtk_r207_data.tar.gz <https://data.ace.uq.edu.au/public/gtdb/data/releases/release207/207.0/auxillary_files>`_.
+
+
+GTDB R202 available
+------------------
+
+*April 23, 2021*
+
+* GTDB Release 202 is now available and will be used from version ``1.5.0`` and up.
+* This version of GTDB-Tk requires a new version of the GTDB-Tk reference package
+ `gtdbtk_r202_data.tar.gz <https://data.ace.uq.edu.au/public/gtdb/data/releases/release202/202.0/auxillary_files>`_.
+
+
GTDB R95 available
------------------
diff --git a/docs/src/changelog.rst b/docs/src/changelog.rst
index 9f35ce7a..cfcbc25e 100644
--- a/docs/src/changelog.rst
+++ b/docs/src/changelog.rst
@@ -52,7 +52,7 @@ Change log
* Check if stdout is being piped to a file before adding colour.
* (`#283 <https://github.com/Ecogenomics/GTDBTk/issues/283>`_) Significantly improved ``classify`` performance (noticeable when running trees > 1,000 taxa).
* Automatically cap pplacer CPUs to 64 unless specifying ``--pplacer_cpus`` to prevent pplacer from hanging.
-* (`#262 <https://github.com/Ecogenomics/GTDBTk/issues/262>`_) Added ``--write_single_copy_genes`` to the ``identify`` command. Writes unaligned single-copy AR122/BAC120 marker genes to disk.
+* (`#262 <https://github.com/Ecogenomics/GTDBTk/issues/262>`_) Added ``--write_single_copy_genes`` to the ``identify`` command. Writes unaligned single-copy AR53/BAC120 marker genes to disk.
* When running ``-version`` warn if GTDB-Tk is not running the most up-to-date version (disable via ``GTDBTK_VER_CHECK = False`` in ``config.py``). If GTDB-Tk encounters an error it will silently continue (3 second timeout).
* (`#276 <https://github.com/Ecogenomics/GTDBTk/issues/276>`_) Renamed the column ``aa_percent`` to ``msa_percent`` in ``summary.tsv`` (produced by ``classify``).
* (`#286 <https://github.com/Ecogenomics/GTDBTk/pull/286>`_) Fixed a file not found error when the reference data is a symbolic link (thanks `davidealbanese <https://github.com/davidealbanese>`_!).
diff --git a/docs/src/commands/align.rst b/docs/src/commands/align.rst
index 7120bef2..5bdcd957 100644
--- a/docs/src/commands/align.rst
+++ b/docs/src/commands/align.rst
@@ -3,7 +3,7 @@
align
=====
-Create a multiple sequence alignment based on the AR122/BAC120 marker set.
+Create a multiple sequence alignment based on the AR53/BAC120 marker set.
Arguments
diff --git a/docs/src/commands/classify_wf.rst b/docs/src/commands/classify_wf.rst
index 75766e06..f68f17c1 100644
--- a/docs/src/commands/classify_wf.rst
+++ b/docs/src/commands/classify_wf.rst
@@ -16,7 +16,7 @@ The classify workflow consists of three steps: ``identify``, ``align``, and ``cl
The ``identify`` step calls genes using `Prodigal <http://compbio.ornl.gov/prodigal/>`_,
and uses HMM models and the `HMMER <http://hmmer.org/>`_ package to identify the
-120 bacterial and 122 archaeal marker genes used for phylogenetic inference
+120 bacterial and 53 archaeal marker genes used for phylogenetic inference
(`Parks et al., 2018 <https://www.ncbi.nlm.nih.gov/pubmed/30148503>`_). Multiple
sequence alignments (MSA) are obtained by aligning marker genes to their respective HMM model.
diff --git a/docs/src/examples/classify_wf.ipynb b/docs/src/examples/classify_wf.ipynb
index 21211419..407cd2dc 100644
--- a/docs/src/examples/classify_wf.ipynb
+++ b/docs/src/examples/classify_wf.ipynb
@@ -133,7 +133,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "However, it is sometimes more useful to just read the summary files which detail markers identified from either the archaeal 122, or bacterial 120 marker set."
+ "However, it is sometimes more useful to just read the summary files which detail markers identified from either the archaeal 53, or bacterial 120 marker set."
]
},
{
@@ -152,7 +152,7 @@
}
],
"source": [
- "cat /tmp/gtdbtk/identify/gtdbtk.ar122.markers_summary.tsv "
+ "cat /tmp/gtdbtk/identify/gtdbtk.ar53.markers_summary.tsv "
]
},
{
@@ -201,7 +201,7 @@
"### Results\n",
"It is important to pay attention to the output, if a genome had a low number of markers identified it will be excluded from the analysis at this step. A warning will appear if that is the case.\n",
"\n",
- "Depending on the domain, a prefixed file of either `ar122` or `bac120` will appear containing the MSA of the user genomes and the GTDB genomes, or just the user genomes (`gtdbtk.ar122.msa.fasta` and `gtdbtk.ar122.user_msa.fasta` respectively.)"
+ "Depending on the domain, a prefixed file of either `ar53` or `bac120` will appear containing the MSA of the user genomes and the GTDB genomes, or just the user genomes (`gtdbtk.ar53.msa.fasta` and `gtdbtk.ar53.user_msa.fasta` respectively.)"
]
},
{
@@ -213,9 +213,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001B[0m\u001B[38;5;27malign\u001B[0m \u001B[38;5;51mgtdbtk.ar122.user_msa.fasta\u001B[0m \u001B[38;5;27midentify\u001B[0m\n",
- "\u001B[38;5;51mgtdbtk.ar122.filtered.tsv\u001B[0m gtdbtk.log\n",
- "\u001B[38;5;51mgtdbtk.ar122.msa.fasta\u001B[0m gtdbtk.warnings.log\n"
+ "\u001B[0m\u001B[38;5;27malign\u001B[0m \u001B[38;5;51mgtdbtk.ar53.user_msa.fasta\u001B[0m \u001B[38;5;27midentify\u001B[0m\n",
+ "\u001B[38;5;51mgtdbtk.ar53.filtered.tsv\u001B[0m gtdbtk.log\n",
+ "\u001B[38;5;51mgtdbtk.ar53.msa.fasta\u001B[0m gtdbtk.warnings.log\n"
]
}
],
@@ -264,7 +264,7 @@
"metadata": {},
"source": [
"### Results\n",
- "The two main files output (one again, depending on their domain) are the summary file, and the reference tree containing those genomes (`gtdbtk.ar122.summary.tsv`, and `gtdbtk.ar122.classify.tree` respectively). Classification of the genomes are present in the summary file."
+ "The two main files output (one again, depending on their domain) are the summary file, and the reference tree containing those genomes (`gtdbtk.ar53.summary.tsv`, and `gtdbtk.ar53.classify.tree` respectively). Classification of the genomes are present in the summary file."
]
},
{
@@ -276,8 +276,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001B[0m\u001B[38;5;27mclassify\u001B[0m \u001B[38;5;51mgtdbtk.ar122.summary.tsv\u001B[0m gtdbtk.warnings.log\n",
- "\u001B[38;5;51mgtdbtk.ar122.classify.tree\u001B[0m gtdbtk.log\n"
+ "\u001B[0m\u001B[38;5;27mclassify\u001B[0m \u001B[38;5;51mgtdbtk.ar53.summary.tsv\u001B[0m gtdbtk.warnings.log\n",
+ "\u001B[38;5;51mgtdbtk.ar53.classify.tree\u001B[0m gtdbtk.log\n"
]
}
],
diff --git a/docs/src/files/markers_summary.tsv.rst b/docs/src/files/markers_summary.tsv.rst
index f5ef0c05..3ff92fb3 100644
--- a/docs/src/files/markers_summary.tsv.rst
+++ b/docs/src/files/markers_summary.tsv.rst
@@ -4,7 +4,7 @@ markers_summary.tsv
===================
A summary of unique, duplicated, and missing markers within the 120 bacterial marker set,
-or the 122 archaeal marker set for each submitted genome.
+or the 53 archaeal marker set for each submitted genome.
For each genome:
diff --git a/docs/src/files/pplacer.domain.json.rst b/docs/src/files/pplacer.domain.json.rst
index 8934e626..cb31bd98 100644
--- a/docs/src/files/pplacer.domain.json.rst
+++ b/docs/src/files/pplacer.domain.json.rst
@@ -31,7 +31,7 @@ Example
}
], "metadata":
{"invocation":
- "pplacer -m WAG -j 3 -c \/release89\/pplacer\/gtdb_r89_ar122.refpkg -o classify_output\/classify\/intermediate_results\/pplacer\/pplacer.ar122.json align_output\/align\/gtdbtk.ar122.user_msa.fasta"
+ "pplacer -m WAG -j 3 -c \/release89\/pplacer\/gtdb_r89_ar53.refpkg -o classify_output\/classify\/intermediate_results\/pplacer\/pplacer.ar53.json align_output\/align\/gtdbtk.ar53.user_msa.fasta"
}, "version": 3, "fields":
["distal_length", "edge_num", "like_weight_ratio", "likelihood",
"pendant_length"
diff --git a/docs/src/files/pplacer.domain.out.rst b/docs/src/files/pplacer.domain.out.rst
index d84cf8fa..ca50051e 100644
--- a/docs/src/files/pplacer.domain.out.rst
+++ b/docs/src/files/pplacer.domain.out.rst
@@ -16,7 +16,7 @@ Example
.. code-block:: text
- Running pplacer v1.1.alpha19-0-g807f6f3 analysis on align_output/align/gtdbtk.ar122.user_msa.fasta...
+ Running pplacer v1.1.alpha19-0-g807f6f3 analysis on align_output/align/gtdbtk.ar53.user_msa.fasta...
Didn't find any reference sequences in given alignment file. Using supplied reference alignment.
Pre-masking sequences... sequence length cut from 5124 to 5114.
Determining figs... figs disabled.
diff --git a/docs/src/files/summary.tsv.rst b/docs/src/files/summary.tsv.rst
index 3f3014c8..0bff092c 100644
--- a/docs/src/files/summary.tsv.rst
+++ b/docs/src/files/summary.tsv.rst
@@ -4,7 +4,7 @@
summary.tsv
===========
-Classifications provided by the GTDB-Tk are in the files \<prefix>.bac120.summary.tsv and \<prefix>.ar122.summary.tsv for bacterial and archaeal genomes, respectively. These are tab separated files with the following columns:
+Classifications provided by the GTDB-Tk are in the files \<prefix>.bac120.summary.tsv and \<prefix>.ar53.summary.tsv for bacterial and archaeal genomes, respectively. These are tab separated files with the following columns:
* user_genome: Unique identifier of query genome taken from the FASTA file of the genome.
* classification: GTDB taxonomy string inferred by the GTDB-Tk. An unassigned species (i.e., ``s__``) indicates that the query genome is either i) placed outside a named genus or ii) the ANI to the closest intra-genus reference genome with an AF >=0.65 is not within the species-specific ANI circumscription radius.
diff --git a/docs/src/installing/index.rst b/docs/src/installing/index.rst
index 4acf3063..4ee1ec43 100644
--- a/docs/src/installing/index.rst
+++ b/docs/src/installing/index.rst
@@ -34,12 +34,12 @@ Hardware requirements
- Storage
- Time
* - Archaea
- - ~13 GB
- - ~27 GB
+ - ~34 GB
+ - ~65 GB
- ~1 hour / 1,000 genomes @ 64 CPUs
* - Bacteria
- - ~215 GB
- - ~27 GB
+ - ~320 GB ( 20GB for divide-and-conquer)
+ - ~65 GB
- ~1 hour / 1,000 genomes @ 64 CPUs
.. note::
diff --git a/gtdbtk/__init__.py b/gtdbtk/__init__.py
index ecc26b3f..8e5ad4c9 100644
--- a/gtdbtk/__init__.py
+++ b/gtdbtk/__init__.py
@@ -29,4 +29,4 @@
__status__ = 'Production'
__title__ = 'GTDB-Tk'
__url__ = 'https://github.com/Ecogenomics/GTDBTk'
-__version__ = '1.7.0'
+__version__ = '2.0.0'
diff --git a/gtdbtk/__main__.py b/gtdbtk/__main__.py
index 43af9c06..35749469 100644
--- a/gtdbtk/__main__.py
+++ b/gtdbtk/__main__.py
@@ -48,14 +48,17 @@ def print_help():
decorate -> Decorate tree with GTDB taxonomy
Tools:
- infer_ranks -> Establish taxonomic ranks of internal nodes using RED
- ani_rep -> Calculates ANI to GTDB representative genomes
- trim_msa -> Trim an untrimmed MSA file based on a mask
- export_msa -> Export the untrimmed archaeal or bacterial MSA file
+ infer_ranks -> Establish taxonomic ranks of internal nodes using RED
+ ani_rep -> Calculates ANI to GTDB representative genomes
+ trim_msa -> Trim an untrimmed MSA file based on a mask
+ export_msa -> Export the untrimmed archaeal or bacterial MSA file
+ remove_labels -> Remove labels (bootstrap values, node labels) from an Newick tree
+ convert_to_itol -> Convert a GTDB-Tk Newick tree to an iTOL tree
+
Testing:
test -> Validate the classify_wf pipeline with 3 archaeal genomes
- check_install -> Verify third party programs and GTDB reference package.
+ check_install -> Verify third party programs and GTDB reference package
Use: gtdbtk <command> -h for command specific help
''' % __version__)
diff --git a/gtdbtk/biolib_lite/seq_io.py b/gtdbtk/biolib_lite/seq_io.py
index 35011db3..1212d60b 100644
--- a/gtdbtk/biolib_lite/seq_io.py
+++ b/gtdbtk/biolib_lite/seq_io.py
@@ -122,15 +122,19 @@ def read_fasta_seq(fasta_file, keep_annotation=False):
try:
open_file = open
+ mode = 'r'
if fasta_file.endswith('.gz'):
open_file = gzip.open
+ mode = 'rb'
seq_id = None
annotation = None
seq = None
- with open_file(fasta_file, 'r') as f:
+ with open_file(fasta_file, mode) as f:
for line in f.readlines():
+ if isinstance(line, bytes):
+ line = line.decode()
# skip blank lines
if not line.strip():
continue
diff --git a/gtdbtk/classify.py b/gtdbtk/classify.py
index 73d33113..d93eb98d 100644
--- a/gtdbtk/classify.py
+++ b/gtdbtk/classify.py
@@ -38,15 +38,19 @@
from gtdbtk.exceptions import GenomeMarkerSetUnknown, GTDBTkExit
from gtdbtk.external.fastani import FastANI
from gtdbtk.external.pplacer import Pplacer
-from gtdbtk.io.classify_summary import ClassifySummaryFileAR122, ClassifySummaryFileBAC120, ClassifySummaryFileRow
-from gtdbtk.io.marker.copy_number import CopyNumberFileAR122, CopyNumberFileBAC120
-from gtdbtk.io.pplacer_classification import PplacerClassifyFileBAC120, PplacerClassifyFileAR122
+from gtdbtk.io.classify_summary import ClassifySummaryFileAR53, ClassifySummaryFileBAC120, ClassifySummaryFileRow
+from gtdbtk.io.marker.copy_number import CopyNumberFileAR53, CopyNumberFileBAC120
+from gtdbtk.io.pplacer_classification import PplacerClassifyFileBAC120, PplacerClassifyFileAR53, \
+ PplacerLowClassifyFileBAC120
from gtdbtk.io.prodigal.tln_table_summary import TlnTableSummaryFile
-from gtdbtk.io.red_dict import REDDictFileAR122, REDDictFileBAC120
+from gtdbtk.io.red_dict import REDDictFileAR53, REDDictFileBAC120
+from gtdbtk.io.missing_genomes import DisappearingGenomesFileAR53, DisappearingGenomesFileBAC120
+from gtdbtk.io.tree_mapping import GenomeMappingFile, GenomeMappingFileRow
from gtdbtk.markers import Markers
from gtdbtk.relative_distance import RelativeDistance
+from gtdbtk.split import Split
from gtdbtk.tools import add_ncbi_prefix, symlink_f, get_memory_gb, get_reference_ids, TreeTraversal, \
- calculate_patristic_distance, tqdm_log
+ calculate_patristic_distance, tqdm_log, truncate_taxonomy, standardise_taxonomy
sys.setrecursionlimit(15000)
@@ -83,13 +87,14 @@ def __init__(self, cpus=1, pplacer_cpus=None, af_threshold=None):
self.pplacer_cpus = 64
self.species_radius = self.parse_radius_file()
-
self.reference_ids = get_reference_ids()
# rank_of_interest determine the rank in the tree_mapping file for
# lower classification
self.rank_of_interest = "o__"
+
+
def parse_radius_file(self):
results = {}
with open(Config.RADII_FILE) as f:
@@ -101,7 +106,7 @@ def parse_radius_file(self):
results[gid] = float(infos[2])
return results
- def parse_leaf_to_dir_path(self,genome_id):
+ def parse_leaf_to_dir_path(self, genome_id):
""" Convert a genome id to a path.
i.e GCA_123456789.0 would be converted to GCA/123/456/789/
@@ -113,8 +118,8 @@ def parse_leaf_to_dir_path(self,genome_id):
path to the genome id path
"""
try:
- genome_path = '/'.join([genome_id[0:3],genome_id[4:7],
- genome_id[7:10],genome_id[10:13]])
+ genome_path = '/'.join([genome_id[0:3], genome_id[4:7],
+ genome_id[7:10], genome_id[10:13]])
return genome_path
except IndexError:
logger = logging.getLogger('timestamp')
@@ -128,7 +133,9 @@ def place_genomes(self,
prefix,
scratch_dir=None,
levelopt=None,
- tree_iter=None):
+ tree_iter=None,
+ number_low_trees=None,
+ idx_tree=None):
"""Place genomes into reference tree using pplacer."""
# Warn if the memory is insufficient
@@ -143,17 +150,17 @@ def place_genomes(self,
self.logger.warning(mem_warning.format(req_gb=Config.PPLACER_MIN_RAM_BAC,
domain='bacterial',
cur_gb=mem_total))
- elif marker_set_id == 'ar122' and mem_total < Config.PPLACER_MIN_RAM_ARC:
+ elif marker_set_id == 'ar53' and mem_total < Config.PPLACER_MIN_RAM_ARC:
self.logger.warning(mem_warning.format(req_gb=Config.PPLACER_MIN_RAM_ARC,
domain='archaeal',
cur_gb=mem_total))
# rename user MSA file for compatibility with pplacer
- if not user_msa_file.endswith('.fasta'):
+ if not user_msa_file.endswith('.fasta') and not user_msa_file.endswith('.gz'):
if marker_set_id == 'bac120':
t = PATH_BAC120_USER_MSA.format(prefix=prefix)
- elif marker_set_id == 'ar122':
- t = PATH_AR122_USER_MSA.format(prefix=prefix)
+ elif marker_set_id == 'ar53':
+ t = PATH_AR53_USER_MSA.format(prefix=prefix)
else:
raise GenomeMarkerSetUnknown('There was an error determining the marker set.')
@@ -173,6 +180,7 @@ def place_genomes(self,
make_sure_path_exists(scratch_dir)
# get path to pplacer reference package
+ pplacer_ref_pkg = None
if marker_set_id == 'bac120':
if levelopt is None:
self.logger.log(Config.LOG_TASK,
@@ -181,28 +189,29 @@ def place_genomes(self,
f'{self.pplacer_cpus} CPUs (be patient).')
pplacer_ref_pkg = os.path.join(Config.PPLACER_DIR,
Config.PPLACER_BAC120_REF_PKG)
+
elif levelopt == 'high':
self.logger.log(Config.LOG_TASK,
f'Placing {num_genomes:,} bacterial genomes '
- f'into high reference tree with pplacer using '
+ f'into backbone reference tree with pplacer using '
f'{self.pplacer_cpus} CPUs (be patient).')
pplacer_ref_pkg = os.path.join(Config.HIGH_PPLACER_DIR,
Config.HIGH_PPLACER_REF_PKG)
elif levelopt == 'low':
self.logger.log(Config.LOG_TASK,
f'Placing {num_genomes:,} bacterial genomes '
- f'into low reference tree {tree_iter} with '
+ f'into order-level reference tree {tree_iter} ({idx_tree}/{number_low_trees}) with '
f'pplacer using {self.pplacer_cpus} CPUs '
f'(be patient).')
pplacer_ref_pkg = os.path.join(Config.LOW_PPLACER_DIR,
Config.LOW_PPLACER_REF_PKG.format(iter=tree_iter))
- elif marker_set_id == 'ar122':
+ elif marker_set_id == 'ar53':
self.logger.log(Config.LOG_TASK,
f'Placing {num_genomes:,} archaeal genomes into '
f'reference tree with pplacer using '
f'{self.pplacer_cpus} CPUs (be patient).')
pplacer_ref_pkg = os.path.join(Config.PPLACER_DIR,
- Config.PPLACER_AR122_REF_PKG)
+ Config.PPLACER_AR53_REF_PKG)
else:
raise GenomeMarkerSetUnknown(f'Unknown marker set: {marker_set_id}')
@@ -212,6 +221,7 @@ def place_genomes(self,
os.makedirs(pplacer_out_dir)
# run pplacer
+ pplacer_json_out, pplacer_out = None, None
if marker_set_id == 'bac120':
if levelopt is None:
pplacer_out = os.path.join(out_dir, PATH_BAC120_PPLACER_OUT)
@@ -227,9 +237,9 @@ def place_genomes(self,
out_dir, PATH_LOW_BAC120_PPLACER_OUT.format(iter=tree_iter))
pplacer_json_out = os.path.join(
out_dir, PATH_LOW_BAC120_PPLACER_JSON.format(iter=tree_iter))
- elif marker_set_id == 'ar122':
- pplacer_out = os.path.join(out_dir, PATH_AR122_PPLACER_OUT)
- pplacer_json_out = os.path.join(out_dir, PATH_AR122_PPLACER_JSON)
+ elif marker_set_id == 'ar53':
+ pplacer_out = os.path.join(out_dir, PATH_AR53_PPLACER_OUT)
+ pplacer_json_out = os.path.join(out_dir, PATH_AR53_PPLACER_JSON)
else:
self.logger.error('There was an error determining the marker set.')
raise GenomeMarkerSetUnknown
@@ -237,10 +247,14 @@ def place_genomes(self,
pplacer = Pplacer()
if levelopt is None or levelopt == 'high':
self.logger.info(f'pplacer version: {pplacer.version}')
- pplacer.run(self.pplacer_cpus, 'wag', pplacer_ref_pkg, pplacer_json_out,
- user_msa_file, pplacer_out, pplacer_mmap_file)
+ # #DEBUG line
+ run_pplacer = True
+ if run_pplacer:
+ pplacer.run(self.pplacer_cpus, 'wag', pplacer_ref_pkg, pplacer_json_out,
+ user_msa_file, pplacer_out, pplacer_mmap_file)
# extract tree
+ tree_file = None
if marker_set_id == 'bac120':
if levelopt is None:
tree_file = os.path.join(
@@ -251,9 +265,9 @@ def place_genomes(self,
elif levelopt == 'low':
tree_file = os.path.join(
out_dir, PATH_LOW_BAC120_TREE_FILE.format(prefix=prefix, iter=tree_iter))
- elif marker_set_id == 'ar122':
+ elif marker_set_id == 'ar53':
tree_file = os.path.join(
- out_dir, PATH_AR122_TREE_FILE.format(prefix=prefix))
+ out_dir, PATH_AR53_TREE_FILE.format(prefix=prefix))
else:
self.logger.error('There was an error determining the marker set.')
raise GenomeMarkerSetUnknown
@@ -261,64 +275,44 @@ def place_genomes(self,
pplacer.tog(pplacer_json_out, tree_file)
# Symlink to the tree summary file
- if marker_set_id == 'bac120':
- symlink_f(PATH_BAC120_TREE_FILE.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_BAC120_TREE_FILE.format(prefix=prefix))))
- elif marker_set_id == 'ar122':
- symlink_f(PATH_AR122_TREE_FILE.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_AR122_TREE_FILE.format(prefix=prefix))))
- else:
- self.logger.error('There was an error determining the marker set.')
- raise GenomeMarkerSetUnknown
+ # if marker_set_id == 'bac120' and levelopt is None:
+ # symlink_f(PATH_BAC120_TREE_FILE.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_BAC120_TREE_FILE.format(prefix=prefix))))
+ # elif levelopt == 'high':
+ # symlink_f(PATH_HIGH_BAC120_TREE_FILE.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_HIGH_BAC120_TREE_FILE.format(prefix=prefix))))
+ # elif levelopt == 'low':
+ # symlink_f(PATH_LOW_BAC120_TREE_FILE.format(prefix=prefix, iter=tree_iter),
+ # os.path.join(out_dir,
+ # os.path.basename(PATH_LOW_BAC120_TREE_FILE.format(prefix=prefix, iter=tree_iter))))
+ # elif marker_set_id == 'ar53':
+ # symlink_f(PATH_AR53_TREE_FILE.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_AR53_TREE_FILE.format(prefix=prefix))))
+ # else:
+ # self.logger.error('There was an error determining the marker set.')
+ # raise GenomeMarkerSetUnknown
# Symlink to the tree summary file
- if marker_set_id == 'bac120':
- if levelopt is None:
- symlink_f(PATH_BAC120_TREE_FILE.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_BAC120_TREE_FILE.format(prefix=prefix))))
- elif levelopt == 'high':
- symlink_f(PATH_HIGH_BAC120_TREE_FILE.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_HIGH_BAC120_TREE_FILE.format(prefix=prefix))))
- elif levelopt == 'low':
- symlink_f(PATH_LOW_BAC120_TREE_FILE.format(iter=tree_iter, prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_LOW_BAC120_TREE_FILE.format(iter=tree_iter, prefix=prefix))))
- elif marker_set_id == 'ar122':
- symlink_f(PATH_AR122_TREE_FILE.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_AR122_TREE_FILE.format(prefix=prefix))))
- else:
- self.logger.error('There was an error determining the marker set.')
- raise GenomeMarkerSetUnknown
+ # if marker_set_id == 'bac120':
+ # if levelopt is None:
+ # symlink_f(PATH_BAC120_TREE_FILE.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_BAC120_TREE_FILE.format(prefix=prefix))))
+ # elif levelopt == 'high':
+ # symlink_f(PATH_HIGH_BAC120_TREE_FILE.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_HIGH_BAC120_TREE_FILE.format(prefix=prefix))))
+ # elif levelopt == 'low':
+ # symlink_f(PATH_LOW_BAC120_TREE_FILE.format(iter=tree_iter, prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(
+ # PATH_LOW_BAC120_TREE_FILE.format(iter=tree_iter, prefix=prefix))))
+ # elif marker_set_id == 'ar53':
+ # symlink_f(PATH_AR53_TREE_FILE.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_AR53_TREE_FILE.format(prefix=prefix))))
+ # else:
+ # self.logger.error('There was an error determining the marker set.')
+ # raise GenomeMarkerSetUnknown
return tree_file
- def standardise_taxonomy(self, taxstring, marker_set=None):
- """Create a 7 rank taxonomy string from an incomplete taxonomy string
-
- Parameters
- ----------
- taxstring : str
- incomplete taxonomy string
- marker_set : str
- The marker set to use.
-
- Returns
- -------
- string
- 7 rank taxonomy string.
- """
- # return taxstring
- taxlist = taxstring.split(";")
- while '' in taxlist:
- taxlist.remove('')
- if marker_set == 'bac120':
- taxlist.insert(0, 'd__Bacteria')
- if marker_set == 'ar122':
- taxlist.insert(0, 'd__Archaea')
- taxlist.extend(self.order_rank[len(taxlist):])
- new_taxstring = ";".join(taxlist)
- return new_taxstring
-
-
def _parse_red_dict(self, red_dist_dict):
results = {}
for k, v in red_dist_dict.items():
@@ -355,29 +349,44 @@ def run(self,
scratch_dir=None,
recalculate_red=None,
debugopt=False,
- splittreeopt=False):
+ fulltreeopt=False):
"""Classify genomes based on position in reference tree."""
_bac_gids, _ar_gids, bac_ar_diff = Markers().genome_domain(align_dir, prefix)
+ disappearing_genomes = []
- for marker_set_id in ('ar122', 'bac120'):
+ for marker_set_id in ('ar53', 'bac120'):
- if marker_set_id == 'ar122':
- marker_summary_fh = CopyNumberFileAR122(align_dir, prefix)
+ if marker_set_id == 'ar53':
+ marker_summary_fh = CopyNumberFileAR53(align_dir, prefix)
marker_summary_fh.read()
- user_msa_file = os.path.join(align_dir,
- PATH_AR122_USER_MSA.format(prefix=prefix))
- summary_file = ClassifySummaryFileAR122(out_dir, prefix)
- red_dict_file = REDDictFileAR122(out_dir, prefix)
- pplacer_classify_file = PplacerClassifyFileAR122(out_dir, prefix)
+ if os.path.isfile(os.path.join(align_dir,
+ PATH_AR53_USER_MSA.format(prefix=prefix))):
+ user_msa_file = os.path.join(align_dir,
+ PATH_AR53_USER_MSA.format(prefix=prefix))
+ else:
+ user_msa_file = os.path.join(align_dir,
+ PATH_AR53_USER_MSA.format(prefix=prefix)+'.gz')
+ summary_file = ClassifySummaryFileAR53(out_dir, prefix)
+ red_dict_file = REDDictFileAR53(out_dir, prefix)
+ disappearing_genomes_file = DisappearingGenomesFileAR53(out_dir, prefix)
+ pplacer_classify_file = PplacerClassifyFileAR53(out_dir, prefix)
elif marker_set_id == 'bac120':
marker_summary_fh = CopyNumberFileBAC120(align_dir, prefix)
marker_summary_fh.read()
- user_msa_file = os.path.join(align_dir,
- PATH_BAC120_USER_MSA.format(prefix=prefix))
+ if os.path.isfile(os.path.join(align_dir,
+ PATH_BAC120_USER_MSA.format(prefix=prefix))):
+ user_msa_file = os.path.join(align_dir,
+ PATH_BAC120_USER_MSA.format(prefix=prefix))
+ else:
+ user_msa_file = os.path.join(align_dir,
+ PATH_BAC120_USER_MSA.format(prefix=prefix)+'.gz')
summary_file = ClassifySummaryFileBAC120(out_dir, prefix)
red_dict_file = REDDictFileBAC120(out_dir, prefix)
+ disappearing_genomes_file = DisappearingGenomesFileBAC120(out_dir, prefix)
pplacer_classify_file = PplacerClassifyFileBAC120(out_dir, prefix)
+ if not fulltreeopt:
+ tree_mapping_file = GenomeMappingFile(out_dir, prefix)
else:
raise GenomeMarkerSetUnknown('There was an error determining the marker set.')
@@ -397,11 +406,12 @@ def run(self,
msa_dict = read_fasta(user_msa_file)
- if splittreeopt is True:
+ if not fulltreeopt and marker_set_id == 'bac120':
+ splitter = Split(self.order_rank, self.gtdb_taxonomy, self.reference_ids)
# run pplacer to place bins in reference genome tree
- num_genomes = sum([1 for _seq_id, _seq in read_seq(user_msa_file)])
- debugfile, conflict_file = self._generate_summary_file(
- marker_set_id, prefix, out_dir, debugopt, splittreeopt)
+ genomes_to_process=[seq_id for seq_id, _seq in read_seq(user_msa_file)]
+ debug_file, conflict_file = self._generate_summary_file(
+ marker_set_id, prefix, out_dir, debugopt, fulltreeopt)
high_classify_tree = self.place_genomes(user_msa_file,
marker_set_id,
@@ -412,33 +422,64 @@ def run(self,
tree = self._assign_mrca_red(
high_classify_tree, marker_set_id, 'high')
- high_classification = self._get_high_pplacer_taxonomy(
+ high_classification = splitter.get_high_pplacer_taxonomy(
out_dir, marker_set_id, prefix, user_msa_file, tree)
+ disappearing_genomes = [seq_id for seq_id in genomes_to_process if seq_id not in high_classification]
+ if disappearing_genomes:
+ for disappearing_genome in disappearing_genomes:
+ disappearing_genomes_file.add_genome(disappearing_genome,'Backbone')
+
tree_mapping_dict = {}
+ tree_mapping_dict_reverse = {}
with open(Config.LOW_TREE_MAPPING_FILE) as ltmf:
for line in ltmf:
k, v = line.strip().split()
tree_mapping_dict[k] = v
+ tree_mapping_dict_reverse.setdefault(v, []).append(k)
- sorted_high_taxonomy, len_sorted_genomes = self._map_high_taxonomy(
+ splitter = Split(self.order_rank,self.gtdb_taxonomy,self.reference_ids)
+ sorted_high_taxonomy, len_sorted_genomes = splitter.map_high_taxonomy(
high_classification, tree_mapping_dict, summary_file)
- self.logger.info(f"{len_sorted_genomes} out of {num_genomes} have an order assignments. Those genomes will be reclassified.")
+            self.logger.info(f"{len_sorted_genomes} out of {len(genomes_to_process)} have an order assignment. Those genomes "
+ f"will be reclassified.")
- for tree_iter in sorted(sorted_high_taxonomy, key=lambda z: len(sorted_high_taxonomy[z]), reverse=True):
+ for idx, tree_iter in enumerate(
+ sorted(sorted_high_taxonomy, key=lambda z: len(sorted_high_taxonomy[z]), reverse=True)):
listg = sorted_high_taxonomy.get(tree_iter)
low_classify_tree, submsa_file_path = self._place_in_low_tree(
- tree_iter, listg, msa_dict, marker_set_id, prefix, scratch_dir, out_dir)
+ tree_iter, len(sorted_high_taxonomy), idx + 1, listg, msa_dict, marker_set_id, prefix,
+ scratch_dir, out_dir)
+ genomes_to_process = [seq_id for seq_id, _seq in read_seq(submsa_file_path)]
mrca_lowtree = self._assign_mrca_red(
low_classify_tree, marker_set_id, 'low', tree_iter)
+ pplacer_classify_file = PplacerLowClassifyFileBAC120(out_dir, prefix,tree_iter)
pplacer_taxonomy_dict = self._get_pplacer_taxonomy(pplacer_classify_file,
- marker_set_id, user_msa_file, mrca_lowtree)
+ marker_set_id, user_msa_file, mrca_lowtree)
+ disappearing_genomes = [seq_id for seq_id in genomes_to_process if
+ seq_id not in pplacer_taxonomy_dict]
+ if disappearing_genomes:
+ self.logger.warning(f"{len(disappearing_genomes)} out of {len(genomes_to_process)} have not been"
+ f" properly placed by pplacer. This is a known issue with pplacer but we do "
+ f"not have a solution currently. Those missing genomes are written to "
+ f"the {disappearing_genomes_file.file_name} file. We recommend rerunning "
+ f"those genomes through GTDB-Tk to 'fix' the problem.")
+ for disappearing_genome in disappearing_genomes:
+ disappearing_genomes_file.add_genome(disappearing_genome, tree_iter)
+
+ self._parse_tree(mrca_lowtree, genomes, msa_dict,
+ percent_multihit_dict, tln_table_summary_file.genomes,
+ bac_ar_diff, submsa_file_path, red_dict_file.data,
+ summary_file, conflict_file, pplacer_taxonomy_dict,
+ high_classification, debug_file, debugopt,
+ tree_mapping_file, tree_iter, tree_mapping_dict_reverse)
+
+ # Symlink to the summary file from the root
+ symlink_f(PATH_BAC120_SUMMARY_OUT.format(prefix=prefix),
+ os.path.join(out_dir, os.path.basename(PATH_BAC120_SUMMARY_OUT.format(prefix=prefix))))
- self._parse_tree(mrca_lowtree, genomes, msa_dict, percent_multihit_dict, tln_table_summary_file.genomes,
- bac_ar_diff, submsa_file_path, red_dict_file.data, summary_file, conflict_file, pplacer_taxonomy_dict,
- high_classification, debugfile, debugopt)
if debugopt:
- debugfile.close()
+ debug_file.close()
else:
classify_tree = self.place_genomes(user_msa_file,
@@ -447,9 +488,11 @@ def run(self,
prefix,
scratch_dir)
+ genomes_to_process = [seq_id for seq_id, _seq in read_seq(user_msa_file)]
+
# get taxonomic classification of each user genome
- debugfile, conflict_file = self._generate_summary_file(
- marker_set_id, prefix, out_dir, debugopt, splittreeopt)
+ debug_file, conflict_file = self._generate_summary_file(
+ marker_set_id, prefix, out_dir, debugopt, fulltreeopt)
if recalculate_red:
tree_to_process = self._calculate_red_distances(
@@ -463,45 +506,61 @@ def run(self,
user_msa_file,
tree_to_process)
+ disappearing_genomes = [seq_id for seq_id in genomes_to_process if seq_id not in pplacer_taxonomy_dict]
+
+
self._parse_tree(tree_to_process, genomes, msa_dict, percent_multihit_dict,
tln_table_summary_file.genomes,
bac_ar_diff, user_msa_file, red_dict_file.data, summary_file, conflict_file,
pplacer_taxonomy_dict, None,
- debugfile, debugopt)
+ debug_file, debugopt, None, None, None)
# Symlink to the summary file from the root
if marker_set_id == 'bac120':
symlink_f(PATH_BAC120_SUMMARY_OUT.format(prefix=prefix),
os.path.join(out_dir, os.path.basename(PATH_BAC120_SUMMARY_OUT.format(prefix=prefix))))
- elif marker_set_id == 'ar122':
- symlink_f(PATH_AR122_SUMMARY_OUT.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_AR122_SUMMARY_OUT.format(prefix=prefix))))
+ elif marker_set_id == 'ar53':
+ symlink_f(PATH_AR53_SUMMARY_OUT.format(prefix=prefix),
+ os.path.join(out_dir, os.path.basename(PATH_AR53_SUMMARY_OUT.format(prefix=prefix))))
else:
raise GenomeMarkerSetUnknown('There was an error determining the marker set.')
+ if disappearing_genomes:
+ for disappearing_genome in disappearing_genomes:
+ disappearing_genomes_file.add_genome(disappearing_genome,'N/A')
+
+
if debugopt:
- debugfile.close()
+ debug_file.close()
+
+
+
+ if not fulltreeopt and marker_set_id == 'bac120':
+ tree_mapping_file.write()
# Write the summary file to disk.
+ if disappearing_genomes_file.data:
+ disappearing_genomes_file.write()
summary_file.write()
- def _generate_summary_file(self, marker_set_id, prefix, out_dir, debugopt=None, splittreeopt=None):
- debugfile = None
+ def _generate_summary_file(self, marker_set_id, prefix, out_dir, debugopt=None, fulltreeopt=None):
+ debug_file = None
conflict_summary = None
if debugopt:
- debugfile = open(os.path.join(
+ debug_file = open(os.path.join(
out_dir, prefix + '.{}.debug_file.tsv'.format(marker_set_id)), 'w')
- debugfile.write(
+ debug_file.write(
"User genome\tRed value\tHigher rank\tHigher value\tLower rank\tLower value\tcase\tclosest_rank\ttool\n")
- if splittreeopt:
+ if not fulltreeopt:
conflict_summary = open(os.path.join(
out_dir, PATH_BAC120_CONFLICT.format(prefix=prefix)), 'w')
conflict_summary.write(
- "User genome\tHigh classification\tLow Classification\n")
- return debugfile, conflict_summary
+ "User genome\tHigh classification\tLow Classification\tType Classification\n")
+ return debug_file, conflict_summary
- def _place_in_low_tree(self, tree_iter, listg, msa_dict, marker_set_id, prefix, scratch_dir, out_dir):
+ def _place_in_low_tree(self, tree_iter, number_low_trees, idx_tree, listg, msa_dict, marker_set_id, prefix,
+ scratch_dir, out_dir):
make_sure_path_exists(os.path.join(
out_dir, DIR_LOW_PPLACER.format(iter=tree_iter)))
submsa_file_path = os.path.join(
@@ -512,12 +571,13 @@ def _place_in_low_tree(self, tree_iter, listg, msa_dict, marker_set_id, prefix,
for gid in listg:
submsa_file.write('>{}\n{}\n'.format(gid, msa_dict.get(gid)))
submsa_file.close()
- low_classify_tree = self.place_genomes(PATH_LOW_BAC120_SUBMSA.format(iter=tree_iter),
+ low_classify_tree = self.place_genomes(submsa_file_path,
marker_set_id,
out_dir,
prefix,
scratch_dir,
- 'low', tree_iter)
+ 'low', tree_iter,
+ number_low_trees, idx_tree)
return low_classify_tree, submsa_file_path
@staticmethod
@@ -595,12 +655,12 @@ def _get_fastani_verification(tree, reference_ids, tt):
out[leaf_node] = {"potential_g": [(leaf_ref_genome, 0.0)],
"pplacer_g": leaf_ref_genome}
- return out
+ return out, qry_nodes
def _classify_red_topology(self, tree, msa_dict, percent_multihit_dict, trans_table_dict, bac_ar_diff,
user_msa_file, red_dict, summary_file, conflict_file, pplacer_taxonomy_dict,
- high_classification, debugfile, debugopt, classified_user_genomes,
- unclassified_user_genomes, tt):
+ high_classification, debug_file, debugopt, classified_user_genomes,
+ unclassified_user_genomes, tt, order_in_spe_tree):
user_genome_ids = set(read_fasta(user_msa_file).keys())
user_genome_ids = user_genome_ids.difference(set(classified_user_genomes))
for leaf in tree.leaf_node_iter(filter_fn=lambda x: x.taxon.label in user_genome_ids):
@@ -682,6 +742,7 @@ def _classify_red_topology(self, tree, msa_dict, percent_multihit_dict, trans_ta
list_subrank.append(self.gtdb_taxonomy.get(leaf_subrank)
[self.order_rank.index(parent_rank) + 1])
if len(set(list_subrank)) == 1:
+ print(leaf.taxon.label)
print(list_leaves)
print(list_subrank)
raise GTDBTkExit('There should be only one leaf.')
@@ -753,50 +814,113 @@ def _classify_red_topology(self, tree, msa_dict, percent_multihit_dict, trans_ta
list_subnode = [subnd.taxon.label.replace("'", '')
for subnd in tt.get_leaf_nodes(cur_node)]
red_taxonomy = self._get_redtax(list_subnode, closest_rank)
+ notes = []
- del debug_info[0]
+ standardised_red_tax = standardise_taxonomy(';'.join(red_taxonomy))
- summary_row = ClassifySummaryFileRow()
- if leaf.taxon.label in unclassified_user_genomes:
- summary_row = unclassified_user_genomes.get(leaf.taxon.label)
- if summary_row.note == '':
- summary_row.note = None
- summary_row.gid = leaf.taxon.label
- summary_row.classification = self.standardise_taxonomy(red_taxonomy)
- summary_row.pplacer_tax = pplacer_taxonomy_dict.get(leaf.taxon.label)
- if summary_row.classification_method is None:
- summary_row.classification_method = detection
- summary_row.msa_percent = self.aa_percent_msa(msa_dict.get(summary_row.gid))
- summary_row.tln_table = trans_table_dict.get(summary_row.gid)
- summary_row.red_value = current_rel_list
+ if order_in_spe_tree and 'o__;' in standardised_red_tax:
+ v = high_classification.get(leaf.taxon.label)
+ tk_tax_red_without_order = truncate_taxonomy(v.get('tk_tax_red'), 'o__')
+ summary_row = ClassifySummaryFileRow()
+ summary_row.gid = leaf.taxon.label
+ summary_row.classification = tk_tax_red_without_order
+ summary_row.pplacer_tax = v.get('pplacer_tax')
+ summary_row.red_value = v.get('rel_dist')
+ notes.append(
+ 'Genome not classified with order in species tree, reverse to backbone tree classification.')
+
+ #### Test 1.2
+ elif order_in_spe_tree and self.check_common_rank_btwn_tax(standardised_red_tax,high_classification.get(leaf.taxon.label),'f__'):
+ summary_row = self.generate_summary_row_reverse_to_backbone(standardised_red_tax, leaf, high_classification)
+ notes.append('Genome has conflicting families between trees, reverse to backbone tree classification.')
+ elif order_in_spe_tree and self.check_common_rank_btwn_tax(standardised_red_tax,high_classification.get(leaf.taxon.label),'o__'):
+ summary_row = self.generate_summary_row_reverse_to_backbone(standardised_red_tax, leaf, high_classification)
+ notes.append('Genome has conflicting orders between trees, reverse to backbone tree classification.')
+ elif order_in_spe_tree and self.check_common_rank_btwn_tax(standardised_red_tax,high_classification.get(leaf.taxon.label),'c__'):
+ summary_row = self.generate_summary_row_reverse_to_backbone(standardised_red_tax,leaf,high_classification)
+ notes.append('Genome has conflicting classes between trees, reverse to backbone tree classification.')
+
+ elif order_in_spe_tree and red_taxonomy[
+ self.order_rank.index(self.rank_of_interest)] not in order_in_spe_tree:
+ v = high_classification.get(leaf.taxon.label)
+ tk_tax_red_without_order = truncate_taxonomy(v.get('tk_tax_red'), 'o__')
+ summary_row = ClassifySummaryFileRow()
+ summary_row.gid = leaf.taxon.label
+ summary_row.classification = tk_tax_red_without_order
+ summary_row.pplacer_tax = v.get('pplacer_tax')
+ summary_row.red_value = v.get('rel_dist')
+ notes.append('Genome placed in dummy order in species tree, reverse to backbone tree classification.')
+ else:
+ del debug_info[0]
+ summary_row = ClassifySummaryFileRow()
+ if leaf.taxon.label in unclassified_user_genomes:
+ summary_row = unclassified_user_genomes.get(leaf.taxon.label)
+ if summary_row.note == '':
+ summary_row.note = None
+ summary_row.gid = leaf.taxon.label
+ summary_row.classification = standardise_taxonomy(';'.join(red_taxonomy))
+ summary_row.pplacer_tax = pplacer_taxonomy_dict.get(leaf.taxon.label)
+ if summary_row.classification_method is None:
+ summary_row.classification_method = detection
+ summary_row.msa_percent = self.aa_percent_msa(msa_dict.get(summary_row.gid))
+ summary_row.tln_table = trans_table_dict.get(summary_row.gid)
+ summary_row.red_value = current_rel_list
- notes = []
if summary_row.gid in percent_multihit_dict:
notes.append('Genome has more than {}% of markers with multiple hits'.format(
percent_multihit_dict.get(summary_row.gid)))
if summary_row.gid in bac_ar_diff:
notes.append('Genome domain questionable ( {}% Bacterial, {}% Archaeal)'.format(
bac_ar_diff.get(summary_row.gid).get('bac120'),
- bac_ar_diff.get(summary_row.gid).get('ar122')))
+ bac_ar_diff.get(summary_row.gid).get('ar53')))
if len(notes) > 0:
summary_row.warnings = ';'.join(notes)
summary_file.add_row(summary_row)
if debugopt:
- debugfile.write('{0}\t{1}\t{2}\t{3}\n'.format(
+ debug_file.write('{0}\t{1}\t{2}\t{3}\n'.format(
leaf.taxon.label, current_rel_list, '\t'.join(str(x) for x in debug_info), detection))
if high_classification and leaf.taxon.label in high_classification:
- fullrank = [x for x in high_classification.get(leaf.taxon.label).get('tk_tax').split(
+ fullrank_terminal = [x for x in high_classification.get(leaf.taxon.label).get('tk_tax_terminal').split(
+ ';')[0:self.order_rank.index(self.rank_of_interest) + 2] if len(x) > 3]
+ low_taxonomy = summary_row.classification.split(';')[0:len(fullrank_terminal)]
+
+ if fullrank_terminal != low_taxonomy:
+ conflict_file.write(
+ '{}\t{}\t{}\tterminal_classification\n'.format(leaf.taxon.label, high_classification.get(
+ leaf.taxon.label).get('tk_tax_terminal'), summary_row.classification))
+
+ fullrank_red = [x for x in high_classification.get(leaf.taxon.label).get('tk_tax_red').split(
';')[0:self.order_rank.index(self.rank_of_interest) + 2] if len(x) > 3]
- low_taxonomy = summary_row.classification.split(';')[0:len(fullrank)]
- if fullrank != low_taxonomy:
- conflict_file.write('{}\t{}\t{}\n'.format(leaf.taxon.label, high_classification.get(
- leaf.taxon.label).get('tk_tax'), summary_row.classification))
+ low_taxonomy = summary_row.classification.split(';')[0:len(fullrank_red)]
+
+ if fullrank_red != low_taxonomy:
+ conflict_file.write(
+ '{}\t{}\t{}\tred_classification\n'.format(leaf.taxon.label, high_classification.get(
+ leaf.taxon.label).get('tk_tax_red'), summary_row.classification))
+
+ def generate_summary_row_reverse_to_backbone(self,standardised_red_tax, leaf, high_classification):
+ taxonomy_infos = high_classification.get(leaf.taxon.label)
+ common_ranks = [z for z in standardised_red_tax.split(';') if
+ len(z) > 3 and z in taxonomy_infos.get('tk_tax_red').split(';')]
+ combined_ranks = standardise_taxonomy(';'.join(common_ranks), 'bac120')
+ summary_row = ClassifySummaryFileRow()
+ summary_row.gid = leaf.taxon.label
+ summary_row.classification = combined_ranks
+ summary_row.pplacer_tax = taxonomy_infos.get('pplacer_tax')
+ summary_row.red_value = taxonomy_infos.get('rel_dist')
+ return summary_row
+
+ def check_common_rank_btwn_tax(self,standardised_red_tax,taxonomy_infos,rank):
+ return (taxonomy_infos.get('tk_tax_red').split(';')[self.order_rank.index(rank)] != rank and
+ standardised_red_tax.split(';')[self.order_rank.index(rank)] != rank and
+ standardised_red_tax.split(';')[self.order_rank.index(rank)] != taxonomy_infos.get('tk_tax_red').split(';')[
+ self.order_rank.index(rank)])
def _parse_tree(self, tree, genomes, msa_dict, percent_multihit_dict, trans_table_dict, bac_ar_diff,
user_msa_file, red_dict, summary_file, conflict_file, pplacer_taxonomy_dict, high_classification,
- debugfile, debugopt):
+ debug_file, debugopt, tree_mapping_file, tree_iter, tree_mapping_dict_reverse):
# Genomes can be classified by using FastANI or RED values
# We go through all leaves of the tree. if the leaf is a user
# genome we take its parent node and look at all the leaves
@@ -806,7 +930,7 @@ def _parse_tree(self, tree, genomes, msa_dict, percent_multihit_dict, trans_tabl
tt = TreeTraversal()
self.logger.log(Config.LOG_TASK, 'Traversing tree to determine classification method.')
- fastani_verification = self._get_fastani_verification(tree, self.reference_ids, tt)
+ fastani_verification, qury_nodes = self._get_fastani_verification(tree, self.reference_ids, tt)
# we run a fastani comparison for each user genomes against the
# selected genomes in the same genus
@@ -827,34 +951,27 @@ def _parse_tree(self, tree, genomes, msa_dict, percent_multihit_dict, trans_tabl
self.logger.info(f'{len(classified_user_genomes):,} genome(s) have '
f'been classified using FastANI and pplacer.')
+ if tree_mapping_file:
+ for genome in [x.taxon.label for x in qury_nodes]:
+ mapping_row = GenomeMappingFileRow()
+ mapping_row.gid = genome
+                if genome in classified_user_genomes:
+                    mapping_row.ani_classification = True
+                else:
+                    mapping_row.ani_classification = False
+ mapping_row.mapped_tree = tree_iter
+ tree_mapping_file.add_row(mapping_row)
+
# Iterate over each leaf node that was not classified with FastANI.
+ order_in_spe_tree = None
+ if tree_mapping_dict_reverse:
+ order_in_spe_tree = tree_mapping_dict_reverse.get(tree_iter)
self._classify_red_topology(tree, msa_dict, percent_multihit_dict,
trans_table_dict, bac_ar_diff, user_msa_file,
red_dict, summary_file, conflict_file,
pplacer_taxonomy_dict, high_classification,
- debugfile, debugopt, classified_user_genomes,
- unclassified_user_genomes, tt)
-
- def _map_high_taxonomy(self, high_classification, mapping_dict, summary_file):
- mapped_rank = {}
- counter = 0
- for k, v in high_classification.items():
- # if the classification has an order
-
- rk_to_check = v.get('tk_tax').split(
- ';')[self.order_rank.index(self.rank_of_interest)]
- if len(rk_to_check) > 3:
- mapped_rank.setdefault(
- mapping_dict.get(rk_to_check), []).append(k)
- counter += 1
- else:
- summary_row = ClassifySummaryFileRow()
- summary_row.gid = k
- summary_row.classification = v.get('tk_tax')
- summary_row.pplacer_tax = v.get('pplacer_tax')
- summary_row.red_value = v.get('rel_dist')
- summary_file.add_row(summary_row)
- return mapped_rank, counter
+ debug_file, debugopt, classified_user_genomes,
+ unclassified_user_genomes, tt, order_in_spe_tree)
def _assign_mrca_red(self, input_tree, marker_set_id, levelopt=None, tree_iter=None):
"""Parse the pplacer tree and write the partial taxonomy for each user genome based on their placements
@@ -862,7 +979,7 @@ def _assign_mrca_red(self, input_tree, marker_set_id, levelopt=None, tree_iter=N
Parameters
----------
input_tree : pplacer tree
- marker_set_id : bacterial or archeal id (bac120 or ar122)
+        marker_set_id : bacterial or archaeal id (bac120 or ar53)
Returns
-------
@@ -885,8 +1002,8 @@ def _assign_mrca_red(self, input_tree, marker_set_id, levelopt=None, tree_iter=N
red_file = Config.HIGH_RED_FILE
elif levelopt == 'low':
red_file = Config.LOW_RED_FILE.format(iter=tree_iter)
- if marker_set_id == 'ar122':
- red_file = Config.MRCA_RED_AR122
+ if marker_set_id == 'ar53':
+ red_file = Config.MRCA_RED_AR53
# create map from leave labels to tree nodes
leaf_node_map = {}
@@ -913,6 +1030,8 @@ def _assign_mrca_red(self, input_tree, marker_set_id, levelopt=None, tree_iter=N
# We only give RED value to added nodes placed on a reference edge ( between a reference parent and a reference child)
# The new red value for the pplacer node =
# RED_parent + (RED_child -RED_parent) * ( (pplacer_disttoroot - parent_disttoroot) / (child_disttoroot - parent_disttoroot) )
+
+ count = 0
for nd in tree.leaf_nodes():
if nd not in reference_nodes:
nd.rel_dist = 1.0
@@ -962,7 +1081,7 @@ def _get_pplacer_taxonomy(self, pplacer_classify_file, marker_set_id, user_msa_f
Parameters
----------
pplacer_classify_file : output file object to write
- marker_set_id : bacterial or archaeal id (bac120 or ar122)
+ marker_set_id : bacterial or archaeal id (bac120 or ar53)
user_msa_file : msa file listing all user genomes for a certain domain
tree : pplacer tree including the user genomes
@@ -986,7 +1105,7 @@ def _get_pplacer_taxonomy(self, pplacer_classify_file, marker_set_id, user_msa_f
cur_node = cur_node.parent_node
taxa_str = ';'.join(taxa[::-1])
pplacer_classify_file.add_genome(leaf.taxon.label,
- self.standardise_taxonomy(taxa_str, marker_set_id))
+ standardise_taxonomy(taxa_str, marker_set_id))
pplacer_classify_file.write()
return pplacer_classify_file.data
@@ -1032,15 +1151,19 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
Parameters
----------
- fastani_verification : dictionary listing the potential genomes associated with a user genome d[user_genome] = {"potential_g": [
- (potential_genome_in_same_genus,patristic distance)], "pplacer_g": genome_of_reference_selected_by_pplacer(if any)}
- all_fastani_dict : dictionary listing the fastani ANI for each user genomes against the potential genomes d[user_genome]={ref_genome1:{"af":af,"ani":ani},ref_genome2:{"af":af,"ani":ani}}
+ fastani_verification : dictionary listing the potential genomes associated with a user genome
+ d[user_genome] = {"potential_g": [(potential_genome_in_same_genus,patristic distance)],
+ "pplacer_g": genome_of_reference_selected_by_pplacer(if any)}
+ all_fastani_dict : dictionary listing the fastani ANI for each user genomes against the potential genomes
+ d[user_genome]={ref_genome1:{"af":af,"ani":ani},ref_genome2:{"af":af,"ani":ani}}
summaryfout: output file
Returns
-------
- classified_user_genomes: list of genomes where FastANI and Placement in the reference tree have predicted a taxonomy
- unclassified_user_genomes: dictionary of genomes where FastANI and Placement in the reference tree have not predicted a taxonomy
+ classified_user_genomes: list of genomes where FastANI and Placement in the reference tree have
+ predicted a taxonomy
+ unclassified_user_genomes: dictionary of genomes where FastANI and Placement in the reference tree have not
+ predicted a taxonomy
"""
classified_user_genomes = []
@@ -1057,7 +1180,7 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
if userleaf.taxon.label in bac_ar_diff:
notes.append('Genome domain questionable ( {}% Bacterial, {}% Archaeal)'.format(
bac_ar_diff.get(userleaf.taxon.label).get('bac120'),
- bac_ar_diff.get(userleaf.taxon.label).get('ar122')))
+ bac_ar_diff.get(userleaf.taxon.label).get('ar53')))
if potential_nodes.get("pplacer_g"):
pplacer_leafnode = potential_nodes.get("pplacer_g").taxon.label
@@ -1067,7 +1190,7 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
# import IPython; IPython.embed()
prefilter_af_reference_dictionary = {k: v for k, v in
all_fastani_dict.get(userleaf.taxon.label).items() if v.get(
- 'af') >= self.af_threshold}
+ 'af') >= self.af_threshold}
sorted_prefilter_af_dict = sorted(iter(prefilter_af_reference_dictionary.items()),
key=lambda _x_y1: (_x_y1[1]['ani'], _x_y1[1]['af']), reverse=True)
@@ -1076,7 +1199,8 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
fastani_matching_reference = None
if len(sorted_prefilter_af_dict) > 0:
- if sorted_prefilter_af_dict[0][1].get('ani') >= self.species_radius.get(sorted_prefilter_af_dict[0][0]):
+ if sorted_prefilter_af_dict[0][1].get('ani') >= self.species_radius.get(
+ sorted_prefilter_af_dict[0][0]):
fastani_matching_reference = sorted_prefilter_af_dict[0][0]
current_ani = all_fastani_dict.get(userleaf.taxon.label).get(
fastani_matching_reference).get('ani')
@@ -1110,7 +1234,7 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
if pplacer_leafnode == fastani_matching_reference:
if taxa_str.endswith("s__"):
taxa_str = taxa_str + pplacer_leafnode
- summary_row.classification = self.standardise_taxonomy(
+ summary_row.classification = standardise_taxonomy(
taxa_str)
summary_row.closest_placement_ref = summary_row.fastani_ref
summary_row.closest_placement_radius = summary_row.fastani_ref_radius
@@ -1129,7 +1253,7 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
else:
taxa_str = ";".join(self.gtdb_taxonomy.get(
add_ncbi_prefix(fastani_matching_reference)))
- summary_row.classification = self.standardise_taxonomy(
+ summary_row.classification = standardise_taxonomy(
taxa_str)
summary_row.closest_placement_ref = pplacer_leafnode
summary_row.closest_placement_radius = str(
@@ -1177,7 +1301,7 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
elif userleaf.taxon.label in all_fastani_dict:
prefilter_af_reference_dictionary = {k: v for k, v in
all_fastani_dict.get(userleaf.taxon.label).items() if v.get(
- 'af') >= self.af_threshold}
+ 'af') >= self.af_threshold}
sorted_prefilter_af_dict = sorted(iter(prefilter_af_reference_dictionary.items()),
key=lambda _x_y1: (_x_y1[1]['ani'], _x_y1[1]['af']), reverse=True)
sorted_dict = sorted(iter(all_fastani_dict.get(
@@ -1205,13 +1329,14 @@ def _sort_fastani_results(self, fastani_verification, pplacer_taxonomy_dict,
if len(notes) > 0:
summary_row.warnings = ';'.join(notes)
- if sorted_prefilter_af_dict[0][1].get('ani') >= self.species_radius.get(sorted_prefilter_af_dict[0][0]):
+ if sorted_prefilter_af_dict[0][1].get('ani') >= self.species_radius.get(
+ sorted_prefilter_af_dict[0][0]):
fastani_matching_reference = sorted_prefilter_af_dict[0][0]
exception_genomes.append(fastani_matching_reference)
taxa_str = ";".join(self.gtdb_taxonomy.get(
add_ncbi_prefix(fastani_matching_reference)))
- summary_row.classification = self.standardise_taxonomy(
+ summary_row.classification = standardise_taxonomy(
taxa_str)
summary_row.fastani_ref = fastani_matching_reference
@@ -1280,7 +1405,7 @@ def _get_redtax(self, list_subnode, closest_rank):
# otherwise it's stored as undefined
# case a,b
subtax.append(closest_rank + "undefined")
- return ';'.join(subtax)
+ return subtax
def _parse_subnodes(self, list_subnode, closest_rank):
subtax = []
@@ -1674,10 +1799,18 @@ def _get_fastani_genome_path(self, fastani_verification, genomes):
shortleaf = leafnode.taxon.label
if leafnode.taxon.label.startswith('GB_') or leafnode.taxon.label.startswith('RS_'):
shortleaf = leafnode.taxon.label[3:]
- ref_path = os.path.join(
- Config.FASTANI_GENOMES,
- self.parse_leaf_to_dir_path(shortleaf),
- shortleaf + Config.FASTANI_GENOMES_EXT)
+ # TODEL UBA genomes
+ if shortleaf.startswith("UBA"):
+ ref_path = os.path.join(
+ Config.FASTANI_GENOMES,
+ 'UBA',
+ shortleaf + Config.FASTANI_GENOMES_EXT)
+ else:
+ ref_path = os.path.join(
+ Config.FASTANI_GENOMES,
+ self.parse_leaf_to_dir_path(shortleaf),
+ shortleaf + Config.FASTANI_GENOMES_EXT)
+
if not os.path.isfile(ref_path):
raise GTDBTkExit(f'Reference genome missing from FastANI database: {ref_path}')
@@ -1685,186 +1818,3 @@ def _get_fastani_genome_path(self, fastani_verification, genomes):
dict_paths[shortleaf] = ref_path
return dict_compare, dict_paths
-
- # FUNCTION FOR SPLIT Tree
-
- def _get_high_pplacer_taxonomy(self, out_dir, marker_set_id, prefix, user_msa_file, tree):
- """Parse the pplacer tree and write the partial taxonomy for each user genome based on their placements
-
- Parameters
- ----------
- out_dir : output directory
- prefix : desired prefix for output files
- marker_set_id : bacterial or archeal id (bac120 or ar122)
- user_msa_file : msa file listing all user genomes for a certain domain
- tree : pplacer tree including the user genomes
-
- Returns
- -------
- dictionary[genome_label]=pplacer_taxonomy
-
- """
- results = {}
- out_root = os.path.join(out_dir, 'classify', 'intermediate_results')
- make_sure_path_exists(out_root)
- result = {}
-
- if marker_set_id == 'bac120':
- out_pplacer = os.path.join(
- out_dir, PATH_BAC120_HIGH_PPLACER_CLASS.format(prefix=prefix))
- # elif marker_set_id == 'ar122':
- # out_pplacer = os.path.join(
- # out_dir, PATH_AR122_PPLACER_CLASS.format(prefix=prefix))
- else:
- self.logger.error('There was an error determining the marker set.')
- raise GenomeMarkerSetUnknown
-
- marker_dict = Config.RED_DIST_BAC_DICT
-
- # We get the pplacer taxonomy for comparison
- count = 0
- with open(out_pplacer, 'w') as pplaceout:
- user_genome_ids = set(read_fasta(user_msa_file).keys())
- for leaf in tree.leaf_node_iter():
- is_on_terminal_branch = False
- term_branch_taxonomy = ''
- if leaf.taxon.label in user_genome_ids:
- count += 1
- taxa = []
- cur_node = leaf
- current_rel_dist = 1.0
- while cur_node.parent_node:
- if hasattr(cur_node, 'rel_dist') and current_rel_dist == 1.0 and cur_node.rel_dist < 1.0:
- current_rel_dist = cur_node.rel_dist
- if cur_node.is_internal():
- child_genomes = [nd.taxon.label
- for nd in cur_node.leaf_nodes()
- if nd.taxon.label not in user_genome_ids]
- if len(child_genomes) == 1:
- is_on_terminal_branch = True
- term_branch_taxonomy = self.gtdb_taxonomy.get(
- child_genomes[0])
-
- _support, taxon, _aux_info = parse_label(
- cur_node.label)
- if taxon:
- for t in taxon.split(';')[::-1]:
- taxa.append(t.strip())
- cur_node = cur_node.parent_node
-
- taxa_str = ';'.join(taxa[::-1])
- pplacer_tax = str(taxa_str)
-
- if is_on_terminal_branch:
- tax_of_leaf = term_branch_taxonomy[term_branch_taxonomy.index(
- taxa_str.split(';')[-1]) + 1:-1]
- #print ('tax_of_leaf', tax_of_leaf)
- taxa_str = self._classify_on_terminal_branch(
- tax_of_leaf, current_rel_dist, taxa_str.split(';')[-1][0:3], term_branch_taxonomy, marker_dict)
- else:
- cur_node = leaf
- parent_taxon_node = cur_node.parent_node
- _support, parent_taxon, _aux_info = parse_label(
- parent_taxon_node.label)
-
- while parent_taxon_node is not None and not parent_taxon:
- parent_taxon_node = parent_taxon_node.parent_node
- _support, parent_taxon, _aux_info = parse_label(
- parent_taxon_node.label)
-
- # is the node represent multiple ranks, we select the lowest one
- # i.e. if node is p__A;c__B;o__C we pick o__
- parent_rank = parent_taxon.split(";")[-1][0:3]
- parent_rel_dist = parent_taxon_node.rel_dist
-
- if parent_rank != 'g__':
- node_in_ref_tree = cur_node
- while len([childnd.taxon.label.replace("'", '') for childnd in node_in_ref_tree.leaf_iter(
- ) if childnd.taxon.label[0:3] in ['RS_', 'UBA', 'GB_']]) == 0:
- node_in_ref_tree = node_in_ref_tree.parent_node
- # we select a node of the reference tree
-
- # we select the child rank (if parent_rank = 'c__'
- # child rank will be 'o__)'
- child_rk = self.order_rank[self.order_rank.index(
- parent_rank) + 1]
-
- # get all reference genomes under the current node
- list_subnode = [childnd.taxon.label.replace("'", '') for childnd in
- node_in_ref_tree.leaf_iter()
- if childnd.taxon.label[0:3] in ['RS_', 'UBA', 'GB_']]
-
- # get all names for the child rank
- list_ranks = [self.gtdb_taxonomy.get(name)[self.order_rank.index(child_rk)]
- for name in list_subnode]
-
- # if there is just one rank name
- if len(set(list_ranks)) == 1:
- for subranknd in node_in_ref_tree.preorder_iter():
- _support, subranknd_taxon, _aux_info = parse_label(
- subranknd.label)
- if subranknd.is_internal() and subranknd_taxon is not None and subranknd_taxon.startswith(
- child_rk):
- child_taxons = subranknd_taxon.split(
- ";")
- child_taxon_node = subranknd
- child_rel_dist = child_taxon_node.rel_dist
- break
- taxa_str = self._classify_on_internal_branch(
- child_taxons, current_rel_dist, child_rel_dist, parent_rank, taxa_str, marker_dict)
- results[leaf.taxon.label] = {"tk_tax": self.standardise_taxonomy(taxa_str, 'bac120'),
- "pplacer_tax": self.standardise_taxonomy(pplacer_tax, 'bac120'), 'rel_dist': current_rel_dist}
- pplaceout.write('{}\t{}\t{}\t{}\t{}\n'.format(leaf.taxon.label, self.standardise_taxonomy(taxa_str, 'bac120'),
- self.standardise_taxonomy(pplacer_tax, 'bac120'), is_on_terminal_branch, current_rel_dist))
- return results
-
- def _classify_on_internal_branch(self, child_taxons, current_rel_list, child_rel_dist, parent_rank, taxa_str, marker_dict):
- # if there is multiple ranks on the child node (i.e genome between p__Nitrospirae and c__Nitrospiria;o__Nitrospirales;f__Nitropiraceae)
- # we loop through the list of rank from f_ to c_ rank
- closest_rank = None
-
- for child_taxon in reversed(child_taxons):
- # if lower rank is c__Nitropiria
- if child_taxon == child_taxons[0]:
- if (abs(current_rel_list - marker_dict.get(child_taxon[:3])) < abs(
- child_rel_dist - marker_dict.get(child_taxon[:3])) and
- abs(current_rel_list - marker_dict.get(child_taxon[:3])) < abs(
- current_rel_list - marker_dict.get(parent_rank))):
- closest_rank = child_taxon[:3]
- elif closest_rank is None:
- closest_rank = parent_rank
- else:
- pchildrank = child_taxons[child_taxons.index(
- child_taxon) - 1]
- if (abs(current_rel_list - marker_dict.get(child_taxon[:3])) < abs(
- current_rel_list - marker_dict.get(pchildrank[:3])) and
- abs(current_rel_list - marker_dict.get(child_taxon[:3])) < abs(
- child_rel_dist - marker_dict.get(child_taxon[:3]))):
- closest_rank = child_taxon
- break
- if closest_rank is not None:
- for k, v in self.gtdb_taxonomy.items():
- if '{};'.format(closest_rank) in v:
- return ';'.join(v[1:v.index(closest_rank) + 1])
- return taxa_str
-
- def _classify_on_terminal_branch(self, list_leaf_ranks, current_rel_list, parent_rank, term_branch_taxonomy, marker_dict):
- closest_rank = None
- for leaf_taxon in reversed(list_leaf_ranks):
- # print leaf_taxon
- if leaf_taxon == list_leaf_ranks[0]:
- if abs(current_rel_list - marker_dict.get(leaf_taxon[:3])) < abs(
- current_rel_list - marker_dict.get(parent_rank)):
- closest_rank = leaf_taxon[:3]
- break
- else:
- pchildrank = list_leaf_ranks[list_leaf_ranks.index(
- leaf_taxon) - 1]
- # print leaf_taxon[:3]
- if abs(current_rel_list - marker_dict.get(leaf_taxon[:3])) < abs(
- current_rel_list - marker_dict.get(pchildrank[:3])):
- closest_rank = leaf_taxon[:3]
- break
- if closest_rank is None:
- closest_rank = parent_rank
- return ';'.join(term_branch_taxonomy[1:self.order_rank.index(closest_rank) + 1])
diff --git a/gtdbtk/cli.py b/gtdbtk/cli.py
index c4e2b7f6..5b31274b 100644
--- a/gtdbtk/cli.py
+++ b/gtdbtk/cli.py
@@ -4,7 +4,7 @@
from gtdbtk.biolib_lite.custom_help_formatter import ChangeTempAction
from gtdbtk.biolib_lite.custom_help_formatter import CustomHelpFormatter
-from gtdbtk.config.config import AF_THRESHOLD
+from gtdbtk.config.config import AF_THRESHOLD, PPLACER_MIN_RAM_BAC
@contextmanager
@@ -31,8 +31,14 @@ def __temp_dir(group):
help="specify alternative directory for temporary files")
+def __genes(group):
+ group.add_argument('--genes', action='store_true', default=False,
+ help='indicates input files contain called genes (skip gene calling)')
+
+
def __genome_dir(group):
- group.add_argument('--genome_dir', help="directory containing genome files in FASTA format")
+ group.add_argument(
+ '--genome_dir', help="directory containing genome files in FASTA format")
def __batchfile(group):
@@ -153,7 +159,8 @@ def __prefix(group):
def __cpus(group):
- group.add_argument('--cpus', default=1, type=int, help='number of CPUs to use')
+ group.add_argument('--cpus', default=1, type=int,
+ help='number of CPUs to use')
def __force(group):
@@ -172,12 +179,12 @@ def __help(group):
def __pplacer_cpus(group):
group.add_argument('--pplacer_cpus', type=int, default=None,
- help='use ``pplacer_cpus`` during placement (default: ``cpus``)')
+ help='number of CPUs to use during pplacer placement')
def __scratch_dir(group):
group.add_argument('--scratch_dir', type=str, default=None,
- help='Reduce pplacer memory usage by writing to disk (slower).')
+ help='reduce pplacer memory usage by writing to disk (slower).')
def __recalculate_red(group):
@@ -185,9 +192,10 @@ def __recalculate_red(group):
help='recalculate RED values based on the reference tree and all added user genomes')
-def __split_tree(group):
- group.add_argument('-s', '--split_tree', default=False, action='store_true',
- help='Use shards of the reference tree (for Bacteria only). reduce memory usage (slower).')
+def __full_tree(group):
+ group.add_argument('-f', '--full_tree', default=False, action='store_true',
+ help='use the unsplit bacterial tree for the classify step; this is the original GTDB-Tk '
+ f'approach (version < 2) and requires more than {PPLACER_MIN_RAM_BAC} GB of RAM to load the reference tree')
def __identify_dir(group, required):
@@ -237,19 +245,23 @@ def __no_mash(group):
def __mash_k(group):
- group.add_argument('--mash_k', default=16, type=int, help='k-mer size [1-32]')
+ group.add_argument('--mash_k', default=16, type=int,
+ help='k-mer size [1-32]')
def __mash_s(group):
- group.add_argument('--mash_s', default=5000, type=int, help='maximum number of non-redundant hashes')
+ group.add_argument('--mash_s', default=5000, type=int,
+ help='maximum number of non-redundant hashes')
def __mash_d(group):
- group.add_argument('--mash_d', default=0.1, type=float, help='maximum distance to keep [0-1]')
+ group.add_argument('--mash_d', default=0.1, type=float,
+ help='maximum distance to keep [0-1]')
def __mash_v(group):
- group.add_argument('--mash_v', default=1.0, type=float, help='maximum p-value to keep [0-1]')
+ group.add_argument('--mash_v', default=1.0, type=float,
+ help='maximum p-value to keep [0-1]')
def __mash_db(group):
@@ -259,7 +271,7 @@ def __mash_db(group):
def __min_af(group):
group.add_argument('--min_af', type=float, default=AF_THRESHOLD,
- help='minimum alignment fraction to consider closest genome')
+ help='minimum alignment fraction to assign genome to a species cluster')
def __untrimmed_msa(group, required):
@@ -267,6 +279,11 @@ def __untrimmed_msa(group, required):
help="path to the untrimmed MSA file")
+def __keep_intermediates(group):
+ group.add_argument('--keep_intermediates', default=False, action='store_true',
+ help='keep intermediate files in the final directory')
+
+
def __output(group, required):
group.add_argument('--output', type=str, default=None, required=required,
help='output file')
@@ -295,7 +312,8 @@ def __write_single_copy_genes(group):
def get_main_parser():
# Setup the main, and sub parsers.
- main_parser = argparse.ArgumentParser(prog='gtdbtk', add_help=False, conflict_handler='resolve')
+ main_parser = argparse.ArgumentParser(
+ prog='gtdbtk', add_help=False, conflict_handler='resolve')
sub_parsers = main_parser.add_subparsers(help="--", dest='subparser_name')
# de novo workflow.
@@ -326,9 +344,11 @@ def get_main_parser():
__gtdbtk_classification_file(grp)
__custom_taxonomy_file(grp)
__prefix(grp)
+ __genes(grp)
__cpus(grp)
__force(grp)
__temp_dir(grp)
+ __keep_intermediates(grp)
__debug(grp)
__help(grp)
@@ -340,15 +360,17 @@ def get_main_parser():
with arg_group(parser, 'required named arguments') as grp:
__out_dir(grp, required=True)
with arg_group(parser, 'optional arguments') as grp:
+ __full_tree(grp)
__extension(grp)
__min_perc_aa(grp)
__prefix(grp)
+ __genes(grp)
__cpus(grp)
__pplacer_cpus(grp)
__force(grp)
__scratch_dir(grp)
# __recalculate_red(grp)
- # __split_tree(grp)
+ __keep_intermediates(grp)
__min_af(grp)
__temp_dir(grp)
__debug(grp)
@@ -364,6 +386,7 @@ def get_main_parser():
with arg_group(parser, 'optional arguments') as grp:
__extension(grp)
__prefix(grp)
+ __genes(grp)
__cpus(grp)
__force(grp)
__write_single_copy_genes(grp)
@@ -423,7 +446,7 @@ def get_main_parser():
__cpus(grp)
__pplacer_cpus(grp)
__scratch_dir(grp)
- # __split_tree(grp)
+ __full_tree(grp)
# __recalculate_red(grp)
__min_af(grp)
__temp_dir(grp)
@@ -510,6 +533,25 @@ def get_main_parser():
__debug(grp)
__help(grp)
+ # Remove labels
+ with subparser(sub_parsers, 'remove_labels', 'Remove labels (bootstrap values, node labels) from an Newick tree to '
+ 'to improve compatibility with tree viewers.') as parser:
+ with arg_group(parser, 'required named arguments') as grp:
+ __input_tree(grp, required=True)
+ __output_tree(grp, required=True)
+ with arg_group(parser, 'optional arguments') as grp:
+ __debug(grp)
+ __help(grp)
+
+ # Remove labels
+ with subparser(sub_parsers, 'convert_to_itol', 'Reformat the GTDB-Tk tree to be iTOL compatible.') as parser:
+ with arg_group(parser, 'required named arguments') as grp:
+ __input_tree(grp, required=True)
+ __output_tree(grp, required=True)
+ with arg_group(parser, 'optional arguments') as grp:
+ __debug(grp)
+ __help(grp)
+
# Export MSA.
with subparser(sub_parsers, 'export_msa', 'Export the untrimmed archaeal or bacterial MSA file.') as parser:
with arg_group(parser, 'required named arguments') as grp:
diff --git a/gtdbtk/config/config.py b/gtdbtk/config/config.py
index bfec3d71..0238fbe2 100644
--- a/gtdbtk/config/config.py
+++ b/gtdbtk/config/config.py
@@ -45,7 +45,7 @@
LOW_TREE_MAPPING_FILE = os.path.join(LOW_SPLIT_DIR, 'tree_mapping.tsv')
-HIGH_PPLACER_REF_PKG = 'gtdbtk_package_high_level'
+HIGH_PPLACER_REF_PKG = 'gtdbtk_package_backbone.refpkg'
HIGH_RED_FILE = os.path.join(HIGH_RED_DIR, 'high_red_value.tsv')
LOW_PPLACER_REF_PKG = os.path.join(LOW_PPLACER_DIR, 'gtdbtk.package.{iter}.refpkg')
LOW_RED_FILE = os.path.join(LOW_RED_DIR, 'red_value_{iter}.tsv')
@@ -83,6 +83,91 @@
RED_MIN_CHILDREN = 2
# Marker information
+# Marker information TO DELETE , This is only temporary
+# BAC120_MARKERS = {"PFAM": ["PF00380.14.hmm", "PF00410.14.hmm", "PF00466.15.hmm",
+# "PF01025.14.hmm", "PF02576.12.hmm", "PF03726.9.hmm"],
+# "TIGRFAM": ["TIGR00006.HMM", "TIGR00019.HMM", "TIGR00020.HMM",
+# "TIGR00029.HMM", "TIGR00043.HMM", "TIGR00054.HMM",
+# "TIGR00059.HMM", "TIGR00061.HMM", "TIGR00064.HMM",
+# "TIGR00065.HMM", "TIGR00082.HMM", "TIGR00083.HMM",
+# "TIGR00084.HMM", "TIGR00086.HMM", "TIGR00088.HMM",
+# "TIGR00090.HMM", "TIGR00092.HMM", "TIGR00095.HMM",
+# "TIGR00115.HMM", "TIGR00116.HMM", "TIGR00138.HMM",
+# "TIGR00158.HMM", "TIGR00166.HMM", "TIGR00168.HMM",
+# "TIGR00186.HMM", "TIGR00194.HMM", "TIGR00250.HMM",
+# "TIGR00337.HMM", "TIGR00344.HMM", "TIGR00362.HMM",
+# "TIGR00382.HMM", "TIGR00392.HMM", "TIGR00396.HMM",
+# "TIGR00398.HMM", "TIGR00414.HMM", "TIGR00416.HMM",
+# "TIGR00420.HMM", "TIGR00431.HMM", "TIGR00435.HMM",
+# "TIGR00436.HMM", "TIGR00442.HMM", "TIGR00445.HMM",
+# "TIGR00456.HMM", "TIGR00459.HMM", "TIGR00460.HMM",
+# "TIGR00468.HMM", "TIGR00472.HMM", "TIGR00487.HMM",
+# "TIGR00496.HMM", "TIGR00539.HMM", "TIGR00580.HMM",
+# "TIGR00593.HMM", "TIGR00615.HMM", "TIGR00631.HMM",
+# "TIGR00634.HMM", "TIGR00635.HMM", "TIGR00643.HMM",
+# "TIGR00663.HMM", "TIGR00717.HMM", "TIGR00755.HMM",
+# "TIGR00810.HMM", "TIGR00922.HMM", "TIGR00928.HMM",
+# "TIGR00959.HMM", "TIGR00963.HMM", "TIGR00964.HMM",
+# "TIGR00967.HMM", "TIGR01009.HMM", "TIGR01011.HMM",
+# "TIGR01017.HMM", "TIGR01021.HMM", "TIGR01029.HMM",
+# "TIGR01032.HMM", "TIGR01039.HMM", "TIGR01044.HMM",
+# "TIGR01059.HMM", "TIGR01063.HMM", "TIGR01066.HMM",
+# "TIGR01071.HMM", "TIGR01079.HMM", "TIGR01082.HMM",
+# "TIGR01087.HMM", "TIGR01128.HMM", "TIGR01146.HMM",
+# "TIGR01164.HMM", "TIGR01169.HMM", "TIGR01171.HMM",
+# "TIGR01302.HMM", "TIGR01391.HMM", "TIGR01393.HMM",
+# "TIGR01394.HMM", "TIGR01510.HMM", "TIGR01632.HMM",
+# "TIGR01951.HMM", "TIGR01953.HMM", "TIGR02012.HMM",
+# "TIGR02013.HMM", "TIGR02027.HMM", "TIGR02075.HMM",
+# "TIGR02191.HMM", "TIGR02273.HMM", "TIGR02350.HMM",
+# "TIGR02386.HMM", "TIGR02397.HMM", "TIGR02432.HMM",
+# "TIGR02729.HMM", "TIGR03263.HMM", "TIGR03594.HMM",
+# "TIGR03625.HMM", "TIGR03632.HMM", "TIGR03654.HMM",
+# "TIGR03723.HMM", "TIGR03725.HMM", "TIGR03953.HMM"]}
+
+# AR53_MARKERS = {"PFAM": ["PF01868.11.hmm", "PF01282.14.hmm", "PF01655.13.hmm",
+# "PF01092.14.hmm", "PF01000.21.hmm", "PF00368.13.hmm",
+# "PF00827.12.hmm", "PF01269.12.hmm", "PF00466.15.hmm",
+# "PF01015.13.hmm", "PF13685.1.hmm", "PF02978.14.hmm",
+# "PF04919.7.hmm", "PF01984.15.hmm", "PF04104.9.hmm",
+# "PF00410.14.hmm", "PF01798.13.hmm", "PF01864.12.hmm",
+# "PF01990.12.hmm", "PF07541.7.hmm", "PF04019.7.hmm",
+# "PF00900.15.hmm", "PF01090.14.hmm", "PF02006.11.hmm",
+# "PF01157.13.hmm", "PF01191.14.hmm", "PF01866.12.hmm",
+# "PF01198.14.hmm", "PF01496.14.hmm", "PF00687.16.hmm",
+# "PF03874.11.hmm", "PF01194.12.hmm", "PF01200.13.hmm",
+# "PF13656.1.hmm", "PF01280.15.hmm"],
+# "TIGRFAM": ["TIGR00468.HMM", "TIGR01060.HMM", "TIGR03627.HMM",
+# "TIGR01020.HMM", "TIGR02258.HMM", "TIGR00293.HMM",
+# "TIGR00389.HMM", "TIGR01012.HMM", "TIGR00490.HMM",
+# "TIGR03677.HMM", "TIGR03636.HMM", "TIGR03722.HMM",
+# "TIGR00458.HMM", "TIGR00291.HMM", "TIGR00670.HMM",
+# "TIGR00064.HMM", "TIGR03629.HMM", "TIGR00021.HMM",
+# "TIGR03672.HMM", "TIGR00111.HMM", "TIGR03684.HMM",
+# "TIGR01077.HMM", "TIGR01213.HMM", "TIGR01080.HMM",
+# "TIGR00501.HMM", "TIGR00729.HMM", "TIGR01038.HMM",
+# "TIGR00270.HMM", "TIGR03628.HMM", "TIGR01028.HMM",
+# "TIGR00521.HMM", "TIGR03671.HMM", "TIGR00240.HMM",
+# "TIGR02390.HMM", "TIGR02338.HMM", "TIGR00037.HMM",
+# "TIGR02076.HMM", "TIGR00335.HMM", "TIGR01025.HMM",
+# "TIGR00471.HMM", "TIGR00336.HMM", "TIGR00522.HMM",
+# "TIGR02153.HMM", "TIGR02651.HMM", "TIGR03674.HMM",
+# "TIGR00323.HMM", "TIGR00134.HMM", "TIGR02236.HMM",
+# "TIGR03683.HMM", "TIGR00491.HMM", "TIGR00658.HMM",
+# "TIGR03680.HMM", "TIGR00392.HMM", "TIGR00422.HMM",
+# "TIGR00279.HMM", "TIGR01052.HMM", "TIGR00442.HMM",
+# "TIGR00308.HMM", "TIGR00398.HMM", "TIGR00456.HMM",
+# "TIGR00549.HMM", "TIGR00408.HMM", "TIGR00432.HMM",
+# "TIGR00264.HMM", "TIGR00982.HMM", "TIGR00324.HMM",
+# "TIGR01952.HMM", "TIGR03626.HMM", "TIGR03670.HMM",
+# "TIGR00337.HMM", "TIGR01046.HMM", "TIGR01018.HMM",
+# "TIGR00936.HMM", "TIGR00463.HMM", "TIGR01309.HMM",
+# "TIGR03653.HMM", "TIGR00042.HMM", "TIGR02389.HMM",
+# "TIGR00307.HMM", "TIGR03673.HMM", "TIGR00373.HMM",
+# "TIGR01008.HMM", "TIGR00283.HMM", "TIGR00425.HMM",
+# "TIGR00405.HMM", "TIGR03665.HMM", "TIGR00448.HMM"]}
+
+
BAC120_MARKERS = {"PFAM": ["PF00380.20.hmm", "PF00410.20.hmm", "PF00466.21.hmm",
"PF01025.20.hmm", "PF02576.18.hmm", "PF03726.15.hmm"],
"TIGRFAM": ["TIGR00006.HMM", "TIGR00019.HMM", "TIGR00020.HMM",
@@ -124,62 +209,83 @@
"TIGR03625.HMM", "TIGR03632.HMM", "TIGR03654.HMM",
"TIGR03723.HMM", "TIGR03725.HMM", "TIGR03953.HMM"]}
-AR122_MARKERS = {"PFAM": ["PF01868.17.hmm", "PF01282.20.hmm", "PF01655.19.hmm",
- "PF01092.20.hmm", "PF01000.27.hmm", "PF00368.19.hmm",
- "PF00827.18.hmm", "PF01269.18.hmm", "PF00466.21.hmm",
- "PF01015.19.hmm", "PF13685.7.hmm", "PF02978.20.hmm",
- "PF04919.13.hmm", "PF01984.21.hmm", "PF04104.15.hmm",
- "PF00410.20.hmm", "PF01798.19.hmm", "PF01864.18.hmm",
- "PF01990.18.hmm", "PF07541.13.hmm", "PF04019.13.hmm",
- "PF00900.21.hmm", "PF01090.20.hmm", "PF02006.17.hmm",
- "PF01157.19.hmm", "PF01191.20.hmm", "PF01866.18.hmm",
- "PF01198.20.hmm", "PF01496.20.hmm", "PF00687.22.hmm",
- "PF03874.17.hmm", "PF01194.18.hmm", "PF01200.19.hmm",
- "PF13656.7.hmm", "PF01280.21.hmm"],
- "TIGRFAM": ["TIGR00468.HMM", "TIGR01060.HMM", "TIGR03627.HMM",
- "TIGR01020.HMM", "TIGR02258.HMM", "TIGR00293.HMM",
- "TIGR00389.HMM", "TIGR01012.HMM", "TIGR00490.HMM",
- "TIGR03677.HMM", "TIGR03636.HMM", "TIGR03722.HMM",
- "TIGR00458.HMM", "TIGR00291.HMM", "TIGR00670.HMM",
- "TIGR00064.HMM", "TIGR03629.HMM", "TIGR00021.HMM",
- "TIGR03672.HMM", "TIGR00111.HMM", "TIGR03684.HMM",
- "TIGR01077.HMM", "TIGR01213.HMM", "TIGR01080.HMM",
- "TIGR00501.HMM", "TIGR00729.HMM", "TIGR01038.HMM",
- "TIGR00270.HMM", "TIGR03628.HMM", "TIGR01028.HMM",
- "TIGR00521.HMM", "TIGR03671.HMM", "TIGR00240.HMM",
- "TIGR02390.HMM", "TIGR02338.HMM", "TIGR00037.HMM",
- "TIGR02076.HMM", "TIGR00335.HMM", "TIGR01025.HMM",
- "TIGR00471.HMM", "TIGR00336.HMM", "TIGR00522.HMM",
- "TIGR02153.HMM", "TIGR02651.HMM", "TIGR03674.HMM",
- "TIGR00323.HMM", "TIGR00134.HMM", "TIGR02236.HMM",
- "TIGR03683.HMM", "TIGR00491.HMM", "TIGR00658.HMM",
- "TIGR03680.HMM", "TIGR00392.HMM", "TIGR00422.HMM",
- "TIGR00279.HMM", "TIGR01052.HMM", "TIGR00442.HMM",
- "TIGR00308.HMM", "TIGR00398.HMM", "TIGR00456.HMM",
- "TIGR00549.HMM", "TIGR00408.HMM", "TIGR00432.HMM",
- "TIGR00264.HMM", "TIGR00982.HMM", "TIGR00324.HMM",
- "TIGR01952.HMM", "TIGR03626.HMM", "TIGR03670.HMM",
- "TIGR00337.HMM", "TIGR01046.HMM", "TIGR01018.HMM",
- "TIGR00936.HMM", "TIGR00463.HMM", "TIGR01309.HMM",
- "TIGR03653.HMM", "TIGR00042.HMM", "TIGR02389.HMM",
- "TIGR00307.HMM", "TIGR03673.HMM", "TIGR00373.HMM",
- "TIGR01008.HMM", "TIGR00283.HMM", "TIGR00425.HMM",
- "TIGR00405.HMM", "TIGR03665.HMM", "TIGR00448.HMM"]}
+#
+# #New Version of AR53_MARKERS
+# AR53_MARKERS = {"PFAM": ["PF01868.17.hmm", "PF01282.20.hmm", "PF01655.19.hmm",
+# "PF01092.20.hmm", "PF01000.27.hmm", "PF00368.19.hmm",
+# "PF00827.18.hmm", "PF01269.18.hmm", "PF00466.21.hmm",
+# "PF01015.19.hmm", "PF13685.7.hmm", "PF02978.20.hmm",
+# "PF04919.13.hmm", "PF01984.21.hmm", "PF04104.15.hmm",
+# "PF00410.20.hmm", "PF01798.19.hmm", "PF01864.18.hmm",
+# "PF01990.18.hmm", "PF07541.13.hmm", "PF04019.13.hmm",
+# "PF00900.21.hmm", "PF01090.20.hmm", "PF02006.17.hmm",
+# "PF01157.19.hmm", "PF01191.20.hmm", "PF01866.18.hmm",
+# "PF01198.20.hmm", "PF01496.20.hmm", "PF00687.22.hmm",
+# "PF03874.17.hmm", "PF01194.18.hmm", "PF01200.19.hmm",
+# "PF13656.7.hmm", "PF01280.21.hmm"],
+# "TIGRFAM": ["TIGR00468.HMM", "TIGR01060.HMM", "TIGR03627.HMM",
+# "TIGR01020.HMM", "TIGR02258.HMM", "TIGR00293.HMM",
+# "TIGR00389.HMM", "TIGR01012.HMM", "TIGR00490.HMM",
+# "TIGR03677.HMM", "TIGR03636.HMM", "TIGR03722.HMM",
+# "TIGR00458.HMM", "TIGR00291.HMM", "TIGR00670.HMM",
+# "TIGR00064.HMM", "TIGR03629.HMM", "TIGR00021.HMM",
+# "TIGR03672.HMM", "TIGR00111.HMM", "TIGR03684.HMM",
+# "TIGR01077.HMM", "TIGR01213.HMM", "TIGR01080.HMM",
+# "TIGR00501.HMM", "TIGR00729.HMM", "TIGR01038.HMM",
+# "TIGR00270.HMM", "TIGR03628.HMM", "TIGR01028.HMM",
+# "TIGR00521.HMM", "TIGR03671.HMM", "TIGR00240.HMM",
+# "TIGR02390.HMM", "TIGR02338.HMM", "TIGR00037.HMM",
+# "TIGR02076.HMM", "TIGR00335.HMM", "TIGR01025.HMM",
+# "TIGR00471.HMM", "TIGR00336.HMM", "TIGR00522.HMM",
+# "TIGR02153.HMM", "TIGR02651.HMM", "TIGR03674.HMM",
+# "TIGR00323.HMM", "TIGR00134.HMM", "TIGR02236.HMM",
+# "TIGR03683.HMM", "TIGR00491.HMM", "TIGR00658.HMM",
+# "TIGR03680.HMM", "TIGR00392.HMM", "TIGR00422.HMM",
+# "TIGR00279.HMM", "TIGR01052.HMM", "TIGR00442.HMM",
+# "TIGR00308.HMM", "TIGR00398.HMM", "TIGR00456.HMM",
+# "TIGR00549.HMM", "TIGR00408.HMM", "TIGR00432.HMM",
+# "TIGR00264.HMM", "TIGR00982.HMM", "TIGR00324.HMM",
+# "TIGR01952.HMM", "TIGR03626.HMM", "TIGR03670.HMM",
+# "TIGR00337.HMM", "TIGR01046.HMM", "TIGR01018.HMM",
+# "TIGR00936.HMM", "TIGR00463.HMM", "TIGR01309.HMM",
+# "TIGR03653.HMM", "TIGR00042.HMM", "TIGR02389.HMM",
+# "TIGR00307.HMM", "TIGR03673.HMM", "TIGR00373.HMM",
+# "TIGR01008.HMM", "TIGR00283.HMM", "TIGR00425.HMM",
+# "TIGR00405.HMM", "TIGR03665.HMM", "TIGR00448.HMM"]}
+
+#New Version of AR53_MARKERS
+AR53_MARKERS = {"PFAM": ["PF04919.13.hmm","PF07541.13.hmm","PF01000.27.hmm",
+"PF00687.22.hmm","PF00466.21.hmm","PF00827.18.hmm","PF01280.21.hmm","PF01090.20.hmm",
+"PF01200.19.hmm","PF01015.19.hmm","PF00900.21.hmm","PF00410.20.hmm"],
+"TIGRFAM":["TIGR00037.HMM","TIGR00064.HMM","TIGR00111.HMM",
+"TIGR00134.HMM","TIGR00279.HMM","TIGR00291.HMM","TIGR00323.HMM",
+"TIGR00335.HMM","TIGR00373.HMM","TIGR00405.HMM","TIGR00448.HMM",
+"TIGR00483.HMM","TIGR00491.HMM","TIGR00522.HMM","TIGR00967.HMM",
+"TIGR00982.HMM","TIGR01008.HMM","TIGR01012.HMM","TIGR01018.HMM",
+"TIGR01020.HMM","TIGR01028.HMM","TIGR01046.HMM","TIGR01052.HMM",
+"TIGR01171.HMM","TIGR01213.HMM","TIGR01952.HMM","TIGR02236.HMM",
+"TIGR02338.HMM","TIGR02389.HMM","TIGR02390.HMM","TIGR03626.HMM",
+"TIGR03627.HMM","TIGR03628.HMM","TIGR03629.HMM","TIGR03670.HMM",
+"TIGR03671.HMM","TIGR03672.HMM","TIGR03673.HMM","TIGR03674.HMM",
+"TIGR03676.HMM","TIGR03680.HMM"]}
+
+
+
# Information for Multiple hits markers:
DEFAULT_MULTIHIT_THRESHOLD = 10.0
# Information for aligning genomes
DEFAULT_DOMAIN_THRESHOLD = 10.0
-AR_MARKER_COUNT = 122
+AR_MARKER_COUNT = 53
BAC_MARKER_COUNT = 120
# Information about alignment Fraction to resolve fastANI results
-AF_THRESHOLD = 0.65
+AF_THRESHOLD = 0.5
# MSA file names
CONCAT_BAC120 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_bac120.faa")
-CONCAT_AR122 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_ar122.faa")
+CONCAT_AR53 = os.path.join(MSA_FOLDER, f"gtdb_{VERSION_DATA}_ar53.faa")
# Taxonomy file name
TAXONOMY_FILE = os.path.join(TAX_FOLDER, "gtdb_taxonomy.tsv")
@@ -189,15 +295,15 @@
# Mask file names
MASK_BAC120 = f"gtdb_{VERSION_DATA}_bac120.mask"
-MASK_AR122 = f"gtdb_{VERSION_DATA}_ar122.mask"
+MASK_AR53 = f"gtdb_{VERSION_DATA}_ar53.mask"
MASK_RPS23 = f"gtdb_{VERSION_DATA}_rps23.mask"
# Pplacer configuration
PPLACER_BAC120_REF_PKG = f"gtdb_{VERSION_DATA}_bac120.refpkg"
-PPLACER_AR122_REF_PKG = f"gtdb_{VERSION_DATA}_ar122.refpkg"
+PPLACER_AR53_REF_PKG = f"gtdb_{VERSION_DATA}_ar53.refpkg"
PPLACER_RPS23_REF_PKG = f"gtdb_{VERSION_DATA}_rps23.refpkg"
-PPLACER_MIN_RAM_BAC = 215
-PPLACER_MIN_RAM_ARC = 13
+PPLACER_MIN_RAM_BAC = 320
+PPLACER_MIN_RAM_ARC = 40
# Fastani configuration
FASTANI_SPECIES_THRESHOLD = 95.0
@@ -207,18 +313,18 @@
# MRCA RED VALUE
MRCA_RED_BAC120 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_bac120.tsv")
-MRCA_RED_AR122 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_ar122.tsv")
+MRCA_RED_AR53 = os.path.join(RED_DIR, f"gtdbtk_{VERSION_DATA}_ar53.tsv")
# Hashing information for validating the reference package.
-REF_HASHES = {PPLACER_DIR: '4d931b5109a240602f55228029b87ee768da8141',
- MASK_DIR: '36d6ac371d247b2b952523b9798e78908ea323fa',
- MARKER_DIR: '2ba5ae35fb272462663651d18fd9e523317e48cd',
- RADII_DIR: '9f9a2e21e27b9049044d04d731795499414a365c',
- MSA_FOLDER: 'b426865245c39ee9f01b0392fb8f7867a9f76f0a',
- METADATA_DIR: '7640aed96fdb13707a2b79b746a94335faabd6df',
- TAX_FOLDER: '4a7a1e4047c088e92dee9740206499cdb7e5beca',
- FASTANI_DIR: '70439cf088d0fa0fdbb4f47b4a6b47e199912139',
- RED_DIR: 'ad6a184150e7b6e58547912660a17999fadcfbff'}
+REF_HASHES = {PPLACER_DIR: '20903925a856a58b102a7b0ce160c5cbd2cf675b',
+ MASK_DIR: '50e414a9de18170e8cb97f990f89ff60a0fe29d5',
+ MARKER_DIR: '163f542c3f0a40f59df45d453aa235b39aa96e27',
+ RADII_DIR: '8fd13b1c5d7a7b073ba96fb628581613b293a374',
+ MSA_FOLDER: '24f250d7cf0eb0bc65dccd2f3c9247e553ea322f',
+ METADATA_DIR: '9772fbeac1311b31e10293fa610eb33aa1ec8e15',
+ TAX_FOLDER: '6fb0233b05633242369b40c026fd1ee53e266afa',
+ FASTANI_DIR: '973c456c02f55bb82908a6811c7076e207e9b206',
+ RED_DIR: '7b8b67b3157204b470c9eb809d3c39c4effffabc'}
# Config values for checking GTDB-Tk on startup.
GTDBTK_VER_CHECK = True
diff --git a/gtdbtk/config/output.py b/gtdbtk/config/output.py
index 7af03915..b949a4b1 100644
--- a/gtdbtk/config/output.py
+++ b/gtdbtk/config/output.py
@@ -6,7 +6,7 @@
DIR_MARKER_GENE = join(DIR_IDENTIFY_INTERMEDIATE, 'marker_genes')
DIR_IDENTIFY_FASTA = join(DIR_IDENTIFY_INTERMEDIATE, 'single_copy_fasta')
PATH_BAC120_MARKER_SUMMARY = join(DIR_IDENTIFY, '{prefix}.bac120.markers_summary.tsv')
-PATH_AR122_MARKER_SUMMARY = join(DIR_IDENTIFY, '{prefix}.ar122.markers_summary.tsv')
+PATH_AR53_MARKER_SUMMARY = join(DIR_IDENTIFY, '{prefix}.ar53.markers_summary.tsv')
PATH_TLN_TABLE_SUMMARY = join(DIR_IDENTIFY, '{prefix}.translation_table_summary.tsv')
PATH_FAILS = join(DIR_IDENTIFY,'{prefix}.failed_genomes.tsv')
@@ -26,40 +26,44 @@
DIR_ALIGN = 'align'
DIR_ALIGN_INTERMEDIATE = join(DIR_ALIGN, 'intermediate_results')
PATH_BAC120_FILTERED_GENOMES = join(DIR_ALIGN, '{prefix}.bac120.filtered.tsv')
-PATH_AR122_FILTERED_GENOMES = join(DIR_ALIGN, '{prefix}.ar122.filtered.tsv')
+PATH_AR53_FILTERED_GENOMES = join(DIR_ALIGN, '{prefix}.ar53.filtered.tsv')
PATH_BAC120_MSA = join(DIR_ALIGN, '{prefix}.bac120.msa.fasta')
-PATH_AR122_MSA = join(DIR_ALIGN, '{prefix}.ar122.msa.fasta')
+PATH_AR53_MSA = join(DIR_ALIGN, '{prefix}.ar53.msa.fasta')
PATH_BAC120_USER_MSA = join(DIR_ALIGN, '{prefix}.bac120.user_msa.fasta')
-PATH_AR122_USER_MSA = join(DIR_ALIGN, '{prefix}.ar122.user_msa.fasta')
+PATH_AR53_USER_MSA = join(DIR_ALIGN, '{prefix}.ar53.user_msa.fasta')
PATH_BAC120_MARKER_INFO = join(DIR_ALIGN_INTERMEDIATE, '{prefix}.bac120.marker_info.tsv')
-PATH_AR122_MARKER_INFO = join(DIR_ALIGN_INTERMEDIATE, '{prefix}.ar122.marker_info.tsv')
+PATH_AR53_MARKER_INFO = join(DIR_ALIGN_INTERMEDIATE, '{prefix}.ar53.marker_info.tsv')
DIR_ALIGN_MARKERS = join(DIR_ALIGN_INTERMEDIATE, 'markers')
# Command: classify
DIR_CLASSIFY = 'classify'
PATH_BAC120_TREE_FILE = join(DIR_CLASSIFY, '{prefix}.bac120.classify.tree')
-PATH_AR122_TREE_FILE = join(DIR_CLASSIFY, '{prefix}.ar122.classify.tree')
+PATH_AR53_TREE_FILE = join(DIR_CLASSIFY, '{prefix}.ar53.classify.tree')
PATH_BAC120_SUMMARY_OUT = join(DIR_CLASSIFY, '{prefix}.bac120.summary.tsv')
-PATH_AR122_SUMMARY_OUT = join(DIR_CLASSIFY, '{prefix}.ar122.summary.tsv')
+PATH_AR53_SUMMARY_OUT = join(DIR_CLASSIFY, '{prefix}.ar53.summary.tsv')
PATH_HIGH_BAC120_TREE_FILE = join(DIR_CLASSIFY, '{prefix}.high.bac120.classify.tree')
PATH_LOW_BAC120_TREE_FILE = join(DIR_CLASSIFY, '{prefix}.bac120.classify.tree.{iter}.tree')
PATH_BAC120_CONFLICT = join(DIR_CLASSIFY, '{prefix}.bac120.conflict.tsv')
+PATH_AR53_DISAPPEARING_GENOMES = join(DIR_CLASSIFY, '{prefix}.ar53.disappearing_genomes.tsv')
+PATH_BAC120_DISAPPEARING_GENOMES = join(DIR_CLASSIFY, '{prefix}.bac120.disappearing_genomes.tsv')
DIR_CLASSIFY_INTERMEDIATE = join(DIR_CLASSIFY, 'intermediate_results')
PATH_BAC120_RED_DICT = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.bac120.red_dictionary.tsv')
-PATH_AR122_RED_DICT = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.ar122.red_dictionary.tsv')
+PATH_AR53_RED_DICT = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.ar53.red_dictionary.tsv')
PATH_BAC120_PPLACER_CLASS = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.bac120.classification_pplacer.tsv')
+PATH_AR53_PPLACER_CLASS = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.ar53.classification_pplacer.tsv')
PATH_BAC120_HIGH_PPLACER_CLASS = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.bac120.high.classification_pplacer.tsv')
PATH_BAC120_LOW_PPLACER_CLASS = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.bac120.low.classification_pplacer_tree_{iter}.tsv')
+PATH_BAC120_TREE_MAPPING = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.bac120.tree.mapping.tsv')
+
-PATH_AR122_PPLACER_CLASS = join(DIR_CLASSIFY_INTERMEDIATE, '{prefix}.ar122.classification_pplacer.tsv')
DIR_PPLACER = join(DIR_CLASSIFY_INTERMEDIATE, 'pplacer')
PATH_BAC120_PPLACER_OUT = join(DIR_PPLACER, 'pplacer.bac120.out')
-PATH_AR122_PPLACER_OUT = join(DIR_PPLACER, 'pplacer.ar122.out')
+PATH_AR53_PPLACER_OUT = join(DIR_PPLACER, 'pplacer.ar53.out')
PATH_BAC120_PPLACER_JSON = join(DIR_PPLACER, 'pplacer.bac120.json')
-PATH_AR122_PPLACER_JSON = join(DIR_PPLACER, 'pplacer.ar122.json')
+PATH_AR53_PPLACER_JSON = join(DIR_PPLACER, 'pplacer.ar53.json')
# SPLIT TREE
PATH_HIGH_BAC120_PPLACER_OUT = join(DIR_PPLACER, 'pplacer.high.bac120.out')
@@ -80,11 +84,11 @@
PATH_FASTTREE_LOG = join(DIR_INFER_INTERMEDIATE, '{prefix}.fasttree.log')
PATH_BAC120_UNROOTED_TREE = join(DIR_INFER_INTERMEDIATE, '{prefix}.bac120.unrooted.tree')
-PATH_AR122_UNROOTED_TREE = join(DIR_INFER_INTERMEDIATE, '{prefix}.ar122.unrooted.tree')
+PATH_AR53_UNROOTED_TREE = join(DIR_INFER_INTERMEDIATE, '{prefix}.ar53.unrooted.tree')
PATH_BAC120_ROOTED_TREE = join(DIR_INFER_INTERMEDIATE, '{prefix}.bac120.rooted.tree')
-PATH_AR122_ROOTED_TREE = join(DIR_INFER_INTERMEDIATE, '{prefix}.ar122.rooted.tree')
+PATH_AR53_ROOTED_TREE = join(DIR_INFER_INTERMEDIATE, '{prefix}.ar53.rooted.tree')
PATH_BAC120_DECORATED_TREE = join(DIR_INFER, '{prefix}.bac120.decorated.tree')
-PATH_AR122_DECORATED_TREE = join(DIR_INFER, '{prefix}.ar122.decorated.tree')
+PATH_AR53_DECORATED_TREE = join(DIR_INFER, '{prefix}.ar53.decorated.tree')
# Command: ani_rep
DIR_ANI_REP_INT = join('intermediate_results')
diff --git a/gtdbtk/exceptions.py b/gtdbtk/exceptions.py
index beaaeaf1..d20eb33c 100644
--- a/gtdbtk/exceptions.py
+++ b/gtdbtk/exceptions.py
@@ -59,7 +59,7 @@ def __init__(self, message=''):
class GenomeMarkerSetUnknown(GTDBTkException):
- """ Thrown when the genome marker set is unknown (i.e. not ar122, or bac120). """
+ """ Thrown when the genome marker set is unknown (i.e. not ar53, or bac120). """
def __init__(self, message=''):
GTDBTkException.__init__(self, message)
diff --git a/gtdbtk/external/fasttree.py b/gtdbtk/external/fasttree.py
index cd804507..29b952df 100644
--- a/gtdbtk/external/fasttree.py
+++ b/gtdbtk/external/fasttree.py
@@ -15,9 +15,12 @@
# #
###############################################################################
+import gzip
import logging
import os
+import shutil
import subprocess
+import tempfile
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.biolib_lite.execute import check_dependencies
@@ -103,16 +106,28 @@ def run(self, output_tree, tree_log, fasttree_log, prot_model, no_support, gamma
args.append('-log')
args.append(tree_log)
- args.append(msa_file)
self.logger.info('Inferring FastTree ({}) using a maximum of {} CPUs.'.format(
', '.join(model_out), cpus))
- with open(output_tree, 'w') as f_out_tree:
- with open(fasttree_log, 'w') as f_out_err:
- proc = subprocess.Popen(
- args, stdout=f_out_tree, stderr=f_out_err, env=env)
- proc.communicate()
+ # Use a temporary directory if the input file is gzipped
+ with tempfile.TemporaryDirectory(prefix='gtdbtk_') as tmp_dir:
+
+ # Uncompress the archive if it's compressed
+ if msa_file.endswith('.gz'):
+ msa_path = os.path.join(tmp_dir, os.path.basename(msa_file[0:-3]))
+ with gzip.open(msa_file, 'rb') as f_in:
+ with open(msa_path, 'wb') as f_out:
+ shutil.copyfileobj(f_in, f_out)
+ else:
+ msa_path = msa_file
+ args.append(msa_path)
+
+ with open(output_tree, 'w') as f_out_tree:
+ with open(fasttree_log, 'w') as f_out_err:
+ proc = subprocess.Popen(
+ args, stdout=f_out_tree, stderr=f_out_err, env=env)
+ proc.communicate()
# Validate results
if proc.returncode != 0:
diff --git a/gtdbtk/external/hmm_aligner.py b/gtdbtk/external/hmm_aligner.py
index c17f6a21..d3e91b51 100644
--- a/gtdbtk/external/hmm_aligner.py
+++ b/gtdbtk/external/hmm_aligner.py
@@ -24,7 +24,7 @@
from gtdbtk.biolib_lite.execute import check_dependencies
from gtdbtk.exceptions import GTDBTkException
-from gtdbtk.io.marker.copy_number import CopyNumberFileAR122, CopyNumberFileBAC120
+from gtdbtk.io.marker.copy_number import CopyNumberFileAR53, CopyNumberFileBAC120
from gtdbtk.io.marker.tophit import TopHitPfamFile, TopHitTigrFile
from gtdbtk.tools import tqdm_log
@@ -40,7 +40,7 @@ def __init__(self,
pfam_hmm_dir,
tigrfam_hmm_dir,
bac120_markers,
- ar122_markers):
+ ar53_markers):
"""Initialization."""
check_dependencies(['hmmalign'])
@@ -55,14 +55,14 @@ def __init__(self,
self.tigrfam_hmm_dir = tigrfam_hmm_dir
self.bac120_markers = bac120_markers
- self.ar122_markers = ar122_markers
+ self.ar53_markers = ar53_markers
self.marker_path_prefix = {"PFAM": os.path.join(self.pfam_hmm_dir,
'individual_hmms'),
"TIGRFAM": os.path.join(os.path.dirname(self.tigrfam_hmm_dir),
'individual_hmms')}
- self.ar122_marker_sizes = None
+ self.ar53_marker_sizes = None
self.bac120_marker_sizes = None
self.version = self.get_version()
@@ -85,15 +85,15 @@ def get_version():
return "(version unavailable)"
def _get_hmm_sizes(self):
- ar122, bac120 = dict(), dict()
- for marker_d, out_d in ((self.ar122_markers, ar122),
+ ar53, bac120 = dict(), dict()
+ for marker_d, out_d in ((self.ar53_markers, ar53),
(self.bac120_markers, bac120)):
for marker_type in ('PFAM', 'TIGRFAM'):
for marker_name in marker_d[marker_type]:
marker_path = os.path.join(self.marker_path_prefix[marker_type], marker_name)
marker_name_strip = marker_name.replace('.HMM', '').replace('.hmm', '')
out_d[marker_name_strip] = self._get_hmm_size(marker_path)
- self.ar122_marker_sizes = ar122
+ self.ar53_marker_sizes = ar53
self.bac120_marker_sizes = bac120
return
@@ -105,7 +105,7 @@ def align_marker_set(self, db_genome_ids, marker_set_id):
db_genome_ids : dict
A dictionary containing the genome ids and aa paths to process.
marker_set_id : str
- The marker set of these genomes (bac120/ar122).
+ The marker set of these genomes (bac120/ar53).
Returns
-------
@@ -211,9 +211,9 @@ def _run_multi_align(self, db_genome_id, path, marker_set_id):
if marker_set_id == 'bac120':
copy_number_file = CopyNumberFileBAC120('/dev/null', None)
marker_size_d = self.bac120_marker_sizes
- elif marker_set_id == 'ar122':
- copy_number_file = CopyNumberFileAR122('/dev/null', None)
- marker_size_d = self.ar122_marker_sizes
+ elif marker_set_id == 'ar53':
+ copy_number_file = CopyNumberFileAR53('/dev/null', None)
+ marker_size_d = self.ar53_marker_sizes
else:
raise GTDBTkException('Unknown marker set.')
@@ -231,12 +231,12 @@ def _run_multi_align(self, db_genome_id, path, marker_set_id):
os.path.join(
marker_paths[db_marker], marker)
for marker in self.bac120_markers[db_marker]})
- elif marker_set_id == "ar122":
- for db_marker in sorted(self.ar122_markers):
+ elif marker_set_id == "ar53":
+ for db_marker in sorted(self.ar53_markers):
marker_dict_original.update({marker.replace(".HMM", "").replace(".hmm", ""):
os.path.join(
marker_paths[db_marker], marker)
- for marker in self.ar122_markers[db_marker]})
+ for marker in self.ar53_markers[db_marker]})
elif marker_set_id == "rps23":
for db_marker in sorted(self.rps23_markers):
marker_dict_original.update({marker.replace(".HMM", "").replace(".hmm", ""):
diff --git a/gtdbtk/infer_ranks.py b/gtdbtk/infer_ranks.py
index 415a1b8d..7464ee29 100644
--- a/gtdbtk/infer_ranks.py
+++ b/gtdbtk/infer_ranks.py
@@ -25,7 +25,7 @@
RED_DIST_BAC_DICT,
RED_DIST_ARC_DICT,
MRCA_RED_BAC120,
- MRCA_RED_AR122,
+ MRCA_RED_AR53,
RED_INTERVAL)
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.relative_distance import RelativeDistance
@@ -102,7 +102,7 @@ def _find_ingroup_red(self, ingroup_node, ingroup_domain, tree):
red_file = MRCA_RED_BAC120
if ingroup_domain == 'd__Archaea':
- red_file = MRCA_RED_AR122
+ red_file = MRCA_RED_AR53
# create map from leave labels to tree nodes
leaf_node_map = {}
diff --git a/gtdbtk/io/classify_summary.py b/gtdbtk/io/classify_summary.py
index 87d3c150..d373f3d6 100644
--- a/gtdbtk/io/classify_summary.py
+++ b/gtdbtk/io/classify_summary.py
@@ -18,7 +18,7 @@
import logging
import os
-from gtdbtk.config.output import PATH_AR122_SUMMARY_OUT, PATH_BAC120_SUMMARY_OUT
+from gtdbtk.config.output import PATH_AR53_SUMMARY_OUT, PATH_BAC120_SUMMARY_OUT
from gtdbtk.exceptions import GTDBTkExit
@@ -146,12 +146,12 @@ def read(self):
self.add_row(row)
-class ClassifySummaryFileAR122(ClassifySummaryFile):
- """Store classify summary information for AR122 markers."""
+class ClassifySummaryFileAR53(ClassifySummaryFile):
+ """Store classify summary information for AR53 markers."""
def __init__(self, out_dir: str, prefix: str):
- path = os.path.join(out_dir, PATH_AR122_SUMMARY_OUT.format(prefix=prefix))
- super().__init__(path, 'ar122')
+ path = os.path.join(out_dir, PATH_AR53_SUMMARY_OUT.format(prefix=prefix))
+ super().__init__(path, 'ar53')
class ClassifySummaryFileBAC120(ClassifySummaryFile):
@@ -160,3 +160,5 @@ class ClassifySummaryFileBAC120(ClassifySummaryFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_BAC120_SUMMARY_OUT.format(prefix=prefix))
super().__init__(path, 'bac120')
+
+
diff --git a/gtdbtk/io/marker/copy_number.py b/gtdbtk/io/marker/copy_number.py
index b2396918..7eb8d131 100644
--- a/gtdbtk/io/marker/copy_number.py
+++ b/gtdbtk/io/marker/copy_number.py
@@ -21,8 +21,8 @@
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.biolib_lite.seq_io import read_fasta
-from gtdbtk.config.config import BAC120_MARKERS, AR122_MARKERS
-from gtdbtk.config.output import PATH_BAC120_MARKER_SUMMARY, PATH_AR122_MARKER_SUMMARY
+from gtdbtk.config.config import BAC120_MARKERS, AR53_MARKERS
+from gtdbtk.config.output import PATH_BAC120_MARKER_SUMMARY, PATH_AR53_MARKER_SUMMARY
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.io.marker.tophit import TopHitPfamFile, TopHitTigrFile, Hit
@@ -154,12 +154,12 @@ def read(self):
raise GTDBTkExit(f'The marker file is inconsistent: {self.path}')
-class CopyNumberFileAR122(CopyNumberFile):
- """Store hmm copy number information for AR122 markers."""
+class CopyNumberFileAR53(CopyNumberFile):
+ """Store hmm copy number information for AR53 markers."""
def __init__(self, out_dir: str, prefix: str):
- path = os.path.join(out_dir, PATH_AR122_MARKER_SUMMARY.format(prefix=prefix))
- super().__init__(path, 'ar122', AR122_MARKERS)
+ path = os.path.join(out_dir, PATH_AR53_MARKER_SUMMARY.format(prefix=prefix))
+ super().__init__(path, 'ar53', AR53_MARKERS)
class CopyNumberFileBAC120(CopyNumberFile):
diff --git a/gtdbtk/io/marker_info.py b/gtdbtk/io/marker_info.py
index f636873f..b5e07809 100644
--- a/gtdbtk/io/marker_info.py
+++ b/gtdbtk/io/marker_info.py
@@ -18,8 +18,8 @@
import os
from gtdbtk.biolib_lite.common import make_sure_path_exists
-from gtdbtk.config.config import AR122_MARKERS, BAC120_MARKERS, TIGRFAM_HMMS, PFAM_HMM_DIR
-from gtdbtk.config.output import PATH_AR122_MARKER_INFO, PATH_BAC120_MARKER_INFO
+from gtdbtk.config.config import AR53_MARKERS, BAC120_MARKERS, TIGRFAM_HMMS, PFAM_HMM_DIR
+from gtdbtk.config.output import PATH_AR53_MARKER_INFO, PATH_BAC120_MARKER_INFO
class MarkerInfoFile(object):
@@ -66,12 +66,12 @@ def write(self):
fh.write('\t'.join(row) + '\n')
-class MarkerInfoFileAR122(MarkerInfoFile):
- """Marker information for the AR122 marker set."""
+class MarkerInfoFileAR53(MarkerInfoFile):
+ """Marker information for the AR53 marker set."""
def __init__(self, out_dir: str, prefix: str):
- path = os.path.join(out_dir, PATH_AR122_MARKER_INFO.format(prefix=prefix))
- super().__init__(path, AR122_MARKERS)
+ path = os.path.join(out_dir, PATH_AR53_MARKER_INFO.format(prefix=prefix))
+ super().__init__(path, AR53_MARKERS)
class MarkerInfoFileBAC120(MarkerInfoFile):
diff --git a/gtdbtk/io/missing_genomes.py b/gtdbtk/io/missing_genomes.py
new file mode 100644
index 00000000..869fde53
--- /dev/null
+++ b/gtdbtk/io/missing_genomes.py
@@ -0,0 +1,63 @@
+###############################################################################
+# #
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU General Public License as published by #
+# the Free Software Foundation, either version 3 of the License, or #
+# (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU General Public License for more details. #
+# #
+# You should have received a copy of the GNU General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+# #
+###############################################################################
+
+import os
+from typing import Dict
+
+from gtdbtk.biolib_lite.common import make_sure_path_exists
+from gtdbtk.config.output import PATH_AR53_DISAPPEARING_GENOMES, PATH_BAC120_DISAPPEARING_GENOMES
+from gtdbtk.exceptions import GTDBTkExit
+
+
+class DisappearingGenomesFile(object):
+ """Store the GTDB-Tk RED dictionary."""
+
+ def __init__(self, path: str,domain: str):
+ self.path: str = path
+ self.domain: str = domain
+ self.data: Dict[str, str] = dict()
+ self.file_name: str = os.path.basename(path)
+
+ def add_genome(self, gid: str, tree_index: str):
+ """PlAdds the pplacer classification of a given genome."""
+ if gid in self.data:
+ raise GTDBTkExit(f'Warning! Attempting to add duplicate genome: {gid}')
+ self.data[gid] = tree_index
+
+ def write(self):
+ """Write the file to disk, note that domain is omitted."""
+ make_sure_path_exists(os.path.dirname(self.path))
+ with open(self.path, 'w') as fh:
+ fh.write(f'genome_id\tdomain\ttree_index\n')
+ for seqid, infos in self.data.items():
+ fh.write(f'{seqid}\t{self.domain}\t{infos}\n')
+
+
+class DisappearingGenomesFileAR53(DisappearingGenomesFile):
+ """Store the RED dictionary for the AR53 marker set."""
+
+ def __init__(self, out_dir: str, prefix: str):
+ path = os.path.join(out_dir, PATH_AR53_DISAPPEARING_GENOMES.format(prefix=prefix))
+ super().__init__(path,'archaea')
+
+
+class DisappearingGenomesFileBAC120(DisappearingGenomesFile):
+ """Store the RED dictionary for the BAC120 marker set."""
+
+ def __init__(self, out_dir: str, prefix: str):
+ path = os.path.join(out_dir, PATH_BAC120_DISAPPEARING_GENOMES.format(prefix=prefix))
+ super().__init__(path,'bacteria')
diff --git a/gtdbtk/io/pplacer_classification.py b/gtdbtk/io/pplacer_classification.py
index 997b02ef..4deae677 100644
--- a/gtdbtk/io/pplacer_classification.py
+++ b/gtdbtk/io/pplacer_classification.py
@@ -19,7 +19,8 @@
from typing import Dict
from gtdbtk.biolib_lite.common import make_sure_path_exists
-from gtdbtk.config.output import PATH_AR122_PPLACER_CLASS, PATH_BAC120_PPLACER_CLASS
+from gtdbtk.config.output import PATH_AR53_PPLACER_CLASS, PATH_BAC120_PPLACER_CLASS, PATH_BAC120_HIGH_PPLACER_CLASS, \
+ PATH_BAC120_LOW_PPLACER_CLASS
from gtdbtk.exceptions import GTDBTkExit
@@ -30,6 +31,7 @@ def __init__(self, path: str):
self.path: str = path
self.data: Dict[str, str] = dict()
+
def add_genome(self, gid: str, tax_str: str):
"""Adds the pplacer classification of a given genome."""
if gid in self.data:
@@ -39,16 +41,17 @@ def add_genome(self, gid: str, tax_str: str):
def write(self):
"""Write the file to disk."""
make_sure_path_exists(os.path.dirname(self.path))
- with open(self.path, 'w') as fh:
- for gid, tax_str in sorted(self.data.items()):
- fh.write(f'{gid}\t{tax_str}\n')
+ if len(self.data) > 0 :
+ with open(self.path, 'w') as fh:
+ for gid, tax_str in sorted(self.data.items()):
+ fh.write(f'{gid}\t{tax_str}\n')
-class PplacerClassifyFileAR122(PplacerClassifyFile):
- """Store the pplacer classifications for the AR122 marker set."""
+class PplacerClassifyFileAR53(PplacerClassifyFile):
+ """Store the pplacer classifications for the AR53 marker set."""
def __init__(self, out_dir: str, prefix: str):
- path = os.path.join(out_dir, PATH_AR122_PPLACER_CLASS.format(prefix=prefix))
+ path = os.path.join(out_dir, PATH_AR53_PPLACER_CLASS.format(prefix=prefix))
super().__init__(path)
@@ -58,3 +61,67 @@ class PplacerClassifyFileBAC120(PplacerClassifyFile):
def __init__(self, out_dir: str, prefix: str):
path = os.path.join(out_dir, PATH_BAC120_PPLACER_CLASS.format(prefix=prefix))
super().__init__(path)
+
+class PplacerLowClassifyFileBAC120(PplacerClassifyFile):
+ """Store the pplacer classifications for the BAC120 marker set."""
+
+ def __init__(self, out_dir: str, prefix: str,iter:str):
+ path = os.path.join(out_dir, PATH_BAC120_LOW_PPLACER_CLASS.format(prefix=prefix,iter=iter))
+ super().__init__(path)
+
+
+class PplacerHighClassifyRow(object):
+ """Initialise the row, default all of the values to None."""
+
+ def __init__(self):
+ self.gid = None
+ self.gtdb_taxonomy_red = None
+ self.gtdb_taxonomy_terminal = None
+ self.pplacer_taxonomy = None
+ self.is_terminal = None
+ self.red = None
+
+
+class PplacerHighClassifyFile(object):
+ """Store the pplacer classifications."""
+
+ def __init__(self,out_dir: str,prefix: str):
+ self.path = os.path.join(out_dir, PATH_BAC120_HIGH_PPLACER_CLASS.format(prefix=prefix))
+ self.rows = dict() # keyed by user_genome
+ self.none_value = 'N/A'
+
+ def add_row(self, row: PplacerHighClassifyRow):
+ if row.gid in self.rows:
+ raise GTDBTkExit(f'Attempting to add duplicate row: {row.gid}')
+ self.rows[row.gid] = row
+
+ @staticmethod
+ def get_col_order(row: PplacerHighClassifyRow = None):
+ """Return the column order that will be written. If a row is provided
+ then format the row in that specific order."""
+ if row is None:
+ row = PplacerHighClassifyRow()
+ mapping = [('user_genome', row.gid),
+ ('gtdb_taxonomy_red', row.gtdb_taxonomy_red),
+ ('gtdb_taxonomy_terminal', row.gtdb_taxonomy_terminal),
+ ('pplacer_taxonomy', row.pplacer_taxonomy),
+ ('is_terminal', row.is_terminal),
+ ('red', row.red)]
+ cols, data = list(), list()
+ for col_name, col_val in mapping:
+ cols.append(col_name)
+ data.append(col_val)
+ return cols, data
+
+
+ def write(self):
+ """Write the file to disk."""
+ make_sure_path_exists(os.path.dirname(self.path))
+ cols = ['gid','gtdb_taxonomy','pplacer_taxonomy','is_terminal','red']
+ with open(self.path, 'w') as fh:
+ fh.write('\t'.join(self.get_col_order()[0]) + '\n')
+ for gid, row in sorted(self.rows.items()):
+ buf = list()
+ for data in self.get_col_order(row)[1]:
+ buf.append(self.none_value if data is None else str(data))
+ fh.write('\t'.join(buf) + '\n')
diff --git a/gtdbtk/io/red_dict.py b/gtdbtk/io/red_dict.py
index 03bec75b..989b64ec 100644
--- a/gtdbtk/io/red_dict.py
+++ b/gtdbtk/io/red_dict.py
@@ -20,7 +20,7 @@
from gtdbtk.biolib_lite.common import make_sure_path_exists
from gtdbtk.config.config import RED_DIST_ARC_DICT, RED_DIST_BAC_DICT
-from gtdbtk.config.output import PATH_AR122_RED_DICT, PATH_BAC120_RED_DICT
+from gtdbtk.config.output import PATH_AR53_RED_DICT, PATH_BAC120_RED_DICT
class REDDictFile(object):
@@ -41,11 +41,11 @@ def write(self):
fh.write(f'Genus\t{self.data["g__"]}\n')
-class REDDictFileAR122(REDDictFile):
- """Store the RED dictionary for the AR122 marker set."""
+class REDDictFileAR53(REDDictFile):
+ """Store the RED dictionary for the AR53 marker set."""
def __init__(self, out_dir: str, prefix: str):
- path = os.path.join(out_dir, PATH_AR122_RED_DICT.format(prefix=prefix))
+ path = os.path.join(out_dir, PATH_AR53_RED_DICT.format(prefix=prefix))
super().__init__(path, RED_DIST_ARC_DICT)
diff --git a/gtdbtk/io/tree_mapping.py b/gtdbtk/io/tree_mapping.py
new file mode 100644
index 00000000..8880ccbe
--- /dev/null
+++ b/gtdbtk/io/tree_mapping.py
@@ -0,0 +1,91 @@
+###############################################################################
+# #
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU General Public License as published by #
+# the Free Software Foundation, either version 3 of the License, or #
+# (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU General Public License for more details. #
+# #
+# You should have received a copy of the GNU General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+# #
+###############################################################################
+
+import logging
+import os
+
+from gtdbtk.config.output import PATH_BAC120_TREE_MAPPING
+from gtdbtk.exceptions import GTDBTkExit
+
+
+class GenomeMappingFileRow(object):
+ """A row contained within the GenomeMappingFile object."""
+
+ def __init__(self):
+ self.gid = None
+ self.ani_classification = None
+ self.mapped_tree = None
+
+class GenomeMappingFile(object):
+ """Store the GTDB-Tk classify summary output."""
+
+ def __init__(self, out_dir: str,prefix: str):
+ self.logger = logging.getLogger('timestamp')
+ self.path = os.path.join(out_dir, PATH_BAC120_TREE_MAPPING.format(prefix=prefix))
+ self.rows = dict() # keyed by user_genome
+ self.none_value = 'N/A'
+
+ @staticmethod
+ def get_col_order(row: GenomeMappingFileRow = None):
+ """Return the column order that will be written. If a row is provided
+ then format the row in that specific order."""
+ if row is None:
+ row = GenomeMappingFileRow()
+ mapping = [('user_genome', row.gid),
+ ('is_ani_classification', row.ani_classification),
+ ('species_tree_mapped', row.mapped_tree)]
+ cols, data = list(), list()
+ for col_name, col_val in mapping:
+ cols.append(col_name)
+ data.append(col_val)
+ return cols, data
+
+ def add_row(self, row: GenomeMappingFileRow):
+ if row.gid in self.rows:
+ raise GTDBTkExit(f'Attempting to add duplicate row: {row.gid}')
+ self.rows[row.gid] = row
+
+ def write(self):
+ """Writes the summary file to disk. None will be replaced with N/A"""
+ with open(self.path, 'w') as fh:
+ fh.write('\t'.join(self.get_col_order()[0]) + '\n')
+ for gid, row in sorted(self.rows.items()):
+ buf = list()
+ for data in self.get_col_order(row)[1]:
+ buf.append(self.none_value if data is None else str(data))
+ fh.write('\t'.join(buf) + '\n')
+
+ def read(self):
+ """Read the summary file from disk."""
+ if not os.path.isfile(self.path):
+ raise GTDBTkExit(f'Error, classify tree mappings file not found: {self.path}')
+ with open(self.path) as fh:
+
+ # Load and verify the columns match the expected order.
+ cols_exp, _ = self.get_col_order()
+ cols_cur = fh.readline().strip().split('\t')
+ if cols_exp != cols_cur:
+ raise GTDBTkExit(f'The classify tree mappings columns are inconsistent: {cols_cur}')
+
+ # Process the data.
+ for line in fh.readlines():
+ data = line.strip().split('\t')
+ row = GenomeMappingFileRow()
+ row.gid = data[0]
+ row.ani_classification = data[1]
+ row.mapped_tree = data[2]
+ self.add_row(row)
\ No newline at end of file
diff --git a/gtdbtk/main.py b/gtdbtk/main.py
index ee1dab70..ecc788af 100644
--- a/gtdbtk/main.py
+++ b/gtdbtk/main.py
@@ -41,7 +41,7 @@
from gtdbtk.external.fasttree import FastTree
from gtdbtk.infer_ranks import InferRanks
from gtdbtk.io.batchfile import Batchfile
-from gtdbtk.io.classify_summary import ClassifySummaryFileAR122
+from gtdbtk.io.classify_summary import ClassifySummaryFileAR53
from gtdbtk.markers import Markers
from gtdbtk.misc import Misc
from gtdbtk.model.enum import Domain
@@ -273,6 +273,7 @@ def identify(self, options):
options.out_dir,
options.prefix,
options.force,
+ options.genes,
options.write_single_copy_genes)
self.logger.info('Done.')
@@ -395,7 +396,7 @@ def run_test(self, options):
shutil.copytree(input_dir, genome_test_dir)
args = ['gtdbtk', 'classify_wf', '--genome_dir', genome_test_dir,
- '--out_dir', output_dir, '--cpus', str(options.cpus)]
+ '--out_dir', output_dir, '--cpus', str(options.cpus), '-f']
self.logger.info('Command: {}'.format(' '.join(args)))
# Pipe the output and write to disk.
@@ -414,7 +415,7 @@ def run_test(self, options):
proc.wait()
exit_code = proc.returncode
- summary_fh = ClassifySummaryFileAR122(output_dir, 'gtdbtk')
+ summary_fh = ClassifySummaryFileAR53(output_dir, 'gtdbtk')
if exit_code != 0:
self.logger.error('The test returned a non-zero exit code.')
@@ -460,9 +461,13 @@ def classify(self, options):
prefix=options.prefix,
scratch_dir=options.scratch_dir,
debugopt=options.debug,
- splittreeopt=options.split_tree,
+ fulltreeopt=options.full_tree,
recalculate_red=False)
+ self.logger.info('Note that Tk classification mode is insufficient for publication of new taxonomic '
+ 'designations. New designations should be based on one or more de novo trees, an '
+ 'example of which can be produced by Tk in de novo mode.')
+
self.logger.info('Done.')
def trim_msa(self, options):
@@ -566,14 +571,14 @@ def decorate(self, options):
os.path.join(options.out_dir,
os.path.basename(
PATH_BAC120_DECORATED_TREE.format(prefix=options.prefix) + '-table')))
- elif options.suffix == 'ar122':
- symlink_f(PATH_AR122_DECORATED_TREE.format(prefix=options.prefix),
+ elif options.suffix == 'ar53':
+ symlink_f(PATH_AR53_DECORATED_TREE.format(prefix=options.prefix),
os.path.join(options.out_dir,
- os.path.basename(PATH_AR122_DECORATED_TREE.format(prefix=options.prefix))))
- symlink_f(PATH_AR122_DECORATED_TREE.format(prefix=options.prefix) + '-table',
+ os.path.basename(PATH_AR53_DECORATED_TREE.format(prefix=options.prefix))))
+ symlink_f(PATH_AR53_DECORATED_TREE.format(prefix=options.prefix) + '-table',
os.path.join(options.out_dir,
os.path.basename(
- PATH_AR122_DECORATED_TREE.format(prefix=options.prefix) + '-table')))
+ PATH_AR53_DECORATED_TREE.format(prefix=options.prefix) + '-table')))
else:
raise GenomeMarkerSetUnknown(
'There was an error determining the marker set.')
@@ -623,6 +628,47 @@ def ani_rep(self, options):
self.logger.info('Done.')
+ def convert_to_itol(self, options):
+ """Convert Tree to iTOL format.
+
+ Parameters
+ ----------
+ options : argparse.Namespace
+ The CLI arguments input by the user.
+ """
+ check_file_exists(options.input_tree)
+
+ r = Misc()
+ r.convert_to_itol(options.input_tree, options.output_tree)
+ self.logger.info('Done.')
+
+ def remove_labels(self, options):
+ """Remove labels from tree.
+
+ Parameters
+ ----------
+ options : argparse.Namespace
+ The CLI arguments input by the user.
+ """
+
+ check_file_exists(options.input_tree)
+
+ r = Misc()
+ r.remove_labels(options.input_tree, options.output_tree)
+ self.logger.info('Done.')
+
+ def remove_intermediate_files(self,out_dir,workflow_name):
+ """Remove intermediate files from the output directory.
+ Parameters
+ ----------
+ out_dir : str
+ The output directory.
+ """
+
+ misc = Misc()
+ misc.remove_intermediate_files(out_dir,workflow_name)
+ self.logger.info('Done.')
+
def parse_options(self, options):
"""Parse user options and call the correct pipeline(s)
@@ -664,15 +710,26 @@ def parse_options(self, options):
if options.bacteria:
options.suffix = "bac120"
else:
- options.suffix = "ar122"
+ options.suffix = "ar53"
if options.skip_gtdb_refs:
if options.suffix == 'bac120':
- options.msa_file = os.path.join(
- options.out_dir, PATH_BAC120_USER_MSA.format(prefix=options.prefix))
- elif options.suffix == 'ar122':
- options.msa_file = os.path.join(
- options.out_dir, PATH_AR122_USER_MSA.format(prefix=options.prefix))
+ if os.path.isfile(os.path.join(options.out_dir,
+ PATH_BAC120_USER_MSA.format(prefix=options.prefix))):
+ options.msa_file = os.path.join(options.out_dir,
+ PATH_BAC120_USER_MSA.format(prefix=options.prefix))
+ else:
+ options.msa_file = os.path.join(options.out_dir,
+ PATH_BAC120_USER_MSA.format(prefix=options.prefix) + '.gz')
+
+ elif options.suffix == 'ar53':
+ if os.path.isfile(os.path.join(options.out_dir,
+ PATH_AR53_USER_MSA.format(prefix=options.prefix))):
+ options.msa_file = os.path.join(options.out_dir,
+ PATH_AR53_USER_MSA.format(prefix=options.prefix))
+ else:
+ options.msa_file = os.path.join(options.out_dir,
+ PATH_AR53_USER_MSA.format(prefix=options.prefix) + '.gz')
else:
self.logger.error(
'There was an error determining the marker set.')
@@ -680,11 +737,21 @@ def parse_options(self, options):
'Unknown marker set: {}'.format(options.suffix))
else:
if options.suffix == 'bac120':
- options.msa_file = os.path.join(
- options.out_dir, PATH_BAC120_MSA.format(prefix=options.prefix))
- elif options.suffix == 'ar122':
- options.msa_file = os.path.join(
- options.out_dir, PATH_AR122_MSA.format(prefix=options.prefix))
+ if os.path.isfile(os.path.join(
+ options.out_dir, PATH_BAC120_MSA.format(prefix=options.prefix))):
+ options.msa_file = os.path.join(
+ options.out_dir, PATH_BAC120_MSA.format(prefix=options.prefix))
+ else:
+ options.msa_file = os.path.join(
+ options.out_dir, PATH_BAC120_MSA.format(prefix=options.prefix) + '.gz')
+ elif options.suffix == 'ar53':
+ if os.path.isfile(os.path.join(
+ options.out_dir, PATH_AR53_MSA.format(prefix=options.prefix))):
+ options.msa_file = os.path.join(
+ options.out_dir, PATH_AR53_MSA.format(prefix=options.prefix))
+ else:
+ options.msa_file = os.path.join(
+ options.out_dir, PATH_AR53_MSA.format(prefix=options.prefix) + '.gz')
else:
self.logger.error(
'There was an error determining the marker set.')
@@ -698,11 +765,11 @@ def parse_options(self, options):
PATH_BAC120_UNROOTED_TREE.format(prefix=options.prefix))
options.output_tree = os.path.join(options.out_dir,
PATH_BAC120_ROOTED_TREE.format(prefix=options.prefix))
- elif options.suffix == 'ar122':
+ elif options.suffix == 'ar53':
options.input_tree = os.path.join(options.out_dir,
- PATH_AR122_UNROOTED_TREE.format(prefix=options.prefix))
+ PATH_AR53_UNROOTED_TREE.format(prefix=options.prefix))
options.output_tree = os.path.join(options.out_dir,
- PATH_AR122_ROOTED_TREE.format(prefix=options.prefix))
+ PATH_AR53_ROOTED_TREE.format(prefix=options.prefix))
self.root(options)
@@ -711,21 +778,18 @@ def parse_options(self, options):
PATH_BAC120_ROOTED_TREE.format(prefix=options.prefix))
options.output_tree = os.path.join(options.out_dir,
PATH_BAC120_DECORATED_TREE.format(prefix=options.prefix))
- elif options.suffix == 'ar122':
+ elif options.suffix == 'ar53':
options.input_tree = os.path.join(options.out_dir,
- PATH_AR122_ROOTED_TREE.format(prefix=options.prefix))
+ PATH_AR53_ROOTED_TREE.format(prefix=options.prefix))
options.output_tree = os.path.join(options.out_dir,
- PATH_AR122_DECORATED_TREE.format(prefix=options.prefix))
+ PATH_AR53_DECORATED_TREE.format(prefix=options.prefix))
self.decorate(options)
- elif options.subparser_name == 'classify_wf':
+ if not options.keep_intermediates:
+ self.remove_intermediate_files(options.out_dir,'de_novo_wf')
- # TODO: Remove this block once the split_tree function is implemented.
- if hasattr(options, 'split_tree') and options.split_tree:
- self.logger.warning('The split tree option is not yet '
- ' supported, overriding value to False.')
- options.split_tree = False
+ elif options.subparser_name == 'classify_wf':
check_dependencies(['prodigal', 'hmmalign', 'pplacer', 'guppy',
'fastANI'])
@@ -750,6 +814,9 @@ def parse_options(self, options):
self.align(options)
self.classify(options)
+ if not options.keep_intermediates:
+ self.remove_intermediate_files(options.out_dir,'classify_wf')
+
elif options.subparser_name == 'identify':
self.identify(options)
elif options.subparser_name == 'align':
@@ -759,10 +826,10 @@ def parse_options(self, options):
elif options.subparser_name == 'classify':
# TODO: Remove this block once the split_tree function is implemented.
- if hasattr(options, 'split_tree') and options.split_tree:
- self.logger.warning('The split tree option is not yet '
- ' supported, overriding value to False.')
- options.split_tree = False
+ # if hasattr(options, 'split_tree') and options.split_tree:
+ # self.logger.warning('The split tree option is not yet '
+ # ' supported, overriding value to False.')
+ # options.split_tree = False
# if options.recalculate_red and options.split_tree:
# raise GTDBTkExit('--split_tree and --recalculate_red are mutually exclusive.')
@@ -775,6 +842,10 @@ def parse_options(self, options):
self.infer_ranks(options)
elif options.subparser_name == 'ani_rep':
self.ani_rep(options)
+ elif options.subparser_name == 'remove_labels':
+ self.remove_labels(options)
+ elif options.subparser_name == 'convert_to_itol':
+ self.convert_to_itol(options)
elif options.subparser_name == 'trim_msa':
self.trim_msa(options)
elif options.subparser_name == 'export_msa':
diff --git a/gtdbtk/markers.py b/gtdbtk/markers.py
index 8677617e..579e5055 100644
--- a/gtdbtk/markers.py
+++ b/gtdbtk/markers.py
@@ -21,6 +21,7 @@
from shutil import copy
from typing import Dict, Tuple, Optional
+import gzip
import numpy as np
import gtdbtk.config.config as Config
@@ -33,9 +34,9 @@
from gtdbtk.external.pfam_search import PfamSearch
from gtdbtk.external.prodigal import Prodigal
from gtdbtk.external.tigrfam_search import TigrfamSearch
-from gtdbtk.io.marker.copy_number import CopyNumberFileAR122, CopyNumberFileBAC120
+from gtdbtk.io.marker.copy_number import CopyNumberFileAR53, CopyNumberFileBAC120
from gtdbtk.io.marker.tophit import TopHitPfamFile, TopHitTigrFile
-from gtdbtk.io.marker_info import MarkerInfoFileAR122, MarkerInfoFileBAC120
+from gtdbtk.io.marker_info import MarkerInfoFileAR53, MarkerInfoFileBAC120
from gtdbtk.io.prodigal.tln_table import TlnTableFile
from gtdbtk.io.prodigal.tln_table_summary import TlnTableSummaryFile
from gtdbtk.pipeline import align
@@ -57,7 +58,6 @@ def __init__(self, cpus=1, debug=False):
self.marker_gene_dir = None
self.failed_genomes = None
-
self.genome_file_suffix = GENOME_FILE_SUFFIX
self.protein_file_suffix = PROTEIN_FILE_SUFFIX
self.nt_gene_file_suffix = NT_GENE_FILE_SUFFIX
@@ -78,9 +78,9 @@ def _report_identified_marker_genes(self, gene_dict, outdir, prefix,
write_single_copy_genes):
"""Report statistics for identified marker genes."""
- # Summarise the copy number of each AR122 and BAC120 markers.
+ # Summarise the copy number of each AR53 and BAC120 markers.
tln_summary_file = TlnTableSummaryFile(outdir, prefix)
- ar122_copy_number_file = CopyNumberFileAR122(outdir, prefix)
+ ar53_copy_number_file = CopyNumberFileAR53(outdir, prefix)
bac120_copy_number_file = CopyNumberFileBAC120(outdir, prefix)
# Process each genome.
@@ -92,39 +92,41 @@ def _report_identified_marker_genes(self, gene_dict, outdir, prefix,
tigr_tophit_file.read()
# Summarise each of the markers for this genome.
- ar122_copy_number_file.add_genome(db_genome_id, info.get("aa_gene_path"),
- pfam_tophit_file, tigr_tophit_file)
+ ar53_copy_number_file.add_genome(db_genome_id, info.get("aa_gene_path"),
+ pfam_tophit_file, tigr_tophit_file)
bac120_copy_number_file.add_genome(db_genome_id, info.get("aa_gene_path"),
pfam_tophit_file, tigr_tophit_file)
# Write the best translation table to disk for this genome.
- tln_summary_file.add_genome(db_genome_id, info.get("best_translation_table"))
+ tln_summary_file.add_genome(
+ db_genome_id, info.get("best_translation_table"))
# Write each of the summary files to disk.
- ar122_copy_number_file.write()
+ ar53_copy_number_file.write()
bac120_copy_number_file.write()
tln_summary_file.write()
# Create a symlink to store the summary files in the root.
- symlink_f(PATH_BAC120_MARKER_SUMMARY.format(prefix=prefix),
- os.path.join(outdir, os.path.basename(PATH_BAC120_MARKER_SUMMARY.format(prefix=prefix))))
- symlink_f(PATH_AR122_MARKER_SUMMARY.format(prefix=prefix),
- os.path.join(outdir, os.path.basename(PATH_AR122_MARKER_SUMMARY.format(prefix=prefix))))
- symlink_f(PATH_TLN_TABLE_SUMMARY.format(prefix=prefix),
- os.path.join(outdir, os.path.basename(PATH_TLN_TABLE_SUMMARY.format(prefix=prefix))))
+ # symlink_f(PATH_BAC120_MARKER_SUMMARY.format(prefix=prefix),
+ # os.path.join(outdir, os.path.basename(PATH_BAC120_MARKER_SUMMARY.format(prefix=prefix))))
+ # symlink_f(PATH_AR53_MARKER_SUMMARY.format(prefix=prefix),
+ # os.path.join(outdir, os.path.basename(PATH_AR53_MARKER_SUMMARY.format(prefix=prefix))))
+ # symlink_f(PATH_TLN_TABLE_SUMMARY.format(prefix=prefix),
+ # os.path.join(outdir, os.path.basename(PATH_TLN_TABLE_SUMMARY.format(prefix=prefix))))
symlink_f(PATH_FAILS.format(prefix=prefix),
os.path.join(outdir, os.path.basename(PATH_FAILS.format(prefix=prefix))))
- # Write the single copy AR122/BAC120 FASTA files to disk.
+ # Write the single copy AR53/BAC120 FASTA files to disk.
if write_single_copy_genes:
fasta_dir = os.path.join(outdir, DIR_IDENTIFY_FASTA)
- self.logger.info(f'Writing unaligned single-copy genes to: {fasta_dir}')
+ self.logger.info(
+ f'Writing unaligned single-copy genes to: {fasta_dir}')
# Iterate over each domain.
marker_doms = list()
- marker_doms.append((Config.AR122_MARKERS['PFAM'] +
- Config.AR122_MARKERS['TIGRFAM'],
- ar122_copy_number_file, 'ar122'))
+ marker_doms.append((Config.AR53_MARKERS['PFAM'] +
+ Config.AR53_MARKERS['TIGRFAM'],
+ ar53_copy_number_file, 'ar53'))
marker_doms.append((Config.BAC120_MARKERS['PFAM'] +
Config.BAC120_MARKERS['TIGRFAM'],
bac120_copy_number_file, 'bac120'))
@@ -137,7 +139,8 @@ def _report_identified_marker_genes(self, gene_dict, outdir, prefix,
# Iterate over each marker.
for marker_name in marker_names:
marker_name = marker_name.rstrip(r'\.[HMMhmm]')
- marker_path = os.path.join(fasta_d_dir, f'{marker_name}.fa')
+ marker_path = os.path.join(
+ fasta_d_dir, f'{marker_name}.fa')
to_write = list()
for genome_id in sorted(gene_dict):
@@ -150,7 +153,7 @@ def _report_identified_marker_genes(self, gene_dict, outdir, prefix,
with open(marker_path, 'w') as fh:
fh.write('\n'.join(to_write))
- def identify(self, genomes, tln_tables, out_dir, prefix, force, write_single_copy_genes):
+ def identify(self, genomes, tln_tables, out_dir, prefix, force, genes, write_single_copy_genes):
"""Identify marker genes in genomes.
Parameters
@@ -165,8 +168,10 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, write_single_cop
Prefix to append to generated files.
force : bool
Overwrite any existing files.
+ genes : bool
+ True if the supplied genomes are called genes, False otherwise.
write_single_copy_genes : bool
- Write unique AR122/BAC120 marker files to disk.
+ Write unique AR53/BAC120 marker files to disk.
Raises
------
@@ -180,19 +185,35 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, write_single_cop
f'{self.cpus} threads.')
self.marker_gene_dir = os.path.join(out_dir, DIR_MARKER_GENE)
- self.failed_genomes = os.path.join(out_dir, PATH_FAILS.format(prefix=prefix))
- prodigal = Prodigal(self.cpus,
- self.failed_genomes,
- self.marker_gene_dir,
- self.protein_file_suffix,
- self.nt_gene_file_suffix,
- self.gff_file_suffix,
- force)
- self.logger.log(Config.LOG_TASK, f'Running Prodigal {prodigal.version} to identify genes.')
- genome_dictionary = prodigal.run(genomes, tln_tables)
+ self.failed_genomes = os.path.join(
+ out_dir, PATH_FAILS.format(prefix=prefix))
+
+ if not genes:
+ prodigal = Prodigal(self.cpus,
+ self.failed_genomes,
+ self.marker_gene_dir,
+ self.protein_file_suffix,
+ self.nt_gene_file_suffix,
+ self.gff_file_suffix,
+ force)
+ self.logger.log(
+ Config.LOG_TASK, f'Running Prodigal {prodigal.version} to identify genes.')
+ genome_dictionary = prodigal.run(genomes, tln_tables)
+
+ else:
+ self.logger.info(
+ 'Using supplied genomes as called genes, skipping Prodigal.')
+ genome_dictionary = dict()
+ for gid, gpath in genomes.items():
+ genome_dictionary[gid] = {'aa_gene_path': gpath,
+ 'translation_table_path': None,
+ 'nt_gene_path': None,
+ 'best_translation_table': 'user_supplied',
+ 'gff_path': None}
# annotated genes against TIGRFAM and Pfam databases
- self.logger.log(Config.LOG_TASK, 'Identifying TIGRFAM protein families.')
+ self.logger.log(Config.LOG_TASK,
+ 'Identifying TIGRFAM protein families.')
gene_files = [genome_dictionary[db_genome_id]['aa_gene_path']
for db_genome_id in genome_dictionary.keys()]
tigr_search = TigrfamSearch(self.cpus,
@@ -213,9 +234,11 @@ def identify(self, genomes, tln_tables, out_dir, prefix, force, write_single_cop
self.checksum_suffix,
self.marker_gene_dir)
pfam_search.run(gene_files)
- self.logger.info(f'Annotations done using HMMER {tigr_search.version}.')
+ self.logger.info(
+ f'Annotations done using HMMER {tigr_search.version}.')
- self.logger.log(Config.LOG_TASK, 'Summarising identified marker genes.')
+ self.logger.log(Config.LOG_TASK,
+ 'Summarising identified marker genes.')
self._report_identified_marker_genes(genome_dictionary, out_dir, prefix,
write_single_copy_genes)
@@ -280,7 +303,8 @@ def _msa_filter_by_taxa(self, concatenated_file: str,
msa = read_fasta(concatenated_file)
msa_len = len(msa)
- self.logger.info(f'Read concatenated alignment for {msa_len:,} GTDB genomes.')
+ self.logger.info(
+ f'Read concatenated alignment for {msa_len:,} GTDB genomes.')
if taxa_filter is not None:
taxa_to_keep = set(taxa_filter.split(','))
@@ -312,7 +336,7 @@ def _apply_mask(self, gtdb_msa, user_msa, msa_mask, min_perc_aa):
list_seq = np.fromiter(seq, dtype='S1')
if list_mask.shape[0] != list_seq.shape[0]:
raise MSAMaskLengthMismatch(
- 'Mask and alignment length do not match.')
+ f'Mask ({list_mask.shape[0]}) and alignment ({list_seq.shape[0]}) length do not match.')
list_masked_seq = list_seq[list_mask]
@@ -323,7 +347,8 @@ def _apply_mask(self, gtdb_msa, user_msa, msa_mask, min_perc_aa):
masked_seq = list_masked_seq.tostring().decode('utf-8')
- valid_bases = list_masked_seq.shape[0] - masked_seq_counts['.'] - masked_seq_counts['-']
+ valid_bases = list_masked_seq.shape[0] - \
+ masked_seq_counts['.'] - masked_seq_counts['-']
if seq_id in user_msa and valid_bases < list_masked_seq.shape[0] * min_perc_aa:
pruned_seqs[seq_id] = masked_seq
continue
@@ -332,17 +357,28 @@ def _apply_mask(self, gtdb_msa, user_msa, msa_mask, min_perc_aa):
return output_seqs, pruned_seqs
- def _write_msa(self, seqs, output_file, gtdb_taxonomy):
+ def _write_msa(self, seqs, output_file, gtdb_taxonomy, zip_output=False):
"""Write sequences to FASTA file."""
- with open(output_file, 'w') as fout:
- for genome_id, alignment in sorted(seqs.items()):
- if genome_id in gtdb_taxonomy:
- fout.write('>%s %s\n' %
- (genome_id, ';'.join(gtdb_taxonomy[genome_id])))
- else:
- fout.write('>%s\n' % genome_id)
- fout.write('%s\n' % alignment)
+ if zip_output:
+ output_file_gz = output_file + '.gz'
+ with gzip.open(output_file_gz, 'w') as fgz:
+ for genome_id, alignment in sorted(seqs.items()):
+ if genome_id in gtdb_taxonomy:
+ fgz.write(
+ f">{genome_id} {';'.join(gtdb_taxonomy[genome_id])}\n".encode())
+ else:
+ fgz.write(f">{genome_id}\n".encode())
+ fgz.write(f'{alignment}\n'.encode())
+ else:
+ with open(output_file, 'w') as fout:
+ for genome_id, alignment in sorted(seqs.items()):
+ if genome_id in gtdb_taxonomy:
+ fout.write('>%s %s\n' %
+ (genome_id, ';'.join(gtdb_taxonomy[genome_id])))
+ else:
+ fout.write('>%s\n' % genome_id)
+ fout.write('%s\n' % alignment)
def genome_domain(self, identity_dir, prefix):
"""Determine domain of User genomes based on identified marker genes."""
@@ -350,16 +386,17 @@ def genome_domain(self, identity_dir, prefix):
ar_count = defaultdict(int)
# Load the marker files for each domain
- ar122_marker_file = CopyNumberFileAR122(identity_dir, prefix)
- ar122_marker_file.read()
+ ar53_marker_file = CopyNumberFileAR53(identity_dir, prefix)
+ ar53_marker_file.read()
bac120_marker_file = CopyNumberFileBAC120(identity_dir, prefix)
bac120_marker_file.read()
# Get the number of single copy markers for each domain
- for out_d, marker_summary in ((ar_count, ar122_marker_file),
+ for out_d, marker_summary in ((ar_count, ar53_marker_file),
(bac_count, bac120_marker_file)):
for genome_id in marker_summary.genomes:
- out_d[genome_id] = len(marker_summary.get_single_copy_hits(genome_id))
+ out_d[genome_id] = len(
+ marker_summary.get_single_copy_hits(genome_id))
bac_gids = set()
ar_gids = set()
@@ -373,7 +410,7 @@ def genome_domain(self, identity_dir, prefix):
ar_gids.add(gid)
if abs(bac_aa_per - arc_aa_per) <= 10:
bac_ar_diff[gid] = {'bac120': round(
- bac_aa_per, 1), 'ar122': round(arc_aa_per, 1)}
+ bac_aa_per, 1), 'ar53': round(arc_aa_per, 1)}
return bac_gids, ar_gids, bac_ar_diff
@@ -422,7 +459,8 @@ def align(self,
"""Align marker genes in genomes."""
# read genomes that failed identify steps to skip them
- failed_genomes_file = os.path.join(os.path.join(identify_dir,os.path.basename(PATH_FAILS.format(prefix=prefix))))
+ failed_genomes_file = os.path.join(os.path.join(
+ identify_dir, PATH_FAILS.format(prefix=prefix)))
if os.path.isfile(failed_genomes_file):
with open(failed_genomes_file) as fgf:
failed_genomes = [row.split()[0] for row in fgf]
@@ -430,7 +468,8 @@ def align(self,
failed_genomes = list()
# If the user is re-running this step, check if the identify step is consistent.
- genomic_files = self._path_to_identify_data(identify_dir, identify_dir != out_dir)
+ genomic_files = self._path_to_identify_data(
+ identify_dir, identify_dir != out_dir)
if genomes_to_process is not None and len(genomic_files) != len(genomes_to_process):
if list(set(genomic_files.keys()) - set(genomes_to_process.keys())).sort() != failed_genomes.sort():
self.logger.error('{} are not present in the input list of genome to process.'.format(
@@ -445,20 +484,21 @@ def align(self,
identify_path = os.path.join(out_dir, DIR_IDENTIFY)
make_sure_path_exists(identify_path)
copy(CopyNumberFileBAC120(identify_dir, prefix).path, identify_path)
- copy(CopyNumberFileAR122(identify_dir, prefix).path, identify_path)
+ copy(CopyNumberFileAR53(identify_dir, prefix).path, identify_path)
copy(TlnTableSummaryFile(identify_dir, prefix).path, identify_path)
# Create the align intermediate directory.
make_sure_path_exists(os.path.join(out_dir, DIR_ALIGN_INTERMEDIATE))
# Write out files with marker information
- ar122_marker_info_file = MarkerInfoFileAR122(out_dir, prefix)
- ar122_marker_info_file.write()
+ ar53_marker_info_file = MarkerInfoFileAR53(out_dir, prefix)
+ ar53_marker_info_file.write()
bac120_marker_info_file = MarkerInfoFileBAC120(out_dir, prefix)
bac120_marker_info_file.write()
# Determine what domain each genome belongs to.
- bac_gids, ar_gids, _bac_ar_diff = self.genome_domain(identify_dir, prefix)
+ bac_gids, ar_gids, _bac_ar_diff = self.genome_domain(
+ identify_dir, prefix)
if len(bac_gids) + len(ar_gids) == 0:
raise GTDBTkExit(f'Unable to assign a domain to any genomes, '
f'please check the identify marker summary file, '
@@ -473,12 +513,13 @@ def align(self,
# f'genomes identified as archaeal.')
# align.concat_single_copy_hits(dir_tmp_arc,
# cur_gid_dict,
- # ar122_marker_info_file)
+ # ar53_marker_info_file)
#
- self.logger.info(f'Aligning markers in {len(genomic_files):,} genomes with {self.cpus} CPUs.')
+ self.logger.info(
+ f'Aligning markers in {len(genomic_files):,} genomes with {self.cpus} CPUs.')
dom_iter = ((bac_gids, Config.CONCAT_BAC120, Config.MASK_BAC120, "bac120", 'bacterial', CopyNumberFileBAC120),
- (ar_gids, Config.CONCAT_AR122, Config.MASK_AR122, "ar122", 'archaeal', CopyNumberFileAR122))
+ (ar_gids, Config.CONCAT_AR53, Config.MASK_AR53, "ar53", 'archaeal', CopyNumberFileAR53))
gtdb_taxonomy = Taxonomy().read(self.taxonomy_file)
for gids, msa_file, mask_file, marker_set_id, domain_str, copy_number_f in dom_iter:
@@ -486,7 +527,8 @@ def align(self,
if len(gids) == 0:
continue
- self.logger.info(f'Processing {len(gids):,} genomes identified as {domain_str}.')
+ self.logger.info(
+ f'Processing {len(gids):,} genomes identified as {domain_str}.')
if marker_set_id == 'bac120':
marker_info_file = bac120_marker_info_file
marker_filtered_genomes = os.path.join(
@@ -496,13 +538,13 @@ def align(self,
marker_user_msa_path = os.path.join(
out_dir, PATH_BAC120_USER_MSA.format(prefix=prefix))
else:
- marker_info_file = ar122_marker_info_file
+ marker_info_file = ar53_marker_info_file
marker_filtered_genomes = os.path.join(
- out_dir, PATH_AR122_FILTERED_GENOMES.format(prefix=prefix))
+ out_dir, PATH_AR53_FILTERED_GENOMES.format(prefix=prefix))
marker_msa_path = os.path.join(
- out_dir, PATH_AR122_MSA.format(prefix=prefix))
+ out_dir, PATH_AR53_MSA.format(prefix=prefix))
marker_user_msa_path = os.path.join(
- out_dir, PATH_AR122_USER_MSA.format(prefix=prefix))
+ out_dir, PATH_AR53_USER_MSA.format(prefix=prefix))
cur_genome_files = {
gid: f for gid, f in genomic_files.items() if gid in gids}
@@ -517,9 +559,11 @@ def align(self,
gtdb_msa_mask = os.path.join(Config.MASK_DIR, mask_file)
# Generate the user MSA.
- user_msa = align.align_marker_set(cur_genome_files, marker_info_file, copy_number_f, self.cpus)
+ user_msa = align.align_marker_set(
+ cur_genome_files, marker_info_file, copy_number_f, self.cpus)
if len(user_msa) == 0:
- self.logger.warning(f'Identified {len(user_msa):,} single copy {domain_str} hits.')
+ self.logger.warning(
+ f'Identified {len(user_msa):,} single copy {domain_str} hits.')
continue
# Write the individual marker alignments to disk
@@ -529,13 +573,15 @@ def align(self,
# filter columns without sufficient representation across taxa
if skip_trimming:
- self.logger.info('Skipping custom filtering and selection of columns.')
+ self.logger.info(
+ 'Skipping custom filtering and selection of columns.')
pruned_seqs = {}
trimmed_seqs = merge_two_dicts(gtdb_msa, user_msa)
elif custom_msa_filters:
aligned_genomes = merge_two_dicts(gtdb_msa, user_msa)
- self.logger.info('Performing custom filtering and selection of columns.')
+ self.logger.info(
+ 'Performing custom filtering and selection of columns.')
trim_msa = TrimMSA(cols_per_gene,
min_perc_aa / 100.0,
@@ -560,19 +606,19 @@ def align(self,
filtered_user_genomes = set(
pruned_seqs).intersection(user_msa)
if len(filtered_user_genomes):
- self.logger.info('Filtered genomes include {:.} user submitted genomes.'.format(len(
- filtered_user_genomes)))
+ self.logger.info(
+ f'Filtered genomes include {len(filtered_user_genomes)} user submitted genomes.')
else:
self.logger.log(Config.LOG_TASK,
- f'Masking columns of {domain_str} multiple sequence alignment using canonical mask.')
+ f'Masking columns of {domain_str} multiple sequence alignment using canonical mask.')
trimmed_seqs, pruned_seqs = self._apply_mask(gtdb_msa,
user_msa,
gtdb_msa_mask,
min_perc_aa / 100.0)
self.logger.info('Masked {} alignment from {:,} to {:,} AAs.'.format(
- domain_str,
- len(list(user_msa.values())[0]),
- len(list(trimmed_seqs.values())[0])))
+ domain_str,
+ len(list(user_msa.values())[0]),
+ len(list(trimmed_seqs.values())[0])))
if min_perc_aa > 0:
self.logger.info('{:,} {} user genomes have amino acids in <{:.1f}% of columns in filtered MSA.'.format(
@@ -586,15 +632,18 @@ def align(self,
if len(pruned_seq) == 0:
perc_alignment = 0
else:
- valid_bases = sum([1 for c in pruned_seq if c.isalpha()])
+ valid_bases = sum(
+ [1 for c in pruned_seq if c.isalpha()])
perc_alignment = valid_bases * 100.0 / len(pruned_seq)
- fout.write(f'{pruned_seq_id}\tInsufficient number of amino acids in MSA ({perc_alignment:.1f}%)\n')
+ fout.write(
+ f'{pruned_seq_id}\tInsufficient number of amino acids in MSA ({perc_alignment:.1f}%)\n')
# write out MSAs
if not skip_gtdb_refs:
self.logger.info(f'Creating concatenated alignment for {len(trimmed_seqs):,} '
f'{domain_str} GTDB and user genomes.')
- self._write_msa(trimmed_seqs, marker_msa_path, gtdb_taxonomy)
+ self._write_msa(trimmed_seqs, marker_msa_path,
+ gtdb_taxonomy, zip_output=True)
trimmed_user_msa = {k: v for k, v in trimmed_seqs.items()
if k in user_msa}
@@ -602,31 +651,32 @@ def align(self,
self.logger.info(f'Creating concatenated alignment for {len(trimmed_user_msa):,} '
f'{domain_str} user genomes.')
self._write_msa(trimmed_user_msa,
- marker_user_msa_path, gtdb_taxonomy)
+ marker_user_msa_path, gtdb_taxonomy, zip_output=True)
else:
- self.logger.info(f'All {domain_str} user genomes have been filtered out.')
+ self.logger.info(
+ f'All {domain_str} user genomes have been filtered out.')
# Create symlinks to the summary files
- if marker_set_id == 'bac120':
- symlink_f(PATH_BAC120_FILTERED_GENOMES.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_BAC120_FILTERED_GENOMES.format(prefix=prefix))))
- if len(trimmed_user_msa) > 0:
- symlink_f(PATH_BAC120_USER_MSA.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_BAC120_USER_MSA.format(prefix=prefix))))
- if not skip_gtdb_refs:
- symlink_f(PATH_BAC120_MSA.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_BAC120_MSA.format(prefix=prefix))))
- elif marker_set_id == 'ar122':
- symlink_f(PATH_AR122_FILTERED_GENOMES.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_AR122_FILTERED_GENOMES.format(prefix=prefix))))
- if len(trimmed_user_msa) > 0:
- symlink_f(PATH_AR122_USER_MSA.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_AR122_USER_MSA.format(prefix=prefix))))
- if not skip_gtdb_refs:
- symlink_f(PATH_AR122_MSA.format(prefix=prefix),
- os.path.join(out_dir, os.path.basename(PATH_AR122_MSA.format(prefix=prefix))))
- else:
- raise GenomeMarkerSetUnknown('There was an error determining the marker set.')
+ # if marker_set_id == 'bac120':
+ # symlink_f(PATH_BAC120_FILTERED_GENOMES.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_BAC120_FILTERED_GENOMES.format(prefix=prefix))))
+ # if len(trimmed_user_msa) > 0:
+ # symlink_f(PATH_BAC120_USER_MSA.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_BAC120_USER_MSA.format(prefix=prefix))))
+ # if not skip_gtdb_refs:
+ # symlink_f(PATH_BAC120_MSA.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_BAC120_MSA.format(prefix=prefix))))
+ # elif marker_set_id == 'ar53':
+ # symlink_f(PATH_AR53_FILTERED_GENOMES.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_AR53_FILTERED_GENOMES.format(prefix=prefix))))
+ # if len(trimmed_user_msa) > 0:
+ # symlink_f(PATH_AR53_USER_MSA.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_AR53_USER_MSA.format(prefix=prefix))))
+ # if not skip_gtdb_refs:
+ # symlink_f(PATH_AR53_MSA.format(prefix=prefix),
+ # os.path.join(out_dir, os.path.basename(PATH_AR53_MSA.format(prefix=prefix))))
+ # else:
+ # raise GenomeMarkerSetUnknown('There was an error determining the marker set.')
def _write_individual_markers(self, user_msa, marker_set_id, marker_list, out_dir, prefix):
marker_dir = join(out_dir, DIR_ALIGN_MARKERS)
@@ -636,7 +686,8 @@ def _write_individual_markers(self, user_msa, marker_set_id, marker_list, out_di
marker_to_msa = dict()
offset = 0
for marker_id, marker_desc, marker_len in sorted(markers, key=lambda x: x[0]):
- path_msa = os.path.join(marker_dir, f'{prefix}.{marker_set_id}.{marker_id}.faa')
+ path_msa = os.path.join(
+ marker_dir, f'{prefix}.{marker_set_id}.{marker_id}.faa')
marker_to_msa[path_msa] = defaultdict(str)
for gid, msa in user_msa.items():
marker_to_msa[path_msa][gid] += msa[offset: marker_len + offset]
diff --git a/gtdbtk/misc.py b/gtdbtk/misc.py
index cf2e9e8d..32457539 100644
--- a/gtdbtk/misc.py
+++ b/gtdbtk/misc.py
@@ -18,10 +18,16 @@
import logging
import os
+import shutil
+
+import dendropy
+
import gtdbtk.config.config as Config
from gtdbtk.biolib_lite.execute import check_dependencies
from gtdbtk.biolib_lite.logger import colour
+from gtdbtk.biolib_lite.newick import parse_label
from gtdbtk.biolib_lite.seq_io import read_fasta
+from gtdbtk.config.output import DIR_CLASSIFY_INTERMEDIATE, DIR_ALIGN_INTERMEDIATE, DIR_IDENTIFY_INTERMEDIATE
from gtdbtk.exceptions import GTDBTkException, GTDBTkExit
from gtdbtk.tools import sha1_dir
@@ -49,7 +55,7 @@ def trim_msa(self, untrimmed_msa, mask_type, maskid, output_file):
if maskid == 'bac' and mask_type == 'reference':
mask = os.path.join(Config.MASK_DIR, Config.MASK_BAC120)
elif maskid == 'arc' and mask_type == 'reference':
- mask = os.path.join(Config.MASK_DIR, Config.MASK_AR122)
+ mask = os.path.join(Config.MASK_DIR, Config.MASK_AR53)
elif mask_type == 'file':
mask = maskid
else:
@@ -101,6 +107,89 @@ def checkfolder(self, folder_path, folder_name):
folder_name, folder_path, colour('MISSING', ['bright'], fg='red')))
return False
+ def remove_labels(self, input_file, output_file):
+ """Remove labels from a Newick Tree.
+
+ Parameters
+ ----------
+ input_file : str
+ The path to the input Newick tree.
+ output_file : str
+ The path to the output Newick tree.
+ """
+
+ self.logger.info("Removing labels from tree {}".format(input_file))
+ intree= dendropy.Tree.get_from_path(input_file,
+ schema='newick',
+ rooting='force-rooted',
+ preserve_underscores=True)
+
+ for node in intree.internal_nodes():
+ node.label = None
+
+ intree.write_to_path(output_file, schema='newick', suppress_rooting=True,unquoted_underscores=True)
+
+
+ def convert_to_itol(self, input_file, output_file):
+        """Convert a GTDB-Tk Newick tree to iTOL-compatible format.
+
+ Parameters
+ ----------
+ input_file : str
+ The path to the input Newick tree.
+ output_file : str
+ The path to the output Newick tree.
+ """
+
+ self.logger.info("Convert GTDB-Tk tree to iTOL format")
+ intree= dendropy.Tree.get_from_path(input_file,
+ schema='newick',
+ rooting='force-rooted',
+ preserve_underscores=True)
+
+ for node in intree.internal_nodes():
+ if node.label:
+ bootstrap,label,_aux = parse_label(node.label)
+ if label:
+ label = label.replace('; ',';').replace(';','|').replace("'","").lstrip('')
+ node.label = label
+ if node.edge.length:
+ node.edge.length = f'{node.edge.length}[{bootstrap}]'
+
+ intree.write_to_path(output_file, schema='newick', suppress_rooting=True,unquoted_underscores=True)
+
+
+ def remove_intermediate_files(self,output_dir,wf_name):
+ """Remove intermediate files.
+
+ Parameters
+ ----------
+ output_dir : str
+ The path to the output directory.
+ wf_name : str
+ The name of the workflow to delete intermediate files.
+ """
+ self.logger.info('Removing intermediate files.')
+ #Remove identify step intermediate files
+ intermediate_identify = os.path.join(output_dir, DIR_IDENTIFY_INTERMEDIATE)
+ if os.path.exists(intermediate_identify) and os.path.isdir(intermediate_identify):
+ shutil.rmtree(intermediate_identify)
+ #Remove align step intermediate files
+ intermediate_align = os.path.join(output_dir, DIR_ALIGN_INTERMEDIATE)
+ if os.path.exists(intermediate_align) and os.path.isdir(intermediate_align):
+ shutil.rmtree(intermediate_align)
+ if wf_name == 'classify_wf':
+ #Remove classify step intermediate files
+ intermediate_classify = os.path.join(output_dir, DIR_CLASSIFY_INTERMEDIATE)
+ if os.path.exists(intermediate_classify) and os.path.isdir(intermediate_classify):
+ shutil.rmtree(intermediate_classify)
+ elif wf_name == 'de_novo_wf':
+            #Remove infer step intermediate files
+ intermediate_infer = os.path.join(output_dir, DIR_ALIGN_INTERMEDIATE)
+ if os.path.exists(intermediate_infer) and os.path.isdir(intermediate_infer):
+ shutil.rmtree(intermediate_infer)
+ self.logger.info('Intermediate files removed.')
+
def check_install(self):
"""Check that all reference files exist.
diff --git a/gtdbtk/pipeline/align.py b/gtdbtk/pipeline/align.py
index 28c1990f..af16cc0d 100644
--- a/gtdbtk/pipeline/align.py
+++ b/gtdbtk/pipeline/align.py
@@ -75,7 +75,7 @@ def get_single_copy_hits(gid_dict: dict, copy_number_file, cpus):
copy_number_file))
# Process the queue.
- with mp.get_context('spawn').Pool(processes=cpus) as pool:
+ with mp.Pool(processes=cpus) as pool:
results = list(tqdm_log(pool.imap_unordered(get_single_copy_hits_worker, queue),
total=len(queue), unit='genome'))
@@ -238,7 +238,7 @@ def align_marker_set(gid_dict, marker_info_file: MarkerInfoFile, copy_number_fil
marker_info_file.markers[marker_id]['path'],
marker_path,
frozenset(single_copy_hits[marker_id])))
- with mp.get_context('spawn').Pool(processes=cpus) as pool:
+ with mp.Pool(processes=cpus) as pool:
results = list(tqdm_log(pool.imap_unordered(run_hmm_align_worker, queue),
total=len(queue), unit='marker'))
diff --git a/gtdbtk/pipeline/export_msa.py b/gtdbtk/pipeline/export_msa.py
index e65d9693..31340998 100644
--- a/gtdbtk/pipeline/export_msa.py
+++ b/gtdbtk/pipeline/export_msa.py
@@ -2,7 +2,7 @@
from shutil import copyfile
from gtdbtk.biolib_lite.common import make_sure_path_exists
-from gtdbtk.config.config import CONCAT_AR122, CONCAT_BAC120
+from gtdbtk.config.config import CONCAT_AR53, CONCAT_BAC120
from gtdbtk.exceptions import GTDBTkExit
from gtdbtk.model.enum import Domain
@@ -14,7 +14,7 @@ def export_msa(domain: Domain, output_file: str):
:param output_file: The path to write the MSA.
"""
if domain is Domain.ARCHAEA:
- file_to_export = CONCAT_AR122
+ file_to_export = CONCAT_AR53
elif domain is Domain.BACTERIA:
file_to_export = CONCAT_BAC120
else:
diff --git a/gtdbtk/split.py b/gtdbtk/split.py
new file mode 100644
index 00000000..bdfc7c96
--- /dev/null
+++ b/gtdbtk/split.py
@@ -0,0 +1,367 @@
+###############################################################################
+# #
+# This program is free software: you can redistribute it and/or modify #
+# it under the terms of the GNU General Public License as published by #
+# the Free Software Foundation, either version 3 of the License, or #
+# (at your option) any later version. #
+# #
+# This program is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
+# GNU General Public License for more details. #
+# #
+# You should have received a copy of the GNU General Public License #
+# along with this program. If not, see <http://www.gnu.org/licenses/>. #
+# #
+###############################################################################
+import os
+
+import logging
+
+import gtdbtk.config.config as Config
+from gtdbtk.biolib_lite.common import make_sure_path_exists
+from gtdbtk.biolib_lite.newick import parse_label
+from gtdbtk.biolib_lite.seq_io import read_fasta
+from gtdbtk.config.output import *
+from gtdbtk.exceptions import GenomeMarkerSetUnknown, GTDBTkExit
+from gtdbtk.io.classify_summary import ClassifySummaryFileRow
+from gtdbtk.io.pplacer_classification import PplacerHighClassifyRow, PplacerHighClassifyFile
+from gtdbtk.tools import TreeTraversal, standardise_taxonomy
+
+
+class Split(object):
+ """Determine taxonomic classification of genomes by ML placement using the Split Methods."""
+
+ def __init__(self, order_rank, gtdb_taxonomy, reference_ids):
+ """Initialize."""
+ self.order_rank = order_rank
+ self.logger = logging.getLogger('timestamp')
+ self.gtdb_taxonomy = gtdb_taxonomy
+ self.reference_ids = reference_ids
+
+ # rank_of_interest determine the rank in the tree_mapping file for
+ # lower classification
+ self.rank_of_interest = "o__"
+
+ def get_high_pplacer_taxonomy(self, out_dir, marker_set_id, prefix, user_msa_file, tree):
+ """Parse the pplacer tree and write the partial taxonomy for each user genome based on their placements
+
+ Parameters
+ ----------
+ out_dir : output directory
+ prefix : desired prefix for output files
+ marker_set_id : bacterial or archaeal id (bac120 or ar53)
+ user_msa_file : msa file listing all user genomes for a certain domain
+ tree : pplacer tree including the user genomes
+
+ Returns
+ -------
+ dictionary[genome_label]=pplacer_taxonomy
+
+ """
+ results = {}
+ out_root = os.path.join(out_dir, 'classify', 'intermediate_results')
+ make_sure_path_exists(out_root)
+
+ if marker_set_id == 'bac120':
+ out_pplacer = PplacerHighClassifyFile(out_dir,prefix)
+ else:
+ self.logger.error('There was an error determining the marker set.')
+ raise GenomeMarkerSetUnknown
+
+ red_bac_dict = Config.RED_DIST_BAC_DICT
+
+ # We get the pplacer taxonomy for comparison
+ user_genome_ids = set(read_fasta(user_msa_file).keys())
+ for leaf in tree.leaf_node_iter():
+
+ is_on_terminal_branch = False
+ terminal_branch_test = False
+ term_branch_taxonomy = ''
+ if leaf.taxon.label in user_genome_ids:
+ pplacer_row = PplacerHighClassifyRow()
+ taxa = []
+ cur_node = leaf
+ current_rel_dist = 1.0
+ # every user genomes has a RED value of one assigned to it
+ while cur_node.parent_node:
+ # we go up the tree from the user genome
+ if hasattr(cur_node, 'rel_dist') and current_rel_dist == 1.0 and cur_node.rel_dist < 1.0:
+ # if the parent node of the current genome has a red distance,
+ # it means it is part of the reference tree
+ # we store the first RED value encountered in the
+ # tree
+ current_rel_dist = cur_node.rel_dist
+ if cur_node.is_internal():
+ # We check if the genome is place on a terminal
+ # branch
+
+ if not terminal_branch_test:
+ child_genomes = [nd.taxon.label for nd in cur_node.leaf_nodes(
+ ) if nd.taxon.label not in user_genome_ids]
+ if len(child_genomes) == 1:
+ is_on_terminal_branch = True
+ term_branch_taxonomy = self.gtdb_taxonomy.get(
+ child_genomes[0])
+ terminal_branch_test = True
+ if len(child_genomes) > 1:
+ terminal_branch_test = True
+ # While going up the tree we store of taxonomy
+ # information
+ _support, taxon, _aux_info = parse_label(
+ cur_node.label)
+ if taxon:
+ for t in taxon.split(';')[::-1]:
+ taxa.append(t.strip())
+ cur_node = cur_node.parent_node
+
+ taxa_str = ';'.join(taxa[::-1])
+
+ pplacer_tax = str(taxa_str)
+
+ taxa_str_terminal,taxa_str_red = '',''
+
+ if is_on_terminal_branch:
+ # some rank may be missing from going up the tree.
+ # if the genome is on a terminal branch,
+ # we can select the taxonomy from the reference leaf to get the low level of the taxonomy
+ # we select down to genus
+ if len(taxa) > 1:
+ tax_of_leaf = term_branch_taxonomy[term_branch_taxonomy.index(
+ taxa_str.split(';')[-1]) + 1:-1]
+ else:
+ tax_of_leaf = term_branch_taxonomy[1:-1]
+ taxa_str = 'd__Bacteria'
+
+ taxa_str_terminal = self._classify_on_terminal_branch(
+ tax_of_leaf, current_rel_dist, taxa_str.split(';')[-1][0:3], term_branch_taxonomy,
+ red_bac_dict)
+
+ cur_node = leaf
+ parent_taxon_node = cur_node.parent_node
+ _support, parent_taxon, _aux_info = parse_label(
+ parent_taxon_node.label)
+
+ while parent_taxon_node is not None and not parent_taxon:
+ parent_taxon_node = parent_taxon_node.parent_node
+ _support, parent_taxon, _aux_info = parse_label(
+ parent_taxon_node.label)
+
+ # is the node represent multiple ranks, we select the lowest one
+ # i.e. if node is p__A;c__B;o__C we pick o__
+ parent_rank = parent_taxon.split(";")[-1]
+
+ if parent_rank[0:3] != 'g__':
+ node_in_ref_tree = cur_node
+ while len([childnd.taxon.label.replace("'", '') for childnd in node_in_ref_tree.leaf_iter(
+ ) if childnd.taxon.label in self.reference_ids]) == 0:
+ node_in_ref_tree = node_in_ref_tree.parent_node
+ # we select a node of the reference tree
+
+ # we select the child rank (if parent_rank = 'c__'
+ # child rank will be 'o__)'
+ child_rk = self.order_rank[self.order_rank.index(
+ parent_rank[0:3]) + 1]
+
+ # get all reference genomes under the current node
+ list_subnode = [childnd.taxon.label.replace("'", '') for childnd in
+ node_in_ref_tree.leaf_iter()
+ if childnd.taxon.label in self.reference_ids]
+
+ # get all names for the child rank
+ list_ranks = [self.gtdb_taxonomy.get(name)[self.order_rank.index(child_rk)]
+ for name in list_subnode]
+
+ # if there is just one rank name
+ if len(set(list_ranks)) == 1:
+ child_taxons = []
+ child_rel_dist = None
+ for subranknd in node_in_ref_tree.preorder_iter():
+ _support, subranknd_taxon, _aux_info = parse_label(
+ subranknd.label)
+ if subranknd.is_internal() and subranknd_taxon is not None and subranknd_taxon.startswith(
+ child_rk):
+ child_taxons = subranknd_taxon.split(
+ ";")
+ child_taxon_node = subranknd
+ child_rel_dist = child_taxon_node.rel_dist
+ break
+
+ taxa_str_red, taxa_str_terminal = self._classify_on_internal_branch(leaf.taxon.label,
+ child_taxons,
+ current_rel_dist,
+ child_rel_dist,
+ node_in_ref_tree,
+ parent_rank, child_rk,
+ taxa_str,
+ taxa_str_terminal,
+ is_on_terminal_branch,
+ red_bac_dict)
+ else:
+ taxa_str_red = taxa_str
+
+
+ results[leaf.taxon.label] = {"tk_tax_red": standardise_taxonomy(taxa_str_red, 'bac120'),
+ "tk_tax_terminal": standardise_taxonomy(taxa_str_terminal,
+ 'bac120'),
+ "pplacer_tax": standardise_taxonomy(pplacer_tax, 'bac120'),
+ 'rel_dist': current_rel_dist}
+
+ pplacer_row.gid = leaf.taxon.label
+ pplacer_row.gtdb_taxonomy_red = standardise_taxonomy(taxa_str_red, 'bac120')
+ pplacer_row.gtdb_taxonomy_terminal = standardise_taxonomy(taxa_str_terminal, 'bac120')
+ pplacer_row.pplacer_taxonomy = standardise_taxonomy(pplacer_tax, 'bac120')
+ pplacer_row.is_terminal = is_on_terminal_branch
+ pplacer_row.red = current_rel_dist
+
+ out_pplacer.add_row(pplacer_row)
+
+ out_pplacer.write()
+ return results
+
+ def _classify_on_internal_branch(self, leaf, child_taxons, current_rel_list, child_rel_dist, node_in_ref_tree,
+ parent_rank, child_rk, taxa_str, taxa_str_terminal, is_on_terminal_branch,
+ red_bac_dict):
+ """
+ Classification on an internal node is very similar to the 'normal' classification
+ """
+
+ # Persist descendant information for efficient traversal.
+ tt = TreeTraversal()
+
+ closest_rank = None
+
+ if len(child_taxons) == 0:
+ list_leaves = [childnd.taxon.label.replace("'", '')
+ for childnd in tt.get_leaf_nodes(node_in_ref_tree)
+ if childnd.taxon.label in self.reference_ids]
+ if len(list_leaves) != 1:
+ list_subrank = []
+ for leaf_subrank in list_leaves:
+ list_subrank.append(self.gtdb_taxonomy.get(leaf_subrank)
+ [self.order_rank.index(parent_rank) + 1])
+ if len(set(list_subrank)) == 1:
+ print(leaf.taxon.label)
+ print(list_leaves)
+ print(list_subrank)
+ raise GTDBTkExit('There should be only one leaf.')
+ else:
+ closest_rank = parent_rank
+ detection = "taxonomic classification fully defined by topology"
+ list_leaf_ranks = self.gtdb_taxonomy.get(list_leaves[0])[
+ self.order_rank.index(child_rk):-1] # We remove the species name
+
+ for leaf_taxon in reversed(list_leaf_ranks):
+ leaf_taxon_rank = leaf_taxon[:3]
+ if leaf_taxon == list_leaf_ranks[0]:
+ if abs(current_rel_list - red_bac_dict.get(leaf_taxon_rank)) < abs(
+ current_rel_list - red_bac_dict.get(parent_rank[:3])):
+ closest_rank = leaf_taxon
+ break
+ else:
+ pchildrank = list_leaf_ranks[list_leaf_ranks.index(leaf_taxon) - 1]
+ if abs(current_rel_list - red_bac_dict.get(leaf_taxon_rank)) < abs(
+ current_rel_list - red_bac_dict.get(pchildrank[:3])):
+ closest_rank = leaf_taxon
+ break
+ if closest_rank is None:
+ closest_rank = parent_rank
+ # if there is multiple ranks on the child node (i.e genome between p__Nitrospirae and c__Nitrospiria;o__Nitrospirales;f__Nitropiraceae)
+ # we loop through the list of rank from f_ to c_ rank
+ for child_taxon in reversed(child_taxons):
+ child_taxon_rank = child_taxon[:3]
+ if child_taxon == child_taxons[0]:
+ if (abs(current_rel_list - red_bac_dict.get(child_taxon_rank)) < abs(
+ child_rel_dist - red_bac_dict.get(child_taxon_rank)) and
+ abs(current_rel_list - red_bac_dict.get(child_taxon_rank)) < abs(
+ current_rel_list - red_bac_dict.get(parent_rank[:3]))):
+ closest_rank = child_taxon
+ elif closest_rank is None:
+ closest_rank = parent_rank
+ else:
+ pchildrank = child_taxons[child_taxons.index(
+ child_taxon) - 1]
+ if (abs(current_rel_list - red_bac_dict.get(child_taxon_rank)) < abs(
+ current_rel_list - red_bac_dict.get(child_taxon_rank)) and
+ abs(current_rel_list - red_bac_dict.get(child_taxon_rank)) < abs(
+ child_rel_dist - red_bac_dict.get(child_taxon_rank))):
+ closest_rank = child_taxon
+ break
+ if closest_rank is not None:
+ # when we have the closest rank found, we can find it in
+ # gtdb_Taxonomy and get the higher level from it.
+ for k, v in self.gtdb_taxonomy.items():
+ if closest_rank in v:
+ taxa_str = ';'.join(v[1:v.index(closest_rank) + 1])
+ # All classification should be at least to the order level if a genome
+ # is placed on a internal branch with only one order under
+ if any(x.startswith('o__') for x in child_taxons) \
+ and self.order_rank.index(closest_rank[0:3]) < self.order_rank.index('o__') \
+ and ('o__' in taxa_str_terminal.split(';') or not is_on_terminal_branch):
+ taxa_str_terminal = ';'.join(v[1:self.order_rank.index('o__') + 1])
+ break
+
+ return taxa_str, taxa_str_terminal
+
+ def _classify_on_terminal_branch(self, list_leaf_ranks, current_rel_list, parent_rank, term_branch_taxonomy,
+ red_bac_dict):
+ """
+ When a genome is on a terminal branch, we can guess the low level of its taxonomy,
+ based on the RED value
+ :param list_leaf_ranks: Taxonomy of the reference leaf
+ :param current_rel_list: RED value for the genome of interest
+ :param parent_rank: Parent rank of the genome of interest
+ :param term_branch_taxonomy: Full taxonomy of the branch , down to genus
+ :param red_bac_dict: RED dictionary for Bacteria
+ :return: taxonomy for the genome of interest based on RED
+ """
+ closest_rank = None
+ for leaf_taxon in reversed(list_leaf_ranks):
+ if leaf_taxon == list_leaf_ranks[0]:
+ if abs(current_rel_list - red_bac_dict.get(leaf_taxon[:3])) < abs(
+ current_rel_list - red_bac_dict.get(parent_rank)):
+ closest_rank = leaf_taxon[:3]
+ break
+ else:
+ pchildrank = list_leaf_ranks[list_leaf_ranks.index(
+ leaf_taxon) - 1]
+ if abs(current_rel_list - red_bac_dict.get(leaf_taxon[:3])) < abs(
+ current_rel_list - red_bac_dict.get(pchildrank[:3])):
+ closest_rank = leaf_taxon[:3]
+ break
+ if closest_rank is None:
+ closest_rank = parent_rank
+ # temporary: to delete
+ # All classification should be at least to the order level if a genome
+ # is placed on a terminal branch
+ if self.order_rank.index(closest_rank) < self.order_rank.index('o__'):
+ return ';'.join(term_branch_taxonomy[1:self.order_rank.index('o__') + 1])
+
+ return ';'.join(term_branch_taxonomy[1:self.order_rank.index(closest_rank) + 1])
+
+ def map_high_taxonomy(self,high_classification, mapping_dict, summary_file):
+ mapped_rank = {}
+ counter = 0
+ for k, v in high_classification.items():
+ # if the classification has an order
+ rk_to_check = v.get('tk_tax_red').split(
+ ';')[self.order_rank.index(self.rank_of_interest)]
+ if len(rk_to_check) > 3:
+ mapped_rank.setdefault(
+ mapping_dict.get(rk_to_check), []).append(k)
+ counter += 1
+ else:
+ rk_to_check = v.get('tk_tax_terminal').split(
+ ';')[self.order_rank.index(self.rank_of_interest)]
+ if len(rk_to_check) > 3:
+ mapped_rank.setdefault(
+ mapping_dict.get(rk_to_check), []).append(k)
+ counter += 1
+ else:
+ summary_row = ClassifySummaryFileRow()
+ summary_row.gid = k
+ summary_row.classification = v.get('tk_tax_red')
+ summary_row.pplacer_tax = v.get('pplacer_tax')
+ summary_row.red_value = v.get('rel_dist')
+ summary_file.add_row(summary_row)
+ return mapped_rank, counter
diff --git a/gtdbtk/tools.py b/gtdbtk/tools.py
index a2a4e241..87d6599f 100644
--- a/gtdbtk/tools.py
+++ b/gtdbtk/tools.py
@@ -16,7 +16,7 @@
from gtdbtk.config.output import CHECKSUM_SUFFIX
from gtdbtk.exceptions import GTDBTkExit
-
+order_rank = ["d__", "p__", "c__", "o__", 'f__', 'g__', 's__']
##################################################
############MISC UTILITIES########################
##################################################
@@ -42,6 +42,42 @@ def get_reference_ids():
results.add(raw_id[3:])
return frozenset(results)
+def truncate_taxonomy(taxonomy, rank):
+ taxonomy_list = taxonomy.split(';')
+ taxonomy_list = taxonomy_list[0:order_rank.index(rank)]
+ taxonomy = standardise_taxonomy(';'.join(taxonomy_list), 'bac120')
+ return taxonomy
+
+def standardise_taxonomy(taxstring, marker_set=None):
+ """Create a 7 rank taxonomy string from an incomplete taxonomy string
+
+ Parameters
+ ----------
+ taxstring : str
+ incomplete taxonomy string
+ marker_set : str
+ The marker set to use.
+
+ Returns
+ -------
+ string
+ 7 rank taxonomy string.
+ """
+ # return taxstring
+
+ taxlist = taxstring.split(";")
+ while '' in taxlist:
+ taxlist.remove('')
+ if marker_set == 'bac120':
+ if not taxlist or taxlist[0] !='d__Bacteria' :
+ taxlist.insert(0, 'd__Bacteria')
+ if marker_set == 'ar53':
+ if not taxlist or taxlist[0] !='d__Archaea' :
+ taxlist.insert(0, 'd__Archaea')
+ taxlist.extend(order_rank[len(taxlist):])
+ new_taxstring = ";".join(taxlist)
+ return new_taxstring
+
def add_ncbi_prefix(refname):
if refname.startswith("GCF_"):
diff --git a/scripts/create_genome_paths.sh b/scripts/create_genome_paths.sh
deleted file mode 100644
index 45b3ec42..00000000
--- a/scripts/create_genome_paths.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-DATAPATH='database'
-for f in $DATAPATH/*.gz
-do
- mkdir --parents database/${f:9:3}/${f:13:3}/${f:16:3}/${f:19:3}/ ; mv $f $_
- filef="$(basename -- $f)"
- echo "$filef database/${f:9:3}/${f:13:3}/${f:16:3}/${f:19:3}/ " >> genome_paths.tsv
-done
\ No newline at end of file
diff --git a/scripts/gtdb_to_ncbi_majority_vote.py b/scripts/gtdb_to_ncbi_majority_vote.py
index c9a366cd..59aa1b51 100755
--- a/scripts/gtdb_to_ncbi_majority_vote.py
+++ b/scripts/gtdb_to_ncbi_majority_vote.py
@@ -40,8 +40,8 @@
import dendropy
from gtdbtk.biolib_lite.logger import colour, logger_setup
-from gtdbtk.config.output import PATH_BAC120_TREE_FILE, PATH_AR122_TREE_FILE, PATH_BAC120_SUMMARY_OUT, \
- PATH_AR122_SUMMARY_OUT
+from gtdbtk.config.output import PATH_BAC120_TREE_FILE, PATH_AR53_TREE_FILE, PATH_BAC120_SUMMARY_OUT, \
+ PATH_AR53_SUMMARY_OUT
from gtdbtk.exceptions import GTDBTkExit
@@ -72,19 +72,19 @@ def get_ncbi_descendants(self, user_gid, tree, leaf_node_map, ncbi_sp_classifica
return ncbi_rep_ids
- def run(self, gtdbtk_output_dir, ar122_metadata_file, bac120_metadata_file,
+ def run(self, gtdbtk_output_dir, ar53_metadata_file, bac120_metadata_file,
output_file, gtdbtk_prefix):
"""Translate GTDB to NCBI classification via majority vote."""
# Set the output directories
- if not (ar122_metadata_file or bac120_metadata_file):
- raise GTDBTkExit('You must specify at least one of --ar122_metadata_file or --bac120_metadata_file')
+ if not (ar53_metadata_file or bac120_metadata_file):
+ raise GTDBTkExit('You must specify at least one of --ar53_metadata_file or --bac120_metadata_file')
ar_summary = os.path.join(gtdbtk_output_dir,
- PATH_AR122_SUMMARY_OUT.format(prefix=gtdbtk_prefix)) \
- if ar122_metadata_file else None
+ PATH_AR53_SUMMARY_OUT.format(prefix=gtdbtk_prefix)) \
+ if ar53_metadata_file else None
ar_tree = os.path.join(gtdbtk_output_dir,
- PATH_AR122_TREE_FILE.format(prefix=gtdbtk_prefix)) \
- if ar122_metadata_file else None
+ PATH_AR53_TREE_FILE.format(prefix=gtdbtk_prefix)) \
+ if ar53_metadata_file else None
bac_summary = os.path.join(gtdbtk_output_dir,
PATH_BAC120_SUMMARY_OUT.format(prefix=gtdbtk_prefix)) \
if bac120_metadata_file else None
@@ -101,7 +101,7 @@ def run(self, gtdbtk_output_dir, ar122_metadata_file, bac120_metadata_file,
ncbi_taxa = {}
ncbi_lineages = {}
gtdb_sp_clusters = defaultdict(set)
- for domain, metadata_file in [('archaeal', ar122_metadata_file),
+ for domain, metadata_file in [('archaeal', ar53_metadata_file),
('bacterial', bac120_metadata_file)]:
# Only process those domains which have been provided as an input.
if metadata_file is None:
@@ -249,13 +249,13 @@ def print_help():
The output file to write the translated taxonomy.
{colour('At least one argument is required from:', ['underscore'])}
- {colour('--ar122_metadata_file', ['bright'])}
+ {colour('--ar53_metadata_file', ['bright'])}
The archaeal GTDB metadata file (if processing archaeal genomes).
{colour('--bac120_metadata_file', ['bright'])}
The bacterial GTDB metadata file (if processing bacterial genomes).
NOTE: Metadata files are available for download from the GTDB repository
- https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/ar122_metadata.tsv
+ https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/ar53_metadata.tsv
https://data.ace.uq.edu.au/public/gtdb/data/releases/latest/bac120_metadata.tsv
{colour('Optional arguments:', ['underscore'])}
@@ -268,7 +268,7 @@ def print_help():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--gtdbtk_output_dir', required=True,
help='The output directory produced by the GTDB-Tk classify workflow.')
- parser.add_argument('--ar122_metadata_file', required=False, default=None,
+ parser.add_argument('--ar53_metadata_file', required=False, default=None,
help='The archaeal GTDB metadata file (if processing archaeal genomes).')
parser.add_argument('--bac120_metadata_file', required=False, default=None,
help='The bacterial GTDB metadata file (if processing bacterial genomes).')
@@ -295,7 +295,7 @@ def print_help():
try:
p = Translate()
p.run(args.gtdbtk_output_dir,
- args.ar122_metadata_file,
+ args.ar53_metadata_file,
args.bac120_metadata_file,
args.output_file,
args.gtdbtk_prefix)
diff --git a/scripts/rename_UBAs/prepare_gtdbtk_package.py b/scripts/rename_UBAs/prepare_gtdbtk_package.py
index c2009802..8c7a2ed3 100644
--- a/scripts/rename_UBAs/prepare_gtdbtk_package.py
+++ b/scripts/rename_UBAs/prepare_gtdbtk_package.py
@@ -182,7 +182,7 @@ def run(self, dirin, dirout, gtr, release):
copyfile(genome, os.path.join(
fastani_dir, filenamef + "_genomic.fna"))
- for dom in ['bac120', 'ar122']:
+ for dom in ['bac120', 'ar53']:
# MSA renaming
msadir = os.path.join(dirout, dom, 'msa')
if not os.path.exists(msadir):
diff --git a/scripts/verify_official_package.py b/scripts/verify_official_package.py
index d2dbd080..6a2ad421 100644
--- a/scripts/verify_official_package.py
+++ b/scripts/verify_official_package.py
@@ -70,7 +70,7 @@ def run(self, outf):
# Archaeal genome MSA is untrimmed
ar_msa_file = glob.glob(os.path.join(
- self.pack_dir, 'msa/*ar122.faa'))[0]
+ self.pack_dir, 'msa/*ar53.faa'))[0]
ar_msa = read_fasta(ar_msa_file)
first_seq = ar_msa.get(list(ar_msa.keys())[0])
if len(first_seq) != 32675:
@@ -95,7 +95,7 @@ def run(self, outf):
# Archaeal MASK is same length as the untrimmed archaeal genomes
ar_mask_file = glob.glob(os.path.join(
- self.pack_dir, 'masks/*ar122.mask'))[0]
+ self.pack_dir, 'masks/*ar53.mask'))[0]
ar_mask = ''
with open(ar_mask_file) as amf:
ar_mask = amf.readline()
@@ -105,7 +105,7 @@ def run(self, outf):
# Archaeal Pplacer MSA should have the same number of genomes as the
# Archaeal untrimmed MSA
ar_pplacer_msa_file = glob.glob(os.path.join(
- self.pack_dir, 'pplacer', 'gtdb_' + version + '_ar122.refpkg', 'ar122_msa_r95.faa'))[0]
+ self.pack_dir, 'pplacer', 'gtdb_' + version + '_ar53.refpkg', 'ar53_msa_r95.faa'))[0]
ar_pplacer_msa = read_fasta(ar_pplacer_msa_file)
if len(ar_pplacer_msa) != len(ar_msa):
print('ERROR: len(ar_pplacer_msa) != len(ar_msa)')
@@ -140,7 +140,7 @@ def run(self, outf):
# Archaeal Tree should have the same number of leaves than nomber of
# genomes in the MSA
arc_tree = dendropy.Tree.get_from_path(os.path.join(
- self.pack_dir, 'pplacer', 'gtdb_' + version + '_ar122.refpkg', 'ar122_' + version + '_unroot.pplacer.tree'),
+ self.pack_dir, 'pplacer', 'gtdb_' + version + '_ar53.refpkg', 'ar53_' + version + '_unroot.pplacer.tree'),
schema='newick',
rooting='force-rooted',
preserve_underscores=True)
@@ -164,7 +164,7 @@ def run(self, outf):
print('len(list_leaves): {}'.format(len(list_leaves)))
print('len(bac_pplacer_msa): {}'.format(len(bac_pplacer_msa)))
- # Taxonomy file should have as many genomes as bac120 and ar122 MSA
+ # Taxonomy file should have as many genomes as bac120 and ar53 MSA
# combined
tax_file = os.path.join(
self.pack_dir, 'taxonomy', 'gtdb_taxonomy.tsv')
@@ -178,7 +178,7 @@ def run(self, outf):
print('len(tax_dict): {}'.format(len(tax_dict)))
print('len(ar_msa) + len(bac_msa): {}'.format(len(ar_msa) + len(bac_msa)))
- # Radii file should have as many genomes as bac120 and ar122 MSA
+ # Radii file should have as many genomes as bac120 and ar53 MSA
# combined
radii_file = os.path.join(
self.pack_dir, 'radii', 'gtdb_radii.tsv')
@@ -204,7 +204,7 @@ def run(self, outf):
print('\n\nVERSION: {}'.format(version))
print('Length trimmed bac120 MSA: {}'.format(len(bac_pplacer_msa.get(list(bac_pplacer_msa.keys())[0]))))
- print('Length trimmed ar122 MSA: {}'.format(len(ar_pplacer_msa.get(list(ar_pplacer_msa.keys())[0]))))
+ print('Length trimmed ar53 MSA: {}'.format(len(ar_pplacer_msa.get(list(ar_pplacer_msa.keys())[0]))))
print('')
print('Number of genomes in fastani/database: {}'.format(len(list_genomes)))
print('Number of genomes in radii file: {}'.format(len(radii_dict)))
| Previously annotated genomes
Am I missing something, how do I used GTDB with already annotated genomes that have faa and gbk files previously generated.
GitHub issues are specifically for issues with the GTDB-Tk, please join us on the GTDB forum:
GTDB forum:
https://forum.gtdb.ecogenomic.org/
`iTOL` compatibility
Output trees from `gtdb-tk` are not compatible with `iTOL`, and `iTOL` is a very popular software for tree visualization. Standardization would help a lot.
Moving to ar53
| 2022-04-07T23:38:28 | 0.0 | [] | [] |
|||
AdCombo/flask-combo-jsonapi | AdCombo__flask-combo-jsonapi-51 | 825842efff8883883b95fd739c86a333529aa728 | diff --git a/flask_combo_jsonapi/resource.py b/flask_combo_jsonapi/resource.py
index 149c461..aa4fa4f 100644
--- a/flask_combo_jsonapi/resource.py
+++ b/flask_combo_jsonapi/resource.py
@@ -12,7 +12,8 @@
from flask_combo_jsonapi.querystring import QueryStringManager as QSManager
from flask_combo_jsonapi.pagination import add_pagination_links
-from flask_combo_jsonapi.exceptions import InvalidType, BadRequest, RelationNotFound, PluginMethodNotImplementedError
+from flask_combo_jsonapi.exceptions import InvalidType, BadRequest, RelationNotFound, PluginMethodNotImplementedError,\
+ ObjectNotFound
from flask_combo_jsonapi.decorators import check_headers, check_method_requirements, jsonapi_exception_formatter
from flask_combo_jsonapi.schema import compute_schema, get_relationships, get_model_field
from flask_combo_jsonapi.data_layers.base import BaseDataLayer
@@ -234,6 +235,11 @@ def get(self, *args, **kwargs):
obj = self.get_object(kwargs, qs)
+ if obj is None:
+ url_field = getattr(self._data_layer, "url_field", "id")
+ value = f" '{kwargs.get(url_field)}'" if kwargs.get(url_field) else ""
+ raise ObjectNotFound(f"{self.data_layer['model'].__name__}{value} not found.")
+
self.before_marshmallow(args, kwargs)
schema = compute_schema(self.schema, getattr(self, "get_schema_kwargs", dict()), qs, qs.include)
| Raise 404 if object not found
Instead of raising 404 if an object is not found, the library tries to serialize it using the schema, hence causing https://github.com/marshmallow-code/marshmallow-jsonapi/pull/302
What is the use case of this behaviour? Why not raise ObjectNotFound if object returned after get from data layer is None?
| It's JSON:API spec https://jsonapi.org/format/
there was a related fix recently: https://github.com/AdCombo/flask-combo-jsonapi/pull/33
Here's the spec: https://jsonapi.org/format/#fetching-resources-responses-200
`If the above article’s author is missing, then a GET request to that related resource would return:`
```http
HTTP/1.1 200 OK
Content-Type: application/vnd.api+json
{
"links": {
"self": "http://example.com/articles/1/author"
},
"data": null
}
```
That is only true for related resource AFAIK. For a single resource, it should return 404
```
404 Not Found
A server MUST respond with 404 Not Found when processing a request to fetch a single resource that does not exist, except when the request warrants a 200 OK response with null as the primary data (as described above).
```
To be honest, you are not the first one to ask this question
This time I made a bit more research and seems you are right and this has to be done another way
Here's an example from the [playground](http://jsonapiplayground.reyesoft.com/)
http://jsonapiplayground.reyesoft.com/v2/authors/101
```http
HTTP/1.1 400 Bad Request
Content-Type: application/vnd.api+json
{
"errors": [
{
"status": "404",
"title": "Resource not found",
"detail": "Resource Author `101` not found.",
"meta": null
}
]
}
```
but this one returns status code 400, which is a bit confusing too 🤔
also, there's an [example](http://jsonapiplayground.reyesoft.com/v2/stores?filter[created_by]=1,3) with `filter[x]=1,3` where `=` is used as `in_` filter and it means `x is one of [1, 3]`. (https://github.com/AdCombo/flask-combo-jsonapi/pull/22). I think we have to do it this way: if value on the backend is not a collection type (not an ARRAY), but is an int or str, we should replace `eq` filter with `in_` and parse incoming data as a list, not as a single value.
These are breaking changes, I suggest planning it for v1.1.0
@Znbiz
Yes, I was confused because we were using our fork https://github.com/fossasia/flask-rest-jsonapi and want to switch to this maintained repo. So, I switched to this maintained repo and I am trying to bridge the gaps by either writing plugins or reporting issues if it is an incompatible upstream bug. In this case, I cannot fix it with a plugin and it is actually an issue with the upstream library as well.
The example with 400 response code is strange because there is no error in the request at all.
I would love to contribute and bridge the gap with more features and making it spec compliant, so that I can switch the library to single version rather than maintaining another fork which will eventually become outdated. For example, I have issues with nested sorting and flipped source and detail in errors which are fixed in the above repo. I will open relevant issues and you can comment whether those come in the scope of this repo or not. Hopefully, this will become the de-facto maintained jsonapi repo instead of dozens of unmaintained one.
Thanks for taking the initiative
Also, there are a lot of examples without test cases which fail on actual implementation, so I would also add a few more tests to cover those implementation, e.g., #34
I support.
This needs to be implemented.
You also need to raise this issue here https://github.com/json-api/json-api/issues to be included in the spec.
Already a part of spec https://jsonapi.org/format/#fetching-resources-responses-404
https://github.com/json-api/json-api/issues/298#issuecomment-60863540
https://github.com/katharsis-project/katharsis-framework/issues/272
@iamareebjamal , you're right.
Regarding the inclusion in the specification, I meant this:
```
also, there's an example with filter[x]=1,3 where = is used as in_ filter and it means x is one of [1, 3]. (#22). I think we have to do it this way: if value on the backend is not a collection type (not an ARRAY), but is an int or str, we should replace eq filter with in_ and parse incoming data as a list, not as a single value.
These are breaking changes, I suggest planning it for v1.1.0
@Znbiz
```
@iamareebjamal great to hear that! We welcome any initiative and are happy to deliver any fixes and improvements!
Thanks for being involved 🎉 | 2021-06-07T13:34:58 | 0.0 | [] | [] |
||
AdCombo/flask-combo-jsonapi | AdCombo__flask-combo-jsonapi-40 | f426ebb1ccf52a0081d6d1f8b9a2f63cdf3e003c | diff --git a/flask_combo_jsonapi/schema.py b/flask_combo_jsonapi/schema.py
index 39ebca9..9d24afc 100644
--- a/flask_combo_jsonapi/schema.py
+++ b/flask_combo_jsonapi/schema.py
@@ -21,7 +21,7 @@ def compute_schema(schema_cls, default_kwargs, qs, include):
"""
# manage include_data parameter of the schema
schema_kwargs = default_kwargs
- schema_kwargs['include_data'] = tuple()
+ schema_kwargs['include_data'] = schema_kwargs.get('include_data', tuple())
# collect sub-related_includes
related_includes = {}
@@ -72,6 +72,7 @@ def compute_schema(schema_cls, default_kwargs, qs, include):
related_schema_kwargs['context'] = default_kwargs['context']
if isinstance(related_schema_cls, SchemaABC):
related_schema_kwargs['many'] = related_schema_cls.many
+ related_schema_kwargs['include_data'] = related_schema_cls.__dict__.get('include_data')
related_schema_cls = related_schema_cls.__class__
if isinstance(related_schema_cls, str):
related_schema_cls = class_registry.get_class(related_schema_cls)
| Allow fetching multiple relationships from same included resource
For example, let's say, we have this resource heirarchy event -> session -> track, location
And I want to fetch them all, my query would be
`/v1/events/123?include=session.track,session.location`
This should be possible, as evident here http://jsonapiplayground.reyesoft.com/v2/stores/1?include=books.author,books.chapters
But the library implementation overrides the previous include by latest include, so it will only include session.location, and not session.track
| 2021-01-17T17:59:21 | 0.0 | [] | [] |
|||
sigsep/open-unmix-pytorch | sigsep__open-unmix-pytorch-144 | 4318fb278e1863f4cf8556b513987faf14a15832 | diff --git a/README.md b/README.md
index ed10a838..0f053a83 100644
--- a/README.md
+++ b/README.md
@@ -149,8 +149,8 @@ Note that this requires the audio to be in the right shape and sampling rate. Fo
To perform model loading, preprocessing and separation in one step, just use:
```python
-from openunmix import separate
-estimates = separate.predict(audio, ...)
+from openunmix.predict import separate
+estimates = separate(audio, ...)
```
### Load user-trained models
diff --git a/hubconf.py b/hubconf.py
index 669017fd..ebc14745 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -4,7 +4,7 @@
# `xxx` take waveform inputs and output separated waveforms
# Optional list of dependencies required by the package
-dependencies = ['torch', 'numpy']
+dependencies = ["torch", "numpy"]
from openunmix import umxse_spec
from openunmix import umxse
diff --git a/openunmix/__init__.py b/openunmix/__init__.py
index dc3fbb8a..3a5324b9 100644
--- a/openunmix/__init__.py
+++ b/openunmix/__init__.py
@@ -5,14 +5,15 @@
This is the python package API documentation.
Please checkout [the open-unmix website](https://sigsep.github.io/open-unmix) for more information.
"""
+
from openunmix import utils
import torch.hub
def umxse_spec(targets=None, device="cpu", pretrained=True):
target_urls = {
- "speech": "https://zenodo.org/api/files/765b45a3-c70d-48a6-936b-09a7989c349a/speech_f5e0d9f9.pth",
- "noise": "https://zenodo.org/api/files/765b45a3-c70d-48a6-936b-09a7989c349a/noise_04a6fc2d.pth",
+ "speech": "https://zenodo.org/records/3786908/files/speech_f5e0d9f9.pth",
+ "noise": "https://zenodo.org/records/3786908/files/noise_04a6fc2d.pth",
}
from .model import OpenUnmix
@@ -26,15 +27,11 @@ def umxse_spec(targets=None, device="cpu", pretrained=True):
# load open unmix models speech enhancement models
target_models = {}
for target in targets:
- target_unmix = OpenUnmix(
- nb_bins=1024 // 2 + 1, nb_channels=1, hidden_size=256, max_bin=max_bin
- )
+ target_unmix = OpenUnmix(nb_bins=1024 // 2 + 1, nb_channels=1, hidden_size=256, max_bin=max_bin)
# enable centering of stft to minimize reconstruction error
if pretrained:
- state_dict = torch.hub.load_state_dict_from_url(
- target_urls[target], map_location=device
- )
+ state_dict = torch.hub.load_state_dict_from_url(target_urls[target], map_location=device)
target_unmix.load_state_dict(state_dict, strict=False)
target_unmix.eval()
@@ -43,14 +40,7 @@ def umxse_spec(targets=None, device="cpu", pretrained=True):
return target_models
-def umxse(
- targets=None,
- residual=False,
- niter=1,
- device="cpu",
- pretrained=True,
- filterbank="torch",
-):
+def umxse(targets=None, residual=False, niter=1, device="cpu", pretrained=True, filterbank="torch", wiener_win_len=300):
"""
Open Unmix Speech Enhancemennt 1-channel BiLSTM Model
trained on the 28-speaker version of Voicebank+Demand
@@ -66,6 +56,12 @@ def umxse(
residual (bool): if True, a "garbage" target is created
niter (int): the number of post-processingiterations, defaults to 0
device (str): selects device to be used for inference
+ wiener_win_len (int or None): The size of the excerpts
+ (number of frames) on which to apply filtering
+ independently. This means assuming time varying stereo models and
+ localization of sources.
+ None means not batching but using the whole signal. It comes at the
+ price of a much larger memory usage.
filterbank (str): filterbank implementation method.
Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
compared to `asteroid` on large FFT sizes such as 4096. However,
@@ -89,6 +85,7 @@ def umxse(
n_hop=512,
nb_channels=1,
sample_rate=16000.0,
+ wiener_win_len=wiener_win_len,
filterbank=filterbank,
).to(device)
@@ -100,10 +97,10 @@ def umxhq_spec(targets=None, device="cpu", pretrained=True):
# set urls for weights
target_urls = {
- "bass": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/bass-8d85a5bd.pth",
- "drums": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/drums-9619578f.pth",
- "other": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/other-b52fbbf7.pth",
- "vocals": "https://zenodo.org/api/files/1c8f83c5-33a5-4f59-b109-721fdd234875/vocals-b62c91ce.pth",
+ "bass": "https://zenodo.org/records/3370489/files/bass-8d85a5bd.pth",
+ "drums": "https://zenodo.org/records/3370489/files/drums-9619578f.pth",
+ "other": "https://zenodo.org/records/3370489/files/other-b52fbbf7.pth",
+ "vocals": "https://zenodo.org/records/3370489/files/vocals-b62c91ce.pth",
}
if targets is None:
@@ -115,15 +112,11 @@ def umxhq_spec(targets=None, device="cpu", pretrained=True):
target_models = {}
for target in targets:
# load open unmix model
- target_unmix = OpenUnmix(
- nb_bins=4096 // 2 + 1, nb_channels=2, hidden_size=512, max_bin=max_bin
- )
+ target_unmix = OpenUnmix(nb_bins=4096 // 2 + 1, nb_channels=2, hidden_size=512, max_bin=max_bin)
# enable centering of stft to minimize reconstruction error
if pretrained:
- state_dict = torch.hub.load_state_dict_from_url(
- target_urls[target], map_location=device
- )
+ state_dict = torch.hub.load_state_dict_from_url(target_urls[target], map_location=device)
target_unmix.load_state_dict(state_dict, strict=False)
target_unmix.eval()
@@ -138,6 +131,7 @@ def umxhq(
niter=1,
device="cpu",
pretrained=True,
+ wiener_win_len=300,
filterbank="torch",
):
"""
@@ -153,6 +147,12 @@ def umxhq(
residual (bool): if True, a "garbage" target is created
niter (int): the number of post-processingiterations, defaults to 0
device (str): selects device to be used for inference
+ wiener_win_len (int or None): The size of the excerpts
+ (number of frames) on which to apply filtering
+ independently. This means assuming time varying stereo models and
+ localization of sources.
+ None means not batching but using the whole signal. It comes at the
+ price of a much larger memory usage.
filterbank (str): filterbank implementation method.
Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
compared to `asteroid` on large FFT sizes such as 4096. However,
@@ -172,6 +172,7 @@ def umxhq(
n_hop=1024,
nb_channels=2,
sample_rate=44100.0,
+ wiener_win_len=wiener_win_len,
filterbank=filterbank,
).to(device)
@@ -183,10 +184,10 @@ def umx_spec(targets=None, device="cpu", pretrained=True):
# set urls for weights
target_urls = {
- "bass": "https://zenodo.org/api/files/d6105b95-8c52-430c-84ce-bd14b803faaf/bass-646024d3.pth",
- "drums": "https://zenodo.org/api/files/d6105b95-8c52-430c-84ce-bd14b803faaf/drums-5a48008b.pth",
- "other": "https://zenodo.org/api/files/d6105b95-8c52-430c-84ce-bd14b803faaf/other-f8e132cc.pth",
- "vocals": "https://zenodo.org/api/files/d6105b95-8c52-430c-84ce-bd14b803faaf/vocals-c8df74a5.pth",
+ "bass": "https://zenodo.org/records/3370486/files/bass-646024d3.pth",
+ "drums": "https://zenodo.org/records/3370486/files/drums-5a48008b.pth",
+ "other": "https://zenodo.org/records/3370486/files/other-f8e132cc.pth",
+ "vocals": "https://zenodo.org/records/3370486/files/vocals-c8df74a5.pth",
}
if targets is None:
@@ -198,15 +199,11 @@ def umx_spec(targets=None, device="cpu", pretrained=True):
target_models = {}
for target in targets:
# load open unmix model
- target_unmix = OpenUnmix(
- nb_bins=4096 // 2 + 1, nb_channels=2, hidden_size=512, max_bin=max_bin
- )
+ target_unmix = OpenUnmix(nb_bins=4096 // 2 + 1, nb_channels=2, hidden_size=512, max_bin=max_bin)
# enable centering of stft to minimize reconstruction error
if pretrained:
- state_dict = torch.hub.load_state_dict_from_url(
- target_urls[target], map_location=device
- )
+ state_dict = torch.hub.load_state_dict_from_url(target_urls[target], map_location=device)
target_unmix.load_state_dict(state_dict, strict=False)
target_unmix.eval()
@@ -221,6 +218,7 @@ def umx(
niter=1,
device="cpu",
pretrained=True,
+ wiener_win_len=300,
filterbank="torch",
):
"""
@@ -236,6 +234,12 @@ def umx(
residual (bool): if True, a "garbage" target is created
niter (int): the number of post-processingiterations, defaults to 0
device (str): selects device to be used for inference
+ wiener_win_len (int or None): The size of the excerpts
+ (number of frames) on which to apply filtering
+ independently. This means assuming time varying stereo models and
+ localization of sources.
+ None means not batching but using the whole signal. It comes at the
+ price of a much larger memory usage.
filterbank (str): filterbank implementation method.
Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
compared to `asteroid` on large FFT sizes such as 4096. However,
@@ -255,6 +259,7 @@ def umx(
n_hop=1024,
nb_channels=2,
sample_rate=44100.0,
+ wiener_win_len=wiener_win_len,
filterbank=filterbank,
).to(device)
@@ -266,10 +271,10 @@ def umxl_spec(targets=None, device="cpu", pretrained=True):
# set urls for weights
target_urls = {
- "bass": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/bass-2ca1ce51.pth",
- "drums": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/drums-69e0ebd4.pth",
- "other": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/other-c8c5b3e6.pth",
- "vocals": "https://zenodo.org/api/files/f8209c3e-ba60-48cf-8e79-71ae65beca61/vocals-bccbd9aa.pth",
+ "bass": "https://zenodo.org/records/5069601/files/bass-2ca1ce51.pth",
+ "drums": "https://zenodo.org/records/5069601/files/drums-69e0ebd4.pth",
+ "other": "https://zenodo.org/records/5069601/files/other-c8c5b3e6.pth",
+ "vocals": "https://zenodo.org/records/5069601/files/vocals-bccbd9aa.pth",
}
if targets is None:
@@ -281,15 +286,11 @@ def umxl_spec(targets=None, device="cpu", pretrained=True):
target_models = {}
for target in targets:
# load open unmix model
- target_unmix = OpenUnmix(
- nb_bins=4096 // 2 + 1, nb_channels=2, hidden_size=1024, max_bin=max_bin
- )
+ target_unmix = OpenUnmix(nb_bins=4096 // 2 + 1, nb_channels=2, hidden_size=1024, max_bin=max_bin)
# enable centering of stft to minimize reconstruction error
if pretrained:
- state_dict = torch.hub.load_state_dict_from_url(
- target_urls[target], map_location=device
- )
+ state_dict = torch.hub.load_state_dict_from_url(target_urls[target], map_location=device)
target_unmix.load_state_dict(state_dict, strict=False)
target_unmix.eval()
@@ -304,6 +305,7 @@ def umxl(
niter=1,
device="cpu",
pretrained=True,
+ wiener_win_len=300,
filterbank="torch",
):
"""
@@ -321,6 +323,12 @@ def umxl(
residual (bool): if True, a "garbage" target is created
niter (int): the number of post-processingiterations, defaults to 0
device (str): selects device to be used for inference
+ wiener_win_len (int or None): The size of the excerpts
+ (number of frames) on which to apply filtering
+ independently. This means assuming time varying stereo models and
+ localization of sources.
+ None means not batching but using the whole signal. It comes at the
+ price of a much larger memory usage.
filterbank (str): filterbank implementation method.
Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
compared to `asteroid` on large FFT sizes such as 4096. However,
@@ -340,6 +348,7 @@ def umxl(
n_hop=1024,
nb_channels=2,
sample_rate=44100.0,
+ wiener_win_len=wiener_win_len,
filterbank=filterbank,
).to(device)
diff --git a/openunmix/cli.py b/openunmix/cli.py
index 23d2a37e..b89c2e8d 100644
--- a/openunmix/cli.py
+++ b/openunmix/cli.py
@@ -57,9 +57,7 @@ def separate():
help="Audio chunk duration in seconds, negative values load full track",
)
- parser.add_argument(
- "--no-cuda", action="store_true", default=False, help="disables CUDA inference"
- )
+ parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA inference")
parser.add_argument(
"--audio-backend",
@@ -86,8 +84,7 @@ def separate():
"--residual",
type=str,
default=None,
- help="if provided, build a source with given name "
- "for the mix minus all estimated targets",
+ help="if provided, build a source with given name " "for the mix minus all estimated targets",
)
parser.add_argument(
diff --git a/openunmix/data.py b/openunmix/data.py
index c07cac82..09f90328 100644
--- a/openunmix/data.py
+++ b/openunmix/data.py
@@ -169,9 +169,7 @@ def load_datasets(
"output_file": args.output_file,
}
args.target = Path(args.output_file).stem
- train_dataset = AlignedDataset(
- split="train", random_chunks=True, **dataset_kwargs
- ) # type: UnmixDataset
+ train_dataset = AlignedDataset(split="train", random_chunks=True, **dataset_kwargs) # type: UnmixDataset
valid_dataset = AlignedDataset(split="valid", **dataset_kwargs) # type: UnmixDataset
elif args.dataset == "sourcefolder":
@@ -240,9 +238,7 @@ def load_datasets(
seq_duration=args.seq_dur,
**dataset_kwargs,
)
- valid_dataset = FixedSourcesTrackFolderDataset(
- split="valid", seq_duration=None, **dataset_kwargs
- )
+ valid_dataset = FixedSourcesTrackFolderDataset(split="valid", seq_duration=None, **dataset_kwargs)
elif args.dataset == "trackfolder_var":
parser.add_argument("--ext", type=str, default=".wav")
@@ -271,9 +267,7 @@ def load_datasets(
"silence_missing_targets": args.silence_missing,
}
- source_augmentations = Compose(
- [globals()["_augment_" + aug] for aug in args.source_augmentations]
- )
+ source_augmentations = Compose([globals()["_augment_" + aug] for aug in args.source_augmentations])
train_dataset = VariableSourcesTrackFolderDataset(
split="train",
@@ -283,9 +277,7 @@ def load_datasets(
seq_duration=args.seq_dur,
**dataset_kwargs,
)
- valid_dataset = VariableSourcesTrackFolderDataset(
- split="valid", seq_duration=None, **dataset_kwargs
- )
+ valid_dataset = VariableSourcesTrackFolderDataset(split="valid", seq_duration=None, **dataset_kwargs)
else:
parser.add_argument(
@@ -295,9 +287,7 @@ def load_datasets(
help="loads wav instead of STEMS",
)
parser.add_argument("--samples-per-track", type=int, default=64)
- parser.add_argument(
- "--source-augmentations", type=str, default=["gain", "channelswap"], nargs="+"
- )
+ parser.add_argument("--source-augmentations", type=str, default=["gain", "channelswap"], nargs="+")
args = parser.parse_args()
dataset_kwargs = {
@@ -320,9 +310,7 @@ def load_datasets(
**dataset_kwargs,
)
- valid_dataset = MUSDBDataset(
- split="valid", samples_per_track=1, seq_duration=None, **dataset_kwargs
- )
+ valid_dataset = MUSDBDataset(split="valid", samples_per_track=1, seq_duration=None, **dataset_kwargs)
return train_dataset, valid_dataset, args
@@ -586,9 +574,7 @@ def __getitem__(self, index):
# assemble the mixture of target and interferers
audio_sources = []
# load target
- target_audio, _ = load_audio(
- track_path / self.target_file, start=start, dur=self.seq_duration
- )
+ target_audio, _ = load_audio(track_path / self.target_file, start=start, dur=self.seq_duration)
target_audio = self.source_augmentations(target_audio)
audio_sources.append(target_audio)
# load interferers
@@ -917,9 +903,7 @@ def __len__(self):
parser.add_argument("--root", type=str, help="root path of dataset")
- parser.add_argument(
- "--save", action="store_true", help=("write out a fixed dataset of samples")
- )
+ parser.add_argument("--save", action="store_true", help=("write out a fixed dataset of samples"))
parser.add_argument("--target", type=str, default="vocals")
parser.add_argument("--seed", type=int, default=42)
diff --git a/openunmix/evaluate.py b/openunmix/evaluate.py
index e59535cb..7f687456 100644
--- a/openunmix/evaluate.py
+++ b/openunmix/evaluate.py
@@ -90,9 +90,7 @@ def separate_and_evaluate(
parser.add_argument("--cores", type=int, default=1)
- parser.add_argument(
- "--no-cuda", action="store_true", default=False, help="disables CUDA inference"
- )
+ parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA inference")
parser.add_argument(
"--is-wav",
@@ -119,8 +117,7 @@ def separate_and_evaluate(
"--residual",
type=str,
default=None,
- help="if provided, build a source with given name"
- "for the mix minus all estimated targets",
+ help="if provided, build a source with given name" "for the mix minus all estimated targets",
)
parser.add_argument(
diff --git a/openunmix/filtering.py b/openunmix/filtering.py
index b0f4921e..ac5d97c6 100644
--- a/openunmix/filtering.py
+++ b/openunmix/filtering.py
@@ -260,10 +260,7 @@ def expectation_maximization(
)
# allocate the spatial covariance matrices
- R = [
- torch.zeros((nb_bins, nb_channels, nb_channels, 2), dtype=x.dtype, device=x.device)
- for j in range(nb_sources)
- ]
+ R = [torch.zeros((nb_bins, nb_channels, nb_channels, 2), dtype=x.dtype, device=x.device) for j in range(nb_sources)]
weight: torch.Tensor = torch.zeros((nb_bins,), dtype=x.dtype, device=x.device)
v: torch.Tensor = torch.zeros((nb_frames, nb_bins, nb_sources), dtype=x.dtype, device=x.device)
@@ -434,19 +431,16 @@ def wiener(
# multiply by the mix stft
y = (
mix_stft[..., None]
- * (
- targets_spectrograms
- / (eps + torch.sum(targets_spectrograms, dim=-1, keepdim=True).to(mix_stft.dtype))
- )[..., None, :]
+ * (targets_spectrograms / (eps + torch.sum(targets_spectrograms, dim=-1, keepdim=True).to(mix_stft.dtype)))[
+ ..., None, :
+ ]
)
else:
# otherwise, we just multiply the targets spectrograms with mix phase
# we tacitly assume that we have magnitude estimates.
angle = atan2(mix_stft[..., 1], mix_stft[..., 0])[..., None]
nb_sources = targets_spectrograms.shape[-1]
- y = torch.zeros(
- mix_stft.shape + (nb_sources,), dtype=mix_stft.dtype, device=mix_stft.device
- )
+ y = torch.zeros(mix_stft.shape + (nb_sources,), dtype=mix_stft.dtype, device=mix_stft.device)
y[..., 0, :] = targets_spectrograms * torch.cos(angle)
y[..., 1, :] = targets_spectrograms * torch.sin(angle)
diff --git a/openunmix/model.py b/openunmix/model.py
index 6c54776f..d824d1e8 100644
--- a/openunmix/model.py
+++ b/openunmix/model.py
@@ -289,9 +289,7 @@ def forward(self, audio: Tensor) -> Tensor:
)
nb_frames = spectrograms.shape[1]
- targets_stft = torch.zeros(
- mix_stft.shape + (nb_sources,), dtype=audio.dtype, device=mix_stft.device
- )
+ targets_stft = torch.zeros(mix_stft.shape + (nb_sources,), dtype=audio.dtype, device=mix_stft.device)
for sample in range(nb_samples):
pos = 0
if self.wiener_win_len:
diff --git a/openunmix/utils.py b/openunmix/utils.py
index bece46f8..dc53d927 100644
--- a/openunmix/utils.py
+++ b/openunmix/utils.py
@@ -112,7 +112,7 @@ def _init_is_better(self, mode, min_delta):
self.is_better = lambda a, best: a > best + min_delta
-def load_target_models(targets, model_str_or_path="umxhq", device="cpu", pretrained=True):
+def load_target_models(targets, model_str_or_path="umxl", device="cpu", pretrained=True):
"""Core model loader
target model path can be either <target>.pth, or <target>-sha256.pth
@@ -162,7 +162,7 @@ def load_target_models(targets, model_str_or_path="umxhq", device="cpu", pretrai
def load_separator(
- model_str_or_path: str = "umxhq",
+ model_str_or_path: str = "umxl",
targets: Optional[list] = None,
niter: int = 1,
residual: bool = False,
@@ -178,7 +178,7 @@ def load_separator(
E.g. The following files are assumed to present when
loading `model_str_or_path='mymodel', targets=['vocals']`
'mymodel/separator.json', mymodel/vocals.pth', 'mymodel/vocals.json'.
- Defaults to `umxhq`.
+ Defaults to `umxl`.
targets (list of str or None): list of target names. When loading a
pre-trained model, all `targets` can be None as all targets
will be loaded
@@ -212,9 +212,7 @@ def load_separator(
if targets is None:
raise UserWarning("For custom models, please specify the targets")
- target_models = load_target_models(
- targets=targets, model_str_or_path=model_path, pretrained=pretrained
- )
+ target_models = load_target_models(targets=targets, model_str_or_path=model_path, pretrained=pretrained)
with open(Path(model_path, "separator.json"), "r") as stream:
enc_conf = json.load(stream)
@@ -240,6 +238,7 @@ def load_separator(
pretrained=True,
niter=niter,
residual=residual,
+ wiener_win_len=wiener_win_len,
filterbank=filterbank,
)
diff --git a/pyproject.toml b/pyproject.toml
index 6ae8cc22..bb513b03 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,88 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "openunmix"
+authors = [
+ {name = "Fabian-Robert Stöter", email = "[email protected]"},
+ {name = "Antoine Liutkus", email = "[email protected]"},
+]
+version = "1.3.0"
+description = "PyTorch-based music source separation toolkit"
+readme = "README.md"
+license = { text = "MIT" }
+requires-python = ">=3.9"
+classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Quality Assurance",
+]
+dependencies = [
+ "numpy",
+ "torchaudio>=0.9.0",
+ "torch>=1.9.0",
+ "tqdm",
+]
+
+[project.optional-dependencies]
+asteroid = ["asteroid-filterbanks>=0.3.2"]
+stempeg = ["stempeg"]
+evaluation = ["musdb>=0.4.0", "museval>=0.4.0"]
+tests = [
+ "pytest",
+ "musdb>=0.4.0",
+ "museval>=0.4.0",
+ "stempeg",
+ "asteroid-filterbanks>=0.3.2",
+ "onnx",
+ "tqdm",
+]
+
+[project.scripts]
+umx = "openunmix.cli:separate"
+
+[project.urls]
+Homepage = "https://github.com/sigsep/open-unmix-pytorch"
+
[tool.black]
-# https://github.com/psf/black
-line-length = 100
-target-version = ["py37"]
-exclude = "(.eggs|.git|.hg|.mypy_cache|.nox|.tox|.venv|.svn|_build|buck-out|build|dist)"
\ No newline at end of file
+line-length = 120
+target-version = ['py39']
+include = '\.pyi?$'
+exclude = '''
+(
+ /(
+ \.git
+ | \.hg
+ | \.mypy_cache
+ | \.tox
+ | \.venv
+ | _build
+ | buck-out
+ | build
+ | dist
+ | \.idea
+ | \.vscode
+ | scripts
+ | notebooks
+ | \.eggs
+ )/
+)
+'''
+
+[tool.setuptools.packages.find]
+include = ["openunmix"]
+
+[tool.setuptools.package-data]
+openunmix = ["*.txt", "*.rst", "*.json", "*.wav", "*.pt"]
\ No newline at end of file
diff --git a/setup.py b/setup.py
deleted file mode 100644
index c9e7ecdc..00000000
--- a/setup.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from setuptools import setup, find_packages
-
-umx_version = "1.2.1"
-
-with open("README.md", encoding="utf-8") as fh:
- long_description = fh.read()
-
-setup(
- name="openunmix",
- version=umx_version,
- author="Fabian-Robert Stöter",
- author_email="[email protected]",
- url="https://github.com/sigsep/open-unmix-pytorch",
- description="PyTorch-based music source separation toolkit",
- long_description=long_description,
- long_description_content_type="text/markdown",
- license="MIT",
- python_requires=">=3.6",
- install_requires=["numpy", "torchaudio>=0.9.0", "torch>=1.9.0", "tqdm"],
- extras_require={
- "asteroid": ["asteroid-filterbanks>=0.3.2"],
- "tests": [
- "pytest",
- "musdb>=0.4.0",
- "museval>=0.4.0",
- "asteroid-filterbanks>=0.3.2",
- "onnx",
- "tqdm",
- ],
- "stempeg": ["stempeg"],
- "evaluation": ["musdb>=0.4.0", "museval>=0.4.0"],
- },
- entry_points={"console_scripts": ["umx=openunmix.cli:separate"]},
- packages=find_packages(),
- include_package_data=True,
- classifiers=[
- "Development Status :: 4 - Beta",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "License :: OSI Approved :: MIT License",
- "Operating System :: OS Independent",
- ],
-)
| Hub loader doesn't use wiener_win_len argument value
I'm not sure if this is intentional or not but the `wiener_win_len` argument is not used by the `hub_loader` in the following [code](https://github.com/sigsep/open-unmix-pytorch/blob/master/openunmix/utils.py).
```python
# otherwise we load the separator from torchhub
else:
hub_loader = getattr(openunmix, model_str_or_path)
separator = hub_loader(
targets=targets,
device=device,
pretrained=True,
niter=niter,
residual=residual,
filterbank=filterbank,
)
```
## To Reproduce
Call the command line with default and different `--wiener-win-len` value such as `600` and the outputs will be identical.
| @papahabla good catch. Are you able to provide a PR to fix this? | 2024-04-15T15:27:33 | 0.0 | [] | [] |
||
htjb/margarine | htjb__margarine-61 | 3b0ce70ea91499440b59324c4f2bf147bdf51810 | diff --git a/README.rst b/README.rst
index 8130eeb..e072cbc 100755
--- a/README.rst
+++ b/README.rst
@@ -7,7 +7,7 @@ Introduction
:margarine: Marginal Bayesian Statistics
:Authors: Harry T.J. Bevins
-:Version: 1.2.7
+:Version: 1.2.8
:Homepage: https://github.com/htjb/margarine
:Documentation: https://margarine.readthedocs.io/
diff --git a/requirements.txt b/requirements.txt
index fb504ca..758aeab 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,8 @@
numpy
-tensorflow-macos; sys_platform == 'darwin'
-tensorflow; sys_platform != 'darwin'
-tensorflow_probability
+tensorflow-macos<2.14.0; sys_platform == 'darwin'
+tensorflow<2.14.0; sys_platform != 'darwin'
+tensorflow_probability<0.21.0
anesthetic
scipy
-pandas
scikit-learn
tqdm
| Temperamental tests
Some of the tests occasionally fail because of poor training.
| Okay, in the tests we calculate the accuracy of the MAFs and I think KDEs too by running a series of two sample KS tests between samples drawn from the DE and the original samples used to train. Occasionally one of these KS tests fails because the training is poor (training varies with the random seed since the weights are initialised differently).
A simpler thing to do would be a KS test between the predicted distributions of log-probability vs the log-probability of the original samples. Might be more robust. Also I think the tests currently work with a NS run but should use something from the scipy.stats package.
Also appears there is a bug with tensorflow too after their recent updates (identified in #59 I think). | 2024-04-02T15:05:04 | 0.0 | [] | [] |
||
htjb/margarine | htjb__margarine-24 | 35243dfb12d7c3a4b52c00d38646aed93a80eefc | diff --git a/README.rst b/README.rst
index 0d2233e..ba40768 100755
--- a/README.rst
+++ b/README.rst
@@ -7,7 +7,7 @@ Introduction
:margarine: Marginal Bayesian Statistics
:Authors: Harry T.J. Bevins
-:Version: 0.5.0
+:Version: 0.5.1
:Homepage: https://github.com/htjb/margarine
:Documentation: https://margarine.readthedocs.io/
diff --git a/margarine/maf.py b/margarine/maf.py
index 302248b..fc557b1 100755
--- a/margarine/maf.py
+++ b/margarine/maf.py
@@ -207,17 +207,49 @@ def clustering_call(self):
if self.cluster_number is None:
from sklearn.metrics import silhouette_score
- ks = np.arange(2, 101)
+ ks = np.arange(2, 21)
losses = []
for k in ks:
kmeans = KMeans(k, random_state=0)
labels = kmeans.fit(self.theta).predict(self.theta)
losses.append(-silhouette_score(self.theta, labels))
losses = np.array(losses)
- self.cluster_number = ks[np.where(losses == losses.min())[0][0]]
+ minimum_index = np.argmin(losses)
+ self.cluster_number = ks[minimum_index]
kmeans = KMeans(self.cluster_number, random_state=0)
self.cluster_labels = kmeans.fit(self.theta).predict(self.theta)
+
+ if self.cluster_number == 20:
+ warnings.warn("The number of clusters is 20. This is the maximum "+
+ "number of clusters that can be used. If you " +
+ "require more clusters, please specify the " +
+ "'cluster_number' kwarg. margarine will continue "+
+ "with 20 clusters.")
+
+ if np.array(list(self.cluster_labels)).dtype == 'float':
+ # convert cluster labels to integers
+ self.cluster_labels = self.cluster_labels.astype(int)
+ # count the number of times a cluster label appears in cluster_labels
+ self.cluster_count = np.bincount(self.cluster_labels)
+ # While loop to make sure clusters are not too small
+ while self.cluster_count.min() < 100:
+ warnings.warn("One or more clusters are too small " +
+ "(n_cluster < 100). " +
+ "Reducing the number of clusters by 1.")
+ minimum_index -= 1
+ self.cluster_number = ks[minimum_index]
+ kmeans = KMeans(self.cluster_number, random_state=0)
+ self.cluster_labels = kmeans.fit(self.theta).predict(self.theta)
+ self.cluster_count = np.bincount(self.cluster_labels)
+ if self.cluster_number == 2:
+ # break if two clusters
+ warnings.warn("The number of clusters is 2. This is the " +
+ "minimum number of clusters that can be used. " +
+ "Some clusters may be too small and the " +
+ "train/test split may fail." +
+ "Try running without clusting. ")
+ break
self.n, split_theta, self.new_theta_max = [], [], []
self.new_theta_min, split_sample_weights = [], []
diff --git a/setup.py b/setup.py
index 86e587e..4f2e41a 100755
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ def readme(short=False):
setup(
name='margarine',
- version='0.5.0',
+ version='0.5.1',
description='margarine: Posterior Sampling and Marginal Bayesian Statistics',
long_description=readme(),
author='Harry T. J. Bevins',
| Maximum allowed cluster number
The maximum allowed number of clusters is quite high, which increases the run time. Might be better to cap this at 20 and print a warning if it caps out, suggesting that the user can perform the clustering externally to margarine and provide a cluster number and cluster labels.
Clustering fails when cluster size is less than 5 samples
If cluster size is less than 5 we can't split the data into testing and training because we ask for test set to be 20% of total cluster size.
| 2023-06-28T11:30:17 | 0.0 | [] | [] |
|||
jupyterhub/batchspawner | jupyterhub__batchspawner-254 | f7de5ad0084e78dea799e416facc0309ea0e2902 | diff --git a/batchspawner/batchspawner.py b/batchspawner/batchspawner.py
index 7c2836c..4e81228 100644
--- a/batchspawner/batchspawner.py
+++ b/batchspawner/batchspawner.py
@@ -448,6 +448,13 @@ async def start(self):
# don't actually run the single-user server yet.
if hasattr(self, "mock_port"):
self.port = self.mock_port
+ # Check if job is still running
+ status = await self.poll()
+ if status:
+ raise RuntimeError(
+ "The Jupyter batch job started"
+ " but died before launching the single-user server."
+ )
self.db.commit()
self.log.info(
| batch jobs that started but failed before launching the server leave the hub on unrecoverable state
### Bug description
Once the job scheduler reports that the job is running, batchspawner waits indefinitely to get the server port from the single-user server.
https://github.com/jupyterhub/batchspawner/blob/0b15e4fd815fc07b0b8ec7330a7a5997a4b4badb/batchspawner/batchspawner.py#L455-L460
If the job fails to launch the server for any reason and stops, batchspawner will continue to wait until `start_timeout` is reached.
#### Expected behaviour
Batchspawner should catch failed jobs even it they already started and abort `Spawner.start()`.
#### Actual behaviour
JupyterHub waits until `start_timeout` and prints the usual timeout error. However, at this point it is impossible to start another server. Stopping the server does not work (as it is not running) and the hub will show a popup saying that a new server cannot be started because one is already pending. JupyterHub has to be restarted.
### How to reproduce
1. make a test `batch_script` with `exit 1` as the first script command
2. launch the server as usual with batchspawner
### Your personal set up
- Version(s):
- Python v3.8.10
- Jupyterhub v2.3.1
- Batchspawner v1.2.0
<details><summary>Full environment</summary>
<!-- For reproduction, it's useful to have the full environment. For example, the output of `pip freeze` or `conda list` --->
```
alembic @ file:///tmp/wheelhouse/alembic-1.8.0-py3-none-any.whl
async-generator @ file:///tmp/wheelhouse/async_generator-1.10-py3-none-any.whl
attrs @ file:///tmp/wheelhouse/attrs-21.4.0-py2.py3-none-any.whl
batchspawner==1.2.0
certifi @ file:///tmp/wheelhouse/certifi-2022.5.18.1-py3-none-any.whl
certipy @ file:///tmp/wheelhouse/certipy-0.1.3-py3-none-any.whl
cffi @ file:///tmp/wheelhouse/cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl
charset-normalizer @ file:///tmp/wheelhouse/charset_normalizer-2.0.12-py3-none-any.whl
cryptography @ file:///tmp/wheelhouse/cryptography-37.0.2-cp36-abi3-manylinux_2_24_x86_64.whl
entrypoints @ file:///tmp/wheelhouse/entrypoints-0.4-py3-none-any.whl
future==0.18.2
greenlet @ file:///tmp/wheelhouse/greenlet-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
idna @ file:///tmp/wheelhouse/idna-3.3-py3-none-any.whl
importlib-metadata @ file:///tmp/wheelhouse/importlib_metadata-4.11.4-py3-none-any.whl
importlib-resources @ file:///tmp/wheelhouse/importlib_resources-5.7.1-py3-none-any.whl
Jinja2 @ file:///tmp/wheelhouse/Jinja2-3.1.2-py3-none-any.whl
jsonschema @ file:///tmp/wheelhouse/jsonschema-4.6.0-py3-none-any.whl
jupyter-telemetry @ file:///tmp/wheelhouse/jupyter_telemetry-0.1.0-py3-none-any.whl
jupyterhub @ file:///tmp/wheelhouse/jupyterhub-2.3.1-py3-none-any.whl
jupyterhub-moss @ https://github.com/vub-hpc/jupyterhub_moss/archive/refs/tags/v5.5.1.tar.gz
Mako @ file:///tmp/wheelhouse/Mako-1.2.0-py3-none-any.whl
MarkupSafe @ file:///tmp/wheelhouse/MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
mock==4.0.3
oauthenticator==15.1.0
oauthlib @ file:///tmp/wheelhouse/oauthlib-3.2.0-py3-none-any.whl
packaging @ file:///tmp/wheelhouse/packaging-21.3-py3-none-any.whl
pamela @ file:///tmp/wheelhouse/pamela-1.0.0-py2.py3-none-any.whl
prometheus-client @ file:///tmp/wheelhouse/prometheus_client-0.14.1-py3-none-any.whl
pycparser @ file:///tmp/wheelhouse/pycparser-2.21-py2.py3-none-any.whl
pycurl==7.43.0.2
pyOpenSSL @ file:///tmp/wheelhouse/pyOpenSSL-22.0.0-py2.py3-none-any.whl
pyparsing @ file:///tmp/wheelhouse/pyparsing-3.0.9-py3-none-any.whl
pyrsistent @ file:///tmp/wheelhouse/pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
python-dateutil @ file:///tmp/wheelhouse/python_dateutil-2.8.2-py2.py3-none-any.whl
python-json-logger @ file:///tmp/wheelhouse/python_json_logger-2.0.2-py3-none-any.whl
requests @ file:///tmp/wheelhouse/requests-2.27.1-py2.py3-none-any.whl
ruamel.yaml @ file:///tmp/wheelhouse/ruamel.yaml-0.17.21-py3-none-any.whl
ruamel.yaml.clib @ file:///tmp/wheelhouse/ruamel.yaml.clib-0.2.6-cp38-cp38-manylinux1_x86_64.whl
six @ file:///tmp/wheelhouse/six-1.16.0-py2.py3-none-any.whl
SQLAlchemy @ file:///tmp/wheelhouse/SQLAlchemy-1.4.37-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl
tornado @ file:///tmp/wheelhouse/tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl
traitlets @ file:///tmp/wheelhouse/traitlets-5.2.2.post1-py3-none-any.whl
urllib3 @ file:///tmp/wheelhouse/urllib3-1.26.9-py2.py3-none-any.whl
vsc-base==3.4.9
vsc-config @ file:///usr/local/src/vsc-config-master.tar.gz
vsc-install==0.17.26
zipp @ file:///tmp/wheelhouse/zipp-3.8.0-py3-none-any.whl
```
</details>
<details><summary>Configuration</summary>
This issue is independent of any configuration settings.
</details>
<details><summary>Logs</summary>
```
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:26.019 JupyterHub user:728] Calling Spawner.start for vsc10122
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.023 JupyterHub spawner:313] Used environment:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.023 JupyterHub spawner:314] Used default URL: /lab
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.048 JupyterHub batchspawner:291] Spawner submitting job using sbatch --parsable
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.049 JupyterHub batchspawner:292] Spawner submitted script:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #!/bin/bash
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --job-name=spawner-jupyterhub
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --chdir=/user/brussel/101/vsc10122
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --output=/dev/null
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --export=PATH,LANG,LC_ALL,JUPYTERHUB_API_TOKEN,JPY_API_TOKEN,JUPYTERHUB_CLIENT_ID,JUPYTERHUB_HOST,JUPYTERHUB_OAUTH_CALLBACK_URL,JUPYTERHUB_OAUTH_SCOPES,JUPYTERHUB_USER,JUPYTERHUB_SERVER_NAME,JUPYTERHUB_API_URL,JUPYTERHUB_ACTIVITY_URL,JUPYTERHUB_BASE_URL,JUPYTERHUB_SERVICE_PREFIX,JUPYTERHUB_SERVICE_URL,JUPYTERHUB_DEFAULT_URL,USER,HOME,SHELL
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --get-user-env=L
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --partition=broadwell
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --time=1:00:00
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: #SBATCH --cpus-per-task=1
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: set -euo pipefail
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: trap 'echo SIGTERM received' TERM
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: exit 1
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: module load JupyterHub/2.3.1-GCCcore-10.3.0
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: srun --export=ALL batchspawner-singleuser jupyterhub-singleuser
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: echo "JupyterLab server ended gracefully"
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: EOF
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.639 JupyterHub batchspawner:295] Job submitted. cmd: sbatch --parsable output: 7837730;hydra
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:26.648 JupyterHub batchspawner:322] Spawner querying job: squeue -h -j 7837730 -o \'%T %B\'
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [W 2022-12-14 14:05:26.796 JupyterHub base:188] Rolling back dirty objects IdentitySet([<Server(:0)>])
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.819 JupyterHub log:189] 302 POST /hub/spawn -> /hub/spawn-pending/vsc10122 ([email protected]) 1032.47ms
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:26.849 JupyterHub scopes:491] Checking access via scope servers
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:26.850 JupyterHub scopes:402] Argument-based access to /hub/spawn-pending/vsc10122 via servers
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.856 JupyterHub pages:401] vsc10122 is pending spawn
Dec 14 15:05:26 jupyterhub01.phoenix.os conmon[17742]: [I 2022-12-14 14:05:26.870 JupyterHub log:189] 200 GET /hub/spawn-pending/vsc10122 ([email protected]) 30.26ms
Dec 14 15:05:27 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:27.199 JupyterHub batchspawner:437] Job 7837730 still pending
Dec 14 15:05:27 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:27.339 JupyterHub scopes:491] Checking access via scope read:servers
Dec 14 15:05:27 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:27.339 JupyterHub scopes:402] Argument-based access to /hub/api/users/vsc10122/server/progress via read:servers
Dec 14 15:05:27 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:05:27.706 JupyterHub batchspawner:322] Spawner querying job: squeue -h -j 7837730 -o \'%T %B\'
Dec 14 15:05:35 jupyterhub01.phoenix.os conmon[17742]: [W 2022-12-14 14:05:35.798 JupyterHub base:1063] User vsc10122 is slow to start (timeout=10)
Dec 14 15:10:09 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:10:09.386 JupyterHub proxy:821] Proxy: Fetching GET http://127.0.0.1:8001/api/routes
Dec 14 15:10:09 jupyterhub01.phoenix.os conmon[17742]: 14:10:09.400 [ConfigProxy] info: 200 GET /api/routes
Dec 14 15:10:09 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:10:09.409 JupyterHub proxy:346] Checking routes
Dec 14 15:15:09 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:15:09.386 JupyterHub proxy:821] Proxy: Fetching GET http://127.0.0.1:8001/api/routes
Dec 14 15:15:09 jupyterhub01.phoenix.os conmon[17742]: 14:15:09.397 [ConfigProxy] info: 200 GET /api/routes
Dec 14 15:15:09 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:15:09.401 JupyterHub proxy:346] Checking routes
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: [W 2022-12-14 14:15:26.023 JupyterHub user:807] vsc10122's server failed to start in 600 seconds, giving up.
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: Common causes of this timeout, and debugging tips:
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: 1. Everything is working, but it took too long.
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: To fix: increase `Spawner.start_timeout` configuration
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: to a number of seconds that is enough for spawners to finish starting.
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: 2. The server didn't finish starting,
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: or it crashed due to a configuration issue.
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: Check the single-user server's logs for hints at what needs fixing.
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]:
Dec 14 15:15:26 jupyterhub01.phoenix.os conmon[17742]: [D 2022-12-14 14:15:26.033 JupyterHub user:913] Stopping vsc10122
```
</details>
| 2022-12-14T16:03:19 | 0.0 | [] | [] |
|||
morphocut/morphocut | morphocut__morphocut-113 | 95f81248ff8ff242b335949350996b5152aea2fe | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index ba6aed1..1a0463c 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -23,6 +23,8 @@ Added
- Added support for Python 3.9 and 3.10 (#87).
+- Added `scalebar`: Append scalebars to images. (#113)
+
Changed
~~~~~~~
diff --git a/docs/conf.py b/docs/conf.py
index a2c55da..999d61c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -43,6 +43,10 @@
"sphinx.ext.autosummary",
]
+# Mock modules so that they don't need to be available
+# just to generate the documentstion.
+autodoc_mock_imports = ["matplotlib"]
+
napoleon_use_param = False
napoleon_use_keyword = False
napoleon_use_rtype = False
diff --git a/docs/index.rst b/docs/index.rst
index 6d53de5..88bf543 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -31,6 +31,7 @@ User Guide
batch
filters
stat
+ scalebar
parallel
formats
profiling
diff --git a/docs/scalebar.rst b/docs/scalebar.rst
new file mode 100644
index 0000000..d230606
--- /dev/null
+++ b/docs/scalebar.rst
@@ -0,0 +1,7 @@
+Scalebar
+========
+
+This module provides functionality for drawing and adding a scalebar to an image.
+
+.. automodule:: morphocut.scalebar
+ :members:
diff --git a/setup.py b/setup.py
index 0866d17..79dbadb 100644
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
"codecov",
# Optional dependencies
"parse",
+ "matplotlib", # For FontManager in scalebar
],
"docs": [
"sphinx ~= 2.2",
diff --git a/src/morphocut/pipeline/__init__.py b/src/morphocut/pipeline/__init__.py
deleted file mode 100644
index b9f4203..0000000
--- a/src/morphocut/pipeline/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-Processing nodes are generators.
-"""
-
-from morphocut.pipeline.object_scale import ObjectScale
diff --git a/src/morphocut/pipeline/base.py b/src/morphocut/pipeline/base.py
deleted file mode 100644
index e26215c..0000000
--- a/src/morphocut/pipeline/base.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from abc import abstractmethod, ABC
-
-__all__ = ["NodeBase", "SimpleNodeBase"]
-
-
-class NodeBase(ABC):
- """
- Base class for all pipeline nodes.
- """
- @abstractmethod
- def __call__(self, input=None): # pragma: no cover
- """
- Process the input stream
- """
- while False:
- yield None
-
- # @abstractmethod
- # def get_scheme(self):
- # """
- # Get the Node scheme
- # """
- # return {'fields': []}
-
-
-class SimpleNodeBase(NodeBase):
- """
- Base class for simple pipeline nodes that operate on each object individually.
- """
-
- def __init__(self, input_facet, output_facet):
- self.input_facet = input_facet
- self.output_facet = output_facet
-
- def __call__(self, input=None):
- for obj in input:
- obj["facets"][self.output_facet] = self.process(
- obj["facets"][self.input_facet])
- yield obj
-
- @abstractmethod
- def process(self, facet):
- """
- Process the facet and return a new one.
- """
- pass
diff --git a/src/morphocut/pipeline/object_scale.py b/src/morphocut/pipeline/object_scale.py
deleted file mode 100644
index d1e1598..0000000
--- a/src/morphocut/pipeline/object_scale.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import numpy as np
-import math
-import cv2
-
-from morphocut.pipeline import NodeBase
-
-
-class ObjectScale(NodeBase):
- '''
- Parameters
- ----------
- input_facets : list
- The facets onto which a scale should be drawn.
- output_facets : list
- The facets where the outputs should be saved. This corresponds to the input_facets element-wise.
- pixels_per_mm : int
- The number of pixels per milimeter in the image.
- scale_size : float
- The size of the scale that should be drawn in milimeter.
- '''
-
- def __init__(self, input_facets, output_facets, pixels_per_mm=200, scale_size=0.1):
- self.scale_size = scale_size
- self.pixels_per_mm = pixels_per_mm
- self.input_facets = input_facets
- self.output_facets = output_facets
- if not len(self.input_facets) == len(self.output_facets):
- raise ValueError(
- "The number of input facets and output facets must be equal.")
-
- def __call__(self, input=None):
- for obj in input:
- for i in range(len(self.input_facets)):
- input_facet = self.input_facets[i]
- output_facet = self.output_facets[i]
-
- obj["facets"][output_facet] = self.process(
- obj["facets"][input_facet])
- yield obj
-
- def process(self, facet):
- image = facet["image"]
-
- width = image.shape[1]
- scale_x = 5
-
- scale_width = int(self.pixels_per_mm * self.scale_size)
- scale_y = -5
-
- complete_width = max(width, scale_x + scale_width + 1)
-
- # pad image if scale is too big
- if (complete_width > width):
- height = image.shape[0]
- pad_width = math.ceil((complete_width - width) / 2)
- pad_array = np.ones((height, pad_width, 3), dtype=np.int8) * 255
- pad_array_copy = np.ones(
- (height, pad_width, 3), dtype=np.int8) * 255
- image = np.append(pad_array_copy, image, axis=1)
- image = np.append(image, pad_array, axis=1)
-
- width = image.shape[1]
-
- pad_array = np.ones((31, width, 3), dtype=np.int8) * 255
- image = np.append(image, pad_array, axis=0)
-
- # draw scale line
- image[scale_y, scale_x:scale_x
- + scale_width] = np.zeros(3, dtype=np.int8)
- # draw scale line hooks
- image[scale_y - 3:scale_y, scale_x] = np.zeros(3, dtype=np.int8)
- image[scale_y - 3:scale_y, scale_x
- + (scale_width - 1)] = np.zeros(3, dtype=np.int8)
- # print(image)
-
- cv2.putText(image, '{}mm'.format(self.scale_size), (5, (image.shape[0] + scale_y) - 5), cv2.FONT_HERSHEY_SIMPLEX,
- 0.25, (0, 0, 0), lineType=cv2.LINE_AA)
-
- return {
- "image": image
- }
diff --git a/src/morphocut/scalebar.py b/src/morphocut/scalebar.py
new file mode 100644
index 0000000..f4d610b
--- /dev/null
+++ b/src/morphocut/scalebar.py
@@ -0,0 +1,146 @@
+from functools import lru_cache
+
+import matplotlib.font_manager
+import numpy as np
+import PIL.Image
+import PIL.ImageDraw
+import PIL.ImageFont
+
+from morphocut.core import Node, Output, RawOrVariable, ReturnOutputs
+
+
+@lru_cache(128)
+def draw_scalebar(
+ length_unit,
+ px_per_unit=1,
+ unit="px",
+ mode="L",
+ fg_color=0,
+ bg_color=255,
+ font_family="sans",
+ margin=10,
+):
+ """
+ Draw a scalebar image of specified length and units.
+
+ Args:
+ length_unit: The length of the scalebar in the specified unit.
+ px_per_unit: The ratio of pixels to units for the scalebar.
+ unit: The unit of length for the scalebar.
+ mode: The color mode to use for the PIL image.
+ fg_color: The color to use for the scalebar and text.
+ bg_color: The color to use for the background.
+ font_family: The font family to use for the scalebar text.
+ margin: The margin to use around the scalebar.
+
+ Returns:
+ A numpy array representing the image of the scalebar.
+ """
+
+ length_px = round(length_unit * px_per_unit)
+
+ h = 32
+ w = length_px + 2 * margin
+ img = PIL.Image.new(mode, (w, h), bg_color)
+
+ font_fn = matplotlib.font_manager.FontManager().findfont(font_family)
+ fnt = PIL.ImageFont.truetype(font_fn, 12)
+ d = PIL.ImageDraw.Draw(img)
+
+ d.text((10, 5), f"{length_unit:.0f}{unit}", font=fnt, fill=fg_color)
+
+ d.line(
+ [
+ (margin, 28),
+ (margin, 25),
+ (margin + length_px, 25),
+ (margin + length_px, 28),
+ ],
+ fill=fg_color,
+ )
+
+ return np.asarray(img)
+
+
+@ReturnOutputs
+@Output("image")
+class DrawScalebar(Node):
+ """
+ Append a scalebar to an image.
+
+ Args:
+ image: The image to append the scalebar to.
+ length_unit: The length of the scalebar in the specified unit.
+ px_per_unit: The ratio of pixels to units for the scalebar.
+ unit: The unit of length for the scalebar.
+ fg_color: The color to use for the scalebar and text.
+ bg_color: The color to use for the background.
+ font_family: The font family to use for the scalebar text.
+ margin: The margin to use around the scalebar.
+ """
+
+ def __init__(
+ self,
+ image: RawOrVariable[np.ndarray],
+ length_unit,
+ px_per_unit=1,
+ unit="px",
+ fg_color=0,
+ bg_color=255,
+ font_family="sans",
+ margin=10,
+ ):
+ super().__init__()
+
+ self.image = image
+
+ self.length_unit = length_unit
+ self.px_per_unit = px_per_unit
+ self.unit = unit
+ self.fg_color = fg_color
+ self.bg_color = bg_color
+ self.font_family = font_family
+ self.margin = margin
+
+ def transform(
+ self,
+ image: np.ndarray,
+ length_unit,
+ px_per_unit,
+ unit,
+ fg_color,
+ bg_color,
+ font_family,
+ margin,
+ ):
+ # TODO: Convert colors (see image.py)
+
+ # Calculate an alpha-mask for the scalebar
+ scalebar = draw_scalebar(
+ length_unit=length_unit,
+ px_per_unit=px_per_unit,
+ unit=unit,
+ mode="F",
+ fg_color=1,
+ bg_color=0,
+ font_family=font_family,
+ margin=margin,
+ )
+
+ # Construct canvas that can contain the image and the scalebar
+ cheight = image.shape[0] + scalebar.shape[0]
+ cwidth = max(image.shape[1], scalebar.shape[1])
+ canvas = np.full(
+ (cheight, cwidth) + image.shape[2:], bg_color, dtype=image.dtype
+ )
+
+ # Paste image (centered)
+ offs = (cwidth - image.shape[1]) // 2
+ canvas[: image.shape[0], offs : offs + image.shape[1]] = image
+
+ # Paste scalebar (aligned left)
+ canvas[
+ image.shape[0] : image.shape[0] + scalebar.shape[0], : scalebar.shape[1]
+ ] = (scalebar * fg_color) + (1 - scalebar) * bg_color
+
+ return canvas
| ImageCaption
In `pipeline/object_scale.py`, there is `ObjectScale`, the last remnant of the initial MorphoCut version. This needs to be implemented in the `image` module. Instead of `cv2.putText`, [PIL](https://pillow.readthedocs.io/en/stable/reference/ImageDraw.html) should be used. (Because Pillow is already a dependency and OpenCV is not.)
| Hey, can I get this issue assigned to me??
Of cause! Just fork the project and create a pull request. Let me know if you run into any issues.
is this issue resolved ?
Not at all.
(Where is all the attention coming from all of a sudden?) | 2023-05-09T19:08:56 | 0.0 | [] | [] |
||
morphocut/morphocut | morphocut__morphocut-61 | 01a051ea0d1b6d1f2529698ad7f82b47d4583886 | diff --git a/src/morphocut/parallel.py b/src/morphocut/parallel.py
index b5e6b50..b4f1ee1 100644
--- a/src/morphocut/parallel.py
+++ b/src/morphocut/parallel.py
@@ -3,6 +3,7 @@
import multiprocessing.synchronize
import os
import queue
+import signal
import sys
import threading
import traceback
@@ -61,14 +62,16 @@ def _put_until_stop(queue_, stop_event, obj):
return True
-def _get_until_stop(queue_, stop_event):
+def _get_until_stop(queue_: multiprocessing.Queue, stop_event, block=True):
while True:
if stop_event.is_set():
raise _Stop()
try:
return queue_.get(True, QUEUE_POLL_INTERVAL)
except queue.Empty:
- continue
+ if block:
+ continue
+ raise
else:
break
@@ -231,7 +234,18 @@ def _queue_filler():
continue
while True:
- output_object = _get_until_stop(oqu, stop_event)
+ try:
+ output_object = _get_until_stop(
+ oqu, stop_event, block=False
+ )
+ except queue.Empty:
+ # Check that the worker process is still running
+ if not workers[i].is_alive():
+ exitcode = workers[i].exitcode
+ raise RuntimeError(
+ f"Worker {i+1} died unexpectedly. Exit code: {_exitcode_to_signame.get(exitcode,exitcode)}"
+ )
+ continue
if output_object is _Signal.END:
workers_running[i] = False
@@ -259,3 +273,14 @@ def _queue_filler():
if upstream_exception:
upstream_exception[0].reraise()
+
+
+#
+# Give names to some return codes
+#
+
+_exitcode_to_signame = {}
+
+for name, signum in list(signal.__dict__.items()):
+ if name[:3] == "SIG" and "_" not in name:
+ _exitcode_to_signame[-signum] = f"-{name}"
| ParallelPipeline hangs indefinitely if worker dies
| 2020-08-07T18:28:45 | 0.0 | [] | [] |
|||
ipc-sim/ipc-toolkit | ipc-sim__ipc-toolkit-62 | 07514a7a62910f62ea71bea0d4150275ab8c983b | diff --git a/.github/workflows/continuous.yml b/.github/workflows/continuous.yml
index a98802c70..f2076fce6 100644
--- a/.github/workflows/continuous.yml
+++ b/.github/workflows/continuous.yml
@@ -16,7 +16,7 @@ env:
CTEST_PARALLEL_LEVEL: 2
jobs:
- Unix:
+ Build:
name: ${{ matrix.name }} (${{ matrix.config }})
runs-on: ${{ matrix.os }}
strategy:
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 31cf2d68f..09d852ff4 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -66,6 +66,5 @@ jobs:
- name: Deploy
uses: JamesIves/[email protected]
with:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- BRANCH: gh-pages # The branch the action should deploy to.
- FOLDER: docs/build/html # The folder the action should deploy.
+ branch: gh-pages # The branch the action should deploy to.
+ folder: docs/build/html # The folder the action should deploy.
diff --git a/.gitignore b/.gitignore
index deaadbac9..fe7342623 100644
--- a/.gitignore
+++ b/.gitignore
@@ -243,6 +243,7 @@ core*
docs/source/doxyoutput
docs/source/api
!docs/Makefile
+tests/data/ccd-queries
tests/data/slow-broadphase-ccd
tests/data/ccd-failure
tests/data/friction/chain
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f70473ffe..106e85440 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -70,6 +70,13 @@ option(IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION "Use rational edge-triangle inters
option(IPC_TOOLKIT_WITH_ROBIN_MAP "Use Tessil's robin-map rather than std maps" ON)
option(IPC_TOOLKIT_WITH_ABSEIL "Use Abseil's hash functions" ON)
+include(CMakeDependentOption)
+cmake_dependent_option(IPC_TOOLKIT_TEST_CCD_BENCHMARK "Enable CCD benchmark test" OFF "IPC_TOOLKIT_BUILD_TESTS" OFF)
+if(IPC_TOOLKIT_TEST_CCD_BENCHMARK)
+ set(IPC_TOOLKIT_CCD_BENCHMARK_DIR "" CACHE PATH "Path to the CCD benchmark directory")
+ set(IPC_TOOLKIT_CCD_NEW_BENCHMARK_DIR "" CACHE PATH "Path to the new CCD benchmark directory")
+endif()
+
# Set default minimum C++ standard
if(IPC_TOOLKIT_TOPLEVEL_PROJECT)
set(CMAKE_CXX_STANDARD 17)
@@ -150,9 +157,10 @@ endif()
include(spdlog)
target_link_libraries(ipc_toolkit PUBLIC spdlog::spdlog)
+# rational-cpp (requires GMP)
if(IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION)
- include(gmp)
- target_link_libraries(ipc_toolkit PUBLIC GMP::GMP)
+ include(rational_cpp)
+ target_link_libraries(ipc_toolkit PUBLIC rational::rational)
endif()
# Sweep and Tiniest Queue and CCD
diff --git a/README.md b/README.md
index c5cde8305..b641b4df3 100644
--- a/README.md
+++ b/README.md
@@ -72,9 +72,9 @@ The following libraries are used in this project:
* [Abseil](https://abseil.io/): hashing utilities
* Enable by using the CMake option `IPC_TOOLKIT_WITH_ABSEIL`
* Enabled by default
-* [GMP](https://gmplib.org/): rational arithmetic used for exact intersection checks
+* [rational-cpp](https://github.io/zfergus/rational-cpp): rational arithmetic used for exact intersection checks
* Enable by using the CMake option `IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION`
- * GMP must be installed at a system level
+ * Requires [GMP](https://gmplib.org/) to be installed at a system level
* [Etienne Vouga's Collision Detection Library](https://github.com/evouga/collisiondetection): inexact CCD
* Included for comparison with the original IPC library
* Enable by disabling the CMake option `IPC_TOOLKIT_WITH_CORRECT_CCD`
@@ -145,4 +145,4 @@ Additionally, you can cite the original IPC paper:
## License
-MIT License © 2020, the IPC-Sim organization (See <a href="https://github.com/ipc-sim/ipc-toolkit/blob/main/LICENSE"><code>LICENSE</code></a> for details)
+MIT License © 2020, the IPC-Sim organization (See <a href="https://github.com/ipc-sim/ipc-toolkit/blob/main/LICENSE"><code>LICENSE</code></a> for details).
diff --git a/cmake/find/FindGMP.cmake b/cmake/find/FindGMP.cmake
deleted file mode 100644
index b928326ad..000000000
--- a/cmake/find/FindGMP.cmake
+++ /dev/null
@@ -1,20 +0,0 @@
-# Try to find the GMP librairies
-# GMP_FOUND - system has GMP lib
-# GMP_INCLUDE_DIRS - the GMP include directory
-# GMP_LIBRARIES - Libraries needed to use GMP
-
-if (GMP_INCLUDE_DIRS AND GMP_LIBRARIES)
- # Already in cache, be silent
- set(GMP_FIND_QUIETLY TRUE)
-endif (GMP_INCLUDE_DIRS AND GMP_LIBRARIES)
-
-find_path(GMP_INCLUDE_DIRS NAMES gmp.h PATHS $ENV{GMP_INC} ${GMP_WINDOWS_PATH})
-find_library(GMP_LIBRARIES NAMES gmp libgmp PATHS $ENV{GMP_LIB} ${GMP_WINDOWS_PATH})
-find_library(GMPXX_LIBRARIES NAMES gmpxx libgmpxx PATHS $ENV{GMP_LIB} ${GMP_WINDOWS_PATH})
-#MESSAGE(STATUS "GMP libs: " ${GMP_LIBRARIES} " " ${GMPXX_LIBRARIES} )
-
-include(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(GMP DEFAULT_MSG GMP_INCLUDE_DIRS GMP_LIBRARIES)
-
-mark_as_advanced(GMP_INCLUDE_DIRS GMP_LIBRARIES)
-message(STATUS "GMP libs: " ${GMP_LIBRARIES} " " ${GMP_INCLUDE_DIRS})
diff --git a/cmake/recipes/abseil.cmake b/cmake/recipes/abseil.cmake
index 5dabe8d5d..10113ed79 100644
--- a/cmake/recipes/abseil.cmake
+++ b/cmake/recipes/abseil.cmake
@@ -1,3 +1,5 @@
+# Abseil (https://github.com/abseil/abseil-cpp)
+# License: Apache 2.0
if(TARGET absl::flat_hash_map)
return()
endif()
diff --git a/cmake/recipes/catch2.cmake b/cmake/recipes/catch2.cmake
index 1cfe6caf3..4efb1cec2 100644
--- a/cmake/recipes/catch2.cmake
+++ b/cmake/recipes/catch2.cmake
@@ -1,14 +1,5 @@
-#
-# Copyright 2020 Adobe. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-#
+# Catch2 (https://github.com/catchorg/Catch2)
+# License: BSL-1.0
if(TARGET Catch2::Catch2)
return()
endif()
diff --git a/cmake/recipes/ccd_query_io.cmake b/cmake/recipes/ccd_query_io.cmake
new file mode 100644
index 000000000..18fa410ff
--- /dev/null
+++ b/cmake/recipes/ccd_query_io.cmake
@@ -0,0 +1,13 @@
+# CCD Query IO (https://github.com/Continuous-Collision-Detection/CCD-Query-IO)
+# License: MIT
+if(TARGET ccd_io::ccd_io)
+ return()
+endif()
+
+message(STATUS "Third-party: creating target 'ccd_io::ccd_io'")
+
+set(CCD_IO_DOWNLOAD_SAMPLE_QUERIES ON CACHE BOOL "Download sample CCD queries" FORCE)
+set(CCD_IO_SAMPLE_QUERIES_DIR "${PROJECT_SOURCE_DIR}/tests/data/ccd-queries/" CACHE PATH "Where should we download sample queries?")
+
+include(CPM)
+CPMAddPackage("gh:Continuous-Collision-Detection/CCD-Query-IO#36f6093af81a65acc27d9f05ad32d6b5729e8d15")
\ No newline at end of file
diff --git a/cmake/recipes/eigen.cmake b/cmake/recipes/eigen.cmake
index 0bcd2abc6..30992dee2 100644
--- a/cmake/recipes/eigen.cmake
+++ b/cmake/recipes/eigen.cmake
@@ -1,14 +1,5 @@
-#
-# Copyright 2019 Adobe. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-#
+# Eigen (https://gitlab.com/libeigen/eigen)
+# License: MPL 2.0
if(TARGET Eigen3::Eigen)
return()
endif()
diff --git a/cmake/recipes/evouga_ccd.cmake b/cmake/recipes/evouga_ccd.cmake
index 91b857139..69a1c760e 100644
--- a/cmake/recipes/evouga_ccd.cmake
+++ b/cmake/recipes/evouga_ccd.cmake
@@ -1,4 +1,5 @@
-# Etienne Vouga's CCD Library
+# Etienne Vouga's CCD Library (https://github.com/evouga/collisiondetection.git)
+# License: ???
if(TARGET evouga::ccd)
return()
endif()
@@ -15,11 +16,11 @@ CPMAddPackage(
# file(GLOB EVOUGA_CCD_SOURCE_FILES "${evccd_SOURCE_DIR}/src/*.cpp")
add_library(evouga_ccd
- "${evccd_SOURCE_DIR}/src/CTCD.cpp"
+ "${collisiondetection_SOURCE_DIR}/src/CTCD.cpp"
)
add_library(evouga::ccd ALIAS evouga_ccd)
-target_include_directories(evouga_ccd PUBLIC "${evccd_SOURCE_DIR}/include")
+target_include_directories(evouga_ccd PUBLIC "${collisiondetection_SOURCE_DIR}/include")
include(eigen)
target_link_libraries(evouga_ccd PUBLIC Eigen3::Eigen)
diff --git a/cmake/recipes/finite_diff.cmake b/cmake/recipes/finite_diff.cmake
index 5211de439..be4ba5455 100644
--- a/cmake/recipes/finite_diff.cmake
+++ b/cmake/recipes/finite_diff.cmake
@@ -1,3 +1,5 @@
+# finite-diff (https://github.com/zfergus/finite-diff)
+# License: MIT
if(TARGET finitediff::finitediff)
return()
endif()
diff --git a/cmake/recipes/gmp.cmake b/cmake/recipes/gmp.cmake
deleted file mode 100644
index 6c9068280..000000000
--- a/cmake/recipes/gmp.cmake
+++ /dev/null
@@ -1,18 +0,0 @@
-if(TARGET GMP::GMP)
- return()
-endif()
-
-message(STATUS "Third-party: creating targets 'GMP::GMP'")
-
-# We do not have a build recipe for this, so find it as a system installed library.
-find_package(GMP REQUIRED)
-
-if(NOT ${GMP_FOUND})
- MESSAGE(FATAL_ERROR "Unable to find GMP")
-endif()
-
-add_library(GMP_GMP INTERFACE)
-add_library(GMP::GMP ALIAS GMP_GMP)
-
-target_include_directories(GMP_GMP INTERFACE ${GMP_INCLUDE_DIRS})
-target_link_libraries(GMP_GMP INTERFACE ${GMP_LIBRARIES})
\ No newline at end of file
diff --git a/cmake/recipes/gpu_ccd.cmake b/cmake/recipes/gpu_ccd.cmake
index 81259d641..b38d20d74 100644
--- a/cmake/recipes/gpu_ccd.cmake
+++ b/cmake/recipes/gpu_ccd.cmake
@@ -1,3 +1,5 @@
+# CCD-GPU (https://github.com/dbelgrod/CCD-GPU)
+# License: MIT
if(TARGET gpu_ccd::gpu_ccd)
return()
endif()
diff --git a/cmake/recipes/json.cmake b/cmake/recipes/json.cmake
index 56cc1e45f..6ad2c7e7e 100644
--- a/cmake/recipes/json.cmake
+++ b/cmake/recipes/json.cmake
@@ -1,14 +1,5 @@
-#
-# Copyright 2020 Adobe. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-#
+# Nlohmann's JSON (https://github.com/nlohmann/json)
+# License: MIT
if(TARGET nlohmann_json::nlohmann_json)
return()
endif()
diff --git a/cmake/recipes/libigl.cmake b/cmake/recipes/libigl.cmake
index 93b4b5789..d1b85259e 100644
--- a/cmake/recipes/libigl.cmake
+++ b/cmake/recipes/libigl.cmake
@@ -1,3 +1,5 @@
+# libigl (https://github.com/libigl/libigl)
+# License: MPL-2.0
if(TARGET igl::core)
return()
endif()
@@ -9,4 +11,4 @@ set(LIBIGL_PREDICATES ON CACHE BOOL "Use exact predicates" FORCE)
include(eigen)
include(CPM)
-CPMAddPackage("gh:libigl/[email protected]")
+CPMAddPackage("gh:libigl/[email protected]")
\ No newline at end of file
diff --git a/cmake/recipes/onetbb.cmake b/cmake/recipes/onetbb.cmake
index 979f6dc19..318d6fdc2 100644
--- a/cmake/recipes/onetbb.cmake
+++ b/cmake/recipes/onetbb.cmake
@@ -1,14 +1,5 @@
-#
-# Copyright 2021 Adobe. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-#
+# oneTBB (https://github.com/oneapi-src/oneTBB)
+# License: Apache-2.0
if(TARGET TBB::tbb)
return()
endif()
diff --git a/cmake/recipes/pybind11.cmake b/cmake/recipes/pybind11.cmake
index e078f6e8e..d25797a13 100644
--- a/cmake/recipes/pybind11.cmake
+++ b/cmake/recipes/pybind11.cmake
@@ -1,14 +1,5 @@
-#
-# Copyright 2020 Adobe. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-#
+# pybind11 (https://github.com/pybind/pybind11)
+# License: BSD-style
if(TARGET pybind11::pybind11)
return()
endif()
diff --git a/cmake/recipes/rational_cpp.cmake b/cmake/recipes/rational_cpp.cmake
new file mode 100644
index 000000000..43e02d6a1
--- /dev/null
+++ b/cmake/recipes/rational_cpp.cmake
@@ -0,0 +1,10 @@
+# rational-cpp (https://github.com/zfergus/rational-cpp)
+# License: MIT
+if(TARGET rational::rational)
+ return()
+endif()
+
+message(STATUS "Third-party: creating target 'rational::rational'")
+
+include(CPM)
+CPMAddPackage("gh:zfergus/rational-cpp#687d4ea3436ada7231b8920f3cd5b02b438c21aa")
\ No newline at end of file
diff --git a/cmake/recipes/robin_map.cmake b/cmake/recipes/robin_map.cmake
index fa83d597d..713963384 100644
--- a/cmake/recipes/robin_map.cmake
+++ b/cmake/recipes/robin_map.cmake
@@ -1,3 +1,5 @@
+# robin-map (https://github.com/Tessil/robin-map)
+# License: MIT
if(TARGET tsl::robin_map)
return()
endif()
diff --git a/cmake/recipes/spdlog.cmake b/cmake/recipes/spdlog.cmake
index 90ca20dd5..beac4ce3c 100644
--- a/cmake/recipes/spdlog.cmake
+++ b/cmake/recipes/spdlog.cmake
@@ -1,14 +1,5 @@
-#
-# Copyright 2020 Adobe. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-#
+# spdlog (https://github.com/gabime/spdlog)
+# License: MIT
if(TARGET spdlog::spdlog)
return()
endif()
diff --git a/cmake/recipes/sweep_and_tiniest_queue.cmake b/cmake/recipes/sweep_and_tiniest_queue.cmake
index 190ea60d6..75c8361bc 100644
--- a/cmake/recipes/sweep_and_tiniest_queue.cmake
+++ b/cmake/recipes/sweep_and_tiniest_queue.cmake
@@ -1,3 +1,5 @@
+# Broadphase GPU (https://github.com/dbelgrod/broadphase-gpu)
+# License: MIT
if(TARGET STQ::CPU)
return()
endif()
diff --git a/cmake/recipes/tight_inclusion.cmake b/cmake/recipes/tight_inclusion.cmake
index d01b0c81b..7d24599d0 100644
--- a/cmake/recipes/tight_inclusion.cmake
+++ b/cmake/recipes/tight_inclusion.cmake
@@ -1,3 +1,5 @@
+# Tight Inclusion (https://github.com/Continuous-Collision-Detection/Tight-Inclusion)
+# License: MIT
if(TARGET tight_inclusion::tight_inclusion)
return()
endif()
diff --git a/docs/source/cpp.rst b/docs/source/cpp.rst
index ad6a56c5a..43d3ba0dd 100644
--- a/docs/source/cpp.rst
+++ b/docs/source/cpp.rst
@@ -73,10 +73,10 @@ Optional
* Enable by using the CMake option :cmake:`IPC_TOOLKIT_WITH_ABSEIL`
* Enabled by default
-* `GMP <https://gmplib.org/>`__: rational arithmetic used for exact intersection checks
+* `rational-cpp <https://github.com/zfergus/rational-cpp>`__: rational arithmetic used for exact intersection checks
- * Enable by using the CMake option :cmake:`IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION`
- * GMP must be installed at a system level
+ * Enable by using the CMake option :cmake:`IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION`
+ * Requires `GMP <https://gmplib.org/>`__ to be installed at a system level
* `Etienne Vouga's Collision Detection Library <https://github.com/evouga/collisiondetection>`__: inexact CCD
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 2f3565dd0..4240f3fac 100755
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -27,6 +27,7 @@ pybind11_add_module(ipctk
src/ccd/aabb.cpp
src/ccd/ccd.cpp
+ src/ccd/additive_ccd.cpp
src/ccd/inexact_point_edge.cpp
src/ccd/point_static_plane.cpp
diff --git a/python/src/bindings.cpp b/python/src/bindings.cpp
index 74896f211..910e3fb67 100644
--- a/python/src/bindings.cpp
+++ b/python/src/bindings.cpp
@@ -36,6 +36,7 @@ PYBIND11_MODULE(ipctk, m)
// ccd
define_ccd_aabb(m);
define_ccd(m);
+ define_additive_ccd(m);
define_inexact_point_edge(m);
define_point_static_plane(m);
diff --git a/python/src/ccd/additive_ccd.cpp b/python/src/ccd/additive_ccd.cpp
new file mode 100644
index 000000000..885c85227
--- /dev/null
+++ b/python/src/ccd/additive_ccd.cpp
@@ -0,0 +1,169 @@
+#include <common.hpp>
+
+#include <ipc/ccd/additive_ccd.hpp>
+
+namespace py = pybind11;
+using namespace ipc;
+
+void define_additive_ccd(py::module_& m)
+{
+ using namespace ipc::additive_ccd;
+
+ auto m_accd = m.def_submodule(
+ "additive_ccd", "Additive CCD method of [Li et al. 2021].");
+
+ m_accd.def(
+ "point_point_ccd",
+ [](const VectorMax3d& p0_t0, const VectorMax3d& p1_t0,
+ const VectorMax3d& p0_t1, const VectorMax3d& p1_t1,
+ const double min_distance = 0.0, const double tmax = 1.0,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) {
+ double toi;
+ bool r = point_point_ccd(
+ p0_t0, p1_t0, p0_t1, p1_t1, toi, min_distance, tmax,
+ conservative_rescaling);
+ return std::make_tuple(r, toi);
+ },
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between two points using continuous collision detection.
+
+ Parameters:
+ p0_t0: The initial position of the first point.
+ p1_t0: The initial position of the second point.
+ p0_t1: The final position of the first point.
+ p1_t1: The final position of the second point.
+ min_distance: The minimum distance between two objects.
+ tmax: The maximum time to check for collisions.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the two points.
+ )ipc_Qu8mg5v7",
+ py::arg("p0_t0"), py::arg("p1_t0"), py::arg("p0_t1"), py::arg("p1_t1"),
+ py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
+ py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+ m_accd.def(
+ "point_edge_ccd",
+ [](const VectorMax3d& p_t0, const VectorMax3d& e0_t0,
+ const VectorMax3d& e1_t0, const VectorMax3d& p_t1,
+ const VectorMax3d& e0_t1, const VectorMax3d& e1_t1,
+ const double min_distance = 0.0, const double tmax = 1.0,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) {
+ double toi;
+ bool r = point_edge_ccd(
+ p_t0, e0_t0, e1_t0, p_t1, e0_t1, e1_t1, toi, min_distance, tmax,
+ conservative_rescaling);
+ return std::make_tuple(r, toi);
+ },
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between a point and an edge using continuous collision detection.
+
+ Parameters:
+ p_t0: The initial position of the point.
+ e0_t0: The initial position of the first endpoint of the edge.
+ e1_t0: The initial position of the second endpoint of the edge.
+ p_t1: The final position of the point.
+ e0_t1: The final position of the first endpoint of the edge.
+ e1_t1: The final position of the second endpoint of the edge.
+ min_distance: The minimum distance between two objects.
+ tmax: The maximum time to check for collisions.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the point and the edge.
+ )ipc_Qu8mg5v7",
+ py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"), py::arg("p_t1"),
+ py::arg("e0_t1"), py::arg("e1_t1"), py::arg("min_distance") = 0.0,
+ py::arg("tmax") = 1.0,
+ py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+ m_accd.def(
+ "point_triangle_ccd",
+ [](const Eigen::Vector3d& p_t0, const Eigen::Vector3d& t0_t0,
+ const Eigen::Vector3d& t1_t0, const Eigen::Vector3d& t2_t0,
+ const Eigen::Vector3d& p_t1, const Eigen::Vector3d& t0_t1,
+ const Eigen::Vector3d& t1_t1, const Eigen::Vector3d& t2_t1,
+ const double min_distance = 0.0, const double tmax = 1.0,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) {
+ double toi;
+ bool r = point_triangle_ccd(
+ p_t0, t0_t0, t1_t0, t2_t0, p_t1, t0_t1, t1_t1, t2_t1, toi,
+ min_distance, tmax, conservative_rescaling);
+ return std::make_tuple(r, toi);
+ },
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between a point and a triangle using continuous collision detection.
+
+ Parameters:
+ p_t0: The initial position of the point.
+ t0_t0: The initial position of the first vertex of the triangle.
+ t1_t0: The initial position of the second vertex of the triangle.
+ t2_t0: The initial position of the third vertex of the triangle.
+ p_t1: The final position of the point.
+ t0_t1: The final position of the first vertex of the triangle.
+ t1_t1: The final position of the second vertex of the triangle.
+ t2_t1: The final position of the third vertex of the triangle.
+ min_distance: The minimum distance between two objects.
+ tmax: The maximum time to check for collisions.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the point and the triangle.
+ )ipc_Qu8mg5v7",
+ py::arg("p_t0"), py::arg("t0_t0"), py::arg("t1_t0"), py::arg("t2_t0"),
+ py::arg("p_t1"), py::arg("t0_t1"), py::arg("t1_t1"), py::arg("t2_t1"),
+ py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
+ py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+ m_accd.def(
+ "edge_edge_ccd",
+ [](const Eigen::Vector3d& ea0_t0, const Eigen::Vector3d& ea1_t0,
+ const Eigen::Vector3d& eb0_t0, const Eigen::Vector3d& eb1_t0,
+ const Eigen::Vector3d& ea0_t1, const Eigen::Vector3d& ea1_t1,
+ const Eigen::Vector3d& eb0_t1, const Eigen::Vector3d& eb1_t1,
+ const double min_distance = 0.0, const double tmax = 1.0,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) {
+ double toi;
+ bool r = edge_edge_ccd(
+ ea0_t0, ea1_t0, eb0_t0, eb1_t0, ea0_t1, ea1_t1, eb0_t1, eb1_t1,
+ toi, min_distance, tmax, conservative_rescaling);
+ return std::make_tuple(r, toi);
+ },
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between two edges using continuous collision detection.
+
+ Parameters:
+ ea0_t0: The initial position of the first endpoint of the first edge.
+ ea1_t0: The initial position of the second endpoint of the first edge.
+ eb0_t0: The initial position of the first endpoint of the second edge.
+ eb1_t0: The initial position of the second endpoint of the second edge.
+ ea0_t1: The final position of the first endpoint of the first edge.
+ ea1_t1: The final position of the second endpoint of the first edge.
+ eb0_t1: The final position of the first endpoint of the second edge.
+ eb1_t1: The final position of the second endpoint of the second edge.
+ min_distance: The minimum distance between two objects.
+ tmax: The maximum time to check for collisions.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the two edges.
+ )ipc_Qu8mg5v7",
+ py::arg("ea0_t0"), py::arg("ea1_t0"), py::arg("eb0_t0"),
+ py::arg("eb1_t0"), py::arg("ea0_t1"), py::arg("ea1_t1"),
+ py::arg("eb0_t1"), py::arg("eb1_t1"), py::arg("min_distance") = 0.0,
+ py::arg("tmax") = 1.0,
+ py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+}
\ No newline at end of file
diff --git a/python/src/ccd/bindings.hpp b/python/src/ccd/bindings.hpp
index 17445284c..fb2398c57 100644
--- a/python/src/ccd/bindings.hpp
+++ b/python/src/ccd/bindings.hpp
@@ -5,5 +5,6 @@ namespace py = pybind11;
void define_ccd_aabb(py::module_& m);
void define_ccd(py::module_& m);
+void define_additive_ccd(py::module_& m);
void define_inexact_point_edge(py::module_& m);
void define_point_static_plane(py::module_& m);
\ No newline at end of file
diff --git a/python/src/ccd/ccd.cpp b/python/src/ccd/ccd.cpp
index 61a98a419..95b6163b0 100644
--- a/python/src/ccd/ccd.cpp
+++ b/python/src/ccd/ccd.cpp
@@ -23,10 +23,30 @@ void define_ccd(py::module_& m)
tolerance, max_iterations, conservative_rescaling);
return std::make_tuple(r, toi);
},
- "", py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"),
- py::arg("p_t1"), py::arg("e0_t1"), py::arg("e1_t1"),
- py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
- py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between a point and an edge in 2D using continuous collision detection.
+
+ Parameters:
+ p_t0: The initial position of the point.
+ e0_t0: The initial position of the first endpoint of the edge.
+ e1_t0: The initial position of the second endpoint of the edge.
+ p_t1: The final position of the point.
+ e0_t1: The final position of the first endpoint of the edge.
+ e1_t1: The final position of the second endpoint of the edge.
+ min_distance: The minimum distance between the objects.
+ tmax: The maximum time to check for collisions.
+ tolerance: The error tolerance for the time of impact.
+ max_iterations: The maximum number of iterations to perform.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the point and the edge.
+ )ipc_Qu8mg5v7",
+ py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"), py::arg("p_t1"),
+ py::arg("e0_t1"), py::arg("e1_t1"), py::arg("min_distance") = 0.0,
+ py::arg("tmax") = 1.0, py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS,
py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
@@ -45,8 +65,27 @@ void define_ccd(py::module_& m)
max_iterations, conservative_rescaling);
return std::make_tuple(r, toi);
},
- "", py::arg("p0_t0"), py::arg("p1_t0"), py::arg("p0_t1"),
- py::arg("p1_t1"), py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between two points in 3D using continuous collision detection.
+
+ Parameters:
+ p0_t0: The initial position of the first point.
+ p1_t0: The initial position of the second point.
+ p0_t1: The final position of the first point.
+ p1_t1: The final position of the second point.
+ min_distance: The minimum distance between the objects.
+ tmax: The maximum time to check for collisions.
+ tolerance: The error tolerance for the time of impact.
+ max_iterations: The maximum number of iterations to perform.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the two points.
+ )ipc_Qu8mg5v7",
+ py::arg("p0_t0"), py::arg("p1_t0"), py::arg("p0_t1"), py::arg("p1_t1"),
+ py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS,
py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
@@ -67,10 +106,30 @@ void define_ccd(py::module_& m)
tolerance, max_iterations, conservative_rescaling);
return std::make_tuple(r, toi);
},
- "", py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"),
- py::arg("p_t1"), py::arg("e0_t1"), py::arg("e1_t1"),
- py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
- py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between a point and an edge in 3D using continuous collision detection.
+
+ Parameters:
+ p_t0: The initial position of the point.
+ e0_t0: The initial position of the first endpoint of the edge.
+ e1_t0: The initial position of the second endpoint of the edge.
+ p_t1: The final position of the point.
+ e0_t1: The final position of the first endpoint of the edge.
+ e1_t1: The final position of the second endpoint of the edge.
+ min_distance: The minimum distance between the objects.
+ tmax: The maximum time to check for collisions.
+ tolerance: The error tolerance for the time of impact.
+ max_iterations: The maximum number of iterations to perform.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the point and the edge.
+ )ipc_Qu8mg5v7",
+ py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"), py::arg("p_t1"),
+ py::arg("e0_t1"), py::arg("e1_t1"), py::arg("min_distance") = 0.0,
+ py::arg("tmax") = 1.0, py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS,
py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
@@ -92,9 +151,32 @@ void define_ccd(py::module_& m)
conservative_rescaling);
return std::make_tuple(r, toi);
},
- "", py::arg("p_t0"), py::arg("t0_t0"), py::arg("t1_t0"),
- py::arg("t2_t0"), py::arg("p_t1"), py::arg("t0_t1"), py::arg("t1_t1"),
- py::arg("t2_t1"), py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between a point and a triangle in 3D using continuous collision detection.
+
+ Parameters:
+ p_t0: The initial position of the point.
+ t0_t0: The initial position of the first vertex of the triangle.
+ t1_t0: The initial position of the second vertex of the triangle.
+ t2_t0: The initial position of the third vertex of the triangle.
+ p_t1: The final position of the point.
+ t0_t1: The final position of the first vertex of the triangle.
+ t1_t1: The final position of the second vertex of the triangle.
+ t2_t1: The final position of the third vertex of the triangle.
+ min_distance: The minimum distance between the objects.
+ tmax: The maximum time to check for collisions.
+ tolerance: The error tolerance for the time of impact.
+ max_iterations: The maximum number of iterations to perform.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the point and the triangle.
+ )ipc_Qu8mg5v7",
+ py::arg("p_t0"), py::arg("t0_t0"), py::arg("t1_t0"), py::arg("t2_t0"),
+ py::arg("p_t1"), py::arg("t0_t1"), py::arg("t1_t1"), py::arg("t2_t1"),
+ py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS,
py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
@@ -117,7 +199,30 @@ void define_ccd(py::module_& m)
conservative_rescaling);
return std::make_tuple(r, toi);
},
- "", py::arg("ea0_t0"), py::arg("ea1_t0"), py::arg("eb0_t0"),
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between two edges in 3D using continuous collision detection.
+
+ Parameters:
+ ea0_t0: The initial position of the first endpoint of the first edge.
+ ea1_t0: The initial position of the second endpoint of the first edge.
+ eb0_t0: The initial position of the first endpoint of the second edge.
+ eb1_t0: The initial position of the second endpoint of the second edge.
+ ea0_t1: The final position of the first endpoint of the first edge.
+ ea1_t1: The final position of the second endpoint of the first edge.
+ eb0_t1: The final position of the first endpoint of the second edge.
+ eb1_t1: The final position of the second endpoint of the second edge.
+ min_distance: The minimum distance between the objects.
+ tmax: The maximum time to check for collisions.
+ tolerance: The error tolerance for the time of impact.
+ max_iterations: The maximum number of iterations to perform.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the two edges.
+ )ipc_Qu8mg5v7",
+ py::arg("ea0_t0"), py::arg("ea1_t0"), py::arg("eb0_t0"),
py::arg("eb1_t0"), py::arg("ea0_t1"), py::arg("ea1_t1"),
py::arg("eb0_t1"), py::arg("eb1_t1"), py::arg("min_distance") = 0.0,
py::arg("tmax") = 1.0, py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
@@ -140,10 +245,30 @@ void define_ccd(py::module_& m)
tolerance, max_iterations, conservative_rescaling);
return std::make_tuple(r, toi);
},
- "", py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"),
- py::arg("p_t1"), py::arg("e0_t1"), py::arg("e1_t1"),
- py::arg("min_distance") = 0.0, py::arg("tmax") = 1.0,
- py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
+ R"ipc_Qu8mg5v7(
+ Computes the time of impact between a point and an edge in 2D or 3D using continuous collision detection.
+
+ Parameters:
+ p_t0: The initial position of the point.
+ e0_t0: The initial position of the first endpoint of the edge.
+ e1_t0: The initial position of the second endpoint of the edge.
+ p_t1: The final position of the point.
+ e0_t1: The final position of the first endpoint of the edge.
+ e1_t1: The final position of the second endpoint of the edge.
+ min_distance: The minimum distance between the objects.
+ tmax: The maximum time to check for collisions.
+ tolerance: The error tolerance for the time of impact.
+ max_iterations: The maximum number of iterations to perform.
+ conservative_rescaling: The conservative rescaling of the time of impact.
+
+ Returns:
+ Tuple of:
+ True if a collision was detected, false otherwise.
+ The time of impact between the point and the edge.
+ )ipc_Qu8mg5v7",
+ py::arg("p_t0"), py::arg("e0_t0"), py::arg("e1_t0"), py::arg("p_t1"),
+ py::arg("e0_t1"), py::arg("e1_t1"), py::arg("min_distance") = 0.0,
+ py::arg("tmax") = 1.0, py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS,
py::arg("conservative_rescaling") = DEFAULT_CCD_CONSERVATIVE_RESCALING);
}
diff --git a/src/ipc/ccd/CMakeLists.txt b/src/ipc/ccd/CMakeLists.txt
index ab49a7630..3a6f0d9c5 100644
--- a/src/ipc/ccd/CMakeLists.txt
+++ b/src/ipc/ccd/CMakeLists.txt
@@ -1,6 +1,8 @@
set(SOURCES
aabb.cpp
aabb.hpp
+ additive_ccd.cpp
+ additive_ccd.hpp
ccd.cpp
ccd.hpp
inexact_point_edge.cpp
diff --git a/src/ipc/ccd/additive_ccd.cpp b/src/ipc/ccd/additive_ccd.cpp
new file mode 100644
index 000000000..39af9dde9
--- /dev/null
+++ b/src/ipc/ccd/additive_ccd.cpp
@@ -0,0 +1,316 @@
+//
+// Source modified from https://github.com/ipc-sim/Codim-IPC
+// under Apache-2.0 License.
+//
+// Modifications:
+// • remove broad phase functions
+// • refactor code to use a single implementation of the additive_ccd algorithm
+// • utilize our distance function rather than porting the Codim-IPC versions
+// • return true if the initial distance is less than the minimum distance
+// • add an explicit tmax parameter rather than relying on the initial value of
+// toi
+//
+// NOTE: These methods are provided for reference comparison with [Li et al.
+// 2021] and are not utilized by the high-level functionality. In comparison to
+// Tight Inclusion CCD, this CCD method is not provably conservative and so can
+// potentially produce false negatives (i.e., miss collisions) due to
+// floating-point rounding error. However, it is much faster than Tight
+// Inclusion CCD (>100×) and very robust due to the gaps and conservative
+// rescaling used.
+//
+
+#include "additive_ccd.hpp"
+
+#include <ipc/distance/point_point.hpp>
+#include <ipc/distance/point_edge.hpp>
+#include <ipc/distance/point_triangle.hpp>
+#include <ipc/distance/edge_edge.hpp>
+
+namespace ipc::additive_ccd {
+
+namespace {
+ template <typename... Args> void subtract_mean(Args&... args)
+ {
+ constexpr double n = sizeof...(args);
+ static_assert(n > 0, "At least one argument is required");
+
+ using T = typename std::tuple_element<0, std::tuple<Args...>>::type;
+ const int dim = std::get<0>(std::tuple<const Args&...>(args...)).size();
+
+ T mean = T::Zero(dim);
+ for (const T& value : { args... }) {
+ mean += value;
+ }
+ mean /= n;
+
+ for (T* value : { &args... }) {
+ (*value) -= mean;
+ }
+ }
+
+ VectorMax12d stack(const VectorMax3d& x) { return x; }
+
+ template <typename... Args>
+ VectorMax12d stack(const VectorMax3d& x0, const Args&... args)
+ {
+ VectorMax12d x(x0.size() * (1 + sizeof...(args)));
+ x.head(x0.size()) = x0;
+ x.tail(x0.size() * sizeof...(args)) = stack(args...);
+ return x;
+ }
+} // namespace
+
+bool additive_ccd(
+ VectorMax12d x,
+ const VectorMax12d& dx,
+ const std::function<double(const VectorMax12d&)>& distance_squared,
+ const double max_disp_mag,
+ double& toi,
+ const double min_distance,
+ const double tmax,
+ const double conservative_rescaling)
+{
+ assert(conservative_rescaling > 0 && conservative_rescaling < 1);
+
+ const double min_distance_sq = min_distance * min_distance;
+
+ double d, d_sq;
+ d = std::sqrt(d_sq = distance_squared(x));
+ assert(d > min_distance);
+
+ double d_func = d_sq - min_distance_sq;
+ assert(d_func > 0);
+ const double gap = // (d - ξ) = (d² - ξ²) / (d + ξ)
+ (1 - conservative_rescaling) * d_func / (d + min_distance);
+
+ toi = 0;
+ while (true) {
+ // tₗ = η ⋅ (d - ξ) / lₚ = η ⋅ (d² - ξ²) / (lₚ ⋅ (d + ξ))
+ const double toi_lower_bound = conservative_rescaling * d_func
+ / ((d + min_distance) * max_disp_mag);
+
+ x += toi_lower_bound * dx;
+
+ d = std::sqrt(d_sq = distance_squared(x));
+
+ d_func = d_sq - min_distance_sq;
+ assert(d_func > 0);
+ if (toi > 0 && d_func / (d + min_distance) < gap) {
+ break; // distance (including thickness) is less than gap
+ }
+
+ toi += toi_lower_bound;
+ if (toi > tmax) {
+ return false; // collision occurs after tmax
+ }
+ }
+
+ return true;
+}
+
+bool point_point_ccd(
+ const VectorMax3d& p0_t0,
+ const VectorMax3d& p1_t0,
+ const VectorMax3d& p0_t1,
+ const VectorMax3d& p1_t1,
+ double& toi,
+ const double min_distance,
+ const double tmax,
+ const double conservative_rescaling)
+{
+ const int dim = p0_t0.size();
+ assert(dim == p1_t0.size() && dim == p0_t1.size() && dim == p1_t1.size());
+
+ const double initial_distance = point_point_distance(p0_t0, p1_t0);
+ if (initial_distance <= min_distance * min_distance) {
+ logger().warn(
+ "Initial distance {} ≤ d_min={}, returning toi=0!",
+ std::sqrt(initial_distance), min_distance);
+ toi = 0;
+ return true;
+ }
+
+ VectorMax3d dp0 = p0_t1 - p0_t0;
+ VectorMax3d dp1 = p1_t1 - p1_t0;
+ subtract_mean(dp0, dp1);
+
+ const double max_disp_mag = dp0.norm() + dp1.norm();
+ if (max_disp_mag == 0) {
+ return false;
+ }
+
+ auto distance_squared = [dim](const VectorMax12d& x) {
+ return point_point_distance(x.head(dim), x.tail(dim));
+ };
+
+ VectorMax12d x = stack(p0_t0, p1_t0);
+ const VectorMax12d dx = stack(dp0, dp1);
+
+ return additive_ccd(
+ x, dx, distance_squared, max_disp_mag, toi, min_distance, tmax,
+ conservative_rescaling);
+}
+
+bool point_edge_ccd(
+ const VectorMax3d& p_t0,
+ const VectorMax3d& e0_t0,
+ const VectorMax3d& e1_t0,
+ const VectorMax3d& p_t1,
+ const VectorMax3d& e0_t1,
+ const VectorMax3d& e1_t1,
+ double& toi,
+ const double min_distance,
+ const double tmax,
+ const double conservative_rescaling)
+{
+ const int dim = p_t0.size();
+ assert(dim == e0_t0.size() && dim == e1_t0.size());
+ assert(dim == p_t1.size() && dim == e0_t1.size() && dim == e1_t1.size());
+
+ const double initial_distance = point_edge_distance(p_t0, e0_t0, e1_t0);
+ if (initial_distance <= min_distance * min_distance) {
+ logger().warn(
+ "Initial distance {} ≤ d_min={}, returning toi=0!",
+ std::sqrt(initial_distance), min_distance);
+ toi = 0;
+ return true;
+ }
+
+ VectorMax3d dp = p_t1 - p_t0;
+ VectorMax3d de0 = e0_t1 - e0_t0;
+ VectorMax3d de1 = e1_t1 - e1_t0;
+ subtract_mean(dp, de0, de1);
+
+ const double max_disp_mag =
+ dp.norm() + std::sqrt(std::max(de0.squaredNorm(), de1.squaredNorm()));
+ if (max_disp_mag == 0) {
+ return false;
+ }
+
+ VectorMax12d x = stack(p_t0, e0_t0, e1_t0);
+ const VectorMax12d dx = stack(dp, de0, de1);
+
+ auto distance_squared = [dim](const VectorMax12d& x) {
+ return point_edge_distance(
+ x.head(dim), x.segment(dim, dim), x.tail(dim));
+ };
+
+ return additive_ccd(
+ x, dx, distance_squared, max_disp_mag, toi, min_distance, tmax,
+ conservative_rescaling);
+}
+
+bool point_triangle_ccd(
+ const Eigen::Vector3d& p_t0,
+ const Eigen::Vector3d& t0_t0,
+ const Eigen::Vector3d& t1_t0,
+ const Eigen::Vector3d& t2_t0,
+ const Eigen::Vector3d& p_t1,
+ const Eigen::Vector3d& t0_t1,
+ const Eigen::Vector3d& t1_t1,
+ const Eigen::Vector3d& t2_t1,
+ double& toi,
+ const double min_distance,
+ const double tmax,
+ const double conservative_rescaling)
+{
+ const double initial_distance =
+ point_triangle_distance(p_t0, t0_t0, t1_t0, t2_t0);
+ if (initial_distance <= min_distance * min_distance) {
+ logger().warn(
+ "Initial distance {} ≤ d_min={}, returning toi=0!",
+ std::sqrt(initial_distance), min_distance);
+ toi = 0;
+ return true;
+ }
+
+ Eigen::Vector3d dp = p_t1 - p_t0;
+ Eigen::Vector3d dt0 = t0_t1 - t0_t0;
+ Eigen::Vector3d dt1 = t1_t1 - t1_t0;
+ Eigen::Vector3d dt2 = t2_t1 - t2_t0;
+ subtract_mean(dp, dt0, dt1, dt2);
+
+ VectorMax12d x = stack(p_t0, t0_t0, t1_t0, t2_t0);
+ const VectorMax12d dx = stack(dp, dt0, dt1, dt2);
+
+ const double max_disp_mag = dp.norm()
+ + std::sqrt(std::max(
+ { dt0.squaredNorm(), dt1.squaredNorm(), dt2.squaredNorm() }));
+ if (max_disp_mag == 0) {
+ return false;
+ }
+
+ auto distance_squared = [](const VectorMax12d& x) {
+ return point_triangle_distance(
+ x.head<3>(), x.segment<3>(3), x.segment<3>(6), x.tail<3>());
+ };
+
+ return additive_ccd(
+ x, dx, distance_squared, max_disp_mag, toi, min_distance, tmax,
+ conservative_rescaling);
+}
+
+bool edge_edge_ccd(
+ const Eigen::Vector3d& ea0_t0,
+ const Eigen::Vector3d& ea1_t0,
+ const Eigen::Vector3d& eb0_t0,
+ const Eigen::Vector3d& eb1_t0,
+ const Eigen::Vector3d& ea0_t1,
+ const Eigen::Vector3d& ea1_t1,
+ const Eigen::Vector3d& eb0_t1,
+ const Eigen::Vector3d& eb1_t1,
+ double& toi,
+ const double min_distance,
+ const double tmax,
+ const double conservative_rescaling)
+{
+ const double initial_distance =
+ edge_edge_distance(ea0_t0, ea1_t0, eb0_t0, eb1_t0);
+ if (initial_distance <= min_distance * min_distance) {
+ logger().warn(
+ "Initial distance {} ≤ d_min={}, returning toi=0!",
+ std::sqrt(initial_distance), min_distance);
+ toi = 0;
+ return true;
+ }
+
+ Eigen::Vector3d dea0 = ea0_t1 - ea0_t0;
+ Eigen::Vector3d dea1 = ea1_t1 - ea1_t0;
+ Eigen::Vector3d deb0 = eb0_t1 - eb0_t0;
+ Eigen::Vector3d deb1 = eb1_t1 - eb1_t0;
+ subtract_mean(dea0, dea1, deb0, deb1);
+
+ const double max_disp_mag =
+ std::sqrt(std::max(dea0.squaredNorm(), dea1.squaredNorm()))
+ + std::sqrt(std::max(deb0.squaredNorm(), deb1.squaredNorm()));
+ if (max_disp_mag == 0) {
+ return false;
+ }
+
+ VectorMax12d x = stack(ea0_t0, ea1_t0, eb0_t0, eb1_t0);
+ const VectorMax12d dx = stack(dea0, dea1, deb0, deb1);
+
+ const double min_distance_sq = min_distance * min_distance;
+ auto distance_squared = [min_distance_sq](const VectorMax12d& x) {
+ const auto& ea0 = x.head<3>();
+ const auto& ea1 = x.segment<3>(3);
+ const auto& eb0 = x.segment<3>(6);
+ const auto& eb1 = x.tail<3>();
+
+ double d_sq = edge_edge_distance(ea0, ea1, eb0, eb1);
+ if (d_sq - min_distance_sq <= 0) {
+ // since we ensured other place that all dist smaller than d̂ are
+ // positive, this must be some far away nearly parallel edges
+ d_sq = std::min(
+ { (ea0 - eb0).squaredNorm(), (ea0 - eb1).squaredNorm(),
+ (ea1 - eb0).squaredNorm(), (ea1 - eb1).squaredNorm() });
+ }
+ return d_sq;
+ };
+
+ return additive_ccd(
+ x, dx, distance_squared, max_disp_mag, toi, min_distance, tmax,
+ conservative_rescaling);
+}
+
+} // namespace ipc::additive_ccd
\ No newline at end of file
diff --git a/src/ipc/ccd/additive_ccd.hpp b/src/ipc/ccd/additive_ccd.hpp
new file mode 100644
index 000000000..bef785e55
--- /dev/null
+++ b/src/ipc/ccd/additive_ccd.hpp
@@ -0,0 +1,138 @@
+//
+// NOTE: These methods are provided for reference comparison with [Li et al.
+// 2021] and is not utilized by the high-level functionality. In compairson to
+// Tight Inclusion CCD, this CCD method is not provably conservative and so can
+// potentially produce false negatives (i.e., miss collisions) due to
+// floating-point rounding error. However, it is much faster than Tight
+// Inclusion CCD (>100×) and very robust due to the gaps and conservative
+// rescaling used.
+//
+
+#pragma once
+
+#include <ipc/utils/eigen_ext.hpp>
+
+namespace ipc::additive_ccd {
+
+/// The default conservative rescaling value used to avoid taking steps exactly
+/// to impact. Value choosen to based on [Li et al. 2021].
+static constexpr double DEFAULT_CCD_CONSERVATIVE_RESCALING = 0.9;
+
+/// @brief Computes the time of impact between two points using continuous collision detection.
+/// @param p0_t0 The initial position of the first point.
+/// @param p1_t0 The initial position of the second point.
+/// @param p0_t1 The final position of the first point.
+/// @param p1_t1 The final position of the second point.
+/// @param[out] toi The time of impact between the two points.
+/// @param min_distance The minimum distance between two objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
+bool point_point_ccd(
+ const VectorMax3d& p0_t0,
+ const VectorMax3d& p1_t0,
+ const VectorMax3d& p0_t1,
+ const VectorMax3d& p1_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+/// @brief Computes the time of impact between a point and an edge using continuous collision detection.
+/// @param p_t0 The initial position of the point.
+/// @param e0_t0 The initial position of the first endpoint of the edge.
+/// @param e1_t0 The initial position of the second endpoint of the edge.
+/// @param p_t1 The final position of the point.
+/// @param e0_t1 The final position of the first endpoint of the edge.
+/// @param e1_t1 The final position of the second endpoint of the edge.
+/// @param[out] toi The time of impact between the point and the edge.
+/// @param min_distance The minimum distance between two objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
+bool point_edge_ccd(
+ const VectorMax3d& p_t0,
+ const VectorMax3d& e0_t0,
+ const VectorMax3d& e1_t0,
+ const VectorMax3d& p_t1,
+ const VectorMax3d& e0_t1,
+ const VectorMax3d& e1_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+/// @brief Computes the time of impact between a point and a triangle using continuous collision detection.
+/// @param p_t0 The initial position of the point.
+/// @param t0_t0 The initial position of the first vertex of the triangle.
+/// @param t1_t0 The initial position of the second vertex of the triangle.
+/// @param t2_t0 The initial position of the third vertex of the triangle.
+/// @param p_t1 The final position of the point.
+/// @param t0_t1 The final position of the first vertex of the triangle.
+/// @param t1_t1 The final position of the second vertex of the triangle.
+/// @param t2_t1 The final position of the third vertex of the triangle.
+/// @param[out] toi The time of impact between the point and the triangle.
+/// @param min_distance The minimum distance between two objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
+bool point_triangle_ccd(
+ const Eigen::Vector3d& p_t0,
+ const Eigen::Vector3d& t0_t0,
+ const Eigen::Vector3d& t1_t0,
+ const Eigen::Vector3d& t2_t0,
+ const Eigen::Vector3d& p_t1,
+ const Eigen::Vector3d& t0_t1,
+ const Eigen::Vector3d& t1_t1,
+ const Eigen::Vector3d& t2_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+/// @brief Computes the time of impact between two edges using continuous collision detection.
+/// @param ea0_t0 The initial position of the first endpoint of the first edge.
+/// @param ea1_t0 The initial position of the second endpoint of the first edge.
+/// @param eb0_t0 The initial position of the first endpoint of the second edge.
+/// @param eb1_t0 The initial position of the second endpoint of the second edge.
+/// @param ea0_t1 The final position of the first endpoint of the first edge.
+/// @param ea1_t1 The final position of the second endpoint of the first edge.
+/// @param eb0_t1 The final position of the first endpoint of the second edge.
+/// @param eb1_t1 The final position of the second endpoint of the second edge.
+/// @param[out] toi The time of impact between the two edges.
+/// @param min_distance The minimum distance between two objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
+bool edge_edge_ccd(
+ const Eigen::Vector3d& ea0_t0,
+ const Eigen::Vector3d& ea1_t0,
+ const Eigen::Vector3d& eb0_t0,
+ const Eigen::Vector3d& eb1_t0,
+ const Eigen::Vector3d& ea0_t1,
+ const Eigen::Vector3d& ea1_t1,
+ const Eigen::Vector3d& eb0_t1,
+ const Eigen::Vector3d& eb1_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+/// @brief Computes the time of impact between two objects using additive continuous collision detection.
+/// @param distance_squared A function that computes the squared distance between the two objects at a given time.
+/// @param[out] toi The time of impact between the two objects.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param conservative_rescaling The amount to rescale the objects by to ensure conservative advancement.
+/// @return True if a collision was detected, false otherwise.
+bool additive_ccd(
+ VectorMax12d x,
+ const VectorMax12d& dx,
+ const std::function<double()>& distance_squared,
+ const double max_disp_mag,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+
+} // namespace ipc::additive_ccd
\ No newline at end of file
diff --git a/src/ipc/ccd/ccd.hpp b/src/ipc/ccd/ccd.hpp
index d3082ee2a..edaae03ff 100644
--- a/src/ipc/ccd/ccd.hpp
+++ b/src/ipc/ccd/ccd.hpp
@@ -14,6 +14,20 @@ static constexpr double DEFAULT_CCD_CONSERVATIVE_RESCALING = 0.8;
// 2D
+/// @brief Computes the time of impact between a point and an edge in 2D using continuous collision detection.
+/// @param p_t0 The initial position of the point.
+/// @param e0_t0 The initial position of the first endpoint of the edge.
+/// @param e1_t0 The initial position of the second endpoint of the edge.
+/// @param p_t1 The final position of the point.
+/// @param e0_t1 The final position of the first endpoint of the edge.
+/// @param e1_t1 The final position of the second endpoint of the edge.
+/// @param[out] toi The time of impact between the point and the edge.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param tolerance The error tolerance for the time of impact.
+/// @param max_iterations The maximum number of iterations to perform.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
bool point_edge_ccd_2D(
const Eigen::Vector2d& p_t0,
const Eigen::Vector2d& e0_t0,
@@ -30,6 +44,18 @@ bool point_edge_ccd_2D(
// 3D
+/// @brief Computes the time of impact between two points in 3D using continuous collision detection.
+/// @param p0_t0 The initial position of the first point.
+/// @param p1_t0 The initial position of the second point.
+/// @param p0_t1 The final position of the first point.
+/// @param p1_t1 The final position of the second point.
+/// @param[out] toi The time of impact between the two points.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param tolerance The error tolerance for the time of impact.
+/// @param max_iterations The maximum number of iterations to perform.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
bool point_point_ccd(
const Eigen::Vector3d& p0_t0,
const Eigen::Vector3d& p1_t0,
@@ -42,6 +68,20 @@ bool point_point_ccd(
const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+/// @brief Computes the time of impact between a point and an edge in 3D using continuous collision detection.
+/// @param p_t0 The initial position of the point.
+/// @param e0_t0 The initial position of the first endpoint of the edge.
+/// @param e1_t0 The initial position of the second endpoint of the edge.
+/// @param p_t1 The final position of the point.
+/// @param e0_t1 The final position of the first endpoint of the edge.
+/// @param e1_t1 The final position of the second endpoint of the edge.
+/// @param[out] toi The time of impact between the point and the edge.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param tolerance The error tolerance for the time of impact.
+/// @param max_iterations The maximum number of iterations to perform.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
bool point_edge_ccd_3D(
const Eigen::Vector3d& p_t0,
const Eigen::Vector3d& e0_t0,
@@ -56,6 +96,22 @@ bool point_edge_ccd_3D(
const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+/// @brief Computes the time of impact between a point and a triangle in 3D using continuous collision detection.
+/// @param p_t0 The initial position of the point.
+/// @param t0_t0 The initial position of the first vertex of the triangle.
+/// @param t1_t0 The initial position of the second vertex of the triangle.
+/// @param t2_t0 The initial position of the third vertex of the triangle.
+/// @param p_t1 The final position of the point.
+/// @param t0_t1 The final position of the first vertex of the triangle.
+/// @param t1_t1 The final position of the second vertex of the triangle.
+/// @param t2_t1 The final position of the third vertex of the triangle.
+/// @param[out] toi The time of impact between the point and the triangle.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param tolerance The error tolerance for the time of impact.
+/// @param max_iterations The maximum number of iterations to perform.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
bool point_triangle_ccd(
const Eigen::Vector3d& p_t0,
const Eigen::Vector3d& t0_t0,
@@ -72,6 +128,22 @@ bool point_triangle_ccd(
const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
const double conservative_rescaling = DEFAULT_CCD_CONSERVATIVE_RESCALING);
+/// @brief Computes the time of impact between two edges in 3D using continuous collision detection.
+/// @param ea0_t0 The initial position of the first endpoint of the first edge.
+/// @param ea1_t0 The initial position of the second endpoint of the first edge.
+/// @param eb0_t0 The initial position of the first endpoint of the second edge.
+/// @param eb1_t0 The initial position of the second endpoint of the second edge.
+/// @param ea0_t1 The final position of the first endpoint of the first edge.
+/// @param ea1_t1 The final position of the second endpoint of the first edge.
+/// @param eb0_t1 The final position of the first endpoint of the second edge.
+/// @param eb1_t1 The final position of the second endpoint of the second edge.
+/// @param[out] toi The time of impact between the two edges.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param tolerance The error tolerance for the time of impact.
+/// @param max_iterations The maximum number of iterations to perform.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
bool edge_edge_ccd(
const Eigen::Vector3d& ea0_t0,
const Eigen::Vector3d& ea1_t0,
@@ -90,6 +162,20 @@ bool edge_edge_ccd(
// 2D or 3D
+/// @brief Computes the time of impact between a point and an edge in 2D or 3D using continuous collision detection.
+/// @param p_t0 The initial position of the point.
+/// @param e0_t0 The initial position of the first endpoint of the edge.
+/// @param e1_t0 The initial position of the second endpoint of the edge.
+/// @param p_t1 The final position of the point.
+/// @param e0_t1 The final position of the first endpoint of the edge.
+/// @param e1_t1 The final position of the second endpoint of the edge.
+/// @param[out] toi The time of impact between the point and the edge.
+/// @param min_distance The minimum distance between the objects.
+/// @param tmax The maximum time to check for collisions.
+/// @param tolerance The error tolerance for the time of impact.
+/// @param max_iterations The maximum number of iterations to perform.
+/// @param conservative_rescaling The conservative rescaling of the time of impact.
+/// @return True if a collision was detected, false otherwise.
bool point_edge_ccd(
const VectorMax3d& p_t0,
const VectorMax3d& e0_t0,
diff --git a/src/ipc/utils/CMakeLists.txt b/src/ipc/utils/CMakeLists.txt
index ee2e471b1..9c14a00ef 100644
--- a/src/ipc/utils/CMakeLists.txt
+++ b/src/ipc/utils/CMakeLists.txt
@@ -9,7 +9,6 @@ set(SOURCES
logger.cpp
logger.hpp
merge_thread_local.hpp
- rational.hpp
save_obj.cpp
save_obj.hpp
unordered_map_and_set.cpp
diff --git a/src/ipc/utils/intersection.cpp b/src/ipc/utils/intersection.cpp
index 8b4f0fea3..f7fd194b1 100644
--- a/src/ipc/utils/intersection.cpp
+++ b/src/ipc/utils/intersection.cpp
@@ -2,14 +2,15 @@
#include <ipc/utils/eigen_ext.hpp>
#include <ipc/config.hpp>
-#ifdef IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION
-#include <ipc/utils/rational.hpp>
-#endif
#include <igl/predicates/predicates.h>
#include <Eigen/Geometry>
+#ifdef IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION
+#include <rational/rational.hpp>
+#endif
+
namespace ipc {
#ifdef IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION
@@ -20,7 +21,12 @@ bool is_edge_intersecting_triangle_rational(
const Eigen::Vector3d& t1_float,
const Eigen::Vector3d& t2_float)
{
- Vector3<Rational> e0, e1, t0, t1, t2;
+ using namespace rational;
+
+ typedef Eigen::Matrix<Rational, 3, 1, Eigen::ColMajor | Eigen::DontAlign>
+ Vector3r;
+
+ Vector3r e0, e1, t0, t1, t2;
for (int d = 0; d < 3; ++d) {
e0[d] = e0_float[d];
@@ -44,7 +50,7 @@ bool is_edge_intersecting_triangle_rational(
- e1[1] * t1[2] * t2[0] - e1[2] * t0[0] * t1[1] + e1[2] * t0[0] * t2[1]
+ e1[2] * t0[1] * t1[0] - e1[2] * t0[1] * t2[0] - e1[2] * t1[0] * t2[1]
+ e1[2] * t1[1] * t2[0];
- if (d.get_sign() == 0) {
+ if (d.sign() == 0) {
return true;
}
diff --git a/src/ipc/utils/rational.hpp b/src/ipc/utils/rational.hpp
deleted file mode 100644
index 23c178500..000000000
--- a/src/ipc/utils/rational.hpp
+++ /dev/null
@@ -1,138 +0,0 @@
-#pragma once
-
-#include <ipc/config.hpp>
-
-#ifdef IPC_TOOLKIT_WITH_RATIONAL_INTERSECTION
-
-#include <gmp.h>
-#include <iostream>
-
-namespace ipc {
-
-// https://cs.nyu.edu/acsys/cvc3/releases/1.5/doc/rational-gmp_8cpp-source.html
-class Rational {
-public:
- mpq_t value;
- void canonicalize() { mpq_canonicalize(value); }
- int get_sign() const { return mpq_sgn(value); }
-
- Rational()
- {
- mpq_init(value);
- mpq_set_d(value, 0);
- }
-
- Rational(double d)
- {
- mpq_init(value);
- mpq_set_d(value, d);
- // canonicalize();
- }
-
- Rational(const mpq_t& v_)
- {
- mpq_init(value);
- mpq_set(value, v_);
- // canonicalize();
- }
-
- Rational(const Rational& other)
- {
- mpq_init(value);
- mpq_set(value, other.value);
- }
-
- ~Rational() { mpq_clear(value); }
-
- friend Rational operator-(const Rational& v)
- {
- Rational r_out;
- mpq_neg(r_out.value, v.value);
- return r_out;
- }
-
- friend Rational operator+(const Rational& x, const Rational& y)
- {
- Rational r_out;
- mpq_add(r_out.value, x.value, y.value);
- return r_out;
- }
-
- friend Rational operator-(const Rational& x, const Rational& y)
- {
- Rational r_out;
- mpq_sub(r_out.value, x.value, y.value);
- return r_out;
- }
-
- friend Rational operator*(const Rational& x, const Rational& y)
- {
- Rational r_out;
- mpq_mul(r_out.value, x.value, y.value);
- return r_out;
- }
-
- friend Rational operator/(const Rational& x, const Rational& y)
- {
- Rational r_out;
- mpq_div(r_out.value, x.value, y.value);
- return r_out;
- }
-
- Rational& operator=(const Rational& x)
- {
- if (this == &x)
- return *this;
- mpq_set(value, x.value);
- return *this;
- }
-
- Rational& operator=(const double x)
- {
- mpq_set_d(value, x);
- // canonicalize();
- return *this;
- }
-
- friend bool operator<(const Rational& r, const Rational& r1)
- {
- return mpq_cmp(r.value, r1.value) < 0;
- }
-
- friend bool operator>(const Rational& r, const Rational& r1)
- {
- return mpq_cmp(r.value, r1.value) > 0;
- }
-
- friend bool operator<=(const Rational& r, const Rational& r1)
- {
- return mpq_cmp(r.value, r1.value) <= 0;
- }
-
- friend bool operator>=(const Rational& r, const Rational& r1)
- {
- return mpq_cmp(r.value, r1.value) >= 0;
- }
-
- friend bool operator==(const Rational& r, const Rational& r1)
- {
- return mpq_equal(r.value, r1.value);
- }
-
- friend bool operator!=(const Rational& r, const Rational& r1)
- {
- return !mpq_equal(r.value, r1.value);
- }
-
- double to_double() { return mpq_get_d(value); }
-
- friend std::ostream& operator<<(std::ostream& os, const Rational& r)
- {
- os << mpq_get_d(r.value);
- return os;
- }
-};
-
-} // namespace ipc
-
-#endif
| Co-dimensional simulations
Quick question: Does this toolkit include the ability to handle co-dimensional simulations similar to Co-IPC?
| I am not sure if you mean can the toolkit completely perform codimensional FEM simulations like in the CIPC paper, or if the toolkit has the necessary components to implement CIPC?
This code base includes the necessary components in order to add the IPC's frictional contact model to a simulation pipeline. This includes computing the barrier potentials, smooth friction dissipative potential, and performing CCD. What the toolkit does not have is the FEM components necessary to implement codimensional FEM (FEM basis, elasticity models, and optimization framework for time-stepping).
That being said, this toolkit contains most of the components shown in the CIPC paper.
## Implemented
These features from the CIPC paper are already implemented.
* Support for codimensional triangle-triangle, triangle-edge, triangle-point, edge-edge, and edge-point (in 2D) frictional contact.
* This is sufficient for standard IPC and a lot of CIPC, but misses some types of contact (see below)
* The IPC thickness model (i.e., $\xi$ offset in the barrier potential)
* CCD between codimensional triangle-triangle, triangle-edge, triangle-point, edge-edge, and edge-point (in 2D)
* This is using the [Tight-Inclusion](https://github.com/Continuous-Collision-Detection/Tight-Inclusion) CCD method for guaranteed conservative CCD under floating-point
* Thickness offset in the CCD
* This is handled through Tight-Inclusion's minimum separation CCD.
## Missing Features
The following components are missing, but could be easily added (I am happy to add them).
* Support for codimensional edge-point (in 3D) and point-point contact.
* These are important for contacts between codim. edges with codim. points and for contacts between particles (like in Fig. 22 "Granules on cloth" of the CIPC paper)
* Strain limiting
* This is more on the side of FEM but could be added to the toolkit. My only concern is it is a little out of scope.
* CCD for contacts between edge-point (in 3D) and point-point contact
* I believe we can add these using Tight-Inclusion CCD
* Additive CCD
* Instead we choose to use Tight-Inclusion because of its guarantees
Thanks so much. My main interest is having a more robust discrete elastic rod framework than is currently implemented in Co-IPC. Eventually, I would want to be able to simulate rods interacting with a deformable solid geometry. Co-IPC gets 75% of the way there as currently written, but the implementation of rod physics is limited (only considers bending and rods that are straight in their stress-free configuration). I'm not a developer, so I'm just trying to determine the path of least resistance given my skills. Any plans to bring discrete elastic rods to polyfem (hint, hint). ;-) Seriously though, your work is amazingly impressive and appreciated! Thank you for bringing these tools to the public.
Thank you for your nice comments. Adding shell and rod models to PolyFEM is something I have wanted to play with for a while, but I don't have a research project that motivates implementing it. I think it will eventually be implemented, but no timeline right now.
Small update: #22 added the thickness offset in the CCD. | 2023-10-04T22:46:24 | 0.0 | [] | [] |
||
ipc-sim/ipc-toolkit | ipc-sim__ipc-toolkit-50 | 32296157ba94e20eb2bbe903a9f075119f1934e0 | diff --git a/python/src/candidates/candidates.cpp b/python/src/candidates/candidates.cpp
index 1d899d3d9..82549e3df 100644
--- a/python/src/candidates/candidates.cpp
+++ b/python/src/candidates/candidates.cpp
@@ -104,6 +104,38 @@ void define_candidates(py::module_& m)
py::arg("min_distance") = 0.0,
py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS)
+ .def(
+ "compute_noncandidate_conservative_stepsize",
+ &Candidates::compute_noncandidate_conservative_stepsize,
+ R"ipc_Qu8mg5v7(
+ Computes a conservative bound on the largest-feasible step size for surface primitives not in contact.
+
+ Parameters:
+ mesh: The collision mesh.
+ displacements: Surface vertex displacements (rowwise).
+ dhat: Barrier activation distance.
+ )ipc_Qu8mg5v7",
+ py::arg("mesh"), py::arg("displacements"), py::arg("dhat"))
+ .def(
+ "compute_cfl_stepsize", &Candidates::compute_cfl_stepsize,
+ R"ipc_Qu8mg5v7(
+ Computes a CFL-inspired CCD maximum step step size.
+
+ Parameters:
+ mesh: The collision mesh.
+ vertices_t0: Surface vertex starting positions (rowwise).
+ vertices_t1: Surface vertex ending positions (rowwise).
+ dhat: Barrier activation distance.
+ min_distance: The minimum distance allowable between any two elements.
+ tolerance: The tolerance for the CCD algorithm.
+ max_iterations: The maximum number of iterations for the CCD algorithm.
+ )ipc_Qu8mg5v7",
+ py::arg("mesh"), py::arg("vertices_t0"), py::arg("vertices_t1"),
+ py::arg("dhat"),
+ py::arg("broad_phase_method") = DEFAULT_BROAD_PHASE_METHOD,
+ py::arg("min_distance") = 0.0,
+ py::arg("tolerance") = DEFAULT_CCD_TOLERANCE,
+ py::arg("max_iterations") = DEFAULT_CCD_MAX_ITERATIONS)
.def(
"save_obj", &Candidates::save_obj, "", py::arg("filename"),
py::arg("vertices"), py::arg("edges"), py::arg("faces"))
diff --git a/src/ipc/candidates/CMakeLists.txt b/src/ipc/candidates/CMakeLists.txt
index 222da6166..daa314882 100644
--- a/src/ipc/candidates/CMakeLists.txt
+++ b/src/ipc/candidates/CMakeLists.txt
@@ -3,6 +3,7 @@ set(SOURCES
candidates.hpp
collision_stencil.hpp
collision_stencil.cpp
+ continuous_collision_candidate.cpp
continuous_collision_candidate.hpp
edge_edge.cpp
edge_edge.hpp
diff --git a/src/ipc/candidates/candidates.cpp b/src/ipc/candidates/candidates.cpp
index d1b5a8060..8590ade0d 100644
--- a/src/ipc/candidates/candidates.cpp
+++ b/src/ipc/candidates/candidates.cpp
@@ -1,5 +1,6 @@
#include "candidates.hpp"
+#include <ipc/ipc.hpp>
#include <ipc/utils/save_obj.hpp>
#include <ipc/config.hpp>
@@ -125,6 +126,71 @@ double Candidates::compute_collision_free_stepsize(
return earliest_toi;
}
+double Candidates::compute_noncandidate_conservative_stepsize(
+ const CollisionMesh& mesh,
+ const Eigen::MatrixXd& displacements,
+ const double dhat) const
+{
+ assert(displacements.rows() == mesh.num_vertices());
+
+ if (empty()) {
+ return 1; // No possible collisions, so can take full step.
+ }
+
+ const auto& E = mesh.edges();
+ const auto& F = mesh.faces();
+
+ std::vector<bool> is_vertex_a_candidates(mesh.num_vertices(), false);
+ for (size_t i = 0; i < size(); i++) {
+ for (const long vid : (*this)[i].vertex_ids(E, F)) {
+ if (vid < 0) {
+ break;
+ }
+ is_vertex_a_candidates[vid] = true;
+ }
+ }
+
+ double max_displacement = 0;
+ for (size_t i = 0; i < displacements.rows(); i++) {
+ if (!is_vertex_a_candidates[i]) {
+ continue;
+ }
+ max_displacement =
+ std::max(max_displacement, displacements.row(i).norm());
+ }
+
+ return 0.5 * dhat / max_displacement;
+}
+
+double Candidates::compute_cfl_stepsize(
+ const CollisionMesh& mesh,
+ const Eigen::MatrixXd& vertices_t0,
+ const Eigen::MatrixXd& vertices_t1,
+ const double dhat,
+ const BroadPhaseMethod broad_phase_method,
+ const double min_distance,
+ const double tolerance,
+ const long max_iterations) const
+{
+ assert(vertices_t0.rows() == mesh.num_vertices());
+ assert(vertices_t1.rows() == mesh.num_vertices());
+
+ const double alpha_C = this->compute_collision_free_stepsize(
+ mesh, vertices_t0, vertices_t1, min_distance, tolerance,
+ max_iterations);
+
+ const double alpha_F = this->compute_noncandidate_conservative_stepsize(
+ mesh, vertices_t1 - vertices_t0, dhat);
+
+ // If alpha_F < 0.5 * alpha_C, then we should do full CCD.
+ if (alpha_F < 0.5 * alpha_C) {
+ return ipc::compute_collision_free_stepsize(
+ mesh, vertices_t0, vertices_t1, broad_phase_method, min_distance,
+ tolerance, max_iterations);
+ }
+ return std::min(alpha_C, alpha_F);
+}
+
size_t Candidates::size() const
{
return ev_candidates.size() + ee_candidates.size() + fv_candidates.size();
diff --git a/src/ipc/candidates/candidates.hpp b/src/ipc/candidates/candidates.hpp
index 28a1cce6f..bbdc13511 100644
--- a/src/ipc/candidates/candidates.hpp
+++ b/src/ipc/candidates/candidates.hpp
@@ -83,6 +83,33 @@ class Candidates {
const double tolerance = DEFAULT_CCD_TOLERANCE,
const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS) const;
+ /// @brief Computes a conservative bound on the largest-feasible step size for surface primitives not in contact.
+ /// @param mesh The collision mesh.
+ /// @param displacements Surface vertex displacements (rowwise).
+ /// @param dhat Barrier activation distance.
+ double compute_noncandidate_conservative_stepsize(
+ const CollisionMesh& mesh,
+ const Eigen::MatrixXd& displacements,
+ const double dhat) const;
+
+ /// @brief Computes a CFL-inspired CCD maximum step step size.
+ /// @param mesh The collision mesh.
+ /// @param vertices_t0 Surface vertex starting positions (rowwise).
+ /// @param vertices_t1 Surface vertex ending positions (rowwise).
+ /// @param dhat Barrier activation distance.
+ /// @param min_distance The minimum distance allowable between any two elements.
+ /// @param tolerance The tolerance for the CCD algorithm.
+ /// @param max_iterations The maximum number of iterations for the CCD algorithm.
+ double compute_cfl_stepsize(
+ const CollisionMesh& mesh,
+ const Eigen::MatrixXd& vertices_t0,
+ const Eigen::MatrixXd& vertices_t1,
+ const double dhat,
+ const BroadPhaseMethod broad_phase_method = DEFAULT_BROAD_PHASE_METHOD,
+ const double min_distance = 0.0,
+ const double tolerance = DEFAULT_CCD_TOLERANCE,
+ const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS) const;
+
bool save_obj(
const std::string& filename,
const Eigen::MatrixXd& vertices,
diff --git a/src/ipc/candidates/continuous_collision_candidate.cpp b/src/ipc/candidates/continuous_collision_candidate.cpp
new file mode 100644
index 000000000..d559cd539
--- /dev/null
+++ b/src/ipc/candidates/continuous_collision_candidate.cpp
@@ -0,0 +1,22 @@
+#include "continuous_collision_candidate.hpp"
+
+namespace ipc {
+
+bool ContinuousCollisionCandidate::ccd(
+ const Eigen::MatrixXd& vertices_t0,
+ const Eigen::MatrixXd& vertices_t1,
+ const Eigen::MatrixXi& edges,
+ const Eigen::MatrixXi& faces,
+ double& toi,
+ const double min_distance,
+ const double tmax,
+ const double tolerance,
+ const long max_iterations,
+ const double conservative_rescaling) const
+{
+ return ccd(
+ dof(vertices_t0, edges, faces), dof(vertices_t1, edges, faces), toi,
+ min_distance, tmax, tolerance, max_iterations, conservative_rescaling);
+}
+
+} // namespace ipc
diff --git a/src/ipc/candidates/continuous_collision_candidate.hpp b/src/ipc/candidates/continuous_collision_candidate.hpp
index e4d032ac2..03444a0dd 100644
--- a/src/ipc/candidates/continuous_collision_candidate.hpp
+++ b/src/ipc/candidates/continuous_collision_candidate.hpp
@@ -1,15 +1,14 @@
#pragma once
#include <ipc/ccd/ccd.hpp>
-
-#include <Eigen/Core>
+#include <ipc/candidates/collision_stencil.hpp>
#include <vector>
namespace ipc {
/// Virtual class for candidates that support CCD.
-class ContinuousCollisionCandidate {
+class ContinuousCollisionCandidate : virtual public CollisionStencil {
public:
virtual ~ContinuousCollisionCandidate() { }
@@ -25,7 +24,7 @@ class ContinuousCollisionCandidate {
/// @param[in] max_iterations Maximum iterations used by Tight-Inclusion CCD.
/// @param[in] conservative_rescaling Conservative rescaling value used to avoid taking steps exactly to impact.
/// @return If the candidate had a collision over the time interval.
- virtual bool
+ bool
ccd(const Eigen::MatrixXd& vertices_t0,
const Eigen::MatrixXd& vertices_t1,
const Eigen::MatrixXi& edges,
@@ -36,7 +35,7 @@ class ContinuousCollisionCandidate {
const double tolerance = DEFAULT_CCD_TOLERANCE,
const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
const double conservative_rescaling =
- DEFAULT_CCD_CONSERVATIVE_RESCALING) const = 0;
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) const;
// Print the vertices of the CCD query for debugging.
virtual void print_ccd_query(
@@ -44,6 +43,18 @@ class ContinuousCollisionCandidate {
const Eigen::MatrixXd& vertices_t1,
const Eigen::MatrixXi& edges,
const Eigen::MatrixXi& faces) const = 0;
+
+protected:
+ virtual bool
+ ccd(const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double tolerance = DEFAULT_CCD_TOLERANCE,
+ const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) const = 0;
};
} // namespace ipc
diff --git a/src/ipc/candidates/edge_edge.cpp b/src/ipc/candidates/edge_edge.cpp
index c049e8d57..bccbac225 100644
--- a/src/ipc/candidates/edge_edge.cpp
+++ b/src/ipc/candidates/edge_edge.cpp
@@ -40,10 +40,8 @@ EdgeEdgeCandidate::compute_distance_hessian(const VectorMax12d& positions) const
}
bool EdgeEdgeCandidate::ccd(
- const Eigen::MatrixXd& vertices_t0,
- const Eigen::MatrixXd& vertices_t1,
- const Eigen::MatrixXi& edges,
- const Eigen::MatrixXi& faces,
+ const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
double& toi,
const double min_distance,
const double tmax,
@@ -51,19 +49,16 @@ bool EdgeEdgeCandidate::ccd(
const long max_iterations,
const double conservative_rescaling) const
{
+ assert(vertices_t0.size() == 12 && vertices_t1.size() == 12);
return edge_edge_ccd(
// Edge 1 at t=0
- vertices_t0.row(edges(edge0_id, 0)),
- vertices_t0.row(edges(edge0_id, 1)),
+ vertices_t0.head<3>(), vertices_t0.segment<3>(3),
// Edge 2 at t=0
- vertices_t0.row(edges(edge1_id, 0)),
- vertices_t0.row(edges(edge1_id, 1)),
+ vertices_t0.segment<3>(6), vertices_t0.tail<3>(),
// Edge 1 at t=1
- vertices_t1.row(edges(edge0_id, 0)),
- vertices_t1.row(edges(edge0_id, 1)),
+ vertices_t1.head<3>(), vertices_t1.segment<3>(3),
// Edge 2 at t=1
- vertices_t1.row(edges(edge1_id, 0)),
- vertices_t1.row(edges(edge1_id, 1)), //
+ vertices_t1.segment<3>(6), vertices_t1.tail<3>(), //
toi, min_distance, tmax, tolerance, max_iterations,
conservative_rescaling);
}
diff --git a/src/ipc/candidates/edge_edge.hpp b/src/ipc/candidates/edge_edge.hpp
index 93e15aa70..e60d09a27 100644
--- a/src/ipc/candidates/edge_edge.hpp
+++ b/src/ipc/candidates/edge_edge.hpp
@@ -10,8 +10,7 @@
namespace ipc {
-class EdgeEdgeCandidate : virtual public CollisionStencil,
- public ContinuousCollisionCandidate {
+class EdgeEdgeCandidate : public ContinuousCollisionCandidate {
public:
EdgeEdgeCandidate(long edge0_id, long edge1_id);
@@ -27,30 +26,6 @@ class EdgeEdgeCandidate : virtual public CollisionStencil,
// ------------------------------------------------------------------------
- /// Perform narrow-phase CCD on the candidate.
- /// @param[in] vertices_t0 Mesh vertices at the start of the time step.
- /// @param[in] vertices_t1 Mesh vertices at the end of the time step.
- /// @param[in] edges Collision mesh edges as rows of indicies into vertices.
- /// @param[in] faces Collision mesh triangular faces as rows of indicies into vertices.
- /// @param[out] toi Computed time of impact (normalized).
- /// @param[in] tmax Maximum time (normalized) to look for collisions. Should be in [0, 1].
- /// @param[in] tolerance CCD tolerance used by Tight-Inclusion CCD.
- /// @param[in] max_iterations Maximum iterations used by Tight-Inclusion CCD.
- /// @param[in] conservative_rescaling Conservative rescaling value used to avoid taking steps exactly to impact.
- /// @return If the candidate had a collision over the time interval.
- bool
- ccd(const Eigen::MatrixXd& vertices_t0,
- const Eigen::MatrixXd& vertices_t1,
- const Eigen::MatrixXi& edges,
- const Eigen::MatrixXi& faces,
- double& toi,
- const double min_distance = 0.0,
- const double tmax = 1.0,
- const double tolerance = DEFAULT_CCD_TOLERANCE,
- const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
- const double conservative_rescaling =
- DEFAULT_CCD_CONSERVATIVE_RESCALING) const override;
-
void print_ccd_query(
const Eigen::MatrixXd& vertices_t0,
const Eigen::MatrixXd& vertices_t1,
@@ -80,6 +55,7 @@ class EdgeEdgeCandidate : virtual public CollisionStencil,
using CollisionStencil::compute_distance;
using CollisionStencil::compute_distance_gradient;
using CollisionStencil::compute_distance_hessian;
+ using ContinuousCollisionCandidate::ccd;
protected:
double compute_distance(const VectorMax12d& positions) const override;
@@ -90,6 +66,17 @@ class EdgeEdgeCandidate : virtual public CollisionStencil,
MatrixMax12d
compute_distance_hessian(const VectorMax12d& positions) const override;
+ bool
+ ccd(const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double tolerance = DEFAULT_CCD_TOLERANCE,
+ const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) const override;
+
virtual EdgeEdgeDistanceType known_dtype() const
{
return EdgeEdgeDistanceType::AUTO;
diff --git a/src/ipc/candidates/edge_vertex.cpp b/src/ipc/candidates/edge_vertex.cpp
index 12a20b1d7..97fcfb29b 100644
--- a/src/ipc/candidates/edge_vertex.cpp
+++ b/src/ipc/candidates/edge_vertex.cpp
@@ -44,10 +44,8 @@ MatrixMax12d EdgeVertexCandidate::compute_distance_hessian(
}
bool EdgeVertexCandidate::ccd(
- const Eigen::MatrixXd& vertices_t0,
- const Eigen::MatrixXd& vertices_t1,
- const Eigen::MatrixXi& edges,
- const Eigen::MatrixXi& faces,
+ const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
double& toi,
const double min_distance,
const double tmax,
@@ -55,15 +53,18 @@ bool EdgeVertexCandidate::ccd(
const long max_iterations,
const double conservative_rescaling) const
{
+ assert(vertices_t0.size() == 6 || vertices_t0.size() == 9);
+ assert(vertices_t0.size() == vertices_t1.size());
+ const int dim = vertices_t0.size() / 3;
return point_edge_ccd(
// Point at t=0
- vertices_t0.row(vertex_id),
+ vertices_t0.head(dim),
// Edge at t=0
- vertices_t0.row(edges(edge_id, 0)), vertices_t0.row(edges(edge_id, 1)),
+ vertices_t0.segment(dim, dim), vertices_t0.tail(dim),
// Point at t=1
- vertices_t1.row(vertex_id),
+ vertices_t1.head(dim),
// Edge at t=1
- vertices_t1.row(edges(edge_id, 0)), vertices_t1.row(edges(edge_id, 1)),
+ vertices_t1.segment(dim, dim), vertices_t1.tail(dim), //
toi, min_distance, tmax, tolerance, max_iterations,
conservative_rescaling);
}
diff --git a/src/ipc/candidates/edge_vertex.hpp b/src/ipc/candidates/edge_vertex.hpp
index f8c3f00de..22f08a046 100644
--- a/src/ipc/candidates/edge_vertex.hpp
+++ b/src/ipc/candidates/edge_vertex.hpp
@@ -10,8 +10,7 @@
namespace ipc {
-class EdgeVertexCandidate : virtual public CollisionStencil,
- public ContinuousCollisionCandidate {
+class EdgeVertexCandidate : public ContinuousCollisionCandidate {
public:
EdgeVertexCandidate(long edge_id, long vertex_id);
@@ -26,30 +25,6 @@ class EdgeVertexCandidate : virtual public CollisionStencil,
// ------------------------------------------------------------------------
- /// Perform narrow-phase CCD on the candidate.
- /// @param[in] vertices_t0 Mesh vertices at the start of the time step.
- /// @param[in] vertices_t1 Mesh vertices at the end of the time step.
- /// @param[in] edges Collision mesh edges as rows of indicies into vertices.
- /// @param[in] faces Collision mesh triangular faces as rows of indicies into vertices.
- /// @param[out] toi Computed time of impact (normalized).
- /// @param[in] tmax Maximum time (normalized) to look for collisions. Should be in [0, 1].
- /// @param[in] tolerance CCD tolerance used by Tight-Inclusion CCD.
- /// @param[in] max_iterations Maximum iterations used by Tight-Inclusion CCD.
- /// @param[in] conservative_rescaling Conservative rescaling value used to avoid taking steps exactly to impact.
- /// @return If the candidate had a collision over the time interval.
- bool
- ccd(const Eigen::MatrixXd& vertices_t0,
- const Eigen::MatrixXd& vertices_t1,
- const Eigen::MatrixXi& edges,
- const Eigen::MatrixXi& faces,
- double& toi,
- const double min_distance = 0.0,
- const double tmax = 1.0,
- const double tolerance = DEFAULT_CCD_TOLERANCE,
- const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
- const double conservative_rescaling =
- DEFAULT_CCD_CONSERVATIVE_RESCALING) const override;
-
void print_ccd_query(
const Eigen::MatrixXd& vertices_t0,
const Eigen::MatrixXd& vertices_t1,
@@ -75,6 +50,7 @@ class EdgeVertexCandidate : virtual public CollisionStencil,
using CollisionStencil::compute_distance;
using CollisionStencil::compute_distance_gradient;
using CollisionStencil::compute_distance_hessian;
+ using ContinuousCollisionCandidate::ccd;
protected:
double compute_distance(const VectorMax12d& positions) const override;
@@ -85,6 +61,17 @@ class EdgeVertexCandidate : virtual public CollisionStencil,
MatrixMax12d
compute_distance_hessian(const VectorMax12d& positions) const override;
+ bool
+ ccd(const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double tolerance = DEFAULT_CCD_TOLERANCE,
+ const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) const override;
+
virtual PointEdgeDistanceType known_dtype() const
{
return PointEdgeDistanceType::AUTO;
diff --git a/src/ipc/candidates/face_vertex.cpp b/src/ipc/candidates/face_vertex.cpp
index 34e1cda04..4abff5a3b 100644
--- a/src/ipc/candidates/face_vertex.cpp
+++ b/src/ipc/candidates/face_vertex.cpp
@@ -41,10 +41,8 @@ MatrixMax12d FaceVertexCandidate::compute_distance_hessian(
}
bool FaceVertexCandidate::ccd(
- const Eigen::MatrixXd& vertices_t0,
- const Eigen::MatrixXd& vertices_t1,
- const Eigen::MatrixXi& edges,
- const Eigen::MatrixXi& faces,
+ const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
double& toi,
const double min_distance,
const double tmax,
@@ -52,17 +50,18 @@ bool FaceVertexCandidate::ccd(
const long max_iterations,
const double conservative_rescaling) const
{
+ assert(vertices_t0.size() == 12 && vertices_t1.size() == 12);
return point_triangle_ccd(
// Point at t=0
- vertices_t0.row(vertex_id),
+ vertices_t0.head<3>(),
// Triangle at t=0
- vertices_t0.row(faces(face_id, 0)), vertices_t0.row(faces(face_id, 1)),
- vertices_t0.row(faces(face_id, 2)),
+ vertices_t0.segment<3>(3), vertices_t0.segment<3>(6),
+ vertices_t0.tail<3>(),
// Point at t=1
- vertices_t1.row(vertex_id),
+ vertices_t1.head<3>(),
// Triangle at t=1
- vertices_t1.row(faces(face_id, 0)), vertices_t1.row(faces(face_id, 1)),
- vertices_t1.row(faces(face_id, 2)), //
+ vertices_t1.segment<3>(3), vertices_t1.segment<3>(6),
+ vertices_t1.tail<3>(), //
toi, min_distance, tmax, tolerance, max_iterations,
conservative_rescaling);
}
diff --git a/src/ipc/candidates/face_vertex.hpp b/src/ipc/candidates/face_vertex.hpp
index 0d2597dee..5c92a29a1 100644
--- a/src/ipc/candidates/face_vertex.hpp
+++ b/src/ipc/candidates/face_vertex.hpp
@@ -10,8 +10,7 @@
namespace ipc {
-class FaceVertexCandidate : virtual public CollisionStencil,
- public ContinuousCollisionCandidate {
+class FaceVertexCandidate : public ContinuousCollisionCandidate {
public:
FaceVertexCandidate(long face_id, long vertex_id);
@@ -27,19 +26,6 @@ class FaceVertexCandidate : virtual public CollisionStencil,
// ------------------------------------------------------------------------
- bool
- ccd(const Eigen::MatrixXd& vertices_t0,
- const Eigen::MatrixXd& vertices_t1,
- const Eigen::MatrixXi& edges,
- const Eigen::MatrixXi& faces,
- double& toi,
- const double min_distance = 0.0,
- const double tmax = 1.0,
- const double tolerance = DEFAULT_CCD_TOLERANCE,
- const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
- const double conservative_rescaling =
- DEFAULT_CCD_CONSERVATIVE_RESCALING) const override;
-
void print_ccd_query(
const Eigen::MatrixXd& vertices_t0,
const Eigen::MatrixXd& vertices_t1,
@@ -65,6 +51,7 @@ class FaceVertexCandidate : virtual public CollisionStencil,
using CollisionStencil::compute_distance;
using CollisionStencil::compute_distance_gradient;
using CollisionStencil::compute_distance_hessian;
+ using ContinuousCollisionCandidate::ccd;
protected:
double compute_distance(const VectorMax12d& positions) const override;
@@ -75,6 +62,17 @@ class FaceVertexCandidate : virtual public CollisionStencil,
MatrixMax12d
compute_distance_hessian(const VectorMax12d& positions) const override;
+ bool
+ ccd(const VectorMax12d& vertices_t0,
+ const VectorMax12d& vertices_t1,
+ double& toi,
+ const double min_distance = 0.0,
+ const double tmax = 1.0,
+ const double tolerance = DEFAULT_CCD_TOLERANCE,
+ const long max_iterations = DEFAULT_CCD_MAX_ITERATIONS,
+ const double conservative_rescaling =
+ DEFAULT_CCD_CONSERVATIVE_RESCALING) const override;
+
virtual PointTriangleDistanceType known_dtype() const
{
return PointTriangleDistanceType::AUTO;
| Average body mass in IPC-toolkit vs average lumped nodal mass in IPC
I have a question about the arguments in adaptive barrier stiffness function called `ipc::initial_barrier_stiffness`, it has one argument called `const double average_mass` which indicates as average mass of all bodies while in `IPC` function called `suggestKappa(kappa)` and according to Supplement of IPC paper, it should be average of lumped nodal mass. Does it the same as `IPC`?
And what types of CCD `IPC-toolkit` does use for `ipc::compute_collision_free_stepsize`, it seems that it is `TIGHT_INCLUSION` while it seems that `IPC` uses `FLOATING_POINT_ROOT_FINDER` as default. Can I use this with `IPC-toolkit`?
| The `average_mass` indeed refers to the "average lumped nodal mass" as described in the supplemental document of our original IPC paper.
By default, the IPC Toolkit uses the [Tight-Inclusion](https://github.com/Continuous-Collision-Detection/Tight-Inclusion) CCD algorithm. To use the same CCD as in the original IPC codebase you can set the CMake option `IPC_TOOLKIT_WITH_CORRECT_CCD` to `OFF`.
Thanks for your clarification, I assume that `ipc::compute_collision_free_stepsize` doesn't compute the `CFL-INSPIRED CULLING OF CCD`. I also want to implement this part using `IPC-toolkit`, but I am not sure what is the equivalent functions for `sh.build(result, searchDir, alpha, result.avgEdgeLen / 3.0);` and what is the best way to do it using `IPC-toolkit`? | 2023-07-23T04:25:41 | 0.0 | [] | [] |
||
ipc-sim/ipc-toolkit | ipc-sim__ipc-toolkit-43 | 87af74f04da02dc4a74d1c6089763e02ff5c452b | diff --git a/src/ipc/ccd/ccd.cpp b/src/ipc/ccd/ccd.cpp
index 2edc5c249..eaf811caa 100644
--- a/src/ipc/ccd/ccd.cpp
+++ b/src/ipc/ccd/ccd.cpp
@@ -9,6 +9,7 @@
#ifdef IPC_TOOLKIT_WITH_CORRECT_CCD
#include <tight_inclusion/ccd.hpp>
+#include <tight_inclusion/interval_root_finder.hpp>
#else
#include <CTCD.h>
#endif
@@ -32,25 +33,27 @@ bool ccd_strategy(
double /*min_distance*/,
bool /*no_zero_toi*/,
double& /*toi*/)>& ccd,
- const double max_iterations,
+ const long max_iterations,
const double min_distance,
const double initial_distance,
const double conservative_rescaling,
double& toi)
{
-
- if (initial_distance == 0) {
- logger().warn("Initial distance is 0, returning toi=0!");
+ if (initial_distance <= min_distance) {
+ logger().warn(
+ "Initial distance {} ≤ d_min={}, returning toi=0!",
+ initial_distance, min_distance);
toi = 0;
return true;
}
- const double min_effective_distance =
- min_distance + (1.0 - conservative_rescaling) * initial_distance;
- // #ifdef IPC_TOOLKIT_WITH_CORRECT_CCD
+ double min_effective_distance =
+ (1.0 - conservative_rescaling) * initial_distance;
+#ifdef IPC_TOOLKIT_WITH_CORRECT_CCD
// Tight Inclusion performs better when the minimum separation is small
- // min_distance = std::min(min_distance, 1e-4);
- // #endif
+ min_effective_distance = std::min(min_effective_distance, 1e-4);
+#endif
+ min_effective_distance += min_distance;
assert(min_effective_distance < initial_distance);
| Unexpectedly slow CCD
Hi, it's me again :wave:
I noticed that CCD sometimes takes a very long time relative to other components of my simulator. For example, for a small impact problem, with a block dropping (with some initial velocity) on top of another, CCD can take ~20 seconds or more where factorization takes milliseconds. I can reproduce similar behavior for a very simple example, see [this Gist here](https://gist.github.com/Andlon/34d9d869d5267d28d86fb36f35f72538), where a single tetrahedron is squeezed so that the top vertex gets within `dhat` of the lower face. Here I'm getting timings like this:
```
Potential duration: 6.1011e-05 seconds.
CCD duration: 2.25247 seconds.
```
Here's the most crucial part of the code (see the above gist for the full setup):
```c++
// Run this several times just to demonstrate that it's not due to some initialization or similar
for (int i = 0; i < 5; ++i) {
const auto potential_begin = std::chrono::steady_clock::now();
const double potential = constraints.compute_potential(mesh, deformed_vertices, dhat);
const Eigen::VectorXd grad = constraints.compute_potential_gradient(mesh, deformed_vertices, dhat);
const Eigen::SparseMatrix<double> hessian = constraints.compute_potential_hessian(mesh, deformed_vertices, dhat,project_spd);
const auto potential_end = std::chrono::steady_clock::now();
const auto potential_duration = std::chrono::duration<double>(potential_end - potential_begin).count();
const auto ccd_begin = std::chrono::steady_clock::now();
const double alpha = ipc::compute_collision_free_stepsize(mesh, rest_vertices, deformed_vertices);
const auto ccd_end = std::chrono::steady_clock::now();
const auto ccd_duration = std::chrono::duration<double>(ccd_end - ccd_begin).count();
std::cout << "Potential duration: " << potential_duration << " seconds." << std::endl;
std::cout << "CCD duration: " << ccd_duration << " seconds." << std::endl;
}
```
Build type is `Release`. I don't have all that much experience with CCD, but this seems slower than what I was expecting. Is there something I should configure that I haven't? I've basically just followed the docs/tutorial here. Any hints as to what I can do here, if anything, would be much appreciated :-)
| (I had to choose, so I labeled this as a bug, although I'm not sure it is one)
(Also, for the record, I'm aware that the Hessian computed here would have to be passed through `to_full_dof` in order to be consistent with the call to `build_from_full_mesh`.)
It seems like you have setup up everything correctly, but I did notice one thing. Here
```
const double alpha = ipc::compute_collision_free_stepsize(mesh, rest_vertices, deformed_vertices);
```
you are doing CCD starting at the rest positions and going to the deformed positions. This might be what you intended, but normally we perform CCD between successive deformed positions in the Newton optimization for example.
What usually affects CCD performance is having tiny initial distances and/or large trajectories. The former causes our algorithm to refine further to avoid producing a conservative estimate of 0 for the TOI. The latter can also cause larger running times as it has to refine further to match our length-based convergence criteria.
When I have some free time, I will try to set up a unit test from your gist and see if there are any obvious bottlenecks that can be improved.
Looking at your gist it doesn't seem to be either of those cases, so it should have been able to get you a quick answer. I will definitely look more into it later.
> you are doing CCD starting at the rest positions and going to the deformed positions. This might be what you intended, but normally we perform CCD between successive deformed positions in the Newton optimization for example.
Yeah, that was just out of convenience for this example. In my simulator I'm indeed incorporating CCD into the line search. | 2023-04-12T02:26:40 | 0.0 | [] | [] |
||
honzajavorek/fiobank | honzajavorek__fiobank-40 | eb8bf130071acc24ae3e862ab82e14bb535b4cd8 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 97ed11e..886ea98 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,7 +2,7 @@
## v4.0.0 (master)
### Breaking
-* Dropped support for Python 3.6 and 3.7
+* Dropped support for Python 3.6, 3.7 and 3.8
### Fix
* Added deprecation warning for decimal=False
@@ -11,6 +11,7 @@
* Re-introduced CI tests
* Re-introduced Dependabot
* Updated .gitignore
+* Added support for Python 3.13
## v3.1.0 (2024-06-13)
### Features
| Tidy up the repo
- [x] https://github.com/honzajavorek/fiobank/pull/26
- [x] https://github.com/honzajavorek/fiobank/compare/master...honzajavorek/facelift
- [ ] README https://github.com/honzajavorek/fiobank/compare/master...honzajavorek/facelift#diff-7b3ed02bc73dc06b7db906cf97aa91dec2b2eb21f2d92bc5caa761df5bbc168f
- [ ] pyproject.toml https://github.com/honzajavorek/fiobank/compare/master...honzajavorek/facelift#diff-50c86b7ed8ac2cf95bd48334961bf0530cdc77b5a56f852c5c61b89d735fd711
- [ ] use ruff
Releasing and CI inspiration:
- https://github.com/juniorguru/teemup/tree/main/.github/workflows
- https://pypi.org/manage/projects/
Drop also 3.8, because:
- https://devguide.python.org/versions/#python-release-cycle
| 2024-10-25T08:13:55 | 0.0 | [] | [] |
|||
disintar/toncli | disintar__toncli-43 | b851fdf9f9325e050d6901d25352dbef27faff38 | diff --git a/INSTALLATION.md b/INSTALLATION.md
index b9e2b14..c25e3b7 100644
--- a/INSTALLATION.md
+++ b/INSTALLATION.md
@@ -5,7 +5,7 @@
### Linux / macOS (intel)
-1. Download needed prebuild for your system from [here](https://github.com/ton-blockchain/ton/actions?query=branch%3Amaster+is%3Acompleted)
+1. Download needed prebuild for Linux : [here](https://github.com/SpyCheese/ton/actions/runs/2585669126), for Mac : [here](https://github.com/SpyCheese/ton/actions/runs/2618664609)
1. You need to be logged-in to GitHub to download pre-builds
2. Install `Python3.9` or higher
3. Run `pip install toncli` or `pip3 install toncli`
@@ -24,7 +24,7 @@
4. Install `toncli` by running `pip install toncli`
-5. Download the compiled TON binaries from [here](https://github.com/ton-blockchain/ton/actions/runs/1713804021) (you need to be logged in to GitHub)
+5. Download the compiled TON binaries from [here](https://github.com/SpyCheese/ton/actions/runs/2618774052) (you need to be logged in to GitHub)

diff --git a/README.md b/README.md
index 30717d4..19a484e 100644
--- a/README.md
+++ b/README.md
@@ -15,57 +15,61 @@
[](https://www.codacy.com/gh/disintar/toncli/dashboard?utm_source=github.com&utm_medium=referral&utm_content=disintar/toncli&utm_campaign=Badge_Grade)
[](https://ton.org)
-The Open Network cross-platform smart contract command line interface.
+The Open Network cross-platform smart contract command line interface.
Easy to deploy and interact with TON smart contracts.
-## Installation
+## 🔧 Installation
-Toncli support Windows / macOS (m1 / intel) / Linux, installation guide can be found: [INSTALLATION.md](/INSTALLATION.md)
+> ⚠ If you want to support new tests engine - you need to use special binaries from SpyCheese repo. More information could be found in docs.
+
+Toncli support Windows / macOS (m1 / intel) / Linux, installation guide can be
+found: [INSTALLATION.md](/INSTALLATION.md)
+
+### 📚 Usage and docs
-### Usage and docs
New to `toncli`?
Try: [Quick start guide](/docs/quick_start_guide.md)
All other documentation lists in `docs/`
-## Configuration
+### Lessons
-On first start `~/.config/toncli/` (on linux, other systems will have diffrent directory) will be created. If you want to change fift/func libs, network config or other stuff - check it out.
+[🇬🇧 Ton FunC lessons Eng](https://github.com/romanovichim/TonFunClessons_Eng)
-## Contributor Guide
+[🇷🇺 Ton FunC lessons Rus](https://github.com/romanovichim/TonFunClessons_ru/)
-Interested in contributing? Feel free to create issues and pull requests.
-There is two main tasks and many TODOs.
+## Contributor Guide
-Main tasks are - not to use lite-client / fift / func. All can be done with python.
+Interested in contributing? Feel free to create issues and pull requests.
-There are many TODOs in code - feel free to fix them and create PRs
+We are trying to process all tasks through issues. You can take own one 🥳
## Features and status
-| Feature | Status |
-|------------------------------------------------------------------------------------------------------------|--------|
-| `fift` / `func` / `lite-server` usage | ✅ |
-| Easy bootstrap project samples `wallet` | ✅ |
-| Deploy-wallet for auto send TON to contracts and tests | ✅ |
-| Compile func to `build/` from `func/` with `files.yaml` | ✅ |
-| Auto send TON to init contract address | ✅ |
-| Deploy to mainnet / testnet / ownnet | ✅ |
+| Feature | Status |
+|-----------------------------------------------------------------------------------------------------------|--------|
+| `fift` / `func` / `lite-server` usage | ✅ |
+| Easy bootstrap project samples `wallet` | ✅ |
+| Deploy-wallet for auto send TON to contracts and tests | ✅ |
+| Compile func to `build/` from `func/` with `files.yaml` | ✅ |
+| Auto send TON to init contract address | ✅ |
+| Deploy to mainnet / testnet / ownnet | ✅ |
| Project interact after deploy: easily send messages, run getmethods, run fift parsers on getmethods output | ✅ |
-| Load from hard project structure (example: `src/projects/wallet`) | ✅ |
-| Run remote contracts locally (get cells from chain and run locally to get error / debug / etc.) | ✅ |
-| Get contract address by `toncli addrs` | ✅ |
-| Docs for contract creation for beginners | ✅ |
-| Project tests with `runvmcode` | ✅ |
-| Windows & Linux support | ✅ |
-| Gas auto calculation for store & deploy | ❌ |
-| Add more project samples with advanced usage | ❌ |
-| Project debug | ❌ |
-| Library support | ❌ |
-| Init Message support (with signature) | ❌ |
+| Load from hard project structure (example: `src/projects/wallet`) | ✅ |
+| Run remote contracts locally (get cells from chain and run locally to get error / debug / etc.) | ✅ |
+| Get contract address by `toncli addrs` | ✅ |
+| Docs for contract creation for beginners | ✅ |
+| Project tests with `runvmcode` | ✅ |
+| Windows & Linux support | ✅ |
+| Unit tests engine | ✅ |
+| Gas auto calculation for store & deploy | ❌ |
+| Add more project samples with advanced usage | ❌ |
+| Project debug | ❌ |
+| Library support | ❌ |
+| Init Message support (with signature) | ❌ |
## Commands
@@ -103,6 +107,9 @@ git clone [email protected]:disintar/toncli.git
cd toncli && pip install -e .
```
+If you wish to update toncli locally you can use ```pip install -e path/to/toncli/rootfolder/```
+command anytime after you make changes to the toncli source code.
+
## Version migration
#### 0.24
@@ -111,9 +118,10 @@ Please, use `toncli update_libs`
#### 0.22
-The easy migration method - remove `fift-libs`, `func-libs` from `~/.config/toncli` or delete whole directory `~/.config/toncli`
-
+The easy migration method - remove `fift-libs`, `func-libs` from `~/.config/toncli` or delete whole
+directory `~/.config/toncli`
## Help and questions
-Feel free to ask questions and help in official telegram chats: [Russian](https://t.me/tondev) / [English](https://t.me/tondev_eng)
+Feel free to ask questions and help in official telegram chats: [Russian](https://t.me/tondev)
+/ [English](https://t.me/tondev_eng)
diff --git a/docs/quick_start_guide.md b/docs/quick_start_guide.md
index 418beda..df76cfb 100644
--- a/docs/quick_start_guide.md
+++ b/docs/quick_start_guide.md
@@ -1,5 +1,4 @@
# toncli Quick start guide
-Provided by [disintar.io](https://disintar.io) team
This quide contains simple steps how-to deploy example smart contract to TON.
@@ -7,6 +6,8 @@ This quide contains simple steps how-to deploy example smart contract to TON.
Please, follow [INSTALLATION.md](/INSTALLATION.md)
+(for test engine special version of builds needed)
+
## 2. Create simple project
### 1. start project
@@ -32,6 +33,7 @@ To call a GET method of your contract you can use this command:
`toncli get hello_world` in the directory of your contract, where hello world is the name of a GET method
## Other types of projects
+
1. External data
1. To use this function you need to run `toncli start external_data`
2. It loads data of another smart-contract
@@ -41,6 +43,11 @@ To call a GET method of your contract you can use this command:
2. Using this function you can load code and data of another contract
3. For detailed usage info you can read [Example](/src/toncli/projects/external_code/README.md)
+## Configuration
+
+On first start `~/.config/toncli/` (on linux, other systems will have diffrent directory) will be created. If you want
+to change fift/func libs, network config or other stuff - check it out.
+
# Other docs
1. [All commands of cli](/docs/advanced/commands.md)
@@ -51,4 +58,4 @@ To call a GET method of your contract you can use this command:
6. [Project structure](/docs/advanced/project_structure.md)
7. [Interesting features](/docs/advanced/intresting_features.md)
8. [Send internal fift messages](/docs/advanced/send_fift_internal.md)
-8. [How func tests works?](/docs/advanced/func_tests.md)
+8. [How func tests works?](/docs/advanced/func_tests_new.md)
diff --git a/src/toncli/lib/fift-libs/Asm.fif b/src/toncli/lib/fift-libs/Asm.fif
index 93bff03..053b190 100644
--- a/src/toncli/lib/fift-libs/Asm.fif
+++ b/src/toncli/lib/fift-libs/Asm.fif
@@ -1171,6 +1171,12 @@ x{FE3} @Defop(s) PRINT
{ 1 DEBUGSTRI } : PRINTSTR
x{FEF000} @Defop LOGFLUSH
+// local toncli primitives
+x{FEEF10} @Defop GASLIMITSTEMP
+x{FEEF11} @Defop PRIVTOPUB
+x{FEEF12} @Defop SIGN
+x{FEEF13} @Defop RESETLOADEDCELLS
+
//
// codepage primitives
x{FF00} @Defop SETCP0
diff --git a/src/toncli/lib/fift-libs/AsmTests.fif b/src/toncli/lib/fift-libs/AsmTests.fif
index 0e124f6..1ade853 100644
--- a/src/toncli/lib/fift-libs/AsmTests.fif
+++ b/src/toncli/lib/fift-libs/AsmTests.fif
@@ -1,15 +1,17 @@
library TVM_Asm
// simple TVM Assembler
variable @atend
+variable @was-split
+false @was-split !
{ "not in asm context" abort } @atend !
{ `normal eq? not abort"must be terminated by }>" } : @normal?
{ @atend @ 1 { @atend ! @normal? } does @atend ! } : @pushatend
{ @pushatend <b } : <{
{ @atend @ execute } : @endblk
-{ `normal @endblk } : }>
+{ false @was-split ! `normal @endblk } : }>
{ }> b> } : }>c
{ }>c <s } : }>s
-{ @atend @ 2 { @atend ! rot b> ref, swap @endblk } does @atend ! <b } : @|
+{ @atend @ 2 { true @was-split ! @atend ! rot b> ref, swap @endblk } does @atend ! <b } : @|
{ @atend @ 3 { @atend ! 2swap rot execute } does @atend ! <b } : @doafter<{
{ over brembits <= } : @havebits
{ rot + -rot + swap } : pair+
@@ -1171,6 +1173,12 @@ x{FE3} @Defop(s) PRINT
{ 1 DEBUGSTRI } : PRINTSTR
x{FEF000} @Defop LOGFLUSH
+// local toncli primitives
+x{FEEF10} @Defop GASLIMITSTEMP
+x{FEEF11} @Defop PRIVTOPUB
+x{FEEF12} @Defop SIGN
+x{FEEF13} @Defop RESETLOADEDCELLS
+
//
// codepage primitives
x{FF00} @Defop SETCP0
@@ -1220,6 +1228,10 @@ variable asm-mode 1 asm-mode !
} : PROGRAM{
{ over sbits < { s>c <b swap ref, b> <s } if } : @adj-long-proc
{ // i s l
+ dup 0< {
+ negate
+ @was-split @ { drop 0 } if
+ } if
@adj-long-proc over @procdict @ @procdictkeylen
idict!+ not abort"cannot define procedure, redefined?"
@procdict ! 2 2 @procinfo~!
@@ -1229,6 +1241,7 @@ variable asm-mode 1 asm-mode !
{ @have-procinfo? { 8 8 @procinfo~! } { drop } cond } : @proc-called
{ 1000 @def-proc } : PROC
{ 0 @def-proc } : PROCREF
+{ -1000 @def-proc } : PROCINLINE
{ @procdict @ @procdictkeylen idict@ abort"procedure already defined"
} : @fail-ifdef
{ u@?+ { swap abort"first bits are not zeroes" } if } : @cut-zeroes
@@ -1238,6 +1251,7 @@ variable asm-mode 1 asm-mode !
} : @PROC:<{
{ 1000 @PROC:<{ } : PROC:<{
{ 0 @PROC:<{ } : PROCREF:<{
+{ -1000 @PROC:<{ } : PROCINLINE:<{
{ dup @proc-called CALLDICT } dup : CALL : CALLDICT
{ dup @proc-called JMPDICT } dup : JMP : JMPDICT
{ dup @proc-called PREPAREDICT } dup : PREPARE : PREPAREDICT
diff --git a/src/toncli/lib/func-libs/error_codes.func b/src/toncli/lib/func-libs/error_codes.func
new file mode 100644
index 0000000..ac2fd88
--- /dev/null
+++ b/src/toncli/lib/func-libs/error_codes.func
@@ -0,0 +1,16 @@
+int error::exit_code_not_success() asm "100 PUSHINT";
+int error::exit_code_not_unsuccess() asm "101 PUSHINT";
+int error::data_changed() asm "102 PUSHINT";
+int error::data_not_changed() asm "103 PUSHINT";
+int error::data_incorrect() asm "104 PUSHINT";
+int error::actions_are_not_empty() asm "105 PUSHINT";
+int error::actions_empty() asm "106 PUSHINT";
+int error::actions_unvalid() asm "107 PUSHINT";
+int error::slices_not_equal() asm "108 PUSHINT";
+int error::slices_equal() asm "109 PUSHINT";
+int error::wrong_action_id() asm "110 PUSHINT";
+int error::wrong_send_mode() asm "111 PUSHINT";
+int error::not_internal_message() asm "112 PUSHINT";
+int error::not_external_message() asm "113 PUSHINT";
+
+int error::not_supported_yet() asm "0xFACC PUSHINT";
\ No newline at end of file
diff --git a/src/toncli/lib/func-libs/math.func b/src/toncli/lib/func-libs/math.func
new file mode 100644
index 0000000..58944b7
--- /dev/null
+++ b/src/toncli/lib/func-libs/math.func
@@ -0,0 +1,91 @@
+{-
+ math.func
+
+ Extends FunC's arithmetic operations.
+-}
+
+(int) power(int x, int exponent) inline {
+ if(x == 0)
+ {
+ return 0;
+ }
+
+ if (exponent == 0){
+ return 1;
+ }
+ else
+ {
+ var result = x;
+ var counter = exponent;
+ while (counter > 1) {
+ result *= x;
+ counter -= 1;
+ }
+ return result;
+ }
+}
+
+(int) sqrt(int x) inline {
+ if (x == 0){
+ return 0;
+ } else {
+ if (x <= 3) {
+ return 1;
+ } else {
+ int z = (x + 1) / 2;
+ int y = x;
+ while (z < y) {
+ y = z;
+ z = (x / z + z) / 2;
+ }
+ return y;
+ }
+ }
+}
+
+(int) avg(int x, int y) inline {
+ return (x + y) / 2;
+}
+
+(int) exp(int x) inline {
+ return (x >= 0 ? 1 << x : 1 >> (x * -1));
+}
+
+(int) log2(int x) inline {
+ int n = 0;
+ if (x >= 128.exp()) {
+ x >>= 128;
+ n += 128;
+ }
+ if (x >= 64.exp()) {
+ x >>= 64;
+ n += 64;
+ }
+ if (x >= 32.exp()) {
+ x >>= 32;
+ n += 32;
+ }
+ if (x >= 16.exp()) {
+ x >>= 16;
+ n += 16;
+ }
+ if (x >= 8.exp()) {
+ x >>= 8;
+ n += 8;
+ }
+ if (x >= 4.exp()) {
+ x >>= 4;
+ n += 4;
+ }
+ if (x >= 2.exp()) {
+ x >>= 2;
+ n += 2;
+ }
+ if (x >= 1.exp()) {
+ ;; x >>= 1;
+ n += 1;
+ }
+ return n;
+}
+
+(int) mod (int x, int y) asm "MOD";
\ No newline at end of file
diff --git a/src/toncli/lib/func-libs/stdlib.func b/src/toncli/lib/func-libs/stdlib.func
index 0431d32..e56b984 100644
--- a/src/toncli/lib/func-libs/stdlib.func
+++ b/src/toncli/lib/func-libs/stdlib.func
@@ -1,6 +1,6 @@
;; Standard library for funC
;;
-
+forall X -> tuple unsafe_tuple(X x) asm "NOP";
forall X -> tuple cons(X head, tuple tail) asm "CONS";
forall X -> (X, tuple) uncons(tuple list) asm "UNCONS";
forall X -> (tuple, X) list_next(tuple list) asm( -> 1 0) "UNCONS";
@@ -51,6 +51,7 @@ int check_data_signature(slice data, slice signature, int public_key) asm "CHKSI
() dump_stack() impure asm "DUMPSTK";
+cell get_c5() asm "c5 PUSH";
cell get_data() asm "c4 PUSH";
() set_data(cell c) impure asm "c4 POP";
cont get_c3() impure asm "c3 PUSH";
@@ -105,6 +106,7 @@ int slice_depth(slice s) asm "SDEPTH";
int builder_refs(builder b) asm "BREFS";
int builder_bits(builder b) asm "BBITS";
int builder_depth(builder b) asm "BDEPTH";
+builder store_builder(builder to, builder from) asm "STBR";
builder begin_cell() asm "NEWC";
cell end_cell(builder b) asm "ENDC";
@@ -199,6 +201,8 @@ int cell_null?(cell c) asm "ISNULL";
() raw_reserve_extra(int amount, cell extra_amount, int mode) impure asm "RAWRESERVEX";
() send_raw_message(cell msg, int mode) impure asm "SENDRAWMSG";
() set_code(cell new_code) impure asm "SETCODE";
+() set_lib_code(cell library, int x) impure asm "SETLIBCODE";
+() change_lib(int lib_hash, int x) impure asm "CHANGELIB";
int random() impure asm "RANDU256";
int rand(int range) impure asm "RAND";
diff --git a/src/toncli/main.py b/src/toncli/main.py
index f1b2954..fcd3cf5 100644
--- a/src/toncli/main.py
+++ b/src/toncli/main.py
@@ -34,8 +34,8 @@ def main():
local_lib_path, global_lib_path = get_libs_paths()
logger.warning(
TextUtils.VERSION_WARNING,
- local_lib_path,
- global_lib_path
+ global_lib_path,
+ local_lib_path
)
parserUtil = ParserUtil(parser)
diff --git a/src/toncli/modules/utils/check_hash.py b/src/toncli/modules/utils/check_hash.py
index b95c06e..b1bcd02 100644
--- a/src/toncli/modules/utils/check_hash.py
+++ b/src/toncli/modules/utils/check_hash.py
@@ -28,8 +28,10 @@ def check_2_libs_actual():
local_fift_hashes = get_dir_hashes(os.path.abspath(f"{local_path}/fift-libs"))
global_func_hashes = get_dir_hashes(os.path.abspath(f"{global_path}/func-libs"))
local_func_hashes = get_dir_hashes(os.path.abspath(f"{local_path}/func-libs"))
+ global_test_hashes = get_dir_hashes(os.path.abspath(f"{global_path}/test-libs"))
+ local_test_hashes = get_dir_hashes(os.path.abspath(f"{local_path}/test-libs"))
- return global_fift_hashes == local_fift_hashes and global_func_hashes == local_func_hashes
+ return global_fift_hashes == local_fift_hashes and global_func_hashes == local_func_hashes and global_test_hashes == local_test_hashes
def get_libs_paths():
diff --git a/src/toncli/modules/utils/commands/command_classes/local_version_command.py b/src/toncli/modules/utils/commands/command_classes/local_version_command.py
index 6da4567..a53194a 100644
--- a/src/toncli/modules/utils/commands/command_classes/local_version_command.py
+++ b/src/toncli/modules/utils/commands/command_classes/local_version_command.py
@@ -10,6 +10,7 @@
class LocalVersionCommand():
def __init__(self):
update_text = f'\n🦋 New {bl}TONCLI{rs} version is available. Please install it using "{bl}pip install --upgrade toncli{rs}".\n'
+
version_local = pkg_resources.get_distribution("toncli").version
try:
version_global = requests.get('https://pypi.org/pypi/toncli/json').json()['info']['version']
diff --git a/src/toncli/modules/utils/commands/command_classes/update_libs_command.py b/src/toncli/modules/utils/commands/command_classes/update_libs_command.py
index bba9aa5..355e6d3 100644
--- a/src/toncli/modules/utils/commands/command_classes/update_libs_command.py
+++ b/src/toncli/modules/utils/commands/command_classes/update_libs_command.py
@@ -6,7 +6,7 @@
class UpdateLibsCommand():
def __init__(self):
global_lib_path, local_lib_path = get_libs_paths()
- folder_names = ["fift-libs", "func-libs" ]
+ folder_names = ["fift-libs", "func-libs", "test-libs" ]
for folder_name in folder_names:
shutil.copytree(os.path.abspath(f"{global_lib_path}/{folder_name}"), os.path.abspath(f"{local_lib_path}/{folder_name}"),
dirs_exist_ok=True)
diff --git a/src/toncli/modules/utils/commands/commands_executer.py b/src/toncli/modules/utils/commands/commands_executer.py
index c08286a..1551296 100644
--- a/src/toncli/modules/utils/commands/commands_executer.py
+++ b/src/toncli/modules/utils/commands/commands_executer.py
@@ -25,8 +25,7 @@ class CommandsExecuter():
string_kwargs= []
parser: ArgumentParser
- def __init__(self, command, string_kwargs, parser):
-
+ def __init__(self, command, string_kwargs, parser):
_, kwargs = argv_fix(sys.argv, string_kwargs)
if len(kwargs) == 0 and not command:
diff --git a/src/toncli/modules/utils/func/commands.py b/src/toncli/modules/utils/func/commands.py
index ad1f4d1..ba002ea 100644
--- a/src/toncli/modules/utils/func/commands.py
+++ b/src/toncli/modules/utils/func/commands.py
@@ -1,35 +1,30 @@
+"""
+Build func file(s) and save result fift file to location
+"""
import os
-import subprocess
-import sys
+from subprocess import check_output
from typing import Optional, List
-import yaml
from colorama import Fore, Style
from toncli.modules.utils.system.conf import config_folder, executable, getcwd
-from toncli.modules.utils.system.log import logger
-from toncli.modules.utils.system.project import migrate_project_struction
from toncli.modules.utils.system.project_conf import ProjectConf, TonProjectConfig
bl = Fore.CYAN
gr = Fore.GREEN
rs = Style.RESET_ALL
-
def build(project_root: str,
cwd: Optional[str] = None,
func_args: List[str] = None,
- contracts: List[TonProjectConfig] = None,
- use_tests_lib: bool = False) -> Optional[str]:
+ contracts: List[TonProjectConfig] = None) -> Optional[str]:
"""
- Build func file(s) and save result fift file to location
-
- :param contracts: contracts to build
- :param func_args: add arguments to func
- :param project_root: Files to build in needed order
- :param cwd: If you need to change root of running script pass it here
- :param use_tests_lib: Use stdlib-tests.func
- :return:
+ build method params are :
+ :param contracts: contracts to build
+ :param func_args: add arguments to func
+ :param project_root: Files to build in needed order
+ :param cwd: If you need to change root of running script pass it here
+ :return:
"""
if not contracts:
project_config = ProjectConf(project_root)
@@ -41,28 +36,33 @@ def build(project_root: str,
output = []
for contract in contracts:
output.append(
- build_files(contract.func_files_locations, contract.to_save_location, func_args, cwd,
- use_tests_lib=use_tests_lib))
-
- if len(contract.func_tests_files_locations) and use_tests_lib:
- output.append(
- build_files([f"{config_folder}/func-libs/tests-helpers.func", *contract.func_tests_files_locations],
- contract.to_save_tests_location, [], cwd,
- use_tests_lib=True))
+ build_files(contract.func_files_locations, contract.to_save_location, func_args, cwd))
return "\n".join(list(map(str, output)))
-
def build_files(func_files_locations: List[str], to_save_location: str, func_args: List[str] = None,
- cwd: Optional[str] = None, use_tests_lib: bool = False):
+ cwd: Optional[str] = None):
+ """
+ build_files method params are :
+ :func_files_locations: location of the func files
+ :param to_save_location: location to save the files
+ :param func_args: add arguments to func
+ :param cwd: If you need to change root of running script pass it here
+ :return:
+ """
+ func_files = []
+ for root, _, files in os.walk(f"{config_folder}/func-libs/"):
+ for file in files:
+ if file.endswith((".func", ".fc")):
+ func_files.append(os.path.join(root, file))
+
build_command = [os.path.abspath(executable['func']), *func_args, "-o",
os.path.abspath(to_save_location), "-SPA",
- os.path.abspath(
- f"{config_folder}/func-libs/stdlib.func") if not use_tests_lib else os.path.abspath(
- f"{config_folder}/func-libs/stdlib-tests.func"),
+ *[os.path.abspath(i) for i in func_files],
*[os.path.abspath(i) for i in func_files_locations]]
-
- get_output = subprocess.check_output(build_command, cwd=getcwd() if not cwd else os.path.abspath(cwd))
+ get_output = check_output(build_command,
+ cwd=getcwd() if not cwd else os.path.abspath(cwd),
+ shell=False)
if get_output:
- return get_output.decode()
+ return get_output.decode()
\ No newline at end of file
diff --git a/src/toncli/modules/utils/parsers/parser_utils.py b/src/toncli/modules/utils/parsers/parser_utils.py
index 22c82c4..163f0e1 100644
--- a/src/toncli/modules/utils/parsers/parser_utils.py
+++ b/src/toncli/modules/utils/parsers/parser_utils.py
@@ -107,7 +107,7 @@ def set_shorcuts_parser(self):
self.subparser.add_parser('build', help="Same as func build",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(TextUtils.FIFT_HELP))
-
+
def set_fift_parser(self):
parser_fift = self.subparser.add_parser('fift', help=TextUtils.FIFT_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
@@ -127,7 +127,7 @@ def set_fift_parser(self):
default='',
help='Pass args and kwargs to lite-client command in sendboc mode, '
'e.g.: -la "-v 4" - set verbose level')
-
+
def set_liteclient_parser(self):
parser_lite_client = self.subparser.add_parser('lite-client', help=TextUtils.LITE_CLIENT_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
@@ -142,28 +142,32 @@ def set_liteclient_parser(self):
parser_lite_client.add_argument("--lite-client-post-args", "-lpa", type=str,
default='',
help='Pass args to lite-client command at the end')
-
+
def set_sendboc_parser(self):
parser_sendboc = self.subparser.add_parser('sendboc')
parser_sendboc.add_argument('file', type=argparse.FileType('r'))
parser_sendboc.add_argument("--net", "-n", default='testnet', type=str, choices=['testnet', 'mainnet', 'ownnet'],
help='Network to deploy')
-
+
def set_wallet_parser(self):
self.subparser.add_parser('wallet')
def set_runtests_parser(self):
run_tests = self.subparser.add_parser('run_tests')
run_tests.add_argument("--contracts", "-c", type=str,
- help='Set contract name from project.yaml to run tests on')
+ help='Set contract name from project.yaml to run tests on')
run_tests.add_argument("--verbose", "-v", type=int, default=0,
- help='Set contract name from project.yaml to run tests on')
+ help='Prints more debug information')
run_tests.add_argument("--output-results", "-o", action='store_true',
- help='Set contract name from project.yaml to run tests on')
-
+ help='Stores results as json')
+ run_tests.add_argument("--old", action='store_true', help='In old versions of toncli tests had to have '
+ 'specific method_ids (starting from 0). If you '
+ 'still follow this convention, and want to run '
+ 'tests, provide this flag.')
+
def set_updatelibs_parser(self):
self.subparser.add_parser('update_libs')
-
+
def set_func_parser(self):
parser_func = self.subparser.add_parser('func', help=TextUtils.FUNC_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
diff --git a/src/toncli/modules/utils/system/check_executable.py b/src/toncli/modules/utils/system/check_executable.py
index 6f14173..bb8ea5b 100644
--- a/src/toncli/modules/utils/system/check_executable.py
+++ b/src/toncli/modules/utils/system/check_executable.py
@@ -3,9 +3,13 @@
import shutil
import subprocess
from typing import Optional, List, Dict, Tuple
-
+from colorama import Fore, Style
from toncli.modules.utils.system.log import logger
+bl = Fore.CYAN
+gr = Fore.GREEN
+rs = Style.RESET_ALL
+
def safe_get_version(executable: str) -> Optional[List[str]]:
try:
@@ -54,6 +58,13 @@ def check_executable(executable_config: Dict) -> Tuple[Dict, bool]:
version_output = safe_get_version(config[item])
if version_output is not None and len(version_output) == 2:
+ logger.info(version_output)
+
+ # logger.info(f"""You should also update your binaries for this version. Please download the binaries depending on your system at the links provided. \n
+ # Windows : {bl}https://github.com/SpyCheese/ton/actions/runs/2618774052{rs} \n
+ # Linux : {bl}https://github.com/SpyCheese/ton/actions/runs/2585669126{rs} \n
+ # MacOs : {bl}https://github.com/SpyCheese/ton/actions/runs/2618664609{rs} \n""")
+
is_executable_changes = True
founded_executable = True
else:
diff --git a/src/toncli/modules/utils/text/text_utils.py b/src/toncli/modules/utils/text/text_utils.py
index e25dc5e..ee1ff74 100644
--- a/src/toncli/modules/utils/text/text_utils.py
+++ b/src/toncli/modules/utils/text/text_utils.py
@@ -1,10 +1,10 @@
from colorama import Fore, Style
-
gr = Fore.GREEN
bl = Fore.CYAN
rs = Style.RESET_ALL
+
class TextUtils:
FIFT_HELP = f'''positional arguments:
{bl}command{rs} Which mode to run, can be [interactive, run, sendboc]
@@ -75,5 +75,4 @@ class TextUtils:
Credits: {gr}disintar.io{rs} team
'''
- VERSION_WARNING = """\nIts seems that your local fift and func libs (%s) differs from their actual versions (%s).
- You can update them automatically using "toncli update_libs" or disable this warning by changing "LIBS_WARNING" to "False" param in cofig\n\n"""
\ No newline at end of file
+ VERSION_WARNING = f""" Its seems that your local fift and func libs ({bl}%s{rs}) differs from their actual versions ({bl}%s{rs}). You can update them automatically using "{bl}toncli update_libs{rs}" or disable this warning by changing {gr}"LIBS_WARNING" to "False"{rs} param in cofig\n\n"""
| Fix c5_parsing
* Fixes c5 parsing for action lists with many entities
* Returns actions in FIFO order
| 2022-05-08T17:32:56 | 0.0 | [] | [] |
|||
wgwz/flask-cookie-decode | wgwz__flask-cookie-decode-11 | e650477c7ba7a054f0ea5461aa3bc299f1ab91f3 | diff --git a/HISTORY.rst b/HISTORY.rst
index 75affbf..4680d51 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -3,6 +3,15 @@ History
.. towncrier release notes start
+Flask_Cookie_Decode 0.3.2 (2019-11-29)
+======================================
+
+Bugfixes
+--------
+
+- The `fcd` client now handles compressed cookies. (#10)
+
+
Flask_Cookie_Decode 0.3.1 (2019-11-29)
======================================
diff --git a/flask_cookie_decode/__init__.py b/flask_cookie_decode/__init__.py
index 9e7ce0c..98810d2 100644
--- a/flask_cookie_decode/__init__.py
+++ b/flask_cookie_decode/__init__.py
@@ -13,7 +13,7 @@
__author__ = """Kyle Lawlor"""
__email__ = "[email protected]"
-__version__ = "0.3.1"
+__version__ = "0.3.2"
from .cookie_decode import CookieDecode
diff --git a/flask_cookie_decode/__main__.py b/flask_cookie_decode/__main__.py
index 74ffeac..7ce13ce 100644
--- a/flask_cookie_decode/__main__.py
+++ b/flask_cookie_decode/__main__.py
@@ -12,7 +12,7 @@ def main():
@click.argument("cookie")
def decode(cookie):
s = URLSafeSerializer("foo")
- if "." in cookie:
+ if not cookie.startswith("."):
to_load = cookie.split(".")[0]
else:
to_load = cookie
diff --git a/setup.cfg b/setup.cfg
index 56ad35b..9785784 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 0.3.1
+current_version = 0.3.2
commit = True
tag = False
| new fcd CLI is currently not working for compressed cookies
i thought i had tested this but forgot that i never did!
here's the test for uncompressed cookies (notice how i forgot the compressed case): https://github.com/wgwz/flask-cookie-decode/blob/master/tests/test_flask_cookie_decode.py#L26-L30
| 2019-11-29T21:15:30 | 0.0 | [] | [] |
|||
PyFE/PyFENG | PyFE__PyFENG-113 | bfc915e41c75921190211248ee1625c0f0217074 | diff --git a/LiftedHeston.py b/LiftedHeston.py
new file mode 100644
index 0000000..891d269
--- /dev/null
+++ b/LiftedHeston.py
@@ -0,0 +1,178 @@
+import numpy as np
+import scipy.integrate as spint
+import scipy.special as scsp
+
+
+class LiftedHeston:
+
+ def __init__(
+ self, vov=0.0, v0=0.0, rho=0.0, theta=0.0, lamda=0.0, H=0.0, n=20, rn=2.5
+ ):
+ """
+ Args:
+ vov: volatility of volatility
+ v0: initial variance
+ rho: correlation between price and volatility
+ theta: mean_reversion term
+ lamda: mean_reversion term
+ H: Hurst index, measuring regularity of the sample paths of V
+ n: number of factors
+ rn: rn ↓ 1 and n*log(rn) → ∞ as n → ∞
+ n and rn are fixed according to (26) in the paper
+ """
+ self.vov = vov
+ self.v0=v0
+ self.rho = rho
+ self.theta = theta
+ self.lamda = lamda
+ self.H = H
+ self.n = n
+ self.rn = rn
+
+ def weight(self):
+ """
+ Generate cn from known parameters
+ according to equation (19) in the paper
+ """
+ cn_v = []
+ for i in range(1,self.n+1):
+ alpha = self.H + 1/2
+ cn_i = (np.power(self.rn,1-alpha) -1) * np.power(self.rn,(alpha-1)*(1+self.n/2)) * np.power(self.rn,i*(1-alpha))/(scsp.gamma(alpha)*scsp.gamma(2-alpha))
+ cn_v += [cn_i]
+ return np.array(cn_v)
+
+ def mean_rev_speed(self):
+
+ """
+ Generate xn from known parameters
+ according to equation (19) in the paper
+ """
+
+ xn_v = []
+ for i in range(1,self.n+1):
+ alpha = self.H + 1/2
+ xn_i = ((1-alpha) * (np.power(self.rn,(2-alpha)) - 1) * np.power(self.rn,i-1-self.n/2))/((2-alpha)* (np.power(self.rn,1-alpha)-1))
+ xn_v += [xn_i]
+ return np.array(xn_v)
+
+ def gn(self, t):
+
+ """
+ Generate gn(t) according to equation (10) in the paper
+ """
+
+ mean_revs = self.mean_rev_speed()
+ weight = self.weight()
+ integral_array = []
+ for i in range(0,self.n):
+ integral = (1-np.exp(-mean_revs[i]*t))/mean_revs[i]
+ integral_array += [integral]
+ integral_array = np.array(integral_array)
+
+ gn = self.v0 + self.lamda * self.theta * np.sum(weight * integral_array)
+ return gn
+
+
+ def price(self, strike, spot, texp, cp=1):
+
+ """
+ Calculate the price
+ Firstly solve the phi function using discretization scheme in (A11) with a number of time steps N = 300
+ Then get the characteristic function
+ The call prices are computed via the cosine method (Fang and Oosterlee 2008) for the inversion of the characteristic function
+ """
+
+ price=None
+ return price
+
+class LiftedHestonMc(LiftedHeston):
+
+ def set_mc_params(self, n_path=1000, rn_seed=None):
+ """
+ Set MC parameters
+
+ Args:
+ n_path: number of paths
+ rn_seed: random number seed
+ """
+
+ self.n_path = int(n_path)
+ self.rn_seed = rn_seed
+ self.rng = np.random.default_rng(rn_seed)
+
+
+ def vo_path(self, texp, N):
+ """
+ Simulate one single path of Volatility process and U process of each factor
+ simulated according to modified explicit–implicit scheme (A12) and (A13) in hte paper
+ """
+ dt = texp/N
+ tobs = dt*np.arange(0,N+1)
+ zz = self.rng.standard_normal(size=N)
+
+ u_n = np.zeros((self.n, N+1))
+ v_n = np.zeros(N+1)
+
+ for i in range(0,N):
+ v_n[i] = self.gn(tobs[i]) + np.sum(self.weight()*u_n[:,i])
+ u_n[:,i+1] = (1/(1+dt*self.mean_rev_speed()))* (u_n[:,i] - self.lamda * v_n[i] *dt + self.vov * np.sqrt( np.fmax(v_n[i],0) * dt)*zz[i])
+ # u_n[:,i+1] = (1-dt*self.mean_rev_speed())* u_n[:,i] - self.lamda * v_n[i] *dt + self.vov * np.sqrt( np.fmax(v_n[i],0) * dt)*zz[i]
+ v_n[N] = self.gn(tobs[N]) + np.sum(self.weight()*u_n[:,N])
+
+ return u_n, v_n
+
+ def vo_paths(self, texp, N):
+ """
+ Montecarlo Simulation of paths of Volatility process and U process of each factor
+ simulated according to modified explicit–implicit scheme (A12) and (A13) in hte paper
+ """
+ dt = texp/N
+ tobs = dt*np.arange(0,N+1)
+ zz = self.rng.standard_normal(size=(self.n_path,N))
+
+ u_n = np.zeros((self.n_path, self.n, N+1))
+ v_n = np.zeros((self.n_path, N+1))
+
+ for i in range(0,N):
+ v_n[:,i] = self.gn(tobs[i]) + np.sum(self.weight()*u_n[:,:,i],1)
+ u_n[:,:,i+1] = (1/(1+dt*self.mean_rev_speed()))*(u_n[:,:,i] - self.lamda * v_n[:,i][:,None] *dt + self.vov * np.sqrt( np.fmax(v_n[:,i][:,None],0) * dt)*zz[:,i][:,None])
+ # u_n[:,:,i+1] = (1-dt*self.mean_rev_speed())* u_n[:,:,i] - self.lamda * v_n[:,i][:,None] *dt + self.vov * np.sqrt( np.fmax(v_n[:,i][:,None],0) * dt)*zz[:,i][:,None]
+
+ v_n[:, N] = self.gn(tobs[N]) + np.sum(self.weight()*u_n[:,:,N],1)
+ return (v_n, zz)
+
+ def mc_price(self, strike, spot, texp, N, cp=1):
+
+ """
+ Calculate option prices
+ Parameters
+ ----------
+ strike: strike prices
+ spot: spot prices
+ texp: time to expiration
+ N: time steps
+ ----------
+ """
+
+ dt = texp/N
+ zz = self.vo_paths(texp, N)[1]
+ vo = self.vo_paths(texp, N)[0]
+ zz2 = np.random.default_rng(self.rn_seed-1).standard_normal(size=(self.n_path,N))
+ ww = self.rho * zz + np.sqrt(1 - np.power(self.rho,2))* zz2
+ st = np.ones(self.n_path)*spot
+
+ for i in range(0,N):
+ st = st + st * np.sqrt(np.fmax(vo[:,i],0)*dt) * ww[:,i]
+ #print(st)
+
+ n_strike=strike.size
+ price=np.zeros(n_strike)
+ for i in range(n_strike):
+ price[i] = np.mean(np.fmax(st-strike[i],0))
+ return price
+
+
+
+
+
+
diff --git a/Report-Lifted Heston_v3.ipynb b/Report-Lifted Heston_v3.ipynb
new file mode 100644
index 0000000..0a91dea
--- /dev/null
+++ b/Report-Lifted Heston_v3.ipynb
@@ -0,0 +1,1643 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<img align=\"left\" src=\"https://statics.phbs.pku.edu.cn/uploadfile/2018/0119/20180119080526567.png\" style=\"margin-top:50px\">\n",
+ "<h1 align=\"right\" style=\"margin-top:60px\">Applied Stochastic Processes (FIN 514) </h1>\n",
+ "<h1 align=\"right\" style=\"margin-top:20px\">Module 3, 2021-2022 </h1>"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<h1 align=\"center\" style=\"margin-top:40px\">Lifted Heston Model</h1>\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<center>\n",
+ "<font color=black size=4 face=times> Team: Ji Yucheng & Wang Mengjie<br>\n",
+ " Instructor: Jaehyuk Choi<br>\n",
+ "\n",
+ "<center>"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##### Main References:\n",
+ "\n",
+ "[1]Abi Jaber, E.,Lifting the Heston model. Quantitative Finance, 2019, 19(12), 1995-2013. \n",
+ "\n",
+ "[2]Abi Jaber, E. and El Euch, O., Multifactor approximation of rough volatility models. SIAM J. Financ. Math., 2019, 10(2), 309–349.\n",
+ "\n",
+ "[3]Cont, R. and Tankov, P., Financial Modelling with Jump Processes, Vol. 2, 2003 (CRC Press, Taylor & Francis Group: Boca Raton, London, New York, Washington).\n",
+ "\n",
+ "[4]Fang, F. and Oosterlee, C., A novel pricing method for European options based on Fourier-cosine series expansions. SIAM J. Sci. Comput., 2008, 31(2), 826–848.\n",
+ "\n",
+ "[5]Gatheral, J., The Volatility Surface: A Practitioner’s Guide, Vol. 7, 2011 (John Wiley & Sons: Hoboken, NJ).\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Brief Introduction\n",
+ "### 1.Model Set up\n",
+ " \n",
+ "  This paper defines the lifted Heston model as a conventional stochastic volatility model, with n (n ∈ N)\n",
+ "factors for the variance process: \n",
+ "\n",
+ "$$dS_t^n = S_t^n \\sqrt[]{V_t^n} dB_t, S^n_0 > 0, $$\n",
+ "$$V_t^n = g^n_0(t) + \\sum^n_{i=1} c^n_i U^{n,i}_t ,$$\n",
+ "$$dU_t^{n,i} = ( −x^n_i U^{n,i}_ t − λV^n_t ) dt + ν\\sqrt[]{V_t^n} dW_t, $$\n",
+ "$$U_0^{n,i} = 0, i = 1, ... , n, $$\n",
+ "\n",
+ "\n",
+ "  where λ, ν ∈ R+, \n",
+ "\n",
+ "  $c^n_i , x^n_i ≥ 0$, for i =1, ... , n, \n",
+ "\n",
+ "  $B = ρW + \\sqrt[]{1 − ρ^2}W^⊥$, with $(W, W^⊥) $ a two dimensional Brownian motion with ρ ∈ [−1, 1].\n",
+ "\n",
+ "  $g^n_0$ is a continuous function on t\n",
+ "\n",
+ "#####   [Conditions for $g^n_0$]\n",
+ "  if $g^n_0$ is non-decreasing such that $g^n_0(0) ≥ 0$, or\n",
+ "\n",
+ "  $g^n_0 : t → V_0 + \\sum_{i=1}^{n}c^n_i \\int_0^t e^{−x^n_i (t−s)}θ (s) ds$, with $V_0$, θ ≥ 0,\n",
+ "\n",
+ "  then the strong solution of $(S^n, V^n,(U^{n,i})_{1≤i≤n})$ exists, such that $V_t^n$ ≥ 0, for all t ≥ 0,(proof in Appendix1)\n",
+ "\n",
+ "  Specifically, let\n",
+ "$g^n_0 : t → V_0 + \\lambdaθ \\sum_{i=1}^{n}c^n_i \\int_0^t e^{−x^n_i (t−s)}ds$, with $V_0$, θ ≥ 0\n",
+ "\n",
+ "\n",
+ "\n",
+ "#####   Setting n=1, $c_1^1$ = 1 and $x_1^1$ = 0, it's the standard Heston model\n",
+ "\n",
+ "### 2.Approximation of Rough Heston to reduce parameters\n",
+ "  The weights and mean reversions:\n",
+ "\n",
+ "$$c^n_i , x^n_i ≥ 0, $$ for i =1, ... , n \n",
+ "\n",
+ "  They can be calculated by H and r_n, according to the approximation result (Abi Jaber and El Euch 2019, Theorem 3.5), that is, $c^n_i , x^n_i $ is chosen in such a way that sending the number of factors n → ∞ would yield the convergence of the lifted Heston model towards a rough Heston model with parameters (V0, θ, λ, ν, ρ, H).\n",
+ "\n",
+ "  that is, $$\\Theta_0:= (V_0, θ, λ, ν, ρ, H)$$\n",
+ "$$ volitility smile: σ_n(K, T; r_n, \\Theta_0) → σ_∞(K, T; \\Theta_0),$$ for each pair of (K,T)\n",
+ "\n",
+ "$$c^n_i = \\cfrac{(r^{1−α}_n − 1)r_n^{(α−1)(1+n/2) }}{\\Gamma(α)\\Gamma(2 − α)} r^{(1−α)i}_ n $$\n",
+ "\n",
+ "$$x^n_i = \\cfrac{1 − α}{ 2 − α} \\cfrac{r^{2−α}_n − 1}{r^{1−α}_n−1} r^{i−1−n/2}_n , i = 1, ... , n, $$\n",
+ "  where α := H + 1/2 for some H ∈ (0, 1/2)\n",
+ "\n",
+ "  and the sequence $(r_n)_{n≥1}$ satisfies $r_n$ ↓ 1 and $n ln r_n$ → ∞ as n → ∞\n",
+ "\n",
+ "\n",
+ "### 3.Tractability\n",
+ "  Like the classical Heston model, the lifted Heston model remains tractable. Specifically, fix u ∈ C such that Re(u) ∈\n",
+ "[0, 1]. The Fourier–Laplace transform of the log-price is exponentially affine with respect to\n",
+ "the factors (Un,i)1≤i≤n:\n",
+ "\n",
+ "$$E\\left[exp(ulogS^n_t)|Ft\\right]= exp \\left(φ^n(t, T) + u log S_t^n + \\sum_{i=1}^n c^n_iψ^{n,i}(T − t)U_t^{n,i}\\right)$$\n",
+ "\n",
+ "  for all t ≤ T, where $(ψ^{n,i})_{1≤i≤n}$ solves the following n-dimensional system of Riccati ordinary differential equations:\n",
+ "\n",
+ "$$(ψ^{n,i})' = −x^n_iψ^{n,i} + F \\left(u, \\sum_{j=1}^n c^n_j ψ^{n,i}\\right), ψ^{n,i}(0) = 0, i = 1, ... , n $$\n",
+ "\n",
+ "with\n",
+ "$$F(u, v) = \\cfrac{1}{2}(u^2 − u) + (ρνu − λ)v + \\cfrac{ν^2}{2} v^2 $$\n",
+ "and\n",
+ "$$φ^n(t, T) = \\int_{0}^{T−t} F \\left(u, \\sum_{i=1}^n c^n_j ψ^{n,i}(s)\\right)g^n_0(T-s)ds , t ≤ T. $$\n",
+ "\n",
+ "\n",
+ "  In particular, for t=0, since $U_0^{n,i}$ = 0 for i = 1, ... , n, the unconditional Fourier–Laplace transform:\n",
+ "\n",
+ "$$E\\left[exp(u log S_t^n)\\right] = exp\\left( u log S^n_0 + \\sum^T_0 F \\left(u, \\sum_{i=1}^n ψ^{n,i}(s)\\right)g^n_0(T-s)ds \\right) $$\n",
+ "\n",
+ "\n",
+ "#####   Option prices can be calculated:\n",
+ "\n",
+ "  (1)Solve the n-dimensional Riccati system of $(ψ^{n,i})'$ using explicit–implicit discretization scheme\n",
+ "\n",
+ "$$ψ^{n,i}_0=0, ψ^{n,i}_{t_{k+1}}=\\cfrac{1}{1 + x^n_i\\Delta t} \\left( ψ^{n,i}_{t_k}+\\Delta t F\\left( u, \\sum_{j=1}^nψ^n_jψ^{n,j}_{t_k} \\right) \\right),$$ \n",
+ "\n",
+ "$$i = 1, ... , n$$\n",
+ "\n",
+ "  (2)Get the characteristic function\n",
+ "\n",
+ "  (3)Compute the call prices via the cosine method for the inversion of the characteristic function(Fang and Oosterlee 2008) \n",
+ "\n",
+ "### 4.Simulation\n",
+ "  Because the Lifted Heston Model is a Markovian and semimartingale model, one can adapt standard recursive Euler–Maruyama schemes,\n",
+ "\n",
+ "  this paper consider the modified explicit–implicit scheme\n",
+ "\n",
+ "$$\\hat{V^n_{t_k}}= g^n_0(t_k) + \\sum^n_{i=1} c^n_i \\hat{U^{n,i}_{t_k}} , \\hat{U_0^{n,i} }= 0 $$\n",
+ "\n",
+ "$$\\hat{U^{n,i}_{t_{k+1}}} = \\cfrac{1}{1 + x^n_i \\Delta t } \\left(\\hat{U^{n,i}_{t_k}} − λ \\hat{ V^n_{t_k}} \\Delta t + ν\\sqrt[]{(\\hat V^n_{t_k})^+} (W_{t_{k+1}} − W_{t_k}) \\right)\t, i = 1, ... , n, $$\n",
+ "\n",
+ "where $t_k = k\\Delta t, k = 1 ...N, \\Delta t = T/N $ and \n",
+ "$W_{t_{k+1}} − W_{t_k}∼ N (0, \\Delta t)=Z_1$\n",
+ "\n",
+ "##### [the reason behind using explicit-implicit scheme]\n",
+ "\n",
+ "  If using explicit scheme,\n",
+ "\n",
+ "$$\\hat{U^{n,i}_{t_{k+1}}}=(1-x^n_i \\Delta t) \\hat{U^{n,i}_{t_k}}-λ \\hat {V^n_{t_k}} \\Delta t + ν\\sqrt[]{(\\hat V^n_{t_k})^+} (W_{t_{k+1}} − W_{t_k}) $$\n",
+ "\n",
+ "  $x^n_n$ grows very large as n increases. For instance,for n = 20, r20 = 2.5 and H = 0.1, $x^n_n$ = 6417.74. Consequently, if one needs to ensure the stability of the explicit scheme, one needs a very large number of time steps N. In contrast, the implicit Euler\n",
+ "scheme is stable for any number of time steps N.\n",
+ "\n",
+ "\n",
+ "\n",
+ "  Given the paths of $\\hat{V^n_{t_k}}$, we can get the pahts of $S^n_{t_k}$, \n",
+ "\n",
+ "$$S^n_{t_{k+1}}=S^n_{t_k}+S^n_{t_k} \\sqrt[]{V^n_{t_k}}(B_{t_{k+1}}-B_{t_k})$$\n",
+ "\n",
+ "  where $B_{t_{k+1}} − B_{t_k}∼ N (0, \\Delta t)=Z_2, cor(Z_1,Z_2)=\\rho $ and finally get $S^n_T$ to calculate the price of options"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "  This paper compares the estimated Hurst index of realized volatility of S&P Index, Heston Model and Lifted Heston Model,\n",
+ "founding that the Lifted Heston Model enjoys the similar roughness with realized value.\n",
+ "\n",
+ "<img src=\"./2.png\" width=\"70%\">"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 5.Strengths and Weakness\n",
+ "#####   Strengths:\n",
+ "  (1)Lifted Heston Model is an approximation to Rough Heston Model, keeping the characteristics of well reproduce the at-the-money skew of the implied volatility surface, therefore catching the roughness of volatility change.\n",
+ "\n",
+ "  (2)By introducing n factors, Lifted Heston Model can get a semimartingale of $V_t$, therefore, $S_t$ is tractable and can be more easily simulated to price options.\n",
+ "\n",
+ "  (3)Compared with other methods like adding jumps (Cont and Tankov 2003, Gatheral 2011) and stacking additional random factors (Bergomi 2005, Fouque et al. 2011), with more parameters slowing down the calibration process. Lifted Heston Model only introduces one more parameter H than Heston Model.\n",
+ "#####   Weakness:\n",
+ "\n",
+ "  (1)Compared with Heston Model, the prices and volatilities can only be simulated through conditional Monte Carlo, simulation process is slow\n",
+ "\n",
+ "<img src=\"./1.png\" width=\"70%\">"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Simulation of Lifted Heston"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 109,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The autoreload extension is already loaded. To reload it, use:\n",
+ " %reload_ext autoreload\n"
+ ]
+ }
+ ],
+ "source": [
+ "%load_ext autoreload\n",
+ "%autoreload 2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 177,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pyfeng as pf\n",
+ "import LiftedHeston as LF #a module in pyfeng\n",
+ "h1=LF.LiftedHestonMc(vov=0.1, v0=0.05, rho=0.1, theta=0.05, lamda=0.3, H=0.1)\n",
+ "h1.set_mc_params(n_path=1000, rn_seed=1234)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 111,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "factor, variance = h1.vo_path(texp=1, N=1000) #captures the change of a single path of simulated U and V"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 112,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[ 0.00000000e+00, -1.14908380e-03, -1.12144470e-03, ...,\n",
+ " 8.23606966e-03, 8.55297571e-03, 7.91712098e-03],\n",
+ " [ 0.00000000e+00, -1.14908368e-03, -1.12144446e-03, ...,\n",
+ " 8.23524421e-03, 8.55214936e-03, 7.91629379e-03],\n",
+ " [ 0.00000000e+00, -1.14908337e-03, -1.12144385e-03, ...,\n",
+ " 8.23318094e-03, 8.55008383e-03, 7.91422616e-03],\n",
+ " ...,\n",
+ " [ 0.00000000e+00, -8.14528398e-04, -5.57786731e-04, ...,\n",
+ " 4.30097229e-04, 5.29513895e-04, -7.53793362e-05],\n",
+ " [ 0.00000000e+00, -5.66934251e-04, -2.66077125e-04, ...,\n",
+ " 1.29847010e-04, 2.20419033e-04, -2.04967119e-04],\n",
+ " [ 0.00000000e+00, -3.22134366e-04, -8.25588728e-05, ...,\n",
+ " 4.72271521e-05, 1.02081333e-04, -1.49637968e-04]])"
+ ]
+ },
+ "execution_count": 112,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "factor"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Replication of Figure 7"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 113,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA4IAAADFCAYAAAABvEwoAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzsvXmUXNd93/m5tfbe6A1o7CDBTaQISjZFLbY2W7IVWTEVjZzY8XiJlWhsR4lPPMqJZzJWJnaSM1YmyYyXk8iO47EniSVZI9myRImSSC1cRYIEQWLfGuh9qX15613mj/vqdTUWAiQbaBC4n3P6oFD16tWt7qp37+/+fr/vVxhjcDgcDofD4XA4HA7HzUNmowfgcDgcDofD4XA4HI5riwsEHQ6Hw+FwOBwOh+MmwwWCDofD4XA4HA6Hw3GT4QJBh8PhcDgcDofD4bjJcIGgw+FwOBwOh8PhcNxkuEDQ4XA4HA6Hw+FwOG4yXCDocDgcDofD4XA4HDcZLhB0OBwOh8PhcDgcjpsMFwg6HA6Hw+FwOBwOx01GbqMHsJ6Mj4+bPXv2bPQwHA6Hw+FwOBwOh2NDeO6550rGmInLHXdDBYJ79uxh//79Gz0Mh8PhcDgcDofD4dgQhBDnruS4dSkNFUJ8QAhxXAhxSgjxGxd5vCiE+Fzy+PeFEHuS+98vhHhOCPFS8u+PdD3nB5P7TwkhflcIIdZjrA6Hw+FwOBwOh8Nxs/OaA0EhRBb4A+BvAHcDPyOEuPu8wz4GVI0xtwH/Afid5P4S8DeNMfcCvwD8v13P+Y/Ax4Hbk58PvNaxOhwOh8PhcDgcDodjfTKCDwCnjDFnjDER8FngwfOOeRD40+T2F4AfFUIIY8wBY8x8cv9hoCfJHm4FhowxTxljDPBnwIfXYawOh8PhcDgcDofDcdOzHoHgdmCm6/+zyX0XPcYYI4E6MHbeMf8DcMAYEybHz17mnAAIIT4uhNgvhNi/srLyqt+Ew+FwOBwOh8PhcNwsrEcgeLHePfNKjhFC3IMtF/2fXsE57Z3G/KEx5n5jzP0TE5cVx3E4HA6Hw+FwbCDHF1rMlr2NHobDcdOzHoHgLLCz6/87gPlLHSOEyAHDQCX5/w7gS8DPG2NOdx2/4zLndDgcDofD4XC8johjydGVFi8utDZ6KA7HTc96BILPArcLIW4RQhSAnwa+fN4xX8aKwQB8FHjUGGOEEJuArwL/izHmic7BxpgFoCmEeFuiFvrzwF+tw1gdDofD4XA4HBtEzdPUo5hyGG70UByOm57XHAgmPX+fAB4GjgKfN8YcFkL8lhDiJ5PD/hgYE0KcAn4d6FhMfAK4DfhNIcQLyc/m5LFfAf4zcAo4DXzttY7V4XA4HA6Hw7FxLDR9YqWJlSGK4o0ejsNxUyOsKOeNwf3332+cobzD4XA4HA7H9cm3j6/w/GITAXz0ni3sGu/f6CE5HDccQojnjDH3X+64dTGUdzgcDofD4XA4Xo6VRkjNl4BVAHxpoUWlGW3soByOmxgXCDocDofD4XA4rjrHllrUuspBV4KQM0491OHYMFwg6HA4HA6Hw+G46lT8mEivtiRpA2XP9Qk6HBtFbqMH4HA4ri61dkTDl+wa79vooTgcDofjOuB7J8oUshnetnfkmr5uqBTtSLHSjunJ2VzEUN4Fgg7HRuECQYfjBub0UotHp8poY/hoYZKxoZ6NHpLD4XA4Npj5dkAWwVvUMNnstSsOa4YKrQ3agBdrBotZWrG6Zq/vcDjW4kpDHY4bmJOlNsvtmOV2zNlqsNHDcTgcDscGE4YRvlS0pWSpeW29/KQxqPPE6gOlqLedYIzDsRG4QNDhuIFZ9mLqgaIZauYaLhB0OByOm51KW1LxJDVfMlX2r+lrK23QXbZlWQGxMszWQvwo5smTVZ6dql3TMTkcNzOuNNThuIHxokSm2xiqvu3DeOTYCq1I8uC+rRs5NIfD
4XBsAKXAzgvKQMW7dpm4OJZIrVF69T4/NhRzhpVWSNWLWWgHZIXgjnYfuWyG/h63THU4riYuI+hwrCPPnq3y9JnqRg8jpRVrau2YmidphHbyP1JqcbLicXS+scGjczgcDse1pp74+AE0I/kyR746vCDm1JJHHK89dxBr2rGmEa72BFYDSaSgEUmWvRBPKpqx5OmzNZ6aqhFFTkjG4biauK0Wh2MdeeRMGWXgzdv7KRYLGz0cWqGko9TdiuzkW/YkgdQ8MV2FjOANk4MbOEKHw+FwXEuaYZyWZ3qxvszRr4woinnyTA1PKbQe5I6tq8vMA7NN4qRB8EduG2Wsr8BfvLhIrA3NUAIGL1ZkhWA5CMkKwVKzj51j+QteJ44lJ1cCevJZyq2Q/kKOu7cPrOt7cThuBlxG0OFYR0qepOpJ/vLwCpXmxvfkRV1d+X6sCYIQL9ZoAytezDMzrhfD4XA4bia8SLPSlpQ9SaQ0rXUsDz2x7LPkh1TDmMXWav9hFMUstkOUtn2BH7hzgvt3DFHMZZDK4CtFM1Z4kaYZKpQ2xNow37i4mM2xJZ9TpRZHFxvMNHxOl9vr9h4cjpsJFwg6HOtIvS2peZKXlts8eqay0cNJd18BQqk5V4sw2iCVoeorFpshUyutDRyhw+FwOK4lobTVIdpArDRzjfULBGfrAbHWRErTjlZLQP1Y40mJNoaevF16CiHYPlQk1oZIGUK5Ol/VfEkjkFQvYTY/W/epRjHlMKIZS2qRUx11OF4NLhB0ONaJIAjpTGN+rC+5k3ktkXp1Yo2V4VzVp+5LGr4kg6ERas5Urq1qnMPhcDiuHX4Uc660ep0PlOq6bagH69MnGEYx9TCm4ksagaIaxDx/rg7YwK4d2WqU/kKOLxxc5LnZOtuGe2z2T2mkNtyzpZ9bR/tQxla0NCJJ018bDLaDiHpS3qqNfW6szcWG5HA4LoPrEXQ41olasNprUfcklcLG77MobcAYDDYonGsEac9gXy5LS2qWWm4n1eFwOG5UDkw3KXkRXqx4w9YBArk6V0VS0wrXZw44vuTTiiUddwgvVpytefzA7mFqvu1N18YwVMzy9WMlAP7B23dggFAashnBjuFebpvo58X5Jo9NVWjHkhdmGmwe7OHOrf0AnCuFtKWi5ksEYICMWJe34HDcdLhA0OFYJ6ZrPu0wRmsY7M3Tjta3Cf+Vcmi2TiQ1lcSot5ArUgnidOKcroX0FbOpmqjD4XA4bjyWvJBGJDlTajM2kCfsahmQGlrrMFc1/Zjpmo/fFWRKY2jGkiiKaaVKoYL+/OrSc3KwmIzDblj+twML3DHRz0+/aSuPTVUIlGbRC6kEMUM9GU6t+IRK48f2fJ13oo3tQywULhSWcTgcl2bjUxYOxw3CbD0kjDWx0hhjiOTGBYJNL+SpmRpRl2FTrAx1X2GA998xxkR/niBStK+CfLjD4XA4rg9CqfEiRS2MePZsLS2jzAgbgJ0/V0mpmKsESKkudrqLcnyxRS2MiKThjol+jDF4kSaINbW2oh1LjDGo5KfDaJ8N3KQ2tCOFMnB0uc1yKyKXEUhlaMeSWhRzZKnNohdQCUIiabhtvI981qYCswKWGs5qwuF4pbhA0OFYB753qsyLi6uiK3UvIpRWpXMj+MaJCovtyJaGJihl0izlbeN9fOCuCbRZtZVwOBwOx41B3Vvd4KuHEi/WNGNFKYyQSUZw22ARaQyR1rSTPrzji22em25wYK7OkYUrV+Is+TF+Uvq53IpYbktibWhFioVWiB+rVLysO/A8U/boyWWQ2hArw56RXrIZwR88Oc1cI5lHpUZpQ9nv+AzaDc3bxvr4e2/ZSU8ugwFeXGwyXfZe+y/P4biJWJdAUAjxASHEcSHEKSHEb1zk8aIQ4nPJ498XQuxJ7h8TQnxbCNESQvz+ec/5TnLOF5KfzesxVofjarB/rr7GJFcb8CPFsdK1F2JZqgecq3s0Q71aN4PdcZXaYIxhtC/PtmFbkhPE
rsne4XA4bhSmSh5PnilzcskGcp0EXBBrQqlTEbG9Y32YZDPw6SlrJXR0qcF0w6ccRszWr3z+8mJFO1JkM4LjKzYY67xOxYsIlaEaKGKpeToRkAH4vcem6ctniJVBGbh36wB/7/7ta87djjRVX9KWKgkobdXNcitmuWWDRT/WlP2QM2UnfuZwvBJecyAohMgCfwD8DeBu4GeEEHefd9jHgKox5jbgPwC/k9wfAL8JfPISp/9ZY8ybkp/l1zpWh+Nq8OWXFllpR9QTmetdIz2AnQSPLl17b6PnZ+tUfIUxZo1qaKdctR1K/ue/PMqmXtun0V0+6nA4HI7XN3O1gOUgYi4J5DqlmMpgPfoMtALJ2UQx2peKRT/EDyPqsaQdS1tOGl9520AzlCgDk4M96X0d0/pGJAmSMtNizi47920d5L5tgwBkhKDTtrhnpJc7Jvr59XftASCfXV2m+rGmHdkfZeBrx0t85ukZFlsxtUChgIofUV5HOwyH40ZnPTKCDwCnjDFnjDER8FngwfOOeRD40+T2F4AfFUIIY0zbGPM4NiB0OF5XBEHIY6fKvLDYpOGrdNf1vq0DFHMZtDYstK59aeh8MySUmlgaMkIwUMiSy4jEM2q1H2QpsbeQGlr+xltdOBw3Gi/NNnj0WIkocr1LjmvDXDWg5kcEUlPzY16YbhBKRSOwm4NgA7RIGg7MNQFr0xAqzVw9Iog1jUDRDBX1SLH/bO2KXjfSNkt3thpw21ifvU/acs9WpNINx9HePH35DP/4Xbv55XfsJNtRL0vYkojHTPQXyAoo5rLpY37cJUSjIdclFSqAum+D2P3TNY7OO4N5h+NKWI9AcDsw0/X/2eS+ix5jjJFAHRi7gnP/SVIW+ptCiIuKAwshPi6E2C+E2L+ysvLKR+9wXCHHFprMlFcnl8+/tMIjZypU2pJQmnT3c8dIHxMDBZQxVDxJvX1tg6xGKPEjTStU7B7pYbg3z0hfnqSnPpXZ/vSjZ9Da9l6cXHF9FQ7HenOi1GKm6W24grDjxubUkse5kr2GH5ips+RFtELFshdxutKmGWp8qfGSQEp1qYbqxMy96kvm6iGtSKViMoHUnKtd2dwgtUFpQ9WPefP2IQpZQagMFd9mAyO1GoQOFm01Sj6bYdtwT6ooWsgKCkkGMJsRbB4sstgM1/S6d87RCCTvvnWUt+wYBiCTsRnPdtIHOVu/cNylRsTjJyqUrgOPX4fjemE9AsGLBWjnNx1dyTHn87PGmHuBdyY/P3exg4wxf2iMud8Yc//ExMRlB+twvFq+eGSJLxxaAmC+5nG60qbqSbxIobSh4VuD255clu3DPWhtaIaSvzyydE3H2YoUQay5f+cQGSEY6skx0punJ5/FGIM2cM/kAJHUBLEd+0zdTYwOx3rS9CLKvjXXXmquLVWrNCMOnKu/IlVGh+NSHF1ucGixyUItoBxFafYt1oaWXC3vlBra0WpQBrZKpFPNUvbWfk4NhtZ5n9G5SkB8XsloEEbEStOONdmM4J7JAUZ7V20cImXS14ykYbBn1T5i10gPtUSoppBduyT98D2biZQhkxGE0mYc//Z9W/mB7cOY5Lkf3beFfZMDqT9uIZsh1prmeWrYL043eWG2zqIXcGyphcPhsKxHIDgL7Oz6/w5g/lLHCCFywDBQebmTGmPmkn+bwH/HlqA6HBvGUjNiPuk9+OrRFcptRTtUaAN5kfRCtCM+9dVjbN/UY32NpGa6thpkLVR8vvLS0lVVE+3srr7/jnGagWSwJ8f4QAGZZP8AfvyuCT50zxaCWCOVZqXteiocjvXk8GI7UVGE5VaYlodGUcxjUxWOlptpebbD8WoJw4h6JKmHMadKHkGsU8P4QGoagUx7BAOpaUWa7gSb0qvVLPXQfkaNMUTKWj+0I5V+ds+VPF6Yq/P8zNpAaroa4cWGQBp+4q4J+vJZoq4XCZUm1nZMvtRpRhBg+3APXmzVRgu5tUvSW0b72DpYZDnpARzpzfOF
l5YotZN+/E09CCEY6s1jjB131Zc0Q5X6DHY4W2tTCiJrRO+8cx2OlPUIBJ8FbhdC3CKEKAA/DXz5vGO+DPxCcvujwKPGmEtmBIUQOSHEeHI7D3wIOLQOY3U4XhWVho+XlFuWmwFnawFBUmYTxiqdmDof6k3JjmcYa+qBSgO/r51c4YWlJt88+bL7IK+aKIrTndfNgwXqQcxwT45dI700fMm+rbY5f8tQkbsnBwC7I9yteOpwOF47sw0/VTM8V/d57LTttZqqhJT8iKovWWm5DRjHa2OxEdMIJY1IUm7b0k6lDc1Q2V5AZTivsjIN/MAGgrGyZZ2Bsp/XeqCo+opAGivQEtq5bqER2rLLxtqyy7l6kAZ6b94+BIDXZUsUxibdhPQjxWBxte9v21AxHccP7d7EF19c4ompavr4ZPI4QGzsGJ6bazDWl6e/YOfZTvbx7791F2/fPUKkDK1YrenNrUUxgVJEUhO5TLzDkfKaA8Gk5+8TwMPAUeDzxpjDQojfEkL8ZHLYHwNjQohTwK8DqcWEEOIs8O+BXxRCzCaKo0XgYSHEi8ALwBzwR691rA7Hq2Wq4icTpuaxM1VqnkQbgx9J/IsYsv/h4+fQxu60tkPF51+0WcCFZkQjVJypXh19pOdnm0hlEMCh+QY1X7J1qMitY70YIE5KhiaHioz228lTaUM7khxZaPHIsRJeGPHMVJUwdItUh+PVEEUxtSCm7EkaoaIeSmZbVqFxuRVQCySB1FR8JyLjeG0stuLUa6+RzEV+bPsBvaQ3VRvD5GAhfU4nDtw10oPShlAZSp7EizSNUBEmm4lS27mknChiN6OYWGlaseTAuQZLNTuPVf0IpW3/eV8+gzGGiYHV15NJQGqMNY0f6ioN7dgY/c03bKbmSx46usKfPDOXzlUddWtgjTVEJ+AE6C/YwPLfPHqG2yf6yQqItWahufr9ktrgxTr9PjocDkvu8odcHmPMQ8BD5933qa7bAfBTl3junkuc9gfXY2wOx3ow3Qhp+jG5jOBIqU0kDVpp/GTX88P3buHD+7bw4c88i8FKaQ+IHPmMQGnDiUrA/tkmgVQ0fUktf1Hto9dE0wt5camJ1IZ8Fn7vsXPcPtHPT9yzmXpgJ77vn6sx3JOzpTnJYsAYO0E+M1OhGSkWWgGNSNIKJT9yl+u7dTiuhCCWZIyhUMiz3IppJ6bXAF6k6bi01JLvojGGduwyE47XRsULbcYvEUqB1coUA2lf+OaBIvu2DvKNE+U0I3jnRD/T1QBjDEIIzi/UKmStUXunh6/uS+qBoieXYarWpuSFvKc/RzO25acDSUD2+49Pc3C+yS++ZRtfPrqC0rYPUSXWtgNdpaEjvXl6chkWGyFnq6uB3n96coa+QpZ37x1ZM6Ydwz3M1gPeumtTet9AV4bx4HyD3nyWQGqOL7XZPlzEGEM9kGlmNE7e51I9YLQvRz6/Lkthh+N1yboYyjsc15JDc41r/prldoxKDNlLLbsr2ghWdxV3j/aSTybNv3XfJEBq26C0IYg1J8s+NU8RK0PNlzTa65sVfOhYiaVWhNIaL1TkM4Jfe/du/q9vn+FLLyyk5Tg7Nlmfp4Hiqq1EIDUrnhW2WGyF1HzJbMO5ujgcV8q3jpV55KQtaZupBfhxlzJjl6enH1tp/uW2pO0yE47XSDO0thDG2BLPZqhoJZlAmwGzt8f68rzv9nE++e5bmOgvkM8K9oz2AqQB0mhfYc25a4GkHSkayec0UDbQDKWmGUuqQUS1LfFjjdYw1l/gbMXn4Ly1pegtZBkq5pDaoIGGb8/TnREUQrB5oMB8I+RcNeADd41z37ZBDs43eepsjYmBIp963162DhbJZQT/4K07+Fc/fjvDXefoy68Ggs/NNhjvz6O0oRJENAJFKNf2RUplODLf4sBsk0MLL28zMVcJ8J39i+MGxgWCjtcVn37kFP/6W6f5z0+dW3P/46dKfPKvj/Kto8tX5XWrgZ0IlLalnl4k1+xC7tjU
Qz3ZNb19op9NvTmGenMsNSOCWFHzJMvtEJPs1YbScHRp/Swbmu2Q6XpAM7SBZiOQ/O03b2OlFfHQ4WX++/453rDF9gRuG7aBoBCC0b68XaQqQ81XtCPNcltSCxRl3y1SHY4rIYpiFr2AhWRzZ6UVEcpVy4iKbxejxxfatCKVyvi34qtjK+EFsVu83iS0pb1eN0Lr1eed95kKpCZWhqmyh9aGzQMFtg4VGe7JpXNYJ0N461j/BedvRZpWUvnSKRk1YOfBWDFT94kSwZnR3jzPTNfT5zZ8ya6RHmsYn/QQ9hWy3LdtkEdPlDi5YoOw0b48x5bbKG24Y6KfX/mhXfzkPZsBmKsFDBRzvH33Jn78jnF681kKuQyh1Pw/z8zy9WMrfOXwMlGseWDnMFU/5uBCm1BpfKWYqwXUfUlGQDb5kcYwU/VZCQIWm5cWbKq2I15aaPL8Oacy6rhxcYGg43XFi/Mtqu2IZ6bXZgW/fLTEXDXkG6eujghLoysoCqVGKsO7bxtNDW03DxT4iwMLAAz15tkyWGSomOOWsd603KbsSTprw1gZpmo+68XT0w1qgUJru9sJ8OYdQ9S81XF3mu6zXSa8Y/0FTJKtkOcpCviuod7huCIqvkoENiSL1YBGGBNIzXDP6mZRLVC8sFjD6yoH7Q4W15PvnKrwvZPVyx/ouG6RUnFotsnh2UsHIaV6RCDt5l/cZdFwPkGseX62yeOJCEvVixnpzaeZtE5F6EPHLvRiFqx+TuNEebq/kMGXNkBcaITEUqMMDPVkeXamzr5tgwgBh5daDBZyeLGmJym//PAbN5PPCn7/sXP8sy8fA2Ckb9VqYvdoL7mM4D23jQIwW7ebK2/bvYl37x1Nj3tiqsrjUzW+cHCJZ2catELFR/dN8rEHdhBITSQNXqQptaO0LNRmJ0EqTS2K0Aba8aU3POdqAStBwLLnqmMcNy4uEHS8rmgFMWGsqJxn0r7cCAljReMqiS90L96iZFK8c2KATz94F//g7Tv56uFlvviCDQSHe3JMDvWw1Iy4bbyfTNJ74ceaWCqafozWhuV1VAycbwaEUhMpTSErEMDEgFUN7XDraC/vvX2Mv7VvMr1vtK8jGGPfVz4Za8dj0AnGOByXZ6art+lUuU0rtgvj4WKeWju2PVhAxY/XBH+RXv9AcLkasOSHLHgBxxdcJuP1yktzLY6XWpwoNy95zMlSi1DaOhNluGQg2Nn6e+RkmRMrbUrtmJG+PH2FtYEgwC/cv52f2jfJb7z3Vu7fPogBAqmYLnv4sabkSW4dGyAr7GtWghiZPD+Stu3hbbs3MVTM8cJck68escFlNRGc6S9kqXhr5+nOPJTNCIaSLOVQT47BYpZz1YtvmH77VIVdIz0XnOP28X7G+vIUcxkCqWmGkmakiJVmrhGx3I5px5pAKRqBXKOgej6llg0Wu70YHY4bDRcIOl43+H5AkARk7UDx6391JH0siBVRrC5odl8vglinfkthkim7baKfPaN9bOrN8ZnHV0tVh3vzbBkqstQI2TPaixcpMgL8SNMO7YQUSp2KRqwHjdAa20fSsHukl9H+PPlsJi1XBdtL8o/etWeNmtvEQAGpDEpZhbmlpu35aAW2z6Tiuaygw3E5Sm1rB9EIJEutKN04emGuiTa2lC4jbM9W2LVYjy+xcH8tHC+1qfmSmi85U3n5/ifH9ctsw6clJY1YrrFB6Ga5HaaegWADwfNlyD72wI7UNmiuHvLpR6eoeEkgmGQEb0l6BfeM9HLPlgHesnOY0b58mqkLpOZUySNKIr6nz9UYKubsfKg0UWL2fmC2QU8uw33bBte0TsBq+Wl/IctCPVxzfyeIK+YyCLH6Dt44Ocj3z9X5/rnamnPVA8lCI+Rtuzfx02+2G5uF7OrzxvsLdMLfVixpBZKKr2gmJa7GQCvURMpQfZl5uBlJmqFEXYXvqcNxveACQcfrhmfmmsTJRBQrzXxi1L5U82gFVnih0lr/jGCtZbNtrSAm
CCXGQG8+gxdJ/umXjvCvvn5yzfHDvTm2DhWJlE6lr8utCKVXJ2mlrFHveuHFthTmLTsGCaVmy4AtA635knwyQVY8m937r8/M8onPvwTA1qFiupuszisNjZVhvu5KYhwbQ82L+PrhZb5zvHzJhfD1Qj2URMrgS5OWhXZnGowhTcvEuiPsodMy7vVkqW1VJJVZ7W12XN88earKU6fXlvK2IknVj2kEknL7wr9jEEY0wtV2A7AWCUM9WX70tjG29BfYMlDgtrG+NZ5+HYw2afn/3rE+/sk79/Dxt+3EixSLzZB/950p8lkbzAVKU/ajtH2gFSlrEp940EbKkMsIZushP/+W7RRzmbRyBmByoJBmHfsLWRa6hMhKrYiRxAew5zxD+V94yzaGe3L80dOz/P3PHUJpw1IzZKps++tvGe3lfXeM897bRml2eeGO9+dphhJjDEFirdGN0qtWSi+n392KFaE0zoDecUPjAkHH64bHz9RsaWXT9iTUk8Bm/0yDKLmoy6uQEfza8bI15dUm7a+7d9sg/8c3TrF/uoYXKR7cN8knf3Qvt473MVDMcev4atP9++8cB+DBeya4Y6IPIMkKmnVTDu0Ywr/rtjGWmiGbE8+ouh8zMVBgsJhLFxN//NQ0hxeanKt4bE2EY3Qywb9113B6TqUNS21XGurYGI4ttZlp+pyut1luXt8BTavLS7QV2YWxlKvXIqmsINNSK0YpTd2TNH27mF5vmqFEamvg3b5KYjSO9WW66TGdmLRLqfj+mRr1SNmSfWVYusjn/1w1oh0r6/UnVgOaTT158hnB0aU2t431kc0ImqFk71jvmud/9vl5fvlzhyjmMvzVoWW+fGiZXEbwj790lP/toZMcXWpzfNlmlCNl0s8VQD4jmKqGqYiMMjDcY4O5N05aUbLuwOyDd03wjt3W7mGhEfCfnphOH/vlzx9KNyt3j6wdYy6bYWdX+edTZ2v884dO8qWXlsgI2JUcP1DM0o5Uupk53l8g1tY2I1I2EMwIGO/Ls2WggDKGzh7MpVYMi7UAP8nsi5cNFx2O1zcuEHS8bjhX8QlDhTEgpSaWGt8POF7y0tINfRV6bqaqPnGyo+hHtvy05ctU8QzgjVsH+Yk3buGPf/ZNZIQ2VsIqAAAgAElEQVTgljEb8J0te/zS23aSEba8ZrFpAyttIJSK5+cu3f/xSuiMT2B7MTrKoHU/Zrg3z2h/nkoS1HW0Yv7k6ZnVHsEkgP7Ivi3cu9VO5EpDzWUUHBtEyYtohIqaL5l/GWW/64F2tHrdaccqLSNP7wsVrcAuKv3YrPqZKY1/Xh/uc2drPHK0lGZBTyy2OX4ZiftufKkoe5KSJ4mV4uEjK9d9RvVmpta2pcTtWNHyIuZqIWfr7TRjBTBd9Vk4T1xssREQKvs525aogAKM9ecJkg2ATj9qM1RsGSzyQLLR191C0ZuzE8KzM/ULWis64Y9UBj/Jcg/35PhHP7wbsHNExxolJwQ9uUzad9ixpgD7nehk/U6X7PvYPdKbiq3V/JhfecdOfumt2y/4/WwfXg0E/+rQEmBLXHeP9FJMMoiDiS/hTGJwf8toLxlh33esbfmqNrB10Cqmdhe/nF8J0+HkcjvdqBFAqRHS9Nz3yHHj4QJBx+uGmhenE1VW2Mlsth6y0AzTRZfUEATru2is+jEyCTANNnt2dLHJA7tXDW33JIFfh8GeHJsHCpwpeRRzGbYMFvn8gQVWWhHbh23ZZiQ1py/RCP9KkUm52TePlxAC3pWoq9V8yabePKN9BSpeTChVOgl+92SZbx5bIZ8VbOrJ8as/tIux/gK/9q493DHRi9Jmza6uw3Et8SJJpGxvbtW7vjPTQZfCbqSMzULEOl14w2o/YLc6r9QwX1/73k5W2sy0PM6U7XXsxcU6BxfrnFm+smAw6MpEtiLNohdwauX6DqRvZqarPlVfUvUl5yohp0sevlSpkrPUhpUw4uji6t+/2oypeNbPNlKGXcM9bE56v8+U
fL74kg2YvnG8zKe+dpJGIBnqyfHxt+/kEz+8a01+a43NyXmBzlIzRGtDpG2JZU8+y0hvnsnBIndv7kcZw1jiPai6ev0AfuWHdvFP3m0DxpWkb1Zgvyt9+QyffvAu/svf3QfAVNnnB3cO05tf21cItpQ0fd9d6t3dCqKdQPBfffM001WfbUM9vHfvGKEyxFrjJ2Ivm3rzjPUV0oz5Sjte893tphxEaZBogAOzDZ45V083VeJYcmS+xUojJHgZ5VGH43rHBYKO1w1etNq0LZXdVT+06KVqZGCDtH/56NmLPv/hI0s8eab8yl831kRdJVY/e/82Kl7MA3tG0l3O7vKVDndsGeDJqSovzTe4OymXKWQF773dlorGyqwZ+5VwqZ19pQ1hrPjm8RLvu3OczYOdHsGY4Z4c24aLzFR95pMm/X/+47dz/65hHjq0xORgge3DRX5gx1AaaG9PrCbakZvgHBtDPVRUfUXJk9TXUVhpvWl6IZG2SrsyWZhLbbP+nTI5gI4GxtpsBCw11waC1SCm7ElOl9pIqagEMbUw5sjy5RVAoyjG71rY+1JT8WKmqtdeNCaKYk4strlK+l03DCtd6tGLLZ+yH1L2JIHUlD0bIEptqIer1/79szWqYUyczIMH51vMVAOMMZwpr91cnG+ESG3SYOlN24f47Q/enj7e6Ppuna2sfe65akAzkGsyhZ2+9y2DNrPWaUsIYr3GBqK/kOWeyUGGe3LM1ALaoaK3kKUZSIZ7rZjZQDHZMC1f2lP3nbeOcP/OofT/H37jZt60fZC37l7dZBnsEqbZP2OtpXZssnNyJE3aI9jZoAWYGLDjb4Ya7yLq2H5shd3qgSSQinIYUQ4iDi3YsR5eaHOm3OK5mQbfP1O/4PkOx+sFFwg6Xhd4no+fTDhDPTmiSCGlZqrq005KrrQ2SKWZqwZrsoK1VsCfPzfL105U+KujF/oknc9yI+CPn57m9EqLlh/QCtSaMq+vJ3LYb5gc4Pf/zr38iw/eQTF34U7mP3zXHgpZwZcOLvCxt+3kP//MPj77iz/Aj79hArDBW+MVLHCfPF3hz56f54nTa70S/+rFBaS2dg/3bRvk4+/YBcBz0zUq7YiJwSK3jvfTCCQvzdlJcvumHt531wTLrYiBYp7FRshXDi3xK58/xGIjpJxI3nux5sC5mrORcFxz/C7LlsZrEFbyo5j2VSxxnqmFSGVoBYqGb/sDO5mEjncnrJZkdy+qldZU/bXfLT/ZdFryQvZPN2iGimaoqFyBNU4jtBYyWWFfTycBacW/9t/fp882ObhYdxYWl6FbPbrixdSTz3ons6sNNAOZ9qEqpSn5IZG2CrQCQ6llvfJeToW2O1iqdWXWukVdnji7Vp0T7GaFSjz4qr5kW/KZ7gSE842Q4Z4cjUCmG6Pd3DrWx1TZpx0p+gtZ6r7NTqaPj/el4i8XH3eOX37HLu7c3M9oX54P3j3BJ354N/ns6vK1O2v41NkqzVCmAZ/Uq9n4wWKOyaR/vmPfJASUGmu/W3EsaceKSNu/Q81XBFLhK8VyK0zfdzmMKQchK35I7LKCjtcpLhB0XJd84flZPvgfv087Uev67MFlwmRh+Nsfvpt8LkMYStsnIW1Q2GpFRLGiFUhOlYM0ePnswUUeO1unEUhKzZjF+suXY37u4AKHl9t8/dgKT51rEEud7moXshmmyh75rOC28X62DffwniTDB/DSbJ3nE6nryaEe7t46yJmSR08+m5bN9BeyDBZzaG3wXoGh9LPzNWYbESdLqwurIAg5XvbwI9sD8cF7NpMRgpPLLX7zK8fYPdrHT715G3sTkZrvnLQZ0e2betg21JOMJ8NiM+ShIysstyJ+9S8O8Z1TFeslKDVPz9V48lzjisd5LXn0eImvHVre6GE41pnFerBGSKU7KHylPHK8wjePl9ZjWBdlsRGuWYAbY1IFisnB1UBQG/iBbUNrsibKcEH5dUbYhWs7VpyrrS6QW1eQnV9uhigN+awg
lxEoY0vG26/h9/dqWWh5NCPJYsspD78crVjSjhRerGjENuDIClLlZ7AfJ19qlhsBs9WAVqRohVZsaPdIL4HUtELFYCF3yde5Y/OqgFnHVqiYy6zpkXtxvsmbtluriY/cu4W37h4mI+ym5UAxB8bQ8BVepNjUs/o5/ofv2EUjkGtKQzvcOtbLcitiuhYwVMxRD2zfeoddI70sNcM1JaoX4+d+cBu/9s7dZMSFwi1bBovctbmfv/2mSVqR4n/96gkaviQrbCl2R0xuoJhjor9ARkA5qcYxBmbqAY3k/6VGxFNnGgRKpyJqIunxD6SmFUuWGyGNpDpHGoMnrd2Sw/F65NJXDYdjA/n9757D82P+9cMnObzYIpcVqMQj6YFbRhgfLLBYD6n7kkhqoo56mbILnz9/cQFlBP/wrTuYrYW0A0UziMlnM3zzeJmfe2DHRV93ttLmbDWg4SuWC5Jm3EBpQy5RNbtn6wAHZhvcs3WQQu7CfZRf/JPnAXjuN98LwN7xfp6eqhJKRTGXpe7H/PVLS+zcVOT4infF8vHlZsBSK0ZqqHYtHA8ueJQ8mU7mOzfZ3r7f+eYp+gs5Pv3huxnsybE3UTE9MFtntC/PUE+esQF7nkJWECvD4nmCHNoYImmoB5LZywTPG0EcS05UWigNYRhRLBYu/yTHdU8UxTw+VVkTXIXKEEUxhcKFC83LsdAOMMl5X83zL0ctlES6O8tn7WUE0FfI8tbdw3z/XB1j4Fsnyrx5+xB1TxJKu3lzfpBW9iShMmQzOSpBTDO0FQnZ3hylZsj4YJE4luTzF07fK60YbQw92QwGaEaa5bYkn7n2qoeNSNEIFV6/6zN+OUKp0yAi7LM2QMM9OY6XfIaKORqhTLJyhmfPNdDGWg9pYz9rW/qLgBUdywpBPrmed/j423dy50TfmuCrkxG8Z3KAQwtrBct++R27UhGXrxxeToTNNC8tttg90suXDy8zUwv48L2bAdg6WKTqxRhgcujCa/DepH9+oRHyvtvHOLHU5M7Nq5/dnZt60Abm6wG3jPXhRwohoOe8fsHu7Pr5FHIZPvneWwC4bbyPf/OtMxycbzDWV6AWxEht309PTnBgrslYb56VJPBTxrDYCommG7z3rjEOLTQp+XZDpfNbFEKkglD9uSxTZQ9PKvxY05vLoDAs1iNG+gv4oSSTgeJFvp8Ox/WIywg6rkva7QilDC/NNWj4MZVWhNaGnaO9FHIZtgwWMMY2e0utUcmOnzaGhhcxV41Yqoc8NlWl4sU0k9KwWGkOLzcvWer45wcXaYVW9S+UisVGhNKaoWKOfFYwmWTR3rR9+KLP71BKykf2jvehDfzWQyfQxvAnT8/wx09NU8jandjOhH1wpk69bZ/z9cPLHJxZ23NwesUjVgY/UqmhPcCJUgtjTKr6OdST48W5BqdLHh//4d2MJ+UxA8Vcqr62e9ROzNZ0dy0fuW8yva0SgYCKryhfQVnateZcJaAZKtqRYrpqf3dPnqrwwsyF5U2O1w8H5lostSNCqWkHkiBSRErzndOv7u/aSEorq/7VCUhakVyzoWOFm+zC9cX5Bn4o+Zt3T6SPH1tuMVDM0VvIJtk6ycNHViglvYIdMRmBwIs1XqwJpKEVac6WPeZrPl8/VuJ7x8sXlKM1ohhtYLkVU/NWH/OlTq8v14pOhsdzFhaXpO5FhGr1c+lFViTmeKKsec8W22PqxfY7UApDymGEMqSKmdmuIH+2HrBjuIeP3LuFD75hnJHeHD+wfXBNEAi2dxzg7slBImX4tx+6g/ffMcanfmxvGgSCnTfAZs2kNuzbanv1XlpoMjlY5J23jPAL92/nWCJkdOfmxDoikHz3VBltDLeOrwqp3TLWSzOUqcIpwI5NVl10thZwpuTxc//1Bf7dt6de9e/01rE+tgwUWGpGbE6sIrSxugJfO1riM0/NpO+r894asWTFD2l4ESUvpC3t5ktnf6e7z7UVSUrtKM0QVgNJK1SUE0Grp6Zq
PHv2+qygcTguhgsEHdclnQtwK5CEoURrg1KauyZt2cqt4wMYA0EkkWq1dNMY+9y6F1H3YqYqftoo3inTWmlJHj978ebu2XqEHyn7fF/iJTuvGQGjfXnekIg/3N+lGHox/v03TiG15u5kvE9OVTlb9tJ+jE4GL4w1pWbAt8+W+eKhRWYrbQ4uNfnO2TJH5lcnk+lGQNNX+JGm3tXfseLFNHyJ1rZ8pTefYTaRGb9v+2qDffeYtyWqpYVchqGeHHGXyuBH75vkJ+7ZzGAxm0jg2/uvx8XckeUWpbaVyT9b9Vmo+hwuNXluvs7hdbLlcFx7pmteusgKpcGLNH6smW++8hLDassKZcTKMN2l0Lue/TztaLWEDOx3u+pLbhnt5X//2km+c6qyxijbjzWDPVkGi1mMAV8aFto+L8zVefxkJV2I+1KvKduLlGa6FvDSXJNSEDHVaDNbC9P3E8cy9XrzIk09WPVVC6S55l6MBlsmK6+Cpc+Nwkw1XCPu093Pdtfmft556whg/37NUOHHNgtljGG8v4DABom5jEh7UDf15vjg3RN8ZN8k//Yn7yKXzRArzUzX57/uSwaL2XQuqPqSv/PmrakvX4dOX6E2sGWgQJBkr6U2fOapGe6asH17x5bbqYVFKDW/8N8O8n9/9yxHF1vkMiKdewtZgTasCUy3DRfJCJip+XzxxUW0gedm6iw2QvxX2Ru8ebDAcitivL+A1vZz2PBlKiRzy0gv//Q9t7BloJBkVg2+Uhyaa+HJVbXiqGNL1d3XawytSBLEmr1jfewathnNVhjTDiLKfsSS51R6Ha8fXCDouO544dyqoEsQKcJQ0WpFGAMf3LcFgG2JIlgQKeKuIKUjxNC5bJfaMTIpF33fHWPWezBSHF+5sDl9oebhRzKd7AJpJeCV1kxXfN531wQfeuMW/uzn38w9WwcveH73ZPHw4WWePFVhYrDIH/3d+wA4XfJSL7/OpBwpzeGFFvONmLO1iK8eX6HsS+YaMY+eKXMg6TcstaM0U9DdSmGzl/Z9F7MZhBAs1EPyWcHYeRm/zpi7JbrH+gu0I8Un3rmbP/rpe+nJZ/nY23byntvG0Npw10QfWhuCWK27LcdrZcUL07/zshexf7ZhfecCxbEVFwi+XmmESf/TGkN2TSOUPHum+oo88WZrAWVPUvElS4k4xEo95CtHVth/EWGMKIo5teS9okDRlwplbF/exECBYpLt7/ZR0+fZVg8Wcwz15KzhdaIOudAKOV1vr26+RCo17Aa7+C4HEfPtgIonqYeK6arPwZkGj5+u883jZVqRWrOx09l4ipWhdI090NqRYrktL+htPDjTYLEeIC8h23+j0fIjvnu8zMxFBFEWGz7dvx6pDQjBloECv/SWHYx195Nqu+ngxZp2rDm40CKTEUyVfW4Z7WUiqf7YdBHBlr8+tMyvffEIx5dsf3nFixjtK9Cf9BS2LxFwDaYZQcP77xhP7SX2jPTy/GyDbxwvYYxhquJzW5L52z+9+r3q9OH9+nv28Itv2c5vP3wKgOHe1YxcPmvtlebrIUeXWty9ZYCMgF/9i0P86l8cuqTP38uxZaDIcititC+HAeLzzlH2Yib6C4z159HGUA8UQaxZ9EJ8ZWgEiiBRG92cBIudtYXU0JYSA9y1eSD1DG5Firl6hKcUrdew0TS14rPcuL7mWseNzboEgkKIDwghjgshTgkhfuMijxeFEJ9LHv++EGJPcv+YEOLbQoiWEOL3z3vODwohXkqe87tCXKRD2HFD8sv//Uh6O+7qn+ktZHnH3jGA1B5BJkaxHXYmO5pSWcP5dqTwIkmtHfF7352iFcQEsaJ0kTKpZ6brtAKVlnlJZQgihdYGAzy4bxIhRPoa59NpwP/QPlteOZsEe7eM9ZHPCk6vtFPD25mqb0s6tWGqZnsFvVgxW4+IpM0wLDRjnpip2nOHqxOL6go420kQ3F/IpoHffCNgcrC4pmQI4L23j/FLb9/J/9jVHzk+UKDU
CvmRO8bXBI7jAwUM8Pxck5on8WLNVOX6Ug6tBarrtmSxHVDzJY1QUb+KKpGOq4s119ZrNlZ8afBjxZFykyemrlyqfalLmr8RxDxxssITZyssexEnKheqWT4/22T/XJXnpq98IyFWBq0N4315xvvzqRJw2HXtap0nCOOFkuNLLbTWdKpKG6Gk7ku0MQwVs9y9ZSBVjhztzSM11AOZyvXHyvY2HV1pMtvyWPZD2klpYSERi+lcG7UxzNV9jsxfOwXPzrWpu5qg2gw5Xmrx/XM1vnW8fFOoEb8032a25XN48cLffdmPkdr2vucygt58lkYguXfrIEobvnasxFt22DaE7ut+Z5Pkvq0DnKv63Lm5P1Xz7C67TF8nKVv8/15cBOzG4lh/noEk43cpm6CBHvv4R/dNsm/rIBUvZnKwyD/70VvYM9pLzZdU/RgvUqmF0vdOVygkPfW1JBAc7cundg8DhSz371zbWjExUODQQpOqF/POvaP82rttv189kKlK5yth82CBUGryGbvEzQhBNgPbh4tM9OdTxdDhYm61+ihStCL7XsBu0vbls7xtl62k8WJt52Zj0t//rpHVANyXiuWmnb/lqwheOxxdanJw1m1kOq4drzkQFEJkgT8A/gZwN/AzQoi7zzvsY0DVGHMb8B+A30nuD4DfBD55kVP/R+DjwO3Jzwde61gd1z/GGGKpySRBTHdt/o/dPcFCzefj/+W5VKjFGNKyrM2DxXQHsx1IWkFMK5Rr5LE7r9Etnw2wWPU5WfaIlWasL4/AlotIbchnBJt6cxyea6wp1zyfF6btAvVtt45SzGVYTHb1shnBntE+Pvf8PPP1gNG+vJ38hR17qW3LO5u+sqpkgaLmSUKlWWrbibRbkax7h7STvRzvL6SKbQv1gK3DF/oa5rIZfu6BnWvKcsb6C2t8rDp0junsgobSMFO7tMT3tabZTha9yk7OzUhRD6zIhh9rauHNkW24EekEOt3ffWOs8EnJk5yre1ecFezeEKiHklO1Fst+RCtSVM/re/3eiQpTVY+SHzFVu3LfvVgbtDFsGSymGZztw0Wem1m9Vvz5c/PsHC7yOx+6g5+6bwtPTlUptSKkJinrE9a3LLnmRFLz5q7S7rsSxUeV9GqFyYK0HklqYUw9lNZ7LlE6HO7NM9aXZ/tQkb2jvSgD1TDi8FKDWvvqB19NL0z/ft3Xq+lqQDOWVMOIJT+k1L7xJfcrfoQv1RofQLCZwnZsA/exvjx7RnpZakUY4N7JAb53psIXX1zi4WMltg8V6YjoTg4WUdrwnr2j3DnejzZwx0Q/W1Nbhwszgp3Nzf3Tdf7Jl44wVfYZ6y+ktguXzAgmGcOOpUnZixnty5HPZtgx3EPFi9Py5B3DPTRDyYHZBj/+hgnyWcFDR5Z54kxlzWv8zA9uoyef5d88fJKvHrLG92P9hXQD5a4tA7xz7yi/85N3AXCucnGhslhpPvv8PE9NVXn42AqHu0Rv7pjoJyvgGyesSrYX2/aReyYH2D3am855fYVcWsIMECXtELeM9JLPZtg8UEitLlqRphpYe5hIGTb15Piz/XMcmGsSKU2oNI0gtlUp/qv/XJeDkHLoMoKOa8d6ZAQfAE4ZY84YYyLgs8CD5x3zIPCnye0vAD8qhBDGmLYx5nFsQJgihNgKDBljnjL2G/pnwIfXYayO65zZkg2megoZPC9e47m1b8cw//Ivj/LtoyucXLQXfWPsbnwuK9i7uX/NLjzY0tGOkMxv/cSdgJ0U/a5d6rYf8If7Z5muhShteOPWfgQ2qwh2IbNnrI9Pf/0kn/ne2YuO+ysHF/nkXxwCYKQ/z9bhHhZrqx/r+3et9hS+P/ERzAi7Y9/xkZLaUO8SePACRRhrGu2AIMmQeKEklrZf8sB0HakM2YxtYB/qyWGMYaEeXjQQvBhbh4pUvDgNKDt0PKI+um8SkciHL14kYNwIoijmudkmkbJS5q2krKcZrv5Ng67g/8RSiydOVi52Ksd1
SCdD0unNBZtFiLUt8a6Hipnay38WTy21OTzXpNGVSW/HirInaSZiUJHUtAN7numVNlP1FtVQEkpDJZA0/csvxsIwIlYapW0WopNV3zvWxzPTtTWZj4NzDcb6C+zuqigwxvYTLjSj1H+w4Uvm6xE7k++wFyrOlDzbb6dsv2MtUJTatoe5Hek0QyGThexIb47h3hxnyj7LzdD+3gJFLYo5eZGy+PXmTClIA0GpreKrF8YstSNqvqTiSZqBZLFxfVxTrhaLtcAG6oGidd419shCO+0DHe8vsCXxtxvvzzM5WORUyf6dMsLep4yh4knmG7Ykfu9oHy8ttCjmMtw+0dcVCOZQ2vD1oyvEyRxW79r06ARWY315evN2Cdi+xMZZfyGLENBM+vRXWrakFGCsP089kPzuY+cA2D7cw3dOlpHa8K69Y2zqtZm3jvBLK/kuDhZzxErzyPEVHjtVTs/VodO3uDNp/5iuXrw3+LunKnz+wAL/9tEzfOaJaT710In0sR2bevjg3ROcLnn05IRdJxir4jsxUKDcjvAixYHZBlob3r57hHxGpMHqPZODZIRgrD/PpvMyrEGs0xLeMxWfJ8/VqAeKSBuaV2Dx8nKEYYSGNdY5DsfVZj0Cwe3ATNf/Z5P7LnqMMUYCdWDsMuecvcw5ARBCfFwIsV8IsX9l5fJm4Y7rm2dnbfnMStlHJiWSd20b4EP7JvmxezZzPNn1ayY7bj/15m1sHiiwbbiHTb15zpY9hnty7Ntmd9NjpVHGkBE2GOvJZwilWuNZdGihzVwtpJGcc6CQp9wKCWKFMbYkbddIL+VWxFRpNVOw3Aj53UdO838+fJJ/8eWj6f0jfXkmh4ssNlYnsI//8G4e+cdv51v/6O185L6tAOSELdnyurJ9nc3zv//WHcTK0AwUT5+rEytNIwnY/Ejy9Nkaj01XbJN6IJmvhwz15Hh+pk4zlNzZ5Rn1cmxPJtuF+trJdjjxiNrUm2P7UAGlVzM1G83XjpU4Xmml3lBgRXcipRnvs15XndKcph/x9EyVw+Umc5XrJ6PpuDhhGKGMDQQnBwpM9BfozWcY6y9gTGKtIDVTl/lbvrBQ5+BSnXpkS72lssqbBhuYlDxJM9J860SFp05XeG6uQdmzgVUoNUGsWLqCIKUWrir/LtVDykm2bfNAniDWPNAlKtXJLDS6DMRt+at9fitSlDyZBlAH55t89N4tBLHmyFIbraEeKirJdcqwuuGhjSGXsf1LxhhG+/JpZuhcNbSCGAYagbrAJuZqcK7up12RsTY8cqLKoycqLLWDdOyhMhtidH+1ObPicXLRzhPPz9bT4OB8q6CFVkCQ9Jc+OVXjrw/Z9cu7bx1FCMFMEgBpA315W8IYa6uUnRGwa6SHg/NN3jg5QD6b4Y1bB3nHnk3cOtbHoyfL/OGT03zlsPVYrQerBusd/vT7M/z2108isO0EHc/ecjvidMnjUw8dp+rHjPTmKbUjjiy1aEeKN261gmnd5vG3T/Sx1Az502dmeePWQW4d611j8m6MSf0yB4o5FuoB2pC2SnQUrHtymdQoviefZctggRfnG2lA281XjyynwTOsNZUH0g2XTT359DvVn8/ajKqBvz68zKmShx9rNg8WedP2ofT7U/Zi6oFkpDfPbD24QCzGGMNMLeANm/u5b9tgokqq11jBvJJe5g5ebPAidUEVk8NxNVmPQPBivXvnF0hfyTGv6nhjzB8aY+43xtw/MTFxsUMcryOOLrUThdBE9MXA371/O//ywTfQ8CULycQxV/URwOnlFvmMYOdoL0O9OYJYM7Pc5t99xFYnS2XQGnKZDL2FLA/eO0ks9f/P3nmHyXWWZ/93prftvWhXq24VS+42RhhsYzAtNEMCBmITSgikkQTSKPk+QiAJXwoEkhBCIGA62BiDkW0M7pYlq9ddabW9zE6f08v3x3vOmTO7K9s4Nshk7+vSda12Z2dnzpxz3vd5nrug6iZ3HZtnpqRyzI1mMN2Jw93H
593fFfpDw3Loboxj2g5TedUtxix+/+sH+K8Hx7jl0Ym699CcitLdlGCmWL/hEjoFifZMjGhYIvQBw+8AACAASURBVCyJTqXXzf/A1UP+Yy9f3UxrSnR298+KosdbjCwHDsyWmHajLTxntbmyxqfuGaE9HeOajeJasGyHrz4yviSOwoMXKTG56LV6Yv6iarK6NYVlC5v7XzaOTJUYK6kUVQvNCFBkLdGlnS0ZlFTLn+buGS8xXxVZbIdmV3QX5zIeOZ3n+0eymJZDtmJwJq/SmIjQlIj4C5XliGJo/knojXNVnbmqcAAuKSYlRdAuC6qgUIIb0K1onMxXmVN0t7AS0zbZsMlWn3wjN5FX/ELwJ8ML3HZwlksHmnyKujfVgOULQceBG87vYedQi28C5Wl7f3BknrWuvhjqzag86JbI+cy7BlOybuE4gh4Y1Irpps0a19Qirz77xVde1XHcJduyHeZUEXuguJRWj+lRepqukOcyDk6XODBTYteReRH14H6ui10OipqJYTl1rJc/3DnIJaua+OQ9p5gqaQy5hkORRXrvC/saKSomRdX0C7OGeISbL+snFQtTcinRnsN0UVnaHJR1i91nCqRiYX58LMt7XEbLO752kD++9SiHpiv84LAotmZLOrtOLJCJhdneK0zHglO8D1y9hpPzwujovTsHkSSpThdb1ix/IpiJh/0CcKakYlh2rRCM1m9JX7a5kyMzFW47NFs31SwqBmdyCtdt6uBF68VMoTVdT4n1JqRdDXF/85iKhf0174hrnCMB//7IBIYljrNq2nzPLaBjYYkvPDJJNaBHT8UiXD7QgmwI+vY697rSLAfddciVwNf3/jyoauLec640XVfwvwPPRCE4AawK/L8fmDrbYyRJigBNwBNxtSbc53mi51zBryCmChqVAAXRcRxesFEE1x5yi5mGRIRTc1Xe/cIhHhjOcTors74zU+e0FQ5JxCMhN0PI8TOXLh/y7LhtfnQyy389NslEUaVY1ZE1ExyHafd5HEAzTMIhiQ0dGf97YwsKf/PDE5ycrYn/f/eaNWxyoyWaU1G6GxMsVPUllEsQBWF/c9LPKTIsm6Ksc2KuwuvP7yIVkbjz6DxNyajIBKsadQHBtu0wWzGwHNADi83ogoysW/zF9et9DeXhqRJ//+Nhbv7iXn8hDsJzX51aFBjfEI8QkkTelNdZlQ37l27ucGimTEUXU5GgIN/LZCyqItOtoovXOl0RtNqCavqf6wrOTZxcqDIr66gB2rZiWMTCEodmKuxc3cKVq5vFpPxJuu2qS9cMugWqRr0TqeNOyHKKQcWdBHobMNmwyT2F7MzZiuZTVj3cdGmfX0T2BgpBCdfGftFEsKAYfui2+B7EIxJlzeQb+2b8az8ZqZ94REISipsx6BWR3nyxMREhFZiQGJbD7vESqmGjmPYzGp+xGEVZp6rbWO4k1nREgHlJFf/mqyYFVeR/KsvcH5/ryGs6ZcNgShY5p7Ih7leKWT8tqhqWMBoK1AvxSJiRrMwJl757yYCgFt96aM7XWr5uWxev2drFmFtMDbYIzVveNWb5z4fH+cpjYrt026FZvrx7gqJq0JiM8pkbtnBBf2Pd+eoZu5Q1a0mzwbAcuhvijOaF2c3LN3f4E7sWVw978SrBvslWdUISPj26FNDnzpY1vzCUgK89NgmIc30ir/iNx+SiEPlXbu2ipzHOVx6b4qavHkA3bU4vyJyYFxPXDR1p3rtzkGs3tNddVyCmjJGQRCwU4sYLBAsnHQvT0xhHYmnz884TWa4YbEYLZoK6H5l3XLzmimrZSMD69pSvCzYth0go5LOQyurPf41VA/c++X+BkdIKzg08E4XgbmC9JElDkiTFgF8Hblv0mNuAt7lfvx64x3GWaW+6cBxnGihLknS56xb6VuDWZ+C1ruAcR26RQ5jjwOfuGWE8J3NoskRIgtdc3MfwbIXnrW31H7euM8MNF9fYw3nZoDkZFY6fDnRkxM26NXDTLsgW
Z/IqE6VaDEE0JHHlmlYiIQnHAd2wef6aVoIN2eG5CncfnefVF/T631vfleFzb9nB5992AfFImME2UTydWcYyHGB9Z5qSYgpzHDez6D8enqA9HWWioPKvD44RkWrHIEgrsh3RzS0rpk+Z+ehL1zM8X+U3Lu5jeyDsfiFQVM8vQwlrTERpiEeYLNRTQ8MhicZEhKJi+pMMxbA5NLP0/ZRkHfVp0GCeDrKKTkER2Y7xcIi1bUkSkRBtqRjNiQgpt6NsWg6PT1QoKMJmXzMdCssUwis4d5BXDRTD8rO7LNvhwGTJ18cdmCqzrl1MNcr6UurURE7m9JxMsaoh6xYlrZ4CvphtZTtig2e52W1BF1rLEbqoJ0NB0bFsp84QZTSnMFUU00wvJgIEDe51X9jLfEUjFpYIS+I6W6gadVETjuOwujXFjr5G9kwIw5mOTIyCYvDOy2r90Y50lCDbcH17yqfANScjdUWWt9wW3cJkobL89TqRU7j3eI6qpqPrBkenKz93zMNYTlAeFcOmpFioriGKbjn+69XdZo38LBaChmFSqBrousFDIwWfTfJsoiTrlDWLvCJy5hRD6DcNyyGvWH6zsqIJcxHDduoojZNFlT3jJSIhiQ9cPcRVgTXOowteuqpJXBtTJcIhid7GOH979wj/cv8ZdNPm++40y8N3D8yiGDZNiQg9jQn+6Oo13Hx5rXcfPM9nF60RZ/IKLW6RFgtLXLO+pujpbojz3ucPcJPrQr1QFWuuN9F+2ebO2vOWNL8R+dDpHIcCxi43f2U/DfEIGzvT/M7OwSXHdDBwbew6nuX93zvKlx6dJCTB2vYUkiTRlIxQUs26QjYckuhqiDFd0gi77qGpWJh4JER7YHoY3Ik2xKNopsPF/Y1sd51bASSXpLa6JUlRMTg6V6W3KU46FvH3FJYjGDQ5Reh3i09jqlcNfBZBv4AVrODZxP+4EHQ1f+8F7gSOAt9wHOewJEl/JUnSq9yH/QfQJknSMPCHgB8xIUnSKPAp4DclSZoIOI7+NvB5YBgYAX74P32tKzi3kZd1xhfcMPTVQltzXleGf/vJaT75g+McmiiytjPDe65ZQ0Miwqd3jfi/u74rzWVrWvl/b9wGwGRBYagt4RaCDkNuxlGr2620bJuqZiJrFrJrMvLBa9cwX9HZ1JUhHQ/7sRFr2lPkAzSxHx2eRTEsLlld0/+s78zQkIhygWsKs6ZDbFhH5pd3H9zYmUE2rCVF3l+77ykkwe4zBQzTpiCbdRNBoVu0MVxziVQs7FtsX7a6ZdExrb3uhbOYvfQ2xZlyNYLB/kxTMsqu41k+9uNhDMtGNx2OZ+st0DVN5/tH57jjSHbZ534mMbEgk1csHOC6DW2ohsVQa4q17SksS2gtN3eLDCrLhoPzZd+IB0QG1wrOTWiasF2v6II6mIyG2OJO2EuqyZWrmxkvqCTdSXdwaujhkbECD0/k2T1WoqLXNuEegqHvHj1xvirMYwqqSUiCLV0ZtnaJa7fyFGiL4vXWaxlGsjITBdU3vfjsG7byss012cKh6TJNySgNiQjpWJh7R3LsOp6l021WOUA8LHHpQK2hs607g245PkXQtBz63Of38OL1bbzSNaJqTES4dkM723sbuHSgibAkcf2mdkA0dE6fxYlx32SRM+UqR6ar7J+ocnCmxOmF5R97NsxVNHSrdl+LhkK8aF0bb790FeFF/Ej1WdRCHZ6uct/IAveczDNarPL4xFOPHXm6mCho6O6Uzzt/vHy9kASPjBY4NFFmvqpjWUIW0ByvUXjH8ip7Jops7c6wviNNPBIi4xaKO3obufHCXiRJ4pbHp3n4TJFoSCIUkhjLK5xekDk8c3b6u+cEnYyG/dcE9cXHsdn69erQdJkfHRVSiU2dGd/N28OOvkafbZN1A9w93HhJH1+6UWToTpc0ypq4xjxDlt95wWrOc6/xkWyVj79yE5u7l+bz9gem6t/cNw2IgnlNW4qEO0H0MjkXu592NcSZq2h+
JIQ3JQ/SZNe2JXnLhb0koyFuPSKK6M1dGd58Ya8fM+GdtgMtCSxX27jNfa1NiahwAHeEwziIa7j6FBuPZcXgoZE8R6crdRrD3P/AeXQFK/h58IzkCDqOc4fjOBscx1nrOM7H3O99yHGc29yvVcdxbnAcZ53jOJc6jnMq8LurHcdpdRwn4zhOv+M4R9zvP+Y4zlb3Od/7RBPEFfxq4Lf/ez8Vl4411J4mFgn5geqHJkrcf2KBKze00ZaJc/5AM9MFhXddtRqAQZda5bll/uYX9nJkvOR2/B2mcyr/9rPT7D6dJ+wWCpphUVJqjpmGuylZ256iMxP3u4Ft6Rg52bObDvPAsGA1b1/VxAev38DajjTti4T4g20pwiGJU2crBLvEAmjatv93zu+tLYKf/43zaU3HkF2jAdt1UkxEQthObSNsOw7NyYivBVlsHZ4PaKnOVgj2NSeZLKh86/Epbvyvx8nLOhN5xbesB1E4C5ON+knCA6NFZio64+Vnv9t+IltFNW1iYYnVLUl0y2GwNcnGzjRTJY2cbLChI00sHHJDgk1/uhQJSaiWjaqu0EPPRVQMh7yrzbMdQU2+ZFUtPqHFjVz567tOicgQy6Ys13+Wc7LOvKwzXq4VLotNHkBMGgtVkwHPldP9mzdf0s/bLu5jgzt1VJ7CJKzqGkoFMzsnCgoj2Srr3edJRsN0NdSKtknX2KkhHqEhFsZx4PuH57lysIUbL+jBcaC7Mc75PbX7wfY+8XW2atDbEKekmDx0uug3bl66sZ0B1/Ie4PBUmRNzFd63c5Cexji65XBpfxMSYvo5Ly9/L8hpBhXNIlvVmZNVSrrxc1Oq86qJato+y6KiW0wUNH54bJ65RbpLzbSflqnGU8FUWSWnG8wqGlXDJPcL0EbOBxgt6ViYjR1pn+bpADld53i2zKkFBdUSjcZwSKIpEaEzE+PukwvkFZOLAuf+371qI52ZGIZl++fEvklR8L14Yxu5qoFuOeRkgz1n0YIDdVPnYIPQCDRLvu/GOQQxUVB5/lALb72kd8nPgliQ9bp1MCRJZOIRuhvinFqQqWgW6ViYomLSno7x+gt6+dtXi77/yBM42QZNaYL0z5ecV2uueFP3d339YN1jOjMxslXDL8rTbuH45ot6ee/zB+hqiGHaDtt6Gtg51ELOPS6dmbj73sW9pCEe4bXbumhP1d6f91mEQxJtqSiDLUmu31Sbgi7ODl0MXTc4PFFh71iJmYrKWF5BM2xU03aN4lYKwRX8YvCMFIIrWMEzgdEF2adpbOvL0BKgb0zkFNLxMO+5Zi0gNob5qs47dq5m91+8kGg4hGnZ9LXUuocTeRXbdjBNm4dHcvzrT0f58+8eoSUdY1Vzgpe6C4lhCfOYr+0Ruoq1HWna0jF/E9majpJzC6rtrh18X0uCrsYEN1zcxzfefSnSok53NBxisDXJyNzyheCQW7jatoPpCszf94LV7u9KNCejbOtt8KcY3pQkHQ/j4PjHyXFER7KgGEgIR7Ygggt+9qyFYILZssYtj00yVVR57b8/xlu+9DgX9TfxwWvF8Y6GRHG1WMR+Ki+Td00LlsPhyQpjZ6HHPhWMZmWqrlZiQRYW+9jwN3efJh4JMdiSoC1Ve8/rO9KkYiGRO6fZVA0bw7QpKSamZTNdWgmaPxcxXdT8hojjNjeCG7rmZNQPbq5qQnc2Waydz4qq+VPyomqKCIVEpG5SJyEcdj3KVbZq0JasnTtr21IcmCrz+Ucm0U27Tiu0HIpVzTU/EdOeVCxMb1Och0cL6JbD5u4Gfja8wJ/ddpSGeL32qSkhKKORkMQal0b+7QOz/NN9Y4Cr0Q1JvOPyfs7vbWBTZ4ZEJMTnHhz34x9EDIZ4vheuacWwHKaKGmEJvrJnik/efQrVsGq6JtOmLR3FsB0/X/GRUwWOT9fuUSVNFHF51aCgmhTV+rxFXTe452iWn53IcWRyedqobAhn13i4lgV7/2iex6eWTqt0WxgDPRso6yZF
xaSiWRRVi/IvwJgmr4r33p2J867LBzg4U/WLEMt2KGsmZcMkq2iopk00JFHWLHqb4qxtT1FUTSIhie29tUIwEg65kQcGOVlntiwyVF+/vYtf29rFVMCd+sBUeYmxzCu2dPKR69ezvqPW2PPWhY2d6SW05pdv6SQeCfH2y1dxxepmUrEwv3lpn99kPDFXrfsdENdstqLXGch4WN+Z5uR8lYpmkolHKCgGzS6dMh2P0NuU4OR8ZcnvebhmQzs3XtxHxr2Gbrqsn8tXN7NzTY026xWCqmFz30jNfqIzIwq9CZcWnHQngtFwiB19jfQ1JfyC7crVLSQiIcKSiNY4tSAz4k7Dq7rF5QPNrG1L8fyhFt50QU/d9LOnIc5sRSfrNliEWcwTn2+nshqnchXmFY2SYVLUdFRLnKs597xdwQp+EVgpBFdwDsHxC58NHcklFsrXn99Ni3vzbUlHKcgGkiTCmB87nee8D/6YY1NlMvEIDe7C4FFDPXiB8U2JCL91xSr+6Oo12I6Dolscmi7zvKEWOjIxf6ECMRFcqOg0JiL0u8YpV659ovQTge2rmnhgWNjTL4anVbCdWuByczLK53/jfD57w1ZAaDAcalmJLakomVjELwL/6qXriIYk+poSFBWDxmSkbjIBYsHva04QDUv+RPD7+6f51I9P+o/pbUpgO/jdUA+jOZlLB5vZ3J3BcSerumVTVTUUTef7B2dZUAxf77PYSKZQ1XhwfIG7RrIcWWYT+GTIVzXuPZXlruPi+OVVYRAzUxZ/52XntfO73z7MJ+46xU2X9vGuK1axqjlBW0oU8V5GV9nNGdRNmCrXXmOhorNvrLTs317BLxYLsoHtQGc6SjIiLN6LAWqUZtp8/OUbuGF7N5bb3JkJfJYzZQPZsClrNY3hquY416wXdMgNHSkM06kzV1IM29f3REJw98kFPxNN0e06OvZyOL0gHEO9e0x3Q5yOdMy/jjZ3Z/jwD47z0On8Espag3uPquoWf3btWl65paMuhDrlTi4uG2zmd3cOiqmRW7TW6x7Fvevzj0zwu985wr6pEo2BZtADp/P+Br6gmKxuSWJYDlXDYr6gc6pQ5dCcmCKdydYacVXDQnY3skG34JJmM1lVOFOqcmiuxOPjS69rxY1E8GMAIiGuGGzmt69YteSxpuUwUXx2pvQVXTjFevRT/Uk+z2cCsiGMcPbPVPjAHSfIK4ZvxmI7oJkiz1E2bUzLoTMdZbqk0duYYMClQG7qTNcZ/YAoTLJVnT/5/gn+/A5x7/YMhqYCx2+ioDLUluTXtnVxsUstfvPFfX6ckoe8bGDbjlscOr7hSywscfNl/dzytgt4+ZZOBluTyLrlG3ON5mQ++P1jfMU1e/GfTxHsi450PTMGhKFLTjY4NlsRDVxXv+9hXUeK4SeYCMYiIV67vZsNHWlWNSd4xZZO/uSatb4hGgitu4cHTuf8Nd9rHh2eKdOSjCwpkjOxsF+oJ6NhXn5eB5cNNBMOSTw0WiAeCfGKzR1ops10SUM2LF61uZMdi45nV0Ocharhu6FK0pPTnuerGjndoGoKVoFh2wQZ75X/YSbhCp4dHJ2qcGBc7BseHy3x0Ej+l/yK/udYKQRXcM6gKhuo7hSgp63R1+W9/hJhAvNK1/kLoDkdoxowg/js3UJbd2SyxI/+4Hl85z2XAfhmMel4mH/49W3YDoSB8bzCN/dOMdSa5BWuqP3y1S38xUs3IEmSH6gOYiOz68g8W/safYvqiwP6wLPhD168jtZMlFtdXcNitKSiOI54fQ3xMOGQRGsq6usYPfc1w42xaEvHaElFRSbiqkZuPTiLatq0pqMUFdPP/gsiL+u0pGO0ZWJk3anmR247xlcemfDNY/oC4fM3BI6xZyCzqjkpDB8soU0cmVfZM15hJF+l4uorQxLMLursn15QhHheNjg899QLwdmCyh2H5nhotMCCYjJWEl1ZxbDRDBtJgk++ciPXbmj3Hfcu6m/0HfY6MrFamHVg
86dbtn8MAO4ezrJnusBE/ufTQK3gmUdRNXCAVDRERbdoSkYoqgbNSeFe600wPHqbZjnkA4Hvs2Wdil6zXld0ixNzMgenynSko1yzvm1JIZBXDN9iPhYK8Y19M/7PhAvtE3fkZzzHUMQkorsxTodLKRtoSdTFBfQ1JYiEJN56aR+DrUnO72ukJRVlwX1fGzrqrf0XFwIAv7a1q+7/yWgIzbBQdJtHx4pYjjgO4PjmVtMlzZ8I/nQkR0c6hgPIps2+6SJFzSCvGhyerLB7QkwyS+5US3MpasHCc6akUtRMyrpFWTc5U1y6gVcW6TcvG2jiNVu7GGpN8ZfXrOXGC3oYaEogIei6C89SluDi3L7lMqmeaSy3+f+jq4ZoSYoGXmc6huM4VFQTyxEO2Jpp09sY55KBJrZ0Z7jx4qUUzLZ01M/hA9xJsigEx/IKiUjIL3JaUzHedmk/f3rtWr5504XEIyEKssGdR+f8Aikn61QqOt98dALbgbdc0s/vv3CIz75hWx27xSuwvOn8vNtMvPtEvSb8sGv+4kkegvD0eNmqoO4XFMN3HAXBjpkuqmhPMkF77wtW8+HrNyxh3wC++UtHJsax2Sr/8NNRFN3ysxPzikl3Y3zJ72XiESqa6R+XywaaebV7nc1VdHoa4n5W4V/+8CR/dNtx36DNcRwOTpfRTJset2n7mGvu5DhPHgpf8SQpls2Cm2tqBI6BspIleM5hoWxwaqHKWEFhtqQxXpKZrWgUniTO6FxH5MkfsoIV/GKgB8w8gplAH3r1Zt75ojUMBTZL3kJSqOp0Nsa5312YHERnLxEJkYqFMR1RDLY2xtne30Q4JFGUDeYqOp+7/wy7zxR44QYxNXjnlQM+dSS4UD16Kk9RMXj3C4dY25FmsDXF1ZuePLMyHY+wuaeRkbnlaS+tbncUakVfEB7NpuIuwu3pGJGwxKmFMJ3pGP/tdmVbklF307xMIVg16GqMi5DcRY6s9xyb542X9NPv0mmbkxFuumKA4XmZxyeKjLqh3f3NCXTLIeVqE08uVFFN15XREhOJSEgEIA+01T6jyZJK1qXhpaNP/Ua5Z6LIqYL420XVJOk6gWqW2JgOtiRpTUXr9DATBZV1HWm+tW+agqz7k1TLFpviRCTk0gZrxep0RUOzHGZKqj/pXcEvB0XVQNEt9k6Ia2VkXub+Uzk/R63g0hMH3HPVsh1yAepoTjH84l+3HFTDZkrXAZ3zexroblh6fTkOfO/gPMlYCM299zQnIwy2JNk/VUazHB4YzrO9L00mufT38y7d2LYdZMOiuzFO3J2Cbelu4KHTtU5xSJL4xk0XAvDqbd2ohsWR6TKyLorWNYH4CMOyeeRM3m9seBD5hGH+/t5RQMQGHAtQz1tTUXKygWHZdDXE0UybvGz4k8QHRwscmqkQDovreK6q+QZKh+aKVA2LgmKiWTUKeEG1SMfEe5ouKIznNdeAx3ELvqVTNo9S6xWEQYpbLBLin+4boy0VxcbBtDgrrfx/Ct0SrqSRkEResWhNRjBNi0hkaZH9TMErmtvTUd50QS+tySjJaAjDtDFsm0sGmhgvqOybEgVDyC1Pe5viNCej/IGreQ8+n207rGquvz/dsL3bZ38cni6zqSvDQlVnvKD6U25JEs60AF/ZPcG39k2TjoV5/to2su5aYFgOCTfS5AUBh1IPtexLg9ZU1JcXlDWLR0YLbO7J0BCPcHCqTDoW9s/jmZLKw6fzvHJbN2sCxjQbOtPcstuoa7QOtKZwgPG8uIefDcutbx6aklG++tYdxCMhvrx7ku8dnGWoNcmrtnWRioWRdYvexsSS38vEw77JzGJZRbaqs6o5seTvPni6wFXrWtkzUeJzD44DsKOvwW++bGhPcWxeXlLYHpuqIEkSazsSRCJhFN2iqJh+xI1u2qiR2h5ItSxU3SARO/v7fi6jKJukohCNPnfKkOH5CnlX03xgokxBFw3MMwsKzcvs4Z4rWJkIruCcQEkx6jromUSE
f3rLDt599RqSsXBdEQj4F12+apCr6v5kqOAWVpIk0dOUYF17ig2daVpTURqTUa7Z1MHYgszbLu3nstXN7Bkv8vfuNLEnMBm7Yqi2KI7MV0nHwmzuaSARDfOSrV3LdiWXw7rONGcWlCU0V4CWVMx/z8sWgqn671mWza4jcxQVg9sP10T9LSl3IphcekPNyzotqSjtmThzJQ3NtPzO+P/bNczjYwVaUjH+5Y3buOWmi0hGw3zqdVt42ZZORl1tnz81CUtYtsN4SWVONqjognqp6DZl1WJuUVcsWzVci35B6yzJT40ClnVzyKpuRlpesdh7poBiiL+31XWaOzpTK7DHCyrTJZWv7pni/lN5DMum4lJCOzNxOjOCLlrUTZ/CKhs2Vd3yjYBW8MuDbNr+NWLbDvefEnTgVCxMSzLKyfkqqmGRjIZpSkSwbLF59aba3kQxGKLu4acns7zv20fqJkLPW93MC92Nr6KL8+DmS/v4u1dtYuca4bxrmA7DhQoPn1lqwCFrOmXdwLDF+e04gsrtUdE2d2e469i8//jf+9Yhdrn/tx2HG/9rL996fArbdshWdeKREFevF6+nohrcc2KBGdek5bP3n+HDd5zAsh16A/eo4CQf4G2X9PKmC3tIRsUxa0lFmS6pVFXTn5iUVFNMO21xLXgo6SKU3ivrLMfx6YDelOux8RKzsmAJrGlJEglJwp01QAnXNB3dsnGc2iQxSIv14gkWZIO8bGI6zrJZgnMljZni0zegMgx3aqnZ5BXx/GXNfMpOjk8X3hTo5Zs66G9KkIqFOTJbYSyvIWs2X9g9RSoWrq1XiinMr1qXNqLO5BR+478e52O7huuMuz7+8g1cs6HN/X1BR9zSk6HPpZYGm5iL8R2XnTIToJM6DstmzEKtEPSMSzxGRTIa4hN3j/Dfu0Uz8shsmc3dGcIhiYpm8hv/uZd/vPc0+ydLhCSJze49e7AliWratKSi/Nl3DvOdvVP+ez+TkykqBh+54zi3PDZx1vdw4B9BtQAAIABJREFUNiSiYSRJ4q2X9tOcjDBZVAlJkk+57czEyFV1PvLDE35B600MZ8v1a4BiWCxUDdrTsTotIODTP+8+ueB/7+hslb+4Zi0ffvE6Btyi3WMgmKbFZF7h1EKV4WyFveNlDMOkYph1OacOonnhwbBsZn9FNe2Fqs6jowX2T55dG3ouIqcYWI6D5TjkNd2/X85Xn9uf00ohuIJzAoeninV5PpIkcf353bz/+g3LPt6bGBZknbFsjZ5UDOjc+poTwvTBsH1t4Uu2dlLVLC5c1cSHr9/IVetqWj/PijpX1fmnu0Z400V9vPGiXkbmq6ztTD/l4i+ItR1pLMfh9gOCdjZX0vj7H59EdxdDj7401JpgsTFu6yLh/fcOzJCTDXTTrtMT3Te8wOkF2bcHBzEN+8buCebLOv0tSYbaU5zJKYxmZRzgj1+ynkw8wi2PigX3vO4G//2D0A0WFBPVnXSA6J4KwwPLpymtdRdxSYK5ql6nEywHNpqKafPomaemx8upBguKyUIgR+mx6aI/YfBodMPZKqtbxYZ0LK+w61iNrmRaNY1XWzpKV0Mc2xa6oUddXWBBNano9opN9zkAxRDZkGvbkvQ01jZeUyWNZCzMaE7hy+4EvDUVxbJFgXJwUnyWnhHIJX0NPt36wv5Gehvj/mLtZUz+yYuGuPmyft58UQ87Ak693gTOKyZN2yFbNZhfhvazf6JCRbew7JquOREJsaY9xbUb2tncleGxsQLXBZwN//pOoe06nZVZcDcOjuP4m+s3XdjLP77mPD+g+kF3orjreJaD02XuOpGlMR6mPR3lDTu6fcoawGWDTWzoSHP1+jbKmklzShSCx2arvO87R/jwdWv55Cs3uK8zjOHSvKu6hW7ZlFSLaFjypyINsYhfrBiWwz3HsmRVDdkUOkzDdoiGRME4Ww5E1MhCxxuM6ggWgjOLNtymJTSFI7NVFE08j6rpPDya56HRAntOF5Yc
+6eCakDXVoPE/LNkTOOhNhGsfTZecLl3e7/18DzxSBjHcZgoqmzqyvh6yiDuHRaFxpGZCrFAjERHwJnzoKu93tbTSH/T2QtBr9B7fKLEu27Zz7HZ2gbcNO2zOlR6haA3tc1WdLoaYvz1KzaJ91ZQUQ2L6aLGWnfyd2+gQFpwz+0/vXYtH3rJej58x3FAaPPuPDzHx35wnP7mJCFJFL5feGiMn55c4N8eGPPdvJ8OuhvjvtvtFa6M4+hsmd/62kEOTJV5wL22VrlF4mcfHOOAeyyHszLv+85RTNuhLR1dcjxzskFBMTg5L/PqbZ28fnsXmmkTkiSS0bDvT6BbNoWqwcHJKvsmSuR0g5ymM1VSOZXV/CJw51ArzYkItlOvYVYt279P/KphIq+S13S/2fVcgWoKOZJiWCiWhWJYlFSTivHc/pxWCsEVnBP4waFZf/MQegr1VrM7LcvLRl1oe7AQ7GlOMF1QyVV1f+LW5dJDFio6yViYj7x8I9GwxGrXuc92HD747cPcc2yepkSEd105yPBclXWdS7UPTwWb3Y3mx35wnNmSymfvPcVXH5ng3uPzviW+4zj8+wNjfOqeUziOwyd2DXP/SI5koDALFone15981SZetL6V213L76CW5z8fOMMnfnSSnevbePPlq1jXmcGyHe53F+mtfY1cv62Ln53IUlaX3sR86q1i0JGJ+fRK4cYptFjxsMTeCVf754hA8AMBB8KqYVHVLCKSoL0spydaDnJAY2S5x6esWb42oyUVxXYchrMyGzrTDLWlOD5XYf9kiW09DcTCUl1TwbQdOhuENkozbc4UFfe5xc+fSl7cCp49TOVlFDdTc6AlyaA7bQLoysR43fZuAA5M1sLVHUcElE9WNObyqjA30Sz2TJS5xm3ubOhI8dGXrvP/jqdVas/UqHPepropEfE3415DxTs/qstsSKcrKrIhqMeeu/Bf3n6Md9+yn/fsHPTNby7ob/Tva97EPmjKZDv1br7pWNif9H1r/zQj2ap/LI7NCGrZ37xiI9dtbK+LpHjH5av811+QDTERDDSGDkxVaElGaXYNM3Q3g7Sii4lZd0OcV5zX6b+2qbLuX4em7TBRUSgqJlVXh/nYZJnRgo5q2L5uDGC+bGDZYhqSjIbY1Jnm2FyVP/3BCT7yo2H+7aHxuuNoOw6yYfP4dJGfDudRdIPDMzJ53aBkGIwWZeaLGtVl7lFPhNmyvsTZEkRH/9mEd+vyQ8Ztx5+CShK8ZmsXvY1xpkoali0mgtt6Mvzjvaf57P1n6p7rdGBdk3WL//Oy9Xz85fWN0UfOFGhORljbnvIngq2pKCfmKrzoHx/0r5mge/SJuWrd/dG2nSedCPoawarIChxsTbJzTQsLss6ZvIIDrG4VheCdR+Z8WYP3d9PxCOf3NXDSpTIH6ZaxSIjuxgTjeYXHJ0q+PvbDPzjOd/cvr69/MvQ0JvxC8MqhFv7q+nXcc6JWoHpT6FZ/nTN9o6iv7p3yH9eWitUZzIRDEjnZ4KhbSJ/f0+Ab5HgNnbT7+nXbYc9YkTNFmZyuYzlCT1wyDGbKir+eXbyqica4aLwYQWM70/mVNYzJygaKZVF8jhVQmiUaYVWXDVXVbXTLIbGMpvu5hJVCcAXnBPa5LkybBprY/7EXP+njvWiJfFXnTFYmJMG6rgxFJTgRTFLVLfKy4T/eyzlaCHT5b33npfzLG88H4O/uPMkeN7vQMG0mCypFxWDtE2gXngirWlN86JWie3p8puJ32Y9NV/xiy9uw3H5olvtGcvzoyBx/efsxRhdkvnHThf4kzkMmHuGqda2s60jzYlffCHChG20xnpP53L2jXLe5k0+9cRuJaJh17ib4LjcceLAtySWrWzAsh7FlwqW915aTDaLhEO3pmLvpFW6AhuUw7dr3b+pMiwBd3WbMzV06OVPxzV3mygaa6ZBTTMafQpSEN8lzHIeibFLVLLJVwz8GTYkIY3kFWbdY155ma0+GY7NVTucUNndn
aE5G/c15ezrKG3d0+x36qm5TUE3yAb2kR30zDJNdR+f5zv5pDk48826iBVnnJ8cX/DiMFQgcn6+iup3wtpSIQulpjPPHVw9x18EZ/uqO49ywo5vJosau41k60lFsB7cba3BgpuyfayMLCnvce0lzMlqnT7tyqIUPXD1Ea4By7W0EE0EHQtem3vIaEPZSWndRq02+RIOkpotzHIcx14Dom49OUqnobiRGrblSg5g6BlHRLQZbk0Qkif96dNKfbHnXlofldI+aaSMbNs2pSN0kY/dYAUmSGGpN+eHawYbL+T2N/OvDgh2wOOrCdFkAlkvzBrh8oIl4WKKqi6gJD3nFEJMNw+b83gZ/kjRf0ZlYhupp2yJUvWKYzCkaj46WmSgqFBRhMlXQDB48k+fBU8tPBg9NlJelkM6WdRabhDosPvbPLBRNFJ+OS1sG+NJjk/zUdRVsTkS4qK+RKwbFhEo3bSTgwv4mfjqSY9fxGqNhz3iRA1Nlf4o9X9FpiEfqpoGGZbN3osglrsvlRauaeOl5HWzqyvDoqDheD7gU67xiMBignwYbi6IQXL4Z1hCvFYKO4zBXrmUFtmdizFd07j4uCqzBVpFHe2i6zGu39xANS9x5dI5Hz4j3X9VqQfJBR17TtulqiHFirsJ4XuHGS/q5dLCZR88U+Kd7Tz8tOm9PY5y8bPgFX29jwr/WAWZLXnFe33UeL6hMFFT/OA+01NOv17WnyMk6h6YrNMTD9Dcn/Md6zIEfHs2iuhPprKpR1A3XMdYmGQ1jOQ5FzcRyHMKSKEKPZWVko96p2LCh+itaCFZ0E9MSTbTdp4qMLagcnqigPkuZos8Ugk3BfIBJtJy057mElUJwBecE5t3F/LyeDMcmi+w6MM1bP/3gWekhzW6nbvepPKNZmd7mJB0NsbqJYG9z7SbuOXE2p6JI1IerJ2Nhf/p238kFrlzX6joV6vzbz0aJhUNcFSi4ngglxaCwSHPmGcsMz1WZdTOfHjuT9xemGy/u8x/74R8c97/+4K1HKMgGO/oa64Kxn7+mhd+7agiAA67pwC03XchLXffTw1NlLMfh7TsHCbkL3er2FJGQxInZCp0NcRoSUTrciYKnswrCmyZ4msuexri/uXnBUAsXuvbZb9jRzZVDYmMjG7avE9w7VfRD70E4+FV0i72TT1xg5SoiX0vWLGTXyEI3HSwHTFtMi+86Ps8ffvcokZDExQNNbAkEb2/uztAUKAR//6rVtKdj/nlk2sK+/dBMBdN2UHXLz3vaP1VhpFBlsqxyYuGZ1y7cP5LjZL7Cw6fPHvr8vxHZquEbjATt5Y9OlMjnVY6dLvgBz5+9/wyjbuPCsKFq2MxUVAyrFmB+2v15YyLCXOA6z1Z11nekUQ2Lj/7wBKcX5Br9OrAfjIRDxMISti3O6fwi6vCx6QqaKYKfQ5JEPCLVFZLzFZ3xnAKOwz7X0Mhx8K/FYLMqGQ0zXlDqNuZVzWKgOcEVQy0cct0Ye5viTBbUuobQchRAbwLTkozWuWZ6OWoDLQmKitjUBwvBL++dIh4J8dqtnbzrslVct6FGmbcd0SyxnVoUwwV9jXRlYlhOfRFR0kWBbDkIJ8xVjVy7oY2bL+3jE6/Y4E9Lgs9tOg4LsklJM5kqyxS8IttBTCw1nRl5abGXr+ocnS9zYLLekXh4Rma+qmE7jm90I/5W/Xt+ppGXLbHJV0w+cPsJvvDIBA8EqK29TQlikRAX9YuGnWHZrO9I1W3CFN0iJxt87MfDAFwyIO6tx2YrS6QDY3kV1Su43XPqnc8bYN94kYdHRfFVVAzmKxoF2ahz9PSeant/I47t8N390zzs0iWPzpS5f2SBt37pcebKGq2pKPMVneGsTE422OQ+T3s6hu3AXSeyxMIiVP0/Hx5DAl68qYPWVIxTWZkPfO8oUJsqlhSDP/7mIf+1TBc0OhrifgzG9v5GPvryjVy7Uay5Z5ZpVD4Z+t21/ycBmqpu2bz0vA629mSYCax577ly
gOs2thEOSXz98WlsB35tSyf/9oYtS4xi1rWnkA3h0rutp4GQJPlNxmzFwLQdjs9VkXVb0Nctcd2komHKmsVYQRWTdcMUofGaxT8/cIay64AevL5tx1mWjfCrgIpuUlBNyprFZEXh6EyZ0UKVAxNV5kpPXxv8bGAyp7J3tIiqG37ms4cXb2gnJLGszvm5hJVCcAXnBGRVXEg3XNjDyz5+L2/7zEPcdXCGM/PLB7LHIiHe+aI13L5vmtv3TTPUkabJnSZ4CBaCQ+1iIhYNh2hOResKQQ9FxWCqoHLhQDMt6Rh52eCnx7Ncv62LnualjmPL4dq/+RmXfPieuu81JCJ0N8U5MVvhjBtQe3K26lPQvPiCV22r2cN/5g3bKKkmf3f3ML+zczUvdYvJdCxct/E6nZVpT8foDjiiec8XdMKMhkOc5xZMazoEhcfr7M6Xlx6LlgD1FmBLTwPTJY3OTJQfHssylldoT0e5bmO7vyG1bYeybqFpOrNV3XUXhJZkhLAkoZoOs9Un1gScXpBRDRvVnfAEYTsOsbDEFx8VWrGXb+mkORlla3cDO9e0cNW6VjZ1ZWhO1rIWW1NRfvfbh/nMfWdwHIcQYlM7lpepqCaybvvaw+PZik/1eDacDGerOmVtqanO/3ZUDRPd/QwaExHfXv6Q2zQwTRvbtHj5lk4y8TB7x4siW9Pd2JcNC9UteoKmGo2JCPOBye9UUeVnIzn+6s6T7J8q8y/3n/EbHl4dWNFMvrx7gmQk5DcTTNuhoornkTWd/dNFSprQB+qmTdjVBnk4PlthLK/UGdc4jsPpBZk/vfVoHRuhuyHGw6MF3n7LAZ9aVtFN0vEIOwMujhf2N6Fbjk/DzLmP/a3L+vnQdWv9x3nh7y2pKAPuBKg1FfW/v6o5geO+J6hNQte0JvnAi4aoaDZ/dsdJ0tGwmBSpwhkVoKzZvstoUyLiT2aDU4uSZvgF6PHZCl98ZIIL+xp53lALbekYf/eqjXzq1zZxxWAzmViYSEgiHg6xuiWJZjqUdIuKLgytrlrTimk75GSTim7VmdKAaKxV3CBuD7pucHC2SF4TU5i+wH3RdkA1nr0Jy4nZim+OBcKlFeBPr1nD+vaUT7+MhCTe/4LVxMIhehrjjAcmvWN5xS/iehrjvHSzuO//64NjPDRaPxX1qKNDbSn+8vZjfHLXMI7j8GffP+bHOdx5dJ43/Mce5soajw7n+PJbL/DpyiEJtvU3YdkOC1WdP71NFGzv+fpB/vL244znFb5/cJbepjhTRZU7jswRj4T8IPfgFOSvXraR3WMF7j6e5cZL++loiPtaOQ+eBGFktr7J9ru37K+b1q1pS5GIhrnpioG697kYlu1w24EZjs6UGZ6v1l3rFw80s723gS89OuHTt2XdIhML09UQ9+m6ILTEb9jRw/NWN/suvL1Ncb9xA/DrF3TTkozQ1yQaUg7CxReEoVU6Fma2rNXRpC3XtVo1HMYKKrGwxIs3tGM5YuqvmjZl16jKe06jrhCsl3v8qqBQ1VHc5qtu2RRd7WRO1ZkqqxyaqmBZNtaTxG/8ojA8X2WipLBvvMLil7Slq4H+puSyNPTnElYKwRX80nFytoxp2oRCEhcM1ltYByd8i3HTCwb9r7f2N9KUjHJqrspedyENFoLbV9UCYNsysTpdDggTl8PudG1TT4Nw3CuqVDSTVcs4up0NXuGkLeoQre/MsOvIHHNlja7GuO+sB3DSLXZvunzAf/zmngZesK6NU1mZeCSEaTtk4mG6G+N1G6+cbPh6DA8TeYWOhlid+QvApUPCDbHTnQS2poX2b9mJYKpe4+F1pnf0NaDoFiMLCltcJzjT7XpatjCg2DNZoeCGv0uICWbVNdZ4sgJruqwtG+R99dpWmhNRcMRU8J9et4W3uPmSsUiIP3jRGn7vqiGi4RBNiSipaIg37Ojm8HTZ1z0Zlk1DPIJmOeQ0E6+5p5s2ZVknJ+sUVZO5ikHxWXAX1CyxydXt53b38JmG
oEmJrxtiYUHlTkXrKH/HZyu8/fJVvOmiPqq6MJZJRkJopk1JM9EM13jBdfwEUax4boD9zQl2jxX5h3tPc2xWXG+ybvmb2Qv7GymrJrcdnOW7B2aR3b8BYsM57xqiHJ+RySoGiimyPS1HbPjCAR3RaE5mJFvFDpzHV7jX3sOjeQ5MlmhNRUnFwv7fLygmPz42zzcfn6aiiQ2rd30Bfjj4SLbKZEHlt752kA/dcYJLB5sYCDR8vOu1ORll55oWPv36LbxgbSt52cBxHN8cwxufbutu4HeeN8A7LltFOhZh32QtB02SxDTei5MIZuQ1xiO+3tqbslUV3b3OxWO8wrYUuOaj4RCNiQhvv7yf/uYEYUlCsxxes7WLrkyMqi60wBXd4th8VVAt3b8xnl90z67q5BWz7lo9OitT0I266YptO5QVQUV7trLZ8lWdyapWtyF88YY2brq0jzVtSTLxcJ1pTlsqimLYNCejjOdr5/loXuHB03lWNSf4zA1b3cB3gfFA3ul4XuHLuydIREN0NcQ4OV/lxJz4txwU1WRsQeb+kwt8+GUb2dSVoS0TY8g1ePEaZ8aina5m2fQ0Jjg+V+WnwzmuP6/D1/B5xVtHJsaGzrSfO/u6HSKLNqg7LKti4gu1Qvn/vnozyWiYsZyC6X4u0bDkr1vdjXESkdBZC8F7TmT5fz85xXu+fpB3fHU/f/zdI/7PIiGJ569tRbeERlMxxPWajkfoaRRGaMPzVV77H3v8c/5l53UQkkRTKKi/Bbh2Qzt/+6pNDLQkiYUl+pri/mQU8HWfU4FplmU75BUT2bDY0dvIb16yivNcrwHDdvBOxYv6G9nanSEEOIhzVdGFIZNXGB6brvLoqQLGs9jI+EXh5FzVd9f1qLm6bWMDRcMgq+gcmapy33AB5RygiuZUnbxuMFfVfDqvd53nFIMdvY28aN1TY4ydq1gpBFfwS8fXdk9i2w6pZIT8omnJEwV1Nge0Plv6G4m7C8gbP/MIAA2BgPV4IDuqLROr68qbts31//gg7/vqAQA2dmdoSUU56XYuvZDonwdHpurpSu954RpuvnKQy9e08ObLVgFQdTdII9kq4ZBEYzLC12++iK/ffJH/dxeqQncyV9boaoiTjkd8rQUIrWPrIn76RF5ZNhdvh6sh9Kab4ZC0bFEMorhKx8L+JGGgJUFzMsJsSed9Owd57fld3LC9G8Ww+NAdJzFNy5+uncwKfWA0JNGSivqubbppo5g2j58poJ/lBi8iJ5YWgt/aP+vbv69rT9PfnKjr2AbRnIxQ1S2u3dDGWGCTlY6G/OcvaZZPJdQth7tOLlDSRFyFAz4t9ZlEThEi85z83F/Mn0lopjAEMi2brz8+jW4JPV3wGh11myWeM6EEhCUJywHVrBm2tKaivP+Fq+nJRPnXB85wYEoUXa/Y0unTIz3kFYNMLMwnX7mRV27u5Le/cZBv7RfuvnlZx3Y7BaYDs+41Ml5SBU3S1YIBYnLtOOiaSchxOD5bZXRBZq6ocn6/aEDddWgO3S0EjsxUSEXD2KZNWILf2TnI+o4039o3wy2uUUU6HkGSJD70kvVct6mdLd0NJKMh/v4np/kLlz5+ZKbC1CJ93ERBQUJMkyRJorcp4ZtSVXSL1lSUhnjY3/i3JCN0N8TrClkQ8Q47V4umnG3Xh8Q7buHrmWSo7qbu+JxM1RANH+/zAc7aLe/MxFBNMRH52D2naHfD7jVLmNgcmK74brAA04soYwU3MkQPPP9kyaXe6Ta66fDwaJGCLGh4Vc181grBkfkqqlnTeL3j8n7eeEEPVw61IEkS6ViEyaLGP993Rmhb3aKoKRFhrKCQiIr77Z6xIkdnKn7jICRJvNVteAWdoj99n6ATJiIhclUD1bCZLWvcP5LzHxN0bvU23LcfmKEpGSUZCYHlMONOI72m5GIa5lhO8YuisARvvLAWdj/YmuTKoRb+/DphyDRb1ohHQr7BTCHQxJ0qqr4zqaJb9LckuX5bF994
9yUAyO6algpQh0OSxNqONPecyPqa2yC++fgUsYDb6viix3jZixMF1dcZZlxdH8CtB4XJ2qOuJ0BHJsbONa0MtCSIu5Nyx3H49v5p392yuyHOZ163mY++dH2diUxfU4LJourTW0MSfnPrbRf3c/W6NhriwpAqEwujmzaWe8xft62blmQUG9jYkRGOvro4lzx98miuylRF5djMUzNcO9fgOPD4mRL3Hl9gtqL7jATTbQ5rpi0yMx0HxbIYK8gsKBozRZ1c1eDQRPmXVgR797eSIaJ3psoGWdnk+k0d/O29p/l3d+r8XMZKIbiCXwp002Lzn/2Yj//gOHceFI6h3S1JPwriL167FWBJYVhSDD79o+N+5zLtGhts7W8iEa2dzt7C9onXbeG/f+viuudoT8c4NFniQ7ce5dBkienABvF9V6+hJRWjORUj55o4tLumDO/54l4+vWv4rO8pqOH40LcPUw7QVDd0Z/idq9fwmTfv4CpX+5Ct6ETDEobl0JaKEpIkOhvi/sSuIyM0GHnZ8J1PM7H6znKuajzlQvCKta38w69v46Yra5PUjkx82YkgiKngWE7mc/ePUtUtVjULM4CtPQ287LwOEtGwH9ngGVCopk1WMfwbfXdDjM5MnHXtKTTTQbdsHp0qcuexBUrL5PeVdRPLxs9+CqKqWxRVkx39jUt+FoTQCML+yRK7zxRoTETY3JUhGQ2jGmKDWKdftB0mKyqKaZOOhQlJYNj2WYvVpwvvmBjP8UXjmYCq6RycKJEri9w52wFZN3ngdB7HcfjZ4VlOTdcoZJ4DoBcX0pKMkJOF1k01hYMtiEnYeV0ZJgoqPxvJ8/hEifUdaS51DTrqXoNh8/mHxtBNmzuPzdfpx3RLaNR008a0bIourS2nGOiWjeFOqoSjrYlu2aiaRaGs88hoHtt2MCyH67bU6N6quxF2HIcj40XmCirZis41G9p54bp6JkTGva/t6G/k3VcOEnabKlAfwD48L7N3vMhtB2f52I+HOTFXpashjiTVpjtBva8kSWzoSKO6pjZfe3yGzz045j+fN8W7dzjHCbf49orslmQUx3EoyCbvv+04B6Yrfl6gpulMl0XYvO04NCYi/vl+NudDL/Q+EwsjIXHPSF4cR/dzuKS/Cc10fD2m5/h5al7m6FQF2WVdBDdhJXe6bDnOMg0lqS6n7ZnEXEWn4GY0Qi2fzoP3ee6fKvPRO4c56k6lm5IRxvMqq5qTbOxM89h4EQd43lBtsv3q87tZ05byqY+O4/hF8W9dMVBXJO12jVlM06ZS0THcY+StTUeny3zktqPsGy8yMlPh7+846f5c/P6esXr66WNjBX7kZtau70z7BRKI6e77r17jT6TnyzqdmZhfdP7a+d3+Y2dKqj8Zrmimr83taUqQjof9gPvUIhbLH169hqpm8d+PTvCIy/QBIeM4OVflbZf1+yY4Q22put/1XFQnCqovp0jHwqxyDWAOuvTZxgCF9c0X9fDn19ao1rNlna88NsWnfnLK/95yMVJ9TXEUw/ZjKSQgHg7xlov6aE+LY7LrRJbbj87R1RATxku2Q9qlRzclIhiWkD54MG0Hw7JRdIO8ZlDUDSaK6nNyKjgyKzNRlMkqGhXDrIt28bSSZdcXwLRsCrpBxTSZK+scGC9xOi8z9yxHvyyHQlVHNS0qmoVp23VrxL5As3/6LHuo5wpWCsEV/FJw695pDMPmS/eNkitrSBI0RuCVn7gXgHXdQs+2eCL4+198jL/+7mHuOzoHwBffeQm/uXOQ3uYE737RGl7i6uw8DeC1mzt9bZyHTe7/f3Bghn++Z8TX7f3Lm7fzm26RFDRi8CaCj5zKcf+JLGdDKdCxPTZd5r6zPLa7SXTgJ/KKT69ZXMxBTcP39q/s4/hcldZU1J0Iiny/v77zJHnFqNNXjMxVmS/rrHE1kXMllRd9/Kd8etcwkiSxc32dzt26AAAgAElEQVR7XWZVR0OcubPcxDoyMR4bK/L1PVM8eCpH
X7PoenqbCsdxuNMNyW5xTXhMy6GkiWKrrFlctKoJw7K5YnUzlusQWNZMRksKd7l23qfmq8y5xbjsTlv6muJs6kxz+WATn37tefQ2xrHcFut5XU8c5dHs2vT/nzuHOTJbobcpTnsmhuxORAx3Y+nBtB1kVxu4vaeBaEjCtKGgPnObRk3T/eO2NN/sfxfGF2TuO1Vk30yRXcPz/obHmyDlixq37RGTMW/P5V3PUVdXFQtLaKaNaTlYNjTExTXgffbBCVdPY3yJ6UNTIkJ/c4I7j2V5//eO8IVHJtjYmeaPrl7DTZf1A6IAqqgWiuFQdjXMVd2koFiYNliWQ1s6hm45dTlswfdyfqBp4b0XK0AZ9ZpN53XXn9OLTVUA3nhBb93/45EQ95zM8n9/PMwXH51gz3iRvRMlBlqT3Pzf+/n9bwlDDp/m7RZSGzrSmHYts+zgdAXbdvjcg2O+g2lRNX29lHe6/tFVq3nFeZ04jqB73n1iAdWwMWybyaJBTjV8Z9GmZMSfjpXPQrP2iqW3XNjLn7xwiNZklJJqodsOqWiIa9a7oemqRVG1KOsm80WdfZNFDs2VUEyLvDv98/JLvSgYy4ZlzF6ftSZMQRPUW+/aXkwt9MwkzutKU9UtvrpXxCI0JaJMFBQGWhI+DbSvKV5H9wXobIgxV9H5yckF7j6xQEWz+O3nD/K8oRYmCiq262B7Yq7KUGvS11dZlsPnfv18tgbWwO/vn6kzYRO5m+J1f25RhAUI6cL23gbe94LVT3gM5iqab0AG8M7nD/Kdd4gm7FRRDZjFmP56J0kS6zszzBY1QhK86/mDdc+5pj3NUFuSXcfm+eCtR6loJveP5Djoyji29Dbw6Tds48JVTUuyENOxMK2pKOMFxZdTZOIROjPi/uG9nqAOLyRJhAL3Do9F8GTnjVd0ZqsGiUgI2xHuu52ZOKZl85ORBXadXOBnp/IML4g11HLERB5qxehi/bhmORyYqGLaonFT0A1OZ587GvNHTuU5NFlmNF8lrxtUTQvZtDAsh9mKUcc0ANFILrlOzY7rrppVNXKazsIvoRCczGtUDctlLth1a/e+qTKd7nk8+jQMjc4lrBSCK/il4Ju7hVV5KCRhmjbxWJjTEzU3xaHONOGQRLas8c8/PM50XkE1LH60TyygXgzBjoFm/vxV5yFJEg3JKK9xHTgnl6GSeHjTZau4+/3P510vWM1jowV+72uCEro+UGD8f/bOO8yOq7z/n7m9bLnbq6RV77Lk3nvB2MZ26MT0BAgBEkoSIBgTQuiETkJvCbGpBozB4N4tWZbVy0rb++7tder5/XFm5s7d4hLMkzg/v8/jx9rd22buzDlv+ZaaQrA+hGZY5MoGA0twMFTd5Kt3ymnhTddtBKiZNHoj4PPRnYgwkiq7oizdjQsnYI4stbNhNcdDxO2J4I8eH+cPdhHm8IwGZ4v84y8OEg/5eYnN0zg2VWAsVeaLvz++QM3UObbpJUxd337+SjbZCepoukJPY8SdymXLOi/7zhNM5lRa4lLpsSUWcHmCumHREgvwjYeG+fMfPMkaG9JX1ixmilItbKJQ4ZETKe4dmOPOE7IorGgSstndGOF9F63kwtXNCODdF/Txiu0dNce7VHiFcwAUFJpjQVIlnW1ddRimwLutm5aUghZCMJlVmSvoVHSTgWdgdfFMoljRODxdspNvawEP5/+nUFWNh0fSDOVKzBR1posaFV0Wc6YluGZzO5vbqp19p/ueKWrueWuJh9BNi5BfQTPsBKms09sYIeiXIi+Zss6O3gZ8iiz67jo66xZXrz+9l89dv4kPXiYhbU4y8sHL1nD2yiZ3wuDs+VIEyUBVNYq6VCc1TNkdPqlLGth7G1aWJWi114+GiN+dyiiKwo7ehhrkgAOfm5/4RwILC8HzVjfz4RetdX9e2RLlwORCddugTybeh6YKDCZL7lr2kd/2c+fRORde61U6/fDv+nl8NEd92M9rTu6qeT0HYhj0+9xJ/V+f
u5wN7XF7YioFPUq67fUpoDkadIufQqWWE5stSyuYdhtpMVPQCAV8XLulHVPICUFDJFDT4AKZJB6bLZDTdYq6QcWQXpIVwyJtN210S0JNJfR7YfLuVVJ9LqOoS06pEBJCGQ3WplYOHPFV27s4qy/hFh8Bn4R8LktE2Wp7znrhl0601YWYzqt8+f4hvmYXa5vs/WowWaJSMahUDAQwPFNEtSdgO3obWNUSJVPS2dxdLQbn+wh6f/6nq9Zz2ooE/3z1es5b3Ux9OMBNV66jy15X56uXOuFMBAF+9Ngof3vzPhSkV+fx2RL5iiHpBqVaOsOa9jiDcyXufOdZXLB2IdfKEXoD+K/Hx7nxtiN86d5BfAqsb6+jLhxgbVucjL2Ge6OnMcJkVq2ZCPp9Cr2J6v02v4D0xpgNna17Gp+4Vc1RLl7TzNWb2rhiQysCyX0+Plfkyw+P8Nsjc/gUuGxtCzMFjQ3tdcRDfhpt+oojLDWart2LdUswmS9T0E1SZQPVNBnPLp7blCo6w3MVlvh6nrMwDBPdFnsxDJM9wzn2jeYXPC5XlCiBwVRRNkrA9n+tFlRedNM1tuq5EHJNTpYMippB0X6v7LP0EgV4ciTH4Ox/fx+fK2nuPlAxrAWWNG87axmJaIChp8g3nw/xQiH4QvyPxFEblmGYchOKRgIEPJOq1oYIjbEg3777OJ+49SBf+u1R9gxW+Q8nphYuPCC9AwF+tWeSiae4OROxIH9+5jLOW1uVSfcWf15oZX0k4E4k0iXdhavuGUrzvv/axy93T3DJp+7nu7Yh7YrWGPGwn4klCkGQ4jFHpwruJrTD5u95Yz43sTkeIh72k6sY/HLflPt7RyzmX24/ynRO5aaXbCBhH4tXHXV2kYKvpykqifyL+GutaYvz1VduY3lTlOFUyTW6HkqV2TuRRyBVCK/Z3IElZPHmeKtppmBjRx1lXS76+yfyBP2KC9fKVEwyFZMnp3OkKyZTBZXZvOpyeNrrQ5Q1k7//1RE+f88gTbGg25X1fk+HJvM1vBjAVXZzYnNnHc02T6qrQaom6kZ1RW+JBTGFXAx3j+XQTTm5HMs8N4v7Pf1p9k7nKagmhYpJwTPB+P8p9o3muO1wkpmiRq5iIoSET6q2cJJhCRqjwRp+raNuWKgYfNuGMLbGQ6RKOid1N6CZ8jUyZYOwHz58+1FOzJWwhFTa/OmbTuEL9wzwsd/1u5CwU5Y10hwL0tkQ5jIbph0J+lz4njM9dAogAZQNi5mCUfWctDOCtrogwoaQOmGagpDfR2djmOu/8ChlO0Fd2RLjX1+6hb88uyoKlbGLIp+i8P5LV7PDniDOLySc8EKmVzTVQuHeeX4fpy5rrPFJfHQwVWMs/7UHhyU/DEh6/Aun8hov3dbB56/byMVrW3jDaVVLm/pwgKs3tnF4usBXHpTfQXM0SHdD2LV/mCiqVAwT1RQoioRnOxNI70RQMyze+KN9fPR3/Tw+kkEBV8p/jQfa12GvfX+2pQqtLesmM0WVVNkgWTLQ7O/AtARDcyUMQ0rwF2x+r2VJH1EnnCnM8enFm3nzYyxV4sFjKeZyT3+vaqZUTLaEIBryL4APdtWH+eTV6+hJRNxJJ1TFfZY1RdjUWc+3X72Nc21VTtMS/Hr/FPf2z9FeF64R0WqNh9wp1J7RDJZpoQi59nq5eY8NpHnJVx4jVdRqkDHegsk0Bddtk+f5qi0dnL+mhU9ft4lzV7ewqjVGXq0qx84WVC7+0iPc57FlANkYSRY12uvDHJ7M87nfH+eh4ykOTuTY0FHHkekCuYpBXdhPpqRXbVuQhV5BNRblqoOc6DvxM7sRPFvQWN9R5wrLNEaDNbBiJzobwkzlVbfgqLN9EV+8SQrDAC5fc7EYdSaCT9NACPh9vOaUbq7b2uF6L2ZKBt94bIzJnMqalhjvOmcF59qQ31goQFm33AKwuyFCUzTowp3Bvl4tQU43KGuWWyBll6AsPDlW4PD0H1f4PJPYM5rnoRNS
uObQZImRbInRTPU9dd1gz0iOXSNZsrr0AnXglbK4swjY94d3cr62Nc7alpgrDgVQsHl5Rc181p6KmqYzkilxdPq/bwUlqSrC9fO0hKhpoNWFA7xsayeXee7p52O8UAi+EP8jUbYXZifZaoiH3N+BXNgT8ZC7sJc1g0eOzaEosKwlxsDM4jd3j53s/eDBYS76xH2kixqWJRYtCuPhAJ97xVb3Z+/mfandnXJ+n/RIUw/OFrnzwDQ3fH0Xv3xigvfdvI/pbPXvzXVhuhLRJSeCIP0Sx9Jlt9g4eZFCsDFaK7/dEg+6Uw2vEl806OfQRI49I1necsFKLtlY/ewpbyG4iE2EU/COPUXRvKI5ylCqzNq2GA2RAD/YOcaesSzxkJ9/vX4T3XbhlYjKpNjpdnsNrw9MSnNky4KwX6GnIUxBMylqcnPIayZHpgpopsC0LL736CifuVvyMp6wJ8Xpkk7Ir7iLMsBf/3g/N952pObzetVS33vRSl5xcrfbgY7bz9XcjrxCXTjAhrY4iUiQlliQgE96yKX+Gx1IkMbxuwarCm8j+TKzRc1NJgxT1MBOdd3gwHj+/3xxeHSuwGSxgmZK3tdM0SBddoyF5WMabcEov82VCfoVggEfpilcWf3WuiCZskF3QxhhT5B8irzGDkwW+KZdMCbm3T/vOr+PG07tqWkUOB5gjZGAe/87jQYnWbYsQdkwOTwtr8+AT8ESEPIrElZtJ+AOxM6xXehNRFx/NyGkoMj3Hx6ukSA3LeEWA6evSPDBy9Zw/dYOfrhzbNHJS1MsiE+Bl53U6d53IEVyTlveyAcvX0OyoNIYDRAN+kgWdaIhP289Zzl/dY4sQEczZfceOqm7nu099dx42Wqu3Njmvt6KJq/tguDM5Qk+d++Qy09sigXd+90wLYq6QVmXSp26KRMnZ+32FoIO13P/ZJ7v7xynvS7kwqqCtn8jVGGjZ65I8BenS6huxZTm9VWFS/v7ETCULTOdVxf4BK5srhaXlv1Zj85W9465nMYjJ9KMLwLtOjhVZKRQYudIekkp+4GZEsWKbotdyKZmQ7j2ujsyXeCDtx3lXrt46qgPc+XGVjZ2xDk4VcCnwBp76uVtcv3m4DT/erdsYmzyQIcT0QB/d8kqQIqFDSXLWPZ9YBgLP+dMXiVbNmiJh7j9b86iqzFSIySDkJzpW954Cu+9eFXNcx2LI2e/GbA5/N94aKj2PCRLCOR+8gtb8AhkI3JjZx0T2QpD9uTSErKpeXA8x0yu4iqXDi2BwFjrsYTxwjivP6k6uXbu9a8/NFzDGe20/W8dWwdnsnfxulZ+8saT2dZdv+REUAjBcZsnm1mkUbpUOE0c51O8/tQe3nR6L92NEaJBP03RAAenC1QMiw57jwwHfLz1zF5O80DJBXJ9MCzhwsrLuoVm1E7Yj0wU2DeaY7aokqxoDCafWaPjvxvjuQpzZZXpvMZkrkJG08lousup3zdeZDRTIqXKny0k5cNLxwjZiIeJnEpJlw2UoXSZ9vpaFEDZsMhV5NpSeJZefamSSVrTSWt6TR70bMJRj97R00idLbLlIBW67CJ2XVt8ART8+RbPSSGoKMqLFEU5qijKcUVR3r/I38OKotxi//0xRVH6PH/7gP37o4qiXOH5/ZCiKPsVRXlSUZTHn4vP+UL874i8ZzN3or0xxLRHAc/nU2q4NIfGstyxd5KNPY3sWNnMwBJdnnpP99sSMDRX4nf7p7jg4/ex48Y7+cnOsZrH+30KP3jzKfzwzafU/D4U8PGff3Eq/3bDdqB2snZipshHbj3EKo+0d6PnfZvjIboTESafYqK0sUsu+K87rZdL1rcuCg1VFIWPv2RD9XVjIeKeJONbrzlJGuR217ub6FmrakUnvAXsYqIwy+xCcPQpMO59LTHGM2WCfh/vOK+PoVSZ+46n2NRZJ5VH7WTa6ZTFAn4iAZ8Lwzqpu56Dk3la4kHXUuK8lU1YAmaKkjhe1CzGc2WplGYKxrOqK+vt
7OsZ21bAhQt6Ot/ZJTbqbT0NBHyKu3j7fFLRTTcFumFRVg2mchVecVIno5kK23saaI5JQ/q8ai4qaPN0cf/xJPtmchycLKJqOiVbCtwJSwhmCtVr/YnRPI9PpHloKPes3+t/OmazKslFGgzzQ9N0kmXpLZkuG2Qqjo+ULAJ9yIIr4veh6iZ+v0Ik7COTLmLpBoZhcWg0y9Hpgnu9OUVDQJFy7g7c6LidrCaiwZqk0DAFf3ZSZ03Dx+HhehVoHX87r32EZkpBIdMSNEb8bGiL0xoPMZqp0BAJSPhoXqNS0lnfHmcsXUb1KM9ebU9cvnTXAIcn867aoRCCGc896vcp/HDnKA8NpDhhH8eX7h3gxtuOIIRAURR++qZTeM2pPTVrxrdevc2ddswVNdrrwrTEq+rIV2xo47zVzfgUODZTdKcWK5ujvOPcFS4c1oneRIQ3nt7DpWtbyJQN3v6zQzV/rwv73UaVbkJBNdHMKsTQm1x7bQTG56mcdjaEOJEsu0gBZw05PF3g6w+Pki7pbpKlGdL+QUGqNzqKipYQFA2T47Ny/VCAN53agxCC1rraxFK3BGm7waNpOrtG0gxlS+weyzJjN/MOjufZOZghU9Eo6SZzFXXRRlmupPHkZJaHBjJugdJRF1rQwHO+xxnPffLSbZ2898KV7B7NsrYtzj39c/zu0EzN8/aPV9eDZZ5J8NdfudXlE+4aztQUdfoSybJhWHzpjn5Cfh//cv0m95wG/Qo+ReHWPZP8+bd2oSgK9x+b4/b9U7zvx/vdyY2zvjrX00S21ipj94hs1m3sqON3B6a5xG4qJIsaG22+/7GZAqM2H7+tLsR1X3iY677wCH12ITg4V2I2r/L+nx3kPx8ddV/7rJXNfP+1293r/cNXruPqLR1c5JnCOPvvL/dN8YgHOdRpH+eR6QIhv1IjKKcoCg2RQE1T1Ru7RrLu/Z0u60tCYueHo3yaCAf4yzN62dxRV6Mw2lkfdieNq+xGxXimQiTg59RltaJWloCgT3IO+5qiCCDv+Y4rms5gqih9cQ0DQwi3APtTRU6XXL+RtOpOJw0hyJUFum4wkauQ1nRKhkmmbFDU5KQ+6FPcBo+Xa5dXLWaLBv/2yCi5ilVzng1LuHDMZ+up6PCdNctiaO7ZT0k1TadkSDRHyO/jzacvpzkWpCka5EOXrObtHmTH8z3+6EJQURQ/8FXgSmAT8GpFUTbNe9ibgbQQYg3weeBT9nM3Aa8CNgMvAr5mv54TFwkhtgshTuWF+D8RmZLGG78p63q/RyGrycbKJ2JBTlkpi5nhOdnZ2ro8wYHRLAdGM7zl0jVsXZ5gaLbI4fEsi8U9Hzifn77zTAAm0mV3EShUDD5tS697Y3N3A5u6FypRbuiqd7335jxF1L2HZ5nOqrz6rGXu7z71yq2caj+2uS5EVyLylBPB9XaHNxr086EXrVvycWetbHanFologDNWNHHp+lbefNZyVrfF+YfL1xIO+N2iqGmep2CyoLkdyuQiheAzmQj2JiJYQkK4Tl3eyCt2dLGtu54/t/mYnQ1hYkEfjw1JxcdkSWdVS5TpgkYk4OPCtS1kKwZhv90FzGr8+uAsp/TUnvOJgoplF4rz4+Bk3hbGqU4Zd3kU7hzZ828+NMyteyc53zbjdhJepxDMVkzabdhZSTPIqwapksFn7xlCMwWbO+vorA/J5FI3F5g4P5OYKKpkKgZjuTK/PDhDtmKSLptukmxatbC8oUyJZMlgPP/c8wxUVePY1J+uQ3zvQJJ7T1RFke7vT3F0SjZpjk0V3QbKUEpOa7KVKqTPiRUJqRw4OpHjDf++E8MUhIN+/v4KyYfL5eV9VChovO3mfa6/oFO8OYq2uinY5uFBNcWCzBZUymWdYlFbUIRAdSLodU9QFIVEVE6HVzRFXGPntK2GO5ZW2TuRl0IU6QrxkN+dGOmm4MBYDiEETw6lFy00dw2laasPEQvJDvP8Bo2TON/bL8/rL/ZO8eCJFHccnq15
XHfj4l1oqSQcpDkeYudwxoXTRYJ++ppjHJstutL1nQ2Lv4aiKJyzsom1bbFF/+6zzxE4Cp2yqHcmLifmitSF/ZzUXc+ByQLv/OlBPvDrI3z27oGa12mMBClpJm//6SF+c2iGgN+HaQn2TRTYNZrllicnaQj7XQGnii7VBedKuuvDZgnJs5stqRhCEPQr9CQimEK+/t+ev4IXb5QQYNOSUMBiWWM4rTJbkROJlKqx1xYg6Z8rMJgpklENsjaU+/icvDd3DmbYNyofN53XyOsGU6WKe01rpnDXHJCFvgN9bZhnsJ4qapyYK3FybyOfv3uAT3kUqU1LuP6ypiWhz6/c0cW5q5pqxL4ePJEiGvBhGJZtwi2IBH1STbdS5cxpmomqW9y+d4qTljW6HPJTVza5fpeOcNG7b9nPjbce5p6jc+yyVTAd6wqvsNgVX32UX9kUhSdGM/S1RLnr8AxFzeTVp/cSD/n5yePjzMy7705ZkXAbobN5lfb6MLGQn6G5Il+7Z4A/HJrhC3cer+FSL2+Osa27njP6Ely0rpX3XrK6hkriFYN6yEMVcK7v/ZN5uhsjCyC79ZHAkhPBnbbi9HVbO9BNwaGpAkOppy8oHOXT67d2sNbDb3Si11PUt8aDPDqU4aY7jnPrgWlUz7Qv4vfRHAu5AjKn9ErUkGZWFa2PTZdJqzopTaoZlzSTsmFQ+ROhSzRNx7Qhq8mShmqa5CoGBdVkKl+hqFtkdfnZnGmmQw+pDwfY1tWAJQRzS/hD33k85TYJQRbBTqimxWMDGaaW4EjOj1xFp6iZaKZFsqyTLurcdzTlQmezJZ2DY4VFvap13WAyZ6CaFkXd4ua9UxyfK1LQTOrCfhoigRoF3ed7PBdHcjpwXAgxIITQgJuBa+c95lrg+/a/fwpcosg78lrgZiGEKoQYBI7br/dC/B+Nn+0aZ+9IFiEExVwZQzcIBHxYFR1FgQf/+XJ+/f4LAei1OSMX2DDNl56xnFectYLXnNtHPBzg63/oX/Q9eptjrLaNW8fSZZf/dt66VnJlndKzxJoDLn+hpynC7w9IOe0zVlenbxu66/nWm0/h1r89m3DAR1ciSrKg1aizeSMRC+JXlAX2GIvFP75oLVu76+lqjNDREOYfX7SOG2y4lBOZko5fUaifl2yMJUskUwWwLBcamiyori9bNOSnpS70lIWg04l1OI+vOrmbj1y5zhW4iAb9vPms5Qymyu4muKY1zmS2QmdDmDNWJGy/q+qxzhU1Fyam2+bPJV12/wK+hRypG28/xvHZUg106uBklUs2lCwxlCzxo8fH+eK9g1y4ppmbX7/D/XtTLIhfkcbkmzrk5uwkSVs66xjLVgj5Fda3x2mvC7vJ/2iu8qxtJFTDoqSbpMs6EwUV1cNlAjnBSHsUZou2yEb5WUJfnkncfTzDzrHUkiJHf2xMFlQm7InW4EyR/lSBJyYyPDqQZtd4igdsEaATqaJtSl4tAs9Y3shrdnRhWoJwQKHkwiglD0O1z4cloKc5gmlK6OVt9v3ntQJosxsgl66vik0kokGm8yq6LhNkx3MvXzF4+bceZ89o1oWU1c2D8zXFArTVhTjXNqg3LPmf6fHUk4b1KsGAD8NzXJYQRIN+iqrpNose60+6xWK+YtAUD9Fo2zHMzJuoOoqjt+yeYO94lqDdMNs7Vtv4al/C3zRV0miOhWiJyyLrK/cNugn8qpYYA8mSC43uehpIk5ef9dpTa0VMnHNnWbhczTaPit5pyxMuT3A8W+HoItdgfdhPb2OE1niIX+yfIREJoOpy6nfuyiYeH83x4yenCAd8Ul3WEhR1i3F7ereiKeKKUGRVA8uCmA2JBfk9bumqZ4O9H1hCvsZIWmM4XaagWWQqBjnVZLakomk6Od2gqJvu9VXWLVIVjd1DWQYzRfqTstGRKmqky1L5ULcEkYBCQTMJ+nDXuu89NsZvDspJ33yO08N2kbWjt4FyWadS0dEMC82weMW3H2dgtohW1imX
dabzKq88uZv3XFSFbqqGya7hDI2RAKWSTtE+5vUddaxuicniTzV5x8WrONu2J/nNXtkUmMmpNMWDrOmoQzVMdy300jOg2iBxhDqm8xqt8RBvPXcFpiV4eDCFEILDUwU64iG+dNcAZ6xqYvuyRprrpBDZB35+iNa6kNsIu3JrB4Oz1WtBURSWNUlroscG08RCfiwBv3xysgap8veXreHjL9m44BoCXE48wD39SVdVtLMh7DYWly1iSdQQDlDQzEV94GYKKt2NYReBcOPtx3jPLw6758i0BA+cSNVMvAGiIbl3lZZYzy9Y2cx5K5u4cFUzb/3JIb712Jh7br0cx1DARzToY2tnPc3RIA8PZ1wz82RJvvZ0QUUXEjpa0ExKuvTCnS3+aSwmZosG+YpU6y1oOgXNdAWbkiWNubyGYVmoulWzx1lCTkK3ddVz/qoWTEtw9jxLH+dnzRSupZBXIEa3BFOFCocmntleVlAlpDRXkUqlJ2aLJCsqx2eLjCcr7BnNMZQp8cTIQiTOntECh6fy6JZwr41bD8xQVE3qQoEFj3++x3NRCPYAo56fx+zfLfoYIYQBZIGWp3muAH6vKMpuRVHestSbK4ryFkVRHlcU5fHZ2dmlHvZC/C+JA/YCrWsGpmmhawaJxjATcwW2LEvQ7Elubv2787njHy/mpWcs58y1ra63YFM8xCmrmjm+hGAMQF0kQFMsyHiqTKqg0Z2I8LpzV2AJ2D/67CF4ybxKLORnU4/syrU3hFnVFueD12ygoyFMdyJCPBxgsz3lcuEus4svWj5FIRELuvygp4rtvY186eVbazrB8yNd0mmMBRaYrI+nSggBxUKFWdt76lVffYzLPv2Au8D1NkXdQnCxDdEtBBeZqDhxkj2J2ccks84AACAASURBVNFbz2tO7mJHTz37JvJstAn9Z/QlJI9ECDa0x7l0XQsHJgv0JSLkKybZkkzGLEtOFzZ11i94j4ph1QhfHJnKs723gVjIz2CyxK/tAgHg8FSB0DzPqys3tXNPf5IdvfK1nXO1o7eB1S0x3nneCsIBH81xWwDEFOQ0nWMzz25Sly6b5FWLlM1/A5moVlUoqzL3IKX6U2WpdPhcRknVGMmVmS5K+NCfInQbNjmaKvHkZI5MxWC2qNOfKjBd0JkqqqiqxlxRcxPrc/oSvOe8FSgC6kMBTiTLBJQqFNPQTYZG03z4ln3u+6xsiWFZglxOZTxdJh7yc9TDE3Ymvq3xEDe9aC2XrGshGvS5RtAgmwDffGiYl3x9J3NFjX97YIieRIRX7Oji3ReuBOT1/+CJJI02HMxR9StpUiXU8BSfJVvh1jDlNMbhCJqmcJPOM9dI+NqJmSIlz73eHAu603svNBRkArOtu4H6cIBvPzzicuHmS5T7fQqv3NHFh2xDb+fzp0s6LfEQLTWiMWn7PEYpqCZXb26nMRJwlTudqOgm9x1PuoWB1w/v/FVNfOrqdXzqaolgcO7F+rDfVumUTSFLSHGH5U1R3nD6Mq7d2sHbz13Bv71iC/OjqJl85EVruOmK1XTUhxhKlumoC7EsEeHqzRJe+IdjSUZSFQxLuE0VkPdUsiCLJ0tA2TAxhSARDbo+e9220qUjBGRY0t5jMl8mVa7K1+umIKcZDKVVchXJXfVOODMVnePpAkXdJGdzf9MVHdO+ly0hCPkUSprJvf1J/uLm/XzvsTF+fbAK98zPu78fHc6wojmKYVqk0xXS6Qoj6TKPDKZIlXR03aKsmei6xRfvGSA3j7N8YraEapjMzluXd/UnedJGMpzWl+ANZy8nbHOy9o1kMUyLyUyFzsYIfa1xhKiqiO6fh7JxkCbO/2fyKu31IV51Sg9nr2xitqAxbYuxODDSj14rVby9FiifvW4j126VvoLJvMrrv7Greg2oBu0NYfYMZ5jOqfzVhStZ0x7nE7cf48++9qg7GVQUZcH+5kR3Y4SPXbOBH73hZBLRAO/6yQEeG0oTDfpZbsOeexILvXUdSOli
HMDpvEZHXXiBeu2dti3UAydSfP7eQV73H3v55J0n3L/H7UKhsMR6Hgr4uGZTuwtbBdlUKmlmDY9OQa47l69vYzyn8eREXnLoBUzlVIZmy+Q1Cb2MBQME7OlZWbf+ZFYL0znNpTkUbRsIJ9JlnWOzJQqaSV5beOxlw+Lrj47y4GBaKptvbuddHruQqze2ceGqZhSgqFnMFnVCfh9bu+rd6aLkIz598zxf1mv8S8u6SbIoJ/gpVWP/VJ5kWSOlqiQrKsVy7WtOFST30bSq1JRUWSqfPp2C7PMxnotCcLE7c342udRjnuq55wghTkZCTv9aUZTzF3tzIcQ3hBCnCiFObWtrW+whL8T/onAglqbdLYtGg0SDPh4fSHOJRyEOoLspxtblCdZ3N/Dz951Pu4cT01wXetppWk9zlPF0mbm8Rmt9mO0rGvH7FB44+uwaBhXd5K5Ds6xsi9NlJ3gXbWxDURTeeH4fD9540QLIyRpb2vv4UyhWJWLBminZHxPpouZaUXjDK2vvCOY4UNl9NrRyWVOU0XSZ7z40zGX/+iBzBbVGRbQpJr+jpyoEm2JBGiIBSqrJxWtbuKdfJpPX2r6OF6xpQbWtE/qao1ywupmQX+GJsVqFSNMSVHSTzvow12/r4HWn9fDTN53sSt6fY09oNMPixFyJjR31rG+v48BknidGM5xuTx/Ti3D7XnqSTET2juf52ks3ucmuaQk+cOkq15/Qy2PIVyyOPUvyvVNMlw2TGbsz662vHdgpSPntop2MVgxB3nPe5/Iqv9o3xUPHa1VRn2kcmSqRVw0sIUiVn/sOcaWiMlfUSZUMdo9lmSqpaKaFasoi2CkIjsyUyaoGhgWXr2uhpyHCNx4d4/bDc3ziLgkVTNmiTtFogIXbB67iYbksZfJXtUSl+bb9HToiIc2xICf1NHD9tk6GU2VOeBox9/fP8aPHx92fS5qJT1F41cndtNeH0U2LX++f4sbbjjJX0EiXdBfOZwkhC0HPF+nYExTKOpYlqJRUtIo8jvpwAL9P4aR5IlBBvyInKGM5Dg1nCCrwhM19dCKvGnQ0hLlgbQv7bQXV7sYww6nSAp7SK0/urhGaypZ1W4wjWIMOcGDUfTYnqSHs53PXbljQXPr+zjG+eN8QR2aKHJ0u8Irv7XE5aIoi+cDOhCQalOIuyzwJ9kxBcxOlnsYw23sbeP3pvVy6vpWO+jA/fO12vvKyzWzpqiPoV1ykRdDv489P7mauqDOcrtASD7qw3er5hopu0R4P8d7z+7h0bQsj6Qq6KchWJPTREvIx335sHL9PcS14HLimYQkqhoSv52wu1Q0n99AYDlDRLQaTJTfR1UyZjBqWhWpYrm+hA5P0JvqWwIUqOgXfr+zG1IeuWMOa1tgCCOJkTmVNa4xDkzm59pmCgbkifzgyR0s8SFPITz6vUS7rHJspcMeh2n2rf7a4QCXUnKec6TQmnX1XNSw2vv/33Hdklt6mqLufOdfVfcdq1UAf6E+iqgZZDzS0w54St9setMdtqJ0ChPw+V5jLW1wlYiHOtRE0B0Zri82RZIn2hrCr7HlaXxNfeOU2dzI49gw92s5Z1UxXY4Svv/okQN5XIKfgUBXi6Z8puMfrwDRHPIgYIQRPjueYLWi01Ydonzc1d7wFf3u4WuTv8/A560N+fAqusNJS4TSBP3DJKjZ31pEp6y4Pzq/I72qupKEalssxzNvG5rMFjSMzeSqGSVm3GEqX3QmkaYkleY9/bGQr1X1VN+V0Peh8Nt0ko0oeeHs8SEd9mEQkQCTgQzcFj45kGUiVmS5orGyO4lMUej05XcDvozEaQIALtb54bSupksFcyXAbAupT2C8ZhiktLUbz7r4Kcjrr2FBUTIu0qlEwpHBR0TCZslEZu4ey7BnOkdcNKqaJYVropuB0zxo7nC7zS/vevv3wLF95YKH35vMtnotCcAxY5vm5F5hY6jGKogSARiD1VM8VQjj/nwF+wQuQ0f8TMTxXQlGqG4+C
YGY6T19bnHdcuf4Zv05TPLRACWrfcLqGON9jT7rmCirNdSESsRDnr2/lF7snajr7Txe3PTnJSLLE3714nbsJnr/+qZsOfW3SB7H/KQrBpvgzmwh6o7iE1HWmrNfAYwzT4kePjNQUyxOZcs35+dnj4wgh6G2KMpvX+O6Dw2TLBi/58qNc9NkH2W9D0RRFobsxwvhTcB4VRWFVS4zBZAlLCB4YSLFjWaO7iW7tqica9LGmNca1W9pJRIN84bqFMB9HBbKzIcxrT+vlum2d+BSFD12+hu+8ZhsbOuoo21MLwxJs6qpne28Dx2eLDCXLbOtpIBELusmaNxqjQda1x9k9miXgV1wSuldoRgjhTkAT4QBlwyJd/u8V6w4MsqTKiSfI5Fgmq3JTmsqqbnEhEPz+aJVvt3s0y1ihwpFkfsnJ8lPFWE5yNmaLhiuQ8VxGqix9lXRLMFPUKKiSC5kpm1R0qQpa0CwOzxYoaHJi1hwN8u8PjzKSrl5LsaCPSdsU2zQslEUKQSdhNQyLSsUgFvRT0S2u39LOx69a64pYOMneR28/yhv/40lufnwcXTfRNJPcvGsiWdTcol0Iwbt/dpAv3jsISKhxrmJw8+5xLCFY6RFT2dJVx42XraYxHCDgU8iUJbrBsv+LhfwYpsWK1hjjqTK63Zle3R7n9r85m0s3tHFiukCuLKGMA3Ml3nbzPhfOlrdl9i/2wFzP6GuirFsLYKQLvhN7PXmkP8mjHq6U4xXqiMIMJBdPrh3I5XRO5Te2eMmLN7by0SvXLHisoii014VdqJwQgsFUmW57jZzJq1z0xYdrOJDxkJ/uxggfffF61rbFXTVHgLVtMVcd0fkebzilCke1bPuH5U0ROurDrjqnsOGemq3cesjmxPoVOV0VQnBP/5wUIwr4pGm1ZlAx5YT3v56cpD4SwBTSN0wISEQCtMbD5FT7ejakr5n8HFAoa1RMy1VKtQRUtFqhiwvXNPPGM3rZ3tNAUyxYY1NgWoJsWacpGqxp9DxwLMnOoTQYwoXvG4a0DvCqrz40kOIL9wygCGrWdHXeFOob9wxyxWceYDavurx0J7qbojREnXMof3fzzrEF3XlVNZkpqExkK4xlKqxqkdD69voQBdV0VZtVw6K9IexO7bwCaxOZirvOOn9/yQ7JU3z79/fQ4tm7+lpjdCUifON1Etp/4lmufY3RIH0tUXe/uuHUHs5d1URvIsx3HhnhLf+1jwfse8O5H/75juP81ObS3t2f5KO/k7STjvqFE8G5osZcQaN/tsTrTuvhdaf3UDEsFyLq8yk0RgILpowSqlj9flJlnYBPYVVLlKZYkGzFcIu5jZ11qLq8vm4/MkuqrNNRF0K1rY0yqkayopGz15bL17WyrjXOSldQ5k9TCHqRLM40fYvdpKuYJqot4NQUC/Hq7d10NUZZ0RSlpJtEAj73XPZ57LnOWN7IBXaDNzGP2pIs6TxhF9ma6XDMLfYuAucEeHQgyyMDWWZLKmWz+lk1S0jhp6LkUmqWhWUJ/ChYlmDOnqCO58qMZEuunZEubVFZ3RLjhpO7uWRNC3f3p/j1wVnGMxV+vm+aJyfyT+lF+XyI56IQ3AWsVRRlpaIoIaT4y6/mPeZXwOvtf78MuFvIFfNXwKtsVdGVwFpgp6IocUVR6gEURYkDlwMHnoPP+kL8ieL4VL5m+rRUzGbLFLLVzrapW+QLGpds6SRmQyq+dNtB7t4/v5dQG011IbIl3S3o/uuhIV708Xu4Y2/1eb1NUcZT9kTQ7gxfs6OLmZzKkcmlYaXz49B4jnjYz1lrWnjDeX18802ncNmW9qd8TjjgY0VL7Ck9q5pjoWd0zpz4yt0nOP/TD/DowMIJUbpYLQSFEHzgxwe46eeHaPN0M8dTJcY9Sqa3PDbG7XunXMEYpyPrdCUPePzc+lpiHJ0puOp+i8XKliijmQoHJwskizrneRRM/T6F1niIsN/nTiFCAR8ne+SyO+rCrojFfDnmxqgjVy/4
wC8P8/E7+mmNhzizL8H23mq3bpuddHkngrppucnojt5GTsyVmPDYfTgbtmkJPvDro7ztlv1EAz7bnBxyi8BclopKRZWJqWG5sBmnQ28JgapLPo5qSN+t/rmSWwjqpmCyWEFVNXIljfF8hbJukamYnPhvSIJnKhpFW7my8CyO4ZnGXLF6Dku66b6XbgkX4lTSLebKGhX7fHztodGa11jVHOW1p3Sj6haVUoV0soCum7TM+/4Nw+LgJy6nMxGRiWlWqvkdmy3SHAtx3/Eka1pj7rU1YyehliVsrpRR9QW0hTRKmsln7zzORLbCnUfnajin6ZJUCXxiLEdFM1ndHOPK9S34kMp3vYkIjwxlWN0aI1fSMRw+oyU4oy/BwfEcm7sbOPfDv0eraAghiIUCNMdDXLWts3reVMPNwn/25CTv/flBippJfTjgJlgAZ/bJRKl/tkiyqPHn39vNj3bVKiCDLG6FENx1eJbHTqRY0xrj7JVNbqEcC/lZlohwxG5Q3dufrBHAMOxEbiRddouDpljQhVgCTOYqlOzrqbMh5J5r3ZQT/WjQh1/BtUvoXyKR76gPu0IqIKeCcZtj7MBaL1zTzLsvkPAxR0Xa+SxOMetA7FRDoHm4mrpp8aYf7eP1/7mX/3h8grImDc0N+/pUDXlvTeRUdo/n0U0pUV/QTCbyGmM2AsIUtSqHAANzUmQmp5rMlWSxOZZV3fXxby7o410XrOSaLR34FIX6cIDhVJkf7BxzJzaWkOf24Fg1qb3j4DQl1eDQSAbdXjeEkJY7Xpjz121TeU0z3QktsKh9xOBsiYl0mdNX1ypKJ6IBd9J06YZqY/P2vzmbSza08c/XbqQ5HiToVxhLV/jV/il8ClyxST7W63N74doW5vIqnR4Boxuvripej3u4+kXVYGVbjE+/ahvnrmthLFV2OavhQHV/6GuNoQADS1w/umnxvYeG2TuaJVnQarj/vYmoa83UFAvynotW8Xc/P8QPbdXwKRs67BX2+dHuCUxL8JM9k+7vWuMh/B4lqUjQx1xBY49dnOzobXSvv2lPk6YxWtuMtCzBp+8e5EO39zNoK3ynPSrYiWgAS0h/REWB9W1x0mWDbZ11PDIsp/mXrWtFQd5nBd3EEALTkpPYrV0NXL6+zeU6l/4E6z1UeY+NkYDbrtMMUWMdA9AaDaBbgntPpLh/MENzLMTyRIQNttptT2OEhwbT5CoGL93ayVW25ZVXfR2qdjMAuiWPPauajGZLrj0TQDqv8+iJNLNlldmyak/7BLrrUWu5KJyiKjn5Jd1iuqiRrRhuk8axwshrJtmK6d73XQ2S3+gtmG66oyrudHwJ65PnS/zRhaDN+XsHcAdwGPixEOKgoigfVRTlJfbDvg20KIpyHHgP8H77uQeBHwOHgN8Bfy2EMIEO4EFFUfYCO4HfCCF+98d+1hfiTxOaYXH+TX/gzz53/5KPGUmW+NRtRyl5FseWhjBlVY78d6ysblI3/ugJrv/EXTxwaGqxlwJwuYSO4tOXfyvVQNMeNcae5iiqIYuAVnuxXm7DRGYWMVd3Yu9IhpP+8Q8ctCdiRybzrO+sx+dTCAV8XGjDQheLz//mMB+6eS8AW5c18sDRWfd15oczEbSE4KZfHmaPDd8a9WyMTggh+O5D0h9tZB5U5n0/3s9Qsiqk8sRQhlufmODtl6xm+7JGt8tuGIJ7beXB7/3lqfQ2RfnZrnGWeaYdr/dIIo974DJn9DWRLulPac66sjmGYQnuOCLfY3tvrSpocyxIah5k821nLXPNq4/OFGmMyERwKTXDx0ey7LU34XdfvIqA38fmrnrecMYyXn9GL5s662mKVietQgje9ZMDvOI7u9EMi1Ut8lh3e+BJDuzpzqNzHLOTjnjQx1ReJtVF7ZlPj6fyOtmKSb5iuh1TJ4+oaCbHZopSTMaweGQgxXRRpaJbpAqSr1TRLSbzGg8MpMmqEoZWMSxSpWffcfSqr1WMZ2dir+sGhvHUyYR3mp2t
mDUebkXdoqchjEDyPTRToNiZg6LANZvb+NTV6/jgZatJlnQ03UA4Yjqm4MU75CRos339zuVVQgEfl25qx7IEw8kSpy5r5MmxHE+MZpnOa7xse9VXrKKZrqef9PiTsDlVNSgUNDTNxGda/O7wLO+4ZT8fv6OfroYwrz29l1faUygn9w/4FAqqyWnLElQMi7Z4iF0jGabyKldtakc1LAzDQtd0DN3g3kMzpIs6Mx5ephDCnc57JyWmJfjPN5zMVZvbua8/yROjWSxLMJWpoCgKn71+E9dt62R7byMhv4+P3H6Uj/3uGBNZlW8+PMLsPH7hRLZSUwxctamd1W1x17geYHNXvWvw/aX7h3j/r+REp6Kb7qT21v3T7Pb4d3o/71//5CA3/PBJvvrAEG3xELNFjb85ZznbbDsZzbBoqwu7Tbollkq6GsKkS3rNlCTgr/VyhGrh54joONDtATv5chpXqimTUZ8CbzithzOWywaRF8IZtG1BSrqJasgJ46u3d1Ef9lPWJZS5rFvkVfn/c/oS+JTqdD9TkZys48k8Jc/94XCBL17TgqYZ+OYVjg5M99b90/zzHf1u8d0UCzKdqWDoBoZuoOsWlbLuFoHt9jo4l64wbD/HMC1mCyqxoA/NsNB1k20e6Np85WiQ13J3IsoHr6kWZx//2X7e/s2dAJy5uok3n7uCm99yGu0NYT798i28eFsnLzmpC8MS7B3P8rM9k1y4toU9wxlUw3TRHj4FbnrxeqZyKp2ehsFV2zq59+/OA2A8U3YngrmyTou9f99ko0KcqbJXtCka9NPTFGXPaHbRBuSv907x5bsHeNP3nuDyzz/E+35cnRX0NEaYyFZqnlf0FEfeybqX87V7NMtMQePcVU1s665nTVut6ueG9jpmixp7xrI0x4LudFq+prwXP3nnCfIV3W0wjqbLvOsXh5nISej8Z+4ZZM4WGmqyJ7IO33YqpxIJ+NzXPKW3ke6GME3RIKuao9SF/Zj2tKqkyYLGKeZ/e2SWO4+n0EwJY/5TRMWQ98bxZBkhBJ31Ye4+kSKnykagA4tOlk0+6VEInsiptMZDXLWxjdfs6CIe9PHdneN857HaZpZ3IthRF3KngVA7+U6rOgXPvtw/W2C6qJLTDfK6LAI1wyJVNkmVTUwLt8lsCrlfybU8iCmgqBtomu4Wt04j07AEfgVXBCs/D5H1lrN6CfgU17Lo+RrPif6pEOJ2IcQ6IcRqIcS/2L/7sBDiV/a/K0KIlwsh1gghThdCDHie+y/289YLIX5r/25ACHGS/d9m5zVfiP+dcb+NlT8yvrQIy7fuHeRb9w1WMSjASSua3H/vWNkkld88hcLVH/sD40tMQprsGzNVlMqcDoym6BnRd3vgBy128uAUhMklDEaPTOR5/Td2UdJM9gxnEEJwdDLP+q56/ukn+/jRg0NLHiPAZ351mO/ccwIhBP9w1XrCAT/ff3BxDHkiFiRXMZjJqdy2b4q/+P4eVMPkuq8+yg3ferwGZpT0TA4LnmMsaQb32HDCor0IH7dFNF5xRi+zuQqb7ImZEII79kts+6r2Oq7Z0cVD/XOu79X2ZY289YI+ttkFnLcTe/oKmRA5ohPeuOvoLAcn86y0i+yHB9P0JiI13VaQ/or9syXeest+d0rg8yk1Xl9nrUigUCtS4Y27j85SHw7w+3ecydn2xNHvU3j9mct4w5nL8fsUeyIoN+Enx3Lu9GP/RM5VOnUU+yQkRz72nv4kq1piJKIBGqKSvK/ZvMZMYenGgTfGc5qbFFpCkCnqbkERDlQzYt0UTNk2E04iW9Ik/Gw4pTJRqFDQLFSb//RsoZ26btSo1mmm4MDkM9+s7jyW4reH557yMdmKQUWXCnEOp+PFG1rpqg9zSm8Dr9nRjYLceHUbRnd2X4LPvmQD127pcCcZyaK2wP9sZXsdt3/gQm7+m3NJxEKk7PN/+mq5Zkyly/QmIhQ0U0J9fQqWZfGHI7OUNZNkTiWf12i0LRrkObFQ
VbNm0gW4ypZfeNkWXn/GMiI+nyv8AtJ78s7+JPsn5XXUVhdi1C6YOhvCGIaFYZhoqoau65LnKQR3H5hyE/mrTupkNFXmdV/fWeOXKoQ8/pM9/mGlks5PHx8nU9I5ZXmC153eS1EzaK0LYVqCJz0TpKPTRUxLcHS6wF1HZxmcK6GIquruWLpMSzxkWyzI49zSVU/FsPjVfkd9VTCVU3nrLftrJgnOJNu79niLwruOJTkyVUAICROdzuv0NUXIqwZNsaA7HVhKNMOBYHsnXQ7fyAvHS0QDRIM+l1957/EUn71nkGH7O8hVDJpjQVvYQYrFnLuqaQHA2O9T3O/esFVOfYr0vtvR3YBqWlhCMj8TkQB9TVHOXJ6gqz5MxbDwKT5UQ0ilUc1A90wfnclBOKCwZ+84r5zXFB2eLSKEoDES4NBUgW8+LCfjkaCPsmqgqTq6ZqDrJgW7mbmhq543n98HyAJ8aKbIJ3/fz2fuOkFFt7hiQxuWJZjOqlx1Uied9vk8ta/Jve680Vof4o3n97Gpp8G9tkeTjrWSydsvWsXajlr4aJMtnOUc42Xr2/jAzw/xqd/202VP/244rVfy1jz8wUePJ/nPh0eIBn201Yfony6SLRtEgj5SBZ3W+hDL/uoX/OuvDxMO+Fwv0o55n/va7V3sHEzz090LEUI/fGSEjoaw22h7zLM39SQi6KaoUcNurZNKuo3RAJOee/BT127gn1+8joZIgO/Y/oVnr2ziI1euqxG8AdjQUUdFt3h0KMNpyxslPNqdCEqY/87hDP0zRTJlg39/eIR/+v0JKobFhvY4f3fRSjRTcGym6E4EAXctPJEsEw74XG6yYQn+9rw+PnDxKurCAVpiUn1VqmxLaL5qWNw/kGLQbhCXNMnTfjZoo2camiUnaZaAl23rcvm0Pg/dp6SZ3DeYXqCa2hoPEfT72N7dwKT9fafnwWfjtofracsauXydhMY3RaXit0PlEEKa1M/kqseXLMtJnrNu5yqmKyzlFHORwEKRl9OXy/2kqJuU7EZQUTPdppNhCdrqqlPhXKX2mE7pbWTTvHvm+Rj/d4wwXoj/sfjFrtGnfczgbJH2hnBNd/jiTVUOzIrWON/4/VGW/8UtNc+b70HkRLNdKKQKKsOegsULCez1FIKO+bsDEV3MXB3gn2495BZUJc3k6KTk8qzvqufHj4xw686nP1aQ07S2hjBblzW4vmrzwxF3Oe6RVT/vUw8Akhfxg0dGPK9XPQ9eueq9HgXUa2wxlOG5EgEflFWDsWSJXL5MwAcIwWMnUtRHAnQ2hjm5T5q6Jwsaf3jPOXzr9TsIB/x8942ncNW2Tgbmqp+rMRqkNxF1u/BOTOUqfOx3/fz9rYfobAi7Jt/r2xf6Jzk8kNmCxo2/Ocrjtgmx0207b1UTMwWN5niwRvHTCSEEu0YynLy88SkVVCVHUE5aj3uujd0jWVrrQkSCPvpni4T8Ctt7Gjg+W2JgrsTxuSKnLm+krS6MZlisbY2h6jJhXIpTNT9mPXBJw6wqhb7u1G53wxBCGmOXdJO8ariJllM0TRelCp9X9KHwLDkfs3lJ2jdMi6LNqziRfmaFoKbpjOTLjBWe+pgLukVJNSXUxhLUhXzc/MQU61pj/NmWDu48lmRVc5RM2cCwVd+29zTQOJ8HskghGA742N7XTEt9mNaGsCt24fiPFSqGey8/NJhmVUuMz9x5go/f0c/XHxp2p2JeL08HWuh+ftXkKlugKhLwoQjB9x8e4ct3nUDXLXeaoNrfw7ft7nVbPMRcUaMxEmAiW8E0LTRbREHYE8g+e/L8hgtXA7iKio8cT/G4J2EVu+MCKQAAIABJREFUQibyW7sXquQ+YPsIvuUHe7j0cw/x4k21cHSfIrlil375Ed528z4+9rt+fn1gmkjAR3ciSkdDmLmC5hZVjjDVSvuz3e4xMH/7Tw6QV00uWtvCv16/kc0ePpnX89IpCj90+RpOW97IqA2/u7s/xXC6zOrWGJmy
QSIacMUdlhKucKwpvPAvBw7bHKteI4qi0N0g/RwDisLOkSxH7DVzVXOUsm7RbSfjlqgWkfP51wGf4hanmzvrqQsHWJaIEAn4WN0ac70QLQEb2uO8/ezldNSHWWb7qE569oyCZtYomBqmoCkaoKxVGw0T9v32+GCaH9wzwGyyxD9cupqz+hIuJDZb1NF1E2EJhCX5r6YpeM3Zy/j1e87hPBuyaZoW2bLOHYdn+f3hWSxLsHsog2lf55ds7mCl7fnYmYjw0I0XcaUNQb54UxvnrWt17Y4+86qtXODhn4KoEQfzhrNHWZZgTVvcndzde3SWtrowP37zKbzhzGWcmCliCVjREqN/Ks9rv76Lj/ziELuH0mzqauDQRI5cWacxGmSuoLoF1k8fG2VVe5zZnMoNZy7jEy/dXPP+bzxnOZ2NYf793gHe++P97j05la0wkirz2rOWc6bdEFznSchPXtZIJOjjE7+vwvcKFYNL1rextbuhxlO0qyHC5q56rtnS7sKcl81TGP3QFWu4eG1LjQfgOfb7xkN+6sN+JnOqCzkFOYF8fDRH0K/wt+ev4H0XraSnMULApzCULpMuG+612tsY5swVsmGbiAZpsJEx8++d5lgQQVUcSwhBsmxw2+FZ9/rULcmZHX6GIjvPNDRNr4FITxU0hlLyeC3hNFpko2SVB2Xk5ARtHgizw98M+nw1r6koCh+9Yi0v29rB1q563nx6L3911jIS0aA70TMsWbClPNesMwFNl6UvsGFJ2KwTOVXaOTlc74vXtHDDyT3u+a8YFsemi5RtXqbzmQI+pUbdNa8a9Hjgz36fwrvOX8HLT6rC/Z+P8UIh+EL8UTE8W+Q3u6tKfEt1ofYMphibyaPrJpFIkI+9dge9LXLTf9U5K1AUhU/+rCoX/7qLpDhBaolJTJOdBKYLGgMeGXnv+3sngqfZi3Y46KchGqgxiHfCMC0OjuV43bnSRiBV1PjM7UdpiAa4ZFMb6aLGiemluYXeCd7+YZnsre+s5/h0YVFbhmYbwnPM85rO405e3siX7hrgbhvK6e1segvBPSMZ/IrCA/9wHmfYx3hkPEcxX+H8m/7A8GyeI6NpTF2n3p78re+qR1EU2hxRh6xKczxUA3dd0Ry1i4lqgr68WaqLeuPHT8hObTjgw+9TOHtlE02xoLtJ1h6v/M4S0QDt9WF+uGsMIQR3H0ty/ZZ2XntKt4QXLeFtNpwqkyzqnLY8sejfnWiKyQnI3rEcjw2laYgE2NRZx6GpPD6lqnLY0xjhlTu6QIH3/fIwloDtPQ201YWYLWicuSKBYUl7hOFMuQYqu1R4O5zeDW5rV707GbGEwLIkdLNseyhCdVieLEl4qW4JXmqrrlZ0wf3HUow+w2LueLKCbglyZQmBU3WLVFl9Rp6IJ5IVCqpJSbMoVJaehM7v+MYCkn/1i/0z/NcTk9zVn2Qso7KhPU7CtmFY27rQoHwmr5G2E4OXnr2C609fxktOkz6ZZc2gpS7sTgyWt8TwKXK6F7abASXNZG173LWn+OW+KqTcCyey5t2DyYJG0L7kE9EA13zlUb56jwSraJrpFvFBn+LaD4DkRh6aKtBaF2IoWZITQbtQl3BUyy1SL9y8kEv82IkUXYmIO6Gayas1XF7nNrxl1zgV3WTAhh3pusmNV65zH9fXHOOuRRSQhRC01kt1z9m86k4XnMlbazyEgkx+Ap4J3Gev3cg7z++jrznGlZ6iM2k31+48OscHbpMQ/Ja4VFPMqwYXrWnm3hMpDEvaZmRKOoloENXm5+SXmGY797lXjfiV2zv58OWraZ6ngNzTGKaiW5zUXQs3326rYrpiI/MKQa+JuymkYERnfYgD0wWmC5rbnFnZLK8rx2bBm7R2N9b6zylIOoTpWe8tIWitCzGaLGFZFpZl8bff3U1JNbjl0RFK+TIh02RDRx0v9lwTjwyksMwqF9AZY/7VxdIvsMt+b1U1a4Rg/JZg/1gWVTM5dWUThbLm8qs67Od8
/OVb+Ok7z+TrbzyF7/zlqXTZ6966znre67mOwn6Fb983xJ0e+x0nHN55UzTIuy9e5U7SsmWDkWRJXkuK4vLWT1/ZVIOAmcmpbOmpZzhVZiRVpi7kJ1PU2Xmset2u6ajjxEyBd1+2pqZ5C7IwWNNeR7ZscO/ROQZmi/zLb466KJiTehu46SUb2NRVX7Mv9iSi3HBaL0emC+Qrkn5SMSzqwwG6GsJM2nzOPxyZdWkYV25sdydS86kJJ/c28o7z+1jTGqOzPszJvQ2uyjTIwnEkXXaRAs5OurwpwldfusltcPh9Ct0NYR4azGBaglW2iq+iKLzpjF5uumIN+8ay/OcumVPNLwRlg0W4ibtqyMbF6csaqQ/72dgexxIOfNjj25vT2DWY4dDE0tSOp4tUsarcCfDIcIaSbrLNPrYXb2jHuYQ3tMd5/0Urefd5fe7fWz2QZYd/O5Qu87afHOS2QzN88f4hMraAjpOLrG+Lk4gGaYwECPt9cu8UkpOYKWkUy7qLfpE0m6rm9CVrq82OnsYIZyxvJBoM8JYzl7Ojp5H2upCLQNBMqdDqhEA2qkq6VeO1mq8YJKJBrtvSzjvOrdJonu/xQiH4QvxR8cP7B9FNi1BY3uSLbSa6aZEtqOia5AFFIgHedO5KLtrSwUdevpWPvVLKPRfVasJw2UmSN5ZeohBsjsubM13UGLChf811teIrXvn0qAfi0VofXnQiODBbpKybbO1toLlOqpLuHEhx3Sk9bidyMlOpgZ96w6vo+clfHiJZUFnbWff/2HvvOLnqev//OX1mZ7bX7GazSTa9E5LQOwEEUUABRVCKBa8VLIjlckXlKiioWOB6QUGRIr1DCiEkENKT3WRbtvfd2enl9PP745w5e2azQe79Ph5e/T14/5XszM7OnDmfz+ddXgVJ0SwFzrGEwF0vtyEpmgVXbTUnhk9+cVIY996rVjKvKsh/v9UDGBNGBwZ0xg4NHYhmqSnxWUI7AK2DhlfUF9bP56JVBt9JEBXLCHU4nOKk771mwXDGEkdPXa2Jq20aMMs0/c1xf/YPxHnmgJl0m7vvV8+YwwOfXGElaG93RSzVt1wXeG55AR9dXk1/TODGJ5p5dO8QD+zox+EwpLmnJl65yCWMc6cpJuyRg9vc/PQh9vTHqS8NUFPkt6Tqz55veLtpuiGBfqnpbzWjyMf8yiCVIWPis6rOOMBERWM4LfBW99+3cUjZCogcRPS28xoNLpB5z2kmhM0wzp2cGuauT1JSEFWN6qBhro1uGOy2R5PsNnmkfy/G0oIFOQXDPy0hqfRE/j7EtTOcJiOpJEWVodjkehqLCrzROkHG5BraX1+UNVrHJovUrV1GI2QsJXHVqhmUBjzUFHqtNalqOm1jKWRVo2MkacHsVjaU8tsb1hLyuekYilNz7aPoqsKergk2HBzG6XQQ8rsN3p8N/LegsoBIUsKhGcWHKCqU+lx50C6HrQ5UFA2vy8HD2/sQRYWxuJjniaVpOic1lHDR0ipSksp3TYn3m85o4D83dDIYF6gMeemJZCx+V26KrasaqqIytyrE7EojWXxsW7elHioqGpWFPooDHpyOyQLtiRuO56umnyFAy3CSP23vs5LKtpEUi2zJZ2NlMO89X2xON1VNpyLkoyLkZUdXlOv/uBdd1/n3l9r42esduJ0Oqwl1zoJyvn7mHO792NK8dTXP9u9wSqJ7IsPvbAl+edBDWYGHrKxZkwyAmpCXlKRSHPDQ3hul+UA/veOTyaeu69yzuZOmwQSqrlNd6M1DGbhdTgu+bY/aYsNe4NBIilmlfm47r5HLV9ZYE5qmYcMSQNV0qxCMZWUW2DhesqIRychUh3yWkMeyamON+91OTqgvYdTcI8rM9fqnnYOETJjt3LIAZ88rs5LdeeVBCkxxG103hEe6x1Jk01my6Szb2sZZ+e2XeWSL4TGXg5cvrArRWFGA2+lg8+ExVFtyrWkaK2cVU1McIJGV2Xp41Gow5qbcFy+rQlY0
i/968XEzWP/jzTy1vdv4Dsz9czCSsbiD+3si3PD7HZZa8ahNNMztMHzWvvjQPgCe2zPE33YO8NMXWvGZ9/TNZzeypKaQEXMPLvC6uPR373Kvaf+yoytKY2UQj8vBc3uHOM+8F8NJiaVm8b6/P05zbwxZkmm18ebnVQUZigl555o9Gm3f4b2bu3h67xA/f60Dn9vJ/OoQFSEfx80qztMHgEnbiN5IxlJ1DPlczCwNIKkaT+4b5o7XOnjUtJQp8Lr49LqZrF9UkScQE05NqgtXFfr43RXL+P758/Oek2uS5ibkJQEPn15Ty63nzD3K+3Bmid/aO+dXTq4zo0npR9F03uqK4nU5jioESwPGRPBTx9exrCaEbHJiL1tezQ/Onccy039XVHWSklEktQylaB5OMpQU6ImkEd5HM3C6iIsKsgb9QzF27u+3JvknmUbwCXHy7Cv0uSkr8DKjyMdly6r5won1FvxV1/WjPFGfbRqjaTjFHRu72DdwNMXIUN1VGU8rZExuYEJSaB5O0xkWySgqdsCHy0EeP7TY76G0wMcbnRFeNpvrr7SG+c83uk2IrW7ZyYDR6Gkfz3FydX6zrZeMpJIQZN7YP8TLuwasHOf/D/FBIfhB/D/Fmy2jOJ1O3B4Xuq5z85/35nmhAQxFBXRNszrdwaCHbS2jrPjaM5QVuCnwuRFl1YJgASw2eW1TvQIlReXFXX0WIb5vPE33WIryQh8NFcGjJpL3Xbua5286Oe9nlYU+wjayuK7rfPaBPdz8iCHysnRmMWVBL73hDIKsUVviZzwuosgKqqJaE8i93RF++VIrsbTEYCTDlx4wTHLXNpbTMZzkzcNjLDYPwcvv3UEkLfHL147wX290s+nQGBUmYT6nYDqzNMDXzm3kpvWN+D0uVtUXW1CTgZhAZaGPsqA3r/MZTknW6xjXR2M0LuBwwK2XLGWJyfdTVA3FlFNuH4zTG04jySoup2Na4ZxcR97uc1hfGkDRdIYTIt0TGW566hBgGLJHs/JRAjcZSeV7L7Ty2b8eoHM8TX2pkaCcvaCC0+aWcXx9cV7XcjwlkZZU6wDXdJ0fvdJueTTlYGl2lbxISmRgClzVDt8BQ2SiPOghbCoqnruwgnMWlHPNOqPZcPGyKj62sobbLphveY/Jqm4aZDsNI3NBtczRpwtZVoilJbKKxlRhg6KAB0XTCVvXUqfY5zanjZMTwUhGJiMaJHZFM5odD+8eIpI2zK3jgspo+r0P8ReaRtjSNkE0K+cVCaqmk5F0Wsb+vlpuRJBJmgbqw7Z1sqM/Smc8xc5e4/uwCxLkvvtc19ftdPD5k2aiajq7+uN0jKeZbyZ0o0mRx/YOcesLbVz5p32MxARD2U1RSCSz/HFTO1Wf+SuPvmUkmaPRNJVFfh7d3gNASdB7lH9abZEfQVCIp2QEQUEUVXrHM4TjIlVFPlbNKrbEYvwYPLyF5vsRRZVYSuKshRV847x5XHeK0enVdJ3ZNojTTWfMRtcnPa7KC7x0jWfImN/rBauNKeaMIi/7uqOcML+cYhvXTbJ5cJWFvJQGPRR4XJbqYWXIx6UrZ1Bb7Oe8JVWsqi/mzzv6rHK3K5y2JhUnzy09qmC6fHUtj123moyoGnwoG882B4t9tWWc0aSI15ymziwJcHpjWV6jDIyE996PLeXqNXUkRZVvPNuS93jQ67IKLr8Nxu03X6fI56KjcxxZlNltm/6EUxLPN43y1Sebufi+ncyrDNI+lj7KH3FTe5hnm0ashDknGDOcEJlTFqC+NMD5iyqYZe4pR8IZVM1Ys2UFhrpwNCPnNZUkVack4GYsIbJmZhFnNZbhcMCfdg7SF81y3sJy67nFAQ9bjkTY1h3l2aZxzmos4xOrZlBt7rXr6ks5MJwEDE6SphtKij3Dk0msw5HPWbcP0H968SJ+eeli+sczaKpmeeSeu6SKR25ch6bp/O61dj57/7uc2GDs4ZoJHfU5DOP6D5sNrCVTpqQlfjd/e6eXs364
kefNIufuF1t5Zf8QG81p+XtBpr/52EG++7dmHtjaw+ZDBnw4xy8diYvMLi/g/mtWAdA6mkTTdQ4NJlgxs4gfPHUIQdb4yvp5eFwOBqNZFtVMQuLlKX8LJvn7nWPTT6tyHHaAbR2TPodrZpdYFIGSAi9ZWbUKXYBZZblCMGudmYV+N3PMn289YryWHdmwfmEFnztpctKTFBQuf2A39/8dbYBZpQEyssbGNuM1VV3n9MayaSkMOaXsypA3TxwH8lFFRX63pWapaYbSdIXFq5YtCGPA47SKzZLAJLcwo2js6E7QHckwnhUtRcy2kf8dZDQlGpz5l99oZ9+hYWRFZU5ZgNmlATxOB387OGJZrNg/l9vlpLF8suAdiAmE0/JRFAGnwzgD/7Cj/6iJph0llJt8J2WFiYxEXzRj3VfHmcVZRchrWenUFHoJZySGzVxqz2CCb7/UxhZzip0SVURFs6wvwChqc/Db9vE0+weTPLp3mNaBOAe7Jnhl/5CpuTDCvRsm4cf/qvFBIfhB/K8jmZVpGYjjcDoYH55AURQUWWVXZ74pbW84habpFAR9LFpUhVvXueWhXQxFMrx5yDiY9nTmC1M0VBmHR3TK5O7G37/Np+55k/3dEc5aWs39GzvY3DxCY3WIkqA3LzkEOGdplVWM5aKi0EtTf5yX9g8TTUuEkxJvto7TNpJicW0hcyuDlAY9tI0YSXNVkY+xhIAoiAhZga7RFL94oYUP/3QLdz5/mCU3v8jaW19lg3nIfv5cA9Y6GMmwuLaQq00lzsODCWua1jwYt+BjgzGBkgIPHpeTT580i6tPNJ5fXeQjnjW8h3JE/JDPlSe+MJGatMZQNZ373+hClBR8Tp1YWqTfxvMLJwRuvmC+9f+328NUFvosjzF7lIXyeUWAlRTv6o2xx5xM3XhqA+tNHkt4ShFuh3y90x1ldlkBf7lmFSfPKcXrdnLr+kbrcZcDuk0e3qzSANu7IrSOptjcHuZbzxw234vx3dqFJFZ88yXWfTdfULh+SoK8sDpIRciHqGikRKP4/dJps1ltNhv8HhefWlNnkf5zyWNPJEtVyIuqGZ5JoqLTOTE9Z3VzR4RXW8cQlEmOUC50XefdniianiPVG9YZdrntQhN6KJgcBchXthMV4+eCoubJZtsjJYgMJAU6YikSkmoVmKUBNz63E1HVGMtIR62pqWFXRovZPBTHTMjqYEIwjasnP6jTAXPLA/z0wwv54fnzuPuji1gzs5j6Ej8P7RoiK2usnlmEput88YlmnjowCd+UFRVN0xCzIj98dC+v7jW4eL94zlABbB9MsHpOKS1mp7i2xI+u6+w2ZdWdDkyupZGUy7KWx0VeM6eU9cuqkRRDLCbX+IilZU60Ser/6NIlXHVCPSvM+2IsIVpqfjnI78GhyUQ/JSn0RTKIZif5mjONdd8zniItKlyytj4Pbl1U4OHMxcZaiadEFFkl5HfTPJjgvHu2W56jKUGhMODm3CVVFk90aW0hfRNZNF3nqc+u4bYPLWRm6WSR88CnVlJfGsDrNjg3BV4nRf7JdbJ+QQX3Xr4MgEPDSQviVVcy/fQ991hD2dHTOTCgbBYEM6swq9TgPuX41aINLmuf1PRNgVfXFfmIZGS+80IrO3qMKXIsK/Pbt3p5eOcgfzULmVobL8eeFJYEPHznHANGWWE2r0oDHkPoSdUpLfDw6bV1rDOnFnXFPkaSElesnMH5Cyq4580etnVH+emmLgRZI+RzISka7/bE6DAnAj3RLKvriiyIGsBLreOGZURUxO8y1m6hz8XA2OT94XK7cNkEKuxFisvp4MWmEWRZRVM1LltXj9ftpL7MT1pUOO221/m1qYR9qD9ORaEXTTMgyw+aU+zHTRSFnV/739ev5pO/2sbX/rQHgMMDxhrJwUU3HDSsEUbMydXcqhBpG3Q3PuX8zO0huYbscFygptjHktoizllUyWhcZCgmkJZUFEXj1YOjfPHsuSyqLaQ85OOhbb1cdPd2
fG6ntS8uMaGCHz9hJpqm4TUna0dGUyiqxov7h2myqTp/ZOUMzszjNBrxaVvBloOwvn5okvdqcNadxkTQvC8LfW5mm0VJzi7GPw0fPRc5pdYdPe+NxGgwz5xwWqLA6yJjqhZPF6vqiviP8+fxtdMbjnpMshXKRT63NRF8ummULz11mPICDy6ng72DCd7ujSOpunXfAxYEX9WMAjecFZkQREvhVtZ0hpJC3hT6/UZGUvNQW0uKfXz6+DpcTkceTBbylVhzISoaLaMpdvbHcTjg2jUzmGPbw24x4dBup4NH9g5z60vtlniVHZ6Zg7NrOqRkmZgooWg6VSEv60zRrZSo8YbpFVkd8jGRli1xKXt8ZIkBZ1U0g3cpqRohW2Gd+9xgQGH7hpNIgoyYlfjcg3v48sP7+fXrR/LW9r9ifFAIfhD/62juj6GqOlLWWGCKCTmY6tHXbAqaeL1uErEMh9tGaO4zDv2o2fV5+p1e/J7JzcPrdlEY8NA5kuSiH73Oro5xIimRp97pAaBjOMGPrlyJIGuMxASWzyqhuMCTpzp6rKgrNWwlvv7IAe58qY1us1i6YEU1j9y4DpfTQVnQayU1VUV+i/gP0DYU5/cb2lnbWD7t69dXFFAa9DIUyeJwOPjyeiNBbB9OWtCaHUci+D0uq3NWOY1KZo431DKUJJwUqTA7iDkIjabr5kTQ+N27Xm7j168fwakqTMQyrPnGc7TbEteMoFiKoAB/eaubykJvnophLsqDR0NDF1SHOG5mMX/Y3svzTaPUFvu58vjJAmpsSoFh9yw8MGgc7AW2A8LpcPDFU43DUNXhj+/243QYPmXff6GVLz3eBGBxvybSkuF9ZXZZm/tjFhTFPo0M2O6jW9bP47MnN1jXKPw+1D8XVAZxAO1jKWqLfAYnQdGZyCiWMps92kZS9MSzjKQNgRbAShhdTgc/3dDJL97oRtd1yt1OZFWjN5JFlDULPmqXy5c1w/+oYzzDqXOMg03RdIPwLijHNBXvj0pkZI2EoJKVVRRVpzzoMSZHupEcxAWFQ+9hAQKQtMlyxwSFQwNJ3uqIIKnGtDIuyjzbNIqkanT3RekbjCEquiV5Xlfip8Drwul05BmC67rOxx/ce9TfW14VRLF5gL26bzDvcVnVqC8voDecJpmVmVVegK7D20cmuP+KZTx41Ur6o1kEQUEScgXh5O/vOzLOn984umublVULDlkccFv3TY5XFk6JlJiFYMzsdB8Zn7RpWWbCvlWz4Dl9iTGh0TWdqiIfw5E0V9y1mWzakFpfUlfM3Vet5PylVWw7NMLh3kl+GMAz+4Z4cFuvxW07z8bTO7mxHEnVGIoKlAW9eFwOZtoELeZWGNPNsGl58osX2/mzCSsHA0K+uKYQn9vJ4ZGU9RnqjgHDzoX98XsuXUKB12Up+uZQAxNpiVvPmcuvLl1sQS4PjyQnhT2iGTa3h3nl0CgPT5GLryr04XY66Axn+MXmLnojWbaaBuvzKwt4pWWcHT1RkoJiwXxnTOFvzSz24YC8tRQ2963yAg+XrKjhPLOQKPa7mTAtK3qiWUaTEpcsN67zM02jfO3UBjwOB6+0ho0C0Nwv95pNiNaxlLXXnGhylXMQuZDPTcrWDDtxfhn1DaXMmGk8b2ry/bd3B63vf0FNIXMqgxweiHPHM4fotgmIjSUEqop80xrHF3hdHB6cLJqap0DHc3tlxDxn32o1CqWRWJag10l1sQ/RlsRua89vyI4nRMs7T1Y1+qNZas37rrrYaCLmGqY5pcXPn2Uk9LlJ32hc5P5rVnG22TCMpgQKPfDQxjaEjMAvX27F7YSO0RT3bujkpkcO8LW/7LfeQ2nQyy+uWM6imhABj4vnvnwid1y6hOMbJrniufv5hy+0ssfk57ucDupL/fRFspPQUL+bQr+bchtfbaolgD1ygiuFvqMLG3vMrwpy8bIqPrF6Bh9ZVm1xy48VM0v803Lh7VYqRf7JQvAtE2r/eluYhhK/OY02osR2
duSmp5puCMYkZAUNYw9NmNYnCUnOE8t5v5GWFbpskN6+4QQBt5MjI0nW1uWLXU2ddEqKxg9fO8IvtvTwSkuYioCb82/fRNZmsTO3PMAly6vIyJr1eUeSIv3RLJvaJ6zCui8mIKma2ewxmqOqpjOj0MfewQQhr5su2zldEfQykZFpswnHBb0uLlpUSW1RTmTK+L6iWZXOqGg1uUJeJ4Nx0RKIEW33Ss6KC4wm/79yfFAIfhD/6+gaTSFJEllzauA0Dx37IQbG9EvXdVxuB35XPl5+LJ7l1b0DPPpWJxesnslbd1zEc989FzAOgMe2dbGtZZS/vNnJ1kOTU4SO4QRzbMqUK2aVUBL00j2W5odPNr3n+/7SuY385tOrWFxbyJO7BvnCH43k9NsXLaTQTPzKbIVZdZGPLpvy5+Nv95IRVW48b3K69oOPGd12RVbYcnCYGaUBBs1uYnnIS2Whj9bhpGXJ0DqcRNN067CcTu47x+H73MP76ApnqCj0EfK7SYkK/ZEMa3+8hZRoqCeOxQX+uKWLpTUFZM3NKp6ReadtjHozSdR1nRd2G4nYtWfOZeeRCQRBnhYamrPnsNtsOB0Obj1/Hi6ng/5olpPmGNLLuSJ2IJZ/uOQmgucvrqR5KHkUdBQMKM7NJi9qLCVxxrzyPGlve0ykJUtwBrA+C5DHQ7LHSeb0MQepCdsK27bRFD96pf0oIZ8Cr4tZpQHaxtJUF/oMArp5CI2Zhu/2aBlNkhQN+KasGDwllymnrWq65U2sGLWwAAAgAElEQVQ4MpLk+W1dxGJZdPPv5MQtTptTasmga5qOpkJtkY+r19ThdTksBTRVg7bx6QVjRhMiCcHwHTS4h4Z4x5yyAHHBMFTPyBpD03BCc6EoxuGqqBqSopGSVA6OxmmPpIhkFeKCSkxQGU4LSIrO4Y4xmloNXvB0lh+NFQX85ML53HZeI5vbJ5EC9cV+zp9Xxm8/vpSmvtgxu+e5yE00fvNqG7PN4i2ckhhPSxT53fROZAx7CNmAzwF8/sw5XLamjiPDSTpHU3mcHoC+cMZqEIT8bpJZmS0t41x+7w4URSOeka0kKweNG02KLK4O8fQNx+NyOlBVHVU1psx/fO0QPqcxlTx7aTWf++02Xts3iGZCjhJZmUK/m8tNSDIYk5Yt3zqV9UuqeHzXoCVWU+T3UB7yWnvAKfONyeXh4SSjCYFzfrGNl22iOLkIpyQ0U6E1lpFRVY2Ax0U4KeJyOlhYFaJlJMk3z27ksyfV5wk4TBf2BlVDWYAHPrmCX5vKjnZRFo/Lic/ttKDsBwYTli+kJKv86JV27tzYmTdRBWPi9Ni1x/HgVSsJ+tz8blsvzSNJ6or93HhKA4qmc+emLr75XIs1FbQLeTx9YIThhEhVoZc+c/8pK/AwEMuiaTrRuLFWctzUXDE5FBdpHTXW5elzyzixoYQDQ0lCXrcFmwU4e14ZtUU+Dg4n2doZ4emDY6QElXPmlbOoKn8KEnA7kWQVh9OBx+vmjX0DSKICOHC6nEcVgoMTGVRFJeB18ZuXmhmLGHSCJ97uZd28cgJeF58+fQ66bqAkNE0nm5WpMvey+rIAv792NTs6Jou3P2/tyvsbe7sjtAzGLfXtkZjA1pYxesbTjE0k2bi3z1AsNdffn7fnWx29sH+YYp+b/miWLW1hkoLCGaasf3WRj6ys8u0nDYqAKBvc0KDPxX0bOhiamIT8LppRyKdOMKDT/aMJRswCQNd1OoaTVIZ89IYzbDxk7CX9kSxtUxrK157SwLcvmM/M0gDnL6vOm7bbvRPfbJu8HjNLAgzFBavYy1kaLbB9d/HssQvBnN/jdCrW9nA7HVx3Qj1XHFdrNQHtvoXvN+xK0VUhLyNJiZGEaPGEt3ZG8xR9AYptRZfX5STkdVlCVLqukxJVA9ap6gbiRFHpj/zPC0FR1ei3QZ/3d0e4+6UWTr9tAz99qplbzQYATK6zCdPea1PHhKXICpCMG4Xa4+9M3m8Oh4Oy
KYbyIwmRu9/sYc9AwuLjAkSzBicwIakI5lmXlFRebBmn0ywCL1tezZdPmcWq2kJK/G4qgh5uPWsu162t47b18zijscxqePtc+eqloymJFTMK+cKJs0gICksrC5hdGsjz1pVFmU+tq0USJA70T+8V/a8SHxSC/+DQNI2z73iDkfD7E334Z46DfVEUWaG4OEBJeQhFMeAQXVN4SAMTGSRBordzFLKTG1BjTSGjsSy3P76PGaUF3H7ValbMLuPZt9p59I02qygDQzTmjaZhCgMeFtYVc2QonncQrGwotWAt92/oeM/3XeB1c/7yGn599SoKvC5rwlZr67DbC47KIh9do5Mb4FAki8vp4JSFldbPzlxazfoVNYiCyA/+uoe6sgBDNhjUgpoQz+wZYjQuMqPEb3D5EoIFc1hlMwTORVVhfre+PGhOBEWFe21mrRUhH3/Z1s3I0ARv7OlFVVVWzSnjex83RHjmmIIIuq7z/O4BHA74/mXLmFMVJJwQGIplj0rEAx4XBV5Xnh0HQGXIx52XLuFb5zby+VOMaV5dScBIEt/uzeMvDsYESgMe1i+qRFA03j6G2EqFLdn8yumz8yClufjWM4cYT0lWQQeGMFEODrjXhJXJplLhWQuMaW2OTzDdRPC7z7ewuT1sibjYY1F1iMMjKauzmcvhJgSFXX2pvAJ5PCuREFXCGaMYDEfTPP9mO5KsWkUhwNhYgkwqQzyRZUlVECcOgl4XIa+Te7f2cK6Z7Kuaoei4tCZkyVdXh7z43A4UTWd0ylSzaSDJeExkQpDJKgaXUFQ0VA1mFvs53ry3Ql4XGVkjJsrHVA+NCYb0diKrkhJU0rJKOCsTExSL46RqOqKiI+s6oiBa3LdjeT9WF/qYWeKnaThJdaGXYl1n055Bfv5iGz96toXBSNYqlnLxIZNvl4uqQh8up4N7X223oLGypLLFnGDstU1CcmbcS2cW8dMrllk/z3nr5ULRdPZ1R617/5JfvcPnHjQgdZKkkhQU3umKgK4TNQ3Zx1OS1cnvmcigmsIwmihw8/1bEVJp0HVqSoznfPlCwzD7lAUVlvBV2NZ4SZhF1IlzJz1VAYKmfPyjn1/LA9cex+IZht3B9545zJceOUA8q/DQ2334XQ4uWzXD+r3hKYbyl6+u5ZLjZlhCSfOrgnSFM5QVeLhwSVXeHjpd5IrnnGqqz+20kuKA10XA48yDhO/sizOjyMdA2CgCNFVBUzQaKwq4wQbly8V4UsTpcFDkd3PdCTPpGE+zuy9OdaE3j58pq7pl9ZArQCfSEn/ZPciWjgnqTYir2+mgfSzFH98doKdnnKt//gabDg5ZyXlOaXYgLnB4NEVdsY8iv5tF1UGyskZfNEs0K3PewnLuu3wpi6pDNJQG6BjP8PDuSS+78xdW5O11YHACVVUzFJlrSpAkhciE0aByOh2omk6viT7RdB1RUpAlhYtX19LUGyVlNhsUTec316+l896PcoEp9pWb9MiyxhGzQLrzEytYWV/M6weGOW1RBZqq0TfF2PrNw2Occ/smxhKCpRL9iV9u462Wsbzn5dbAnp4YjVOsf3pGUxwZS/GHrT3Ulvg5yYRTV9vM4z+xbiZjCeNs23BwhNufbGJgLGlNPIdjgkXbsDfectxUn8vBocEE7SMpbjh9Nl63kw/fvZ1bHp9s6q5fUsVHbPe6PYptucIrzaPWPl9b7Gc4IVpm9rlC8Kaz53LVmjrcTof12HTRY9IVplqRvFfkiqCMdOwC81hhnwieMrcUr8vB79/uIyWprJtVbPhZ4uCrpzawwBSaCU2ZVtabHoqCYvjSCorhN1huWk8IipZnvfB+Q1J0RsIpUxJVp30wzjOmndahgbjFTwRjHeq6zi0vtvOfm7poHUtTX+JnrmldM2DyQT1uJ7ed18jtHzJQU3baBxgTwRys96JFlVy3ZrKJZqhvGwqiToeDd3onizGnA46vK2JWSYDqQh/fOXsuN58+h9ICD4ttTYAinwuPy2GJ
ItkjZxPU1hXm6//1LtccVw2qiiiIaJqGLCn818YjyJLCvr8DHf5njw8KwX9wnHz7JvY0D3DCbW/8X7+V/6cIJwQ2HjD4Br5ggEDAC2YHangKFjuSFFFNoZKm3skFc/rSGnrGUhzqj/GJ0+ZSVeRnW/MQ97/UxPW/2ECHrfv03M4+Hn2rk7OXz2BRXbH12O8/u45Vs0tprCnkAtshkX0f3bjZlUG2/eAs6//2icEyG4RyOJplU9MIqiiiyTKarnHi/AqKAh4uWm0c1PNqCrn/8ydYv1MW9FkTQYAvnD2Xy9fN5KR5ZVx32mzA8PvrMQ/utXPyE0GAqqL8xLqi0CgE06LKltbJrmd5yMuOtsmDXVU1TlpYxbcuXc59N57MpWvq0HWdgNdFLCPTUBGkwOfmwuPqGJpIkxKUPAjo5Gfw0juR4bebu/KSniU1hVy4tNpKCF1OB185Yw6xrELz0GQToDOcobbEz6qZxVQEvWxoOVrqHiZ5SjmY6FBcYP6UZGR3nyE4Uhb08sQ7vdzzUgst/THErMHb3NcTRdN1PnHfTr7w8D5uWT+PV/5t8vvISeYfsSVKOX7edB5aFy6pRFI02kxrjxPri81DR6c1kmRzxziSJBM3TeEFExKq6jpNhwYZHo4zEU0Tz0iWmpliThKFrCGvP5aSCKdlC1KVFlUcDmNapmq6pXhYU+QjJaqcOKsEVdOJ2XgaKUFk/0iMt3on8jwMc/lEqd/NaweGqC3y4cCBpkNcVHm9bfqivH0skwdpysqqyY+clMuXNMNYV1I0NNWQyi/0uZhTFuDq+9/lSXNS+3Z31Cqy+6ICvaMpNu4aZFtb2BIB2dA8akyOdMNHLQe3Xjk7335EUlTafnUxpUEvG01+oaJobD0yQdtoisPmfVdvKx5mFPsZT4iWnH9OKvzL5zby8jdPZdnMIl7aP0I2q9A7kqLPJjqkKMZU9OebTMXHrMxE2uCiVBd60XWdHd1RHLqOIkqIZoPL43ayqLaI7pEEZSEfZy4z9qTyQi/DMYE5X3qWAdu+YPgICqy0NYIEQeE7jzahajrFAQ+r6ktwO51UFRr7QbftHv72ufP4t9Nmc2QsxZttYY6MpYw8TZJQRQmnw0FFyEtaUslKKvMqg4iKdkxoWF80y20vtSLIKpqu0z6W4qFPreT3Vyyf9vkzivwMxY3vOCkoHB5JsrgqSEfrEKoooqTTuHWV//7UKq5eN5PbL1poqZsCeU2YdTaoX2XIsCXIKfwCrF9Qzi02FcYOc9IeExRLOKck4OaODZ1EMjIJ8zPe9OBONh4cNvh4uqEq+nZ3lPbxNCtM/8aFZnK4w+RdlQQ8VpOuvjS/IZdLGqeaYbePpVEVBTEeJzwwhs/vYWIszrkNhaTiKRRF4dyfvUXnWIrBaBbZ3FNnmwl9PGPAev/j8uXMNHlsM8zPVThFWMPrdnJcQwmv7BsiK6m8uquXbMbk/U1jcN0/keGspZPX3d7403Wdv9xoKFYvqSvi5W+cyoUra/jhZUsAA146EhfpHE9zywULrHMyRwnwe5x86/z5DEWz1Jb4ufP5w9br243rYxkZRVaRbY0KSdGM66lrloDNBStruN3820/vHsxruh0rZpcX8NVz5vKbq1aSkVSuvH8X7SMp6kwVzk5zzeSKpsqQj8+d0sDahhLix1ArHY4L7DehkNOdj8eKHP3hfzIR3NsfJ5ySEBSNLVtaaWkZAh0+uqyKQXN9nT631BDgiQvMLPZbvLmcqftQXOBv+0eoL/Gbpu/5zd3j6oqpLfKRlTXSssI7R6IcGX1/dkTG39GIxjNIiQRqOsVYPEvXaAqP28lgJJNX4GdllV39cTRNZyAm0DqWZkaRjytWzaAy6KHfRPAoqk59acASgyqdUgg225BYsqazuDrEJ808rzLoRTbRD8EpUNSKAsO8/r63+3inJ4rT4bDWsz0cDgdVIS8pSTuqGV7sdzMQE+kbiCJkBb7/xEESKQFFVhDM/X6WibY6Y8H0
NKF/lfigEPwHRziWQZVE0qn/+Wj+nyk+8avt9JmL2R/wUmARlvWjlD5zB1wuFtYV89Qt5zCnahJXfuriaj57z0bWf+dp62e5gy3nKVhfEeKe609g1dxyOkeS7Oua4KNrZ/LyrWfhdBiTh19eezxAXrIFBmxlOvhZod/NbZcstg4e6/0sqMDrdhL0uXhl3xCipKCKAko2g67pltHvvdevZe/PPmRBo3LhxICW5YqMk+aVc8fly3j4C+tYv8zgpPROZPiwaUS6eMbRhtJ2SwgwJ4LmpEDVdeaZxVJxwMPgRAZd1/ntF07m1o+t4EsXLjE2uUIPX7p3M15docJMJBeZylprG8vRdGNK3RfOh/OCMUXbfiTCg9t78+A208VCM/nIGbg3DSZoHU1xxrxyXE4H5yyq4N3e2FFiPmB0aZ+8fjXrTR7PQCSLLkzeM1fZuoDJRJavPLCTnz17CEVRUFVDbGF/T5SXDwzzzsEhXt3Zx8bDY1M4p07OXVTJ8wdHrIljTjBjakIHhuDM8bOK2dUXx+dy8Hr7BElBJSOpxLIKw2mJ5qE0XRNZS866vsSPy+EwJkSyzGybEMe/XzDfgpXIsprXxc0Z7nrcTmqLfLSOpXE4YJ6ZIC6oDBLJyDxzcAxR1kiIqgVP7ZkQGE/LDKckYraExud2IIoyl97+Kl+/bzsuRWUobqhzZmXDDmO66I6mEWWNaCzD2HgSSTW4iTFzyuhyGB3vrKIhySqaoqCpChc1lnDl3W+xee8g33p4Ly82j/LzzV3cu7UHgJbRFKPjacSshJAREbMi2bSAbsqfAyjZDGI6zaUnNvC58xZy9/Un8PIPzgNgPCFQ4HXzsRPq2dcTwe9xIisaA5EsO7qjFuzuM6dNCjBUFvnoHp+U88+ZaF9zagPzq0P88lPGxFxRNDKiyidPrOczpzbwsbXG/aZZcFyN11vD/G2/0fiqKfKxtz9ueJQlBXRVwe1y8vXLjkOQFJp6Izz7bi8XrK6jykQZ5Ip9UdF4x8bD0nWd4ZhgCViAMY0E2G82zR7e1st/vdHFZ80GUi5cDgdvdUyw7idbuPL+Xdz8RBOP7xoERWVsPE40kmQ4Jljw83BKotGE1R4ZP3q9A/zmzW62HomwbyDOu91RvvDoQSIZKc+Gwx51JX6rqDw0kjQ4u5pKNi2iyWbjw9ZEOm1eOR9aUsVvL1/GvMpgXiEY8LgskYkcZOuLpzZw81kGdDyaUazJHxiFF0A8KzMr93PbFp8VZXRdp288xTce2kNXd5iUqLK2vpjOiSyaDsebwkDFfjfLZ4TYZKpS2icTOX5QwONkTX0RomLApmMZxdrzdV3nqf3DqKKIrmmkYkk+f65BHbjv1RZUVUMWZVRF5bJfv8Prh0ZRFZXyYj+7TWinrsMrt55p/R5MFoK/eqnVmq4tmlHII19ch9MBD7zRyazyfEGfNXPLcDsdqOrkmacqKq/u7p0s0Ew4aM73UlE0Xv3WqTz5lRNxOh386upVXHXSLG44fbYlcFYe9HLC3FLufqWdiZRoQbS/d9FCwDhz97aPcqA7jCIY4mpXrjMm+wPRLLG0hJgVUSQRJZngp9ccj6rpNFQUkLHdI4tnFPKxtTO579rVABx5D+/eXDgcDj5zcgMnNZZx/zXHEcvIbGwZs4S/dvfGDJizA57bP2zt/cV+9zEngs81jeBwwEeWVxvQ/2kUoaeL4P+gEEwICs81jfLj14/w1z2DZGUVMZ2mv3eMhKBw1rxyPrqsivULyplXUUB5gYcJsyhdW1+C1+Vg5QzjPH9i/wivtYVJCsbfPW9BJdevrbe8N2sKfayqLULTIa0ojKZFeiLT7wNgIBdah1MW0iqelhDSWdB1AwmhKkiiRCyaRJYVhqNZTphVzILyANc+tI87XzvC628e4WDLKJmszF83tnP7Y/u5ZFE5wyZ8M5WVOf8/t6Bp+dYvYNhS9NmGCrlzOvecQp/b0giQVZ3qkJdvnD7b+KxFPiRFY3d/ggfe
zeecT43jaouYyMhWA9Rjnscup1F0J5MGhHvT/kELbZJbP1lTDCrxPrQp/pnjg0LwHxzZVBZVEJAyx16A/5ehqBrZ9wFpONAdRjKTUa/bgdvtxOEASZTJTDEQTgkyqiTi0UTWNJZRU+BAlWVae8atg2l1YzlPb8sXdFAFgS99aCHLZxnTsps/sozyIj83nLuA8kIfdz/fbD33wU0drP+PV+k3YakbDgyzx5QH3nJ4lDlffo66G5/hticOMjWuPqWBT06BLTkcDt697Ww2fecMDvbF8NhMyBqrg7zdMkz1Z/7K6pueJZmRSAkyPTb569zG0DF89CE2oySAx+WgN5zhuxct4M1vn4bbOf1SfOfWM6xucGWhj1Kz4HY64E/XH889Vy5nWV0Rw+MJ5GQCl65wy2UriKcMuOdg2HhPmqJYncru4ThX3rWZGjN50jWd3omjO4NfOWeu5eE03eP2CPnc1Bb7rATz2YMjFPndXGwaop+3qBJV03mjY/qCMtfllxSN9t4JHn+9hfNnFfLItav53CkN3HhqA5esqOHVHT0IGQGXpqDb4IQdQwnufqkVMCdFU6BPADecNAtF03nTfA+5TuqxYD+nzy0jkpFJmQe6anYfs7KGIGu0T6QZTAgIikaJ381njq+jOuQlm8qiZDO09E4gyyqxsTjfe3CHleyIkkKx322p1eUEAaIZ2ZpQzCz2WzCm42xE/BzPo8/0AxxMGP53GVlF0Yzrd+bcUlbUFDIwFEM1obKDYwlUHTwuJ7KqW8qk9sgIIuGMREZU2PhmG2++fYSsPCl6ICk640mZa46rpb7YTzojI6dTSMkk33v0AG0m/0uSFP64o5/+wTj7e6Iomk7TYIJ4LGNZsCimWuKMoAtRlI3CVtPYd2SMC1fW8Mk7XuacZTV87hev4dQ1xk3u2bJZJYiyxro5JaiKhiSpvNExjmpOZNfMLqW0wIMiK2w4MEyPja+8oDrIgZ+ca8G+GyqCXGqK2Tgd8P2PLub7H13MGSbkOwdXza2bTSbHsarQR9uooYScSonoqkp9dTGzq4sMQQ9dJyOqfPOS5VSYXftYWrT2urfbxi1OsK7DSFzA4XDwX59exSnzynCZicirTcbk80fPtXDXy+2UB738+pMrrM8zp7KAl5um4QlOJK33v+vIOHEzcQynRBrKDP+6jmNI9ecEV0YSIv3mhGYwduymZV2xj3BKQlQ0Do+k8LocNHdNIKeS5BR77Cq3GUnh4/e+wxf/vJ9IQjyKD5xDGeS4iS6nwype+2P5Qk057m00I1sFoqbrOICLl1YhpjPIyQSaqpJJZYjFMsQFhdMbS6kv8bNsRogG27TvShuipNQGNWwsL2B+RQE3nTGb5WbDrjeaZXtP1ILDSbLGzp1deXvSdWc28od/O8X6v8PhQMiIpLMy97zSYYhHFfrY0jzMwtoidE1jxEYnECSVw33RvGIO4PmbTmbVrBL6whn290RZN3dyeq7rOuOxDGcsLEPICCiKce0lSWI4kuGuT62ivrzAKP4yaVRRQFONpmVjVegou4PSnJWOrnPp6hns7Ynx+81d3PJ4E8UBD3t+cBYXLq8hlpEYn0jR3jeBJEhIpmpsOivhcTnoGc8wnhTRNR0la6jf/uX1ZjRFoarIZ4nGeVwOfGYDb6mJymkfmf5ejaYlPvvAbv6wpZvNh8doN0VrltUVMbM0QF8ka6FNwmmJQq+Lf/vLAW5/oZUXTX5tccBzlFdfLvojWepLAhaf8GtPNvP5vx7IayhH0tJRDeag2eTLiO9dCPZFs1z7yAEe2mkgKJKiSkZU0SQJNZslISi4nA4uXlrFlcfNwO1yUh70WoVgVcjLjy9YwIa2MD/b3MWQCTlvHk7iwODclxZ4OHteOR9ZUsWOvphliSApOnFZJv4eQjnNwym6JjLs6Y0zEhOYSAio8uRZqWuTysCyKNM5muJjy2v45VPNvLWrj7d396HpOoMjCVo7x2kfiLOxaYRL79qKrutIkoSq
qjT1RFjx3ddZ+p1X6RxNc8WqGv79vEauWFXDzGI/ly6vIuRzWed0TsDLzt2PCwo1RT4qgl5CPhdzTV58LsbeQyTu+JnFOJgUv7pkaTVnNZaxsraQzvGUxXUWJQVNn1zfJeVBXC4nDoeDzYeP9s/+V4oPCsF/cOS4OZqqcdxtG/+P383RceXP36Dm2kff8zmSoiGZExtVFDi8q5WmHS343TqqoiKZ07etreOcdcebZLMymiSRTgnUFbrZsKeXS/7jBf70WjOqKHLR8TMtgZNczKkxDoGL19Rz7TnzeeqWc7jq9LmkBZnXd/dyfGM5vbZk5h6zKMwlEj9+upmLf7aF0Xg2T0ntD5v+vueLqum8uGcQt8tBecjL2y0jpDOTG0l50MMrewZYUl/CYCTDmm8+T931j7HqpmdRZQlVknhiWxeqqtI6dDSJ2OV0UF9eQPd4GrfTeZTClj28bqel9Fla4OGcxZX8+JIlPHDtagIeF6ebxP1Eykggvv67N9m4t4+1X36Mhza00D1iJOeyrOA2Jy97j4zz6r7BSQK8rtMbPrrQW1VfwhM3rmN2eYFlCv9eMa8ySMd4mqys8nZXhDPnl1tTubkVQUoCbguic6wYSQhEwkZR+8Crh7nuni28tm+AK4+v47z5ZXQMGt9lKisjSQr+gLF5pzIS7QNxS7DIDkHORbVpGP92lwEjVacpBDVdt2CwOW7dbFuy6HI4SEoG9yIqyESyMqoOTnRue7WD4aRkNUh6R+Js3drGnqZBtrWMoqoammLwgtKSyl0fWch3z51rTVTCKYml5mR1+YxCsrLKnZs6aR9Pc7rJIXMAkqbTZ/5OJCMxkVVICCpxQSElqDx9cIzWsTTj45NNiIHxFGUFHkRJQ1A0YoJylOjNaMLwpjrSHUY3pwm6efB9ZEmV1UU/MJTk4ytqyNimin3htNUl1TSdnXv7GRyMcbhjnEPDSbZ3hPOI9rnoHE0ZE1STX6xqOp+563W2NQ/xsdtfNO5fWbQKwaXmBKe+NICmG+qJR0bTFtyspsTPK986DTEr8b3HDvDLl1usycfhgRiSbEy/b354D91jKasZUlnomyxCzCJNMYvL6pCPT5gQ8IbSAFUhr+HF53SSTqTRNY2TFs9glolw0DWNUxdX01hTRIUJeXppVx+iYOwhiqYT8rnwux3WRBDg+IZSfnHFcqv58uftfezqmoTwvrR/mLk2c+2F1aE8X7YLzaZLNpVByWTQVJXe0SQ/fMaA6t27qZOxpMjC6pDlzfnT1zvYbJtQpswmYHfYSNyBPD8vgPve6uGp/cOEUxJ1xX50DFja4ZEUC6pCbHy3M+/5Oe44QFN/guhEiu7+CB2DcQZjQh6kLNeNt3OBq0I+vC6HZT3xWus4j+wepMUsEOKCQnHAw4oZhfjdTkoLPFx3Yj1ixriummKsb1GQSAoK1YU+bjt/Hl8/fXYeR9IuQlNaMLkn+z0ubjlnLnPLC6wC8Zdbe8nKGj6XkxMbihkajZPJSHmFYNtAlMtPmcPHT54NTCatsihb553PZTQNx8ciKEKWTnO/7hpN8pMn93P+D1/jkuONAvXydXU8+ZUTrfe827RpSmRENEVGMxESL+7s5fmdfQDMqw7x82tWW83Une1jrJpdiqZpxhpXVXRNJ56dfqKRa5r85JIlfOGMOUyYCfXOzmje897tjCCJEko2y1WnzWXz7R8C4PimaK0AACAASURBVPHtPXh0jeaBOKNxgXQqTc7bZW/HGEomzWA4RTRlNEpCNghsdZGPooCbjmMUgn/c2sObrWHufKmNL/xxL1fft9N6bFZZgL6IIQaVUwHe3xNjj3ku5JA6xQG3ac1z9N40nBCpKfZZ0+nW0RSd4Qw7uo3P3jSY4GP/vZurH9rHI7smhcty53niPYosyBdXm1niN+DnKRFUBbTpRb3KCzxM2KZPcUFha2eEjvGM5Zs3npKoDHkZTBjX9OBwku29cbb3xHipZdwSiNJ1HUk7uiGYi4gg
MSGKTAgSB4eSDEVSoGkUlhhnlM/lIFjgZUZlobG/DiUMmklCYNYUIaXBkSQrGkqtCTfoyKJsnZWRaJpEPMsXHtxNral0Xeh3c9v5jVy0pIqygIdoJif4Y+QUuwYS6Bi8+bigUFtk8Mi/e9ZcTmwoIZaV2Xuwn+HRuJV3TPc9B70uyoMeS/23NODhQ4sqcTsc7GwdQ9c0NNmY5muqhmbSnNKJLAGvi4KgF/XvcK3/2eODQvAfHIqsgGws0JHhfz6loQ17+9EUhYyoMBaf3ni0zywKdFVFFScLpJxohKZqhFMSv9lwhO7RBIokWwfk37bmC7ksn1XM7794Cg9vtBkWayp33mB0UsPxLF63i3NX1qLrcMJXHuPTd76GKiuEzY0yIyqWX55dYhugcyRlbfqfOnU2QJ4VxHRx94stfP6/3uWF3QNkJZWRaNqA7xkyXGxtNiBiv/rsiUf9rprNogrGdXM7oPUYssJL64poGnh/3/9PLl3CnR9fSk2xH7/HxYeWV1teZwCH+qKWl1kiI/Hl324B4N8feod9RwxenqJqjMezHL77w3m/53IagiXvNfFrrAzS/T4KwfmVIYbiAluPTCAoGmctyPd+qgz5/q59w2BcIDFhXLPhcJId7ePc8JttALy0xzhsv3vFccyvLyUQ9HPmqnoAkwem4/YYh/DIMaYYJ8wu5fBIkhGbYEeuEFRUjS8+dpCL79vJts4IAdM0u6zAw/fONRTRckRzHcN0Ni4agjBHwgJJUSUtKpaNSmLK+tEVBSWTRhRE4lmFgMdFScBjSe43DSe5d2sP3zxzNh9ZVsW2zgg7emL854ZOPrykkouWVCKrOpKiWclArqMrqvmWCYNxkfFwClXIoikKA+E0FyyqIC4oqBqIis5YKj9RGU1JpCSNI0dGkNMplEyatTMKOb6ukDUzi1hpTkPe7YvRMZ5mIiYY+ElNQxREMukMmVQGSZQsk2pRVPjhK+30mpzeuz+zmjf/41xOXlSJ15y0aZqGrh19QB/uM4ogVVUJm4Xv/BlFRrGg67icDoPLZ3IVfW4nZUHDSD3odaIqKl0jSZRMGjmVZOPBES64YzMnfu81Htveyy2P7GOGCR3z2GDdlSaEWtMMCNRYUuCylTV86bQGbr9oAaNJkZ5IFo8ThISRpF5/wRIacoWgrvHty5bz6Btt/G1rO5o5lclNdDRNo6lrnIloChc6Azap89xE5uT55ZQUePLMijvHUlaxcslxM2iwwUkBbjh1Nn+6bjWyIKApMqqQtTr2iqJxYCDBpx/Yw9KaEG1jabonMrzWMs6PXmkHYE9fjA5zgvpC8yhPmlDYsK0QTIsKj+8d4jdvdnPVn/ZYE8TH9w3TG8mwoCrIRDSNw+WiuMpY/5qq8Y0/7yUjKmwyVSEVWSM8lkSQVMZte0KO62fnCrmcDupK/PRHBYbiAvdv7+OpAyPoGBPJhClw9NXTG3A6DAhjwkZHyF1/Hccxpz+5+P76Rk6bW2pNHaZGiVkgiopGTaGPq9fUct26mYwORQ3agKoSKDSK9QOdYRwOBw98+TSCfg/oUFgcQJZVJMGArXb1RykrcDESSePUNQ4PxNjZPs5xNz3Lr180/FO3HRphblWIREZmpWlZcdfzh/nKH3cbptb7BlAyGeM+F/PRDRcfP5OrTp1Nsd+FKopsaR7mrmtWs7jGeI8OTUPVVGtqPDVyheDM0gBOh4NBE6qXlVUe3tZr+cy+1RZGSmfQZIkij0595aRS9XgsS9NAnJGYMaHV1cm1Xljgpbk3gq4ZENUi23V3OBysrC/hub1DHJyiyqhpOo/u6Of0hRXWBE6xNUXqywIMRLI4MBqUAOGBUUZ6DMGf3JS7MmSsp6mTaV3XGU4IlAU8nHfr84yPTv79vea5/bd9xmsNxQWe2DspJFTkd+N0/H2BmQnz2t13xTIWVYWYSEsMT6QhkwAxY3Fg7VEe9JA0DdABdvXFUXX48qmzOKOxjPMWlpORNaqCXg6PpvjZ
lm7+um+Yw6MpPE4HKUnF7zZEwyYyCinJmNpOF4KqoumQUVUmBInxSAZ0nWDQuGZzZ5fzoXOXcOqSanRdZ3vbGK/sN67DBavrjnq91pGkpVCeQ8N43PmQ8+7RJF95eJ/1/1zTo7TAQ38sy1efPkz7WJois9ieWeynOuTD53ay1syJ3C7DB3AkIXKka5y33ulkLCUxkhT5ytMtvGMKuwwnRP6ye4jBmEBdkZ+seU2DXidZWWXPQIKe/qhFCdJUFUUQUNIpNFkmHk3RfLCPeDSJ5Hxve5F/9vigEPwHh5JOQTaJnk2RSWVoH4r+/V/6B0VzX9RKAC+8/TXmf/FJNjcN0TYY54zvv0I4ISDKKt/+yx6UrHHwAKw4eQkz6yusYk/XdZr74+zqGEcSZBRJAlWltvRoz6rykI8/vNzEt/+wjZMX14AiQyrKt371IgDjsSy9YwlUVWNX+yidZvEsyQrjCYPz1DOWRFMV5FSKI0MxfnP9Gh640RAK6RlPE06K1JT4ueZ0g2vyznvw3Z7a0cfdL7YgiRKjcYG/vdOLlBXJJFOQjBjfna6zpL6EFTZBi+vPWZD3Oi6HTkmBh92dExbHwh4r6osZjYvTevhNjUK/h3MWV3HxD57jB396O+8xTdM54eanUQQBhyJRWx6kbyzJwpmlTCQFXtvdA0IaNI3+cIr+8GR39YltXVQW+fB7nPSGM+ztifKr1zrQNJ2kjTcxtzJIfzQ7rf2DPRpNTttzB0fwuBwsqSlk68EBIknjM1aEvIwlJfoixy46h2ICstkl1JX8g7RzJIEiZLnz0R0kVQgWBUlPGBt1LuqLvWiqlsc5efHACN94ogld11lQFUTT4d2eyXWXO7BfbB61eEe5CWhtsY+RhMis0gDFfjdDcZEfnz8Pp8Mw7E1LinVdSgNuhLTBGUMSyGYMcSFdM7qIucmEIits7giTFBU6zQZGVchIgIYTIgurgui6zsuHJ8V1uiNZZpnrR9F0Yubns/NQNF2nq2uUWCxNNiuRTmVQJQklk2YimrFgbV6nA1XXLSgRGBPjiYyEpOrEosY9oqsaAxNpVA2+/mwru0w/0MG4yMO7hxiPpCAVgVTEKjgAHLrO0tllXHbiLHRNJxoXSCRFNFXlV8818dG7tnBwMIEv4KGypgifz4UqCCCkqc6vbcz3prK9ZYRwQsDrdjK7KkR/OMPi2kIUxbCLkGSNYl3kq/dvR9N0hocnSMfjKLJiJJ66zmgsm6eqGElJzMhx+EyioqxqvNsRtqaI6bRM/4TBizlnQQUFHhc3PnaQAwNxhkYSaIqMx+3gkq8/yI13PA2SwHcvW0F9WcH/x957R9dVnen/n9u7erdkSe7YxuAGpppeAwkkgYSEMCGkEkiBEAiBGUJoSWCSACGAKQGTBFNtsAEbF4x770VW77rS7e308/tjH11JmJRZM7/5rqw1ey0vS/cenbPPLu9+y/M+Lzc+uopv/udqtKxg0JxYFeKGhY1j6Pp9TjtLtnXzwsfteTnRO5gk3B/hopmVbLZq6hX6XDQPZLDZbGy882x+dtlUxo8yBF+5aR4NZX4KvK78+5q6nifpKvO7OWdqGYmcSrHfhW6YY6IYr+3q5fa3hOExvnhsztloQ204OnjJCeUUel28saeXa2ZXs60jjm5CQ4kPOSdj0zVcHifkUui6zisftTLp1mU8/u4hctlcHrIoy9oY6OktZzfwg4UN+dyu4Ta+yEdXPMeeTzjWhglmhp19vbEs3R1hVu/twdR00LURw8M0/6Eh2FDi44b548YUlB7dyvxuKoNuJpb6+On5jUwo9YtyOp1hDFUFTUGNhJlWHeCJZXvoj4n9XRR04/Y4eOV7CygOeSx4tMZAPIsHC3quG+xqHuL7z24e88yeaJaaEh/do5x1/2nB4J0YpEfxDRijzmBD0/jL+hbe3trBjsM96LJEW1+Cpp44WUkBOYuhSBiaTvNAKp/bP5SU6Ilm+c/lhwm4rTqE1nfDTNi1xT7u
X3qY36wQToStLdF8pOT3b+/hzB++KmSftc7TkibOXSkDUgaPkso7c4b7fUJ1iB9cMJFLHljDL98QCJ+HrpmJzQaPvtfEC1a+MQj5HM+qXHZSFVdbOeTjRq3b8SV+MorOUFrhUqseZ/hIE9FjzTSW+um23mMYdvzart583dvehMTnF+1AUg0GB5Oo6RTdLX35e4dTMhlZY1NbjC/OrubG0+pE2R4r2mS32Sj2u/IRuuEWyShjjMOhjIJN1ykPuikJuEjkNLoGk2AaYOocGUgfBzstHVW7c7gvPpedk8cVcP28mnxh+2GW2dF9uHBKKSV+F4OjjH5JM+iIHu+cVRSVnKYjqQbji3wohkF/JAXpGP2Hj+IxJNJpAWlfYBF8rd7Txw9e2IGmauxvGWTRt06hyOfAZopxsZkmNmu+baaBoakUjSr7Mb22EE3R6OhP8sH+fi5/dEP+bK0MuYnlNLKqwbuHBrn1zHqqQx5iOVHb9ySLVXl0O9ITR00l0aUcvQkpf7a/trefze1xHlvXxrqWKL/7uCNfmgYEN8PLO3p5enMX0WhK7GuEQ2lYxx2tc8iSynnTxjq9/9Xa/xmC/8tNtxLoTV0cSIu39/GNV/Zw2ROb/+bfKKrO3S9tRfmUsPb/VHt9QwunfH8EErqrRRhLVz20mlN+sow9rUPc/9o+XlnfxurdXfnNAeD1uqgqDyFbcFHTNPnGou150gNdliATo7ejW0QQFAlMgzOmV9EzlObJZXupKQ1gZpOQFYZee28MTJMPdnYw7caX+OPy/by7pTV/cGSyMopmkMqpNPcl0TIZTEPnWE+Mq08dz4WzqnHabXQMpmnpjtHb0k59mZ+6Uj8vrW/7m+Pwi1d3k02lURWVrUfD/Hb5YXRFtpgjTNBELtP8CSWs3t2ZF9Q/unKG+FnOgpylNOCmosDD3o44v11x5LjnzLJgh/v/yfozg4kcH+5o4zdLdrB4zdH850++u19EIDUFM5vkhoUTOGdWLQ9/4wxu+exJoMmg5EDOYOoGb2xux1BVJpd5eX93Dw5ETk3HUJaH3jnCEx+28MUntjDn3tXssSC140sFDK8n/ukR4uE2uVxEyw5bELF0TuHin73NRRYBUFnATVskyw0v7+HP27s/lbxne1MYU1Ugm8TUNJRkgkRSCPB9LYMYikI2I9HT3EUyHGH1jjbB5moaOJx29jUPkMvm8gQVvfEcP3/zIB/s7aN1MEujpTyvOxbB0A0cipovkbHyyCBTKgKUBlz0W7C46gJBP+6w2/jszApaIlkOhzOU+p2ohomkmSiaSWOJj9vPbaTCpoGcAymNocpouSxqOiW8iJoGpomh62RknaX7B2gezDIYTvDSXzbR2SH2XDSr8vqefjpiOb5nMan2xCUmWn3XdDOftyjpJqoFxfbYYfPGw7y3fCcLKn15WQOQyUoU+5wE3Q5008QwIWqxjB7uSfP2wTB9aYVkMoeSk8DQAZMP9vWz5lgUzTCRZY2Bzgh3XzABG4xRQLGMXYfdhtMOz39zPrWFbgzDID6UQZZU0HUOdcXp6U8w0BulvzuCLCnokiSUdSXHQP8QGDqzaoMiAp8TcKSspHHfq8JTXFPsozeWZWatyMkbLpewa28Li5bu4PfvHUGXrFxsSc5H8o1RkQjDMBgaRanvsNnY1hzhJy/v4ocv7gRdz0dYNd3MR22bwmkSOY1ENE3TPhGtc+ky6ZzC9sM9OHSZ/miGd7aI8i5XnmbV1jJNSoIe7rvmJM6eWoqaiKFmswxXtHhw2RE+PBgmlVM5uusQ61Zv46zJI46mc04oZzAlE88qeF2iTlh9yYghOM0y8lvDwslIJg6mYTGaKhT7ndx5qXBWDUd/Vo9yiP3h43ZqCr08dvUMnv/qydx0+kjO9LDxl5RUPrLIVC6dUcm8+iL6kjLnTBphzKsMuDCzKYxkhKGDB0CVQVWQJAmnBYU1dAM5J6MqKoqsj4HIBT1OFk46noGvrthHJKOyqTVGdYGH
n1mRuymWzFl5VBjvW3e28ua727nugeUgZ8Q4aCpKMoEqSYTTMt3/QI4Nt2hW5dVdvWPgZG6nnQcun8JdF0wk6HZw9wubONhu5csbOqgympQjMRQlmpJ5/v2DZCQVPZejOuRiXkMxn5krDBdFUSj12ugcSHLOiTVgGhzsiHK0J8HD18/jvBOr+drCCYJEI+CiJ5rl53/dy++s8+Tk+iJ0RcVtqvl8TNM0sSkiaqFlM7T1xrjhd+vJSjIoEoamcf6973O0KyrOqlxKGIwb2jnznpUAzPrJCubf9T6/XnaYD60c1OGC9N3RDM5sit9/ReSqtg8J1un2oSyaLGFXMvzsS/PweZxoUk7ARVUN0zQF06KSAyWHLMlcf+4k4ikJhx1smCyYUMzb27rY1xnnqVVNZCSNykIvU6tCbDoW4cF3jtA2mOHHf97L+xZz8Oz6Iu757Al88ZTavLEKMKu2ABvwyHtNXDCtnL98fY4wsDA5erAtX8ajzjKc3j88yHf+KvgD/nNNK7GsMGI37RUQW0MflW+WUtjdnUA3TE6fUEK1Bf8ejTIp8bvGGH26YfLz5Ue59Y2D7LWcGftah1ixbBuvrT+WL+1wpDMmnOGaQjghHRdVHCYuaovm+OuuPnb1JMfksw4TqdQX+bhh3ji+dFIVpdZn4wq9goAlLnHl9ErcDsEi3RnL5VOWNE2nZSBLV1yluTvOnqZ+DvSnRFH2oZQ1hiCn08SHkpxZ7eNni0Vk2jBFProsyXy4q4svP7qWnnCSdFpCkRWikSSpWAoTk8FoGi2bpb8/mndePPY1QQ4kKzo3v7ibQ11xUR9WN9BH7UGHXZSbaSjx0R7LkdMMaj/hOOpNSrz8sdDzDFVlb1uUdgt5kZQ0ntvaTSynMXtcAdGsmmdg1XUDt02kETQ39yNlhPOlKODGtIjRUEVeuM/vZrlFZja56Pg60P9K7f8Mwf/lZqoiWoBhoEk5drREWLO9m4NHB+kY/HQY4ZceWcVjS7Zz8x/W///Wrzue/Vh4O3Ii8mWoCugajlHJ47tah1h/sE8YdkrOujbC9mVr6DnSgqmKA8kwDCSLHMHlcmDIIwdvpUcFSTxj45YDHG0P0x/NcvPlM9m8XwhdDF0InFSEZZuEsvXyh4d5ceUh6goclLlUwlbUYjApsat5UBz42QRDcSHgnQ47taV+2gczbN5+iEQ4zGk3L+aMySVsb4lwrP94EhdZ1TnW0ouSSqGkkrT3JWjtGsJUZIodcv6wnTYuxLINR/nMPcuwSxlqCxw88+4+FtSH8oagy2bgdtiY3VDMllE5OBlJ4/UtnUy2csGGC8wPk3r8rfbe1lbrHeMs2TBSQ/CPS3cJL6sqDqE31uzjgRtO4YZ7/8LFJ9dwwcm1+TE1TZPXN7WjpZMcbOqirsxPPJVD1QwSOTUPvxn+f7g2zjAlf1f07ytQpQFXPh9jRnWIzYd6Qc5ysDXMYCI3pl7gs5s6eWGLqEGUU/V8NGTj3i5LeVOEEQtouRzRlEx3OAGZOH6fC13TScdGjGhNVslmRhEtWDDldUcGCfcniA6lWXM4THWhF4/Tzr6eJG37mtj47jr2d0ZpHcpwdCDNgoZiKkMewpYhWFUgoGcZWeO0hiLsNnj8404GU1oepqmoOu3HemntjnKgZTDfb1RFvMfwvCoSpCIYqoKm6bQOZWkZytDVOoCZTRPtE0p2X1JmW2ecWTUhLphaRoHXSU9CosgnYKqaRfbSGc2QkzX++IcVfLByL70DKVAVDE3lrQ0toGs4TRV0DU3TWHlogIZSH7GsiGLGrOjIwcEk/WmZaE7hTy+tFespHQMlx1BU7LN5dQVs3tHOR7s6WLK+ha/OrSGbTIt8Fl0TCIFMmlKXTiYjMfPWN3no9b147CYD0QyaqpNJZ1EzaYxRbIZ9XRGSyayAQ1nNnkuy71C7ta4lkNLMqi9iW5OIkApDMMcUqz6mqhoipyuXglyKR9/YbTk/siLSmIlDOoauaWiqZjGW
5ugeTGEaBgGPg28ubOBzv/6IJZs7MQyheIyGmUctZ8HurgSKpDDUFwFNxaZK5JJJ7rj+LL79ufmYuk77QII3NjRTV+TmwpOF0m8agvBGUjS8pgLpGGY6TiancMEMEbH4w+oW5ty7GjOXAkOnvVMQHmUSKdq7xdpo6k8Tyyhc99RW9ncdnwe7bl+PkEGGDlIWXVWRczK90RzlIQ/VhV5aBjOjSiOIvX3VSVW8eP3JTCn38/rHx5hswekUWc0bgj956xAvbOmk5WAHh1oGKA+6iWaUMUXph6Jp0MdCzRw2E0xIxASDq2kpjIos8mnb/wERleinUPQODaSZWhFg3vgibj6znqvuep2O1n6W7O6jLynTfqRdrANVFnsPhGINaLKMywZv7B0h2MkqOssPhj+VOXLFwTCv7u7jVx+2HPcdQFt/ksde+ZhTb/5zfo0Ny+G+SIpTp5Rz/yvbKPvC03T2xRgIC3n1s88Khmo1laR/IMrlc2vZtadJoE0Mg7qyADNrC5hU5mHRO7vRFQW/20E4KfP82hYeWSoit58/ZRyxaBxV06lwKyAJKLgsCeZSTCMfwVAyGeGcsiIZpizl5dLwfoxllDH5mkCeOG44Wrj3aD/th5q46+k1XDm7mr64RFN/SqA4cmkMKYcXlV9/8ywrKq1Z/TFRsjnQ1Twd78vLtoKcZXypH7fDRnN/ik1Ng0yoDGKacPviXSzd3s3UUYzav17RxDu7+/j9qmYKfU7e3thCNCUT8jiIpkcImabXFHD9aXWsPTpEVtFwYVpOCZnm1n6aWgcZSsvc9Kfd+X2elnWiGSWfP9vT1s/2/V15fQ1EHc6BlMz2jjg+l50Z1aF89HoYXvr85k5k1cjLjGODGb74wi4GUoI45/4PjtGbkNh5oAtSUb7x6/d48f1DpFIS7f0xyCUgFUGSVCKfNASLvATdDt4/MsSHxyLEc9qY+n2l1l5MSBozKoPMqS3kljPquXxaORNL/cyqEmM5kFZoKPajG4I0pj0q+rq/J0PTUJpjg2mWrtzPS69tRZdUjoQzwiGr5PAUCid2LJbih79dRWooii5LlBb5wARdVfFZcN3hwICmagR9Lmw2yKayuBA6nqpq5DI5Am47freD80+stpwGMnJOYXdbjLtfO8C9Sw7kAyEZi4Rn2HgDKHDbePiv2+mL50hJGr/9qIOhoWReFuw6OsCRcGaMjlXgdXJ6o0AUeJ2CYObZx99i3GcfZfHrW9m66TCGouAyVdLRCEE32CSB6ENTeP7mMzgwKGTXnrYI/8rt/wzB/+2WS4KchlwSQ1HojWZIJ8W/y367hW8u3n3cn6za0QrpGG9ubuXyX67kzpd35AV0x2CaO/+887/VpWRWoa9vUBxkchY04aVXMhmRA2gaqOk0R7tibDvUhR4Ljyhp1kbv64+CkhVQnFGbLRDyYkhZyMQocOoMDCWF8mh56VFlyKW4+4nlYOpC6GZi+cggug6qxN6WMNl0lvbOfoYiCbp7o5i6TiQps35PR97z3D8wAvmrLw/QMZghl0pDNkFbxwDPL98DQPcnyiU89fZO/rz2KKaUsRQpg46BBPJAD6SixOJp0S8pzeBggoFIiq+cMxlZ1WntivDo6zv5eH93XgFt6xrkWE+MmeOLONidyI/JL1/by02PvMuGIwN4XXaiGQVZ0QiefS/3P7fmb87Rk29sEYeoptHWO0Ig0dU9IOZCyUImTlN7mDO++QzJtMSN979JOpmEdASUHA6bIUp+ZOKQSXByXQHheI54WrI8erBwFMShy4Jw1lrKYnfs0w3BnKzlCynfdPp4Lp1ewWdnVfHKqoOWYZzhj8sP5JkAT6gKctbEEl7Z3k3bUIbL/rCVe5eLKOfhIx1iXeiaMKSs6HHjt5cQi8ZBkcj29zKhzMOcxhLm1voExEnX0RSNi2ZWWKQJBqpusGpfP5osoakqy3b2WJEU8T6DLR2gqcSGknzjlb0YJsyvL6KywMOA5d0drm/Uk5BxOexc
ZJUgiGZFgXFJM1n1ziYWv7mR8257jV2Hu601LUhOSMcgOSQOD0kUGzcVmellftqjWZqHsgx1doOURk0J50RTOENnNEdjkYfP3PsO5KQ8fG5KuR/NEEyee3vS7NzTBlKaY/ubee3tbeK5UpbNB7ogm0JLJSCbxNA0nl55jDMaBGmEpBrEJJV1TREGLUhoNJHLG4CYJsg5fKaOyw4XTChGTWdQ41FW7Ohk4aQS1GwWssI5YVpz1T2YwmvTKfS7qSryYWdYEVDRMilMKYuWTmFk06gWZNKUMkJpl9JCWdVUsdaVnFjXmkJVoZejvQkiSYmaYj+DyRH6esMwqfIYeeU/MhiFrDAKTXWYfEFDUxSknISqqmi5HJqicNEv13DliRU8uuwQuq6jyAqZeJJ0Vsp7qg3DzNcRe2PtQXa9v4GhY62QTWDm0kyoKWb1R7vo7u7HUBVW7Whn+8Fuujp7ueWRtyCbYFqVn7b+JHU3vcqBlv78u8ZSOZ742my+eU4jh4drcMoZSEd4dvlenrlxDrGOTpZ/uAtNUXlyVQtPrW5he2uMO5ccoMDj4KsL6vJ7ce3eLjGWchZUAceVchJ9UTHW02tCHOlP5WsSnt5YzFvfnM+t50zA5bBz65Nr+bcHlpFLZsj0K91KfgAAIABJREFUD7Fr1VaaOgb5/bpWmsIZYRg2NfP1+9/CZ0UUEpLGWROKmVjmZ/HKA6BI2E2NoqAHpDSmoREsDBIK+TBNEzURF2Q2qoom6xz8FKdcczjN4VGf1xWNwP5qrZ//sq6J2FCUtkPtgMiXUjJpcS7l0sIhI6XFupCzmIZOTciVJ44wTZNfrW7huS1d3Pb2YTIWiuBL9y9l3rdfYOnmFta+v4stzUNj8iSH29sbj4GUQU9FxfOUHKgytqxwZJ03a1Txc10jllG48fGPCXic3H7FNMzEEKSiLF+7h2RGAl3jrBPK+f5Fk7norrd44u09oAnG7b7IWCU26IL7XtoCco5ZVR7CkZSI+OWy4uzMpYRD15KNyAIVIsbBwJRzYu/KGUx9BMLZGk6jKwq6qpCJRhmI53A5bAylBdFOW3svqDKbd7dQUyzy8J78sIV4Xzjv+Hritc2s+PgQGEaecfG0icWo2YzoWyaaj1ij5PA7baiqzqp9/SSyKj++fBrnTK9g6fZuvrtoWz4vE0Td0eHW09zB3Ys+ouHrL/PYW/uIxzKkRkF/T7RyxtqHsmw82C30LjkDuRSJaJJFH3ewvydJfaE3H1V7fksXim5w+/kT0cNhSEUs2SyMw5nVIWJZlXXHIsyqCfHSykMEXcKp0peU0Q2TJ1fsZ8uRPiIZlQdXNvPTZSKCu6ChiF9cNhXDFOV0jh7tBE1By6R588P9HD3URX//yNmeSeeOc1DYbTamVgTyxCapeIoXF6/lB0+u5lhPjMff3IUNc0wk0e92sHBiSR6yWup30RLJUl3gwQSymk5PXEJRVHqSOaKSQlxWaT/ajpGMsmT5XuqLfeQSSZAzyN0tYm/pGrIidDlT08imc+SyOfT+DhLtbWhZgYbRZQlNlsnE4hQ4dEzDEKz5qahYl0A4kmLubUuFDgj5Uim726O8tqWTbCpHzhqL9liOZzd3MX1UvczFy/dy39MrmXDdM5x+11KWvLOLtqOdlj6WY2AwSXNfgteW7iYTSfGjhQ3cfcEESiwjOpyW+daCOtRUnFw8RiYSseY+g5pKoMkyyURaOFDkLMg5Pn/P2/z09ysxVZmtf4MN/V+l/W26wv9Cs9lslwC/AxzAItM0H/7E9x7gJWAuEAGuNU2z3fruLuAbgA7caprmB//MPf8VWzhmGU8gvJRyjv6BFOm+XlAkoh43H+sGkqwy7epfEcmofPTMd9DiEZBSZKMR1mzJsW6niz9+cIQVd53D+bcvAYeTtkiOV28585/uyxurD9AXS9MbV6irLBSb0hLMmAa4/ZQWB4hkrJwyRSLjciINJoRiO1ywydApLC/Bg42EoiLncthdQslw
2lR6j7Va8C6dZCwGLu/IGNgsP4QmW3CIUVh1QwcpBQ6n2HhuP6YLvHoWWdUxvD50WWIwKXHgUKsw0mx2ovEgBztjzBhfTEN5QNDXJyJCscwlkJMuvMUl9I/KzRuIpvnxw0vAYXm2TVPMh9MOiQEwdJxOB5ppgCoxEElCJs4rb28At1cYq4YGdqc4aEBAO2IZplSHSGRV/rKxg+vObOD1FVuQe9p54vUdlAaLiKYVNuxphWych55ezr03nc9n7/wrX7pgJl++YGa+j4cPNgvl1uEkMjQSCVATETE+NlGUN2BTyGhOHGqG8JBGf9+ApdRnKfbaGcpaRoppsH1/B6bpJifrBH0mNpuNu688gfVHP8Y0R2i7i3wugh7ncRHBnUd6SGZVLvvpEj539jT+cs+VXHFiFVecCK+vOcBbK3eISI/Ly9KNzSyY0wAIiOgXZ9fwcUuUG1/ZC8DmthhZRSMTHhBKAqZYh1JarJnCYtREDLJxnD4/rS3dVJQECA/Gwe7AKCjG0FXeXbMHHC50j5s73zjIio8PkmpuAm+Qo4VCgTxvShlH+lNCKTE0apwKKaAAjW/d/yamz4+/fhyGadJo1eh673AYl6OSq0+sZOHEEu59/xiyZpKMJoi0tYnxV2SyOeeIEyOXFt5dp9vyhOtifbg92A2deE4nGUuiJ6OgyaQSAdwOG6ubhug42sHtb60jnVOw7wpwxefP4OOWKBVBt1iemqgFuP2jnWK83F5h9EhivHG5QUpjMzVMuwtTkdl1LMy8ugLWtQh216Si0xLLkJJ14lmVg019whAcbt4gfeEknj0dzHlzF2pkAFJRdh10c/OzW1GSo65NRYQB4vJiOEp45Ctn8/CSnfRGJV6+/XxueXw1maR1WLp96C4vYOCwm6gZy1kxeu/r6ois0HVcNgPThAnfeY27r5ktbmPR4Bq6zp5tB4RiC5iegJAHmGLsrfkwVBUjPgiyULq1ojIIBXl65RGyXa1oNhfYHGAaSIBRIth6DcPMe/dXrdgI2Qw43KDmwOGkszlGi6bDwQ4hr4wSXFoONZsgGAqQVnQONPeDw4k7VEBra4/1XhqqJGTYqRNLeHadBVm35OP+wx1MLvdDQkQG05FyNjW7WH+oj8G9uymeOo0fXjudH/9iMcs/aKQzIhFNZMX+BrDZMFUF0+NFysnEsyrTqoKsPjzIZL9bMDuv281Un8658yYC8MbKXZBKsGTtIXoOHoN4P51HOnirIIimavS1dIu8UJeX7zy8ghkLpjKUVvjRuRPYvK+D8x5ZC1IKA4inxXzomRTv3H4mk6oLqfnayxAfwHS60RxOssUhjoUzbGiJcHJtIUGPkx8/toynXtuEu6ScZ+67lmvn1VIRcuNx2slkZZ5b/CH+7Cnc/8I6yMQx7EJpXHuwR5xNwxTvqizWkZyGnA5qCJsGvVkJVRcR9T/9ZR2N1UWYkxpYsruPry+oY+m7G8HQOdg2BIrE9k12tp43ictnVPDBgT4qC32cXFfEe5uahFHj9guHhdVMACnNh1uOsuqBK/jKL98h3DuEaZbwxuZ2Cv1uvnh6vdhrpsHcWRPp6gkTTihoksy/v/ixWLOqKs6VQCHLd3Ti9XlxupycfUIF76zYhGGdqzv3RcVatDstOHUS3D5xltvtaIYp5kxXhWKbTgmZYQg4timPRNI2Hx0k2bQfsIGp89f3FRqnNNIxlOXd3b1kh8KQiZI1dcqsvOZ1h8PI/V1C1ukaAWchz761FWwOdJcLm8PB+BIfXjlJ3jwxNEhHsfkL2H2kB1cwRMDtRopFuPmBJVx+/hwuObma9/f05R2Rw83nhEQqR7yrAwBNKyAtyzi8XhY++BGr7zybkoCbRstR1DqY5Z21B60IqAmajJxMsrlpkNatO7jqxov5+dWzuOPtQyw/MEDn3sM80dFCX2fPiK5it9NzpIWjZhJsBSRyKq8sXs3QUIK6+nGUT2ugJyHRGclw6MMN4HJTc8OV7OhKEPI4uOP8
icyoDqEbJh6nnaZwhvRgWMgsQ8Pm9qCmsyipEedHKpHJQ9I/2NLEB5uaePRHl3PR1DJaI1liOY0NK7cQ7ezi2NEOnnl3P2gKl1+xgG6rVvBwi6dy9EVSnNBQwaQyP1s7E6I2om6gaCYxWeFwOEc4lSUjq/j9bsxMAgyV3fvb6GiZLJwsllwhl6JsXA0pzYYaj2J4gsSSkkgZSkfA4cRwukDXMFwe0FV0KcNAutCSyXGht9gd6EohhqZidzpZtrkVr8+HrsookUH2dZSSjsbJDoUpPW08tcUCDrq1M8E3FtTy/TPGc7A7xtcefyMfqWs+JBzm6Jo4B1xecukcie4h1IEeVn+scfflU7n2qQ1sO9LPxEmVDMysQJJV0ScQ57kqgabicpiokgROl1jjqiy+9wVRsgJufM3Cifwrt/+2IWiz2RzAk8CFQDew3WazLTNN89Coy74BxEzTnGSz2b4EPAJca7PZpgNfAmYANcCHNpttmHXjH93zX6LdsmgjB5r7+PY5jXz3l0vy0BFUCRID5NIVEG4HQyeuyWjjJ/Cnd7Yx0CuSk6++/U+QDAshFhsAmw3D5UUOFHHpXW9Afys4XLy/zg2jDMHdh7toGFdKcYGfpvYBvvvLVwnHMuiGwc6//ISv3rFICGOnWyhc0ie8sol+Iik7bn8ARdFAlTAwMVKWUTWqZeNJEooqlCBPIYauYddzDO7bJbyEADY7hUEPidQoY8I0LEEovEAFQS/J0XlHqiyUINOAXIrcqGrBRjqG6Q7QMZgmNdCbv5+ZTXHaT5ex+f4LyUSGiGeUkf7KGUg7UKRqBuIS4WiKopCP//zzhhEDbrjJDnTMfN/GUN+nonmlE0kIuzGVjAESYXS3N091fPvLuzhtShk9La2gymzYvI8LPnchkbTC4nes2oaawiW3LOKjjftYuWYHU8ffxuzJVby7br/wpgLoGul4nHueeJcnX90wIrgs5SeTtqCmgMNIjaqKrWLXFPRIOA+Z6mztgNIavD4vs2oLMLqbOevaB7j35s9w26NL2S2fApxC72CCOouOe7g99/YWvv+LP1tU4DbeWZmEe64E4I+vbeBHD746Mia6ytGWbk6pL2JCyI67v5PxBfXcc8kU7ltxhB2vvUOgrIRb0r1CQXE4LaUlLd5LV1GScSHoEYXHsUmEh9eOzc6F08tY89EOjFQSXB6UQIBn//AaasLysCo5wr1Cob765Gr+tHSLWP9AvH+ILy48kbvvWYSay4HLQ1VaJpadT3nQTcDtYNWBPlYfGmDZdxfgtRlU++3kTDsr3h+1dtTciHNjeL1Z85qHqQFIWTSrCP32d1bl954cj6APRWiJuzm4Zr1YV3YnhhSgqXmA+yMJzpwhoL6SbhCOZ8kMWoa+NIpiXbcgm6pkzUAO/AWk0xJH+lOUBVw0WSIlkpF568317Nl+GJfHO3Ydy2kGo0nCHW2Qy1g5UBJabJAXl++CzKj81mxCzJWcIasr3Hjvn4UiGiph8bpjDDUfGVmrWcs4NnSkuGuMEj28XsR4DjvNFFKpDOfMrGLt/j7e2CSg0YpVEyt6cC9SeIS1T+xN6z3S0fw8G1IWhkYIUnRD56JL5/PG2+vQIn1iLTtcYJpoqoxeXUr6aAu+xskMpRXaeyMj62lYjusa554xnUd+fDWP/ulDFi/bApkEaiYKpkk6PiwnUuDyosZdmMNrwdAwpCw90Rwn1RagKzL9m0fB/3MpzvnRq9a4mWTD/RSUl5BobUZPDDK0O8WDz+Xo7eqlt6tXGAJO4YSz2cBUspAYQo32I084gcGUnIfTpiWV5g1b2drSynsfbOWP91zLH9/YghzuAUzeWbefeHgANIXM0CCHVq0jNTAkctuH9+RQH0f3uIl8YRaGYXDpt38PVokM77hGpJ5hw1bhjFv/SrnXQB+KjxiqQC5RDYzjnnePUlPoYdF1J7NoyUcgyygDErf9dgVb5o1jekMFtUVBFr3wOlIiwZp1u/JrQzENygIu
Vq3dMWIE5veDbeRnQyeVkTGw0x3Pce2Pn6P/QDP9B50UH+mk0Hc+182uGlmP0V7QNTKmztKtrby4+H3eXb0PRyDIuie+zqYN2633G+sgCxaGSCdSbN95mAu/2SQcnKYBdjv2omKeX93ER9uP5tflzr0jTNof726F+IAlL4blp4ZZVIqu62QO76QlWYYR6cmfTSNNsaDaKgxHkhJhYXBaz0LOCH1DHXXOKlmUcC+G18vPnl074sgC9FiYwUgZB7q9rNu0HyMxlL/P7b9eirO0HCUyMMaR09baJY4FhxuUHJrdyfZj48gmjocym9kkhCowdB0lnUJq2Y9kmrzyapS3fncTSz86hKrq/O7KOr7963dRXD66O9rwFxaIvQ35vai7/STGlfP2jh5uXNhIbbEPp91G61CGzbtG8urJxFHcPjas3oLU08FTzy/nlgsn85W5NWw+FqZ3zx56P5mmkbPRtWUrz2+BWVddQde+Q8RahBzqasrQPRDltKmVPPTCarHXFXHuV7kMLqp2ct9jr/Px7jaCQR8XX3Mxy/f1YKSGZUMaU9dImRohh8awNExGk8Stebz61j9iqApzplbz1cvn8tBlk7nkRy8Q6+gYMXgUwRWwfu1u6saXi+i7pvPgsx/wynu76e4d5NyzZ/PCL77M1s4Eh7sijCsrIKPoBN0a6/e08R+/eY1sJktRVeXIuk5H+Pkz6yAnfi+dNZ/I3m0MdfdaDi0VfBJmqBAjMZhfsyTCwsHvdIn1P4xUcDhGzgJdRbf6rztcGFUNGIqC1t0Ecpbt+4tJt4izo/1oA6tfXcYZl56No7yCpKRRV+hlzmd+S8bK5UMWSIsxQQVVIptIsW1zL8R6SGXjzL/hCQttZHIkGaPnsul8uHWEnC2vD+cSaA6HcPZnk8LABFAyMNQpBK3dyZp9PVxzav1x6/tfpdn+Xl7SP3UDm+004D9M07zY+v0uANM0Hxp1zQfWNZttNpsT6AfKgTtHXzt8nfVnf/een9bmzZtn7tix47/1Pv/TzTfvh2CaOB02wazncGKz2wWU6tOaN8D08SUcauoa9aGN4wyNT37u9hFb/yA7DnTgcjk454ZHsdtsbHzlDq764dP0h0eEsNfrzudQ/b1neNxOEfofbg6nZfSAzeHA6fVSMXUKl104h85NW/ngY8H0ZauagNnfOuZeuL289sjXuP7OF4TnBRhfU0Zn7xA2u50Db99DbVUxhaf8EIAVf/w+l33niTG3qCwrZOkT3+WK7/2BwWgSXD7sBcUYkV68RSVI8djx71DTgNzbjs0bJOjQSGUkCBRy7mkzWL9mMza7fYyR53K7mD+9jk17WvNRtvPOmc8Tt13Oyo2H+OHDSwCwOxx88aLZvPre31lvwVKuvfBE2nuj7DjQTmNFkOY2S2G1O/nSt68jltXYu2IF/YPHH5IAoZCfVMoScoFiIfCHvb1WswWKMDNxvv+Vc3G7HOw+1MV3v7SQZWv3sm1/O6ec2MDid7ZSUF1LcmgQNBWPyy7m1unBO+UkfnrFNO575KUxz3YWlPDvXz+be373Npd99jyG/OW88e05PP3Xj3h40QdjjWObndceu4kz505i8qX3ks7KFJZXoOAgF+4Fp4cffO18Wtp6eHetiAKOqyymZxSUd7i5J57ImROCrFl1PIGSzRfkp9+8hMeeehu/182sqbWs39GEzRvAlDLY7XYB6QuViijVmPkoZu/iWwj5PVx+81McPiaMgoLiQsaVBjnc3IPN5cZUFRyFJbzwy68SMlWeOphm7Z+WoEoyd33rUp7660fkFI2Tzj6Fbas2gsONvaQSIyxyXJ1lNWjR8Ijy9cnm8nHZZ86mrMTLS88tBW9QzKel2BVWVZDoD4PLY8EadTzl1ciDfUyY1shpn7sYuyazesn79Hb2CQPgU54VrKll5ryT2LJsOXgCOCrHc/oJFRTYJAZNDwVeO1vXbCeVGusEcddNwmWHTEez6NtoI/OTzeE6zjH0iRkDj4Dv
OkJFlHlMBoaOJ0iaMa2e9c//gG/9x8u8sXI3F50xnZUbR/n9XF5OnFDGgaYeCBRSMHUWv/7qHH7+lx2Et20AuwNCpQT0NJn0WMOyIOgTdTY9/hEFJN89Wz5Xyul05tkssTvw108k29ZEQd14zpjdyMEdB+jsjYzcx+Fi+mmnsPPxLwOweU8r5339Meu+dja98hP+vGI7Tyz+NMi3kLe2kmq+97m5PPfKB3hCIRJD0U9cNtI/XF68TgTpRn5cPCNG6ajP1i36Pufc8OjIbQpKmVkTxDQ05AmzmDO1iiVPvDSGyv9vNqcrn2d3XPP4+dPj32fzmi388a/rwRfCUVZDSX09O+8/n/rz7hTRptHvMarZy2q57fozeeb5d8hIGuNry2lt6Qa7XTixLBkMcM/93+X+e57i+LPKxvmXncG2ncdIDQzgHDcZrecYNpeHyoY6+o9ZJTjsTi79ylVQGKRkoI1XXltnGf46GAbeklJmjS9i257jcwKH5UJ+iF1OVFUb0xdPxTjsWo7rvnwRb7/yDpH4p5fcsdntYkxMk+/822Vcf+FMWruHuP6nz+evqasupatvrPyqm9hIV8sw0Zl4bijg5Zn7vsrW/e389k//tbrEUybV0dTc9Y/3uMtD4YTJJI4eEPPhcI1VtK3WcPLJrHzo8/xl+TZaugZ5aemW/HcFM+eTPLAd3D6euOOz+DxufvDQq6SH6/NWNuJQs+jRgTF6BkDjxPG0tXQe3y+bnfG15XR2jS3mXVRRjplLc9MXzmRtp8T8udN4/rcvoho26upr6GppB48fGwamLOEJFVIRsGO321DdPnrbe/L3coaKBLzebs/LZ38oKFjEHS4KQz4S8STY7JzyuYvZsWylgOLa7Pz2gZu48z9eQFI+sXdsNnyFBeTiCVwFxQRMiXgqB043Tocdzcrj9NZN5sHbrqBt+24ef2UtAA6nkzXP/5AX3trEi29ZDOJ2Z57cS3TazXW3fZ3KwTaefHkNijr2bPjCJfOprCnlyeffp3HSeE6eP43EUIw1q7Z/6vRbnRb3d7ioOfsCwhtW5Zmixda24XC7BbEeUFNTTm/v4Jg7hEKB/Fljd7sxVG3EcfNPNJfPj5oT8tsb9DOjsZLGmhJe/2DniBNP+xt69afJ/vyr2Tnn2s9QmY3w6rKNx309+dLLGNq9g1i/cCS7/H6RFjGqNc6czqGXv/dPv8v/VrPZbDtN05z3D6/7HzAEvwBcYprmTdbv1wOnmqb5/VHXHLCu6bZ+bwFORRh9W0zTXGx9/hzwnvVnf/eeo+79LeBbAOPHj5/b0dHx33qf/+nmO+veEc9VURXuqjqK9SgDx0YdNja7WMijPXwunxA+cgZKa7Fl45i5NFMWnsXdl07mhp+9JJQ/t08IzlyKiXVltHT9A6yy3SGe43Bzw5cv5M1tXaTD/bhCReiJCHoyQqCggMsXzuDxO65m39Fubnt6HQePdKKnhNLurJnIzNNORrfZCARcXD2nhgvrfMy++pfW+1iHvtsHwRKI9mALlZBd/wsUVcMwTMLRFD9/YjmvrdjKxIYqDrz1cwAOtfQRS2Y5Y/ZEfLPFdF910TxWbjzIqkU/YPa0Wl5fuZPrf/rCmNc6cf5sDu45KEhuPqUVNE5lw39ey6yr7h9RSGx2MXamCU4PF3/mXJb89ALcLocw4HUNbHaW/OEHXLFgIoqqUXr6bWiazuXnzeH1R2+k5vy7iUUTtK18gKde38iLb21myW9u5JwbHuNTjXe3D5weyMYJhIKMmz2HpvXrBbyooExAUe1OguNqSXe1j1XyysZDdNgTbANvAOQs3oYT2PrY1dTXlOBxH1//SlZUik790cjSChay7ulvc/PDb7JnfzOO4koaSr20NHfiKShCTlnRHZsNu01A4zweNzZ/ACkWE9Nrd0GojPHjq0hHBon2dI99aKiUGXNPoqTQj9zVzLZdo5lTbTh8fkE6ZJpjD4OyegLj6nj4+jnc8pMnxn7ncFFwwkls
fPTzeE2NopCgbi89/XZM0wC7k+ce+gbfuGvRyF6yO6GwQnisP6kwfwLSha8AR81E9I6DoCnY7bbj6lKK7tvE2rHmwVY2jkDNeFzZCJl4Al/DNDJHd1sKhJPG8y9gsOkYdrebVFcHpjTqmTb7yLyONqjcPlwN03ng+nncce8zY5ToYFEBpmGQTWcxHR7wh4T31RfEU10PUgZfaTlV9TXMaShmyaJX0aRPz++0OV3Y/CEMX5GIFNgdVJ+2EF3XCG9eZ42hg/GN4+ls78YeCOEyNYsFVMFT24jcKRTt+rMXAtC5eTP+8koCpaWED+wFbOD2cdKlF/DurafR0RvhN39ag8/r4nDrAF+6dA5fuXw+ZUVBJFnl9Q928sVL5nLLA69y3qlT+frdfxqzdsDE7nLhANRh9uLiGpxl1fzhxjl8554XMUyT7X+9gx898jq/v/OLzL3moTHFvo8zYB0uYht/xVOvrueFFXs5drQVm9M9Er0bbp4gtsrxmOEO3DUTefGOS7nqVJGjp+sGJaf/GEXRmNhYw4E3f8ZgLMX48+46btw9JRXI0TB4g3hRUNWRUg8AgXENONQcyfAA2OzYfEERPRm9biz4HgAuL7biSsx4mGBtA4Nv3EzBgttQ5eMVdrvTgTlMTuXxCyeE20/9xHr6U6qICn6a0uQrEJ5ym4OCWaeSPLIHpAz19VV0dPSL/lRPwF9eRWlZAU2PXk5w3g/y7zVz+gQOtw0IluPhfe32Uei1kc4q6NhHFLnCShyBEHpfW/4dhx2TnvJxBD12IuGhMdDf4Xn0nTCXsoCJbHo4bUopSxe/lf+6tK4OKZkgk0gKOVxaw8VnnMDq99ejZUegbyVTZ2LXZLLOINmmvXnkwZhz2lsAhaUw2ClSJE6cx7jJ9Xz/vAlcfkIJ4UiSw6391FUVcf5NT+TLx+AUUDkcTna/eTfT6kqRZJXKs+9AUVQaG8dx4PWfsvNQJyu3HuOXTywdOw8uL4RK2f7sjdRXFhIKeAlHUtRfINbZtGkTOHJkrCM2WFjImmdv5pRrHsx/dvetn2fRq+sYGPiEw6ygHJKDjDZy7S5RlofiGuwFRdjTEbTBEYOJknHc+m8X8sj1Quc0DIPA3FuPW0K2khqyq38GwJ4jXZz25UfEF8N6id3JORefzcb1O1Cl3Kc7mfyFIGexVU+g9aVv8O1/X8zKj/eNeoiFyrDWWKikmFQ0Bv5CSqefRGT/jr9pFNjtdgybg8suPZMVa3dTPGUG8eZDmKlPOCxtdihv4I5/W8iil94j2m8RENlsIhKqyZwxeyKb9rZh9wZwFxSR0+H0yWVs2rRHjKvLy0lnnc7Tt5zLgut+lUe85CHxNjvlFUUMDkTFmvEXQmpw1FlgzU+whNopk+nt7MGhZFGTUXyhALlUBpvdgWk5Civq6wl3dv1tx53DCQUVXPu5Bbzx+ho0Web0Ky/EmY2xe9thUrEEeIPUnnYGt8wJ8O+PL+WayxfQWFPMfb9/a8yt/vzU7WzZvIfecILXV+4GQ2P2+WfR0dJFtKODExaeSfvhZnIDI2U5aiZNpLf5E3qxlaaEmvtUR5KYNAf+hqkNWOBiAAAgAElEQVQESkrxq2k69u4Gp5uy6SehA7H9O8V9HE5xr1wKnG4cRaWUFAYZbGka+8xgCaSGcJRUUVRbS+Wker40p4JHfvUypsPFwqsu5L3nRUCg/cMHabjoHkJBP+GP/m6c6v9J+2cNwf8JsphPK7jzyRn7W9f8Vz8//kPTfMY0zXmmac4rLy//ux39f9FcpZVCyHkC2EIl+IMBHr7tKpY9ebMQGk4PFNdw+/e+ACWjinB6g9iqJ0DVRDxVtYw7dQElJ83nN99cyDUXz8HXOA38RRAqw1koaMZbuoaY3FjDhPoq8IbEP4dLePwCJeArhIIK8cxgES0JE9nhw1XdSOOk8QSnnEhg2hye+dV3ePG+6xjI6BzLuIgW1lEy42TwBMAbpGpCA7Jh4vE4cDrsnFgVZFpj
FZFNj+WhVdgdUFTFlHkzobyBGWfNB8DtcuL1uBhfXcL3v7wQj8/Lb27/fP61p0+s5ozZY/HWT959DUMbfs3saQIW94WL5nLewrkjF/iLuPDUKWNrP3mC4r0BHC4qKkqYXF/Bo//xDdE3RCSNknFQ0QAl1fzwc7NwuxzWn1gMl24fC2fV5fv+5L9fD24f933vMgD2vH4Xy5/5IVXlhdz33cvoWHk/p85qpGFiPWB5qdw+8FnzUVQNxZXgdJNJpYURCODyUjZ5Eq76adiqGlG9RTjKLcZPbODy4giERvIYnW4oFX0vqaxgSkPlpxqBAB63i4LCUH4sxjeMY8708Tx+9zVgd6LHBmhp7gCXB9lXLAyTgnKLAdYEXwhdNywj0IY7WETZuHHYg4W8cPeVxPGLPtodlBQXgNuPM1BIU3+aK+bXsfbZ71E99QRxwAF4A+jFtVBaC6V1UGDtEX8hzuJSHA4H7x0a4qobPkftWedSOGkqzz3ybeZceC4FVVVUF/qorSwm6Pfg87q599arwBvk1AWzuO6iE7F7R+WUFNfgLinDWThCgjNlQg1Or0/sH5cXsOEsKsdRUsWsieU4g4JJzB0I4g2GhDByuMT+CRTjLalg7twZYo0FirAHCigtC7HgktM5+aKzsdvt1M2fJ+7tC6KaToomn0BB/UTcBYX5fgSDfvAXYPMFsIWKxRi4POLvgsWcUF/OLZ85UUSCXR7RX08ATRXMe6YnCKES7KU12CsaOOW8s/CXVTJp/snMWTCVtKSzpzfFuLmzxV7whsT/TsG2Znc6KaysxgiUcu1ls8W8l9TgcrvwhULY3F6c/hBTTjqRa264jMkXXkT1/NMoP/Vs6s46G+e4SZRPmUzZtBM48ZILCJSVcdLMBmZ+9kpOPv8Mnv/ZZymfcyqe2okUTp3BvMkVlBUHmTujnr/86us8/4uvsnnxbfzgK+dSViT2qtfj4qtXLsDjdvHMfV/lS5fNJ38ceIMi0uv2Y6gq6nBE2u4Atw+bw8HlZ0wjtfVRWj/4JTMn1bDq2Vs5YWI1y5++Rfy928dZ553O2pd+wsN33wCFlfl97vW4+NHXzufrXzhT5Nd90ggMFEOwGLvbg7txJs5giItPHiEEcTjszJ01GYBzTj0BgPLi0Nh7ON0QKObs02cJxURKI8kKuickFL1AMfgKKKms4IOnvoutehKU1jH/1FlCjniDYt35C6GwXCgungCOuqn4qmrxTZnNxWcLav/Q8J635hubDfxFuLy+EeIRfxGUN1I/fTpfvOI0zGAJjvHTsI2bIp7hHXWP0nFQXg/ldRQWBnGWVAEII9DhEmvZ7cfhdODzCFlkWM957sEb2bb4R6Q2Pszj9/2bOEtcAtmQySm4C4r52fc+K55nd4I/xAVnngjl46GoCtw+wRpod6C7AySD1bgaZ2L3WTJ+2KnnDeB2uygaX4e7oIB5M+rwBEfkZqSri0wiiaugEFuwCH8oxNrDEcyqieKMDJZCsIyJDdVMP3kmnoIi8d4uL4TKhLwC0ffSGjwlFeJdnB58RUJuTK0MUlESYubkcXzx4rksOGkiHo87v4YaZs4U505xDZNqivPr/o3Hv0eguJhF/3Eddrud+TMb+O4XThfjMVyw2huEsvHMmDmZmRMqCQWETK0oDTF+Qj04XJxz6jQ61zzEvDnTweFm79J/J7z2l5w4uYblz/yARQ9/k8apk/jhtadzzWULxq5Pb0gowkVVUFYv1gBgqAo2jxe8PrwBP688eD228vH5PrnKazj3xJG9YLfbR/rscAn55Q0RLC7OX3PytDqa3rufCVNGnfdOF9dfOJPExw+w7JkfCeeDtW/xhsQ5WlYL46ZQXFFOVbGfpb//FgU11rzYnfir6sQ+crjA6RZGoP3/Y++sw+Sqrz7+ua7jOzPrrslGNtm4ECOBhASChEBwKy1UgApaKNBCKYW+FVpapELfUseKFSmuQeMkxH0t67szs/P+8bs7u5sECJT2bfv0PA8P
2Zm5d+7c+5Nzvt/vOUcF06U70SfGrax412UOjB83QjA3HwIxHl/bil5YRUpSUH0D14ztz/xmMxjgp09vojOQN7AvG64AaIEX3txAWjWYNrWOXs3lt984gce/fyayP0sEJL4ssoIuZ/34JRRfeGBchXLEOpHuY+/uJkx/ANwQ/oKCzHzULQvckDiPHeDFW47n3Xsu4PDD6gCJrrYONNMilJ2T2Wdb9RDRYaPEOLYDYrxrprjHVgAjKxs7ksXefUkmzZ6KVlLLtNpcfnfVcXz5itPIHTcRf0UNkgSfXTKF5hdv5vYrjudzS6aI6zZccT26zcjSCN++6Bhuu2YZd337HMKFJUTKixi/YAaTly4mVlRAzqiRGFne/DF9BEeOxlcxbGC98cfENYZzxflBXK/hiuclq2Jc2EHSuknANWnRAhCMoxVWYmTFcGJxQnn5oGhI4Ryyx03EHTaWvMPmMHXuNM44drI4pzdWsANoOcXIRbXkjxjBCUeKlhYza/NoeOZGln7mBMryIxy/ZA7fvvpM4hE/37z4OH5/y7n8O9unUSxmG1Aw6O98YMcHfGabJw0NAE0fcexHnfPfwvzRCI2NQVB1NNsmEHFZOqEYgKLKcsZW5/GFpVOZUJ3N7U+tp63Lq6ppOui2Q9qysWyTCxfVkus3OXy42Hy/cNo0brrzWXTHRu1LktyzDUnVea9BOC95eXG6+iSaGpqFfAEL+tLEQjZ7dqYxAyFeea+BReOLuPWs8Zz8/RfY09GK7g/SnejjD69t5/Lfr8j8DsN1hBMmSZQWRGjoSdGXBlmWGJsvNmPb0tGCWSRam0DRMXwOw6pzKavMxW8dONTG1xbS8uLNH3jvrrhgMc+8sYGQ/8Au0zMnVvPU82+LBcJ0Oby+mF/8OUrj7r3exmF6+m0FJJm8qNjQFk0q5xLDgVSCtOmCZiCrKpKqUVcy0LcrFI/RsGMX2AH89kDbg9OOque0owYAlljIZda48gOu74KTDuMr//OXzN+2bZFWVLrQUG2XpC8iNOf9eWSGhabrhHOzad7bIogyb5ETi56MYpikZA+70UxUx4cky0SCB+nCvZ9dfv5RXPrDR0FWqB8uNvAxFTlgB5DSSRzHpr2jx1sQJcFGe0i7PxiktU0T9VskCX/Qoay8kN5Umu8/uhbJcsXmQpqmJGDohCMBelSVB1Y38dSmV/nx147mmEt+AR37UJwgt198ODf/YTlrtjYLJyGdAtuPrGrIisz6hk7hTBgOgdJKTp47grveakZu6MTZr3HspWfM5NhZIyjPF88vFI3SuL03UzSlLD9Ck62yu30fKBrr9vaC5oBhglUgen/pFpppISsqfW4Ykim6kUQM4uogSVRUF1JQmsfGdTtZvr2VqhEVrN3WCopOKGAyqTjIUy2edCUQwFc1CklRh6ji3LwCevYIFLRdskE30EyDQOEo2nY30N0gihNhuswZnUdjew/+3AJaGxu9AiXddHsBSn5ekG2dCrKiIEfi7OsSgVFeboDioMm6jS1omkxORTG+QJCVr68i3SsS4Elo9KkaE8dU8NS7u/jTS1tRQxFIQx9QkmWTnDab6sIAbd1J7n12MyChKhJhR2dvG2RXVWAYCv5hw0gBQUfjt+eOoyuRwlRlJEmifmwFmxsEwHXyhH5g4+PZ+HG1vLpyK9h+ptaV8vxraz0QrX9eSqAbWIZKyNWRJImciDvkHDPrK6geXsWaDdu55cK51JZkMXFYLr98ai2r3kmAPlCK/ITDarj8u46Qy+lWpoeoHQ7RmdKQPAdXVWTs/cbij69cwslX3svlZ8/OvBbLzxdVHRNdwkG1A5y7YCRP/W05qU4hcddCWUyqjvPsCiH1jUeDjC7NYtbk4by0Zje/uPQoak7dPiCZUzVk3RQskyShGCa6riHJMqNLRR++I6eP4tcPvowVDNHV1CjmmRukp6tdjOm+PhTbwQ4FOfnIUXz3z+8iyxKGzwWfQ5uiihzvNnGfNccFVziiX15U
w7f/t5UdTXsQRXkMcU2aiqIomYbWN1x8PA+/sIqTPYcK4JyFYzjtiFHEjv4ePa0tJCWJ6pJcbvzzCrDEmqwYJq9sbEELR0mnkiQbpIw0M6XoaJIkfncwSxRNkVUxJjQDVddo8arsVsR9bH7kai66/W/85p6HAKirH8Gba3eCbtGdkrjxjLE89c4Onng1RbJX3M+V29sxrCSfmVPOD+5royOZRHb8KKZJojMMuoVqOyiaip5fTqKjDd00GFsUpL54UODgmT8ri67uHo6aMZKfXbqI6//3VSxdAKn9Nmd8BQ1PXTfkuHDA4ZgFU3jg+bWiEIeiUlmWyx+umHfAdzz+o3M578b7ueKMw8gKOjzyg7N5b2sjlYUDAPmscQKoWDZvFABHTa/hB3d7+5QTBlVDsy3Sro+x5VFWr9tK68YOkGQU24dkGPgdg0X1+eTkxdjR2Q66iWbojCoKD7meUHY2zU2tgnXuE0H8sLKcIZ8pyA5x5XlzOeua34oKkqbL3LEFSJLEnLoCpFC2VwSrQ5xHVlBNC1lROGLMgIuYm58t2HPDpVO2wJK8/HU8RlkWe70sY8bzaO/s8BhDb3E2bO69YRmrtu7j2l+/gtS/z0oSdnYe+5obxOecEEgqWD6CAZe0ppCdH2XNrrCYm5ox8L1AIOjnudV7Of+okaza0caPrnucrPw89uxpBsvh5Q1N6IaOGoyQat8HhoMWjCC5Pnq3rIFUEtmwQXXp6OlDDYRJygq9kgxOAIwEbihINGChqr0UF2WDEwRJIiHJZMWjNO3pAjRSyLQkJEI5uTTvE89U7jVYML2GjXvbWbGhmfGVOTy/WkhtJVnm7Q1NPLNiF0tGZvOHp8MkGzsJOzraoHHrd0z+8pPPceU9r/HmG2uBNDlBATadfs/b1Oa4nP+5RTR3JtAVmVV9abbvbsd0XcqmT2PrqvXIilhTzXCENicEmokczKIvmcRwbHqSvWIMyLLY13vEem2aOt2KRSIFO5u70A0df2k1siIjyRJ1xUGKRszgrgeWEyws5OQpxazeEeWqo2vID9s0tnbxnTsjA2oh3UI3DRTXob4iSmNnAkWWeHZ9IyPz/LR0JcgNmHznsmMyv//iZdMPmIv/bvZpMIKvARWSJJVIkqQjir88sN9nHgBO9/59PPBUWkCSDwBLJUkyJEkqASqAVw/xnP8WlpsTBF8YbD+mY3P+3IGgYc29X+TX1xzPhGoR3BUXeEiR4YBuYFomlm3h+m2+OLOcE8YOOFLXLBrO6Ik1HDGjBn/E76EiNledNYMLjp/I9vY0TT0yXzplBrkFeR5iqnDeUaPEQqEbWLrKjafVE3IN1u0cKBZz5Z9W8svnhcT268fU8MPTRnP1MTXEa6rJrqki7HqMgixhaDI+e4CBySv02EjNYMyEaiqyXWJ+g9LI0Mpfh2JXnjObv9523kHfmzqi0GN0DLBchhVFePCWM1hy9DSBlJkOhs8vAhtFpTRHBIJ5WY5wIA0Hw3HIzouLDV3XCTgDzuCpC+rB9lNeUXjQ7/8ou+BYcTyWD3xhOlWHLslANS1mjsoXz8B0UMJx8TsUlQcumUrCy8l87TuLqCnLzvw+/Fkomobkiwiny3Jx/A6qplIUdT/iauCLS6dw0WmzwA6w7PARgHh+ps9HWtZo70mBZuAGXIxAQCDtlh8sP61pg3DQFYuwJHHBCZNYvqGBEWUxnlmxE1lVkSPZ4IswdkQxRiBIb1phfGWU97Y0s3z1bjbv60E1TDBsDFPn969uY/7UauZMrEB1xe/TbJssv4kiSySTffR6TdP7+tJ8/b5VtHYlMXXloL+vsjBLBI5AUX5cXLuqI9su3zptAsPLckSOld/PjRfMo6I0D0k30fxB8ksLufiEetB01u1uR/P5wQ2iR7KYOKkWJAmfYxHw+dnVlmTjnnaQZNbu7BJIo6owsjDExTNLGR5zUGSJtrZextUVE4gGM0FgXsTCF4kIZ8IJoVq2AHxMA8M0GFdfjuQK
1kkxbUYVh5l07dNE87OR/WFkywHLhz/gYrsWY0ZXgCQheZvyzhaxmd14dA3jvKpxiiLR1ZWgpiRGvKZSjDs35DlvBo+9uZ1wVgBJkpAVFVlV6UtLFERsQo7O5j0d7GwYkFENLwjw4tdnUp7tZsZQv0W89iCWpiB5ztD3l47Eb2tEfDqTSoc6iodqj3z/THY8egWbfnchj95wLJptiwBNVrz/ZGRFYdG4wsz3Hsz++t2l3HbpYoYXDzQsnzmmGAwb3RfMvJYf9dh7x0PcLR9Yfqoq8kGS0HQN27UOCsBUFUVZ/qvPk5s1ULVv9e8vZtmiiWD5icdCPPXdE1g4sZRIdsz7DpNEso9n1zSKtUmWiXvnfvCKOez8+UkUx1z0UBjJDYDpIOkmtt8Va4MvjGmZ/Pi8Cfzo7HGcO1vsMTd/4Qi+evZc5k4bIVgKjwHJKy3MMGiSquPLCvOjR9eiyBJVRVFUVWVCVRzd8aF634flI+Q3MSwd3dB56K1dmH4PcNNM5k2uFmi6riFJElGvt9oXl03nsdvOP+A+6ZrC2JEloKjIms6K7R0cM7kss+bIukHA0cX5FOF4y6pgxmRN5ZWbFhIKOpj+AHKsCPwRESiYDmkGgJfSqE3ANTlqapVYZ00fb67dyZjqPFANZFXh2TV7Kc4OIFsO+eV5KKaJrCoURx0Wjy+gviYPOZiFHgiiWTa4QbBcAmEX0zKxgwECefkoqswVC6rQ1QPdqqrSHLCDHDtjGEHX4ObzpnHdGZM/eNAPst9cewILZo4Wz8H288dL51Ac8x3wuaLsII9973SygoKRcm2DuqrcDz332Kpc4VhLsqg0bPk4Y+4wnrtxIQ9eOZex1bnivll+kqikZZVYQIzNquJssce5QfKifrKDQ5t6P33bOXzvq4uZPqlWBC1OkCPGHrifHju9Gsl7dqg6WX7hK0iSxIlzR4rn6vOer2agqAqKqnDl8SMz56gozBJjx7DQXBfJFmqN7NwYSAIMRjcI+QzCQRspEBF+mbemoqgsveExrv31K8iyzJKppcSzfBi6iuk6nlLDQHa9Pd2w6EhK6LZOl6KINVUzRO6h7a0flh/JdIiHbD63aBS3PLCCLQ0dNCUUSkpy0WwHRVFYODaPUw4fjpmdT0FlKZbPwfAPrEF1tSVi3MsKKc1Cs2wmjqkQa4XpEHTFfb/p/lXc89I2sP24wSC2z2Xd7g7qxpQSjGdRWRwh4LdIqAbT6oopL4jQZwV48LUdrNjUSiBgs7u5hxkjc7njC9OIhB1eXrOX83/2Ksfc8DTbdrWBBFG/QZPXVzKR6uPrD69lfbfC3246XuwvTghdVVjlVR1fsbOd1bvayQ+anDkhn374zvLAooJh5eRVlXDXGWMIRQKeesqhqLqIWH6cypp8ZDcgcs1N8V7/XOjWxT4vyQqqqiIrMooq9p+HL5lCYyLFu619fOX0mXzlqGquPXYYf/j8JEYVBom4OsUxH2ooKsaX5Rd7gWmgGxqTK2Nsb+5CU2UeXLGbHz+3iebOBEH74Mqrf2f7uwPBdDqdBC4EHgNWA79Lp9MrJUm6VpKkRd7H7gQikiStBy5moEjMSuB3wCrgUeCCdDqd+qBz/r3X+v9hU6qzUWwXzecnGHH50qwD2aN+mz+2QCwmstiggxEH22cxaUT2QT//4mUzuPe88cSzxEKmuz6KCuKMHl4kEvwliT+9vJk121s4floF5y8cRTTiY+nMKlKyxjdOqiM/4rC9qYtEKs2yqcXIiowsy6zZ2cY1i4dx6pQi5o3IZkRBAM3QUXU903DV1BV8lj7kmhZO96h9RaOsIMS8yiyWjsrhwkkF+1/+32WjK+Kgm0hukKuWjScWtKmrzOYXVx7DTefP4ImbjqcgOyQWFTtAdaFwQiVJwgxngWGTkHWa2gRyHPUP3cS+de4MOp++indvP+UTXZ8kSfzisqOoKMkeyFkAJE3j5OllKIYh2hwkUkLSpKoccc2jtO8Ti2dZjo/qwojY
YAyx8Wq6KiSNlg/JdJFlBdMyyYt8NCMI8K3zZtJw34UcPnagulVNZYFwBPr6QNXp6hVl+mVt0GKnqOQU5HLdV0+gdFI9KdUQjuv2bro6epAkCdWyOWJcCcs3t9OTVmnv7aM0J0BvT5JkIsUDr28nnh0FzaBLMXn63Z388C+reHbVHiRFRdZ10C3GVcUxdJXe3oFAEOChd3bR2pXANT9axDB/cqUIFAybK5eO4Ygx+cwalQ9uiG7J5PsPr6GxR7CekizTmYSWnjSqIhN2TXyuhaRb9Cbg5ZW7uOSsubTJNq+v3cWqtzeiqDL1k6pRZImK/BCSrDC+PIIiS2xr7ETxArP2ZJq0J2v8yZlj0A0V3TYyAUxStUCS0UwBQHT09mH5fci6gWFbrNzZTjoNnX0yps+HYllIqkaX6jDn2Fk8/sZWFE1F03RsV0jyVFWmMGiydlc7hibT3Z0kkehDViVUTcf2ORkppRWNYRg63SmJ8VWxIfewNOpw09IRdPekSKdhQZ1A8ufVCinlXefUU5PvRx3k9OYFDwR78kMWT1w0hfsvmPihQdqHmW3qhHwW8ZCNosgU5YtxdPZJs7CicTEfZIVlH1HCOytgcea84UOuY3JNLlgutjP02mXT9ORjKkgypqHR1NyFomnIioIkyQQHAUcfdf3HzBwOpsPnT5jIpBpxL6fXC3bGMM0BaZkkobk+yvKC3p8SuirAj1vOmYrp86HaNqrtUFEQAd1C1oVzXF8WYfH4AgKegiHos/jGubOZN75UzAcngKxp7GjsJH90LWgGKUmmYXcL3V29/Oi8SbR4bTKWb2jCtEwMW8wjdJOkpKFpIhBctaONtl4JnCCK4+PpNU0gSeiGjixL5IY/ek06aWY1uCH6rADhkMt5R41CkiXQdGRFIa1rGKaB4zrIhkmfLUCMnCwfd7+yld4UKKqC6RNzBsNGUjQUj12QJCjOEkFRfXlMPEvN4JKTpzBtfDWSF9Q/v7aBe1/aQsDR2bO3A0U3WFRfwIOXzuDl95uZUhPHCQUxbQtFU5EtF9X1kUxKQnQiC/ZbVWUKwgcHPP/8rRO58+vHc9Kc4Yc0Zva38xeOyjBi5XnBjz7gEM2xdKGgccPi/4rKtSfXM7okgs/SqC2NZfYvWdeRVJU6D9AZVx1H8QfRHZffXjT1gPldVRTlM4vH851zp4rxLUkcPbnkgGswNIWCfLH+qMbQOXX3F2dQURzPsN6q7XDFCaNZ/YPFlMQHguH66nyx1zt+NEOofFBUdrWlCBbkIfnDyIpKPGiRF7bRHB+aL4BkinQXLD915TFuPHsqimUztiwLFAVTV8XvcsNgBzBsBwyPDVYUYkGLeFAEZ9gCwJA1Xfhxqk5LZ4rzjxrJs6t205Po464LpxH0WWxv70OSZWRF5pbTxuJ3TexojBljS1AUBdN2vFx0idc2tiCrKvXDcpB1g4Th4+UNLYwqi6IockYJs2pbC7IsIzsB2tM6vbpDbn6Ulesb6exKMLEyxrJ5w+hNy5QUxti0L4mhqwS9wP7IccXsbOkibRu82dyL41iMLYtw6+ljM+B/KtlHZ7KPc+59l1RfmgdW7GbFznYeXb0XXZWJlRQQLc7ny/et4qqHB3LvupN97Gzq4vI/rqRhXzchW0PTFMaWDbDnI/P9XDC3Esl08MeysGyDYSMK0G0T3fFk8ZoufAUPFECSUBQF1zFRNAXTFtcpyRJ+W6elrZe9zV18cV4F588qO2CM6qpMWVme98wECKdqKqZj4rM19nWKwnoA972zi65EX6bn5H+SfSoN5dPp9MPpdLoynU6XpdPpb3qvfT2dTj/g/bs7nU6fkE6ny9Pp9Ph0Ov3+oGO/6R1XlU6nH/mwc/472udnl+ALBwlEAkwfefCArt++dGSVGOBuEEXXuGHpCC44qopfnj7mQ4+ryA+BE6RXNjjn1ic599YncUyNS0+sZ8ueNtJp+M55U7n/jZ185Rev09wrIckywwvFJHx/t2ADj5tQ
iOWYmckyzWue/fx7jZz58zcy36co4n1VkfCZQxmaE6eUiuDF9vGVGSVMKYswozJC2P/xGcEPM1NXWfmrz9Jy/+e5/KTxQ977/NGjmTI8l/oqj1FTVCpzB3KzIpEAGDaKrvPlY2p58tojePTqA6U2n9Rx7bclh1Xy+g+WsnRWDartoFo2Z86p4sSpJUTDvkHSNkDWaPNKRVu6yu1/XU9xzIdiGKi2g+b68DkGdiAApoviOJk8n9zQod9bxxy6iB05vgQsFzXiBdb9TpQse+heEENXWbmlhSzXRDUMvn/fW9SWxkh5BSfmjhFM9fS6AbRXUlV0Y+C7Vm/bR25U5HgEXYttdy7l4kW1FGQ5wokyLTRNoaUzKZzB/ay9vZe+vjRZ7kc738dPqxCIoWEzpUbMuSPG5CHpBrKus6Oxg6a2bsGAyTLtXQnueWYDtqUztjxCwDVRbZvzFtfjBhy++4c3MnKfZUfUMm32aKLZIR78zhKmjClFUWRG5Iu5tHFPeyYQbB7UhLomx7ISA7gAACAASURBVEdje69gLU0vf1WSkHUdTRfjYM++HkzHQjVNinP83P/mDlRVOAt5UT+WY6EYBolEiqcef5Oe3iSybiLJEpIkYVkamqlz/t1v8LfVewm7Ov35dc1dSaZWRsgvyUN3RR5La3sv9aOK6OxOMqIoTDwWwHIsJEmit6+PNQ2dfGamcNwuW1DFc1fO4JzpxQDkBExm18aHzJH+vl37myJLWNrBmdxPYiNKY2D52NXUSQLh3EuKwrjyrI8+eD+bMSoP1bK58pShuVKO32P0ZBUMC8UfZPOeNiRFzcw7/8dAhheML+Ghb5/IRccN5DYfOb4UnAA9qkV21I/h86FaNpIsEwuYB5zj7DkVvHrTImRVQ9FU6iuyUCwRCL5505Hkf0DwNXtUnmARNQ3FMEmnYdeWvYJl8+Z7Ra6fpdNK6RjUkFtRFVRNRTEtVMvO5PsB/ODUOqrz/ci6SUox6E2J9/rSEoqqkHOQ69/fTpxeninAsq87zaJvPSECOkDVtEznG0mWRM625xg3d6X41eOi5YKqKuimTiAgGGpJEfPFNFXiYQufBxyFXSFdBThhzij+9ML72LYJksKVx41gxvA4PtfOsHll2X5+/eIWvvPwOp5YtRdVVRmWH+CbJ9WhWhY+ny1AU0XGdAwMS8dnakPz1AeZbWqcPKgv7Me1WaMLePibx3DPZUd+4nN8pMkKpm0SdAb2pbryqNjP3TC26yJJEqfNFGB2fVkWim7g89nU5AU+6KyMLI3y6g9P4htnTqMm/0DZLMDkkYXghsT+tp/NG1uI5vOjeAHeuXMqiQeG7nkL6gtRXD+zxlfw5WNGoJkWqmVhGiqSapBG+Dxh16A45mJYBiW5IVRdF6y2qvGrr81j4vBcJEXB7xp09aRQFFFN1AgEUX0BNMNA9weoLIogKzLnHFbKqZOL0CwTSVWRTbFGD+RHquRHfXzl568ysjjMxKooY711KuQa+C2NVza38MiavaTTsGVvh0i31zzpp+mQSKaRdYNZtTk47sDv3tGWQnF8WJbOt/68gjc3eTn8jo2uKSQTKWYOG5DiTq6McuL4ArIiLvc+9z6yolBTkcfpJ0/m5ovmkhcXz6YgJ0CyL01nd4K8sM2Jk4v4/cXTvCEi05UUE3NTUydvbROkgCJBqi+N329j2BZbmgdVNgZ6elM8tWoPmxo7aetOEnTEXHxv74Da5BuPrCPs6tSMH0Z+uUgnKAgYdHUncf39+6VQVfVbMOgIgFSSMoCMYemYtsGWQX2PX3h/v2rMg+yk6SWotoWkqKi2w9H1uZTFPGUBIh0m2z/gd/yXEfyvfWwrjQe47KTRfPPUMfzslA8P6IKDJrmm6yyuy+eS2R/MIPbbYTVRJFUFWSInLBDQyrwgV548nhvPnsJPL5rDt/+0gsY2IR17+l2Ro5QJBPeIPLWSuDOEcelHNx9buVugp/0BoCrjOBqmrlKbPVSWOKY8BrKC
ZZtUZg9tavppW2lOIFPc5WA2p26AhawtGtiA4lkBVNvmzs9P56olddSXRynP+cdcq64p3H3xbBaML0bWNK5fNhZJkpg9Mk8EXrq3aSgKi8YX8tCVc0HTuP5PK7jjifUohilyGGWZkGMQj4XQwlEs18k44eXZB8qEDtUuXCTkNclECiSJz80fxiNfn8uiicUCQVdU7IhAHD978yNsXPk+rZ29DKvJZ++uFqpyA5wwtRSAa377NpedNJ4FkyswdYXGDhHYjqvIoqsrQciTLFUWCrT56qV1vP29Y/jrNUcAUFsY4r1dbRkWbbB51dapL/5oNLyqYOBZ15cLpLkyN4DlOdqmbZIVtJF1HcMyiIZd5tXlkUTmxQ3N7PMcgOdW7iI4SOK34qencMeFM3jogkn4DIWVuztYvW0ftq2zYcc+8s//E1sbO8UmDhlG8xfnjWP5FtEeJOJquDnZZBfmotoOuuOg6iqqN7d0y8C0TSJBh46eFPlx4XxNrs3h+qWjkTUVWVFo7+xlwvA8JEXJBM6uoyPLMi9vaGLXvm7UQTLaPa09LJlQQGeij0h4YM4WBC0sXeE3L26hvSuJLMuEXJ1HVu7hrmc38eX5Vbxx3RziAZPsgDlECtroyYOmVEYIuToLPwLo+rRsohfc/+Wl9SS9qpixkH0AyHEoFnYNWn93JhfMHzbk9WjYJ4AQywHdoqMnje2zkTQtI0EeXXjozIwkScyuKxxy/+aNLRRMiaywtz1JWpIFkwGo8sG35rJsF8uxOHNONYcNy0bx2LP9HePBVhjz8/nFY3jx1iUE/ILN6G/9IkkyPkvj5tPH8dk7XiPVl0bV1MxvlCRRFl7WtCFBfzxgUJTlonpsthtwUB1HBG2qQt4hgFM+SycWjyApCrKmoekasm4Ip19TOXlyAZZrohkaqqFnwJjB64PiyZDTsoLmumi2gyzL6LpCZ2KgSqyjKxnZ9cQv/Y6te9voSUvIiszs2jhLJhXR3Jng1nMmYVompXGX59aJaprbW3owNYXLFg/nsRV7sF0bFJXJVVF0U8w5VVNxDkGt8PfYzFH5HDflH9C4WrdAkjhzwWheuuW4IW+NLg57ufYyXck0rm0w0QtkRhWHsV2bU2d8tI8yojjCV48b9YHvzxmdD7JC7SDZdr+dNacS19SYUBllzqhcggcBA4cXhXjvjmU8dMXhXLa4lmjQQtZ0JN2gudGrtivLRP0m48siqKrKvp7UEAaypSvBhl0CFL/hsfdEgbRkH5IsYbk2TtCPrMgEgy7dKQldV7jpoTXc+OAaVF1Dsx1ys3xomgZOANXx4fPZvLahGUWWuO/S2UiSxHGTiokHLSrzQyQlieseXYfm+THvbG5GQkKR5UyhuQWTy5FVFb+pEo/6icTCXH/mFPZ1izn8xsZmfvjoOpKpNMGghaKq9Gkmo8uirN4hfs+okjDz6/KoiTuMHZnHyDLxDDfvaacgYLKnN80dT66jtDDM+VOLmVEaprs7SaEH7kV8BjedNR7d1NG9feXt7W28t7cDW1dIpWFvey/PXzmT4RURFtXGWVgbY1jUxtFk2jp6sXWFvJAAiAY3jbBtDdvWeOa9Ru56aWj18XxPqeX6bTTXh2qKv1XHZcncEUyeNhxkhd5UGp+t8devTsusX5uaBtq3vLKxmfaeJOf9+i3e8/zdHS3dXPeXtcweFscwDTTHQVYUJldGqS0O8T9PCb4qlU6zaEScgpCF31Spin10Ks6/m/03EPwn2BdmlnHaxEPLNVNtW1Slsw5NdgSwsDYLzXUxAkF+eemR/Opr8zhuRhVH3/AEp8yuYee+Xu56UtD0Po/WriuNELB1fvPCJl5d34BjqPzkqfdp9xDheSPirNvdzu3PbOThd0XycH15mBMnF2DpCooi4xoKF04bKvWQJInvfOFwfnXF/EO+/n+UjSqJoFo2quOSNwgtv/nMiXzt+DEcP7n4n3Ytd31+GhtvX5K5/2fNqRDsmyMqgsmywnlzq0gh
pEbfOGEko0uG5lRl+U2Kc4JYro2qaQwvEOjplKqPz4T0W8g1yfaCHUlRGV0SYXJ1nBGDEv/7kCj15CeJngS23+HVTftIpfq46YxxTK+J4XqMr2WbpCSFqrwg2z1E7tbT6pAkWLunC83npyRnwIFWZJm60jCfmVfFt04ZR1dvirQE+YMkVuccNjDGFtcPqqz7IXbjudOZObYU23PONFXh6evno6gKKUUnPy8qZDSKTJ8kM602T/zt5Zupmsb7u9tobu4mNx6gpjCcyTNt7ewlz9VZs7ud97a3EPCbXHXvW/R3muiXpqU8luRHT2/gmgdE+4xJZRGCsSwC+XF+fPFs5k8uzzjdIFiYry0eQXNPGstUufmE4WiazKbGTqbWxNA0DcWyOf+YOs6f7+V6etKtLGeoTLsr5ZVPN1X6+tI0diXQFAnXFpLUY6ZV8seXNlMY8w9x8seUhkilRMXYZKovw6rsby2dItD/5tE1PHHRFEo8Gd4/2haOL0ZWVQqyB8bRqJJPPgcOxvwXxQMZWWi/+SIil7L/8/VlBzqtH8cifpNYRMjJHGeAQSuN+zh95sEdfkmS2PLjY7n51DHUlQiH1jA/eq+46ezJjC6LMqEylgk2ASxL5+pl45BVlQde34aqqeimhm71N6aX0AwN3RgYW7Iikx0wqcr1oXqKBckwBcMuiTlVET+0sXDKnGo0x8X2uwRCvsz91TWFry2oQpZlNF0UoNFcH6rjoigKX18ykqDfzMy1JFKmiI8sS+QETL44e+AeSp5stZ+pGV4cQVI1Ao7B5tYeJlZmoasyl/3mbRRV4e7nN/PW5hZMU0WSJc47ooqQa/Lmln2Z5z+2LDJk7HzQPPmXN1MUgvveuZOp3o+xy404yOqAbHnhuIIMmFGY5fDI5TO57sSR+5/xY9vCiaWUZPu5bMmBYHlNfpAdd5/E49ccwR+/NvsgRwvLCdmZ51Hr7V+JvoHnIysqsaDJXA+w6k30ibXeU+vMuupRbntkNRLQ7a1taSSOrc/LME6appBIpWnuSGAYKolUWihaNJW8mJ9Jw7KRVJGTmFQMxlVl88BrW5hXl0/EC2pOnVHOutuOp7uPDIiXFRC58TAAbiimUHvsaUuiqzI33L8aXZFp70mRkgZAvo6eFCXFYepH5xGPuiIQUlVW7e1lzY59/Oi8STz7zQW8s6WFDbvaGFOaxRHTK/nBuRPp6Ely+W3P8dTrW+jqSTF7XCHVMYe6bJc0UDDIb+pK9nn3QVzn797cQW8qzawKsQ7ubO0mLYn1Ie7TOW1cPo+8vYu1W/ahSDA818fiOpGz2tg50MJCVeVMikFHYiAdxFBl7n9LkBaWLYqAKYos5OeKwn0vb+HZF9d7z1amOjdAYcQWaQ0njmDbIFZyc2Mnf35rJ8vfb+aah9awtbmL0+5ezn1v7OCi379LJOQgKzKarlEQcXhzy0BfZ1mWKMty+Pmpo7nvvHGURf85+9w/0/4bCP6LmaLpqKZJdvyDpRb7Wyzox3Zs0n1pFlz/V2zb5Nrfv8tzq3bzxDs72NUyQJEfO1Hkh02oiPLku7u45Jdv8ODy7VTk+rjnRdHE/vSpRcwYHuPkn73OT5/dlDl27e4OtjR1ZWRA/g/QSl84t4oFoz9ZhcBP0ypz/ciZnJ6BDWFSdYwrl4z+u6WfH8dsQyXsG3DYxldERY8fz1mXZJkTbnmek/9HNDQ9bmIBs2rjqJqK5VioqigKkxMccH6uXTKSHbcf+6FswKHYYaMFcyqrCqO84LMq149qO+TnZtHV0Y3p8/P8rSdQkRfkrPkjyfGJZz+yKEx2wOKixaMwLZ3fPL+RVVtbqMoLsHlvB7ahUhrzMa4ySlObyCes30/CV3PxQ/T0yXT09rMUErkhiy8fWck1i4fxtaOqePyr03j4y1OpzT+0efHFo0fy8DeGghEji0LkREXQs2prC5IsoRsa3b0pbrhvpWC6LRVdU9B0jbu/MJ1kqo9oXpTHblhMs8eAnXzL
3/jR75bz/Ftbae9KEHJNOntSZPkMJlVFD2A0V+8aaNY8wivi0tGR4IX3m9jb0YsiSximiqH1S1s0WjoTlMUdRuUF8Fka2xo7ueOZTQRdQ7DDkQBtPd798jbmmMfCxP0Gt54yinRaOKejCwKossTLm1sYluenuTuJ7vPx8Nu7KY657O1IcMaM0sw19kt/AHbuG+hD98aWZhb96GUa2sVrzZ29SIDvEzBxf4+VZPv53gUz2dWR0Q4yofKTB4IHs5GlBwZ5h1WL/MiQa/D2d+Zz3IS/P+/5qRuPYU59MY9cfQSmpnDC5GLevPWYD80/1FUhVyuKOhiWgaYfegBytqcwMV3hzCTSElf/9m0uvPM1ESyZGtceN5zJFRGB/psaui6KtliOie2zsByTqN+gOteXcTp7PABRN0ROVeEh5i1ftriWY6eV85PzJxILWplOIY6lYesqIwtF8N1fAEJWFFxb4/zZ5ai6qDBsuyaqF9xKsoSmKTz0hUmcNnko+FpSmY8TCaG5PoryY0iShM9n8IWfv8niH77MtUtGkvLQnPW7BWtgmgq6rnDnM5tYetsrgvFUZH549jh69uuFHXKHAjH/Lnb9GZMZXhQ+qLrGb2kYji1kj5rO1UuGsnqji8NDKp9+UvPbOqvuOI05dZ+sONv+NmN4NoYlJOPT64qoqchFVhSiPpOSqJNJb5FlwcL358TvbO5EN3U6vbU+FjCYMmhtkQaBdpIkM39UNpcvrEZVFVq6Ujzx7m4hUfQ+19OXZm9rN2fOrjjgGhtae1BVhW8trOawqihjR+Zy3NQSkVIhgek4qI6Pd7c0U5wTRJIktjV0kE7DDx5eNeRco0vDqLpKWdxFlmWyvKDzCwuGc8qMcv76zk6Wfu95Dr/+KSoiFhubunhyeyez6oSf9s7qXbiOzjePHoamyBmfMT6oANA+L39YliXiPj3jBx5elUU6nWZDQycvvC/6LTq6wgvrBaOeSqdp7UxQFLE5fXIhn5tdiqzIXDFv6D35/vG1GWUMgOQFhzkBA02RqRqWx6RJZUwfO7Du9nqBo6aqjCsPI8sSj311GkePzWVjg5jDErBrXzdveMHdmu2tHPPDl2lu70VVJDp7UowuiWDZFrqhs35P+5DKr5IkUZJlIw8CAf/T7L+B4L+YlZZEcX02T395wsc6zh+0sbzqnZ/9yYuZ159btZt3Ng3oo69aMprPLxjG144dwR9f2QLAd06p4/BRA0zL75Zv5/evi2axVy+sZt6IOKapkuxLs3JHG7IsocgSUedfWyutqwqnzSznf86Z+NEf/iebqsgZOQiIDWZwblzYNSjIcjBMUfLasAwKIjYjB8nRRh2kTPknsfn1RaiWYKJr8sX5RxSGkFWVPe0CudvZ0snYijjv3H4KNy0bS0XYIjdsE/YZXPSL5cjJJK7fZsPOVna1dKFoKp09CUpiwuH8xnG1ohrhsBw+e0RN5rtbOnrp7Enx+5e3sHZHa+ZelEYdPjOrlGWeM1cSdaiI//2SjAWDKu8KtkEjGLQIuzqKrjE8P0DQc+je39NJTsRlx952Rl90H5Wf/QM7mjp5cc0eADZubAAgYGrIEjx/3VwuWjRsSBN6SQJrkEQz4uoZ9POt95vZ1dKNY6koisxxXnuFB98UKOg4ryhDadyhJ9HHA2/uxO/Jx19eu5tfPr0eWZYIBizGjM6nzCue8NsLJ2IYKqlUmpIsm9o8P929KVbuaGVSeQTFKz4SD1osmlhCTzLNonEFfGNZHbZr0TYoT2zdnoFqwt9+dD3bm7r4vieZ2dPa84FVXP/Rdu7hVVwwvwbN9aE5DrWFn6wi6QdZfUUMWdNQ7QH0t7/wSG7EJuo3PxWnoCzHz/2XH86Y0ghb7jiRn35u6iEfq8gSP/vMBJ686oNZkv1tZm0Ouq6RkhQ0nz/DRje09RAKWjx56WGcPKmQz80pzzBx/dafiwqgKTK5QQtVUwkMau8jyTJ+Sz0kaSiAa2rc/dlJHDM2nx+e
Nhrdy5fN8sCtn545hjOmFaFqA8FuLGjzuZ+/QVdvSvhrkoSiyqK4jWl8oEQzO+xi+1wkWeaJd3agqAr7uvsEW76vh0dWNzDJc/otWyMQMEil0kMCHUmSsFyTxePy2dzYOdhfJHoI+cv/inbJ8WN4/UcnH/Q9SZIozvaj2Q5+1yQndGgB/v+3jSoOoXgAcJ+q05UU630sKOZteW5A5MAOAlGKK3K44Ng6LK9wlK7JlMV8ZA/Kd5W9tVPVFBJ9aWYNi1EaE2wSQF8aMVY9JcG7W1o5ZkIRM2pzeOLdnXzp569zyS+Xs7e1m32dvei6wriiIJt2tdHY0ZthAyVJwrRNjqgvYtNPl9DR28ecEdnIHhPY2png8wuGkRfzYZo6Nx87nJ8sqWWR19d0RIkAsvoLaP3mhU2Z3/C3N7YRslQ0TWH2pFIevmourqlx8cLhqLLM2T9+mSvvfRuA+1c18PR7IqDb59UwUGSZ08aJvWpedRYX3PMWbW09/O/yHdz58lb6+tLc+th7XPTbd4c8k9aeJOf979s8vHIP2X6D2VVZ/PXCAd+sNtfHWZMKsSwV1xUS1M8cVsykyiz6AN3QMAyN3KyhfoAsy8iawrSqgTYpd7y0hbe9/MVYwKClM8G6QYBsv50ySfgXxXEfpi1yfb//9Pt0dCeQZQm/T8c1lAPUNv9p9t9A8F/M3rh+HttvP45Q8NAZQYCCHJ/IpdBUWjw06/BRufxtxU5WbGnmuEnF/P4rs4gGLK5fNpbr/rSSB5dv55hx+SybVsLbW/dlziVJEuv3dHDpkZWMyPfzflNXBkEDgQj5LI2Cj1Gk5P/LfnDuJM6YdSAa969g7uBKhbLM4SOzWVSfz3keal+wH6peHHMIeInKi+rzsT8GE/BhNrokgqxp1JdnZRy94v108B0dPewcxCyv3NrMsIIg25s6+e2Lm7n5gdXEBy3QTYk+Er0pFFXhsz9/g/yIg6ErbG/u5rN3vMqMa/7Kebe/wpJbn8scc+dT68nyHOzqnE+e9/hhtmhsHqom5F66qZMXskimJQrzgqiqwpzhcWIBC1mRefjN7VQVBNm3r4v27iSJVB81F/4RgBtOFb0kVVWmuzfF6OIQfktjnZd/0D9f0mnoTvThN1WuWFBF2NGxLJV40KSlI0FXTyrTF7Gf6dngbVgzq6Jcff/qIeNA0xRkReaFd3bw9qYmTMsgHjA5bVweT60QEm7XVFm/t52+vjTDc33MGy7yJHt6UwwrDDCuPAvTMtEtk7v+9j5TamLc9MwmfvvmTiRZomuQPOf9Qa0jROo8vLaxmbtf3ExzR4Lw/+MGObUm2wNQ5CE5wJ+GjSgU7T1kVUV1XOZMruLJt7cjyzJl/6DcZ0NThuQQHootGJNH9YcU6tjfTE2hujCUcTb7A0FFVfjfCyZR5OUE1ZeEWDAqm6PGDLQf2D/wzQ6YQmqNxP1Xzc0UGppSmfWJguTRRSF0Q1TALfSC7ohrcOqUIhRVyTjbO1q6+etKAcYYnuNsmppoK6AohD5gTI4sCmLZouqpaajoho4ky1y2sJqimMObG5v51YWTsF2TkKsT8XpS9o97WZZwHI10Gh5+exfPr20QDL7hASv+f89A8KOsHxyM/p3Kk3+mHTYszhfnV6MoCqu3ttDU1o0kS5mg7u7PTOCW08cS8ZsoiowiS9QVhfj1y1tJJpIcO6GA3FiAkrhLYUSw4LqpoygylmMQyxK52xPKwhRFbCFdVmWmDhMqHtU0sR2TrhQcPaGQVF+aC+58nd+9tIXfvLCZ/3l4Lek0xIImv3t1G0+8s5vmlm72tA4oMBRVJpGG3mQfLZ0JptfEyAqaRKMBjhxbwBUnjCKW5cMyNSxNIeYzyA9a6LqCaeqsu+14ynP8NLR2s2rbPhaOzeNLC6r54ytbmV8eQupJsLulmyk1cbbcsYSLFw7n3hc28chbO1i9vRVZgvZUml+/sZNfvradlk4BECZTfby1uYWfLR2Blk6zvaV7SGG0
ptbuIYVa+u25DU1saOikqTPBiFyhzNkfZIn5DLHHeetgPGjxxNoGJEnk3G/e0crejh5s18Z2bHyuKSqnhyymVES4791d/Oq1bfzmlW20D5L3dnQnh9zbLJ/O+TNKqM0Va7ltqpniWACJVBq/rVEcsSmNOP+xTGC//ZuK2v9r+9uZU4q4blc7id4EyYSYsGfMqmDZrc8gSXDB/BpREtmzl9c1YKgylx8rKpmt8NATGGDFf/biZm57QfQTHOygaKqMpavU5f5ji8H8p1skYNO4R6BtsqLwpQU1jBmUF1gQGapFL4w4jC4O4bM0Fow5tFy5Q7GKXD/fPXM8R9UPSC6UQQu0pmskehM88sZ2zppVzq7mTlZsaWbhuEIeeXNH5nMjCoO0N7dzVF0Of3pnLwDv7elkQ2M3Uyp24loajfu6eXD5dgxVHtK7EgTDNKsun1c2NFH+KbB/B7Ph+QFs1yaVTKFqCt8+cQSn//Q13tvZjizBcfW5vLS+kVW6yurtrVTmDH0G04dnM60mzueOrCEn5uOVba3c+/g6zvRyujbsEUGcqsqkUgMB1fWLhzGlPMLGho6MdG631/S6PzeksT1BbthiW0MnlqnS1NHDQ+/sAiAUMunoSNDYmcAwDHq6uzl+cjF/eXsPm/d2csUg9PXiP67MsJIVcR8FYZuaHB+bmzr5zfIdVGa7vGob5ERsjh1fgOSavLW9lR5Pmts9qMjG1qYunlvfwAvrm9jhXe+e1h5++KRgBXODH10d8h9lI4sH5krhp5yfWO4BEdkhm13Nnby0rpFEbwJN1zI9FP9d7Yi6PN5ZLwIpTZXx+wy+uWQUIwsGAkpdlfneKaN5ds1e/vjiJiRJ4rbT6+hKpFC98Rr1C7VCMg1/fGmLJ4mTmD8y/omuS5ElwbT0pocUwCqM2GiaYPzS6XSm0inA8Hw/y99vJunJ1JAYwuAMtpIsR7SbsA1SyT40D3h5b087w/P8bN7TwX1v7ESSZSxNpifRR8BSafLk+7atZebVV+59BxDz3DBUDEMl6vvPDASXTi3hode2cNaHtL76V7SFY/O4+f53afMkjZquZSpsF0cdiqMOtz++joYWkzW3HsWGhi7Ouns56TTU5Pl5bHUjuUGTuN/MVIiVJMiLuJRFHTY1dpIdMEmk+pAVCV/QIakoGblpMq0gSzBjeA73L99GW1eCnLwAe3e1ctfTGwBwLZ2b/rI2c839snthEpv2dPDCWqE8Kc/xURb3sWJHK8fNquKEO5ezvbErU3cAIMvVCfhNlm9oIuIz2NLQwcQrHgPg5CnFnDO7nJ8+8R43/nkl725pIT/bx7ULKkVxGuCe5zYysjDIHy6ZxuUPrKU8x0fM1XliXWOmn2zTvm7ueG4TS8fn8epGIQWVJYj7dHa19tDRmWDhqGyqs328vW0fzR29vLapBddUCTs6O1t7GDZoDb158TB0z9+I7ievRqX/pgAAIABJREFU3tQovlOWJRLevrSloQtNV0kmUvShIEkwrjzCk+saueOFLfT1pWltH5Cx7usWAWFfX5pbltSyp72X48fkIklSJt2jqSvBL86r57xfvJkJ+obl+NjY3MXIvP98P/e/jOB/iC0bX0BWWGx04ZDDhJpsbnpoLRfOr+HGU+sZW5bFn1/dym2PraO1K8Hmhg6+ML+K/LBNc0cv+7oS1JcIVL0/6EukwTVUVEUeglbrukxRxKQ66z9z4/tnWTRgohgGpiEqEcb262WYu19PqqjfwGdpLKrPzwQPn4ZJksQ5h1eRvZ/sJxK0UTUVTRcbzd9WiqDkwde2kk7D0eMLeeJd8ZosQWXcxQ77mDg8j57uJLGAmUHxn1nbkOlFFPIZPP2Nw7nsmOFEB6HokiTkI8AH9uP6ey3k6MTCDpqhEfCZTCgL87UFVQAsmVBA0NYpizmomsqCsXms3TEgJ/n+hdN58IrD+eqxojjC4rH5XDi1mN5kH2Ueg7rF27gG99cD0SMJyDBopYMSztt7xW9+
Zl0DWn/5+rjDQ+/sznzGNBRcV1ROVFWF4pIY1V5F1MGSYstSeb+xk/e8gLQ/sX1GVRZdPSk2N4ogUzd13KiPUMzHq5uaWfVeI7s8NrM7kco4te/t6eC6h9by+9e209mTIuY3WDCoOmhJ9P9PKpY/iCn9tBFbRZbZ/fOT+e5ZQqKf6BXOhKqqDPsYDNy/oh0xaqCkfCxosfo78zlu/MFzuiuyfViOieOaTK+Osqgul/ne8ZoiZ2TUf3xF5JfLskzd38HO2o6B5ZiMKBhakTXiKQVEQSeJgN/AtjXmjxJjsT84TPelPzA/cdnEfFxTMIEl2T78QRtNlXl89V6e9/KZfvBX4aDv2tdDW3eSkycUEHR0/H4D01QyIGkilWZYYQBzkAw19h8aCC4cV8hL3z6KC+fXfPSH/4WsIsePpg6ABrIsk7ffvhL2GUiSxFd//RZrt7VQ6wEQNz8qipDkh60hIPjY4hDbmrp4eUMT0zwZsabIxPwGqb4063a1ZXpLglDbPLG2gUvueRNJgmWTi9C8tkqaKrNyUwsdPSlMTSaVStPgBTD9bYP2tAoFTcjRGVUUoirHR29Pknte28Ybb22no7M3MwdBFAwLBi06epK8sbGZh5Zvz7xXnRf4v/buOz6O6lrg+O/ObN/VatV7tSQXuVeMbWyaDaYHQk3ogRASXvLSywu8QF56SHmEBBISAoGE8EJNAgESIBjbYDDFuPduy+pWWW25748ZrVa2bMvIki3pfD8ffyztjqQre3Znzr3nnoPP7eC8KYW8b++Xa26N0GRvBahv6eC9rQ2cOT6PgMeJdhhk+p1cPimPs0dnEu2sZ2D/c7y5qZ4VO5oJehzENdw2pzRRMK0yJ8AlU/JZsq2R1bWtjMgNMDY/yBmjrPTN0UkTPVOKQ4m98zlJryFDWb37oPvEtAImjM2jPKnSe3aaj3v+vZldNS3ssa/BX11Yhc/nxOUy+eS8Mn52xXjmjszio1MKEteLNL8Lp2mwrb6NCcUh3O6u17Pf5aClI0b5ABVBO54kEBxCplVY6W3hKKzY3sz63c2cNDqXmxeMYuX2Rj7/+7e56y8r+OJDVk/AUfaK3mZ7pqepI0YwaL0xOhwGHqeJ02mS4nXicpr4/U5SUlw4DJOvnVqCzzd4UkVORPlpXky3B3/IKoiQeUBqkdM0+K+LxzLWnqnP6kVvrmPpof84hftumcXH5pRhGAZPLd7E469v4r5/rKa6KERpTgpL11mzlXEN2X4nkZjmyeU7iYSjjLTPr5kV6SxZX0uG/SbfGolx5f3LqCoKsey7Z/O5863S/SNyUtjd2G7NLvbj73rqmGxcbidV9oXk2jklPPO5Wfz3RdY4zrJXNBxuJy6HQU5uKpnZQX735o6DvtfmGivgCgVc/OWdXeyyV80MQ5Gb4cXvd/LHm6YlCqqkeByYhiI71c1H7H5FMQ2mfWHa19JBYY6fM8bk8Mamej4xpxSAyyYXcNY4q29fVtBNuD3Ks+/usX+W9TY+rTyNdPtGp3PlotwO1OZWZaKBSCRG2L6gFwQ9/HtjPdGkPYEA0bgmN9WNz2Wyfs9+apu70n5SPA6+dcFozrFvwKcWH9uUzKOhlOKem2by2BdP7Zfv73GZjEwK+kaVZ2GYRqKk+mA1uiA1sU+0OCtw2CA6N9VN0OugLMuP23Hw7UJVfhBfStdrNdXv7NNrtzTD6s9XXdh9Fr4sKVU94HPywudn8ZvrJjOlNI1AwJW4WXe6HJQe4v8nxeNkYnEIwzSIeV2EI1ZrgE/NKyM76CHF72Sf3WIp3e9iblUmZ43NYZS9etHREe/+b2V2/3fLSx2agSDAmKK0QZce53IYFGQE7P5yVjBxYGGrcvu8euatHXz+929TErKCqs7zKS/U/R5nit2+KByNM29015608UWp6LgmYDeg75ycazMM7nrqA8LtUW47bww3zynFb08G+uyJ0ZMrM4grRTyuE+/bDofVkuTWc8dwzdxy/vjZ2aT5XZRl
B4hE4zQkbdPISAqefC6TvEw/SsGXHrbu9wDKcwJMHWFlUNx+yThuXVCFy2HQ2trB4s1WUPj6GquX4ZzR2bRHYt2ap58zOisRCHZmBDy8ZBttkVgie2dvU5i4fYzbYfDwm1YrCKUUNa0RCkIerplRxD2XjmPkITJ+MpOC2mtPsjKUPA6j28S327SKqs0enY1hGFYhKdNgx97uewAL030Y9r7mnJCHaaVp3P3PDey1X+M1zWEeWLyVnJCHzTUtrLJbh3jtyZ1Fm6zaGuW9LHw1mEkgOITcPLMQl7t7tu97Wxp44o1tnHHnS7Tbe3+esWeJRtpN1rfageAO+83F7Tbxeh14XCapPqf1x+tM7CtxOhQpEgT2WZld3KO5LYLbaeLpoWrbLfOr+NtXT2XRnfPJGOBiBLNGZnHxScVMLEvD6baaJd/yy9dZu7OJb1w6kddW7yUcjSeqFm6wexYt21hHXGsy7RvCzy6opD0Sp9VOPTQNg7JMH994ciXLNjfw6Fs78Xid/PS6KTS3RUn1OY/piueB/uuC0cysSOc6uzm6UopR+SmJi//EohBOh8H725qozAvicDgoLrMu+vvD3YOmzfYq2q8Wb+Pnr2xiX3M4cfFpjcQxTaPbPjpDKUI+J7UtEb576Tg+dWaFHdx1let/7ObpfLCziZIMHzeeUkJO0M2exnZmjUjH6TRwu500NLWz1y7k0nnTkZ7iIhKJYwBVWX4K0jyJfaQV2X6mlISIRuLU2LPOl4zP4X/OqaKxOcz8sV3pfFpDfsjL9y+pTtyYXDgpD6ep+Lh9cf7v80bx8I1TOT3pZuh4+Ni8ChZM6r8KxXlJqa+X2RWXiwb5DLHPbbWc8Pq8if1fh6KU4oLJ+YmVtwNNKQ1hGgaGHRRV9bEf6/9eM4nLZhRSdcCN4tiCYOI8L8rwkeZzMbU0jTF5KdwyrwzDUPhSvLg8LioO07pisn0j39QURmvrhnt+dQ7nTchN/A4AE0tC/PiycRSmeZlS0vVv1LlPcO6EHPY0hbu9Tw3VPYKD2ciCVLwBL4FggJQeKvFOKe9eZOq3/7JS3scXpbJgXA5j7QmJzgyV5N6hM0Z0fe3E4hCxuKbWrozdOTkXczjoaOlgYmkaXz5nFH63gzFl6cyeUogvYL23vG+vIsbjOrEi2Xktimr4zpUTGWf/3M6U6dbGrkCwM5PoldU1XPCT1ylJ85IW9LB2VzPpAReP/scsXvvW/ERGTsjv4usfGcsdHx1HJBrnu0+tpCUc5dcvrScr6GbR9maeXGGljqfbNQk8TpOTS0KYhqIzC/sD+3q/xq7Oee/LG9lZY338s1c28eDS7j0Bi9K8mIZizGH2/ztNg/86u4pHrp3MVdMK+cTJxfzwI9Xdik81tkXYsXc/0ysz8QY8eHxuNu1rSbRr6tScdK3e0dDOuzuaePr9PTy7Yg97msJ8+alVPPTGdmJotIZfvLwJgJNGpBMMdhV1K5VAUAwmo/LT8NovXMM08HucLN9cx1sbu6qGdgYbaX4XJVl+Vu9sZsPe/Si63nw6b2QzUlycPTKd+VUZlKR13RAFDtE2Qhyd5D1OqYcpuuEwjW4z4gOtujAVh8NBashHRzTOnDE5zBqdw+cefBu3s6vy2n0vriPSESXSGfCZJhkBF5NL05g7KpNttdbFqzI/yK2nlRPX8OlH3kUpRVpGAK0M4nGd2MfRX1J9Tn5/83TOnZjX4/NKKUqzfOxpbCcjxY1Da8blpRCPx1mzp/us4+aaFhyGotmeZIlG491uDhWQ6us+OWMoePa93Tz+1k5W727G5zL57sXVpNmv3Y01Lby/o4kZZWk4DIPSTB8b97VSmeXH5TKpD0cxDUWkI8q0qqzETH3A7aS9PUpDU5g1u/Yzu6KrBYJSiutnlxCLa97dYe0HXrSulufe3U19S4Rr55Tw2QWVVOZbF+lJRanMHJHBzXNLcZrWqsmSr83jPDstUCnF6H4q6HMiCXic+H0eXG4XP3xmFdNG
ZAz4hEx/MAyrmfrYoiOnuX7zwjF85sye94dV5aQQ15CemYIvxcuEXny/wxmRHeCuS8Ye1JJgVmUGXr8Hr98q/99JKcVl0wsx7VUCgOL0QweCY/KDOJ1GYr9RXpqXwjQPM0eko1GEQh6cToOqpD1MI+2b78I0LxdPycc0DdbVtBKNxslIWsEI+YZ2ZcHBaGp5GmgryMrsIXV3YlLl7eqyDBZOysd0mJwzMZ//vXpSopDXH2+dwW8/MZVTRmXxhYVVvPy1ud0mbquT9pHNqEjH4XQQSg8QdJo0tXRwfVJf0IrsADVtUaJJbXpcdtCxq6EdpUisvu9p6uqFBzBrVBYep8GOfV3N0rVdzOjRxdtYuaOJNVsb8dh9QO+9cTpzx/S8Z3dedQ4Bj4O1m2r52p/eY+n6WiaNzOaDPft5ca2VKp2WdK/XGo4mJmM6m8pnBKwCOqapEo3agW5tIDovhwW93E9+WlUmeakeDKW4cloh1Xkp5ATdpNsFnDr5PA7+/JmT+N+rJ7Kz0fp3+q9zRyaer7X3/+WkuNje0MZKuybBQ29s5/LfvsWm2lZG5QRoiVjX7FV2YFsc8jI5KTV90PYHPQoSCA4xAZ8Lj8+Ny+MiHNO8s7meldu7KoKeO8UqMjKpLI3lWxo47+5F3PvPjbicXRdSpRSGUlRnB7h0ciFXTClgTI4fn9uBz+0g4B76L4yBMCWpV9mJXJGtKj+IAmIYTB6RyQ+unc47m+upbQ6D00Gqz4Xba11kq7P95AVd+Nwm+/aHE43hbz61nA4Nqel+5o3N4eQRGXz/kupuP2ftnmbicd1vFUOPxhfOrsIwFB/sbGZfU5jHX1rH5g21LNpYR3skRod9Ed9cs58Uvwuv0+Tq6YW4DEXyvKTPZeIwur/NXm2XrF6ysY5Vu5oZmRtgXEEqD99oVSJ9aXUNrR0xxts9E0flpLBqVzOX3PsGHpdJXCse+NRMlnx7ARlBD4ahcDkNXt3UNeHTEYsz364W2mlCYSqGslb8UnxOfvfvLdzz4gZOH5PNtPJ0bjm9nLpwFIehEit9N51SxmtfOWVIBD8fmmkm9slef4hG74NN503slPK+pfaOK0pFKWhri6KUSqy4HWszKzLsPoIGo/O7vz+kep2E7FZGhqHIOUyKZmVOAJfLpDjDR3rQxUnl6SilGFsQJNXrwO0w8Hqd3fYFjSsMYij4zzMrEnuZWlsjaG2tFjzyiancdno5zmPQT08cWzMruwrkFfawslOe5cfj9+D2udlc28Z504rx+NzdJhvAWnWbXZWJy2Fw86nlB7VHmVKWxmUzCvnldZMZZ6cf56f52bS9kY+eVNyt52i1fX3TcY1hKG46pYzZdlP2rbWtVjVS+3VZY6cx/n7xVi6//00MpfjM2SPxuU2uP8OanJk72nqf7yxUtr22lYKCVO66ahJzRne/BiQrzQrw7g8W4vc6+fOizZiGwvC7ObUiPdEaLJQ0iVnfGkkEdU6nwbWzrFYP1ufds5k+MiGPb50zkv88rZyT7VXXgtCHv8fJDbqJGooOBeV2S6p7Xt0MpsGEkjQaWyPkhTwssDNbppWG+NVrW3CaisrsADsa2vlgd/NB3/eqadb9cNDnTFzTH39nZyKIvGLKsSvKdyKTd64hJi/dZ5fRNnA4TVrDMZbaG+EBPnNWFekBF59aMJIH7Gbxk0tD3VJK3U6DoM/JhWO70r5OrcjA7TRxO02K0o5fpcChpCDpwlSee/yDn0PxuRyUZQdwmgYzxxcxujDEcrtamGkYXDenFIfTZFRBkOaWMJleJyNyUnh7S2MifWZqWRqZKS7G5Af59GnWzfTpo7OZXpZGR0eMaDTOD5+3NuiP72XT+P502uhsRuan0JpUQTMSifHEO7u46meLmPSlv7FyeyMrtjVgOk0KU9zc94/1NLdFMU3FKfaFvaUjdtD3vnJGEfPHZLNqVzNrdu9nlP1/nx1043Ea/N9bViXWCUXWTeeZSQFdZ3W1mLZ6TW7a
14rbbZKdE+hWoGZ0XgoTDvh39LpMqnICRCJxivMCPHLLdH76sQn8/OqJADS0RohE4kwsTu2WznpgIDvcjLH/Hd/8zllcMK3/0lAH0qO3zeKcyflU9bEVRkVOgG9fMjbx+bHo9dkTj9PEZ7dpmFRycLBZZb+GnKY67ERlbtBNRU6AhkiMKIo59uvUaRp8YUElbfbrNTnNM83n4s1vnMrckZmJ/YfxuKYgzcOXFlQyMjeFa04uOTa/qDimqpNWdg5siQRWpVrTNHA4rAqftz/xgXXsURbB8jhN7rpkLKePySbD3rO6Ze9+wtE4V84u7ba/8qOT8vj03DL8Lqv40HWzi6kuSMXpNAj5nHjcZqKJfef+7J/9Yz1vra3l6098wGcXjuKDH53LXR8dz1s/WEhtcwcX/uR1Nu9rJeBx0NgaQSlFatJe3R0N7Xz3xQ08t6qm27i9LgdXzLOux8U5KRiGwakV6Xx6dgmj0j1cdc9SLvjJ6/zHw++wbFN94vcwDMWO5jBN4RhzKzMSgWGnwpCXORUZnDcul6/Or+T2hVUU9yHTp9x+3SmlSLWD1GgszuJN9Ty2fCeRaJwJBUHcDoM/3zKdOjstNBLTFIQ8bKlr480tDYlA9qIJudx9cXUiOE1eaVRKsaWujXPH5nDT7OHxuh7eV/ghaFZFOm63icfjwHQYiSqEf/jMyfzu1pkow2DFj87l4cXbeGHFHs4cm8NPrpqIMqwCMUpZvZlSfS7KsrqCk/w0H+kBF2l+Jx+b2PN+EXF0kmeQp1VkHubI429mVSbhjijL7VWnRWtrUErxtfNHJTbNV+WnsnxzPa+vrcHlNInFNR+Zas2oKaXID3mJROP8ZdkO/rlyL20dMX586Vja26O0tkbosNMrK0+Q8vyfmFuWqHraad2avSxeu4/6lg6+/ug71v5ah8GqrQ3dKr6dWX3omViAUXkB9jSFCUfjiUDQUIqpdqpSSYaPfPsiNSo3kLhQdaYQfbCjiVhcs6uhPREAdu7nu+30cu6/ZlKPxR2m25WBd9e3M6UsjYUT8hLn4dY6a69w4SDoDzqQnvziKaz88bkUpA+dvSLTKzO5/+aTDkrB/DDOT+o1eKiKncdCrp1aNqn44ECwc8LpUM3kOymlEk23wdoP1OnspH2yh9rvl5tU2fmnl48/aOVInFhSvE4WTrLOz/njek6R/MMt0/nxleP5wRXjaQnHMA1FUR9e69kpbpTdUD4r6GbqCGuyYXtdK0s31NLQGuHiiXm0d8SsyXWHSV7Ig9fr5JTqbIJ+F4UhL0pBY2uEWFwTsVernl62k10N7bjtFbhMv4vfvLKJ+pYOqguCXDfHClyCLpON+1rRWvPK+lp+vXgrb2xp4Pdvbu/WJxbgspOKmTaxgPLyTC6dmEt+qoeCkIf125vY3diOAl5bu4+2jlhS5VDFUrvIzHljrSJmyUWi0pNTOF0m8yr7dn8zImmFvnPrRUtbhB0Nbby2vo5YXDOxOMRNj7zLZ/68gpqWCLlBNzfMLGZqUQi3wyAr4OJP10/lwY9P5LZ55UwsTKUg1YOCbmnDnZfNA1tZDGWS4zfEXD89l8fe2G73XYnhT/Hw8M3T+e5f17J8i/XCffHLp/CWvaLzpYVViRtAl8vE4XAS9Dk5szL9oO89vzINlKI8+8RdvRpsPD4P8Vg8sRn8RPXlC8bw2JKtrN7eQCyuWb2jCdM0uGJmMdFYnBSPg9fW16G11X65pSNGVW4g0VYBID/Nw3Pv7eFbT64CIM3vZFbSBaLzlvRECQTPrM62Ggi7HbSFo1w0vYgn3rDK5AdS3ImVdtM02NMYJtXrxOsyadWaifZeqYxD7P2sTurBmbzXbm5VJq+tq6Uss3trhKduPYkHXtvCbxdtwTQV72xtYFdDGzE7vUhrTWWWn/e2NnLBxHy8PRQeArhlXjnPfbCXffutljFpSfuatthNgPMGuDrtiS7gkT3Rh+N2GEwoTmXrvtZjElgeylfPHcWra/fh
dR182zK5JITbbfaqx+MlU/JBQ3VBSrfVQ6UUP7tiPH9Yso3sQwSCWSlu7r5sHGWZvj4FC2Lg/OLG6bz0/i5Oq+55Ant6UsGYd7Y0sGFvS2LC7cPIC3mszeHAOZMLMO3356t/9Sbb6tpQCp64bSat4SjZ9uRG5/m2oaYFn8vaW6+Uoq0jxsNvbKMtHCUz6GZfU5i7X1jPDy4dB8CbG+tpaovyvUvHUZDpSxQ8CblNFm9u4PNPrmKz/b6e6Xexr6WDV9fXsSCpyNfIbD/VxWlML05l/siu6/HepnZmjEjn1zdMpXZ/B6d8+2UcTqt9y0UTcnni3d0UhTxMLEwl4Da597Utia9NO8Z1JJIDwTX2XsSWtiivr6slEo1b+yYb29lgFz4E+NlHx5Jlb2l44hPTiGtt7/nsuua5HAbZKS5CPgd+r4O2cCwxgXqoa/dQJIHgEBMK+BlfmobLYfBy0y7C4RgPLtqaCAIB/veF9dS1dPC180aRG/KwtLMpqD3TEvI6uKqHFKhLJw+NtKgTiWmamKZJ9QmQDnk4mUEPVXkprNzRxIsrdlPbHKYwO4DXZQIm918/hcvuWZI4fldzB9dMzO/2PTpXuFK9Ti6ZXsBvXtnMs+/sSjzvMhQZGb4TpuhCZzqadnp59pbpjC5I5dNnV/H8ij385b3drG8O4zCtPYEK+MeX5/Dw4q08vGQ72UE3f/nUDPzungOyKSUhbpxTwvvbmyjJ7FqBm1+dzZKNddx6anm3470uk0nFqTywyJqw6bxhAYhE4jQ3d/CenUaU6j3027rLYVCZE2BvUy21LVYgGInF+euKPSxab6329lcPRzF0PXLLDLTWRz6wD+aNymLeqJ6r1I7KTcHtdjCyF1VLfS4HV59c3ONzsyoymJVUZKknp1Sd2NkbojuXw+DsSb3b6/XNC8f0+eflpXpwOEyiHdHEnuJ3tzWyra4Nl8ugoyPOQ69vJa672iSVZ/pxmoptdW2UZfrI8FvppaaC37y+Fa1hwbgcHl28LbEXEGDZpjqUgllVmXzzr2t4d6dVCMxp38t1BoEA+T4HHeEof125l/mjMhMBj8NQfHth5UEZJHubwokWYxkBFz+/bjJf+fMKlKE4f1wu6T4XcyszMA3FvMpMnl2xB7d9vQv5jm0gmPz9rOr11l73SDROezjG6PwUnlqxp9vXZCYFcl5Xz9dhgGklabywuoZvnjuKu55bS4bfqurtcQ6fhEkJBIeg+y63Gl5PWbmXcDjG397d3e35J9+29iAFPA5O+f6/idkpZS6HQTSuyeyhzLLoHz/4+CT++NoWMoMn/irMyVVZfLCtkVvufxOA+RO6Ar2JJSFSvE5Oqczg5MoMvvf3dcw+4IapM7VrTEEKX1w4krGFqXzuD+8mnq9vifDgTdMG4Dfpveygm017WhJlu0fnpzI6P5VNjR1U5aZQkeHj4UVbGVsYJN3vYndTmNxUNw7DoOQwaXJKKW6ZV37Q4wG3g+8n7blKNr7IKvbicpo0tkW550Wr+XVyw+PO7304+fbNx/aGViqy/Dy3ci8/fXkTETtlqFRWOsRR6ssKyrEQ8jm5akYhJx8hiBOiv2UH3ZgOk2mjc6jITWHj3v1cd/8yAKaNSOf11fv4P7snbWfGTHbQzX1XT+KFlXs5Y3Q2aX4nhqFoC8fQdrXtmSPSeertXextDCd+1p7GMH63g3v/vZntDW12xXdIdZncMrsEtObpFXtZsbWBP6y29geOrsxg7d6Wbr38DrxmxOKamuYwWUkpk+FonLjWOJQiJ+jmY9O7FgaumlbAi2tq6Awp0o5xIAjw5E3TqGuNcP3D7xAIuCgJevhgZxNaQ9DrgOYw00tCvGEvevS27+WF43N5dsUe3t/ZhFKKT84upSMW73M662DSp3dvpVS6UuoFpdQ6++8eS5Appa6xj1mnlLom6fEpSqn3lVLrlVI/U/b/nFLqDqXUDqXUO/afhX0Z53Dl7WFlICsp5eU3i7YkgkDTVHjd
DtICbrKHUW708XbV7DKe+cq84z2MXllo7wWKRGMU5VjBXCfTUEwqDrF3fwcd9sLAhAPSXUN2ukjQa/UJPHdiHn/7wmyut/v5AYzO71vximOtONOPBv60ZBvvbu1aVR+TF2BLfTs7mzvo6IgxzU4v2tHQTn4vy2QfrYDbwajcFDwuA6/bZPmWBhymIvl6d2Al1p4U2yt+a3a3oLXmT8t20NQUpq3N2mDf3+07hOgP/zm/kpPKD97SIMRAcjtNnKairjWC1prP//F99rdHKcj08dMrJmDa6dNKWXvFO40vTOXz8yuZUJSK0zTIDnmozA3Q3hEjI+DiX2v2EfI7aWiNJL5md2M7kbjmmRV72NXYjqHA7bKuDWeOzOTMUVl87/xRhJImanbv2c+7Ow93aNQkAAAWiElEQVSuoJmsdn+YuKZbinRDm1Up1+syD9p6kBv08H83dk3i+g+zAvdhpXqdlGX4+J/zRqGUoiLHT2cSQkRryjN9fMluc3M0P78800eaz5kIINP9LhZW5/RrL+MTTV+n8b4CvKS1rgResj/vRimVDtwOzACmA7cnBYz3AjcBlfafs5K+9G6t9UT7z9/6OM5hKeB14vE47NYQ1mNNdjUll8OgZn8H5Vk+xpeG8PmcZKa4SfU5mZB/YuzREieW6SMyePDWmSz//kJe/ebppB4w6zepNMTaPftZuqGO4gzfQf13RtgzkGdWd23ar8wJcLFdwnlW5Yk3m99ZofC/n1zFJT/vSn2tzgsSjWuWb2tEa+vi/+XHV7B2936K+3FFbVJxKuFInOL8FN644zTS0r2Jmc9zxudy+mHKhXcqtwtc/HHZdr74xErWH9AbMXCIdFYhhBBHFvA4aG6Lct5PX2fFtkaKM3288IXZpHqdicl4h8Po1qD+QFkpbkYXBnngxinkZ/p4YVUNCgh3xAhHYnzh0fd4dc0+lKG4bV4Z1TkBGps6cDhNVmxvonZ/GK01zy7fyYbd+zl/Uh63njGC+qYwL6+uIRKLH/Jn722yVh2TiybV2sXQ0g6xdcOXFHz1djXuw8i1x5SdlEVV2xKhPMNHus/JFVMK+OFFvU/xVUpRle1nt/07B4dB38AD9TUQvAB40P74QeDCHo5ZALygta7TWtcDLwBnKaXygKDWerG2Nhf8/hBfLz6k/DQfLpeJx+skEHDh9zsTs1Eul0mKx8Hetiib69owlGJifgo3TCvglMqe92GI4U0pxZnj88hMcSca3iabUpqG1vD6ulrGFBxcUGh8USr//sY8LpjSfe9gZU6AOy4aw91XTei3sX9Y08u6X6gbWq2L4ZRia09nZ1PgR97YxourauiIxRM9ofpDRU6AuIam9iivb6oneVuWr5ezoJ2VD/ODHpZtbUDHdaKMPvTvRVwIIYa6kN9FazjKmh3NpAdc3H/95ER15pmVGaSkuPB6nd0KhR0o3e+kvjXCnJFd92P19mrg397bzVP2Fh+HQ3HB+FzawlZqf+dWgUXralm6oY6vP/4BbZEYo/OD3HBKKaahWLahjsm3v8T3n1vb7Wdqrbn9Lx/wk+fXAdAYjiZaqjS2RRLjOp5y7OvXH9/eyY1zSjhnXA61rREqsvwopbhpdkmiEndvVSUVtUuVQPCo5WitdwHYf/c0HV0AbEv6fLv9WIH98YGPd/q0Uuo9pdQDh0o5BVBK3aSUWqaUWlZTU3Oow4al0yrTcTo70xA6++UYZKZ5MBwGEaw3DdNQuJ0ms0pDzJY9FuJDmlDcVfBm3CGK3+T2UJFSKcVVJxeTdgJW6arOT+2Werl4fR11LR20dcT43Gnl+B1dff069Wd6Wmej63hc87sl2xItI6znercSmZPqxucy2dcUpiToIRrT3HRKKdkpbnIP04xbCCHEkWUGXIlJul9fP4XyrK5AY97ILJRSTCpOxXOI6s7W93Czrb6NxrYI2+rbOG1UFtq+Fv347+sSx3mcJo8s3cbW2s6KmRqvy+Sxpdv5wqPvJY6ryg2Q4nUy
Z2Qm9Y3ttIdjPGHvVez07Du7eGTxNl5ZvQ+A3y3bwVeeWc3Oxnaa7EAwed/gge69bBw/O8Qe92MlecIzphQerxOP02BBL7JhDmV00n7J4GGKrQ1VR/yNlVIvAj3V3f16L39GT9PL+jCPg5Uyeqf9+Z3Aj4Dre/rmWuv7gPsApk6d2r9lywaZi6ozeOjN7bSFY7S2WvndDodBh72KYZoKv8eBy2HiNA2qc6RIhPjw/G4Hc6oy2dnQxhUze67KN9h4XSZ+v4tYLE57e5SvPbaC/XZ69Rt3nMaKygwe2tuC22Fw/7WTaI/E+7VwRmdbiZmlaazYu594XHNKZQaXTS9M9Ag84u/kNPnlxyfynb+tZdXOZq6bVcyY/CBPfnpGv41bCCGGi9JMP29urGdEtp9xRd0nRc8el8P4wuARMzjOn5jHcyv2cOX9bxLXcOaYLJZuqqO9PcruxvbEcW3hGHe/YBUOywm62dMUpiDdy9INVhXocYVB0gMuJts9ar+4sIq1u5vZWd9ONNb9lvmhRVupyAnwy2snccdf19AY1ezd38Fn/7KSJjsbJucwgeDRrsR9WB+fXshDb2xnV2M7K3c3M68io0+VSmeUpSWqhbodw29rxBEDQa31GYd6Tim1RymVp7XeZad67u3hsO3AvKTPC4GX7ccLD3h8p/0zE3VglVL3A88eaZziYH6/j3GFqazZ3YJSilgsjtaaqHUfS8DrJCvFQ8jvIMVl4vHIaoDom19dPxmHoYZUeuHckZn8e10tpmmw394nAXD/y5vYWNOCwzSYVpbGuIL+bwHidzvITXXjMg1+c+VELrpnCaWZvqNehazOD/LQDVMIR+OJWenheAEUQohj7fTqbP78xnb++yM9F+8q6EVBrullaXzq1DLu+ZfVG3B0XpDCNC8tbVFa7NW5WEwTjcX51OnlBNwOXKbBd/6+Fo8dZH713JFcNauk21aOqtwUXv7qXK68703e2lBHRzSGy2HS1Bbh3a0N3HxaOSWZfpxuJ3NLA1w9rZBvPb+OumZrD13mCVBM8PqZxexoaOeV9bXE4pqqnL7VtTCU4tHrpnQrxDOc9HUN9GngGuC79t9P9XDM88D/JKV3zge+qrWuU0o1K6VOApYCVwM/B+gMLu3jLwJW9HGcw9YPLxjDM+/v5peLttLWEUVpaG+P4XKZpPldXDk5jzMP0ZtJiKPl7MeG0sfLty8aw9o9+7nxweWcPyUfE9jZ0M4/V+5lX3MHMTQTigauD2R1XpDlWxtQQDSuP3Rja6XUYVOThBBCHL3TRmfx9p1nHFQw7WhdO6uEzICbimw/ReleCkNedja0Ewx5iMXi1Na24XSaXDmjiIDbwdt25UuP18HPPz6RBeNyepyUVUoxOj/IsvV13PnUau68uJol6+uIa5hVmUlca+pbO8jwu0jzOfmPuaVc//vlgNVT8ERQkeXjn2utFNaKpIbzH5bTNA6b9jqU9fWu7bvAmUqpdcCZ9ucopaYqpX4NoLWuw0rvfNP+8y37MYBbgF8D64ENwN/tx79vt5V4DzgV+FwfxzmsnTcul/GFQYoy/QR9Tnw+J263ychMnwSBQhyB3+1gUnGIBdXZ/Ht9HRPL0ijP9rNhbwuNbRFM8/A9A4+1k0aks6cpzKvrrIugNIAXQogTh1Kqz0EgWCtV50/MY4zdVik/zUM4Euf8cTn43A5SUlwEPA4Cbutn5dl78Hc0tnPamOzDZuacMyGXgN/FH5ds461N9fzynxvISnEzKi+FhtYIMQ0ZdrplabqPUfaqW8YJspd/clFXIbfe7o8XPevTmaq1rgVO7+HxZcCNSZ8/ADxwiOMO2lmqtf54X8YlDvadc0cBcO0f3qGhNYLHZXJqpfRcEqK3vnRWFc9/sJcf/WN9tyItpqkoGcAm7DNHWK/bX9gpQ0XS908IIYa8/JCXjlicj00rZH9blGfe3YU/aW9cVtCFoSAS1aza08wEe7tCLK5ZvKmO/FRPouDY5KJULj6p
iIf/tZHLf7EUgM8trOJTj68gy171Sw76Anb2SH80i/8wRuYE+Or8Clbv2Y/fPfwKvBxLQy+PSxxWeYa1Kpgb8jBH2kQI0Wshn5Mb55QAVrXdK2cVM2tkJoahKMoYuGAsL9XDnReOpjjdS3mmr1vTXyGEEENTsZ39sWxzA0++vZNYTFOY1lWJ22EYFKV5icc1q3d39Yd96I1tfOOZ1Xz96VXE7XKmSikum5JPRtJE4vr6NtoicbbWW8Vo0pOCvqb2KEpxQgVd80dnc9u88uM9jEHvxPkfFQPi2hn5dMTjXDyup0KwQojD+eTcMq6dVcJNDy5nR2M7hRk+alo78A7wXruF43JZKK9hIYQYNiYUpuIyDX74j672EQduS5hens7jb+1g1e5mWsJR7npuHUs31bF/fweb2qMs3lTPLLu4WEWmj5FFQdpaO7h6dgmvbmviuhmFrK9pYdGmerKT9sztD0dRSuHvZb9aMXhIIDjMFKf5uWvhyOM9DCEGJaUUXqfJ5JIQf3pzO9vq2qi2928IIYQQ/cXrMplaGuL1DXWJx7IPKHAytSTEn5ft4KXV+3hlvXVca1sUrSESifPr1zYnAkGlFLNHZNDQHmNlXRsOQzGrPJ2zx2RTlublloeWU5zu45SqzETbJAkEhx5JDRVCiKM0pSREJKbZt7+j1/37hBBCiL64ckYRE5OqVAcOSNWcXGIVUelsF9bWFiEajVOR7cfrMvlgexPra1oSx8+0+wvua4nwtTMrSPc5cRiKl1fXsHr3fl5es49vPLmSbbVtmIbCMQQrgw938j8qhBBHKblv37RSCQSFEEL0v5kj0vnNtZP51LwyAA4sDJrud5GX6qEg6GFBVSaRSDzxddecXEwspnnmvV2J40fm+Bmfn8Its0uYWNiV3VLTHGZWRTovf2k2Xz67CgCtuzegF0ODpIYKIcRRcjkMHvnEVF5YuZfyLCldLYQQYuBceVIR7ZE4F03OP+i56vwUXlxVw8aklb+JRalMKg7xy5c38eraWhaMyaE03YvP7eAOO9BLtrWujQ21rdz593V8em4pf30/SEsk1q+/kzg+JBAUQogPYWRuCiNzU473MIQQQgwzXqfJraf1XDFzfGEqL66qYUSWn7PH5fDKmn1MKQmR4nGSF/KwdV8rH//1MuaMzOQnl4476OsjsTitHTFcLpPFm+pYsrmeWFwzJjfQ37+WOA4kEBRCCCGEEGIIuHRaAZNLQozOsyYqr5tVknjuhlkl3PXXNQCs2N7Y49fX7u8AYGRugP+5cAzfeX4dK3Y109we7eeRi+NB9ggKIYQQQggxBDhNIxEEHuiiyfl8cUEFAO32/sFkyzbX8+42K0DMTnGTn+rhm3bqaIpH1o6GIvlfFUIIIYQQYhi4fHoRz6+s4b1tjYSjMdwOqyXE8q0NfPKhd+gsCZOfajWrz0px88BVEwhIIDgkyYqgEEIIIYQQw0RVjrXf75+r9yUeu/flTYT8zkST+uRCaGWZfrIC3XsWiqFBAkEhhBBCCCGGifPG56IU3PH0KlrCEZraIizf2kBLJE5TNEYg4KI4XSpiDwcSCAohhBBCCDFMjC0Icu2sEqIxzYOvb+PlNTXENRRn+PA4TQxDkRVwHe9higEgCb9CCCGEEEIMI9fMLOL3r2/lkTe2YwCGobhyegEjMv08t6qG3KDneA9RDAAJBIUQQgghhBhGUjxORualsHJHEwChoIu5FZmEfE7G5geP8+jEQJHUUCGEEEIIIYaZK2YUAuBwGNx13mhCPudxHpEYaBIICiGEEEIIMcycVpVJRsjDhZPzmVmefryHI46DPgWCSql0pdQLSql19t9phzjuGvuYdUqpa5Ie/7ZSaptSav8Bx7uVUn9SSq1XSi1VSpX2ZZxCCCGEEEKILh6nyZM3T+fL8yuO91DEcdLXFcGvAC9prSuBl+zPu1FKpQO3AzOA6cDtSQHjM/ZjB7oBqNdaVwB3A9/r4ziFEEIIIYQQSXwuE0Op
4z0McZz0NRC8AHjQ/vhB4MIejlkAvKC1rtNa1wMvAGcBaK2XaK13HeH7Pg6crpScpUIIIYQQQghxLPQ1EMzpDOTsv7N7OKYA2Jb0+Xb7scNJfI3WOgo0Ahk9HaiUukkptUwptaympuYohy+EEEIIIYQQw88R20copV4Ecnt46uu9/Bk9reTpY/U1Wuv7gPsApk6deqTvK4QQQgghhBDD3hEDQa31GYd6Tim1RymVp7XepZTKA/b2cNh2YF7S54XAy0f4sduBImC7UsoBpAJ1RxqrEEIIIYQQQogj62tq6NNAZxXQa4CnejjmeWC+UirNLhIz336st9/3EuCfWmtZ7RNCCCGEEEKIY0D1Jb5SSmUAjwHFwFbgo1rrOqXUVOCTWusb7eOuB75mf9m3tda/tR//PnAlkA/sBH6ttb5DKeUBHgImYa0EXq613tiL8dQAWz70L9R/MoF9x3sQYsiS80v0Jzm/RH+Tc0z0Jzm/RH86Uc+vEq111pEO6lMgKHpHKbVMaz31eI9DDE1yfon+JOeX6G9yjon+JOeX6E+D/fzqa2qoEEIIIYQQQohBRgJBIYQQQgghhBhmJBAcGPcd7wGIIU3OL9Gf5PwS/U3OMdGf5PwS/WlQn1+yR1AIIYQQQgghhhlZERRCCCGEEEKIYUYCQSGEEEIIIYQYZiQQPIaUUmcppdYopdYrpb7Sw/NupdSf7OeXKqVKB36UYrDqxfn1n0qplUqp95RSLymlSo7HOMXgdKTzK+m4S5RS2u4XK0Sv9Ob8Ukpdar+HfaCUemSgxygGr15cH4uVUv9SSi23r5ELj8c4xeCklHpAKbVXKbXiEM8rpdTP7PPvPaXU5IEe44clgeAxopQygXuAs4ExwBVKqTEHHHYDUK+1rgDuBr43sKMUg1Uvz6/lwFSt9XjgceD7AztKMVj18vxCKZUC3AYsHdgRisGsN+eXUqoS+CowS2tdDXx2wAcqBqVevn99A3hMaz0JuBz4xcCOUgxyvwPOOszzZwOV9p+bgHsHYEzHhASCx850YL3WeqPWugP4I3DBAcdcADxof/w4cLpSSg3gGMXgdcTzS2v9L611q/3pEqBwgMcoBq/evH8B3Ik1wdA+kIMTg15vzq9PAPdoresBtNZ7B3iMYvDqzfmlgaD9cSqwcwDHJwY5rfWrQN1hDrkA+L22LAFCSqm8gRld30ggeOwUANuSPt9uP9bjMVrrKNAIZAzI6MRg15vzK9kNwN/7dURiKDni+aWUmgQUaa2fHciBiSGhN+9fVUCVUmqRUmqJUupws+9CJOvN+XUH8DGl1Hbgb8BnBmZoYpg42nu0E4bjeA9gCOlpZe/A3hy9OUaInvT63FFKfQyYCszt1xGJoeSw55dSysBKZ792oAYkhpTevH85sNKq5mFlM/xbKTVWa93Qz2MTg19vzq8rgN9prX+klJoJPGSfX/H+H54YBgbt/b2sCB4724GipM8LOTj1IHGMUsqBlZ5wuKVmITr15vxCKXUG8HXgfK11eIDGJga/I51fKcBY4GWl1GbgJOBpKRgjeqm318entNYRrfUmYA1WYCjEkfTm/LoBeAxAa70Y8ACZAzI6MRz06h7tRCSB4LHzJlCplCpTSrmwNiM/fcAxTwPX2B9fAvxTaz0oZgzEcXfE88tO3fsVVhAo+2vE0Tjs+aW1btRaZ2qtS7XWpVh7UM/XWi87PsMVg0xvro9PAqcCKKUysVJFNw7oKMVg1ZvzaytwOoBSajRWIFgzoKMUQ9nTwNV29dCTgEat9a7jPajekNTQY0RrHVVKfRp4HjCBB7TWHyilvgUs01o/DfwGKx1hPdZK4OXHb8RiMOnl+fUDIAD82a5BtFVrff5xG7QYNHp5fgnxofTy/HoemK+UWgnEgC9qrWuP36jFYNHL8+vzwP1Kqc9hpexdKxPxoreUUo9ipa1n2vtMbwecAFrrX2LtO10IrAdageuOz0iPnpLXgRBCCCGEEEIML5IaKoQQQggh
hBDDjASCQgghhBBCCDHMSCAohBBCCCGEEMOMBIJCCCGEEEIIMcxIICiEEEIIIYQQw4wEgkIIIYQQQggxzEggKIQQQgghhBDDzP8DjDiE/w52ocUAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x1bf101914e0>"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "import matplotlib as mpl\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib import rcParams, cycler\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "\n",
+ "\n",
+ "df=pd.DataFrame(factor)\n",
+ "data=df.T\n",
+ "cmap = plt.cm.Blues\n",
+ "rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, 20)))\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(15, 3))\n",
+ "lines = ax.plot(np.linspace(0,1,1001),df.T)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<img src=\"./3.png\" width=\"85%\" align=\"left\">"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 114,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>0</th>\n",
+ " <th>1</th>\n",
+ " <th>2</th>\n",
+ " <th>3</th>\n",
+ " <th>4</th>\n",
+ " <th>5</th>\n",
+ " <th>6</th>\n",
+ " <th>7</th>\n",
+ " <th>8</th>\n",
+ " <th>9</th>\n",
+ " <th>...</th>\n",
+ " <th>991</th>\n",
+ " <th>992</th>\n",
+ " <th>993</th>\n",
+ " <th>994</th>\n",
+ " <th>995</th>\n",
+ " <th>996</th>\n",
+ " <th>997</th>\n",
+ " <th>998</th>\n",
+ " <th>999</th>\n",
+ " <th>1000</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000664</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006947</td>\n",
+ " <td>0.006498</td>\n",
+ " <td>0.006520</td>\n",
+ " <td>0.008262</td>\n",
+ " <td>0.008510</td>\n",
+ " <td>0.007529</td>\n",
+ " <td>0.008193</td>\n",
+ " <td>0.008236</td>\n",
+ " <td>0.008553</td>\n",
+ " <td>0.007917</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000664</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006946</td>\n",
+ " <td>0.006498</td>\n",
+ " <td>0.006519</td>\n",
+ " <td>0.008261</td>\n",
+ " <td>0.008509</td>\n",
+ " <td>0.007529</td>\n",
+ " <td>0.008192</td>\n",
+ " <td>0.008235</td>\n",
+ " <td>0.008552</td>\n",
+ " <td>0.007916</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000664</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006944</td>\n",
+ " <td>0.006495</td>\n",
+ " <td>0.006517</td>\n",
+ " <td>0.008259</td>\n",
+ " <td>0.008507</td>\n",
+ " <td>0.007526</td>\n",
+ " <td>0.008190</td>\n",
+ " <td>0.008233</td>\n",
+ " <td>0.008550</td>\n",
+ " <td>0.007914</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000664</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006939</td>\n",
+ " <td>0.006490</td>\n",
+ " <td>0.006512</td>\n",
+ " <td>0.008254</td>\n",
+ " <td>0.008502</td>\n",
+ " <td>0.007521</td>\n",
+ " <td>0.008184</td>\n",
+ " <td>0.008228</td>\n",
+ " <td>0.008545</td>\n",
+ " <td>0.007909</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000664</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006926</td>\n",
+ " <td>0.006478</td>\n",
+ " <td>0.006499</td>\n",
+ " <td>0.008241</td>\n",
+ " <td>0.008489</td>\n",
+ " <td>0.007509</td>\n",
+ " <td>0.008172</td>\n",
+ " <td>0.008215</td>\n",
+ " <td>0.008532</td>\n",
+ " <td>0.007896</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>5</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000663</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006894</td>\n",
+ " <td>0.006446</td>\n",
+ " <td>0.006467</td>\n",
+ " <td>0.008209</td>\n",
+ " <td>0.008457</td>\n",
+ " <td>0.007476</td>\n",
+ " <td>0.008140</td>\n",
+ " <td>0.008183</td>\n",
+ " <td>0.008500</td>\n",
+ " <td>0.007864</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>6</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000663</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006815</td>\n",
+ " <td>0.006366</td>\n",
+ " <td>0.006388</td>\n",
+ " <td>0.008130</td>\n",
+ " <td>0.008377</td>\n",
+ " <td>0.007397</td>\n",
+ " <td>0.008060</td>\n",
+ " <td>0.008103</td>\n",
+ " <td>0.008420</td>\n",
+ " <td>0.007784</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>7</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000663</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001510</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006620</td>\n",
+ " <td>0.006171</td>\n",
+ " <td>0.006193</td>\n",
+ " <td>0.007935</td>\n",
+ " <td>0.008182</td>\n",
+ " <td>0.007201</td>\n",
+ " <td>0.007864</td>\n",
+ " <td>0.007907</td>\n",
+ " <td>0.008224</td>\n",
+ " <td>0.007588</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>8</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000663</td>\n",
+ " <td>-0.000572</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001509</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.006153</td>\n",
+ " <td>0.005704</td>\n",
+ " <td>0.005725</td>\n",
+ " <td>0.007466</td>\n",
+ " <td>0.007713</td>\n",
+ " <td>0.006732</td>\n",
+ " <td>0.007394</td>\n",
+ " <td>0.007437</td>\n",
+ " <td>0.007753</td>\n",
+ " <td>0.007117</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>9</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001149</td>\n",
+ " <td>-0.001121</td>\n",
+ " <td>-0.000663</td>\n",
+ " <td>-0.000571</td>\n",
+ " <td>0.000016</td>\n",
+ " <td>0.002173</td>\n",
+ " <td>0.000821</td>\n",
+ " <td>0.001509</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.005099</td>\n",
+ " <td>0.004649</td>\n",
+ " <td>0.004670</td>\n",
+ " <td>0.006410</td>\n",
+ " <td>0.006655</td>\n",
+ " <td>0.005674</td>\n",
+ " <td>0.006335</td>\n",
+ " <td>0.006377</td>\n",
+ " <td>0.006692</td>\n",
+ " <td>0.006055</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>10</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001148</td>\n",
+ " <td>-0.001120</td>\n",
+ " <td>-0.000662</td>\n",
+ " <td>-0.000570</td>\n",
+ " <td>0.000018</td>\n",
+ " <td>0.002174</td>\n",
+ " <td>0.000822</td>\n",
+ " <td>0.001509</td>\n",
+ " <td>0.000150</td>\n",
+ " <td>...</td>\n",
+ " <td>0.003056</td>\n",
+ " <td>0.002606</td>\n",
+ " <td>0.002626</td>\n",
+ " <td>0.004365</td>\n",
+ " <td>0.004609</td>\n",
+ " <td>0.003627</td>\n",
+ " <td>0.004287</td>\n",
+ " <td>0.004327</td>\n",
+ " <td>0.004641</td>\n",
+ " <td>0.004003</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>11</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001147</td>\n",
+ " <td>-0.001118</td>\n",
+ " <td>-0.000659</td>\n",
+ " <td>-0.000566</td>\n",
+ " <td>0.000021</td>\n",
+ " <td>0.002175</td>\n",
+ " <td>0.000822</td>\n",
+ " <td>0.001508</td>\n",
+ " <td>0.000148</td>\n",
+ " <td>...</td>\n",
+ " <td>0.000344</td>\n",
+ " <td>-0.000105</td>\n",
+ " <td>-0.000083</td>\n",
+ " <td>0.001656</td>\n",
+ " <td>0.001900</td>\n",
+ " <td>0.000919</td>\n",
+ " <td>0.001579</td>\n",
+ " <td>0.001620</td>\n",
+ " <td>0.001934</td>\n",
+ " <td>0.001296</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>12</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001144</td>\n",
+ " <td>-0.001112</td>\n",
+ " <td>-0.000651</td>\n",
+ " <td>-0.000558</td>\n",
+ " <td>0.000030</td>\n",
+ " <td>0.002178</td>\n",
+ " <td>0.000823</td>\n",
+ " <td>0.001505</td>\n",
+ " <td>0.000145</td>\n",
+ " <td>...</td>\n",
+ " <td>-0.000861</td>\n",
+ " <td>-0.001304</td>\n",
+ " <td>-0.001277</td>\n",
+ " <td>0.000463</td>\n",
+ " <td>0.000707</td>\n",
+ " <td>-0.000272</td>\n",
+ " <td>0.000390</td>\n",
+ " <td>0.000432</td>\n",
+ " <td>0.000745</td>\n",
+ " <td>0.000109</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>13</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001137</td>\n",
+ " <td>-0.001098</td>\n",
+ " <td>-0.000633</td>\n",
+ " <td>-0.000536</td>\n",
+ " <td>0.000051</td>\n",
+ " <td>0.002185</td>\n",
+ " <td>0.000825</td>\n",
+ " <td>0.001497</td>\n",
+ " <td>0.000137</td>\n",
+ " <td>...</td>\n",
+ " <td>0.000550</td>\n",
+ " <td>0.000100</td>\n",
+ " <td>0.000121</td>\n",
+ " <td>0.001844</td>\n",
+ " <td>0.002069</td>\n",
+ " <td>0.001078</td>\n",
+ " <td>0.001723</td>\n",
+ " <td>0.001748</td>\n",
+ " <td>0.002043</td>\n",
+ " <td>0.001393</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>14</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001120</td>\n",
+ " <td>-0.001064</td>\n",
+ " <td>-0.000591</td>\n",
+ " <td>-0.000487</td>\n",
+ " <td>0.000099</td>\n",
+ " <td>0.002198</td>\n",
+ " <td>0.000825</td>\n",
+ " <td>0.001474</td>\n",
+ " <td>0.000112</td>\n",
+ " <td>...</td>\n",
+ " <td>0.001966</td>\n",
+ " <td>0.001478</td>\n",
+ " <td>0.001461</td>\n",
+ " <td>0.003121</td>\n",
+ " <td>0.003283</td>\n",
+ " <td>0.002243</td>\n",
+ " <td>0.002832</td>\n",
+ " <td>0.002802</td>\n",
+ " <td>0.003039</td>\n",
+ " <td>0.002342</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>15</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.001078</td>\n",
+ " <td>-0.000986</td>\n",
+ " <td>-0.000495</td>\n",
+ " <td>-0.000379</td>\n",
+ " <td>0.000196</td>\n",
+ " <td>0.002208</td>\n",
+ " <td>0.000804</td>\n",
+ " <td>0.001400</td>\n",
+ " <td>0.000038</td>\n",
+ " <td>...</td>\n",
+ " <td>0.002029</td>\n",
+ " <td>0.001483</td>\n",
+ " <td>0.001412</td>\n",
+ " <td>0.002960</td>\n",
+ " <td>0.003009</td>\n",
+ " <td>0.001904</td>\n",
+ " <td>0.002409</td>\n",
+ " <td>0.002301</td>\n",
+ " <td>0.002457</td>\n",
+ " <td>0.001709</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>16</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.000987</td>\n",
+ " <td>-0.000824</td>\n",
+ " <td>-0.000314</td>\n",
+ " <td>-0.000192</td>\n",
+ " <td>0.000340</td>\n",
+ " <td>0.002145</td>\n",
+ " <td>0.000682</td>\n",
+ " <td>0.001177</td>\n",
+ " <td>-0.000157</td>\n",
+ " <td>...</td>\n",
+ " <td>0.001620</td>\n",
+ " <td>0.001006</td>\n",
+ " <td>0.000883</td>\n",
+ " <td>0.002254</td>\n",
+ " <td>0.002149</td>\n",
+ " <td>0.001004</td>\n",
+ " <td>0.001432</td>\n",
+ " <td>0.001267</td>\n",
+ " <td>0.001360</td>\n",
+ " <td>0.000622</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>17</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.000815</td>\n",
+ " <td>-0.000558</td>\n",
+ " <td>-0.000071</td>\n",
+ " <td>0.000014</td>\n",
+ " <td>0.000427</td>\n",
+ " <td>0.001832</td>\n",
+ " <td>0.000340</td>\n",
+ " <td>0.000729</td>\n",
+ " <td>-0.000446</td>\n",
+ " <td>...</td>\n",
+ " <td>0.000873</td>\n",
+ " <td>0.000301</td>\n",
+ " <td>0.000229</td>\n",
+ " <td>0.001397</td>\n",
+ " <td>0.001166</td>\n",
+ " <td>0.000131</td>\n",
+ " <td>0.000563</td>\n",
+ " <td>0.000430</td>\n",
+ " <td>0.000530</td>\n",
+ " <td>-0.000075</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>18</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.000567</td>\n",
+ " <td>-0.000266</td>\n",
+ " <td>0.000095</td>\n",
+ " <td>0.000092</td>\n",
+ " <td>0.000335</td>\n",
+ " <td>0.001230</td>\n",
+ " <td>-0.000060</td>\n",
+ " <td>0.000310</td>\n",
+ " <td>-0.000518</td>\n",
+ " <td>...</td>\n",
+ " <td>0.000274</td>\n",
+ " <td>-0.000086</td>\n",
+ " <td>-0.000032</td>\n",
+ " <td>0.000844</td>\n",
+ " <td>0.000538</td>\n",
+ " <td>-0.000218</td>\n",
+ " <td>0.000220</td>\n",
+ " <td>0.000130</td>\n",
+ " <td>0.000220</td>\n",
+ " <td>-0.000205</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>19</th>\n",
+ " <td>0.0</td>\n",
+ " <td>-0.000322</td>\n",
+ " <td>-0.000083</td>\n",
+ " <td>0.000105</td>\n",
+ " <td>0.000055</td>\n",
+ " <td>0.000180</td>\n",
+ " <td>0.000655</td>\n",
+ " <td>-0.000195</td>\n",
+ " <td>0.000138</td>\n",
+ " <td>-0.000342</td>\n",
+ " <td>...</td>\n",
+ " <td>0.000040</td>\n",
+ " <td>-0.000115</td>\n",
+ " <td>-0.000026</td>\n",
+ " <td>0.000481</td>\n",
+ " <td>0.000204</td>\n",
+ " <td>-0.000218</td>\n",
+ " <td>0.000125</td>\n",
+ " <td>0.000047</td>\n",
+ " <td>0.000102</td>\n",
+ " <td>-0.000150</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "<p>20 rows × 1001 columns</p>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " 0 1 2 3 4 5 6 \\\n",
+ "0 0.0 -0.001149 -0.001121 -0.000664 -0.000572 0.000016 0.002173 \n",
+ "1 0.0 -0.001149 -0.001121 -0.000664 -0.000572 0.000016 0.002173 \n",
+ "2 0.0 -0.001149 -0.001121 -0.000664 -0.000572 0.000016 0.002173 \n",
+ "3 0.0 -0.001149 -0.001121 -0.000664 -0.000572 0.000016 0.002173 \n",
+ "4 0.0 -0.001149 -0.001121 -0.000664 -0.000572 0.000016 0.002173 \n",
+ "5 0.0 -0.001149 -0.001121 -0.000663 -0.000572 0.000016 0.002173 \n",
+ "6 0.0 -0.001149 -0.001121 -0.000663 -0.000572 0.000016 0.002173 \n",
+ "7 0.0 -0.001149 -0.001121 -0.000663 -0.000572 0.000016 0.002173 \n",
+ "8 0.0 -0.001149 -0.001121 -0.000663 -0.000572 0.000016 0.002173 \n",
+ "9 0.0 -0.001149 -0.001121 -0.000663 -0.000571 0.000016 0.002173 \n",
+ "10 0.0 -0.001148 -0.001120 -0.000662 -0.000570 0.000018 0.002174 \n",
+ "11 0.0 -0.001147 -0.001118 -0.000659 -0.000566 0.000021 0.002175 \n",
+ "12 0.0 -0.001144 -0.001112 -0.000651 -0.000558 0.000030 0.002178 \n",
+ "13 0.0 -0.001137 -0.001098 -0.000633 -0.000536 0.000051 0.002185 \n",
+ "14 0.0 -0.001120 -0.001064 -0.000591 -0.000487 0.000099 0.002198 \n",
+ "15 0.0 -0.001078 -0.000986 -0.000495 -0.000379 0.000196 0.002208 \n",
+ "16 0.0 -0.000987 -0.000824 -0.000314 -0.000192 0.000340 0.002145 \n",
+ "17 0.0 -0.000815 -0.000558 -0.000071 0.000014 0.000427 0.001832 \n",
+ "18 0.0 -0.000567 -0.000266 0.000095 0.000092 0.000335 0.001230 \n",
+ "19 0.0 -0.000322 -0.000083 0.000105 0.000055 0.000180 0.000655 \n",
+ "\n",
+ " 7 8 9 ... 991 992 993 \\\n",
+ "0 0.000821 0.001510 0.000150 ... 0.006947 0.006498 0.006520 \n",
+ "1 0.000821 0.001510 0.000150 ... 0.006946 0.006498 0.006519 \n",
+ "2 0.000821 0.001510 0.000150 ... 0.006944 0.006495 0.006517 \n",
+ "3 0.000821 0.001510 0.000150 ... 0.006939 0.006490 0.006512 \n",
+ "4 0.000821 0.001510 0.000150 ... 0.006926 0.006478 0.006499 \n",
+ "5 0.000821 0.001510 0.000150 ... 0.006894 0.006446 0.006467 \n",
+ "6 0.000821 0.001510 0.000150 ... 0.006815 0.006366 0.006388 \n",
+ "7 0.000821 0.001510 0.000150 ... 0.006620 0.006171 0.006193 \n",
+ "8 0.000821 0.001509 0.000150 ... 0.006153 0.005704 0.005725 \n",
+ "9 0.000821 0.001509 0.000150 ... 0.005099 0.004649 0.004670 \n",
+ "10 0.000822 0.001509 0.000150 ... 0.003056 0.002606 0.002626 \n",
+ "11 0.000822 0.001508 0.000148 ... 0.000344 -0.000105 -0.000083 \n",
+ "12 0.000823 0.001505 0.000145 ... -0.000861 -0.001304 -0.001277 \n",
+ "13 0.000825 0.001497 0.000137 ... 0.000550 0.000100 0.000121 \n",
+ "14 0.000825 0.001474 0.000112 ... 0.001966 0.001478 0.001461 \n",
+ "15 0.000804 0.001400 0.000038 ... 0.002029 0.001483 0.001412 \n",
+ "16 0.000682 0.001177 -0.000157 ... 0.001620 0.001006 0.000883 \n",
+ "17 0.000340 0.000729 -0.000446 ... 0.000873 0.000301 0.000229 \n",
+ "18 -0.000060 0.000310 -0.000518 ... 0.000274 -0.000086 -0.000032 \n",
+ "19 -0.000195 0.000138 -0.000342 ... 0.000040 -0.000115 -0.000026 \n",
+ "\n",
+ " 994 995 996 997 998 999 1000 \n",
+ "0 0.008262 0.008510 0.007529 0.008193 0.008236 0.008553 0.007917 \n",
+ "1 0.008261 0.008509 0.007529 0.008192 0.008235 0.008552 0.007916 \n",
+ "2 0.008259 0.008507 0.007526 0.008190 0.008233 0.008550 0.007914 \n",
+ "3 0.008254 0.008502 0.007521 0.008184 0.008228 0.008545 0.007909 \n",
+ "4 0.008241 0.008489 0.007509 0.008172 0.008215 0.008532 0.007896 \n",
+ "5 0.008209 0.008457 0.007476 0.008140 0.008183 0.008500 0.007864 \n",
+ "6 0.008130 0.008377 0.007397 0.008060 0.008103 0.008420 0.007784 \n",
+ "7 0.007935 0.008182 0.007201 0.007864 0.007907 0.008224 0.007588 \n",
+ "8 0.007466 0.007713 0.006732 0.007394 0.007437 0.007753 0.007117 \n",
+ "9 0.006410 0.006655 0.005674 0.006335 0.006377 0.006692 0.006055 \n",
+ "10 0.004365 0.004609 0.003627 0.004287 0.004327 0.004641 0.004003 \n",
+ "11 0.001656 0.001900 0.000919 0.001579 0.001620 0.001934 0.001296 \n",
+ "12 0.000463 0.000707 -0.000272 0.000390 0.000432 0.000745 0.000109 \n",
+ "13 0.001844 0.002069 0.001078 0.001723 0.001748 0.002043 0.001393 \n",
+ "14 0.003121 0.003283 0.002243 0.002832 0.002802 0.003039 0.002342 \n",
+ "15 0.002960 0.003009 0.001904 0.002409 0.002301 0.002457 0.001709 \n",
+ "16 0.002254 0.002149 0.001004 0.001432 0.001267 0.001360 0.000622 \n",
+ "17 0.001397 0.001166 0.000131 0.000563 0.000430 0.000530 -0.000075 \n",
+ "18 0.000844 0.000538 -0.000218 0.000220 0.000130 0.000220 -0.000205 \n",
+ "19 0.000481 0.000204 -0.000218 0.000125 0.000047 0.000102 -0.000150 \n",
+ "\n",
+ "[20 rows x 1001 columns]"
+ ]
+ },
+ "execution_count": 114,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 115,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[<matplotlib.lines.Line2D at 0x1bf092730b8>]"
+ ]
+ },
+ "execution_count": 115,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA3QAAADFCAYAAAAc7rD2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzsfXecXFX5/nN3sz272eymkYQUWkhCF0NVQIqoFEXwC1+liICg2ECUJkgRBEFERYTvDxFBqYJ0KdIEQRKKgZBAQgJJIHWT3Wy2l/v7483ree+Zc9vMnZk7m/N8Pvu5szN3Zs7ce8r7nOctjuu6sLCwsLCwsLCwsLCwsCg9lBW7ARYWFhYWFhYWFhYWFhbZwRI6CwsLCwsLCwsLCwuLEoUldBYWFhYWFhYWFhYWFiUKS+gsLCwsLCwsLCwsLCxKFJbQWVhYWFhYWFhYWFhYlCgsobOwsLCwsLCwsLCwsChRWEJnYWFhYWFhYWFhYWFRorCEzsLCwsLCwsLCwsLCokRhCZ2FhYWFhYWFhYWFhUWJYlixG6Bj1KhR7pQpU4rdDAsLCwsLCwsLCwsLi6LgtddeW+u67ugo56aO0E2ZMgVz5swpdjMsLCwsLCwsLCwsLCyKAsdxPox6rnW5tLCwsLCwsLCwsLCwKFFYQmdhYWFhYWFhYWFhYVGisITOwsLCwsLCwsLCwsKiRGEJnYWFhYWFhYWFhYWFRYnCEjoLixLAs88CDz1U7FZYWFhYWFhYWFikDanLcmkxNOC6wMaNQH19sVsyNPCZz9DRdYvbDgsLCwsLCwsLi3TBKnQWecG11wINDcCKFcVuiYWFhYWFxdDDK68AEycC69cXuyUWFhbFRiRC5zjOoY7jvOs4ziLHcc41vP5px3Fedxyn33Gco8XzuziO87LjOPMcx5nrOM7/JNl4i/TigQfo+P77xW2HhYWFhYXFUMQ77wAffQQsX17sllhYWBQboYTOcZxyADcA+ByAGQCOcxxnhnbaUgAnAfiL9nwngBNc150J4FAAv3IcpzHXRlukH8OH03HjxuK2Y6ihq6vYLbCwsLCwSAO6u+lo1wULC4soCt0sAItc113sum4vgLsAHClPcF33A9d15wIY1J5/z3XdhZsefwxgNYDRibTcItWwhC4/sK41FhYWFhaAInSdnYX/7ldfBd57T3njWFhYFBdRkqJMALBM/L8cwB5xv8hxnFkAKgFkOOE5jnMagNMAYNKkSXE/2iKFYEJ3/vlAdTVw2GHe13t7gbIyYJhNyxML69YB48cXuxUWFhYWFsVGMQndHsIKfPddYLvtCt8GCwsLhSgKnWN4LlauPcdxtgBwO4Cvu647qL/uuu7Nruvu7rru7qNHWwFvKIAJ3cKFwOGHm1+fNauwbYqDvr5it8CMdeuK3QILCwsLizQgLS6XaV0vLSw2J0QhdMsBbCn+nwjg46hf4DhOA4BHAVzouu4r8ZpnUaqoqfF/zXVpAXjjjcK1Jw5efRWorASeeqrYLcmEdbm0sMgfXJfcyCwsSgHFVOgsLCzShSiEbjaAbR3Hmeo4TiWAYwFEKnG86fwHAPzJdd17s2+mRalhYMD/tWJm5PrgA+D444GeHv9zXnyRjo8+WpAmxYJV6Cws8odf/xqYNg2YM6fYLbGwCEexCF1/v7kdFhYWxUMooXNdtx/AmQCeADAfwD2u685zHOdSx3GOAADHcT7pOM5yAMcAuMlxnHmb3v4VAJ8GcJLjOG9u+tslL7/EIlWQLhhbbOF9bf78wrZF4rnngDvuAJYs8T+nooKO+qKVBlhCZ2GRPzz5JB0/juyDYmFRPBTL5VLfELWEzsKi+IiUksJ13ccAPKY9d5F4PBvkiqm/7w4Ad+TYRosSRG+vejxunPe1d96h44gRhWsPgxeiIIWOE7WkidCVl5PqaQmdhUX+sGEDHTkG2MIizSiWQqevn0HrqYWFRWEQqbC4
hUVcSELX3Ox9bfFiOtbVFa49DF54gnYUWaFLS6D34KByYW1vL25bLCziYvFiYN688PPSAB5fadrMsbDwQ7EInb5+WoXOwqL4sEnjLfICSYYGtbymra10LEYgNy88URS6tBA62da0tMnCIiq23pqObqzcyMUBK3TWQLUoBViXSwsLC4YldBZ5QW8vMHIkZbvUSUgxCV0Ul8vycjqmZZdeqp3ysYWFRbKwhM6ilMDrmFXoLCwsLKGzSBy//CXw7LPApEnA2LHKSGIwoevtJdJUyOLiUVwumYCmhdBJ8mkJnYVF/sAul9ZAtSgF2Bg6CwsLho2hs0gcZ58NtLRQLbeKCn+FDii8q0gUl0smTWlxb7SEzmIooBRIEo+vUmirhYWNobOwsGBYQmeRKGS8XBRCV6ydxSBCx+21hM7CIjmsX1/sFgRDKvLWQLVIIw44gGolMmwMnYWFBcMSOotEIQlaRQW5U5oI3ciRmecXAlEIHZOmtLhc2hg6i6GAtJfcWLtWPbYGqkUa8dprwBtvqP+tQmdhYcGwhM4iUci0+iaFrr+fzhk/nv7XF6InngAeeCB/7eOFJ2gBsi6XFhbJI+2Ebtky9VifH1pbVekQC4tiobfXu8YWO4Zu5kzv/xYWFsWDJXQWiWLjRvW4ooL++vuBv/0NuOAClSBliy3oqC9Ehx4KHHVU/toXR6GzhM7CIjmUKqHr7CSPgvPPL3ybLCwYrktrgUwyVgyXy8FBpWbffTdQVWUVOguLNMASOotE4afQfelLwBVXqPg5k0JXiKLZcWLo0rLryO0oL7eEzqJ0cPnlwOzZ6v9SJXTPPus9WljkAx0dwGWX+W8kcghAsRW6Sy4BvvENelxdbQmdhUVaYAmdRaIIc7kMInTS+MsX4rhcdnTkvz1BuO464MorVXuGD/dey/b20ijWbLH5wXWBn/wEmDVLPZd2Qrd0KRmozc1qfujvB/78Z3q8557Fa5vF0MdFF9HfXXeZX+eNPZNCV0hCJ9tXVUVjxhI6C4viwxI6i0Shu1zqSVFaWuhoInSvv07Hpqb8tS+Oy2UxCp9LnHUWuXlxW+vrVduWLAEaGoCbbipe+yws/CDHfGUlHdNO6JYtA7bcEqipUQbqjTcCd95Jjx2neG2zGPpYs4aOfpt0vA7wpungoFoPCulyWV+vHltCZ2GRHlhCZ5EowhS6Dz+k48SJdJSkiXce5YKRNEqJ0DEWLKCjJHQLF9LxvvuK0yYLCxMGBoDbb/eOHe6zUllII5jQSQP1vfdoDhs1yro7WwRj4UJg0iSv624ccJ+rrlbPXXwxcM459Jj7H6+xvIY5TmHXquHD1ePqavpLS3iChcXmDEvoLBKFidBJ1e7llykWbJdd6H+5EPHjfJYL4IUnaEeRCWhaCB3H7khCV15OR5t5zyJNeOEF4IQTgKeeynwt7YRozRpg7FgvoWttpc2nysr0JEmySCfeeIPI3KJF2b2fVbaKCvXcpZcC11xDj6XLpeuqPtrYSK8Vai3QFTobQ2dhkQ5YQmeRKExZLiVefBGYMcNch44f59Nw4oUnikLX0VHcGLURI+j45JN0HD48s0aeJXQWaQKP/6VLM19LO6Hr7ATq6ryKQ1sbjcPKyvS3P18491zglluK3Yr0g10ms3V/5LXJr5/x84OD9B18PocoJEWqDj6YNmUGB82fKRW6igrrcmlhkRZYQmeRKKRCZyJ0770H7LYb7fI1N9OOPqOQCl0UQjc4CPz1r/lrSxjYMODrIRU6TtgyOFj4dllY+IENu48/znwt7YSosxOorc1U6BobN2+F7qqrgFNOKXYrCo/ly+Odz6n8syU3YQm75Jq1YUMmoUvKo+Tpp8lt+uc/B7bZJnM9loTOcSyhs7BICyyhs0gUUqEDMgkdAOy0E7kMnn468OCDaje/EApdmMvlunXAqlXAmDHAdttRqYVioLeX/rheH+AldHydrUJnkSbwuProo8zX0k6ITISOFbqKikxC+vzzVIfL
YujhueconvLee6O/J1eFjt/X3U2eIU8/7X1d9r/2dtVHTd4uSeC222gcv/mm9/lhw7z/2xg6C4t0wBI6i0QhFbqBgczJH1Ak5YADaOHiRCnFdLns6KCU0c3NFLM2YQKw886FzR4mwYTtq19Vz0mXS37dKnQWaUKpKnT9/TTv1NRkKnTscqnPS/vvDxx7bMGbalEAsOfIG29Ef0+SCt1vfkOujxJ+Cl2+CN1779Hx+ee9z+vjuLoaePVVYMWKZL/fwsIiHiyhs0gUktD195sVuuZmOnI6cz2rZCEUup4e+t5776XU5KecQkVdGcWKDWhro2vIhG377Wmn9vTTaeHWXS6tQmeRJgQpdGkmdLxxY1LoGhvNCh1j1arCtNGicOASG3FK6CQVQ9fdDTzwQObrfgodtzEfm49jxgAvveR9Tl+f332X1qFTT03++y0sLKIjEqFzHOdQx3HedRxnkeM45xpe/7TjOK87jtPvOM7R2msnOo6zcNPfiUk13CKd0BW6bAjdwED+kpFIl8vHHwe+8hXgW9/KLOZaWUm79YVW6Jqa6PrwdRw+HDjwQCKdVVV0bQYGFOHLZ7yhhUVc8HgpNYWO5x5J6AYHSQnxU+jY+2Du3MK2tZBIu5tsvrB+PR1N65cfclXopMvla69lvl4IhU5uEO65J20o6vUj9T5xyCF0bG3N/fstLCyyRyihcxynHMANAD4HYAaA4xzHmaGdthTASQD+or23CcDFAPYAMAvAxY7jjMy92RZpBS9qQPYKHb83achCrD09amfdVDC4srI4Ct3gIC2YTOhkimi+Xn19itBJAm1hUWwEJXYoNULX3k4bS34K3fbb03EoE7piuZwXG0zo2BMiCnjty/aa8Vze1WWe18MUuiQInWz7V79Km4h6v9f//9WvgF13pXMtLCyKhygK3SwAi1zXXey6bi+AuwAcKU9wXfcD13XnAtAjej4L4CnXdde5rrsewFMADk2g3RYpRVKELh87w3Ih6ulRi/ZBB2WeW1FRHIWO8dZbdJQZxeT1YkKX9mLNFpsX/DZAHCc3QtfXB5x3HrlA5gMml0v+Lj+FrqGBjtnWHSsFpKUWZ6HBm31RCZ3rKpfLbDYB+/sVifPr41Khy5fLJd/vyy4Dvv1tc7mOvj7KfsnKXXk5MG6c3Vy0sCg2ohC6CQCWif+Xb3ouCiK913Gc0xzHmeM4zpw1PCtalCTWrFGqkl9SFCYpOqGTi2c+FDq5IHZ304JUWQmMHp15bn8/GXa9vcVJPCKLiTPk9eJr1dZW3Fp5FhYSfsaszNCaDf7yF0qj/pOfZP8ZQTApdOxC5pflkv/Pp/JY7BjZzZXQLdtktehZm/3Q3q4Iv06sZs9WZM8P8nUToRsY8PazfLlc8mdMnEibMH6ErqlJfS9Amxt2c9HCoriIQugMDmmIakJGeq/ruje7rru767q7jzZZ1xYlgf5+Ur3GjVP/mxQ6dnHk16RCx6/lQ6GTgeas0DU1eUnTXnvRsauLFDo+t1AYM4aO//wnHcMUur4+mzLaIj3wI3QyQ2s2YEMzX+SJP19muWTDmuvQvfEGzU88N3Fb8jFXuS5w1FE0R5oSzBQKm6vLJXuaRCV0kpDpY2DWLGCGHqSiYckS9dhEjHp7vfP8X/8KXHcdPU7S5ZI3Cmtr6VhVlbm+9PZmruuW0FlYFB9RCN1yAFuK/ycCMIS8J/5eixJDSwsdwwgdw+RyOWIEPc6HkfT1r9OxtlYpdCNHKtJUVwd85zv0uKuLDDt+XCiw2sZFbcNi6AC7kFqkB/lS6FixNyn+SUBX6AYHldvdyJHeeYwN3HwSulWraAPKdb3GfqGxOSp0HMcMRHe5lKEGcr3gPiJfN2HxYvXYpNBdfrk36cirr5LyB+TH5ZIJnZ9CZwmdhUX6EIXQzQawreM4Ux3HqQRwLICHIn7+EwAOcRxn5KZkKIdses5iiOCuu4DDDiPDgxetE04A9t6b/PB54mcyIiEJSl8fGW1M6Ewu
lzvtBJx2WvZtLSsDpk+n9nV2KoWOCV1trfp+SegKmRhFXzzDFDogs/CrhUWx4GdU5kro2PWwUIQOAD74gI7jxnnnr0IodPI6FjN74OZI6OT9zFWhk+SMN+lMYEK3zTZmQnfFFcCFF9JjJluMfLhcBhG63t7M9byhgchvsV2ELSw2Z4QSOtd1+wGcCSJi8wHc47ruPMdxLnUc5wgAcBznk47jLAdwDICbHMeZt+m96wBcBiKFswFcuuk5iyGAwUHguOOARx+lSZ4Xta22oto106ZFI3S9vWohCVLo3noL+L//y66tnIb8q1+lpCwbN2YqdIBKdCBdLgup0OmLp1y8dUL3uc/R6w8+WLj2WVgEIV8ul7zBU16e/WcEQU+KAihCN2aMV5HguUk/Jgk/UlBobI6ETroYRiV0vJk5fLg/GTeVImAsWQJMmEDuvX5KF9+LUaO8z+eD0NXV0TGOQgfYxCgWpYEVK+JlsC0VRNrvdF33MQCPac9dJB7PBrlTmt77BwB/yKGNFinFq6+qx729alGTYZC8o15ZSem9eeLn5/i9UQhdLpDxMGVlRO5WrCDVjxcv+f3FUuh6esiAXL0a2H9/aitDT4oyahRw8MHAE1bztkgJ5FipqKD+29OTO6ErlkI3ahT9DrkhpSdDyTehswpdYZELodtyS++940zKQPC1XLyYNkJdN9x1sbkZWLpU/c9xn4WKoevrMyt0ALW9sTH3dpQ6OjspDv6zny12SyxMGD+e4lrnzSt2S5JFpMLiFhYmyIKjUqGTO4i8k1dRAey4IzB5cuZrJkKXdJZLSehYkVu1yqvQua6X0LFCVyhCNzBARPPoo4Evfxn485+9r+uErq4OmDkT+PDD4mTitLDQ0d2txn91tRrjubpcMmnKN6Fj4xggQrfFFvTYpNBtDoRuc0yKIvup3MV//33glVfM71mzhubnMWPUNZs9mzxYGH7riOsCb79NHi3V1dEInYTj+JfYuece4O67gz9PIqrLpa7Qcaz3hg30e371K+Dxx6N/71DD6acDhx4KvPtusVti4Yd33il2C5KHJXQWWUPuCPb2kuvluHEqUyMQ7HJZXk5/ktDxTl/SRhIbRY2NXkVOxtDphK7QSVF44Zw0CbjvPtpFktAVzdpaSi/d30+KnoVFsdHdrfptdbXqs7W1uRE6NobzVaLDpNAtWaISPBVTobMul4UFK1JlZV6FbpttVBZkHWvX0kZGTY26dwcc4E124kfoli2jzdFdd6W+FxSHVllpTjRWW+u9Vz09wDXXAP/zP8Cxx/p/ng6Ty2VPD7DzzioGMMjlcsMGYOVK4Ac/AD7/eeC996J/91DCggV0LObYtQjHUCv5ZAmdRdaQC8jixcBjj1HSEhnnIhU6E3gHMI7LZTYESxI6GTOnK3S80zh9euEVOjYQTeRXPt/T4yV0gKqbZGFRTOiE7i9/ISN49Gjq39kuoKyU5KtER1cXKR1VVYrQdXaaCV1Uhe6JJ7I36NKWFCUoW3G+8a1vAbfcUrjv4/vKsdZRsGYN9fHqanXv9Bgdv3XkjTfoyIQuCFVVZu8VndBdfTVwzjn+nzN3rqp1KmFS6Pj8m2+mx35JUQAidJztGiBVMwwLFgD/+lf4efz5J59c3DERBVx+aagRhqEAeU8+HmI59y2hs8gKK1d6/fjfeYcGyqc/7T0vSKHj56O4XMpBmE3t+agKXVkZ8MwzwNNPF16hY2M1jNBt3EhtralRhC4og5qFRaHQ3U3G3fDhNH4++1ky1mprqc9mmwUv34SON0gcx2tUM6GThIaJqU7sJNasIZerr3wlu/akxeWS52W/OakQuPFG4JRTCvd93Meam6MnTlizxqvQmVzg/fou1zfcaadwQldZad5A0F0jJakyYeedgc98JvN5/r28mVlVpV6TNWKDFDoZisGlP4IwfTqwzz7RyM+NNwK33gr84hfh56YBltClD3INevvt4rUjH7CEziIrbLEF8JOfqP85KJwJGYOT
evgZBBUV0VwuJcF7+OH47Y2i0DEOOIDcRoul0MlFVIIXUQ60lwqdJXQWaQC7Kjc3e41TveZkXBSK0AHedrP7uK7QyfnIZGCz0cDqS1zwnDNyZPEI3QcfABdcQI/LNiNLgftYU1NmzU8TXBeYP59cMlmh4wypEn7ryPLltHFQV+c/9zOqqsz9raLC+3y2MdWdnfRZpo1YSeiCFLo4hE6eG0Ut4e9NuyuwVejSC7mGDLVQlc1omrbIJ3hHUBbCBqK5EXKSD8Df5VIagmeeGd83P4pCp6PQWS7DrhW3kxXK2lraFa6sLH1C9+qrySfCsSg8urtpI6SpyUzo4sSbtbRQHM7KlWp+yNdYXL9eGaWy3Twf6QqdnI9Mv4kN6mzby+8bN654cTg33qgeb071xfje7rMPHb/5zeDzly6le7TLLkqhW7TIe05VlX9faG1VmSGzVeh4Y5SRLaHbuNG7PpoInSkpSraE7qmn1OPZs71ZQU0otNdMtuBrlWY8+mj6XVfzATlO0t6P4sISOotEwAqdLEsAqMUn1xg6fWc/ittlR4fKGNbWRhnyamszFTpewPTdtKDF4/DDgYsvDm9DHIS5XDY10fGjj+hYU0MLx/jxpe0L/uyzwB57AL/5TbFbYpErurtp3MycSWnYGdkodHPnUqa8l1/Ov0K3ejUwdiw9lkY1z2e6QhdG6HhzIldCN3Zs8YwueR3ykfglCopBJLmPHXYYcPzx5H7/s5/5n//mm3RkQtfVpeZoRhCha2tT616UGLqZMzOf1xW6qNdN30RbsUK5GQPmtcjkcslrqiR0o0eHE7olS9Tj446jNS5oc7IYtWFzQVoVug8+oP5dSFfmtECuIYUsSVUIWEJnkQj8CN2229LxzDPN7+MdR93lUl9odEMwaNe6rw+45BJaJDmFOu+COo6/QqfXjAlyuXzkEeDSS/3bkA3CFDrexeUFj13Ehg9PvwtKEDg4P5vYSItkIA2rXMCE7o9/9JbdyIbQ8cK7Zk3+FbpVq4IJnV62ICqhy5YISYWuWISOjfGmpuKp58Uw3KXr+9ZbE9m/8EL/8+fMoXVlxx2VyyUTuokTyWulutp/M0ISOrk2mVBTA/z2t8Cdd3qfDyN0fgRPLwS+bBnV0mOYYuhMSVHKy2kdYkI3bBi5oK5cSRszfrGI69fTZ22zjerzbEuYUGoKXVoJHdsQ+sbDUMZBBwG33WYVOguLUKxdS3EWTDIYY8bQpPa//2t+n59Cd9hh3vgTHoRnn03HIEL33HPAT39Ki1hfH/DWW8Cf/gRMmUKvS4WusZHa/N57ZIRKFKtsgV8cxbBhdH10QlddXdo7TfPn05FrflkUFn/+M6lp//hHbp8zOEhGa3W1KknCyIbQ8bmrVxdGoeN4uTCF7ktfAu64Q/1vIm25Klo854weHT0xR9JYuZJIyve+R3N4MWpdyrm3UHOc9JSYNCnzdWmk9/ZSko4DDiAyNmIEXadFi4gIL15Ma2PQHC0JnVybTDUXZ82ief+II7zPhxE6vzUsjNDpxG1ggH6/yeOmoUERuqYm2iB5+21KwPKd75i/v7WVvGSkKhiUUZV/V9w1ubc33J0zSTChS6urMseO6TUNhyr6+2l9O+kkq9BZWIRi7VraiYzrO64TOqnwSRc8Nu5Gj6ZjmEIn8cILFBvAqa95F7S+Xi0e226b6e6Sthg6gBZK3lUbKoSOC3yW8m8oZXCx5Fwzft16Kx2nT898LSmFLh+Err+f5q8ghU4a111dwA9/SI9ra4MVumzR3U0bZHV1+SOxYWDVkn97MVQ6OSeEZW5MCnJjzUTopJH+zDM0H//gB/Q/e1HMmwdMmEDrS2VldEInY9BffRU47zzvuQceSEf2HmHohE7vk34eHLKIeW8vkXg/QtfdHRxC0dBABHHdOiJpY8cq4rBwofn716/PJHRBcwR/f9y14otfVCELhQDbQWmNC+dN4VwJXVdXtEymxYYk81ahs7AQMLkRrF2b6W4ZBZLQ
8cLH0BMRANEInXytvl4ZRJMnq+8cNowWkiCUldG5+uKRr53qsBg6wEvoeFEvdULH5S+G2uRaKuDxnGsg/y23UBzRMcdkvhaV0K1dC5xwAhmaJoUuH/28pYWuQZBC57fT7kfoTM8FuZLpYNdVrjtWDHVs5UoytItJ6OScUChCx/OwH6GT95bdYbfemo5M6ObPV/UY+bP0vtvZSe9btcpM6GbMAL77Xe97DjiAjvpYrayk+LfTT6fv0VVdP0InFbqPPqJx4Ody2dUVvOmoK3Sf+pR6bfvtzd+/fj1ds7iELu5a8fjj8c5PCmkndLpHVVwcdJD33qUVMlGPVegsLECT/WWXKRc5iba23AldXZ2XxJkIXWMjuXJFIXT770+faVqEhg+PtmNXV5eZtjqXSWBgwN9vnXczwwgdYygodL296vpaQlccJEXo2tuBqVPNKe65T3/pS8HuiNdeC9x+O2VYDFPo5s/P3U0UUDvMrNBJI9YvppdRVxdNoXvqKdqM+vvfo7VJEjog+3IP2cJ106HQFZPQVVaqsjAS8n5L8gcoQtfR4SV0pjn6/ffJJVO+TxK6ykrvWtDZqTY0dVRUUGKsm24C7r4705UyikK3bBkd/RS6rq5gha6+3kvoZA1Gv/5rUuiC5gd+bcGCaEXLdRQqpk2WeEgjmNDlajdELQhfbMi5wyp0FhagHduLLjIXJAUySxZEgSR0tbXhhK6qinYzoxC6rbbyphiXi1NdXbhCB9DCJHd3gNwSkPziF2Qk8ELO+MtfKMsXEFyLaKgROqlalHJil1JGUkYOkxATeOxJI9YEdodubVXG8sqVivRLQjdjBu0Q5wreSGGFTrpX8pwWl9Dpz3EmRJmmPQg6oSu022V7Oxk7aVLonnkGuPfe/H+nXGt010YgmNDJOqwTJqjHpqQocqPQpNA5jnct0NvywguqPIKetEffhJSKnRzvkvitWJHZbp3QRVHoNm6kDdOKCnK1ZFdME7J1uWxpoUQqcVGoPpx2l0sm70kRmrQmf2FIQqcrdK5LyVIKGWOZL1hCZxEZPCn7+UznqtDV1nqNqYoKmnAGB70LSRiha22lz2looAWgpyczScOYMd6Fyw/NzZk7w5J4xCVSvKP12mve500ZAU33iR6NAAAgAElEQVSQhG4ouFxKQlcqu2XLlinjZyghV4WOa9CZIPt0UJIPWc+Kx/zixcpg6O6m8XzCCbm1VUIndBJhZCaqQscGe1SjoauLriVft0ITOp7zRo1KD6G7/HKv6pMv6K7vg4OUVIsh77eeyIqVNiBcoZMbhSZCJ9tgwqc+pVw9JaEbGMgkUHLMScIkFTp+LElpHIWOCZ2cB7bZhjZewgidVB6jELpsUSilO+2EbuVKOia15qZViWT4KXQvv0zxryedBJx/fsGblTgsobP4L9au9WZv0xGWPjsbQscFUU0K3cAAPXfKKfEIHQeZV1UphU5fGP/6V3LvCkNTUzChi1v0l2MyPvzQ+7z8jqBFXKqKQ0Ghk6UKSoXQfe1r/lnbShFJuVx2dYUrdEAwqWHDuK1NGdbcL8rK6LmnniK3TEauu8PcniAX7CBCNziYGeOmGzg8VqOWICi2yyXPJzU1itDJ3/TOO8ANN+S/HcUuWwDQuDj+eODmm+l/2Rf8XC6BTEL38svAueeq5+Sc70foTJkuTZDr5uBgpkJ3wAFqM1GuX5Jo8WPZBj2GLiqhk/NAfX1me55+Grj/fpXl0uSNY0KpEDpGWokOz3lJecUUK3FTVMixJn/zwoXA9dfT41IoBh8GS+gs/osvfYkWLr8i1SZjRA6CbF0u584FHnwwk9A98wwd//Sn7Agdk0UToZs61bwjryNMoYtL6Hih0+MQ5W7t5hBDN3s2qVylqNCtXJnphlvKSIrQBSl00jANInTcjzdsyDQSxo2j53QjN1djguc1qUzo8Jvb2EVUN9yk0S/r1mVL6AptMPF9qK42K3S77061RfPtalWMec0vORWvTdm4XPLr
V12lnpNziMy8LMFjcq+9gtusb4TqBAoAXnyRjnKelQodEzpZOiEbl0t9Y6e+PlOhO/hg4Mtfpv4zciSwzz7qtaFA6NKs0A0Oqjk4qTU37faHHGvsYabXfIwSgpN2WEJn8V/85z909Js0TcaIdGPM1uWSoRM6jjs54IDsCF1lJS0YnEEzGzQ3B8fQxSV0vNDOm+d9XhKboBi6qVPVY75WXMy2VNDaSvWUxo8nMg+QclkqMXTt7YXf6S0EciF0rhus0MlxEkSG2TBoa8u8xmPH0uu6OmAyXh9+OHq8WmsrEdGgcfeNbwDXXJP5fBRCt3Gj+l2lQugkUeHrLX8Tzzf5blcx5jWTiz4QTOj4NRl3pyt0OuRGIRNj08bBokXAk08GtznM5RJQBFNeU12hq6kx14/k94UpdKwO+hG655/3emUAZEg3NFCiEyB5Qic3HQqlmKWZ0LW3K4+CXMaXvK6lpNBxuIS+gefnFlxKsITO4r/gDu03yE3GiNwtz4bQyfdL9x79nChJUe6/H9hhB9p9YkIH0AITZKwFoamJvuvSS9UElguhM8Uhtrd7VYsg8jlzZuZzrNClPTCZIY2Tu+6i44QJqt8NDFANprRiqBG6JBQ6Tq3vR+j224/UfwD41reA++4zn8d9QCZFYbBCp/dzE6E74gjgkEOitb211esqZ8KwYarWmAQTOr0/SMMxF0LHc8HMmZS9sFAIU+gY99+f33hS01qUz2LNt9wCXHGFeR3yI3RVVd6x09hI7sHSA8Q0Ltato8+89lqVaExXDQCKk5OqmQmSYHGcqQ7+TfKays2V9vbgGL4ohI5hInSDg5R5ev/9ve9jrxP+rihZLgF/bwAd8vdal0vvPc+F0Ml5N+0KnSR0HD/IhG76dOp7ltBZDEnEIXQS2UjW8jO7uswLRUeHV6EbNYoSGehxK6edRsrX/Pm0qPJnbdyYm0IHABdfbM4MJV1WooAnQWmU6AZpUFs5CF6iupqM3DQuHiY8+qgyUBYvpn5TX6+u60UXAXvsodS7NIF3oNO+IxkHTJByMZRlzJUJVVWUSWzYMPoeU606+TmrV2de47FjiVToSq5ciF0XWLIkXttNhO7TnwaOPdb7nKkcQz4VOl01POWUaO9NAlEJ3Ve/SmQ9XzCtRfkce3yNTd/hR+j0+ZrrqklSaJrTW1qAKVOAs85SfcvUx6JArpumskKAWkPlNZ09W20umgidHkPHY89EPOV7TYSO3/vOO973bbEFHaPUquzro+8+77zo5EyOOetyqTaPR4zIjdDJTeh8jskPPwR++9vcPqOlRSnPOqHbc09gu+3i23JpRKTpw3GcQx3HeddxnEWO45xreL3KcZy7N73+b8dxpmx6vsJxnNscx3nLcZz5juOcl2zzLfKBOIROFg8N2+U2QcbrrV1rXtA6O72EbocdqI16HRp2cTEpdLkSOkCpcUkodKbAekZQW007x7x4pm2X7Ic/BK6+OvP5ZcuoADUv5KNHk7st97uHH6Yj18pJEzhb3FBU6HLZEOB756fQAWTkmNz2TJ+zZg2NC6l8cJ04fczJDZFbb6VyJXFgInTPPw/ceWf4e/0Infx/zRqv8hilSHhXFxnT0qAupEsyzyVVVeFZLhcuzF87+LpJN8Bijb0ghU6isVHNbQx57zZsIIWa67Ul2TaAkq+YoBO6/fajTbNx42jtjaLQ8VgzKYaS5OmErr/fv44glyyISugqKmizY2AgGmGS84UldIqIjR+f25wilb582h6HHUZJyDgbcTZYvVoVt9ddLhsbVQ3FUkcooXMcpxzADQA+B2AGgOMcx5mhnfYNAOtd190GwHUAOPT3GABVruvuCOATAL7JZK/UsXQp8O67xW5FcpBuTH6DUyd0d95J2SIZ2RA66a6j+9YzdIVul13oMcfYMWTMQlIul1J1NGWGiivT84JoSn3NCCOfhx3mVepYFUkbobv2WuDHP858nt3Jpkyh/2fOpN/A15Un7qVLC9JMX/zn
P5mkku+3JXRehCl0Jrzyiv/nDAyQkckkDlCGn24YSkL30EP+r/khisulH6IodG+9pX4Xx/SGobOTPlvOBVGIYFLgTaYwhS7fYPIh70++xl7Y9Y1K6M47jzw6JCSpOOMMUqifesq7YZgLJKF77z3zeqcTuoMPVq+1tOSP0PG5fsp5toRO/pYgFFOhS6PXDBMxGeaQDQql0PH9y8W+kYSOFTrulyNGqIQ+pY4oCt0sAItc113sum4vgLsAHKmdcySA2zY9vg/AgY7jOABcAHWO4wwDUAOgF8AQuGzAOecAJ55Y7FYkBy5QCgQrdDKQdK+9vAtANkaR/C6ZGERCV+hmzKAdW07iwhg1Sj3eZptkXC7lwmQidEE1tUzgBTFIoQuLZXr4Ye/9SqtC5wc2grjd++xDC3RXF/0GJvYffFC0JgIgo+uSS7zPMaEbSi6XjFyMnSgKnY733vP/HIA2e+rqVNIELvkh+z6g7sm8eZlzgl/NTIl8EDr5/9y53rEZhWRyceZsN6Jyhcnl0s84zdZNMGo7KitVRl8gf2MvzFU3KqE7/HD6k5CETn6PXLNygR6qsOOO6jGPAb0MyCGHkLcLP5croZP3SFfoAEpuZgJ/B/+GMEI3bFi8Ne/119Vjq9B5FbpcCJ201/Jpe3C/yDbGjTcHt9uO/mcRge/R5kboJgBYJv5fvuk54zmu6/YDaAPQDCJ3HQBWAFgK4BrXdTNynDmOc5rjOHMcx5mzxk+mSRk2bBgaHQCgXeNp09T/QYRu223V//pufDZG0ZNPktG8ww7kLsXP7bqrOkdX6KqrqR16rIBcaL/4RbVQdHRkT+j22w+48kp6/ItfkKqQC6EzuVzmusiUKqHjncI991Qul8uXq53yYhO6desyFxGr0JkhCUBUmFy4ZR9euZL6yZgxNLdMnkzP654RbGTusENmn+Hd2LB2ZEvoeIPLT6Hbdlt/Qtfd7U/uNm4kQ7jYhC6Ky2U+CR0XWJfXIV9jL8wjwERsTYTOBEnopNv8xImZ5/7f/3kLmUeBTuimTgX++EdK+KAn7uH1vb6e1jSA7reJ0FVUAPfcQ5vXnZ3ZKXRRVftsFbr+fv+5q72dYhQZltB5Fbr+fvp75ZVoG00Sixerx2kmdC0ttMaNH0/zNfcBtjM2N0Jn0gv0fHp+58wCMABgPICpAM52HCcjwsF13Ztd193ddd3dR48eHaFJxQfXNxsKaG31ulz6Ebr1682FrRnZGEW77UZJMN56C/jf/6XnDj5YZQhrbs5U6AAagLrrEv9fXw9suaV3IsjWMHIc4PTT6fFLL5EqKb8n7iQYpNDdfjsFqceFH6E7+mjghBPif14SkO5L+qLW00Nt/v3vqRbRrFnK5VK6cejF1wsN2e8YQ5HQsTFUCJfLc8+luNuKCjOh0xW6ykqKsQwidEELfRihc93sCV1ZmYqB8lPodt0VePtt7+/iTaDddjOnqe/vp+tZTIVOulyayhZIFILQSYKQL4UubEybrkNvb7R79PnPZ34OYCZ0p5yiMsJGhU7ompuJhL3zTiZR4r5YU+MlRiZCB5CnwrRpSukA4hG6qMnSystpvb3ookyFk9HfT79Vrnk77eS/YcvlT046if4vtAtkGgnd+vV0/XjuWreO7Bq5iR4FMnY2nx4ruRI6DuEYM0b95ooK1Rc2N0K3HMCW4v+JAPTS0/89Z5N75QgA6wD8L4C/u67b57ruagAvAdg910anAX196fSPzgacvfFXv6KjH6FjNyBGEgqdH9hA3HJL5YoHeOv96JNIVxfwiU8oF5MkkqIAmeUYOjvptw8fHk+hc91ghW7CBCrYGxd+hO6vfyWSWAzI67JokTdjZXc33b8996TkALzj2t2tCN122xU3hm5w0FtIl1EMl8tFiyhJR77AvzHqfLb//pkxQlFdLq+8EnjhBZorTAXGu7vVeOPEIN/6FhWxbm6mTaRly7zv4U2SiRMzDdIwQtfZSWMxm7nrV78yu+EBanxvvTW1
Twb0c3vZw0CP3ZIqiD5vFao0SdQsl0BmvbYkwbUNC6HQhfX/qC6XJlx8MXDzzfRY3u8ttzSfHxcmQscYNoyIEl83nptra9V4DSJ0gIplXbSI7rdpPfUjdAcfnDlf6DXAAGojf+4jj5jbYVLo/LJ6Amrzlb+vUBtxSXg95AsbNtAcy9eQXYAXLYqXIXjhQtXP8qnQcZ9IgtDx5kJlpeoLNTVqc76Qccr5QBRCNxvAto7jTHUcpxLAsQC00HM8BIAjyo4G8Izrui7IzfIzDqEOwJ4AFiTT9OJiKCl0bDiz26UfoevtpcmUd2T1hTybOnR+4AmCdzDb2rzFXrn2mkRXFxlmPFHxRNDfnxuh03egOztpMayri0foenpol3PYMGoTT/qyiG82kIRucJAUxTfeyO6zkoLc7dpxR2DnndX/JiOI7xkb4BMnFrfQOPetNCh0J55IJMqURCQJxCF0AwNELi+9lO7rJz9JZCtuUpSRI/1dLicIh/6qKuC444BTTyWDj1U6CSZAPT2URl/CL7Me4+ST6WgyMIOwejVlXvMjdPw/G+zSUNLnDJ5/XRe44QZFWE0KXVwX72xhcrn8n/8hN6uPPvKem0+FjjcRC6HQ8T37wx/MdTD5Xn/96yqO01S2wITyctUXZHbAfBE6mT2TiRKP85UrqT1NTWq8dnZS3/IjdJy4ZNEiuh+mOG+/GDrHAfbd13tuczON5csv9z4fdi2Z0EkiGgR+nW0T07w9OJi8Ic+bH2lU6HRSLN3U2Y2yu9vbxzkO8cc/pvu2ciUROo7VvPRSchU+5ZTkN524b2eroJkUOikIVFWp/hHX4yptCJ2KN8XEnQngCQDzAdzjuu48x3EudRzniE2n3QKg2XGcRQDOAsClDW4AMBzA2yBieKvruimsLhUfvb3p3H3JBlEJXV8fTbjz5wN33JH5epI7tWxQ8M7g+vXeyd6P0EmDUi5ySbouffwxTQBxFTomA6wG8CKiu5PGhSR0a9cCN92UndKXJORuGi9qXOfMROjYGOAyFmPHZi6+a9YUboFkMqm3gReVwcH8FjiW4O+56ab8fL4kdI88Qgu5TCQgIdWxt98G5swhEhI3KYqfQtfV5c1Wq48JE6GTqmlVFfDaa8B11xERCTP+//EPOh56aLR2MzgyIEyh40QuixcrBUM3GubPJ3LY0EDk+He/o+dNhM4vcVQQTLGg69YBL77o/56eHrp+5eWK0K1bR4qj7iaYD0K3eDHNrawaFVKh++Qn6U8H3+uWFlUIPKpCB6g5Tt5Dk8tlNghS6AAvofv4YyqrUF6u1kveLPIjdFyGYeFC/yLnfgodkKmAjxxJROKCC/x/h4lk6WQkTBnieSlIodt/f29ugCTAc3YaCR27rZoIHV+vCy+ka7J8Odl6s2bRvHr11WQv3nYbucRzUp0FC6gO8C23AM89l2x7k1LoRo/2FrHneau6WhG6Une7jDQVu677mOu627muu7Xruj/b9NxFrus+tOlxt+u6x7iuu43rurNc11286fmNm56f6bruDNd1f5G/n1JY9PUNLYWuokIZH0GErqKC3OH0nfCkwQsAx9K1tnqNu6qqzMmcXSEZ8vxcFDodjz8OfOELtIDF2dHhc1n258k+SYWOv6PYrgOmyZf7FcfQSbAxwIRBJ3SXXUZ94Q9/SL6tJjCh0wmB/F2Fcrvkexl393DFCmDvvcPrhPF17u6m2JVttyXXZZOxZMpMye8FohO6IIWusVEZv/qY4PlAYv584NvfVuVJdtsN+P73zXOEjr4+4HvfMxPFKAgjdKzADA6qjIZ8H5m4zp9PhXP5+ddeo6PJ5TJMcWRs2KB+O6shEkcfTbGMfnM9u0UD5tqXEvkgdFtvTUZWayuRjEIqdDo5YsjneZ6KQ+h4bZKELh916IBgQvfRR0oF5+t67bV0nDrV/Pms0K1f70/oZBvCCJ2fi7Nct1asyFR74pYt0AldVxfwwAPez/3nP2kDIUllicd/Gjf9OVMo
r7myni9fL/YoeOIJug8DA97YZd50MvWXpNfoXGLoTj0VuP9+etzU5FXo/vAH4OyzgT322MwInUUmhpJCt2wZGR7l5TQRhxG6QuAvfwF+8xvlqmdS6EwxdNLtI1+ErqeHgtbjulwySeDJgyf9pBQ6joNIA/wIneuajSA2dhcupD7W2EjXhxd4rneoF5MHKD4vqqEbFaYC4s88462bWKgNHVay4qaY/vnPqcjwPfcEn8e/Qy/YbVrc/MjhFVfQMarLZZBCJwP29X6iJ1iorKSsuL/7HfUVeb5pjtDR3R2vdp6OqC6XgOrj3LfYMJbZ4gBVeiEXl8sRI8hQYejXmo0zvc4ig2tFAsUhdADdzzlzCqfQ8XwchdABwL33ZkfouP1HHhleoiYqohA6HgsffaQ2E2Tf//znKUmVCWPGqPvsR+gkTMXWJfzCM+S9PeAApQAxdJfLuArdTTcBRx0F/L//R//LpOp6zHZUde2996hkium9aVXohg1TfeSdd9RrfL1mbKo0/eSTauONr8/kyeqxrBPK0K9FrmCiHde26e+n+/z882QXspsxQONh8mTgmmvo+SOOoI0Wme29FGEJXZYYSgrd6tVq55vrgZngR+g++1mv8ZAExo8n9yPeRdIVukK7XB5xhPf/XXbJJHSrVtFuH0CLp75AcH/h35QPhc5khBdDrTO1QyYZ0X8ru7AtXEhGO7/O5zMZNqk6Bx9ME3OS0F0uV6wADjxQJQ6Sr+UbHHMTN/D8scfoGBanw79Dv7Y6wQP8Cd3bb9MxCYWupsa78Orvk9h/f+//sl+FKXQDA/Tb45Ra0BGk0JWXe2N6dYWO26aPFb4fJoUuTh+YGxDcwPO9nmCGIVV0ndDpal/ShE5XStKo0DkO8N3vZudyCVC//dvfsmpmaNuATOUvTKEDgJkz/T+/vFzN0VEInT6m9BhVv81LOacuXOglG0DuCh1v/L31Fh1lrDnPYQDFg1VUqBCAIEyblkk80+5yOWyYup/yd/P14nY/8ICaR9iekW7CJkInNz2feSYz5jYu/OZJPyxeTH1ZupLyBgL/Zr3+Y00NEdx8JngqBCyhyxK9vYWNo8knOjrUJJ0Nofv73/OXsIHJT0tLuLGmE7okFboHHyS3KP6sysrMGLoDDgA+/WkySM44gwwfeS15seKFvVAKXaESKUj4KXQyHboET7QffEBGMF8LPj+I0LW0mMlHLtAJnen3FMLlcnAwe4WOg9rDiGcchc6PADDiKnS68c4KHROOIIXu0EPJZUYiTMWX8OuLcWAidPPnk3rD2QU5/oiNbB6PYYaKKflEUtnk+Pr6ZZL1c7l88MFMQpe0EaT/xnwrdB9/TLGUcQjdSSdRYoi1a+MrdEBmyZ9cEdXlsqODxrmJ0PnFzzG4H2dD6PT2+V1jEwGSz2WbFIUJHR/Z7ZWJHUAJPZjwsdtgtqVz0uxyyTF0vOa2tSlixteL2+26wFNP0WOe+2XSKlNJitWrlV184IG5b/bzPB1VoZs3jzYC2HUdUH37xBOBP/+ZXEmHIiyhyxJJ1G5KCzhrI5AdocsnuF1r1mQGXUtjzXXzH0PHbiO826PH0HH65LY24Omn6bF0p5JpcoHkFDpuV2uredLLNXNTR4d/7JQfwgidn0IH0CKh107yI3R9fUppSRKS0J1xhtkVQ//Orq7kyXNbmyI9cQidJErZEjoTSV6zJngOiJMUpb8/M5MpK3TbbUf/6+NWjsHHH6dNlFmz1OtxFLpsiqGfcQbFnzH4WshrPGMGjRd+jRXS2lr6C1PoGCbDOcomQpRNRt6h9iPo0uVS3u8jjvCO1STwpz/R/fbr5/lW6PbdFzjooHBCJ4ntTjvRsbU1O0In17IkoLdZN7SrqqiPslrCRrn8TWGEjol8NoROR5gd8fjjwC9/SY/lnKordGGZkHVCx94q7Gq5apWKu125khQlwJvlNSrknJtmhY5j6Bob1f3ne8vXq7fXW/sTUJs/ktCZYiEHB2n+ky6+
APDQQ5Q8JS74XkQldDyOZcka7tsjR1K947C+XqqwhC5LxK3dlGZ0dKgFJm2EjtvV05OZFlkaa319NJHIc2RbkyB0vEjy4qC7XPJ3t7So3UxJhPKl0NXU0HvXr88PoTv2WCI0cfo6t0O6/sh6gvpCOWKEWlwaGzNdLvk664RO31FMCpLQ/f735nN0ovTtb3uN/SQgY5/iqDPy3DADOA6hW7vWnAGQEXV+4LEkf5/rKoVu++3pOf1+8/v4vldUADfeqF6PE0MXt9QCQLF6996r/uexbJozuT9zam9W9TdupD/+fj912WR0ROkDcTYV4rhc8rXdaivvubluppx4IrnX8Rg2Ebp8KnScAIL7YhSFjgkdkJ3LZT4Vuvr6TNWUFTqd0EmEGbmc2VPGnfkhjNCFrXXDhyviKNcvndDJ+cO0kcF9iYkHr0uc+bClhdTM++6j//VkWHHsHTnuSiGGznHUxs7229P/ktBVVnpdKk0KnV+5lxkziJRLHHkkMH16/PYmSeiGOiyhyxK8qAyFOLoohI7dS4tF6PTHVVXUnl/+kjJwcQyVn0KXRNkCnhR4cairowWAd/349ZYWlRVMZobKVwyd4xBxyheh4zTEYUWaJTZsoNgauZsfpNDJxUV3uezvV9dOT+4gF6Ak4ZflUkJ/beFC/yQT2YLj5xob4yl08p7n4nL5xBOUKIH76po1ZMy2tVESBR1RkzzwWDG1s6ZGETq90C0TOkng5ZiPo9DFLbVggsnoZPC8wImdVq6ksf/MM/T7+ffqCh0b4yYVJwqhk23xy9zHfdfPpUy6XPK95/H5059S3Sn9s3KFzLYqke86dBwDyEa+XxIYufYxSQeib8TJGq75UujGjzcrvkzoOCbMROjC6sgecggd33wzvD1ha1mYHRFG6Lg/yJp+pk09vQ4dfxaT0rVrqV8zwdbn/TghNdx/5PvSSuj4+vN1nD7dmxCPS1SxHQOotTeI0Mn4OpkcJZcNV92T4dVXSe3zA3/XqlXquSRrJKcZltBlCe40Q43Q+WW55N+bZLbIKJDuHabCpWefTQOX69nk0+WSv3+33ejI14yvF7dVZlyMo9DlQpZHjjTXmwJyz3zJBDYOWeH6UdJwCYqhA5SxM2OGV6GT/bEYCp0ONvj011pbk58P2GAZPz4eoZO7xVEVOt2QXriQ4tTuv5/IyMAA9e3Ro2mBlLu3w4apmL0o0A0oQJG3ceMUodMzQJpiNvwIXVSFLl+Ejj+fCd2779L5emIZ3QDfemtqk4lYRCEzugFsAn/O88+bFRfpcjl+PLm23nkn/V9bC/zkJ95zk0j5zv2w0Aod90U2yKModKNGKSMx6kac46i+mi+Fzi9Bja7QyVqPjDAVY9o0cle7/fbw9pg2dv79b+Dii+kxr6F+CCN0FRW08SHX2iBCV11N79EJHSt0epIVHrtxCNkrr6ixneYYOna5BNT6sv323s383l66XqakJ7Lv6GNl662938OQxDsu+F7wWrHHHqT2+cEqdBax4LpDJ4bOdcn4C4uhC4svyBfkQAwqXMrwy3KZBKH7xCcoff7119P/+oIjFTomUTKLFl9DndD19FD7cklhLRU63YUsV4WOjeiwhBgSfoQuKDaBd48/+1mvQifLPbS2eo3HIOKVC0xlCxisVOivrV+fP0I3YUI8l8tsFDrGnDl05GB4gH7bunV07Vl1/d73gCuvpMdbbuldzMNgInT8vbvvTr93q62odImEidDJuSCqQnfjjSozXS5lCyorybA0uTnydeXsgZ/9bGb8UUVFJqGbNs0/TimuQudHALneX08P8Mc/ml+XMXTPPEN160xw3WSUiCBCl0+FTid0fgqddGN0HBXnGcezIl+EjudLvzWEyxZ89BFdT5OBG2b0Og4llDAp81Ewaxapu2+8QUllghBG6ABaW6QqZprnurro/pSV0TXgdbm/n/ptSwvN53pMHvexOPbdV79KfaKjI/0KHfdxnnuY0PH8wgpdGKHTId2x
5b3JpayQTujCYAmdRSzIQVrqCh0nlZAK3auvAjffnHkeUHhCV1WlJh+5CPoton516JJwuQSohnCXSlMAACAASURBVA1P/nzN2JgzEbo331Spl00K3fHHA1ddlTvhlApdfb3XEE6K0MVV6Boa/BU60/1gorT77l6FjifyCROoH0qDL98ul729mQYeZ5DTDcvW1uSNTXZzyUWhi0voGhtpHpg9Wz23bp3a1eb7tPPOwI9+BHznO8DDD0dvG2AmdK+9RmNr+nQywN5/nxQBiaQUuquu8p6XLRxHxcW99ZY5zXltLbl2XX11puvPmDFksEiycNpplBLfhKQIXU8PlV7ZdltzhuI46fj92jV3LiVRiWqIpUGh49iiKOCYsjjFiKdMoWO+XC6DCB0rdCZ3SyAZo5dJbhB22SX8Gg8frq6RH6FrbPSmxNfJ1w9+QGOO5wd9je3poXHZ3KxUP53QZUPINm5MfwwdX8NJk+i41VZe7yxW6PSkJzI7JkOWu2hsVDHGktBxVtG49iPXrQUsoYsCS+iygFxQSl2h48WIJ88TT6Tjyy97zysWoWODCYim0En1Jl+FxRnsP85GNy8cLS00qe+/PxkIt95Kz5sI3R130ONcCadU6OrrqYYfx8fkSuj4nschdBs2xHe5nD2bNhPKy71ZLpmc8M6gdLvMt8ul62beGyZ0+jzQ0ZFfl0sulRIF2bhcMmprqW/LsSQJnVzQy8qAX/86uIaVCSZC9/rrRBKDClmb1LRsYuhkvGAuhA5QhG6nnfyN5eZm+l16TUDeAZexOocd5nVpPOkk4Jxz6FrHdbn02wRgwrbzzqqQuYRU6KLAdJ3PPpuIPmf8DUO+FLrVq4ONQe6Lq1bFW98+9zk6xlEf2NUwCRdViTgul359NIk4o9mz/UthxEFtbbhC19jo3UCR81hnp6oZynOCbgN0dNC8xvN5TY3qJ1FdLk33sbtbjec02ofS5fKFF4BHHqFro7tcVlZmzgG1tZnk6O23Kfsvv77ffvRYunLzGImSIVW2c9w4tebpY5iv7U9+ouwo+bwklElt6KcdltBlATlxlLJCN2eOMs7Y8D7qKNq11RfNYhE6QE0gphg6gDLP8YQik2Yk7XKpgwkGLyo8iV92GaXn3Wor4AtfoJgDmdhDd7kEci/+zQodEynAnHgiGzA5yCaGTt6zMIVuyhSVQZHv15IlZOAC6nrLe5xvhQ7I7PMml0smmfkgdHV1avMgqttlLi6X8vuYgKxfr3ZZk0hbbyJ0q1apHeMgNDQA3/+++l/en6gKnSR0ubhcAorQRYH++7geXBBuvZWUhrAkLwzZFlPdRkARtp13JiVUb39nZzih4yQZgPk6s9Eo60HpMHm76ISuri43hW7sWH93UcCbMTHO+rbfflSAmt2Oo4AJnSx6nAT4WhdboWtoyNy0yAZlZeGEbuRI71ogyZOsM8beMroNsHIlrbs8n9fWZhbWDiNkpn7Pibzk56QJ0uVy8mSyUQAvoWOXS52A1daaNw14Pq+rU/MGEyoZ6ximTLe3K3vjqafUZ1RW0pwkCXR7O3kBXH45eTox+J4NhRrRcWEJXRaQgzyNOzBRIVU4PYOkbjgUk9DxZOHncrnrrmTwnHMOcMIJ6nm5uOVjh4YzOjHR0Q2N+npSPFetAl58MZPQycUo1/plTU00wa1fr3ZaeTLONSkKt83kTuYHJnRy8g+LoZPg1y++WCmNJoWOCUG+FDog0xAwuVxym5Jwufz4Y5WKf906urdRC+kyghS6I49UCtDAQObCJwnOnnuqdpgUumxhInTcZ8LQ1gZcd53632+cBxEgaRgkodBFSeUOZBbl1gndoYf6v5dLtfznP8FuftIAlskIeNPo2mvJEKqqUun3ZYHlvj4ydv0Mf8YTT1ANOcB8nVmpefFF/8+Qc6BO6Ni91nFIAeb5NsoYGxwEfvtb1b9ef93/XLmmxVnfHIeKUZvijPxwwgm0JvzoR9HfEwd+Ch3XoVu71n8T
IW1uaVEUOgm5BkjVlPuAPo/zus3zeW1tpgoURsi43192GXDXXeq5UomhkzAlRTn6aHKp56y2cm2Q/UXaaHwOz4mS0IUpdA0NdD+uucabfKepia6pnGfa22lDhcHzjckWSFoRTysCnFss/DBUFDrTgAQya7wBxSV0pjTe0ggbP54miquv9v8MWXw4KYwdS21jP37d0KivV4ki1q7NLCzOsXVA7v1IJi7hlNqcCSxOMg0TeEH1q5dlAsfQ6YQuaokGXnzlfeadVPl7CqHQ6TWdTC6XbJj29tLikUuCmy9/meKavvAFRei4z8RV6NiQk3joIfq77LLMxU+PJTnzTMpyuW6dutZ8H3KBidBt2JC725eu0Jmul76450ro6ur80//rCFLoLrgAuOQS//dWV1Mm0V12ITdMduXW4Ufoenvpfv/wh/R/VZUiSZKQLl1KxlOUJDd87fTr3NurMvzKxFA6pOGtZ1t98EFy15w5k8bTsmU050YZ6088QYYoJ9oJgpy3g9x9k0BNjTkJTa5gsh6k0HV10Z8fcSt0BmsTHntMbS6w8S83L6S7oE7odJdLHfrv4yRfJpdLRhgh4zlx9Ghvzdy0Z7k02XE1NWod6+ujsV1VRS71XKeP5+0lS7zkjH97bS3dH0niysqUd4ffhoNETw9tzks0NdEmk4yL43hUnueffZY2SzZnQmcVuiwwVBQ6afzoZEknJ0mk1s8WPAn4uVxGcVvSi+EmgfJy8vFmQtfbS4oGG0L19V5DXFfoFixIri1cwHv5cm9tmKhuWkHwK+odBHb9lBP4pZcCP/85PQ4zorlvynvOC28hk6IAmdcvyOUSyH1Xlhe/7m5aYEeOzEyrHQa+Z01N0VL3M/h6//WvZEx/+tMqPnPNGupbSRh+PN/wdR4cJCKSq0qgK3Sm366n0E5CoYvqQhdE6IYPz9w8kKiqAv7+d3q8YoV6fs4c4JhjVL+Tirz8rT09XgJVXW0mZO+/T8cohI6vt36dly6l9sycSX3Hb+7gvg5kKnTbbUcEV1dgoyh03K6XXlLP+Y0d+dvD1rcpU5R7f5rA5Oagg8yvV1YqYz1OHFOh8bnPKfWS55mLLiK1lbOLR1HoTPfaT6EzuVwyoip0TH6A0nK5lGCF7je/oU0Yeb2kignQOJAbe7oXlZxTe3vVvJOtBwvfa5kEh7H33nTkucRkkyedVTatsIQuCwwVhU5OXjqh81PoirGLZyrGqu/E++HCC4FHH81PuwByS5IulxMnUgwi4CV0XV2ZhE4G7eYKVugGB70LnZ9KEQdMDqIqdK5rdrkEVFHaqAqdNHCZtJoUOn0Sz3UhlS6LenyRyeVSuo7l6nbJi213d/Yul9zmkSOD5yjd1Zf75qxZwIEH0uOmJuVymYQ6B9D9dxxF6Li9+VDo9N1ZSYaAZGLoosbAysK7gJfQhY0JOc9Jo/zoo2kHfelS+q2yoO4pp6jHOqGrqgomdFE2wfwUOu6n7NK5ZIm5H0qipxM6033hWLAw8LwjayPqNQ0ZcryGEbolSyhmO20YM4YU0V//2vw6ly0AMgndlVf6E8G04I47lBsjrw1JK3Qml8uwDXvZV+VYKFWXy7Y2yrDb2ZlZdxHwJ0Yyho4/izE4qGykbNdGfp8p7IMzq/qFX3z725SgaXOAJXRZQHaYQw4hN4FShJy8ohK6NCp0Qbjssuzr5kTBxIlel8vKSkWu5CTf1UXXsKxMGW65FNvUwWQHyI7Q9fSYie/goDL629ujBRrzgmYidIyoMXRSbTAROlMdukceoX769tvhbfWDHBvc9zlWJkyh0w3O66+nZENRwWOsvT17l8uODvqc4cO9i6hOPGSNP8C8YHPCnTVrkomfA4jMSQOK3aqSJnSmGmm60ZaEQhcVlZXk7sqQhC5ss0y2U7pIynqWDzyQWbuPYSJ0fL1kv1q8mJ4Pqjelt0nvl9znpk+n4+WX02cyWWRId7oohC6qQmcaJ36F7+ModGnGttv6t1/2Lb2/
nnuut+ZkGrH99pn9Ikih6+zMnMv0hBy6Qscul3KNG6oKnZ/LpcxQKvtMGKGTLpdA5pzK7tdB61fQtQoidFOmUFvZTtEJ3VVX5b5pVyqwhC4L6AbbnXcWpx25Qu72pzmGzqTQ5WqEJYVx45Rfd28vTepM6DZsUBPJ9deTUVNZqXbHkiR0sj6XfBzV5fLMMymb5Ny53ue5j7BxF6XmEpOwhgb/mI6oCp38viCXSzmJ/7//R8cLLghvqx90o//kkynF8847K9UhKqH7/vfJ0I7qns39QxK6bBQ6zg4o26O/nxdBdtM1LdhNTUQEkiR0/F38/Xyfk3a5BPzVI0YhCR0AHH64Up6zVeikqyIbQhs2eLP76QhS6CRJWrqUkrdEiXcxJa6Qn7f99nS8/346yrhhwKv6S0JXXm5ea6IqdCbSx8ZgRwfVWWXlNo5CV6oIInSlANf1kicgsyalrtDV1AA33UQbfEAmoVu2jOZauZnV2en1WogaQyc3b02bgWmCjEOU0EmPHAu6y6WOIJdLQMUYB9kiJtvi5JOBb3yDNh0As8vlllt6Nwf1a765lCwALKHLCvqCEtUguOYaVRslDfBT6NKW5dKk0KVlkNbWqmvFtVs4KQkXaAaUu49O6PRdxmwRpNBF2dFmQ1BXcIJqwPlBGud+RmHYmDERuiCXSzkmmZS++mp4W/3ABgGjooJcO958E9hiC3oursvlkiXRvpv7x5o19FlxFboVK8hgravLNIDlmHdd9T/3GdNO5rRpwPz5ZBAnTej4++UmQC4wuWLvuKNSioBMQhcUtxYFcu4cNy7ae7h/NzWp74+6yQF4FTqemzdsIBVqyhSVcU+ip8e7ieQXQ7d2bXTXWr5fujHGfW70aO9n8W9cu5bGp59C5zc/mJL8mGAag+yK+v3vA9/8JiVRAIaOQheEUiV0v/89HVtbMxU6GSsOZMbQ1dYCp52m0vLz7+b3L19O4483HTmGLBtCJxU6OcdmE5LzyCP+7sFJIMjlUkKv49vQ4K907bsvka9ddzV/FqOnh+KNZ8/OfM1E6MaOpQ1adlU3EboxY2gONhG68vL8JzpKEyyhywL6DkBUQnfOOcAPfpB8e7KFnHh0hS5NdeiYFJiMtWJn5uJr5bqK0J12GvC3vwFf+1rmTrNO6PRFKVtIEmdyufznP4F//MP//RxPoC9AvPPOKcyjxNGxcV5fDxx8cObrU6eG9yO+13KS5wU5KIbu448VcYqqZpnQ2endAZb9jO9fFIVOuoy++2607+bP513NuElRrrtO1ZzSXdSksSJ3o5mIyI0Bxr770jVfuzZ/hC4fLpf8+MMPvQmIcukXJnC/rK1VO9lh4P5UU6N+c9hcxn28upqUNt584efb2qikwUEHUSZMHb/+NSnxDD+Xy5aW6L/Dj9DJbLYybpDHxje+AeyxB20U6K91dfkbhDIWTOJf//Ju4JjO4Zhldvvk67c5EDq/+Mu045vfpHp/ra2ZCh3PHVwKRN+40tUk/t285ra3Zyb26Oz0rnFhChu3SSp0co6VG31RcfjhwIwZ8d8XFX6ETlcw9fno1FOpbSY0NQG33KI+g6+F9NCZPJmu59Sp5qzjJttCj8mbNy/znK228np7yHuWFk+uQiESoXMc51DHcd51HGeR4zjnGl6vchzn7k2v/9txnCnitZ0cx3nZcZx5juO85ThOyV/ibBW6tIEnxL339hpSaXO55B1sqR7xZJPPiS8KqqqoXf39ZERUVREBPfJIRUSlcSIJXUeHuu5hNZ/CUFGh3NVMhO7CC5XbQhD0+86TJLcvikInCd0Xv0iL2q9/rbLmRcmepy8mZ51lVhP0GDpOeDF9em7JYDo7vddRtsdxMpUCUz0tQMUO6I+DwP2DY35Gj47ncsmGzH77BSt0bW3q/wsuoMQaXP9OYt991eN8K3S5ulxKQ8VvXk6a0PEu/uTJajxPnqxqN5nA82h5uXpPmELH12raNJpzeCzy98+ZQ2Rsp52Uiixx
883e/6uqVIrxfBI62Ra+z6ww3nabek2WLfAjdCaFbs4cYJ99vMXDdUK3zTaK0PEmVXU1XTsZMzVUd/PluC0lQgfQPCwJHfeNgw6iteSKK+j/sBg6ndAB3n7O85FMKhTH5ZLHL6+ZDQ00x8ZR6fjzkqhn6ge/sgXsHs3Qz7nmGsqmGwU898rNnLB1nwndY4+pe6XH5s2dq5KgABTuwoTOpNClxZOrUAgldI7jlAO4AcDnAMwAcJzjOLoZ/Q0A613X3QbAdQCu2vTeYQDuAHC667ozAewPIIVexfGQrUJXKLguydRhakpnJxnqL73kdY2ThK6nBzjgAFKcgOIQOnZh1CfiP/yh+AlpJMlghU6H7ronjYaGBlI+5E51tmBFSRIRdp9dtcqfjMnnwwhdHIWOjb3GRqoJddZZ9H+UZAvDhqndvXHjqBjysGH0F1S2gGOEJkygvhs1+6COjg5/Qsf/mwqLA97nZf+Mq9BxUpfJk+MpdGyEPP20t50DA97d47Y2r0L35S+r2j4SY8YAn/iEOi8p5EOh8ysyLpE0oeNrc8styij6059U8XYTHnuMaiY1N0cndHyv2KBhUsT3+6qr6Jp+6Uv0mWF9n+cu3SOD4zajoKqK5jSpRANqPFZWegmdKZsp/+5sFbp77lHv52uinzNhQiah6+3NPG+oKnR6eYxSwsiRXpdL7rdjxtCmF48/ndDpfchE6KRCxy6XstZZNklRdK8HGbfqh/Z2ysa4cGH4ubmAk0SZNi523937fy7eT3zt5UZ1GKHjNWDUKH9CB9CGFoMTldXVeRU6fn/abPN8I4pCNwvAItd1F7uu2wvgLgBHauccCYD32u4DcKDjOA6AQwDMdV33PwDgum6L67oR8uSlG/qOCw+Od96JlgUw33jtNZLHTz01+DzTLhbgJXTLlwPPPad2d4ux4F1/PWXh2mEH7/Nf/7p5J7qQkO4f/f3hhM51Mwldc3PuygSgjDBTDN2aNf5kTBZF1gkdK0+80xY3hk6CDdHDDgv/DMcxFxfX1WNe5Pv76dpKQgdkp9JxbJmfyyX/ryt0vCnCz69ZQ3W0jjqK6nEFLez9/ep1nkOY0E2ZonaSZeyUH3p6qM8NG6YUjdWr6X+Z1nzDBkWowur0vPQSGc5Rd2ijwETokhgHjDCFLily+oUvUD/bay+6vjfd5FWLTPjkJ6nAdFlZdJdLvlY8jtauVbW5GCefrMaqnpBIj9fluVyOqe5u+p6oCp3jUPujKnRMpmTJFj1rbNwYOhkX+MIL3u9njBlD37lggXK57O5W5/E9sIQufWhspPlVd7lk8D3Tkz/5KXRy40RX6Pr6VPZLILuyBUwsuN/LOXvuXLP6dsMNVA4jSNVPArzJYyJ0OuHKZSxwwiEZuyyVNSDTVmb7pKFBETk/QvepT9HaytAVOlakrUKXiQkAlon/l296zniO67r9ANoANAPYDoDrOM4TjuO87jjOj0xf4DjOaY7jzHEcZ86aKBZLkaEvKD095E41cyYVwTQhnxK6DknG/LBsGbXZj9ANDJCRqS/UxYhZq65Ob50cnsR5h9o0gehFNnVClxRMCl11tcqW6EfoZF0unQC99RYduXhn3Bg6iYMOImMqKingaxmF0AE0kbNxlwuh45jIIIVONyxbWzMN0+XLaRwdfzwZE3omQIkzzqD39/V53W7q6+m+NjRQeyT59gNnW+V29/SodNR33KHOkwqdHj+ho6qK7luS4z9Jl8sLLlCxNIwwhW7evOB7Egf8XcOHUwytX3ZXE+K6XEpCp49HroFpgv5bZUwejxPeVIhK6IB4hI7vsyR0el3Hnh5/QmdS6Nato2tSXU2xdID6PaefTm7fTOimT1fjs6dHncebYWnYkM0HSp3Qtber/uuXjXHePNrE7usLdrl0XbX5phM6wJu8SlfoXFfVUgWiKXRs0m7YQCoYZ2GW4E0GabMlUfKgtZUI0Jw59H9Q6IzjeK9HLnM9z7EcJ7tmTeacotdA5Tlk
xIhwhe6FF7y2tp4UpaGB2m8VukyYliY34jnDAOwL4Kubjl9yHOfAjBNd92bXdXd3XXf30UkGaeQJ+q5NT4+S6f/5T/N7ohZlTgIccxa0OE2aBLz+utm1RboR6u0eqjuY2YKvFU9GYQpdPgkdGyXSpaSqylv43ERwZG0X3Vh67TVSiNgVL0qQd5BxHqVYMYOvpTR0a2rMMXQAjUtdocvGvY4/M0yh010u2Wg64gjaMGE1s7HR6xJiwq230rG72/v7Jk9W5GDSJG+dID9wPURAEU/T/PPAA6q0Q5hClw/U1pIK+dJLNH5kko64uPxyyp4moS/mep2zESPCiWwhEJXQcbuly6Wu+k6a5P/+/n5KlvLNb9L/fD1kVuNcCF1vL92H9euDFTrXpbbrfU4SLb9r4afQjR1L8dS8AcXf/7vfUT8fOzZz7pIKHY/1NKaZTwLSrCp2IrG44I01jm3TxzX/nl/8gsjSCy8EE7reXqVUyRgvfn3hQkXGdFJ1332UyZHdfKVCV1ZG9pGfQtfaSv3LFF7x7397j4B30yNb8Mb9M894f49frOiyZcBxx9HjJAhdUxO5oY8alTmm9fUwSKGTNtQee2R+n54UpaKCxrRV6DKxHMCW4v+JAPTyfv89Z1Pc3AgA6zY9/7zrumtd1+0E8BiA3XJtdLFhUujY6PKLWygGoYsSP2Qy5GSBTH3n1RI6L/RsjKZJUKZFz7dCV1vrvUes0DFM/VASOp3wvfYaxSgMG0aEJQqh4O/LdSfYz+XSFEMH0LVdt46uKZPJXAhdWAwdzwOuS8Yi+/O3tQHnn+8ldMOHBxM63nzp7va2Wca0TZ6cvUJnqnn4+9+rNhVj4ePY2BNOoGuVtHKgb1bxb+3qojGSa7mCpMAbMGEGFJOPIIVOVylN38VzPn9eUgrd/fdT3OCPfuSNoZNxShs3KvK3227edkQhdCaFbv16Mhx33NFL6Cor1bps2ieWCh0TujQWgk4CxdiwSQo8D7MnSVC9NIA2ME1xmDy/SNK+pbBsuc+/846K89b7A2eD5rIgHR0qSRZARz+Fjp83la/R6zMC5hT9ccH9m2PzwghdTY1aO3Ox9XgNlbG4OhHX18PWVmpXTU0moZNt0UNv+Dyp0FVUeMtGbS6IQuhmA9jWcZypjuNUAjgWwEPaOQ8BOHHT46MBPOO6rgvgCQA7OY5Tu4no7QfA0HVLCyaFjgdKFEK3YAHVo2M/448+oqyIptpB2YDbEsV9JEyhs4QuGLpCZzJEZJ/Qi3omSehOPZV2KU3tY5gInZ/LZWcnuYJwCvTtt4+W2KO9nSbYXA3muC6Xvb1kkMat26aDFwbpphTkcsnxk0zo+HwmdCNH0vWI4t6nK3TShW7y5OwVOp3Q7bOP9/84LoJJ4cc/ph3vxYuBu+9OXi3zI3RBWRSLgagK3T33UMbRpiYy0NeuzezfQQod4I31kS6XTJJyIXTPP0//P/usV6GTZKq9XSkPXLOqo4PmCh5Pfsml+PNMCt3IkUToVq6k66KTQlOSF6nQ8etDVaErZTChYy8oP4WOsWRJuELHkISONx6WLycyNmxYZn/g8fHyyyokpaHBW1qJ55kxY2heDSN0eqZVxse6bJIFdELHvycomyvbeEkodNLLJUyhe/dd8uBxnExCx5g2zbxWmRS65ubS3sjIBqGEblNM3JkgcjYfwD2u685zHOdSx3GO2HTaLQCaHcdZBOAsAOdueu96AL8EkcI3Abzuuu6jyf+MwkJfULq7VWeKQug+/3mqR8e7MhdeCDz0EGVGS7J9+iRxxx30PRLSZY1hCV108LXi+2uaBGWfyKdCN2sW8K1veZ/TJ1Gd0M2bR9lCp06l/6WByK6avOs/bVo0QrdhQzLJLeIkRQFUDF1zc7w0/zp4TEhXMb3fS6WAXbkkAayqiudyybjkEm8chYw3nDSJ7l+Y2i8NYlYSdde8xx7LTGNfDHBsbFtbYRW6UiR0xxyjXPpHjSJDUSd0OnE57zzv
nFRbSwra8ceruUKOKSb+0hCL0v4NG4Ann6T/339fFYSuqqK4tUceoTlm40Zl4DKha2/3Kt5xFTrOysk79/PmZX6G6feYFLpsCkFb5BdhhE6fmxcsCCZ0kqTJDRCpJI8dS+u0rtDx3LxyJSnSTOgY1dVqnqmspLWI+ztv6H3wgdrMB1Sf02uzRVkvwsBjRVfoguw4tk/KIhU1M0O6XDLCFDr2BgLMhO6DD+gcE/SkKBUVVI/1qquyan7JItItc133Mdd1t3Ndd2vXdX+26bmLXNd9aNPjbtd1j3FddxvXdWe5rrtYvPcO13Vnuq67g+u6xqQopQaTQheH0PEOzaObqC37hieVOMWP0B1/PCmBEqaYKBtDFx1RYujkfTBlucwnwhS6r3zFuyEhDUR2+eB4tGnTaPfblK1xcFCpR+3tyRA6dkULiqHr6vJml9QVumwInawjxAhyuWTipit069fTbmJ9fXhSFMYtt3j/33NP9ZiJdZjbpTRmKytVzJJu2M+cGd6efEOSuEIpdEFZFIuBqFkuJUaPVkqUhL57fcUV3rpaNTVkIP/pT97xxWOKr1Gc8culVxYvpkQFn/wkGV/l5Uql/8IXSPXYuFHNHzNmUHuvvDI6odMVup4elZGW1ZYVKzI/Q8/wCdCcvdde9Hiox9AB9FtNZUnSjrgul2++GexyKfuPVKLl4y23pM81uVzuuScpSX/8Yyahq6qisQDQHDNihApBkPOPHJM8hvXsz0mUV+Fx/fHHNPbCXC7la7m4H/M11ksoSZx8Ms0ZAK1Py5YFE7rJk/3XiLo6Grv8V1FBBFkvxTDUkQMH33yhq1qS0HV1ZapagPm5P/6RBhm/Vw7yXCAJ3aJF3roqOkzGua7QyQnTEjov9CyXYYQO8E6mclcwHwgjdDxBfuYz1K4wQgeYVborrqAJd/FiuhZJEFV21QqKoevsVN9lUuhycbmUi0eQyyVfU+laVlFBRG/ECCKcrNC5ejopH5x9Nr1fGuhM6MLcLqVCIsPf5AAAIABJREFUx4voypXevjZsWLR6gPlGebkyvPJN6JhQp02hO+oo4Kc/jXc/Ro3yulw+95z/PC/JWViZmqhlLCQaGrzlTTg2Uh8zw4fT3MDfNXw4bQSdcYZ3PIUpdL29ahzx9zY1qQ2V1aszM2VKw/Lee+koXd/4Gg1lQvevf5njt9KOMIWurMzr3s/rUxSXSzm/SkI3aZK/QjdpEq2FXApIV+g46cnOO3vHllSk5H3g9owa5f0dSRA6ueGzaFE8l8tcxsKTTwLf/a43QZt+3959l8Y+APznP3Rk1d7P5dIPfK87O/0Lp28OsIQuC7S0kEHQ0UE7CpLQvf22txNfeinVJdIN6fPOI9eAX/5SLaL5IHTbbkuuKH7KoSlZAg88TooiJ6zNdaD4QU+KEhZDB3gn7XwTOm4PL1wmxXWHHSg7mO7OGIfQsdq8YkVyCp0fodMVOh5vXG9t9OjsFLqBAXJJXrCA/pcGQVCWSyYK0iDo6yNCxsZIXR31g6gq/MiR3nkEUO5BcRU6gOJzdXe8JIuE5wLuK4VU6NJE6LbcErj44nhxjLrL5bhxXoVYorxc9SW/mGn+HFa848zzcn0YP17dT30urK+nscLfJdtSWanKSIQpdIAyNiWhGzmSfuuqVcEKXWMjGbRyY4SVgaGaFKWUIRU6WZ9Ugp/bc0+1oaATOp5f+vqA/fZT91z/DIDGpB5D9+9/k+vixInKzdik0AHUH7femv7nOV8SusWL1WN+vbJSKd1AsgodQG2P43KZC6HbfXeqHyznNNOY5mvOqiaHOYweTfNS1I0lvreW0FnExtq1tKDW1tKfJHQSg4O0UJ9+eqYhffzxNDEsWaLe29KSzILChI7TeLe0eL9ffsfhh2e+nwceu1zKCStoZ2dzRJQYOl2hk77phVLomAzo/bCtjUh/XZ2Z0DU0qJ3NqVNpojQROu5zFRXJxdCZioNKl0vX9RI6rrU3cWJ2hO6HPySX
5HPPpf9ra9X1C3K5ZEInCVNHBxE6duXiaxi17pnJ8B47lr43jkInUzxXVAA77aT+T4vboV5zKCn4xWykjdBlA93lMiz+jo1iP4WOP4djj+KQS+nGN2GCGvsmhU4SOnl/ysspPvCoo8IVOkC1V8b8lZXRdVm9mr7Dj9A1NNB38zh69VWlfg9lha5UUV9P/bGzk+6bqW+yAT9yJCWcAzLXVu77o0eTos212UyYNCnT5fLrX6fjrrvSmtPWRmudXiYIIHc/x4mn0FVVUT9l1+GkFTpJ6KIodElvbphipPna6fVQv/1tUpSjJlbje9vRYQmdRUy0tKjJguMPdELnuiRxy/dINDfTxLJmjXqv66qdilzAg1guTstEaXieKM46i9w+degul3LCKkY2vDQjbgydjkIRum22oXvH8V4MeX9lLA1AhI7VOYAWga23NhM67mv9/ckrdHJhqa6m73/4YeV6xe3nRXLChOySonCtHul2xgtMkMulidB1dpJ6IBU6gK4pK4BBMPWjsjIyNOIodHvuSVkHAeCNN4BXXklmjkkSvNAnnRRFn6uGGqFrb1frShg550050++W4z6bazNjhnosFTp93quvp/nGROg48+XTT0cjdDz2mNDx2OMC4vpnSANPJ3Rjx6rfbAld+lBWpuZ4v37O96++nsIH3n4bOPpo7zkjRlC9z7//Pfw7WaHjtcd1aYP8618HvvY16kNM6OSGN4decBZhE6EbMcJL6KRCB9D3DhuWrEJXWRmd0CWh0JlgKh2iEzomZg0Nyv0yCtjbZO5cS+gsYoIVOkDtbuqErreXdv7keySamlQcRGenypCXq9vlffeRm6eOnXdWj9mo32Yb88IpScrSpflP3FHKiFu2QIcpWD9JcHvGjaPFzqTQ8f2VO/UAubjocT1+mS558u/qSj6GTsaf8vU+4ghFvHRCJxW6qDF0AwNUgFX/fl5gglwueeybFDqd0O2yC2X9kzDthPplOItC6PS072xc7LQTXRfpGvrOO173n2IgXy6XOtIaQ5cNtt+ejm+8QccwhY5fjxJDFzfVN7cFoDWN76eeMbKxkeYbHrfSOJdZ8WQdRb/foWeYZSV87FgzoZOor/e6wslxbgldOsHzqN+45QLh3PdmzjQb9Sed5C1V4Ie6Oq/LZUsL9VH2cBgxgsbM2rXetW7ePDoecggddULnODT/+yl0jJqaZBW66dOJ0PHvCSI8+VLoTPM7K3C6QhcXn/kMeQr86lf0G3MpuVDKsIQuC6xdq4yiqioyOPWA9I4OFeg5cSIpcXJXZNgwr0K31Vb0fK6E7phjwg00bqvf5MgL7emn02/QVR0LhSiFxeVO9ec/730tl9TAUcD3cswY5SbCGBwk8iV3PyUBamvLJJzTppHyfMklStECMgldkgqdJHScAhpQmyRBhC7qovjhh97fzkVJgwidrtBJJVsndLr6JEm+yQ3TlH0WoEVL7rSaoBvEFRW0Y81p5SWmT1clK4qFfLlc6hhKCh0blrxpGKbQcX+IEkMX99pIAlhWFkzoBgfVGDYRrhEjaL7MRaEzxdBJsEIHUFtratRvtmUL0gmeR/36ObvM5rqR+LOfUeZnwKvQ8Saa/j29vebv5Pg8qX5v3EhjZautghU6IDObM0BtOekkKnMVlWzxZ+y1F7mYctmFIIXuiE3FyI49Ntp3RIXJu4vXZya72YYBlJeTIvvvf1uFziImdJfL998H7rzTe05HhzJEu7rI+NSLvo4aRbuJXV0qDiFpd6i99wa+/33vc2GEjhdoNv633pqKVltkgicgNsBN1/T222kHqa0NePDBwrUNCCZ0GzeSK4lU6OQiYiJmY8fShPnTnwIHHgi89BI9z4SOs9glQeh4EWc3FoAmbMYrr9CR23/77XQcPz6+yyW7QbKbBwdn81jQF0Dd5dJxvPdej6HTyYokqaYMuKZCyADFvLa0UF0vP8jC4oyZM/2TZhQbbHhbQhcdW21FfZPrS4UpdDweosbQ
xcWUKWoc+hE6HgsrVpDBZYqPCStqbFLoHEdtpvi5XErU1anXeEzwbz7+ePN7LIqLMIWObatc3bbPPx+4+256LGPo2D2XCZ3cvJOE7l//ojWe+7YcWx0d1PcmTFAlGIDoCt1HHwG33Uakk5X5MPB6/oMf0Pdcfz39H0Topk0ju6AQKf9Zre/spGuTS0jP2LH0G9ets4TOIgAffghcfTXtbvT30yIiCZ0JGzeqAdneTruSOqEbPTozQ14utehMKdFHj6YCixI8mfgt3NKwGjWKMiDefHP0lOubE9jwYBXTtMO0337AP/5BE3+hk8oEETp+7KfQbdyYuUDqC+q++9KOOBM6joVJgtDxZ0hC97vfKSOMyaS+Q1pTQ/fFcaK7XHK8K7sn8vjmsaAvrrrLZW2tV21ta6Prp7tcytcZOqE7/3yKbzXhsMOIbN51l/9vCXJZSyP4uuWb0LHxMBQIXVmZKqRdURGu9AcpdLw5MTiY/bVZsEB5l/C41dcLJnQrV/rvxPOGZhyFbsQIZUBvtRWNxzff9P+MsjLvvAjQvNzaCtxwg/k9FsVFmELH3hxJ1fIFzAod23B+hG6vvZTCBWS6XNbV0V9Pj/Lc8VPo9DVHroMyJ0IQeHNvu+2A/fdX8dTFSm6nj0mek/na5ALuA1ahswjEBx8AP/4xGX2sxIQRuo4ONSB7e6mwo0mhY/CElcuEpMdHAd54GUaYQicH1owZ/mqBhXITYEKXNkOR+6eJ0DGRMBE61zUrdHITgMsY7L232iR4+P+3d+5BdlR1Hv/+8prMTOJMyEOSTEJ4RRIICkQgaMCENwKhJGAQAXmIPERdHyiurgjUlqArFAW7oCYFSIm8XAi7ICKgQSoCAVQMEAhJIJNEIOQBMTMEwtk/fvPbPren+3bf9+17v5+qqe7b3XPvmTun+5zv+b3u062fKKFY9thDB6EbbwyOHX+8flZbWxCvMHmybocP14B1ILCYpbXQ2Xdhf5MNeJYUJhwHGXa5NOHb0wN89rN6vwPxLpe+G7M/UAPAqafGD7iDBunfa+/v8+ijwIMPRlvo6hlblS13UpQwPT1Bv670Z1UD65tp3JTsORDVr/wkWMVa6Fpact0Yo/DTz4fbfO21urUkL2lj6DZsyB2fvvhFzTCY7z38c77VuqODWZzrlSQLnfWBqDJMxeLH0L3wgs6LbD7li7h8bp5xgg7InR8CyRY63zXfXCeT6O0N7rXDDw+O10rw2HP3e9/Tdq1dqx5u5RB0/nyago7EYg+RrVuDFUS7seMGU1/QATohzCfo7IFUig9/VDpz+4wXXgjSsZugixu4hwwJBrZKr5o3AkOHBkIprQ94V1eQBrmSTJ+uAuETn4i30Pkul6+9puJm2zZdncxnoTvpJN36MZsPP6xuKYceWnrbhwxRkWJWM0NE+7UNapMm6Wrn228Hbpf296QVdFu26OeZq6X1/xtuAK68Uq2sPi0teo//8Ie5gm7oUP0+TbClsdCFE6Ak3XMWextm9mzgqKN0UpwlC50Juko/a3p6As+JeqnBVwp236b5X4eFUNT7vPOOfjfFCDqfOEGXz0J30UVamzXc3jBhC93GjcH72u9Z7E94cv/yy8CiRbpvn2/JNEh9Y8/X8BzKsAVEv4RGqZiF7p13NKTmhBOCZ1WchS5MlKDzU+xv2KAlFIDKWejsXvLH5FotXNj/8aSTdCH4ySc1OR8FXXmgoEuBX4X+2mv1pt5zTz0WdnW5+27dhgUd0D+7kp/G1QalUix0UYLOhOceewRxcEkWOiC4uSjokvEnH2kF3erVwIIFlWmPT2cncOutQaHqfBa6lhZt1yGHBINHeHLm95m4bGGzZqWvH1MsI0cGg1pra7TLWVRgeRz//KcONjbQ2bazE7j44v6+/Tb4Xnqpiitf+PoTYhN04ZTN/v8h/JxImlBbnFA+smihq/Qko6cnePbVazxhIfgLMUnYNVHji02ELONyqV4GSRa6jRuj2xwWZlHY8aee
0m3YQgcEC0DhOmO77QbMnKn79j2kyXhIas955wGXXx7vEnvwwSrWv/3t8n2mxdD97nc6Hn7pS8E5X8SZ63MU4cLivqDbuhU48kjgJz8JrjWiFiN9QVeMhW7ffYPjtRJ0Np8UyR3nyulyCVDQkTxYx9uyRePJzjoLmDZNj4XjX8z9K0rQjRunndZi2swaAASDWSkWujVr+h/zHzw2cU+y0AHBzVXqam0z4E9O6qVYcxRJFjqLI/vLXwL3jnwWuri+UelSDIBOQi0GIZ/rcNpC3lu26PW7766vjzsu//W+YHrttdzByN+3+7q1Nbdv+C6XhQq60aN1InvCCcEgH56oZ9FCl6+8R6m0tOj3bHFejWShSyPezz1Xt7Nm9T9ni37r11fHQgdEPyf950aShe5rX9OkSFGCzhIbHXBAfBttQSTO4kPqiz33VDe9fGPLzJnlFSpmobNnho0NQK6Fzq/VGmbo0CA+NexyuXVr7qJDUpZLe9ZPmFCYhc7uNf+7qZXg+dzndDtyZO5z5vXXaaErBxR0KbCO949/6A1u1jkgN734vHlBpzRB51sqPvxhnTha1knLmAQED4hSBF04FsdvOxBMACzeKd9KrP0eLXTJ2AOzpaW+C693dGj/soHCRIX1veee0+0OO8Rb6Pz+1Nqqq6If/3j/z6k0fmxo3AS0qyv9wGcWuqlT9f4477z81/sTzlWrcoWvf8/4ExB/8pjPQpdkIbGVyHvvBR56SPfDizlZstCZdbWSSZc6OxvXQpdGCM+Yod9vVImKclvo4uITP/Sh4PlYqoUO0D4fdrkEdDK3alX/zNM+5rJMCx2Jw2LoLK7TXziwey/JE8WPT129WhfxfZdLn3wxdGvWBO7CU6emH9d6e3Pf15IAVbpcUhzf/a4upowblztud3eXPtf05yr1lsugWlDQpcA6nk2a/BvbJsUPPKADSFjQ+WbgXXfNfV9/0jVsWG7mvGKIKkbs3ySDB2tHp8tlebHvsZ6tc0AgtExMrF6tA5JZKywV/pAh6Sx0ra26KnrRRdGfU0n81bi4fjxpkk7s0mAWOkC/jyRh7t+7776b+z35+76g8wsw57PQJQ22/jPFnkVhd+ssCbqDD9btbrtV7jNM0DWiha7U2ml2L82dqxavUi10cf13wIDcBExhCrHQASo+N26MTtq10075E9/YM5CCjsRhFrq33lKx4Pe9oUOB664Dnn8+/3tYP1+9Wu+tKVNyXS598sXQTZ4chGjss4++X9QCvrFmjWYm910uAeCcc3LbVW1EgvHLf85s2FD6XNMfs8P1fpsFCroU2ITR/Jb9ASSc/tg6pQXfR02+omhvz82cVwxRgi48cenoCCw0aVwuKeiS8RNi1DNhQbdqlVqxzBXjmGN0BW39+sCVOF8Mne2H69VU20KXT9CtW5cuMYpZ6NISFkz+7+6yS7DvT1JvuAE4/3wdePJZ6JLwnyl2L5ugs/s9Sy6XF1yg7r5hS2856ewEXnoJ+MEPVFhEZf/NGuUSdOHvolyr2yee2P+Y74Kcrx1pLHQvvqhu1/7iTqH4YQ+E+FgM3fr10c+LCy8MQmzisP5qdeOmTMld9I+6Fugv6Hzxd9BBam3/61/jP3fmTI35W7o0932vuEKP+e6jtSL8DCjnXNOyVTcbFHQpGDxYJ71Rgu4XvwCuuSbw27dOahY6E3pxmMm+rS23WHExhB8Qd97ZP0OgP9mmha48mOipdzO//e/NOvTqq/2zgo0apYOYWaPTxNBNmZJbKLXagi6uj5p7WVSyoDC+hS4N4Qmn/7t+8Ll/fOzYoI5eKYLOf6bY79rfaNnesmShE+nvvVBuRoxQz4S33lIXxUon7akGcQW8CyW8EFWOuOnt23X8CWNjZ9Tily+u4vqvf/zJJ3VbTGbDBQuAo49mmQISj2+hK3bRwPq5L+iKsdD57Lefbp95Jvr8unXAypW6/9prufeaSHlKCpUDi4E3yjHXfPXVwAOtGaGgS0lbW+C37Au6
HXcEvvrVwNw7YIBeG+VyGcWBB+rWiiGX0+Vy7tz+19hku709v2sXBV16bGKVFQvdBRfoCt+qVblxnEAwcNmAkBRDZ/iW4Gq6XLa357fQAerqaK52cRRqoQsLAv8+GTcu2I9y3ezoyHW57O0tLPYyzkI3ZkxwLkuCrpKYtdTvk41SV9MsdOUspgyUx7o7YEB0n7bYxahnpd+v49rg33dPPKHbqLjAJM48E7j//sJ/jzQPfgxdsRZ96+eLF+sYMWFCblIUn7SCbtw4HW+feUbHNSsWbrzwQu7revXWCP/95ZhrTpzYGPHRxUJBl5K2tmBSmDQhGDasv4Uu7JZm3Hsv8KtfaSeshMtlGJvYJAlNCrr0ZEXQTZumk6ynn1YBsHZttIUOCGLPkmLooqiGoLOJ3UEHxV/jx2Q98ED+9yvUQhcejAqJ/xo9OrfsQE9PYdbdMWOCYuu+hW7ixMpN8rPKokVaSsb68cCBQUH6rFMul8swlgSiEtjCT9Sz0hdrcZPQMWOCZ5bFEJWz9hghhrlclkPQPfZYUMcuLimKv8De2qpicvv2/pYsQF0KX3lFDQKzZ+cmlLKxxT6nXuclYcHKuWbpUNClxLdMJAm69nZ1qdq+XSdv990H/Pa30deOHAmccorul8PlMpzxK4xNtpNcQSno0mOTxXp3uezsBO64Q/efflpdzwq10KUpW1ANQTd7NvCpT6kLYxw77qipzYcN67+KGaZQC104IN0SexgrV2r5hyi6unKzUpqgu/vu3MLo+ZgzR7e+hW7ixOC7zxcw30yMHw985jNBv500qTESogCBoCt3dtBKuizZ6nmSBTlfDN3KlcBRR+nrzs7qlEkhzYfvclmsoPP78fnn69bGTT9Dehh7XvX2Ri+wjB+vY4gtvPriyAwPFsMaJQjrgfCiKO/j0kkl6ETkKBFZJiLLReQ7EedbROT2vvNPiMik0PmJIrJFRL5ZnmZXH7sJhw9PrnHR1qaBtIDemMcem+6BUA6XyyTLGy105cdET726NviYYDNhZy6/4fOrVumKYXh1b+DAoP/X0kI3apSKtCTL2AEHaKzMH/+Y/7pCLXTh1VU/gyWgwuGjH43+3a4ujce1iXhPj37Pn/kM8PnPp/t8++57evR9TNBZXwzXx2x27Pvy63JmnXL+Lc88owuPc+ZoYqRKYWI6PJkLk/QstfGL1jlSKYYM0ef85s2lW+iAwEvL5pJ+3HkYv/Zx1ALL+PHqYWP4C3hvvKHj9Gmn6es//anwdlcDW5Q0Dj20Nu1oJBIFnYgMBHA9gKMBTAVwioiEwyrPBrDRObcbgKsBXBk6fzWABKen+sZusDTxF21tmoYVKMxqUw6Xy7SCjha68mGT6EoWRi4X1j9uv12tc1Om5J63frFmja6YRcXBWJ+upaArhF12yT94btumK7GFWOguugg49VTg0kuBH/2osBi4ri5debVnRKEul0CuoNu4Ue/9iRODQTGcDKnZoaDLzz776MLjPfcUF5OWFrPQ+TGkUSQJOqsFG16QIqRcDBsW1Cssh6CzPj1woO6Ha4f6mOfMypXR8d/jxuUu/vuC7vXXdRw/5BB9bQn76o3TTstd2GnWzJTlJE2Op/0BLHfOrQAAEfk1gDkA/AoccwBc2rd/F4DrREScc05ETgCwAkCKCK/6xSYEaQRda6tm2/F/Lw3lcLns6sp/jU22k1wzTcCWI+NZo2OC7v33a9uONPjZuo44or8QaW/XieLbb+cm9/Bpa9Pzcb759eaz7xdUj2qb1dwrZPFihx2AW28trj3jx+u2u1snCsUIukGD1ILa2xtkuJw4UYXc5s2NJVzKgX2/WbCipyWLf4sJOj/Lq8+ee2qMY5IXzMUXa1r2els8Io2DH25QrDtg3FjY1pZf0FlZgZdeCixsI0YAp5+u+zaGGGEL3ZgxaiBYsaK+k0C1tmrYRJKBgaQjjaAbD8CvS98N4IC4a5xz74vIZgAjRaQHwLcBHA4g1t1SRM4FcC4A
TJw4MXXjq4kJmzQZdEqx0JXqcpkk1GwCnzQZoIUuPfbgf++92rYjDf5K47Rp0deMHZtf0LW26k8hVqlaYpO+t99WIRROVW7uk4VY6ErBFl26u9UtsxhBJxJkQlu2TI9ZRkeKuf7Y99to2T9vvFHdirOCjU9xFrrf/U6zT6YZxxhzQyqJPx6EY8nTYvOs8O+3twfxb1HsvLNa8n77W+C224Czz9YSWUY+Qff668E8tZLW9nJhsYWkdNLE0EVN28Jh2HHX/BDA1c65Lfk+wDn3M+fcdOfc9NFJPoM1wgRdmhuktTW4wartcpkkwEwwJgm6nXbSa7hykkyWLHS+mIkrimpCLknQhVm8GLj55tLaVwlM4Nx9t678h+v3FGOhKwUTdFYGpRhBB+jqb2+vJrgZMiRwQyP9se83yfKTNc49Nz5Wsx6xvn/YYdHnx40Dzjmneu0hJA5fhBW7SGYJSSZMyD3e1ha4c0YxZIjONS32++STc8+HBd0Wb4ZtFjrSfKSx0HUD8LtjF4C1Mdd0i8ggAB0ANkAteXNF5CoAnQA+EJFe59x1Jbe8ypj1JU0QdprU7lG0tATJVArh4os1hW0aQWeCMWml+vjj1ZWrWN/xZiJLFjqfcgu6Aw+sz5gWs9DddptuH3kkt/i3DYbVstDZ6qkN6D09xd1nZqF78UVg770bz/pUThrVQpc1Ro7UhYxmrhVFsoE/HhQr6PbYAzjjjP6JhmyeNnx4fEbiyZODWonhwubjx+uiyLJlej/Ze2zfrklUeH81J2ksdE8B2F1EdhaRIQDmAVgYumYhgDP69ucCeMQpM51zk5xzkwBcA+DfsyjmgGDylUbQxRVfTqJYC92Pfwz85jdqfWtvB846C/j5z6OvPessdWexUglxiHCVJy1ZFXRx3s1Jgq6tLVuxlTYYW4r/bds0gc099+jWMkJWy1Vx8GC9Ty1tdbEWutZWDSp/5hlgv/3K28ZGwzKKNpqFLot0dfH/QOof30JXrMvl4MHATTf1Xzy17My77hr/u/5cM+y4NmgQ8NBDwOOP62sTdC+9pONcXDgFaWwSLXR9MXFfBvAggIEAFjjnlorIZQCWOOcWApgP4JcishxqmZtXyUbXAivWmNblMmo/iWKSojzxRO7r9nZg/vz46/fYI4jvI+UhSy6XgMaebNqUW8jXx4Tc2LHR5+MsdPWKWeisVs+2bcCddwLz5gHXXBMI22omWNhhh9KyXALqcrl4sf4vmdUyP2k9EwghBCiPhS6Oww7TMeiDDzTTdFSGbD/BXdhCZ9jcwwSdhRNwga85SeNyCefc/QDuDx37N2+/F8BJCe9xaRHtqxssdWyhFrpC4nIKTYryhz8As2blHmMSk+pjmayyYqF7+eXAWhWFDSRxGVNPPTVbiwIm1My1src3yLK3eHGQXKGayURGjMi10BWTGbS1FXjuOd0/8sjyta0RsXuTliFCSBrKEUMXh5WX6eyMr5Hqj79xC34mOn1BN3Ro/7qopDlIJegIsHAhcP318SslPv7NV0gmrkJdLqNSP1PQVR/LyPbpT9e2HWlJ6sPHHQfccgvw8Y9Hn7fUyVnBBmNLE71pU3DvrFlTfZdLIFfQ9fYWb6EDdDWW7tH5sYW4eq3JRAipL3xBV26PlF13BRYsyF9MO6kEFaCul34Svr//XZNjhTM5k+aA//aUzJrV3xoWh2+hK8SNq1CXS8ug5MMJS/Xp6NAEMjvuWOuWlIeWFi362SiYUDMrzfr1wcLHsmWBuCs2TqIYRowAli/X/VJi6IBsZTmsFUcfDTz5JDB9eq1bQgjJAr7LZSVK9Jx5Zv7zaQQdoOPW/PnAV76inmRWlJw0H2mSopACKTaGrlCXSz870q23al2TvfZK//ukfEyYQHeuemXw4Nz7cP16zQQGaLKj1avV2lXN+Cqz0L37rlroSkkyk8YNnKjFOSu1EwkhtaXnH6hrAAAK/klEQVSaC3xRhEsTxPHGGzqWnHYaSxY0OxR0FcCf
PBYygUjjctnTA3z/+5rdzhd0xxzDlRlC4vAt5S+/DCxdmvu62sW4TdA99phmYIxzb81HIYmaCCGEpKdaZWzisEW+uORlYd54QxcoWbKgeaHLZQUodrW9pUXdwpyLF4Lz5wNXXKE+0mZRePzxII6LENKfjo7AKrd2rf50dKi75UsvVTfDJaBZLrdu1dIJQ4akd+f2WbdOtxR0hBBSXurB4+aPf0x2vZw9W2urLl+umbZpoWteaKGrAGahS7uyYphAMyvde+/1LzRu7/3KK2qhGzQImDGj+LYS0gxEDXLm3rxmTW0sdIAmWzrooOKSGZlApaAjhJDG4+CDgV12yX/N73+vtYgtRpyCrnmhoKsAZqErNCanpUW3V18NPPwwcPnlWlDyssv0+PvvByJx7VoVdMOHMy6EkCQsjfMppwDd3bp/ySXB+VoJutWriy8Cu//+um2UZDyEEEIKQwT42MeC1xR0zQtdLiuAWdEKFXR2/SWXqLn/5JP19eLFuh0zJkh1vm6dmuJrHbhLSBYwQbd9uwabW4bYyy7T/WoLOl+EFVsz6IEHVJwO4LIcIYSUnQcfzMaCme+lRUHXvFDQVYBiLXT+9WPHBklPrE6WiTlABZ1Z6Agh+fnIR3S7dq1uTQSNGaP3UrUFnVnXgOIF3Q476A8hhJDyc8QRtW5BOnyXfQq65oVruxWgWAvdjBnAzJlqnXMuEHK29dm4UQskU9ARkozFIVjNOcOKrFdb0PkDcLGCjhBCCAE0jm7oUGDkyFq3hNQKCroKYEKuUEG3997AokXAN76hCQ82bdLjUYIOUMsCBR0hyXzkI8CFFwK33JJ73GIPapFY6Kqr1G167NjqfzYhhJDG4Zvf1LJWhSbjI40DBV0FsOQmxSY7GDdOMxatWKGv33kH+OCD/tetXUtBR0gaBgwArrsuN3gcAG66SQfBU0+tfpu+9S1NisKkRoQQQggpBcbQVYAJE4D77tOUs8VgK/a+y6VfRNzYvJmCjpBSGDBA3VQIIYQQQrIKBV2FOPbY4n933LhgX0Sz8FnNqTAUdIQQQgghhDQvdLmsQ3xB19WlW6udFabayRwIIYQQQggh9QMFXR0yfnywnyToRo+ufHsIIYQQQggh9QkFXR0yeHCwnyToLO06IYQQQgghpPmgoKtTzPJmgm716ujrKOgIIYQQQghpXijo6pSwoPMtdG1t/a8jhBBCCCGENB+pBJ2IHCUiy0RkuYh8J+J8i4jc3nf+CRGZ1Hf8cBF5WkSe69vOLm/zG5cxY3Tb2qpbX9D5hSNpoSOEEEIIIaR5SRR0IjIQwPUAjgYwFcApIjI1dNnZADY653YDcDWAK/uOrwdwnHNuGoAzAPyyXA1vdH76Uy2CfPjh+tp3udy+PdinoCOEEEIIIaR5SWOh2x/AcufcCufcNgC/BjAndM0cADf37d8F4FAREefcs865tX3HlwIYKiIt5Wh4o7PPPsCzzwI77wy0tAAbNgAjR+q5ffcNrmtvr037CCGEEEIIIbUnjaAbD8BPydHddyzyGufc+wA2AxgZuuZEAM86594Nf4CInCsiS0RkyZtvvpm27U3BwIHAlCm6v99+wKJFwMKFtW0TIYQQQgghpD5II+gk4pgr5BoR2RPqhvmlqA9wzv3MOTfdOTd9NLN89GOvvXQ7bRowcyYwYkRt20MIIYQQQgipDwaluKYbwATvdReAtTHXdIvIIAAdADYAgIh0AfhvAKc7514pucVNyLRpuVsAuOsuYNu22rSHEEIIIYQQUh+kEXRPAdhdRHYGsAbAPACfC12zEJr0ZDGAuQAecc45EekE8L8ALnHOPV6+ZjcXn/wkMGgQMGNGcOzEE2vXHkIIIYQQQkh9kOhy2RcT92UADwJ4AcAdzrmlInKZiBzfd9l8ACNFZDmArwOw0gZfBrAbgO+LyF/6fsaU/a9ocA46CNi0CZg8udYtIYQQQgghhNQT4lw4HK62TJ8+3S1ZsqTWzSCEEEIIIYSQmiAi
Tzvnpqe5NlVhcUIIIYQQQggh9QcFHSGEEEIIIYRkFAo6QgghhBBCCMkoFHSEEEIIIYQQklEo6AghhBBCCCEko9RdlksReRPAq7VuRwSjAKyvdSNIQ8M+RioJ+xepJOxfpJKwf5FKUq/9ayfn3Og0F9adoKtXRGRJ2tShhBQD+xipJOxfpJKwf5FKwv5FKkkj9C+6XBJCCCGEEEJIRqGgI4QQQgghhJCMQkGXnp/VugGk4WEfI5WE/YtUEvYvUknYv0glyXz/YgwdIYQQQgghhGQUWugIIYQQQgghJKNQ0BFCCCGEEEJIRqGgCyEiR4nIMhFZLiLfiTjfIiK3951/QkQmVb+VJKuk6F9fF5HnReRvIvKwiOxUi3aS7JLUx7zr5oqIE5FMp2om1SVN/xKRk/ueY0tF5FfVbiPJLinGyIki8qiIPNs3Th5Ti3aSbCIiC0TkDRH5e8x5EZFr+/rf30Rk32q3sVgo6DxEZCCA6wEcDWAqgFNEZGrosrMBbHTO7QbgagBXVreVJKuk7F/PApjunNsbwF0ArqpuK0mWSdnHICLDAXwFwBPVbSHJMmn6l4jsDuASAJ9wzu0J4GtVbyjJJCmfX98DcIdzbh8A8wD8Z3VbSTLOTQCOynP+aAC79/2cC+C/qtCmskBBl8v+AJY751Y457YB+DWAOaFr5gC4uW//LgCHiohUsY0kuyT2L+fco865rX0v/wygq8ptJNkmzTMMAC6HLhb0VrNxJPOk6V9fBHC9c24jADjn3qhyG0l2SdO/HIAP9e13AFhbxfaRjOOcWwRgQ55L5gC4xSl/BtApImOr07rSoKDLZTyA1d7r7r5jkdc4594HsBnAyKq0jmSdNP3L52wAD1S0RaTRSOxjIrIPgAnOuf+pZsNIQ5DmGTYZwGQReVxE/iwi+VbDCfFJ078uBfB5EekGcD+Ai6rTNNIkFDpPqxsG1boBdUaUpS1c1yHNNYREkbrviMjnAUwHcEhFW0Qajbx9TEQGQF3Fv1CtBpGGIs0zbBDUXelTUA+Dx0RkL+fcpgq3jWSfNP3rFAA3Oef+Q0RmAPhlX//6oPLNI01AZuf4tNDl0g1ggve6C/3N+f9/jYgMgpr885lvCTHS9C+IyGEA/hXA8c65d6vUNtIYJPWx4QD2AvAHEVkF4EAAC5kYhaQk7Rh5r3PuPefcSgDLoAKPkCTS9K+zAdwBAM65xQCGAhhVldaRZiDVPK0eoaDL5SkAu4vIziIyBBpwuzB0zUIAZ/TtzwXwiGN1dpKOxP7V5w53I1TMMfaEFErePuac2+ycG+Wcm+ScmwSN0zzeObekNs0lGSPNGHkPgFkAICKjoC6YK6raSpJV0vSv1wAcCgAiMgUq6N6saitJI7MQwOl92S4PBLDZObeu1o1KA10uPZxz74vIlwE8CGAggAXOuaUichmAJc65hQDmQ038y6GWuXm1azHJEin7148BDANwZ1+undecc8fXrNEkU6TsY4QURcr+9SCAI0TkeQDbAXzLOfdW7VpNskLK/vUNAD8XkX+BusJ9gYvqJC0ichvUHXxUXxzmDwAMBgDn3A3QuMxjACwHsBXAmbVpaeEI7wNCCCGEEEIIySZ0uSSEEEIIIYSQjEJBRwghhBBCCCEZhYKOEEIIIYQQQjIKBR0hhBBCCCGEZBQKOkIIIYQQQgjJKBR0hBBCCCGEEJJRKOgIIYQQQgghJKP8HyqXdZ4hx218AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x1bf079b7898>"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plt.figure(figsize=(15, 3))\n",
+ "plt.plot(np.linspace(0,1,1001),variance,\"b\")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "<img src=\"./4.png\" width=\"85%\" align=\"left\">"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "#### Findings:\n",
+ "\n",
+    "The different factors $(U^{20}_i)_{1≤i≤20}$ are highly correlated. \n",
+    "\n",
+    "Short-term factors: fast mean reversions, responsible for the ‘roughness’. \n",
+    "\n",
+    "Long-term factors: slower mean reversions, determining the level of the variance process"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### An example to use the module to calculate option price by simulation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 116,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([[0.05 , 0.05121006, 0.05914061, ..., 0.05225546, 0.04408349,\n",
+ " 0.02695603],\n",
+ " [0.05 , 0.04724264, 0.05275854, ..., 0.01732654, 0.01456418,\n",
+ " 0.01276219],\n",
+ " [0.05 , 0.05668886, 0.04864526, ..., 0.04017572, 0.05075225,\n",
+ " 0.04905138],\n",
+ " ...,\n",
+ " [0.05 , 0.04062341, 0.03958003, ..., 0.04078279, 0.05564083,\n",
+ " 0.05612033],\n",
+ " [0.05 , 0.04396372, 0.03284474, ..., 0.06108645, 0.06617895,\n",
+ " 0.06299508],\n",
+ " [0.05 , 0.04195399, 0.05705365, ..., 0.02939353, 0.01993236,\n",
+ " 0.01616433]])"
+ ]
+ },
+ "execution_count": 116,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "h1.vo_paths(texp=1, N=1000)[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 117,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "array([16.1606798 , 21.48967354])"
+ ]
+ },
+ "execution_count": 117,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "h1.mc_price(strike=np.array([85,78]), spot=98, texp=1, N=1000, cp=1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Comparison Between Lifted Heston Model and Rough Heston Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "The lifted Heston model nests, as extreme cases, the classical Heston model (when $n=1$) and the rough Heston model (when $n$ goes to infinity)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In order to visualize the convergence to rough Heston model when $n$ goes to infinity, we generate our benchmark implied volatility surface, for $9$ maturities $T \\in \\{1w,1m,2m,3m,6m,9m,1yr,1.5yr,2yr\\} $, and with $26$ strikes $K$ per maturity."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The parameters of rough Heston model is given by\n",
+ "$$V_0=0.02,\\; \\theta=0.02,\\; \\lambda=0.3,\\; \\nu=0.3,\\; \\rho=-0.7,\\; H=0.1$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The implied volatiliy surface can be computed by Fourier inversion techniques. For simplicity, we only consider the call option situation."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We first define:\n",
+ "$$x=log(S_0/K),\\quad y=log(S_T/K)$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Then the price of the call European option will be\n",
+ "$$v(y,T)=K(e^y-1)^{+}$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Using the method of inverse Fourier integral via cosine expansion, we have:\n",
+ "$$price\\;of\\;call\\;option=v(x,T,V_0)\\approx Ke^{-r\\Delta t}\\cdot Re \\left\\{ \\dfrac 1 2 U_0+\\sum_{k=1}^{N-1} \\phi_{hes}(\\dfrac {k\\pi}{b-a};V_0)e^{ik\\pi\\frac {x-a}{b-a}}U_k\\right\\}$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "where, $\\phi_{hes}(\\omega;V_0)$ is the characteristic function of the log-asset price $y$, when $x=0$, $[a,b]$ should cover $[x-10\\sigma,x+10\\sigma]$ (or other sufficiently large interval), with $\\sigma$ denoting the standard deviation of the density $f(y|x)$."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$U_k$ is given by:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$U_k=\\frac 2 {b-a}(\\chi_k(0,b)-\\psi_k(0,b))$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$\\chi_k(c,d)=\\frac 1 {1+(\\frac {k\\pi}{b-a})^2 }[cos(k\\pi\\frac {d-a}{b-a})e^d-cos(k\\pi \\frac{c-a}{b-a})e^c+\\frac {k\\pi}{b-a} sin(k\\pi\\frac{d-a}{b-a})e^d-\\frac {k\\pi}{b-a} sin(k\\pi \\frac {c-a}{b-a})e^c]$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$\\psi_k(c,d)=[sin(k\\pi\\frac {d-a}{b-a})-sin(k\\pi \\frac {c-a}{b-a})]\\frac {b-a}{k\\pi},\\quad for\\; k\\neq 0$$\n",
+ "$$\\psi_0(c,d)=d-c,\\quad for \\; k=0$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$\\phi_{hes} (\\omega;V_0)$ is derived from the fact that:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$\\mathbb{E}[ {exp}(u{ log} S_T)]= {exp}(u {log} S_0+\\int_0^T F(u,\\psi(s,u))g_0(T-s)ds)$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$F(u,v)=\\frac 1 2 (u^2-u)+(\\rho \\nu u-\\lambda)v+\\frac {\\nu^2}{2}v^2$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$g_0(t)=V_0+\\lambda\\theta\\int_0^t \\frac {s^{H-1/2}}{\\Gamma(H+1/2)}ds$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$\\psi(t,u)=\\frac 1 {\\Gamma(H+1/2)}\\int_0^t (t-s)^{H-1/2}F(u,\\psi(s,u))ds$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$\\psi$ solves the fractional Riccati equation. We can use the Adams Predictor-Corrector scheme for the discretization of the fractional Riccati equation. The procedure is shown below:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$\\psi(t,a)=\\dfrac 1 {\\Gamma(\\alpha)}\\int_0^t (t-s)^{\\alpha-1}F(a,\\psi(s,a))ds$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Over a regular discrete time-grid with mesh $\\Delta$, $0\\leq t_0\\leq...\\leq t_n\\leq t$,"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$\\psi(t_{k+1},a)\\approx \\hat \\psi (t_{k+1},a)=\\sum_{0\\leq j\\leq k} a_{j,k+1}F(a,\\hat \\psi(t_j,a))+a_{k+1,k+1}F(a,\\hat \\psi^P(t_{k+1},a))$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "with\n",
+ "$$a_{0,k+1}=\\dfrac {\\Delta^\\alpha}{\\Gamma(\\alpha+2)}[k^{\\alpha+1}-(k-\\alpha)(k+1)^\\alpha]$$\n",
+ "$$a_{j,k+1}=\\dfrac {\\Delta^\\alpha}{\\Gamma(\\alpha+2)}[(k-j+2)^{\\alpha+1}+(k-j)^{\\alpha+1}-2(k-j+1)^{\\alpha+1}]\\quad 1\\leq j \\leq k$$\n",
+ "$$a_{k+1,k+1}=\\dfrac {(\\Delta t)^\\alpha} {\\Gamma(\\alpha+2)}$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$b_{j,k+1}=\\dfrac {\\Delta^\\alpha}{\\Gamma(\\alpha+1)}((k-j+1)^\\alpha-(k-j)^\\alpha) $$\n",
+ "$$\\hat \\psi^P(t_{k+1},a)=\\sum_{0\\leq j\\leq k} b_{j,k+1}F(a,\\hat \\psi(t_j,a))$$"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Following the above steps, we can derive the price of the call option under the rough Heston model. Since we know the call option price formula of the BSM model, we can derive the implied volatility surface through Newton's method with a good initial guess."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Call option price formula of BSM model:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "$$C=S_0N(d_1)-e^{-rT}KN(d_2)$$\n",
+ "$$d_{1,2}=\\dfrac {log(S_0e^{rT}/K)}{\\sigma \\sqrt{T}}\\pm\\dfrac 1 2 \\sigma \\sqrt{T}$$"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 121,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import cmath\n",
+ "from scipy.special import gamma"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Set parameters"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 122,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "V0=0.02\n",
+ "θ=0.02\n",
+ "λ=0.3\n",
+ "ν=0.3\n",
+ "ρ=-0.7\n",
+ "H=0.1\n",
+ "S0=100"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "### Calculation of implied volatility surface of rough Heston model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Since the calculation speed of Python is relatively slow, we can only choose to run 26 maturities (and this will take many hours). Meanwhile, the number of time steps of the Adams Predictor-Corrector scheme is set at 60 (200 in the paper). The N of the cosine method is set at 160, same as in the paper. However, to reduce running time, we will stop the calculation once the variation of price in the last 5 steps is less than $0.005\\% S_0$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 79,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "104.2420576193561 0.013577110154244653\n",
+ "109.04631784921234 0.0021397512390339025\n"
+ ]
+ },
+ {
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;32m<ipython-input-79-a0a987d36b05>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 92\u001b[0m \u001b[0mcheck_end\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m5\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 93\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mN\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 94\u001b[1;33m \u001b[0mchar_func\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcharacteristic_func\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcomplex\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpi\u001b[0m\u001b[1;33m/\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mT\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mH\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mρ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mλ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mν\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mV0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mθ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mgamma_coeff\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0msteps\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m60\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mintervals\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m70\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 95\u001b[0m 
\u001b[0mprice_add\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mchar_func\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mU_k\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexp\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcomplex\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpi\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m/\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 96\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m==\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;32m<ipython-input-79-a0a987d36b05>\u001b[0m in \u001b[0;36mcharacteristic_func\u001b[1;34m(u, T, H, ρ, λ, ν, V0, θ, gamma_coeff, steps, intervals)\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[0mleft_s\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mds\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 41\u001b[0m \u001b[0mright_s\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mleft_s\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mds\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 42\u001b[1;33m \u001b[0mψ_right\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mψ\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mright_s\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mu\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mH\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mρ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mλ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mν\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0msteps\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 43\u001b[0m \u001b[0mintegral_left\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mu\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mψ_right\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mρ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mλ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mν\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mg0\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mT\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0mleft_s\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mV0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mλ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mθ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mH\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mgamma_coeff\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m 
\u001b[0mintegral_right\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mu\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mψ_right\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mρ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mλ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mν\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mg0\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mT\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0mright_s\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mV0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mλ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mθ\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mH\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mgamma_coeff\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;32m<ipython-input-79-a0a987d36b05>\u001b[0m in \u001b[0;36mψ\u001b[1;34m(t, u, H, ρ, λ, ν, steps)\u001b[0m\n\u001b[0;32m 17\u001b[0m \u001b[0mψ_series\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mψ_series\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcomplex\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 18\u001b[0m \u001b[0mψ_P\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msteps\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 19\u001b[1;33m \u001b[0mψ_P\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mψ_P\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcomplex\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 20\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mk\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msteps\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 21\u001b[0m \u001b[0ma\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mk\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
+ ]
+ }
+ ],
+ "source": [
+ "def g0(t,V0,λ,θ,H,gamma_coeff):\n",
+ " ans=V0+λ*θ/gamma_coeff/(H+0.5)*pow(t,H+0.5)\n",
+ " return ans\n",
+ "\n",
+ "def F(u,v,ρ,λ,ν):\n",
+ " ans=0.5*(u*u-u)+(u*ρ*ν-λ)*v+0.5*ν*ν*v*v\n",
+ " return ans\n",
+ "\n",
+ "#Adams Predictor-Corrector scheme\n",
+ "def ψ(t,u,H,ρ,λ,ν,steps):\n",
+ " α=H+1/2\n",
+ " Δ=t/steps\n",
+ " pow_temp1=pow(Δ,α)\n",
+ " gamma_coeff2=gamma(α+2)\n",
+ " gamma_coeff1=gamma_coeff2/(α+1) #gamma(α+1)\n",
+ " ψ_series=np.zeros(steps+1)\n",
+ " ψ_series=np.array(ψ_series,dtype=complex)\n",
+ " ψ_P=np.zeros(steps+1)\n",
+ " ψ_P=np.array(ψ_P,dtype=complex)\n",
+ " for k in range(steps):\n",
+ " a=np.zeros(k+2)\n",
+ " b=np.zeros(k+1)\n",
+ " for j in range(k+1):\n",
+ " a[j]=pow_temp1/gamma_coeff2*(pow(k-j+2,α+1)+pow(k-j,α+1)-2*pow(k-j+1,α+1))\n",
+ " b[j]=pow_temp1/gamma_coeff1*(pow(k-j+1,α)-pow(k-j,α))\n",
+ " a[0]=pow_temp1/gamma_coeff2*(pow(k,α+1)-(k-α)*pow(k+1,α))\n",
+ " a[k+1]=pow_temp1/gamma_coeff2\n",
+ " for j in range(k+1):\n",
+ " F_value=F(u,ψ_series[j],ρ,λ,ν)\n",
+ " ψ_P[k+1]+=b[j]*F_value\n",
+ " ψ_series[k+1]+=a[j]*F_value\n",
+ " ψ_series[k+1]+=a[k+1]*F(u,ψ_P[k+1],ρ,λ,ν)\n",
+ " return ψ_series[steps]\n",
+ "\n",
+ "def characteristic_func(u,T,H,ρ,λ,ν,V0,θ,gamma_coeff,steps,intervals):\n",
+ " ds=T/intervals\n",
+ " integral_value=0\n",
+ " ψ_left=ψ(0,u,H,ρ,λ,ν,steps)\n",
+ " for i in range(intervals):\n",
+ " left_s=ds*i\n",
+ " right_s=left_s+ds\n",
+ " ψ_right=ψ(right_s,u,H,ρ,λ,ν,steps)\n",
+ " integral_left=F(u,ψ_right,ρ,λ,ν)*g0(T-left_s,V0,λ,θ,H,gamma_coeff)\n",
+ " integral_right=F(u,ψ_right,ρ,λ,ν)*g0(T-right_s,V0,λ,θ,H,gamma_coeff)\n",
+ " integral_value+=(integral_left+integral_right)*ds/2\n",
+ " ψ_left=ψ_right\n",
+ " ans=np.exp(integral_value)\n",
+ " return ans\n",
+ "\n",
+ "def χ_k(k,c,d,a,b):\n",
+ " sincos_1=k*np.pi*(d-a)/(b-a)\n",
+ " sincos_2=k*np.pi*(c-a)/(b-a)\n",
+ " ans=np.cos(sincos_1)*np.exp(d)-np.cos(sincos_2)*np.exp(c)\n",
+ " ans+=k*np.pi/(b-a)*(np.sin(sincos_1)*np.exp(d)-np.sin(sincos_2)*np.exp(c))\n",
+ " ans/=(1+pow(k*np.pi/(b-a),2))\n",
+ " return ans\n",
+ "\n",
+ "def ψ_k(k,c,d,a,b):\n",
+ " sincos_1=k*np.pi*(d-a)/(b-a)\n",
+ " sincos_2=k*np.pi*(c-a)/(b-a)\n",
+ " if (k==0):\n",
+ " ans=d-c\n",
+ " else:\n",
+ " ans=(np.sin(sincos_1)-np.sin(sincos_2))*(b-a)/k/np.pi\n",
+ " return ans\n",
+ "\n",
+ "def U_k(k,a,b,call=1):\n",
+ " if (call==1):\n",
+ " ans=(χ_k(k,0,b,a,b)-ψ_k(k,0,b,a,b))*2/(b-a)\n",
+ " else:\n",
+ " ans=(-χ_k(k,a,0,a,b)+ψ_k(k,a,0,a,b))*2/(b-a)\n",
+ " return ans\n",
+ "\n",
+ "maturities=[7/365,1/12,1/6,1/4,1/2,3/4,1,1.5,2]\n",
+ "log_moneyness_all=np.zeros(shape=(9,26))\n",
+ "impvol=np.zeros(shape=(9,26))\n",
+ "gamma_coeff=gamma(H+1/2)\n",
+ "for i_T in range(9):\n",
+ " T=maturities[i_T]\n",
+ " left_log=-1.2*np.sqrt(T)\n",
+ " right_log=0.3*np.sqrt(T)\n",
+ " strikes=np.linspace(left_log,right_log,26)\n",
+ " for i_strike in range(26):\n",
+ " log_moneyness=strikes[i_strike]\n",
+ " log_moneyness_all[i_T,i_strike]=log_moneyness\n",
+ " x=-log_moneyness\n",
+ " K=np.exp(-x)*S0\n",
+ " a=x-1.5\n",
+ " b=x+1.5\n",
+ " N=160\n",
+ " price=0\n",
+ " check_end=np.zeros(5)\n",
+ " for i in range(N):\n",
+ " char_func=characteristic_func(complex(0,i*np.pi/(b-a)),T,H,ρ,λ,ν,V0,θ,gamma_coeff,steps=60,intervals=70)\n",
+ " price_add=char_func*U_k(i,a,b,1)*np.exp(complex(0,i*np.pi*(x-a)/(b-a)))\n",
+ " if (i==0):\n",
+ " price+=price_add.real/2\n",
+ " else:\n",
+ " price+=price_add.real\n",
+ " flag=False\n",
+ " if (i<5):\n",
+ " check_end[i]=price*K\n",
+ " else:\n",
+ " for i_temp in range(4):\n",
+ " check_end[i_temp]=check_end[i_temp+1]\n",
+ " check_end[4]=price*K\n",
+ " check_min=check_end[0]\n",
+ " check_max=check_min\n",
+ " for i_temp in range(5):\n",
+ " check_min=min(check_min,check_end[i_temp])\n",
+ " check_max=max(check_max,check_end[i_temp])\n",
+ " if (check_max-check_min)<0.005*S0/100:\n",
+ " flag=True\n",
+ " if (flag==True):\n",
+ " break\n",
+ " price=check_max\n",
+ " # print(K,price)\n",
+ " model=pf.Bsm(0.3)\n",
+ " impvol[i_T,i_strike]=model._impvol_newton(price,K,S0,T)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Store the result of rough Heston model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 123,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#import xlwt\n",
+ "#wb=xlwt.Workbook()\n",
+ "#ws=wb.add_sheet('log_moneyness')\n",
+ "#for i in range(9):\n",
+ "# for j in range(26):\n",
+ "# ws.write(i,j,log_moneyness_all[i,j])\n",
+ "#ws=wb.add_sheet('impvol')\n",
+ "#for i in range(9):\n",
+ "# for j in range(26):\n",
+ "# ws.write(i,j,impvol[i,j])\n",
+ "#wb.save('rough heston impvol.xls')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 126,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import xlrd\n",
+ "data=xlrd.open_workbook(\"rough heston impvol.xls\")\n",
+ "data_log_moneyness=data.sheet_by_name(\"log_moneyness\")\n",
+ "data_impvol=data.sheet_by_name(\"impvol\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "### Calculate the implied volatility surface under lifted Heston model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 178,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABJUAAANvCAYAAABkmusvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3Xt8zuX/wPHXtYPNYcNmGFMTyhqZJsI2Zjab83EopwpRkspKQgkldFJSlKIwYw5zmjM7UdFX9UM1aWWzkHNsjtfvj88dO9kctvu+t72fj8f9+N73/bmuz+d97fv93m+f63MdlNYaIYQQQgghhBBCCCFuh42lAxBCCCGEEEIIIYQQxY90KgkhhBBCCCGEEEKI2yadSkIIIYQQQgghhBDitkmnkhBCCCGEEEIIIYS4bdKpJIQQQgghhBBCCCFum3QqCSGEEEIIIYQQQojbJp1KQhQTSqlBSqkES8chhBDC+imlvlJKTbZ0HEIIIayH5AZRFKRTSZQ4Sql/s7yuKaUysnx+/DbO00AptUEp9Y9SShdlzEIIIYpOSc8L8tBBCCFuXyHmhoFKqT1KqbNKqVSl1DSllF1Rxn6LcUluEGYhnUqixNFaV/jvBfwFdMry3cLbONVlIAp4qkgCFUIIYRaSF4QQQuRUiLmhHDAKqAI0A4KA0YUfsRDWSTqVhLgJrfWvWusvgH0FlVVKTVRKfWR6b6+UOq+Ummb6XFYplamUqmz6/KhSKkkpdVop9aNSqnWW81RUSn2hlEpXSqUppSYrpWxvcs3pSqkEpVTFwmivEEKI/N1OXgBQSmml1DNKqWSl1Dml1CSlVB2l1E7TE+0opVSZLOWHKKUOKqVOKqVilFI1cpxrmOlcp5RSs5TBC/gUaG56un46SwiVlVJrTdf+VilVp7D+FkIIIQxa69la63it9SWtdRqwEGh5s/LWmhtM9d5XSh1TSp1RSv2klGpQ6H8wUeJIp5IodZRSj5k6dG72uucOTrsDaG16/wjwN9DK9Lk58KvW+pRSqiawFpgMuGA8xYhWSrmZys4HrgB1gcZACDA4R/w2Sqm5wENAiNb6zB3EK4QQwqSI8sJ/QgFf4FHgZWAO8DhQC2gA9DXF0AZ4GwgH3IE/gcgc5+qIkWMamcq101ofAIYBO01P1ytlKd8XmAhUBg4CU+6iHUIIUarcRW4IoOCHD9aYG0JMsd8PVAJ6AycKaIcQ0qkkSh+t9SKtdaV8Xn/dwWl3AvWUUq4YP8ZfADWVUhUwOpd2mMr1A9Zprddpra9prTcBu4H2SqlqQBgwSmt9Xmt9DHgf6JPlOvbAYowOqU5a6wt3EKsQQogsiigv/OcdrfVZrfU+4P+AjVrrQ6YHAusxHiCAcTMxT2v9g9b6IvAqxhNmzyznmqq1Pm2KZxvgU8C1l2utv9NaX8F4cl5QeSGEECZ3khuUUk8ATYAZBZzeGnPDZcAJqA8orfUBrXV6AecSQjqVhCgMWusMjM6hVhidSjuAJIyhr1k7le4FemV9ygH4YTx5uBej0yg9y7HPgKpZLlUX6AJM1FpfKvqWCSGEuEtHs7zPyONzBdP7GhhPoAHQWv+L8YS4Zpbyf2d5fyFL3Zu53fJCCCHukFKqKzAVCNNa/1NAcavLDVrrrcDHwCzgqFJqjlLKuYBzCSGdSqL0UUo9rrLv9pDzdafTHHYAbTCeLHxv+twOaArEmcocBr7O8ZSjvNZ6qunYRaBKlmPOWmvvLNc4ADwBrFdKPXCHcQohhMiiCPPC7TiC8XDhv5jKA65A2i3UtZqd6IQQoqS4ndyglAoF5mLMJPi5EMMwa27QWs/UWvsC3hjT4CJu9xyi9JFOJVHqaK0XZt3tIY/XX3B9sTpHoIzps6NSyiGfU+8ABgD7TaOItmOsh/SH1vq4qcw3QCelVDullK3pnK2VUh6m4aUbgXeVUs6mtZPqKKVaZb2I1noxMBbYrGTR
VSGEuGtFmBduxyLgCaWUj+mcbwHfaq1TbqHuUcBDZVnYVQghxN25jdzQBmMaWQ+t9XeFHIbZcoNS6hGlVDOllD1wHsgErt5h3KIUkU4lIW7uXozhp/8ttJcB/JpP+SSgLDdGJe3H+DH+7zNa68MY09fGAscxRidFcOP/iwMwblb2A6eAZRhT47LRWs8H3gS25phTLYQQoujcbl64ZVrrLcB4IBpIB+qQfU29/Gw1xfS3UqqgKRdCCCEK13igIrAuyyim9YVxYjPnBmeM0VanMKbcnaDgtaGEQGktI6aFEEIIIYQQQgghxO2RkUpCCCGEEEIIIYQQ4rZJp5IQQgiroJQKVUr9qpQ6qJQac5My4Uqp/UqpfUqpReaOUQghhBBCCHGDTH8TQghhcUopW+A3IBhIxdhBsa/Wen+WMvWAKKCN1vqUUqqq1vqYRQIWQgghhBBCyEglIYQQVqEpcFBrfci0e2IkxqL2WQ0BZmmtTwFIh5IQQgghhBCWZWfpAO5GlSpVtKenp6XDEEIIq7Nnz55/tNZulo7jNtTE2A3xP6lAsxxl7gdQSiUCtsAbWuvYnCdSSg0FhgKUL1/et379+kUSsBBCFGfFME8UCbmfEEKIvN1qnijWnUqenp7s3r3b0mEIIYTVUUr9aekYbpPK47uc87PtgHpAa8ADiFdKNdBan85WSes5wByAJk2aaMkTQgiRWzHME0VC7ieEECJvt5onZPqbEEIIa5AK1Mry2QM4kkeZVVrry1rrP4BfMTqZhBBCCCGEEBYgnUpCCCGswfdAPaVUbaVUGaAPEJOjzEogEEApVQVjOtwhs0YphBBCCCGEuE46lYQQQlic1voKMALYABwAorTW+5RSbyqlOpuKbQBOKKX2A9uACK31CctELIQQQgghhCjWayoJIUqfy5cvk5qaSmZmpqVDsQqOjo54eHhgb29v6VDumtZ6HbAux3cTsrzXwIumlxBC5EnyRHYlKU8IIcTdkhyR293mCelUEkIUK6mpqTg5OeHp6YlSea3tXHporTlx4gSpqanUrl3b0uEIIYRVkDxxQ0nLE0qpUOBDjB1AP9daT82jTDjwBsZmDz9qrR8za5BCCKsmOSK7wsgTMv1NCFGsZGZm4urqKkkAUErh6uoqT1qEECILyRM3lKQ8oZSyBWYBYcCDQF+l1IM5ytQDXgVaaq29gVFmD1QIYdUkR2RXGHlCOpWEEMWOJIEb5G8hhBC5yW/jDSXob9EUOKi1PqS1vgREAl1ylBkCzNJanwLQWh8zc4xCiGKgBP0uFoq7/XtIp5IQQgghhBDC2tUEDmf5nGr6Lqv7gfuVUolKqV2m6XK5KKWGKqV2K6V2Hz9+vIjCFUKI0qGUdirFYGwiJIQQ5nPixAkCAwOpUKECI0aMyHZsz549NGzYkLp16zJy5EiMNamFxfzfl3B0j6WjEEKUIpIjCpTXo/Scfwg7oB7QGugLfK6UqpSrktZztNZNtNZN3Nzc7iya3e/B6d/vrK4QQtwBa80TpbBT6RrwNtAJWGLhWIQQpYmjoyOTJk1ixowZuY4NHz6cOXPmkJycTHJyMrGxsRaIUABwOQN2TYaoNpCWaOlohBClhOSIAqUCtbJ89gCO5FFmldb6stb6D+BXjE6mwnX+b/j2LVgSACcOFPrphRAiL9aaJ0php5INnJ8Pmb4YDzA+tXRAQohiJiUlBS8vL4YMGYK3tzchISFkZGQUWK98+fL4+fnh6OiY7fv09HTOnj1L8+bNUUoxYMAAVq5cWVThi4LYl4Xw7VC+OiwLgT+3WDoiIUQxIjmiyHwP1FNK1VZKlQH6YEw/yGolEAiglKqCMR3uUKFHUr469N4O+hosaQXH9hb6JYQQJVdJyxN2ZruStdDXYGV/uHoBureBCsOBE8BY8h5VK4SwVqNGwd5C/necjw988EHB5ZKTk1m8eDFz584lPDyc6Oho0tPTWbhwYa6yAQEBzJw586bnSktLw8PD4/pnDw8P0tLS
7ih+UUica0HvOFjWFlZ0gM7RcF8HS0clhLhNlsoTkiMKn9b6ilJqBMYaFrbAPK31PqXUm8BurfV/61uEKKX2A1eBCK31iSIJqEoDI08sDYKoQOi+Hmo8WiSXEkIUDbmXKBylr1NJ2XDxkcmUie2GirwAPbtApXEYHUszKJWDt4QQt6127dr4+PgA4OvrS0pKCuPGjSMiIuK2z5XXnGfZlcIKlK9mjFiKDoVVXaHDYri/p6WjEkIUA5IjiobWeh2wLsd3E7K818CLplfRq1wP+sQbHUvLgqHbaqjV2iyXFkIUbyUpT5S6TqVr16DX6GDqOW1lRov2qMW7oEdfqPo+cBL4nFL4ZxGiWLqVpwBFxcHB4fp7W1tbMjIymD59+h09XfDw8CA1NfX659TUVGrUqFG4AYs7U9YVem2G5R1gTW9o9yV4D7B0VEKIW2SpPCE5ohRxvhd6xxsjW5eHQecVUDvPTeeEEFZG7iUKR6nrPbGxgdBQGDGiKX+fjOfrLiHYLFkL3Z4Ej3nAKSASKGvhSIUQxU1ERMQdPV1wd3fHycmJXbt20axZMxYsWMBzzz1XBBGKO+JQEXpugJVdIXYgXLkAjYZZOiohRDEjOaIEq+AO4TsgOgRWdoaOS6BeN0tHJYQoZoprnih1nUoAzzwDVapAv35eHD2ZxPrBIdhHL4IOQ6HuHCAMY90/ZwtHKoQoaTw9PTl79iyXLl1i5cqVbNy4kQcffJDZs2czaNAgMjIyCAsLIywszNKhiqzsyxvTGlb3gs3D4fIFaGKe2RVCiNJDckQxVq4K9NoKy9sbuSL0K3iwn6WjEkKUMNaYJ1Re8++KiyZNmujdu3ffcf0tW6BrV6hb8x+SIjpQ9sweCHkKGswDHgLWA1ULK1whRCE4cOAAXl5elg7DquT1N1FK7dFaN7FQSFbjbvNELlcvwbp+8NtSaPEmPDoOSunaJkJYK8kTuUmeuLlCzxOX/jVGKx3eDsGfwkNDC+/cQoi7Jjkib3eTJ0r1qtRBQbB9O6SdrILX+C2cqdQGNsyB7wcABwB/4E/LBimEEMLsNrOZP/P6/bctAx0WgfdASJoA8a9CMX44I4QQopCVqQDd1kLtMNj0NOx539IRCSFEkSqVnUqrWc1GNgLg6wuJiaAcKnDfi6v5u2I4xM2DHV1B/w34YXQwCSGEKA0yyGAAA/DDj1/4JXcBGztoNw8aDYfv34GtI0FfM3+gQgghrJN9WeiyAur1gO0vwq7J8gBCCFFilbpOJY3mbd6mAx34hm8AqFfP6FiqeY8Dns8u4mD5YbB7MWwIhGuXMEYsfWfRuIUQQphHWcqynvVc4hL++LObPKZFKBsImgW+L8Hej2HDYLh21fzBCiGEsIgfgEv5FbAtAx0j4cH+kDheRrYKIUqsUteppFB8wXqaE0B/+jONaWg0NWpAXBw80tSW+5/5hO9sJ8C+VRDzEFyuAAQBWy0dvhBCCDNoRCMSSKA85QkkkO1sz11IKWg1HZq/Dvu+NNZaunrZ7LEKIYQwr7+5yo8M4VUSOJ9fQRs7Y8HuRsNkZKsQosQqdZ1K14ABVORv1tGR3rzCK4xiFNe4RqVKsHEjdOqkaPbCRNZmzITfN8Nyd7hYC2NXuJUWboEQQoiiptlPPaqRSCL3cA+hhBJDTO6CSkGLN8D/Hfg10tjx58pFs8crhBDCfKpzkm7EM5lgXmcNJ/MrrGwg6JMbI1s3DpGRrUKIEqXUdSrZAO8D/+DAdyzicV5gJjPpS18ucpGyZSE6Gp58EjqOf47P0xehj+yGJXZwvgHQA/jKom0QQghRdK6SwSmCOUYbamJPHHE8xEN0pzsLWJB3paYvQ5uP4PdVsKoLXL5g3qCFEEKYkRuViCeTBkylK9NZQFp+xf8b2froBPi/eTKyVQhRopS6TiUwlt5OBByxYRXvMYQZRBFFKKGc4Qx2dvD55zBmDAx5ty9T9q1Gn/odIs/AmRbAE8AHlm2EEKLY2bRpE76+vjRs
2BBfX1+2br0xpXbPnj00bNiQunXrMnLkSLSsu2AxVyjLJ8yhAvv5Bz9cOMcWttCKVgxkIB/yYd4VG4+AkC8gZSMsbw+Xzpk3cCFEsSY5orhxozJbOUtr3mYg83iP3/IrrhS0nCgjW4UQd8xa80Sp7FQC8AJ2AnWAL3mJoXxDIon4408aaSgFb78N778P478I5bkdm9EZJyHyEPzTFngBGA9IUhdC3JoqVaqwevVqfv75Z+bPn0///v2vHxs+fDhz5swhOTmZ5ORkYmNjLRhp6eYAjKEDM9mELcc5Q0vK8SdrWUs3ujGKUbzO6+i8fv8bPgntF0JaAiwLgczTZo9fCFE8SY4ojpxwYS2n6Ml4XmI9r7KnoHsDGdkqhLhD1ponSm2nEkANIA5oBczhcfqxjj/4gxa04AAHABg1Cr7+Gj5b3Zzw6DiuXtWwZA8c6QhMBp7DWKlJCFFapKSk4OXlxZAhQ/D29iYkJISMjIwC6zVu3JgaNWoA4O3tTWZmJhcvXiQ9PZ2zZ8/SvHlzlFIMGDCAlStl/TZLsgNeoSXzieMCmgwCsOEHoohiEIN4kzd5nue5ltfvv1df6LQUju6BpW3gwj9mj18IYTmSI0obByoTyWme5nmm8jND2c6V/KtkG9kaJiNbhShlSlqesDPblayUM7AOeBL4krb0JI54wmhJS9awhha0oF8/cHGBnj0b0PZMIhuHBmO/dCt06QGes4BTGOss2VuwJUKUPqOAvYV8Th9ubXJrcnIyixcvZu7cuYSHhxMdHU16ejoLFy7MVTYgIICZM2dm+y46OprGjRvj4OBAWloaHh4e1495eHiQlpbv6gzCDBQwioZ8ThKtCKYWbbnCMr7gC1xw4T3e4xSnmMc87HP+/tfrBl1XQUx3iGoNPTdBBXdLNEOIUs1SeUJyRGljSyVmc5YqDGIKKznBKhbRBcebV2n4JNiXM9ZXWhYM3deDY2XzhSyEkHuJQmK2TiWlVCjwIWALfK61nprj+CBgOlxf5+5jrfXn5oitDLAA8ADeoTFtSSKFUIIIIpJIutCF9u1hyxbo0KE2vjMS+fbldpRdEQNhfaD+IuAMEAWUM0fIQggLq127Nj4+PgD4+vqSkpLCuHHjiIiIKLDuvn37eOWVV9i4cSNAnnOelVKFG7C4Y4PxZCmJnCOUh+jCOb5iBjNwxZXXeI3TnCaKKMpSNnvF2mHQbR2s7ARRraDnFnCuZZlGCCHMSnJEyfcbv1GHOthia/pG4cxkzuNGV0axlfYsYCUDcL75Ser3AbuysCYcotpAz41Qzs0s8QshLKsk5QmzdCoppWyBWUAwkAp8r5SK0Vrvz1F0idZ6hDliyskGmIrRsTSS+3iYRJzpSHe68wmf8DRP07w5xMdDSEg16k/YwU9vdqLi2iWQ2Rd8IoFQYDVQ0RJNEKLUseRy+Q4ODtff29rakpGRwfTp0wt8upCamkq3bt1YsGABderUAYynCampqdfLp6amXh/aKqxDL6qyge0k0JXW9OME/zCWsVSmMs/yLKGEEkMMFXP+/t8TCD1M0xuW+EOvrVDpPss0QohSyFJ5QnJEyfY3f9OMZrSlLV/zNY5ZRiSV53kycSWAJ/iRQD5mPSOoevOT1e0CXWNgVTdY0gp6bYYK8t+vEOYg9xKFw1xrKjUFDmqtD2mtLwGRQBczXfu2jACWAf+HG2fZSgChDGPY9UVZvb0hKQkcK1bE86UN/F2+I2xZDDt7gk4CWgPHLNoGIYRlREREsHfv3lyv/5LA6dOn6dChA2+//TYtW7a8Xs/d3R0nJyd27dqF1poFCxbQpYtV/kSWau1wpgzriKE7roziGK8xnGEsZCFJJNGGNhzneO6KNVtA+FZjzYwl/nDiF/MHL4SwOMkRJUd1qjOOcSxjGe1ox2myb8rgSD80q2jAAULw4y1S8l++27Md9IiFc4dhSQCc/bNI4xdCWKfimifM1alUEzic5XOq6buceiilflJKLVNK5TlHQCk1
VCm1Wym1+/jxPP7xXgi6A1uAE5RnP6vozJO8yZs8zdNc4Qr33gsJCVC3flk8R0bzu8MASFoK2zqC/gXwByQZCCGy+/jjjzl48CCTJk3Cx8cHHx8fjh0zOqFnz57N4MGDqVu3LnXq1CEsLMzC0Yq8tMCR+4hiIYOpylv8zTD6Ek4MMRzgAP74czhbujOp5gvh2+HaVeOG4fhPZo9dCGHdJEcULy/xEgtZyE524o8/qaRmO25Pe+zZhAfHGUhLxvN/+S/f7RFgjFLKOAGR/nAquUjjF0IUP9aaJ1Re8+8K/SJK9QLaaa0Hmz73B5pqrZ/LUsYV+FdrfVEpNQwI11q3ye+8TZo00bt37y6yuA8AYcBxNJ0ZTyRT6ExnFrOYcpTj3Dno1g22br3G99NfwpcPwKsdtNsJts7AJqB+kcUnRGl04MABvLy8LB2GVcnrb6KU2qO1bmKhkKxGUeWJFDTrGcdw3iKd7rizkHi+pyMdqUhFNrOZ+7k/d8WTv8LSILhyAXpsgOqPFHpsQpR2kidykzxxc3ebJ7awhW50oyIViSUWb7yzHdf8zL+04zKZTGUtE2mecwW+7I7tNRbutrGDnpuhind+pYUQt0lyRN7uJk+Ya6RSKpB15JEHcCRrAa31Ca31RdPHuYCvmWK7KS9gJ3A/imVMZgCzWM1qggnmJCdxcoK1a6FHDxuajH6P2PNT4MAGiGkEly9ijFj6wbKNEEIIUeg8UfRgCu/yPu4sJ532+NOI7Wwnk0z88ON//C93RZcHoE88OFSCpW0hLdH8wQshhCg0QQQRRxxXuIIffsQTn+24oiFOJGGDK28QxOus50x+J6zqA73jAGWssXQ0j1wihBBWxFydSt8D9ZRStZVSZYA+QEzWAkqprHstd8YYKGRx7sAOjJWSFvAMfVnKbnbjhx9/8RcODhAZCcOGKcJeH8uCI5+iDyVA9D2Q6QgEAnGWbIIQQogiUBUYyiimsgA34jhKII2pSQIJOOJIa1qTQELuihVrGzcM5atDdDv4a5vZYxdCCFF4fPBhJzupRjWCCWY5y3OU8KQSCVzkAabQmakszn8FVlcvI0/Yl4elgXBkVxFGL4QQd8csnUpa6ysYa2BvwOgsitJa71NKvamU6mwqNlIptU8p9SMwEhhkjthuhTOwFngMWEQPwtjIEY7QnOb8zM/Y2sInn8CECTDwvaeZcSASnf4TRDnD+apAO2CdRdsghBDi1l26dGvlnIAX6M80YnDiAMfxpx4OJJKIO+6EEMJ61udR0QN67wBnT1jRHv6ILczwhRBCFCGtYexY+PHHG9954kkCCTSmMT3pySxm5ahVjcps5xwtmMLjfMrHpOR3kcp1jZGtZasY0+EO7yj8hgghRCEw10gltNbrtNb3a63raK2nmL6boLWOMb1/VWvtrbVupLUO1Fpb1fY4ZYCvgQhgFa3wJR4N+ONPHHEoBRMnwkcfwSvzwolIXI0+lQKRV+FMHYzN7iIt2AIhhBC34tIlCAuDV16Ba9cKLu8AvEJ7ZrEJO45xmpa4c5Z44vHCi850ZglLclcsX91YvNvFC1Z2huSVhd0UIYQQReDYMViwAPz9YePGG99XoQpb2EJHOjKCEbzGa+hs+75VxIVYTtOJCTzHct5gX377wjnfY4xYcqoFy8MgZUORtUkIIe6U2TqVSgIbYBrwAbCNhtQgiaqmJ9HRRAMwYgQsXAgfrmrHoDWbuXbhFESehBONMMY6fWa5BgghhCiQrS3Urw/TpsFjj0Fm5i3UAUbTkoXsIINrZOCPM7+zla20oAV96ctnef3+l6sCvbZAtYdhdU/4JY/OJyGEEFalWjXYtQtq14YOHeDLL28cK0c5lrOcIQzhLd7iCZ7gMpez1C6LC9GcZBAvMpEknmMX+TzBqFDDGNla+X7jAcTBmJuXFUIIC5BOpTvwPMaYo5+5F0jAm4fpRS9mMxuAvn1h9WpYmticTt/EceUKEPkHpDcHhgFTLRa7EEKI/Nna
wscfw/TpsGQJBAfDyZMF11PACB5iI4kcxYVrBGHHLmKJpT3tGcYwpub1++9YGXpugpotYd1jsG9+obdJCCFE4fLwgPh4aNMGnnwSXn/dmBYHYIcdn/EZE5nIfObTmc78y79ZatvhwjxOM5ohzOIv+rGJfOZdl3OD8G3g5gOre8gDCCGEVZFOpTsUDmwEjuHKETbjTwee4RnGMx6NJjQUtmyBnckN8fs4gUuqEiz9Ef5sA7wKvAL5DXcVQpQ4KSkplC1bFh8fH3x8fBg2bNj1Y3v27KFhw4bUrVuXkSNHorX8PliSUjB6tLERw3ffQYsW8Mcft1Z3ELX5kUR+ox5l6MQlVrGCFTzGY7zKq7zCKzmmQwBlnKD7eqjVBmIHwY+fFnqbhBDWTXJE8ePsDGvWGJ1Kb74JgwbdWJNPoZjABOYyl41sJJBAjmVbnltRiemc4x3CWcxVuhDN+Ztf7L8HEO7N5QGEEKWUteYJ6VS6C62ABMCWcvyPFbTnKSYzmcEM5gpXaN4c4uLgrzP38dDbCZy3vw9WJMBvIRgT6YYBVy3aBiGEedWpU4e9e/eyd+9ePv30RsfB8OHDmTNnDsnJySQnJxMbKws3W4PevWHzZmP9jEcfhe+/v7V6PajGMXawi+Y48RhnmcPXfM1whjONaQxjGFdz/v7bl4Nuq+G+DrB5OOz5oPAbJISwapIjih97e/j8c6NTacECY02+M2duHB/MYFaxin3sowUt+J3fs9V34mUuMJdgNuJOMPPIZ2isgzP0WA/3BBkPIPbOLppGCSGsljXmCelUuksNgJ3APdixibl0YxzzmEc3unGBCzRoAElJcNXRnXqv7eBUGV9Ysxl+bg/MAR6H/Ia7CiGsTkpKCl5eXgwZMgRvb29CQkLIyMi44/Olp6dz9uxZmjdvjlKKAQMGsHJl6Vu0WSkVqpT6VSl1UCk1Jo/jg5RSx5VSe02vweaIy9/f+B0vXx5at4aYW1zOIpiK2BNLLJ1wZQT/8Caz+JjXeI05zOExHuNSzt9/O0fovBzq9YDtL8C3bxd6e4QQRUtyROmjFIwfD/PnGw+U/fzg8OEbxzvSka1s5TSnaUEL9rAnW/1yDOYKS3mEPTQlgA9Ju/l8Bvvy0DUG7ut1opAiAAAgAElEQVQEW56B3e8VWbuEEEWjpOUJO7NdqQSrBcQDXVGsYBI9qMFynqUtbVnDGjw9XUhIgLCwyniO3sSBaT2psXEdZLaHR5YAZ4FlQDmLtkOI4mYUo9jL3kI9pw8+fEDBI0SSk5NZvHgxc+fOJTw8nOjoaNLT01m4cGGusgEBAcycOROAP/74g8aNG+Ps7MzkyZPx9/cnLS0NDw+P6+U9PDxIS0srvEYVA0opW2AWEAykAt8rpWK01vtzFF2itR5h7vjq14edO6FTJ+jWzdjp85lnCq73KGX5mWiWMITeTCSdf5jMTCpTmdGM5hznWMYyymX9/bctAx0jYf1ASBgLVy5AizeNuxYhxG2xVJ6QHFE6DRgANWtC9+7G6Na1a8HHxzj2KI+SSCLtaEcrWrGc5YQQcr2uA925wnrq0IUKtOQtNvEq9fIeAWDnCJ2Xwbp+sOMlI088Os4sbRSiJJF7icIhnUqFpDKwAegPLGM4HXFjI4/jhx8b2ECtarXYtg26dClP7RdW8fO0AdwftwQyQ8FvPah2wBqgokXbIYS4NbVr18bH9C9FX19fUlJSGDduHBERETet4+7uzl9//YWrqyt79uyha9eu7Nu3L885z6r0dSA0BQ5qrQ8BKKUigS5Azk4li6lWDbZtMzZjePZZSEmBqVPBpoAxvw2xw4l5fE4VBjODI5zgJeZTkYoMZSjtaMca1lAx6++/jR2ELQC7srBrMlzOgFbTpWNJiGJCckTpFRQEiYnQvr0x0nXpUggNNY49wAMkkUR72tOBDnzJl/Sj3/W6drTBhm1UIYwhtOR1NjCBxtjndSHbMtBhkdHBlDgeLl8AvymSJ4QoJkpSnpBO
pULkiLEr3AvAR/SkDVXYTRea05wNbMC7ojexsdCnTxm8XlrIrimVeeS7TyEzCIK2g00bIBZws2QzhCg2buUpQFFxcHC4/t7W1paMjAymT5+e79MFBweH6/V8fX2pU6cOv/32Gx4eHqSmpl4vn5qaSo0aNYq+EdalJpBlsgCpQLM8yvVQSgUAvwEvaK0P5yyglBoKDAW45557CjXI8uVhxQoYOdLYHe7PP43pDo6O+dfzRNGZ6XyIG8/zCkc4xWCiccaZfvQjkEA2sAG3rL//NrYQMse4YdjzLlzJgKCPQMnMdSFulaXyhOSI0q1BA9i1Czp0gI4d4dNPYbBpwnYNarCDHXSnO/3pTzrpjGY0CuMG0IYmlCeBq4QwmtZMIIYJtKJsXheysYPQr8CuHHz3tjFiqfX70rEkxC2Se4nCIZ1KhcwW+BDj7mgMrWlKHH8Sih9+rGENLR1bsmwZDBliS9Oxn7B+nCuhP02BTD9o/z3YBgCbAI98ryOEsD4RERH5Pl04fvw4Li4u2NracujQIZKTk7nvvvtwcXHBycmJXbt20axZMxYsWMBzzz1nxsitQl7/As752GU1sFhrfVEpNQyYD7TJVUnrORiL1tGkSZNC3/rC1hY+/hhq14aICDhyBFatAheX/OtVBZ7gZabjyosMJZ22hLMWZ2LoTnf88WcTm6hFrRuVlA20+cgYsbR7Bly7BG0/NTqchBDFiuSI0qVGDWN9pfBwGDLEGN06aZLR31ORiqxjHQMZyMu8zBGO8C7vYmOa7KZ4gIokcJJ2vE47JhLFGDrnPZ9B2UDb2Uae+OED4wFE29nyAEKIYqi45gn5tSkCCngF427nBxrhwk5cqEpb2hJDDHZ2MG8ejB6tCJs8ma8Pvwu/JcDKRnA5FfADDlq0DUKIwhcXF8dDDz1Eo0aN6NmzJ59++ikupp6I2bNnM3jwYOrWrUudOnUICwuzcLRmlwpZe1PwAI5kLaC1PqG1vmj6OBfwNVNsuSgFo0dDZCR89x20aAGHDhVczxl4jqeYwTJc+IGjBNCOhmxkI+mk44cfySTnvljANGO9jJ8/N3b8uXalKJolhLAgyRElj5OTsbnDkCEwZQr07w+XTPszOODAIhYxilF8wAc8xmNc5GKW2rVwIY7zPMQkujODBRy72YWUgtbvQdNX4ac5EPuE5AkhSiCrzRNa62L78vX11dZuvda6vNa6lj6mH9JNtY220XP13OvH33lHa9D63Se+0NfetdF60UNaZ1TWWlfXWv9koaiFsF779++3dAhWJ6+/CbBbW8Hv9K2+MEbOHgJqA2WAHwHvHGXcs7zvBuwq6LzmyBNxcVpXrqx11apaf/fdrdW5rLV+R2/VZ3UFfVx76qv6N71H79Fu2k1X1VX1Xr0374o7J2s9A61jeml95VKhtUGIkkTyRG4lIU8U1csceeLaNa2nTDH+zR8YqPWpU1mO6Wt6up6u0ehAHahP69M5ap/Vx3WQ1ho9Rb+n/yjoYjsnmfJEuOQJIfIgOSJvd5MnZKRSEQsFtgGZuJHKFpoSzBCGMJnJaDQvvwxz50LE/CcZtzMK/fcvEOUG5zXQCvjWsg0QQggz0FpfAUZg7HlwAIjSWu9TSr2plOpsKjZSKbVPKfUjMBIYZJlos/P3h6QkKFcOWreGNWsKrmMHRBDIl2xD8y/n8KMhinjiKUMZWtOaJJJyV3z0NWg1A35bCqt7wZWLucsIIUQJpZQKVUr9qpQ6qJQak8fxQUqp40qpvabXYEvEmZNSMHYsfP01JCSAnx/89ZfpGIrRjOZrviaeeAII4Ei2gbpOVGEtJ+jBWF4khtfYn2t2eBaPjoOA6fBbFKzuKXlCCFHkpFPJDB4BkoBKVOBHVhNIf8Yznud4jqtcZfBgiIqCGTE9eHr9Gq6dSoNIRzhTAQgCtlq2AUIIYQZa63Va6/u11nW01lNM303QWseY3r+qtfbWWjfSWgdqrX+xbMQ31K8PO3eClxd06WIsyloQ
BYykCTHEcwYHLtGaezhKAgm44UYwwWxiU+6KTV6CNh/D76tgVVdjZzghhCjhlFK2wCwgDHgQ6KuUejCPoku01j6m1+dmDbIA/frBhg2QmgqPPgr/+1+WY/RjLWs5xCFa0IJf+TVLTQdcWcIJhjCSt/iW4XzL1Ztf6JHRpjwRAys7GzvDCSFEEZFOJTOpi9Gx5IU9O/iKUCKYxSz60IeLXKRHD1i7FhYlBdMzcjNXz5+FyMtwojrQHoixbAOEEELkq3p12L4dwsJg+HB49VW4dq3gek9Rn50k8hc1sKEdLvxEPPHUox4d6EA00bkrNX4WgudCygZY2REuny/09gghhJVpChzUWh/SWl/C2HS5i4Vjum2BgZCYCHZ2EBAAsbE3joUQwna2k0EGLWjBLnZlqWmLK59xijE8wWek0ZfN5DMKqfGzEPIF/LkJVnSAS/8WWZuEEKWbdCqZUTVgO9AWG2KZRijvsoxlhBLKGc7Qti1s3QpxyY/Sdu4OLl8CIk/B3/cB3YHc2wsKIYSwHhUqwMqV8PTTMHWqsSjrxVuYedCbWhwinp9pQFm6Yc8GtrOdR3iEcML5ki9zV3poMITNh8PbIToULp4t9PYIIYQVqQkczvI51fRdTj2UUj8ppZYppWrlcdzivL1h1y6oWxc6doQvvrhxzBdfkkjCBRfa0IY1ZJ1TrajM25xlBt1ZiqYTK8ins6jhk9D+G0iNh2UhcPFMkbVJCFF6SaeSmTlh7IndD4jlRYL4hgQSaEUr/uZvmjY1th9NPtWQpu8nkKkrQtRh+KsB0B/4xKLxCyGEyJ+dHcyeDW+/DYsWQWgonD5dcL0OVOECW0mgFS4M5DJfsZGNtKUtT/IkH/BB7koP9ocOiyF9F0SHQOYtXEgIIYonlcd3ORcXWg14aq0fAjZjbMac+0RKDVVK7VZK7T5+/Hghh3lratQw/s3fti0MHgzjx4M2taYOdUgkEW+86UpX5jEvW11nXuI882jDFtxpywJO3PxCXo9BxyVwdDcsDYKMfMoKIcQdkE4lCyiDkeFeBrbwOE1Zy0EO0pKWHOQgDz5oDIs9b1cH70kJnLO5F5b/Agd9gWeBty0avxBCiPwpBWPGwDffGL/nLVveWJQ1PwE44cw61tAdN17gHFOJYRU96MELvMBEJqJz3kM9EA6dlsHRH2BpG7lhEEKUVKlA1pFHHpBtRWu01ie01v+ND50L+OZ1Iq31HK11E611Ezc3tyIJ9lY4OcHq1Uan0uTJMHAgXLpkHKtKVbaxjba05SmeYgpTsv3+l+cJLhPNw+zFlwBmkXbzC93fA7qsgH/+D6IC4fzRIm6ZEKI0kU4lC7EB3gE+AHYSQl22cpoztKQlP/AD995r7A5RsWYN6r62g39sGkHM/2Dfo8BYYAy5H84IIazZiRMnCAwMpEKFCowYMSLbsT179tCwYUPq1q3LyJEj0abHlSdPniQ4OJh69eoRHBzMqVOnLBG6uEOPP24sypqWZizKundvwXUexoF6RLGYp6jOZE7wApEsZBCDeIM3eJEXc3cs1e0CXVfByQMQ1VpuGIQohiRHFOh7oJ5SqrZSqgzQhxyLjiql3LN87Iyxm6hVs7eHOXOMTqWvvzbW5ftvdGsFKhBDDP3oxzjGMYIRXM2yQLcjXVGspzaH6UBLpvHbze8O7usA3dbA6YOwpBWcy6cTSghhlaw1T0inkoU9DywG9tMUFxKxx5FWtGILW6haFbZtg/o+rtR+eQuHVWuI3QU/tMToknoWuIVVYIUQVsHR0ZFJkyYxY8aMXMeGDx/OnDlzSE5OJjk5mVjTyp1Tp04lKCiI5ORkgoKCmDp1qrnDFncp66Ks/v5GJ1NBHsAWP+Yyl1eowaccZQBfMJuRjOQDPmAwg7PdWABQOwy6roHTh+SGQYhiSHJE/rTWV4ARwAaMzqIorfU+pdSbSqnOpmIjlVL7lFI/AiOBQZaJ9vYoBa+9BgsWGFPi/PzgsGn1qDKUYT7zeZmX
+YRPCCecTDKv17UnEEe24cp5BuHHm/zv5vvC3dsWemyAf9NgSQCc/bPI2yaEKDzWmiekU8kK9AZigaM8wDWScMeT9rQniigqVjR2hQgMqUDdiLX8cqUbbEuEnS1Bz8bIlVcsGr8QpU1KSgpeXl4MGTIEb29vQkJCyMgoeFv38uXL4+fnh6OjY7bv09PTOXv2LM2bN0cpxYABA1i5ciUAq1atYuDAgQAMHDjw+veieMm6KGuHDjBvXsF1aqHozlQ+Yho1iSKdrnzAFCYwgXnMoy99ucSl7JXuDYIescYNQ1QrOHsLc+6EEIVKckTR0Vqv01rfr7Wuo7WeYvpugtY6xvT+Va21t9a6kdY6UGv9i2Ujvj39+xv/7j98OPvoVhtseId3eJ/3Wc5y2tGO09xYQ88GXyqQQBkcGUVr3iD+5vvCefhDr82QeRIiA+DUwSJvlxAiu5KWJ+wK/YzijrQBdgBh1OQYcdSnM33ow3GO82zZZ4mOhqeecqDB2CgSXx9Cs6Sv4GILaPU1qH8xxjs5WLQNQpjfKOAW5hPdFh/Ia0HkHJKTk1m8eDFz584lPDyc6Oho0tPTWbgw9y6NAQEBzJw586bnSktLw8PD4/pnDw8P0tKMUSZHjx7F3d0Yze/u7s6xY8dusz3CWvy3KGvPnvDUU8YaS6+/bjyhvhlX4AkieBcXRjGUdEKYyFoqUpGXeIlznCOaaMpR7kYlD3/ouQmWhxojlsK3QsXaRd4+IayTZfKE5Ahxp4KCjCUw2rc3RrdGR0NIiHFsFKOoTnUGMAB//IkllpqmDfAUD1CJRE4QwlhCmMQyxtCBCnldxL0Z9Npi7Ai3JAB6bQXX+mZroxDWQ+4lCoN0KlmRxsBOoB2V+ZWNPEIfRjCCoxxlov1EvvpK4epqR/M3viD2FWdC9syEi80geAXYdAJWAOUt2wghSonatWvj4+MDgK+vLykpKYwbN46IiIjbPtd/c56zUvn1NIhiy8kJ1qyBp5+GiRONjqXPPjPW1LiZCsAInuI9KjOSvvxNAC+wEWecGcpQ2tGONayhIhVvVKrxqOmGIdh4Eh2+FSrXK/L2CSEMkiPE3WjY0Bjd2r69Mbp1zhx44gnjWB/64IYb3ehGc5qzgQ144WWqWQtX4viH9rxBFyYxn+d5HJe8LlLtYei93dgRLqoV9NwMbg3N00AhRInKE9KpZGVqA4lAB8ryPdH4MYxJTOIoR/nE5hPee88WFxcb2k34gEXPVaYvE+FSEwjbDHbtgDVAJcs2QgizKfgpQFFxcLgxMtDW1paMjAymT59+R08XPDw8SE1Nvf45NTWVGjVqAFCtWjXS09Nxd3cnPT2dqlWrFmIrhCXY28MXX8C998Ibb8CRI7B0qdHhdDMOwIt050PWMpSunKAlT7AJZyJ5nMdpQxtiicWNLLsYVfOFXttgWVtjxFKvLeDqddNrCFEyWSZPSI4Qd6tmTYiPN0a3Pvkk/PnnjdGtQQSxgx2EEUZLWrKGNbSghammG1XYynG6MJF+vMVJBvEcNfK6SJUGEL4DlgUZmzz03GR0NglRasi9RGGQNZWskBuwFQjBjgTmEsBY5jCHXvTiospk/Hj46CPFYx+9wax978Nvu2HlQ3D5W4yJdMct2wAhSqmIiAj27t2b65VfEgBjKKqTkxO7du1Ca82CBQvo0qULAJ07d2b+/PkAzJ8///r3onhTyrg5+OIL2LwZAgIgPT3/OrbAC7TlK7aiOMNZ/OiGF6tYxX72E0AAaTm3lK7aCMK3g75m3DD8839F1CIhREEkR4jb5ewMa9fCwIHG6NannoLLl41jjWlMEklUoQptactqVmep6YQb6zhOF8YykiVM5Peb7QvnWh96x4F9BVjaBtK/LfJ2CSHyVlzzhHQqWakKwGqgP4o4puDHh6xgxfWF+UaMgG++gecXjOLNb+eh//oZltWDzP1AAOS8sRBCWAVPT09efPFFvvrqKzw8PNi/fz8As2fPZvDgwdStW5c6
deoQFhYGwJgxY9i0aRP16tVj06ZNjBkzxpLhi0L25JPGdLjkZGNRVtP/HG5KASNoykriOY8tmQQQSCU2sIE00vDDj4PkWHS1ijf03gE2drCkNRz9X1E1RwhxlyRHiJzs7eHLL2HCBOM/O3aEs2eNY/dxH4kk0oAGdKUrn/N5lpqOuLGM4wziBd5gG6P46Wa7RleqA33ioGwVWNoWUuOLvF1CiDtjjXlC5TX/rrho0qSJ3r17t6XDKFLXgDHAdKApkfyPAXjhRSyxuOPOmjXQqxcMaR3NhyF9Ua73Qs90KOcGbAbqWDR+IQrbgQMH8PKSKTxZ5fU3UUrt0Vo3sVBIVqO45IkffjDWzcjMhFWrjJFLBYkkBV+C8eAIV1jOr7gSSij22LOJTTSgQfYKpw4aT6EvnYOeG6H6I0XTGCEsTPJEbpInbq645AkwRrc+/TQ0aADr1hkbQAD8y7/0ohexxDKJSbzGayj+W0/lGieIwJX3WEI/7mEezbnJQn7n0owp02f/gq4xxo6iQpQwkiPydjd5QkYqWTkbYBrwHvAdfajPWn7nd1rSkmSS6djR2Hr0q8QeDFi+mmsn0yDSFc6eAvyBfRaNXwghRMEefhh27oTq1SE4GJYsKbhOHzz5lQR+5X4c6UQdfieOOGywIYAAvuO77BUq1zWmODhWNp5EpyUVTWOEEEIUiaeeMqbD/f67Mbp1n+mf+RWoQAwx9Kc/4xnPCEZwlaumWja4MoNTTKE333CK7mzkJluXO9U0pkxXug9WdIA/1pujWUKIYk46lYqJF4BFwC8EU51tnOEcfvjxAz/QqhVs3w6xv7Sj84JNXD13BiLLwsnLQCtgj0VjF0IIUTBPT0hMhGbNoE8fePddKGgwcUeqcZptfE8zKtIXN+JJIIHKVCaIILazPXuFip5Gx1L5ahAdAqlxRdQaIYQQRaFdO4iLgytXoGVL2LbN+N4ee77iKyKI4BM+oQ99yCTTVEtRmbGcZTahrMWRUJZzJu8LlK9mbPLg+iCs7ALJK83SLiFE8VU6O5UuXLB0BHekL7AWOMojOJKAHY60pjXb2MbDDxs7RPz0T0taf7KdSxlXYYmGY2WAQEDmRgshhLVzcYGNG41pzaNHw/PPw9Wr+ddpTSXKsIHNhOHGMMoSSTxx3MM9hBHGOtZlr+DkYez243QPRIfCn1uKrkFCCCEKXePGxujWGjWMTqZFi4zvbbBhGtN4j/dYxjJCCeVMls4jZ4aRyWJakIQngSzgWN4XKFcFem01doJb3RN+uYXhs0KIUqv0dSppDa1bQ6dOUEzmT2cVDGwHrvAAF0jCjXsIJZRooqlfHxIS4Bg+PPJuPBmXykHUeUirBLQDNlo0diGEEAVzdITISHjxRfjoI6ODKeMmMxX+04RyeLCSFTxGdcaieZ8dbMcbb7rQhSiisleo4A69t0OlurCyI/wRW2TtEUIIUfjuvdcY3dqiBTz+OEydemN06wu8wEIWkkQSAQRwhCPX65WjN1dYzYP8QjP8+YS/8r6AY6X/Z+++42u8/gCOf04ikiAhCEHsPVpKWjVi772a0tq1S3VYbdGq6k/VatWm9t57hxCrpUVrBrVj05iRyPf3xwkJkrgkuTfjvF+v+3LdnOe553i1zzfP9znne6DZJshaFtZ+AIdnWmFUhmEkRskvqXTihF6AvHo1vP12okwulQJ2AunJRiDbyYcX7/EeE5lIjhx6xpJ9xoIUHexPUGhmWHwdzngA9QEzhdUwDCOhs7PTy99Gj4bly6FqVbh+PeZjiuBASWYxi+5kYwQP6csWNvAu79KCFvzGb88ekCoT+GyF9IVhRUM4tSrqExuGYRgJkpsbbNgALVrAl19Ct256WRzAB3zAGtZwmtOUpSzHOf70OCdqkYJNeHKF+pRjOMeIcrV1Shdoug6yV4b1beHQZGsMyzCMRCb5JZUKFoQzZ6BPH0iVKtEml/KhE0sFSc9xNlGcOnShC9/zPe6ZhK1bIXvRHBQY
uIPrYQVg+UUIyAk0A+bYtvOGYRiGRXr2hEWL9O5w5crB6dMxt8+JHTX4hYkMxJNp3KETG1hJdarzER8xmtHPHuCcAd7bAu7FYWUTCFgaf4MxDMMw4pyjI8yeDX37woQJ0KQJ3Lunf1ad6mxjG/e5TznKsZe9T49LQTmc8MOVENrgzRD2E+Vqa4fU0GgV5K4FmzrBX2OtMi7DMBKP5JdUAnB3hx9/TPTJJQ/AD6hAKg6wjFK0ZgAD6ElPXNKGsX49eFXITP6vt3IxpCSsOg2H8wOtgEm27bxhJENnzpzB2dmZEiVKUKJECbp06fL0Z/v37+eNN94gX758fPLJJ0j4HPabN29SvXp18ufPT/Xq1bl165atum/YSNOmsGWLnqlUpszLQ1RmFM0ZxC+MwpOl3MSHFcyhKU35jM8YzGAk8jNpJze9xCHz27DKx9TOMAwbMTHCeF12dnr529ixene4KlXgani5pFKUYhe7SEtaqlCFdUTs6GZPcVzxx540fEJlvmcbj6L6AgdnaLAM8jYE3+6wb6RVxmUYxrMSapxInkmlJ5JAcskVWAc0w4H9TMOLLxjDGD7kQ+ydH7FsGdRp7EbB/psICK4E64/BX4WBzoAJCIZhbXnz5uXAgQMcOHCACRMmPP28a9euTJo0iYCAAAICAli/Xte4GTp0KFWrViUgIICqVasydOhQW3XdsKFy5XTtjFSpdFnAtWtjbp8W6MCnjGQaWfDlBnWZzzja0IaBDKQPfZ5NLDmmhWYbImpnHDUzWg3DFkyMMGKjWzdYuhT+/ls/hAgI0J/nIx872UlBCtKABswkoj6SIh/p8ecR2elLLX5gFVFuaZTCEeovggLvgd8XsPd/VhmTYRjPSohxwqKkklLqL6XUp0qpzHHeg4QgkSeXHIH5QDfs2MdwSjKM+cynPvUJdrjLrFnQ+qM0vDFwNQfvNgDfI7C3KPAF8B1EvYraMIxonDlzhsKFC9OxY0eKFi1KjRo1ePCySsoxCAwMJCgoiDJlyqCUonXr1ixfruufrVixgjZt2gDQpk2bp58nNEk+TiQAhQrp3X4KFIAGDWDq1JjbpwK605afWUwG/uIaVZnKELrTneEMpwtdeBx5scOT2hmeFWFtKzg8I17HYxhJlYkRUTNxwjoaNoStWyEoSCeW9uzRn3vgwTa2UZGKtKENwxgW6eFCNjKynTu8SX8aM5zZ3I7q5PYOUHcuFP4Q/L+CXd9GVAc3DMNiSS1OpLCw3ffAh8AQpdR2YBawTERef+QJ0ZPkUq9eMHw4/PqrTi6tXg2NG+uKqTly2LqXUbIHfkUviRtIb94gI1voSFWqssZuDWPHZiRdOie8vlvM1r5tKe8/F4KLgfc3oO4AwwBl0zEYxivb+ilcPRC358xUAiqPfmmzgIAA5s2bx+TJk/Hx8WHJkiUEBgYyZ86LMzwqVKjAL7/8AsC///7LW2+9haurK99//z3e3t5cvHgRT0/Pp+09PT25ePEiAFeuXCFLliwAZMmShatXo9n+1/aSR5ywMQ8P8PPTO8J16ADnzsG334KK5vKdEviUxoxhLR/RkJtUYBQbccWVH/iBO9xhBjNwwEEf4JAaGq+GFY1gfTt4/Aje7Git4RlG3LNRnDAxIkomTlhJ6dL6IUStWlC5MsybB40agSuurGENbWhDX/pymcsMZzh22AEZcGcLV2nEQFoxlFu0owcvZADtUkCtGWDnALsHweNgKP9D9IHIMBIycy8RJyxKKonIEmCJUio94AN0A8YppZYCs0XEN857ZktRJZeWLYONG2HQIF05NYWl+TjrUcAAdGKpC+3IRwYO8T7lKc9GtZEffshB2rQOVPhyFqt6ulKXCfCoKFQdDuouMJbkviLSMCyVO3duSpQoAUCpUqU4c+YM/fv3p3fv3tEekyVLFs6dO0eGDBnYv38/jRo14vDhw0/XPEemEtkvZ8kuTtiQiwusWgWdO8N338H58zBxIjg4RN3eDviEqkxkCz7U
Jghvvg1PLPWjH3e5y0IW4oSTPsAhFTRaCSub6qKsYSFQopvVxmcYSYGJES8yccK68uXTiaX69TxSiokAACAASURBVHXx7jFj4OOPwRFH5jKXzGRmFKO4whWmMY2UpARcyMQartCCfnzCz9yiEQPI+fyDZzt7qDkV7B3h96E6sVRxhEksGcYrSEpx4pUyIyJyUyk1E7gL9AGaAhWUUmFANxHZHN2xSqlawM/oSTVTRCTKxXxKqWbAIuBtEbHturMnyaWePeHTT/UWPL16waxZ+jf40qVt2r3odATcgeY0IBMbuUR9ylGOjWykb9/CpEtnR/2u45jdyZUPGKYTS7UmgN094Dde8T8Lw7AdC54CxBdHR8en7+3t7Xnw4AE//fRTjE8XHB0dnx5XqlQp8ubNy4kTJ/D09OTChQtP21+4cIGsWbMCkDlzZgIDA8mSJQuBgYFkypQpnkcWO7GJE4blHBz08rfs2XViKTBQh6g0aaJur4AulGY626lBDRyowCesw4WxfMzH1KUuK1hBGsJPkMIJGiyF1T6w5WOdWCrZ02rjM4w4Y6M4YWJE9EycsB53d/D1hQ8+gO7d4exZXdDbzs6O0YwmC1n4ki+5znUWsxgXXAAnMrOIq3SkJ98whZuUYySFn3/wrOyg2nidWNo/CkKDoeoY/blhJBbmXiJOWFpTyU4pVVMpNRu4hJ66OhTwEJF8wJfA7BiOt0dPg6kNFAFaKKWKRNHOBfgEIu13mRBkzQoLF+rtFHLlgoMH9SLlbt3gdpQrjm2uEbARCMIbJ7bzkFDKU5697KVzZ5g7V9Fm6lDGHhwCRw/DqsIQOgt4Hwi2becNI5Hq3bv308J5kV9Ppqteu3aNx491DZvTp08TEBBAnjx5yJIlCy4uLuzZswcRYebMmTRs2BCABg0aMGOGrm0zY8aMp58nNLGNE8arU0pPnp08GTZtgooV4fLlmI9pSzH82clV3BCq0oqCzGQmfvhRgxrcjlxF40lR1vxN9PTwP4bH74AMI4lLzjECTJywlVSpYMkSfdvy00/w4YcQHAwKRT/68Ru/sYUtVKEK17gWflQKMjGVa3xKB37mIO3YR+iLJ1dK35R79YaD42BTZwh7/GI7wzAskljjhKWp5EvACOAQUEREaovI3CdroMOnsx6N4fh3gJMiclpEHqHrSkc1msHo4j4PLR2AVdWpA4cPQ79+YG8P48fryqnz5iXIInUVgB1ACt4kmJ04kY6qVGUTm2jeHJYvV/Ra9BXf7xoDJ4/CsgIQshSdkopy3wfDMGJh+/btvPnmmxQvXpxmzZoxYcIE0qdPD8D48ePp0KED+fLlI2/evNSuXRuAfv36sWnTJvLnz8+mTZvo16+fLYcQk9jGCeM1degAK1fCsWP6ecfx4zG39yE3x/HnFHlwpA51ScNCFrKPfVSmcqSbCsA+JdSdDwXfh+29Ye8P8TsYw0jGkniMABMnbMbeXlfz+PFHmD8fataEJ7uKt6Mdy1jGP/xDOcrxL/+GH2WHOyO5wWCaM5PLNGN7VLdoSkGFH+HdAfD3FNjQDsKiSEAZhhFrCTZOiMhLX4CXJe1iOL4Zesnbk7+3An59rs1bwJLw99ui+06gE7AP2JcjRw6xmb//FilXTkSnk0Rq1BA5edJ2/YnBvyJSQEQcJVByS3FxEAdZIAtERMTPT8TFReTzmtMlbLidyNy8Ig8QkYoiEmSzPhtGdI4cOWLrLiQ4Uf2bAPskFtftV33FNk7E16tUqVKv8S+aOP3+u4i7u0j69CL+/i9vv1VuyB55V0LFTq7KNFkv68VZnKWQFJILcuHZxo9DRNa0FBmOyM5vRcLC4mcQhhEHTJx4kYkTJk48MXeuiIODSJEiImfPRny+U3aKm7iJh3jIATnwzDG3ZIyIIFulsqyN6f5g92AdJ1a9LxL6KH4GYBixZGJE1GITJyydqbQxqg+VUpaWDo+qStTTqT1KKTtgFHqP+xiJyCQR8RIRL3d3dwu/Ph4U
Kwbbt+t1B25uuoh3sWLw/fd6TmkCkgvwB97Ag7NsIzfv0pzmjGc8FSrobUdn7GtDhxULCQs8Bwtzwv0dQHXglk37bhhGohHbOGHE0ttv66KsGTJA1aqwdGnM7SuRHjs2s52quNOONznGetZzkYt44x3paTXhu/1Mh6JtYfe3sHNAgpyhaxhGgmbiRALQooW+bbl4Ed59V1f1AChLWXawgxSkoAIV8MPv6THp6M4dZlGe7WSkKou5EfXJ3+0PFYbB8QWw+n29g6hhGEmepUmlF/aUUUo5oItuW+ICkD3S3z3RU2CfcAGKAduUUmeAd4GVSikvC89vG3Z2et3BsWPQqhU8fAgDBkCJErBnj6179wx3wBeoQjpOsIFC1KMb3RjMYEqWEnbsgA2nm/LenFU8vnEVFmSBO38CVSDyUgjDMIyoxTZOGHEgb17YtQveeguaNdPLHWLyNqnJxCpW05QsfEpefNnCZm5zm/KU52jklShPdvt5oyPsHQI7+pnEkmEYr8LEiQSiUiXYuVMvi/P2hs3hpdGLUpRd7CIb2ahJTZYS8XTChZY8YinFOURhKjCTi1Gf/O3eUPlnOLlM7yIamrAethuGEfdiTCoppXYopbYDTkqp7ZFfwHFgl4Xf8weQXymVWymVEmgOrHzyQxH5T0QyikguEckF7AEaiK13f7NUpkwwcyZs2QIFCugkU+3acDVhPXhxAVYD7+PMUZZQhNYMZCA96UnBwmH4+8PB/2pSe/IGQm8Hwfz0cPsIUJFnc4CGYVtibmSfsvW/RRzGCSOOZMyow1H9+tCjhy4DGBYWffuiOFKE+SyiHdkYhAdz8WMrj3lMBSpwgAMRjZUdVJ8AxbvBH8PAr5dJLBkJkq2vjQmJrf8tTJxImIoW1c/Ac+XSty2zZunPs5OdHezgLd6iGc2YwISnx6SiAcI6cnOO8ngzgVNRn7zkJ3pnuNOrYUUjCHkQ/wMyjFdg6+tiQhPbf4+XzVSagt5jPhSYGuk1BegKNLHkS0QkFOgObEAX4FsoIoeVUt8ppRq8Zt8TnipV4NAhqFZN7wrXu7ete/QCR2Au0B0HjjCNInzOGMbQilZkyxWCvz9cTuGN9xhfHt0NgQWucOMsuuz3Wdt23jAAJycnbty4YYIBOgDcuHEDJycnW3YjTuKEEbee7PbTubMuzNqmDTyKYRVCHlJQjinM4DOy8zNpGc0OtuKMM5WpzG52RzRWdlD1V3jrE9g/Uu8MZ/5/NBIQEycimDhhxCRbNtixAypUgNat4X//05fzDGRgC1uoQx260pVBDELCK5c4UpmUbMWdIBpSnpH8Q5T/pxXvAjWmwpkNsLw+hJhNgIyEwcSIZ8VFnFCW/GMqpQqJyLHX/pZ44uXlJfv2JcDJTKdO6fR/cDD4+kLlyrbu0QsE+B4YiFCQHznOl9SmNotYRPDN1NSuDQ8v/MOeXtVwdgyGpqGQ2Q3YAuS3beeNZC0kJIQLFy7w8GHC3CTS2pycnPD09MTB4dlVBUqp/SJitSXEJk4kTCL6JuHrr/XzjiVLwNU1+vY3EBbxPV0YyDkaIwylGnUJJJCVrKQKVZ49uV8vnVgq3g2qjtEJJ8OwMRMnnmXiRMySe5wA/dChfXuYMwe6dIExYyBFCgghhI50ZAYz6EIXfuVX7MNXK4ZxhCCqE8YDJrKOPpSOeh3jkVmwvi1k84bGqyFlGmsOzTBeYGLEi2IbJ6JNKimlWonIrPD37aM7gYj89mpdjjsJOggMHgwDB0LBgroCnqOjrXsUpYlANyAXkzlDF0pTmtWsxuFOeho2hAv/BLC/X1VcUt6CxvaQzRnYDBS1bccNw4iRNW4WTJxIPKZPh44d9fOOtWsha9bo294BpvMLPejJOaqTknFUpzEBBLCEJdSlbkRjEV1b6Y9h8GZnqDbOJJYMI5EwcUIzcUILC9MPIIYO1cun582D1KlBEL7kS37kR5rQhDnMwQk9o0H4l5tUx5HLjGY5fahGyqhOfnQe
rGsFWd6FJmvBMYanG4ZhJBhxkVRaKyJ1wt9vjeZ4EZEq0fws3iXoIBAcDG++CSdO6B3hvv7a1j2K1lKgBZCJpVylBQUowAY24PYgKz4+cND/HAe+rkr6lBehoTPkVOgNPEratuOGYUTLSjcLJk4kIhs2QNOmene49euhcOHo2z4EJjCTHrQjkHdwZja1aM4BDjCHOfjgE9FYBHb2h70/QLGPoMYkk1gyjETAxAnNxIlnjRun6/F5ecHq1fBks+1RjOJzPqcSlVjOctKSNvyIQK5Sk7Qc5yfm8zmNSRXViU8shjUtIHMpaLIenNJZaUSGYbyuWCeVEoMEHwR8ffW+zk5OcPgw5Mlj6x5FayvQEHDCl3s0JBMZ2cQmcobko3Vr2LrmMge+rk5mxxOo+q6QNwRYB5SxbccNw4iStZc1JFQJPk5Y2f79ULeuXuqwciWULx9921BgLMvoQnNuUIg0LKYeH7GTnUxhCu1oF9FYBHZ9C3u+g6JtocYUvVucYRgJlokTmokTL1qxApo3B09PWLcO8uXTn89hDm1pSzGKsY51eOARfsRNrlCXjPzOj0zlY9o+TTk94+QKWPUeuL8JTTeCc3orjcgwjNdhaZyI9lGiUsrOklfcdjuJqVIFWraEhw/h448TdCHTyoAfYEcVHNjKbe5SnvIccTjI7NnQoLkHRb/bxrn7byIrb8MxJ6A6sM2m/TYMw3biOk4opWoppY4rpU4qpfrF0K6ZUkqUUsn+ZuhVlSoFu3frJ8/VqsHSpdG3TQH0oDGTWYMrp3hAHdYykWpUoz3t+ZVfIxorBeUGQdlBcHi6rp8R9jieR2MYRkJn7icSp4YN9bPxW7egTBnYu1d//iEfsopVnOAE5SjHqae7v6UnM5u5RlW+oh2T+ZlrUZ04X0NouByu/wOLqsD961YakWEY8Smmi3goEBLD68nPjZgMHw7p0um1BkuW2Lo3MXoL2AlkwItgdhBGSipSkd32/kycCO26ZuCN77ZwIqgMsuYq/O0K1AbW27bjhmHYSpzFCaWUPTAWfVEpArRQShWJop0L8AmwNw76nyzlzg07d0LJktCsGfz6a/Rt7YCPqcZcNpOS6wRTnSUMozGN6UEPfuTHZw8oMxDKD4Gjs3X9jLDQeB2LYRgJnrmfSKTKlIFdu8DFRe85tGqV/rwWtfDFl//4j7KU5S/+Cj8iNR6s4jJN6MWnLOBbLkS1L1yeOtBoJdw6Dosqw70rVhuTYRjxI6akUm4gTwyvJz83YpI5s956B6BnTwgKsm1/XiIv4A/kpxC38Cc1HtSgBuvUWn76CXp/7cpbQ9Zz4FYN2BgIf2YAGgDLbNtxwzBsIS7jxDvASRE5LSKPgPnoVbnPGwwMQ5f9MV5TxoyweTM0aKBrZ/Ttq4u0RkUBnXiX1fgRQiihVGMGffmAD+hHPwYw4OlW0wCU/gq8f4Rj82DNB/DY3C8aRjJm7icSsQIF9OzWokWhUSOYOFF/XprS+OOPI45UpCJbeVIuyxEPFnCZdnRnEBv5jJNEEVxy1YDGa+D2aVhYCe4GWmtIhmHEg2iTSiJy1pKXNTubaHXqBKVLw6VLeke4BC4LeilcGXJwiR24U4SGNGSumsOAAfC/n1Lx7tAV+F9pDFsvwh4PkGboe0DDMJKLOI4T2YDzkf5+Ifyzp5RSbwHZRWR1TCdSSnVSSu1TSu27di3KCfgGkCqVnkDbtSsMGwatW+taS9FpxZvswJ/bpMaO6oyjAx3owPd8zxd88Wxi6Z0+UHEEnFgEa5rD4xhObBhGkmXuJxK/zJlh61aoWRO6dIEBA3RFj0IUYhe7yEEOalGLxSwOPyIFHkzhCp/Snp/5gw78TRSzVnNUgabr4c4FWFhR/2kYRqKUIrofKKUmiUin8PezIKr5iyAireOpb0mHnR1MmKCLWYwZo39zL5mwd05LB2wA3sedVfiSm0a0pCW3uEXPnt1xdXWkSqeFrOzWnlrMgpDsUL4FqIdAW9t23jAM
q4jjOKGiOjTSd9kBo7DgAiMik4BJoAuwWvDdyZa9PYwdq4uxfv01XL2qE00uLlG3f498rMWf3NQgN3UYxhJSk5pRjOIudxnPeOwJL9Dt9TnYpYCtPWGVD9RfCPZRbjZtGEYSZe4nkoY0aXTx7i5d9KbWFy7ApEng6eDJdrZTn/r44MM4xtGFLoAdmRnJVdLRgm9ZSRAPmcPbOD57Yk9vaLoBltaGBRXBxxdcc9pkjIZhvL5ok0rAv5Hen4zvjiR5JUro5W+jRukr8u7d+rf5BMwZWAp0wJUZrCUPLehBD25wg4HtBpImTQoatpzOnHapaMZEnViq3C48sdTFxr03DMMK4jJOXACyR/q7J3Ap0t9dgGLANqUUgAewUinVQETidtueO3f0nsqOjvrl5PTy987OkD27LlidyCgFX30FWbNChw5QsSKsXQseHlG3r4MnvvgRTE2K0IivmU0a0jCEIdznPtOZToonv16U/EQnlrZ8DCubQf1FkMIx6hMbhpEUmfuJJMLBAaZM0aFu0CAIDITFiyF9mvRsYhM++NCVrlzmMt/wDQpFJr7hBmlpwGds5g5+LKUiqZ89cbay0GwTLKkZnljaCmlz22aQhmG8HhF56QvweJXPrfUqVaqUJCpBQSLZsomAyLhxtu6NxR6LyBcigoRIbmkrCNJDeshjeSzr1ok4OYXJtLafiQxHZH12kceIyCgb99owkjdgn1jxehzbOIF+yHEaXV8jJXAQKBpD+22A18vO+1px4t9/9XX6VV9lyohcu/bq35eArF0rkjq1SK5cIsePx9x2t9yWnVJeQsVOLstU+UF+EARpLI3loTx8tvGB8TpGLK0rEvIg/gZgGIbFElucCG9bCziOTlD1i6FdM/SsqPiJE8nc5Mki9vYiJUuKBAbqzx7JI2kjbQRBukpXCZXQp+1vym8SKnayU8rJWrkV9Ukv7xP51U1kYnaRWyetMArDMF7G0jhh6RaeJ6L5/IiFxxug1xP8/LN+/+WXcPmybftjITvgJ2AoKfiXqeTkc8Ywhta0pmqtENavV/RYPIIxfwyAf87Duuzw+DPgBxv33DAMK4pVnBCRUKA7euXtUWChiBxWSn2nlGoQR320TJo00Ls3fPIJdO4MbdpA8+bQuDHUrg1VqkC5cnpJc7FikD8/uLrqGaje3nD+/Mu/I4GqXVvXzrh3D8qWhT17om/7LmlJxQb8qE5mPqINzvzMzyxjGY1oxAMeRDQu3gWqT4TTa2BlEwg1ddYNIxmKVZwwu4QmHB066OVwx47pWHHiBDjgwDSm0Yc+jGc8zWlOMMEAuNGO+yzgbX4nC5VZytUXT5q5FLznCyH39Yylm9H952IYRoJjSeYJuBPFZ67AdUuOj69XonyyEBYmUqeOfqr9wQe27s0rmyIiSsIke/gT6bpSV+7JPdm7V8TNTWRIw6H6afRyT5EQRGSAiITZuNeGkfxg/SfQyTtOXLok8sYb+tqePbvI0aPW+d54EhAgkjeviLOzyMqVMbc9Jg9ltTQVEeScDJIpMlmUKKkklSRIgp5tfGiKyHAlsrimyKP78dZ/wzBeLrHFCaAMsCHS378Evoyi3WigHvE5o/XxY5HOnUX27Xv1Y5OQvXtFMmYUyZBBZPfuiM9HyAhBkCpS5Zk4cFfWywNxlmNSQGbLuahPevWQyFh3kfFZRK4n7lhqGImdpXEixplKSqnzSqlzgLNS6lzkFxAILI+j3FbyoRT8+quuxTF3rt7TORH5CFiC4ipfkoWJrGUtNalJgXdus20b/Ly7L19u+gVOXoAV2SBkMNCHaOoyGoaRyJk4ES5LFvDz0zOYzp+H8uXhjz9s3avXli8f7NoVsY30lCnRty2II8WYzxLakp1vqMZR5jCbHeygBjW4xa2Ixm98BDWmwJmNsKIhhDyI/sSGYSQJcRgnEs4uoTNmwMSJ8M47embr/fuvfo4k4J139CTddOn0JN6VK/Xnn/M5M5mJH35UpjJXw2cmpaYm
io14cpnylGcyAS+e1P0NXVdJwmBhJbh+2HoDMgzjtbxs+VtLoDXwCGgV6dUSKCkiHeK3e0lU7tx6P06Abt3gYeJaBtAYWA/cpRMZWMBe9lKZyni8eZUdO2DOkR50XzUVOXMJlmaFR8OBHkCYbTtuGEZ8MHHiCTc32LgR6taFGzegcuVE9+AgskyZ9FK4GjWgY0ddmFWieT6QkxSUYyqz+YScjKQMW1nCQv7kT6pQhWtEuml7oz3U/A3OboblDfRSB8MwkrK4ihOW7hL6xctOJCKTRMRLRLzc3d0t/PpI3nsPPvtMvx8+XC+F3rTp1c+TBDx5CFGsmF4lPmGC/rwVrVjBCo5whPKU5wxnAHCkPCnZihv3qY83Yzj04qPnjEXBZxsoO1hYGa4dsuKIDMN4VTEmlUTET0S2ARnD3z95bReR49bpYhLVqxcULgwBATBsmK1788oqoecUK97DmVUc4wTeeONU4Bw7dsD6C+1pv2gOYReuwOIs8HAs0Al4bMtuG4YRx0yceE6qVLBsGbRsqQsT1amjt8dJpNKk0U+e27aFb7+FTp0gNDTqth7YUYfRTGUAuZhCcRayiiUc5zgVqcilyJv5FWsLtabDuS2wvD6E3Iv/wRiGYRNxGCdeZZfQM8C76F1CvWI3giikSQMjR+rCc8WLw7//6gx827b6oUIy8+QhRK1a0LUr9O+vH0LUpS6b2cw1rlGWsvzN3wA4UJLU7CAlDrSkIiPZ/eKj5wyFdGLJPiUsrAJXD1h7WIZhWEhJdI8dn2+oVAnAG8hIpCcFIjIwfrr2cl5eXrJvX9zuJG1Vfn5QqZLejvrvv3Wx10QmAKgBXGEXdtTBDRc2sYm0gYWoXh2KpFzOvJbvY5/RDZpdgVQfADPgyXbThmHEC6XUfhGJ+1+kY/5OEyeeCAuDL76A0aP1sufx43XR70RKRE+wHTIE6tWDBQt0/iwqd4A5DKcLvTlLHc7zKbVpQmYys4Ut5CRnROMjs2F9G/CsAI1Xg0PqqE9qGEacS2xxQimVAl3suypwEfgD+EBEolwfpZTaBvQSkRiDQKzjREgIjBihM+/BweDurq/9LVro638yEhqqk0pTpug9LiZPBgcH+Id/qElN7nOf1aymHOUAEM5ynWo4E8hYlvMF1V68Q7h9Ss9WCrkLzTbpgt6GYViFpXHCot3flFKdgJ1AFaAv8AZ6amm+2HQy2atYUV9xg4OhVSu4c8fWPXpl+QF/IA9lCcaPu4TgjTeXsuxn2zY4pRrRYOpKHl//Dxa4w925QHP0DGjDMJIKEyeeY2enn2IPGaIzMl26RLxPhJSC77+HsWNhzRpdO+P69ajbugBt6cVYJpKddeRgCFtZwQ1uUIEKnORkROMiLaH2LLiwHZbWgUd3rTIewzCsL7ZxQhLSLqGROThAv376AXGlSnDtGnz4oV4KffaszbplCylSwKRJOr82YwbUrw9370IxirGLXWQiE9Woxmp0yStFTjKyg3vk4VPqMoLl4fvFRZIuL7zvByldYVFVCPzd2sMyDOMlLEoqoSst1xKRxsCD8D+bASHx1rPk4qefwNMT9u7VezknwsRSNmA7UIri/McOhNRUpjJHMm7H1xf+c6tJtfHrCbn5ABZkhKAl6P98XggbhmEkXiZOPE8p+OorXWBCKb0e4LPP9CymRKpbN1iyBA4e1DXJz5yJup0T0IlOjGcOWdhJZvrhxzLuc58KVOAoRyMaF/4A6syBi/7hiaXEFwcNw7BIrOOEiKwVkQIikldEhoR/NlBEVkbRttLLZinFqfz5wddXT9NJlw7WrdO7Hfz8MzxOPuUflIJvvtGzlDZv1nm2K1cgJznxx59iFKMRjZjBDN0eDzKzjVu8xRc042dm80KlvbS5dWLJKT0srg6Xdlt7WIZhxMDSpFImEdkR/j5MKWUnIuuA+vHUr+TD3R22bdOJpZ07E21iKT2wGahGfm7gjzOe1KQm/mnXsGEDOOSuiPcvm3h4OxTmu8Gt
VUBDwOz8YxhJhIkT0encGRYuhJQp9c1FmzZ6uUQi1bixrkd79SqUKaMTTFFxALrSgsksw51DZKQ721mEIFSkIgeJdGCh5lB3HlzaBUtqm8SSYSRNST9OKAUffQRHj+pi3vfuwaefQtmyeiZTMtKhAyxfDkeO6OEHBIA77vjiS2Uq05a2DGd4eOv0eLCJq1SgF60ZxwT+e/6ErjnBxw9SucPiGnDB38ojMgwjOpYmlS4opXKFvz8BNFRKeWPWMMWNvHmTRGIpDbAKeA9PrrKddOFPIlaknsvKleDx1ru8O3Ir94PsYUFauLEBqAeYAq2GkQSYOBGTZs30urHUqWH2bGjUKFFvQV2+PPj766UOFSroh/NRsQO6Uo+ZrMWVM7jRke3MwwknKlGJ34m0jKGgD9SbD4F7YEktCA6yylgMw7Ca5BMnPDz0w4QVKyBbNvj9dyhZUmda9uxJtEuhX1W9erqAd1CQTiz9/ju44MJqVuODD73pTR/6IAjgQlbWcJm69KIrMxjGC6usXbPrxFKarLC0ll46bRiGzVmaVBoGFA5//x0wG/AFBsVHp5KlJJJYcgTmAZ3JyGW24E55WtKS35zGsWgRFK1UAq/hfty544wscIGrW4Ha6NKuhmEkYiZOvEy1avq36wwZYO1a8PLSGzYkUkWL6m2kPT31jj8LFkTdTgEdqcJiNuPIddLRmm1MJz3pqUY1/In0tLlAM6i/EC7/bhJLhpH0JL840aCBnqrTrZteAjd1qp7iWayYrrt39aqtexjvSpfWtzYuLlC5sg5/jjgyl7l0oxs/8RPtaU8ooYAzWVnKJZrzCX1ZQn8CeS4B55JN7wrnkl3PbD2/zQajMgwjMouSSiIyPXx6KuF/ugFuIjI+PjuX7CSRxJI9MB74ClcCWYsH9fiYjxnmMIQZM4Vy9YtQcth2bt9JiyxMBZd3oveQu23bjhuG8dpMnLDQ22/rKT758+vlEZUqQevWuuBEIpQ9ux5O6dLQvLle3RcVBbTlayOJyQAAIABJREFUXdaxFeEh6fiArUwgK1mpSU02szmicf4mUG8BXPnDJJYMIwlJtnHC1VXvcnD0KPTuDZky6UTTF1/oWUxNm+pMSxKuu1SggH4IUaiQzrP99hvYY8+v/Mq3fMt0ptOEJjzgAeBAVmZziQ50Zggb6ckZnqtFmCYL+GyFtLl0Lb5z0UyXNQzDKqJNKiml7KJ7AaHA/fD3RlxKIoklBQwBRuBMIEvIQkv6059+9r2ZOEmo80F+Sg7bzvW7mZBFTnDxD6AacNO2HTcMw2ImTrymQoXg0CEYNAgcHWHWLChYEMaNS5Q3FW5usHGjrrX06afQt2/0tcg/oAT+bOc+KUjH+2xkFHnJSz3qsYY1EQ1NYskwkgQTJyIpWBCGDYMLF2DZMr02LCwMli7VO8XlzKk3dDh1ytY9jRceHvoWp2pVXXZq8GBAFN/wDWMZy2pWU5Oa/Md/gD1ZmUQgn9OGMezlI44R+uwJU3vAe76QNg8sqwdnt9hgVIZhQMwzlULRuzFE93rycyOuPUksZc8eu8TSxYt6D2gvL2jSRG/DcO5cnHc3Jp8D03DgCjPIRHdGMIKOqgPDR4fSonNu3hq6nctBnsgSBzh3CL3L7DWr9tEwjNdm4sTrcnKCgQPh8GG9duy//+Djj/WUnz/+sHXvXpmzMyxaBF276numtm2jr0XehEIcZAfXcCMDPqzhe4pRjMY0ZglLIhrmbwL1FoYnlmqaxJJhJE4mTjzPwUHX1Vu1Cs6fhx9+gHz59O/tQ4bo95Ur6/p7D5LWhjYuLnrYrVrpENi1K4SGQje6MZ/57GEPFanIZS4DiiwM5wqDeJ/pBNCcg8/vHJ06M/j4Qrp8sLwenNlok3EZRnKnJJpCcUqpnJacQETOxmmPXoGXl5fs22e9nUKt7tQpHVTOn9d7N69bp6/GMQkN1VNoJ0/Wf0b1uLhIEX0TU7s2eHvrJ+XxbAXgg+DKN1xnMM1oxmxm
M3yII2N+vMzvvauR3SUA1VAgdwH0XnIe8d4vw0iqlFL7RcQrnr/DxIm4IKKfWvfsqZ9gKwVduuibCzc3W/fulYjo+6P+/aFmTVi8GNKkibrtFi6Rherk4TTXmElzRrOXvcxgBh/yYUTDgGWw2gcye0HTDeDoap3BGEYSZ+KEliDihAhs365rLi1eHJFMypABOnbU2ZccOWzbxzgkAl9/Df/7n14ON28epEoFG9lIE5rggQcb2Uge8gBwlVFk4nM2UxMXllKaVM+e8P51WFwVbh6Hhsshdy0bjMowkh6L44SIWPxCz2zKAti9ynHx9SpVqpQkeSdPimTPLgIi5cqJBAVF3e70aZGvvxbJmlW3BREHB5H33hNZuVJkwgSRRo1E0qSJ+DmIpEolUreuyJgxIgEB8ToUXxFxERE3GSEIUlNqyj25J6NGiWRIdU1OflNCwkY6iASkFJGCInIhXvtjGEkZsE9scF02cSIW7twR6d1bJEUKfX12dxeZMUMkLMzWPXtlU6aI2NuLeHmJXLkSfTt/uSZ/SkkJFgc5JzOlslQWJUomy+RnG55YKjIyhcicd0Ue3o7fzhtGMmHiRAKNE7dv69/b33pLnv6+bmcn0rixiK9voowJ0RkzRkQpkTJlRK5f15/tkT2SXtKLh3jIQTn4tO11mSKPRYm/eMtWiSIO3L8uMqOEyKiUIqfWWGkEhpG0WRonLL34uwIz0Vt+hgHBwAwgrSXHx9crwQWB+BJdYik4WGThQpHq1eWZRFGBAiI//RT1b/LBwSLbton07StSvPizx4FIvnwi3buL/P57vAxln4hkFBFXmSJKlJSX8nJbbsvEiSJuqW7Kkf7vSNhIe5FjTiKSV0TOxks/DCOps/bNgokTcejvv0W8vSOuyxUqiPzzj6179cpWrRJxdhbJm1eHsejsk9uyW8pJqNjJWZkotaSWIMgYGfNsQ5NYMow4ZeJEAo8TYWEiO3eKtGgR8bABRIoV00mnu3dt3cM4sWSJiKOjSMGCIv/+qz87LIfFUzwlraSVHbLjadtbMl8eSQrZJ6VknVx78WT3b4jMLKkTSydXWWcAhpGEWRonLC2M9wuQGigGOANvAKnCPzfiW968ehvqyDWWevfWxbx9fGDTJr2ErWVLvT31sWPQq5feXeJ5KVNCxYowdCgcOKDXb0+bps+TLh2cPAm//grvvAPly+spuKGhL57nNZUCdgCufIQT89nDXipTmcadrvHLRDfKjNjEoatlkDWP4PAloCLwb5x9v2EY8cbEibhSrJi+ls+YAe7ueklEiRK6kOsPP+if3b9v616+VL164OsLt25B2bLw559RtytFWtKwgR1UJQedGU9NGtKQHvRgBCMiGuZvDPUWwZV94cW7/7POQAzDiCsmTrwKpfTFc+5cXRP1m290tet//tFLpLNlg88/T/SFvZs00bcyV67o4R48CEUowk524oEH1an+dCOHdLzPQ5ZTlMNkpyLLufTsyZzTw3ubIeMbsLIJnFxpgxEZRjJkSeYJuAykeu6zNMAVS46Pr1eCfbIQXyLPWHryeuMNkV9+Ebl5M/bnDwkR2bVL5PPPRdKmjfiOnDlFhg/X03HjyBkRyS8iKWWtpBQnKSSF5Lycl0WLRFyd78qeXlUlbLgSOZhKRDxF5EScfbdhJAdY/wm0iRPx4eZNka5d9fqAyNf+FClE3n5bpGdPPWP1QsJdLnz0qEiOHHr19aZN0bcLkIeyThqJCHJaBouP+AiCDJbBzzY8sSx8xlJpM2PJMGLBxIlEGCeCg0XmztXrxZ7EA6V0KYt16xL10rh//hHx9BRxddWr/ERErspV8RIvsRd7mSkzn7a9J1vlnqSRAMkr8+XfF0/24JbI7LdFRjromGEYxmuxNE5YOlPpIeD+3GcZ4fkS/Ea8erIrXLVqei/OPXt0Or9Hj7gp5poiBZQpAyNG6GKxY8boHSjOntUznzw94ZNP9GymWMoJ+ANFqM1jNnCWi5SnPCWa
nWTuotTUHL+KHedrw6b78Oct9Iyl47H+XsMw4o2JE/HBzQ3GjdPX4Tlz9A5xJUroTRj++AN+/lnPNPX0hFy54IMPYOxY+OsvfbuRABQqBLt3Q+7cUKcOzJ8fdbt8OFKEhazgQ3IzgP+Rm1a0ZAAD6E9/hPDx5G8UPmNpf/iucGbGkmEkEiZOxFbKlNCiBezaBfv2QZs2+rM1a/RKhnfegS1bbN3L11K0qB5W9ux6P6GFC8Edd3zxpSIVaU1rRjMagFRUwo7NZOYGZfFmxvP3CE7poNkmyFwSVr8HAUttMCLDSEYsyTwB/YETQBegdvifx4H+lhwfX69E9WQhsXr8WBfGqFLl2SciDRrESbHA2yJSXkSQfZJaMkhmySyH5JBs3iyS1iVYNnzcWGQ4Ir+nEZHMInI4DgZlGEkf1n8CbeKENQUF6Wk/334rUqOGiIuLyPM18ooWFZk2TT/ZTgBu3dLloUBk9Ojo2wXKY1konUQEOSU9pIN8JAjSS3pJmESKOQHLzYwlw4gFEyeSSJy4elXkhx9EPDwirv/Vq4vs32/rnr2Wmzd1WUGlImLFQ3koTaWpIMhX8tXTWBAsB+S2uMtlySSTIhX1furhfyJzyoiMsBc5vsiKozCMpMHSOGFpEFBAe/Q+70fC//wIUJYcH1+vRB8EEpuDB0Xat9fV9J4EreLF9U1LSMhrn/aeiNQWEeSwuEpWcRM32SN7xN9fJH26R7KiU3OdWNqdRkTcReRQ3IzHMJIwG9wsmDhhS6Gh+ho9bpxIy5bP3lxkzSry449xuoT5dT14oDcwApF+/aJ/LnFdwmSOfCoiyEn5SLpLV0GQ7tJdHsvjiIYmsWQYr83EiSQWJ+7eFRkyRK8fe3L9f//9eN/dOT48eCDSpIkeQp8++hl3qIRKJ+kkCNJJOkmohIqISIgckxuSTW6Im/wqe+WFsBIcJDK3nE4sHZ1v9bEYRmIW10kle0vaWfuVZIJAYnPlisigQSKZM0cErSZNYpVYChaR90UEOS3pJI+kltTiK76yb59Ixgyhsqh9K51Y8ncRCUsvIgfiajSGkSTZ4GbBxImEJDhYZPp0vUvQk+u0i4vIF1+InD9v066Fhop07qy71LatyKNHUbf7T8JkmugJDCflA+ktnwqCdJSOUSSWHMITS/9ZZQyGkRSYOJFE48T16/pa/+QhcIoUIt26iQQG2rpnryQ0VHcbRFq10rEiTMLkK/lKEKSZNJOH8lBERB7LabkqeSRI0sho2RY5QmjBQSLzvEVG2IkcmWP1sRhGYmVpnLC0ptJlpdQ4pVQ5C9sbSVmmTDBwoK7xMW2a3jVu6VLo1Om1a3ikBOYAXcjNbfxJSS5qU5uLpVayxdee7qunMefQR7DnDuwIAakMRLOVkGEYtmDiREKSMqWutXHoEKxdC5Urw507umZe7tz6Z3//bZOu2dvD+PEwaBBMnw6NGsG9ey+2c0XxHoOZyv/Iy1y6cIaB9GUyk2lLW0IJ35k0X0OotzC8xlItCA6y6ngMw7CYiRPWkCEDDB8OJ05Au3a6Bt+4cbo264ABEJQ4rpH29npD6u+/h1mzoH59uHdXMYQhjGQki1lMPepxl7vYkZuMbOc+2elELUaxnseRT5bSBZquA88KsK4VHJllq2EZRpJkaVKpBnAXmKeUOqOU+p9S6o147JeRGDg6Qtu2ujigs7NOMPXq9dqJJXtgHPAlWbiFH6l5kyY04Z8357J1mz19Nk3itz+7wh93YFsoSBXgjzgckGEYsWDiREKklC7e6uurC3v7+OgbjJkz4c03I372mtft2HRr4ECYMAHWr4eqVeHGjRfbpQY+pB+T+IU8LKctf/E/BjCLWbSkJSGE6Ib5G4Unlv6ApbXh0R2rjscwDIuYOGFNOXLAb7/phwsNG8L9+zpDkycPjB4NwQm/PrpS8PXXMHUqbN4MlSrBlSvwGZ8xgxlsZStVqcoNbqDIRib8uE1hetCA0Sx5EiE0h9TQeA1krwTr
2sA/020yJsNIiixKKonIXyLSR0RyAG0AN2CLUupQvPbOSBzKloVly8DBAUaOhP/977VPpYAfgGFk4CZbSIs3LWmJX+EJ+G2349sdYxm/tyf8eQe2CEhVYHdcjcQwjNdk4kQi4OUFCxZAQAB07w6pUkVkdLy8YONGq3epc2dYsgQOHIDy5eHcuRfbOAHt6MFEppKTTTRnG6MZzAIW4IMPwU82jsrfGOrOh8C9sMQklgwjoTFxwkaKFoXly2HnTvD21hn8zz6DAgXgu+/g6FFb9/Cl2reHFSvgyBEoVw5OnYLWtGYpSznIQSpQgYtcROFOFny5jhef4sOvzORh5BM5pIJGqyBnNdjQHv6eaqshGUaSYulMpciOA0eB80CuOO2NkXjVrAmzZ0c8Uhg/Plan6w1MxoWbrCUddehKV5bl+4nt2xXD949i1M4+cDAINioIqw74x8kwDMOIEyZOJGR58sCYMTqD89134O4Of/6pr+NNmuilzVbUqJHOZwUGQpkyUa/KcwA60J7JzCUbu2nEKiYylOUspzGNecAD3bBAU6g7DwL3wNK68OiuVcdiGIbFTJywtrJlwc8PVq+GYsV0DPjmGyhSRCeevv0WDh+2+sxVS9WtC1u3wu3beij790MDGrCBDZznPOUoxwlOAOnIykYCqcRntGEi43kmEjikgoYrIFcN2NgBDk2y0YgMI+mwKKmklEqnlPpIKbUFOAVUAn4EMln6RUqpWkqp40qpk0qpflH8vItS6m+l1AGllL9Sqoil5zYSCB8fvZYB4OOPYd68WJ2uA7AQZ+6wlHT40Ic+/JZrIH7bYcLhoQzdNgD+CYINdhBWE/CL9RAMw3g9cREnDCvLkEHX1zh7Fn78EVKn1rNOCxeGwYPh4cOXnyOOVKgAO3bo997eEe8jswc60pzpLMGDA9RmLtP4ifWspz71uUd4YaaC70HduXBpFywziSXDSChMnEgAlNLZmQMHdL29du3AzU1PARo0SCebihTR65P//jvBJZhKl4Zdu/RE24oV9QOJilRkG9u4z33KU56/+AtIgydrOE89etKNGfzE7cgncnCGhsshdx3Y1BkOxO5huGEke5ZU8wbuE7HtZ1pLjnnueHt08MiDrsl8ECjyXBvXSO8bAOtfdt4kt1tDUjF0aMRuE2vWxPp060TESUIlrbQXBPlMPpNLgWFStKjIN7UG613hVrmKhDqJyJZYf59hJAVYf1efWMWJ+HqZOPEKLlwQad5cnu4WlzevyOrVVu3CmTMiBQvqTYuWLYu6TZiITJWNck+c5aIUlPkySuzETrzFW4IkKKLh0fl6p5/5FUUe3bVG9w0jUTFxwsQJEdHbqq1fL/LRRyLp00fEABApUEDk669F/vpLJCzM1j196tIlkeLF9a3G7Nn6s2NyTHJIDnEVV/ETv/CWj+Sc+IgIMlEGyDV5bgwhD0WW1tf3En+OseYQDCNRsDROWLr8La+IVBORqSLy32vkrt4BTorIaRF5BMwHGkZuICKRtyJIDSSs1Lhhub59oU8fCA2Fpk2jfuT8CmoBG7EnjMm40JNRjGKgRye2bHvMyiv9+XLdMDgeBGtSwuM6gPXrghiGEes4Ydhatmx6hunWrXopxKlTUK+e3nLn1CmrdCFnTl32o0QJHT6mTHmxjQLaU50lbCANl6jILyxmNLvYRQ1qcPvJ8+hC70Pt2XBxByyrByH3rTIGwzCiZeJEQuTgoJc/T5kCly/r6T+dOkHGjHoHuSFD4K23oGBB+PlnuGv72Z9ZsuiVfN7e0LKlLulakIL4409WslKTmqxiFeBAduZynvZ0YjBr+ZzAyLeYKRyhwWLI1wh8e8D+0TYbk2EkZpYW6g6M5fdkQ6+ZfuJC+GfPUEp9rJQ6BQwDPonldxq2NHQodOyol0/Uqwd//RWr03kD27AjJaNITX+mMIXPMrZi/ZYQfO/05vNVoyEgCFY6Qmh9YPX/2bvvuKyqP4DjnwsCCrj3TnNrjlTce+He
A1dpbkvNNDO3lpmaOVJz5c6RA7e4Zzlw5F7hwkkuFBQEvr8/Dv1Ec5ACD+j33eu+4rnPfc49x5c+X+4Z3xMlzVBKRU4UxAkVW5Qvb76zf/wREiUy+Tfy5jXLIQKjv2MmeXLYvBmqVjVhZPjw56/AaEUZ1rIZR+5Shu9ZyVgOcOD/OwEBkNsDqs8F3x3gWVs7lpSyIY0TcYCDA1SpAlOmmER3mzZBp06QKpXZ5KFHD7Or3IABcPOmTauaODGsWweNG8MXX0Dv3pA+LCM72Uk+8lGf+sxlLmBPRqbhSzdaM5YddOAioU8Ksnc0u4dmbwjbPgfvH2zWJqXiqtdJ1P06rOec+9eviCIyUUTeB/oA/Z9bkGV1sCzL27Isbz8/vyiupooylmWSdTduDP7+ZgTkzJk3KvJDYCcWSRhGAr5nAQton7QRqzY+Yn9od7osmww+/rAiPjyuByyOkqYopdQ7x8HBPDycPg2tWpmtp4cNM7k2PD2jPc+GiwusXGlGoPv1M1UJC/v3dc0oyja2EcpjSjCUtYzlBCcoT3lucMNclLs5uM+GS1vBsw48fhitdVdKqbdCvHhmd9DJk+HqVfPdX6oU3LkD33xjOpc6dTKdTTbi5GQm2H76KYweDR99BIkfp2ALWyhHOVrTmnGMA+zIwFh86UdTpnOYlpzl8ZOC7B3MJg85GsP2XrDve5u1Sam4KKY6lXyBjBFeZwCuvuT6hUC9570hIlNFpIiIFEmZMmUUVlFFOXt7syNc1arg52dGPi5ffvXnXiI3Zp+3dHyJI5NYyUqaJ6rFkvUPOB2/E58snoFcuA/LXCCoGTArChqilFLvqDRpYM4cs4y5QAGT1Lt+fahe3WRLjcbOJQcHmD0bevaE8eOhRQsIDv73dQ3Izz528gAn3OiPFz/ggw/lKMcVrpiL8rQE91lwaQusqKsdS0op9V/Y20PdurBrlznq1jWDDVOmmGVxjRrBvn02q9r48WaV3rx5ZsW29SAha1hDferTgx4MZCACZOAbrjCCuizEh4acIMKGFPYOZpOHnM1g51ew51ubtEepuCimOpX2A9kty8piWZYj0AxYGfECy7KyR3hZE7Bdt7eKOo6OsGyZ2Sf60qUnHUxv4D1gJ5CdzsRjDlvZSgOXasxbfZdrydviMe9XwnwDYUlCeNgGmBQFDVFKqXdY6dLg7Q0//QRJkoCXlxmxLljQ7Pp5/3603NbOzow+f/89LFxoVlM/71a1ycFxduJHUgrTh02M4CpXKUtZLnLRXJS3NbjPhIubwjuWdCmcUkr9Z6VKmVlLJ07AJ5+YEYClS83WbOXLm13lonk267MsC77+GmbMMCv2KlaE+37xWcxi2tKWYQzjUz4llFDS04drTKQaq/CjFoeJkCPKLh7UmAu5W8Lu/rCrX6zbAU+p2MiSF/xDsSyrbWQKEJFfInUjy6oBjMXsBPeLiHxrWdZQTEbxlZZljQMqA4+BO8CnInL8ZWUWKVJEvL29I3N7ZWt37pi9P48ehcKFzTd+kiRvVOQtoAbgzTIsmpGffKwM8qJrk5SEnVvF8raNiZfCERrdB5eRQO+oaIlScYJlWQdEpEg03yNK40R00DgRDW7eNPmWZsx4MkiQMKFZJte5s9mSOhrMmgXt2pl8sWvXwvMmK2/mKumoTBbOc4TvqMYQEpGILWzhfd43Fx2bBV5tIWM5qLcSHBNGS32Viu00ThgaJ97Q1atmqtDkySblBZg40Ls3NGtmBphj0KpV0KQJZMxoxj/eyyJ8yZeMZjRNacoc5uCIIzeYQwra4E0xYC3FiPBcEhYKmzrD0WlQqBtU+BGsmJqLoVTsEdk48bJOpa0RXwKlgOuYhNsZgdTAbhGp8ObVfT0aBOKYa9fMNg1//WW+6WfPhgpv9tfnPlAH2IYXDtQnG++xJngjvZun586fm1nXqS6OSSxo/AASDQQG8/wUX0q9XWLoYUHjxLssKMjMRJ08+eldPsuUMZ1LDRqYhBdR6J+H
hUyZwh8W3vv3NTvwIxHVyMMxjvEdVfkOJ5zYzGZykctcdHIBrGsFadygwVqI/2aDHErFRRonDI0TUcTfH6ZOhbFj4Ur40uO0aU3Co44dzS4MMeT3383MVicnWL/erN4eyUj60IeqVGUpS3HFFT+WkhgPTpIXfzZQhgijFSImv9KBMZC3DVSdBnb2MdYGpWKDyMaJF3a5ikiFfw7gKNBbRDKKSEkRyYiZ9nE06qqs3npp05otfYoWNbmVKlY02zU8evTqz75AQmAtUItqPGY9PvhSybEMwxf6kKpIJcpN2MijO/aw0AXuDAV68Zwc8Uqp16Bx4h3n5AQeHrBjBxw5Al26mBlLO3dC8+Zm8KBvX7hwIcpuWbu2meh68yaULGkmvz6rLCl5xBYOUYT89GEzXxBKKOUox9F//jrm9oDai+GGN/xWCR7eirI6KqWe0DjxDkmUCHr1Ah8fmDnTzFa6ds3stpAxoxlsOH06RqpSsiTs3m1W5pUtC9u2wZd8yQxmsIlNVKYyt7hFShrygJXk5BQpKcumf/LwgVlTV240lBgEx2fCmuYQ+pzEfkqpF89Ueuoiy7oDpBCR0Ajn7IG/RSRpNNbvpXRkIY56/NjsET1sGISGmq2q580zuTlet0jgI2AB+4mPO0lxYl3IRn5om5cjmw+zu0dVnF0DsBoFQsqOmDxLOo1Vvb1iYgT6mftpnFAm4dGvv8KkSaajCcwv5u7uZuZStWrm4eINHTtmigoMNLOXSpf+9zUHeEAAdSjNNk4wGHem8pCHbGQjH/KhuchnLaxsAEmzQ6NN4JL6jeumVFyhccLQOBFNRMxg8pgxsG7dk/M1a8Lnn5vBZSt6Vw/4+prwc/YszJ9v8ol74kkzmpGVrHjhRUYyco8d2FOLm6TgFJuoQdanC9o/Gnb0hqy1oPZvEC9+tNZbqdjijWcqPeM6ZpVRRLWBm/+1Ykrh4ACDBpm5qTlywPHj4OYGI0aYTqbXKRKYC3SkKI/YwT2gYryydJm5n8LuBSk8agf+95Mii+LDtSnAx0BIlDVJKfXmccKyLHfLsk5blnXOsqyvnvN+J8uyjlqWddiyrF2WZeV5wzqrqJYwoVnmcPiwGSZu2dJ8569bB+3bm3VrefOaWaobN772TNV8+UwISZ3abCy6cuW/rymMK0lZwyZqkI9BbOQjEpKQilRkD3vMRVlrQP01cNcHFpWF+75v0Hil1Cvo88S7xLKgcmWTBO/ECejQAeLHhzVrzPmCBU2yvKCgaKtChgxm8qybm1k6PXky1KMe61mPL76UohSnOEViygKbScFdClCGFZx8uqCivaDyZPBZA8trQvCD595PqXeWiLzyAKoA94DfgUXAH+Gvq0bm89F1FC5cWFQcFxAg0rWriBnPEClVSuSvv167uDAR6SUiyF/iKlnEVVxlc+hW6dhR5L1kPuI3IquEjXMUuYSINBCRoKhph1KxDGYThBj7Pn7TOIHZxOEvICvgCPwJ5HnmmkQRfq4DrH9VuRonYoGbN0UmTRKpW1fE1fXJ9z2IJEggUr26yLhxIqdPi4SF/aei/fxE3NxE7OxEfvnl+declCBZJY1EBDklPSWbvC+u4ipbZeuTi3x3iYxPKDIti8hdn9duqlJxSVyLE9F1aJyIQX5+IsOGiaRO/SQOpE4tMnSoiRXRJDBQpE4dc7tBg0yoOSgHJZWkkuSSXPbJPhERCZAjcktSy01JIYvl4L8LOj5H5Ac7kfklRB7eibb6KhVbRDZORGqmkohsBN4HJgMHw/+fVUQ2vE5HllL/5+xstqhet87kXNq922TTmzHjtbbwtICRwBCy8oCd2JGJmnbVqTV5DbVbZCH/8J1cD8iGLHWA88uAesDDKG6UUu+eKIgTbsA5EfERkWBgIVD3mXv4R3jpgiZIixtSpjS5NDw94dYt2LoVvvrKjFI/fGi+/7t3h5w5IWvvpYZAAAAgAElEQVRWc+3q1RAW9sqiU6QwqysqV4a2bWHUqH9f
kwtHcrOA3/iInIxhHVXJTCbccWc1q81F6UtB480QdBcWloXbZ6L4D0Eppc8TihQpoH9/uHjRzFIqUABu3ICBA83S6MaNzdTT4KjNXZQgASxdauLEkCEmBWD+0ELsYhcJSUgFKrCJTTjzAS7sBBJQlfLMY+vTv2jkaWWWv93whsUVINAvSuupVJwVmZ6nfw7MLg3F/8tnovPQkYW3zN9/izRu/GTkonZtkevXX7u4H0QE8ZPEUkTiSTyZH/ar9OwpktzZTy5+86GEjbEXOY2IlBeRe1HVCqViBWJ4BPqf43XjBNAImB7hdSvgp+dc1xUzo+kykP0FZXUAvAHvTJkyRe0frIpaV6+KzJol4uEhkjy5PDWLyc1NZP/+SBUTFCTSrJn5WK9ez5/wdFFCZa50ERHER9pIUSks8SSe/Cq/PrnoxmGRiSlFJqUW8TsaNW1UKpaKa3Eiug59nrChsDCRzZtFatUSsawn3//Jk5uVDHv2/OcZrK+6Xd++5hYNG4o8fChyRa5IPsknDuIgv8lvIiISLJfksuSRR+Ios2ShhD5bkM86kbHxRX7JLeLvG2X1Uyq2iWyciNRMJcuyMlmWtRs4BWwKP9fIsqzpb9yrpdQ/kieHRYtM0u7EiU321Q8+gOXLTXLv/6gnMIUU3GMzrpSipdWC7KOn0L5bCj74ZgvnHhRHVltwbAdQHrgRxQ1S6t0RBXHiedk6/zUTSUQmisj7QB+g//MKEpGpIlJERIqkTJnyeZeo2CJtWvjoI5Pc+8YN2LvXDCOnSwf79plEGB07mhlOL+HoaJKwdu0Ko0dDmzYQ8kzavEzYUYmfmMGXZGEmS8hCWUrQghZMZaq5KFUBaLodLDtYVB5uHIqediv1DoqK5wnNvfeWsSyTsHvVKrh0yeRXzZvXfOdPnAjFi0OuXGZzn/Pno+R2w4fD2LFm5lL16uByLx072IEbbjShCVOYggMZSccufCnGRzRjAT/y1JNIFndo6AUPfGFRGbj35nVTKk6LTM8TsA74GpPY+074ucTAxch8ProOHVl4i126JFKx4pMRi3jxRHLkMCMZPXuKTJ5sRjYuX37lCMZ8EbGTQEkiNQVBRoR9LwMGiDg7PpAjg6qIjEZkn4NIWFYRef18TkrFJsR8row3ihNACcArwuu+QN+XXG8H3HtVuRon4ih/f5Hevc13P4gkS2a+90NCXvqxsDCRwYPl/5NdAwP/fY2fiIyXESKCXJJq0kDcBUFGysgnF90+KzIlk8iExCJX/ojSpikVW8TBOKG5994FYWEiBw+KfP7507mXQKR0aZEpU0Ru337j28yfb0JMwYIi166JBEiA1JAagiDDZJiESZiEyUM5KQ1FBFkmPSXg2TlL1/aJ/JRU5Of0In+ffOM6KRXbRDZORDYI3ALswn++HeH83ch8ProODQJvudBQkR9/FMmc+emA8uzh7CxSoIBZOtevn8jcuaazKYLlIuIgQZJEmgqC9JW+MmRomDjaP5LdfZuajqWtTiJhqUTkkC1aq1SUssHDwhvFCSAe4ANkifCwkPeZa7JH+Ll2ZNqocSKOO3FCpFKlJ9/3hQuL/PHqTp6JE81KijJlRO48J5fqPREZJVMkVCzxlVLSRuoLgvSTfhIm4QMV9y6ITH9fZJyryKVtUdsupWKBOBgn/uvggwew7lXlapyIxR4/Flm3TqR5c7Oxwz+xwNHR/N6/Z88bFe/lJeLiIpI1q8i5cyLBEiwtpaUgSDfpJqESKiIhclS6igjiJR5yWx49XcjNP0UmpRKZmELk+oE3qo9SsU1UdyqdAHJIhCAA5AGORObz0XVoEHiHBASIHDkismSJyPDhIm3amJ3iUqaUF3Y25ckj0qOHyNq1Ig8eyAYRiS8hklg6CIJ0kS4yfESoWFaorP6iu+lYWu0s8thVJOKuQErFQTZ4WHjjOAHUAM6Ej0T3Cz83FKgT/vM44DhwGNj6bKfT8w6NE2+BsDCRxYtFMmR48v3etu0rdwpa
uFDEwUEkf36TuulZgSIyQhZJkDjINSkgn0tzQZBP5dPwBwkRuX/F5MwYG1/kr9VR3zalbCiuxQk09967zd/f5OCrVOnp/EsVK4ps3PjauZf27jUpnFKlMhOkQiVUekgPQZDG0lgeykMRCZMj4TNc/5CKcvXZXKy3z5jZreMTiVze/sZNVSq2iOpOpbbhv+i3AfzDe/6PAi0i8/noOvRhQYmImQK7d6/IvHkiAweaJXLPblvt6ChSoYKc/+47KX3AWxKH9hIEaSkt5fsxwQJhMvvT703H0mIXkUcOIrLE1i1T6rXZ4GFB44SKXg8emAyrDg7mez1JEpEJE8xI9gts2PD0KPSzgkVkpKyXB+IsNyWbDJN2giCtpbU8lvByA26KzC0s8oO9yIl50dM2pWwgrsUJoPFzOpUmvOT65sDsV5WrcSIOunxZpE8fkYQJn/yuX6SIyLJlZqXDf3TqlEimTKa4zZtFwiRMRstoQZCyUlZui1lud1TmSLDEk+NSQHzkytOF+F8W+SWXDkKot0pk44Rlrn01y7LqhffqZw7v+f9ZRDwj9eFoUqRIEfH29rZlFVRsFRwMe/bAhg3m8PY2ISfc3ylSsK1yOtZUPQJVq5B7uSd9PnPmx/Zz6J77E6zkTtAwAFx+Bjrarh1KvSbLsg6ISJEYvqfGCRX9zpyBbt3Ay8u8LlDAJHQtVeq5l+/bBzVqQLx4sH49FCz49PthwFj+oA01AGcW0JCuTKA+9VnAApxwgiB/WFEPLm+FCuPhw8+itYlKxYS4FicsyyoBDBaRauGv+wKIyHcvuP6f3E2JX1auxok47O5dmDTJZN728zPncueGPn2geXNwcIh0UVeugLu7CTHz50OjRrCABXzER2QnO+tZT0Yychov0tOQ26TgAevJQ64nhQT6wbLq4PcnuM+B3B5R3GClYlZk40SkO5ViIw0CKtJu3YItW0wHk5cXXL78/7fCLPi9ZhIuv/8zLcY1ZmDrDQwq3AjLOQwaPoSkQ4ABPH9zKqViJ1s8LMRGGifeUiLg6Qk9epgdgwCqVoWvvoLy5c0WPxGcPGne9veHlSuhXLlnigN+5ij1qIorwazkY1oyhipUYTnLccEFQh7BGg845wklBpnD0rig4q64Ficsy4qHmelUCbgC7Aeai8jxCNdkF5Gz4T/XBga9qo0aJ94CgYEwY4bZ/vOfmJApE/TuDW3bgrNzpIq5cwdq14bffzdjFZ07wxa2UJ/6uOLKOtaRn/yc5wCu1MCeEC6yikKUfFJIkD941gHfHVBpIhTsHA0NVipmvHGnkmVZrURkbvjPbV9UgIj88tq1fEMaBNRrEYEzZ/DbsIE/N2ygzIb1OAWbvaf/TpOZYdc/x7nBBwyv1AwLf2gQBGm6AOMxG48oFfvFxMOCxgllc4GBZgvqMWMgIMCcc3MznUt164Kd3f8vvXzZdCydPw+LFpm3nzULH8pQhXTcYBOdqccYilOcNawhCUkgLAQ2tIfjs6Dgp1BxHFh2/y5IqTggLsYJy7JqAGMxv5D9IiLfWpY1FLNEY6VlWeOAysBj4A7wacROp+fROPEWefwYfv3VxIVTp8y5lCnNAESXLpAkySuLePgQmjUzAxBffw3ffANHrSNUpzoPeIAnnlSgAlfx4THVSIUvR1mIGxGCyuOHsLop+KyC0t+CW18dhFBxUlR0Kq0VkRrhP299wedFRCq+fjXfjAYB9aYuAU39/Kg0fRAdJ08h4+UwAB7ggvd7tSnbeid2yW5CnceQpTEwF3CyZZWVipQYeljQOKFih9u3zbDyuHFmZipArlzw5ZfQogU4OgLw999Qs6ZZET19OrRp8++ilnCNnFQlJ2fYRTfcGUce8rCOdaQlLUgYbO8NB8ZArubgPgvsI7/EQqnYQuOEoXHiLRQWZmazfved+cIHcHWF1q1N51LevC/9eEgIdO0KU6eaj0yfDtccLlGd6pzlLHOYQzOacQs/rlGL3Hizl4mUpNOTQkIfg1db
ODkPivSCsiO1Y0nFObr8TalI+huz5dThkDO0WF2BVj9do+LmJ/8uJKcrVvEA6CFQsCKwHEhko9oqFTlxbVlDdNE48Y4JCIBffnl6CUSGDNCzJ7RvD66uPHgADRrAxo0wcqRZHfGsddwhCTVxYy/76UFlppCCFHjhRU5ymhmv+0bArq8hSw2o/Rs4RG55hVKxhcYJQ+PEW0wENm82nUtbtjw5X7686VyqV++FeZdE4NtvYcAAqFIFli6FkIR3qEtddrKT0YymJz0JIJBjNKU4a9jFV5TiWyzCZ7BKGGzpBocnQr5PoMoUsNNVDyruiGyceOGcbcuy7CJzRG21lYp5KYAtQPl4OZhV7yjtNpUhz3FY17g493HFOv0AZguUBzptheslges2rbNSsYHGCRXruLjAZ5/BuXMwZ44Zjfb1NZ1KmTLBoEG4PvqbVaugSRMzkalPn6f2cQCgOkl5zEa2UZnijGEPn/CQQEpSkj/4w4w2F+trHhDOr4Ol1eDRXdu0WalYTOOEsinLgsqVTcfSkSPQqZOJE9u2mSCQOTMMHgxXrz73o/37m3GKLVtMLr5H15KygQ00ohG96MXnfI4zCfgQT7bQkdKMYB8teExQeCF2UHECFB8Ax2aYJXEhQTH6R6BUTHjZl3gIZj3yi45/3lcqznMFVgFNScZ5NiB5PKixeA9NDzTkU7uxXHbOAfeAKQLvHYef84GcsXGtlbI5jRMqdnJwgFatzEPEypVQooTJwDp0KGTOjFOvz/i19yE6dxJGjjSTmEJCni6iLC4kYRWeNCEf4/mDKqQgCZWoxEpWmovyd4Bai+DaXlhcDgJ0wEGpZ2icULHDBx/A5MmmA2nCBLNL3LVrMGSI6Vxq0sR0Nj0zytCmDaxebXaFK1ECLpyKzyIW0Z3ujGMczWhGGCFUYDIbGEExFnKKqtzntinAsqDUUCg/Bs4uBc/aEPwg5tuvVDR6WU6lzJEpQEQuRmmN/gOdrqqiWijQDZhEGB8wgKMMJ/8Vd47mXsTnWfYwKkFn7Pb6mIsbOMD0tZC0si2rrNRzxVCuDI0TKm4QgV27TPLWtWufnM6dm81pWtJua3MK1XuPBQsgfvynP3qKMLbzFR0ZxRUq0oI77ORPfuZn2tPeXHRhA6yoD65podFGSJwlBhun1OvROGFonHhHiZhOpIkTTf6l0FBzPk8eszSuVStI9CTdxYEDJiff48dmrKJkKWEMY+hFL8pSFk88SUpStrCAUnzMVbLiwlpSESEeHJsJG9pBGjdosBbiJ43ZNiv1H0VLTiXLsizMaqG/JRYkY9IgoKKDAEOBwUB+pnOcTmS4k49L+ddQOkt6NlX5BMdvf4EgIAOwaDSU/MKWVVbqX2yVK0PjhIr1/vzTZF1duNBk7g63gzJ452hBO6/GJHov2VMfuQ7MYCp96MIdcvEFKZnLNgaF/2dhwdU9sLwG2MeHRhsgRb4YbphS/43GCUPjhOLKFZOVe+pUuB4+4zRhQmjbFrp1g6xZAbN7qLu7Sdn3669Qvz4sYAEf8RHv8z6rWEU2srGHHeSkLiE4cp81ZCXCP7Ozy2FNM0ia08QKlzQ2aLBSkfPGOZWeKSyJZVlzgUfADeChZVlzLctK9oqPKhXnWMAgYCJwlHbkYg23k/qQ9HQxdt8/QoX1MwjwnA2Z7MEXKNsLvm34ZIRDqXeQxgkVZxQoYJY+XL1q1jR4eECCBJRlJz3PdCJB1jQE1ahvsrI+egRAGqA7HRjCWhy5zE+cZiC1GMIQOtCBEEIgXXFousPcY1FZuLbPdm1UKhbSOKFirfTpzTK4ixdh0SIoWxbu3zc7imbLZnqPtm8ny3vC7t1QqBA0bAg//QQeeLCBDdzkJm64sZnNFKcsV/mdhziTmnIcY9WTe2WvD/XXwD0fWFga7l2wWbOViiqRTYw3E0gAFMSknymE2Vf9l2iql1I21wVYCJyhGmnYiZMzOO0rzZ7EXpQf0Jo7XofAPYlZM9d/GVTJAVd8
bVxrpWxG44SKWxwczFqGX3+FGzdg9mz8ClXBTkJxWucJjRpBmjTQrh3s3ImrCIOoyjh2cxtH+rKF2TRiOtOpT30CCTSzkzx2g1NS+K0SXN5u61YqFZtonFCxm6Ojya20fTscOgQffWRihaen2TGucGFSrJ3DpjVB1Klj9oX46isoG1ae/ewnPempRjUmMIE85MLiD86Tm9zUw5tJT+6TubJZKv3wlulYunXSZk1WKipEavmbZVl3gbQi8jDCOWfgqogkicb6vZROV1UxYSNQH0iGLy7U5EzYcey6TyDXts5sXnObVFMqwIQjcB9IHh9mLoDa9Wxca/Wui+llDRon1Nti34preDZbSLPQeeR/fPDJG6VKwcCBSJUqTLBuUJw6FMGbHTSgIkspRnFWsYoUpID7V2BJFfA/D3WWQZbqtmuQUi+gccLQOKFe6vp1+PlnmDQJ/PzMudSpCevUha8udGLU7FS0aGF2iQtyvE9LWrKSlbSjHROZSACPOUkzSrKavfTGjRFY/8zr8DsCS6qChEJDL0j9oe3aqdRzROnyN+A08N4z5zKFn1fqrVYF2AoEkoFb7KKUXXVCJnThVJfPKO2eCN+O3jCtOeQEbj2COvWhe9f/L5tQ6h2hcUK9FdzqpsVj3+e4pzhA8UQn8G3dF5Ilg927oVo1rJIl6bb+MD6ylRXUpzxLOUo1jnCQUpTiAhcgYXqzFC5ZHvCsC6d/s3WzlIoNNE6ouCdNGhg82CRSmjkT8ueHGzewGzKI7xdm4nDhthyZfwR3d3h8OyHLWU4/+jGd6VSiEo8J4EOWs4HOFGMUB/EglPBnhJT5odlOiOcMiyuA706bNlWp1xXZTqXNwAbLsoZbltXZsqzhwAZgk2VZbf85oq+aStlWUWAXEJ+EHMKTRvQipPNP+EyoSclaAfgUmQdzf4BagD0wfhKUcIPT+nuSemdonFBvjQ8+MH1It1LlJueS4WycdgGGD4fkyWHPHqhenWYlKpF1bRvGSS/y4sVpCvGIm5SgBIc4BM4poMkWs8vPmmZwbJatm6WUrWmcUHFX/Pjw8cdw+DBs2QJ16mAFB1PgwEyOUID+2yrT7YOtnDlp8Q3fsIAFeONNUYpymuNUZiJrGElhFnOKKgRyy5SbNDs02wUuaWFpVTi/zqbNVOp1RHb529ZIlCUiUvHNqxR5Ol1VxTRfoCZwHGjBDOaHdULOZSNl21Vsm56NXI5rYVIDmBMEfoBzApjwE7RpA5Zl28qrd4oNljVonFBvnRs3zE4/x46Z1Q+fNH1glkCMGvX/neMCixZl0cBitKo5iXtWNmpyn+PcZwlLqEY1eBwAK+rDxY1QcQIU+tTGrVLK0DhhaJxQr+3cObPxwy+/wIMHAOy1L4HD4P582K863tYB6lGPO9xhLnNpQAO8WER5WnOd93BlLcl535QV6AdLq8HfR6HGfMjZxIYNU8qIbJyIVKdSbKVBQNnCfaAJsB7wYDtrQxrg7w+J2i5lx9Dy5E93DH6tBHNuwoHwD1WqZB5EcuSwWb3Vu8VWW0XHNhon1Jvy9zd5W728TELWb78Fu4cBMHmy6Vy6eROAi4Wzk2KgL1LblQ5WEhbjwxSm8AmfQEiQma10zhNKD4difW3cKqU0TvxD44R6Y3fvwsSJhP7wI/Z3zAykmxkKkfLHflxvUJz6dg3Zy16GMIT+9GcXv5OXuljYEcgKMlDSlBN0D5bXgiu7ocpUyN/Oho1SKupzKv1TaCLLstJFPF6/ikrFTQmBVUAnYAHlKBVvH++5pOLeb1UoMX0G+3zyQbuj8EVB8AASA5s3mzXYQ4ZAUJBN669UdNI4od42iRLBqlXQoQOMGAEeHvDQzgV69YLz5+GHHyB1ajIfOItL3Yc4FL7H7BUXGSZ5aEc7BjAAiecItRZD7haw62vY2Rfi8KCeUm9C44R66yRJAv36YX/pAkHfjua2UxpS+R7CatyINHmrsGNeRz4OackgBtGUphSmEJfYw12SkIKK
nGWxKccpsUnY/V412NgevH+wbbuUiqRIdSpZllXFsiwf4C5mBdA/x+VorJtSsVY8YBIwCljL+6Rw2kPxkEoEjm9H6b1fsO1Acmi8B1o1gJ5AZUxn0uDBpnNpa2RmgCsVd2icUG8zBwez/G3UKFi82Ew+9fMDnJ2hZ0/w8YGxY5G0aXE6FIxDvWD6lj7K7t+zMZxvaE1rgu0Fqs+B/B1h3wjY8hlImK2bplSM0Tih3nqurjh9/QVJbp9nRdWJXCQT1qmTOLZqyy85f8drWiNWBy2lNKVJjhNh/MExipCdppzge0DAwRnqrYAcjWF7L9g9QAchVKwX2ZlK04HhQCLAIcLhGE31UirWs4BewG/AURJzM8FqGgZ04/FnY6gUUAfPHUHgvgSq9Qd3YACQMxmcOQMVK0Lr1k+2JlUq7tM4od5qlmUmJy1ZAocOQfHiEfZicHaG7t2x/voLGTeOgJQp4XcoWeocd+u7cvLUPNxx567lD5UnQ5FecHgirG8DYSE2bZdSMUjjhHon2DnHp65XF36fdZaO8WZwIV42LB8fqnZYwu1syak84RSlHxbGl2NkZBNeNCMPX3GSjsBjsHeEmgsg3yew5xvY0k0HIVSsFtlOpfjATBF5ICKhEY/orJxScUEjYCtwn3hscRlHx4DJhFX2okHqkkzbfAGKDYNa8yG5PXS/DX1zgJMTzJ0LOXPC9OkQpoFCxXkaJ9Q7oWFD2LbN5GQtUQK2b4/wZoIEWN264XLuHH8O6E+QsyMJPR+wPx907LSNutfcuGhdgrIjoeRQODEHVjc1OZeUevtpnFDvFI+PHGmzsy0lk52ibYJfuZ85Hwl8/2ZUt0ccynyH7UMq4vn3D5RkHov4mtxM4yy1EPzBzh6qToPCPeHwT7C2JYQ8snWTlHquyHYq/Qh8aVm6fZVSz1Mc2AOkAma6dKL74/XYZbxChwJuDNm8C3I2hya7AFdIdwa8MkGV8nDnDrRvD2XLwvHjNm2DUm9I44R6ZxQrBnv2QJo0UKWKGSN4SqJEFBg6jH3nLrC2Y20Amk4RvLKdZenAvBx+sAtKDIDyP8LZZeBZG4L8Y74hSsUsjRPqnVO8OOz1tudQTg+SXvqTlW2WI0WKkNwvlEGDhdYZ+7O3c25qnmnCbKbzHlvwpTTBXDZTZMuNhtLfwakFsLgiBNywdZOU+pfIdiotBdoD9yzL8ol4RGPdlIpTsgK/AyWAcQkq08FpD/EfJWVwmYq03PILpC0OLY5Bwoxw4CyMPA4LfoTUqWH3bihYEPr2hcBAG7dEqdeicUK9U7JkMV/dZcqY1cyDB/877UWZtGnJ+PNKPI5t4l69RMQPhJ7DAkj/flmOT+wC+btCtV/g0hZYWBr8NbWMeqtpnFDvpIwZYdcuqFPPjroz69E+/z4eb9iK1KxBgkdQ+eezOOcqSKP6i9myexSJ5AL3KcYDDpiOpWJfQe0l4HcY5ruB3xFbN0mpp1gSicRflmX9CRzGpI95GPE9EdkcPVV7Nd0CVMVGwUA7YC7QOOQOW4815e+CG3Hb3YNdJUfhEBwIa6rB+T1QOD7kXwL915gssCKQIQN89hm0awfJktm4NSquiumtojVOqHdVcDB07AizZkHLlmZFs5PT09dcARoSwHe7a1Gh9zb4w5y/ly0Vib+bCIUTwerG4OAC9VdB6sIx3Ar1LtI4YWicUDElLAwGDYJvvjEDEr/9BqlvneDymM9JNXcDTsHmutvF8hPa6yqu9QMItF9EcsyMV24cfDKzteav8H5t2zVGvRMiGyci26nkDyQRiV0ZwjQIqNhKgKHAYKBsWAg3d/TmVPmxpDtehcM5F5HSLiFsawuH5kJWO6j5CxzMCZ06wZ9/mkKcnc3wd7dukDu37Rqj4iQbPCxonFDvLBEYPhz69zermZcv//eYgD/QBKGIfMM3ngMJ/soOxzPmn4sULYr1aQsI/AGCbkGthfqwoKKdxglD44SKaQsXQtu2kDw5eHpC4cLgc/13
NkysS+NJf5P8trkuKKsjDp8/5mabkaRx6WVOPrgKnnVMB1O5USbnkq4oVdEksnEissvfVgAV36xKSr07LGAQMAfYYxeP4HI/UnbTDK5m30aWm24cDDoLFedAxRFwPgwWfgx5l8PBA7BmDVStapbB/fwz5MkD1avD+vWa0FvFZhon1DvLsqBfP/j1V5NrqXhxOHny6WsSAauwuG4NoG59T0KPJSBwsgO3UoO1fz981APGJYCTqWBZXTg4wSZtUSoaaZxQCmjWzCyftrOD0qVh/nzImqYkLYedp/ulenT9CW5kdcHJJxi7z4Q0GXvjP9AN7v4Nrumg6Q7I0RC294IN7SE02NZNUu+4yHYqOQErLcvysixrTsQjOiunVFzXCtgOBFpwsHJb6hzYSoC9P26hxVnovxYK9YH6K+BePJg/EvwaQo0q4OVlEnd37AgJEpgOperVIW9emDwZAgJs3TSlnqVxQr3zPDxgyxa4d88k81616un3HYBpQFHqUsRhL3c6ZSLxX3bMHwfXMzrCqTMw5QKMig/DusHaLhCmG2Opt4bGCaXCFSoE3t7g5maWTvfqBfFDXJnrsoxMXUeQ4Uwgny/NhH/xgnAHEg3bT1iWtDC8PwSFQa1FULw/HJsBS6rCw1u2bpJ6h0W2U+k48D0mD/FfzxxKqZcoDngDuYGVJUpR695+Qs++j4drLb6+MwrJUhs8DoJdYli4As4UBK6aGUo//wyXL8N335lcS6dOQZcu5ucvv4RLl2zbOKWe0DihFFCqlHlQyJED6tY1uTMiZhqwgP5AH/JSiH3sd6lIi26w5FwovWYm5VGuLOD3EDyBJpOhdT7wu2Kj1igVpTROKBVBypSwaRN07Qo//AA1asCd2xZ96MMa+/XMbuUYgnIAACAASURBVHCfzH9cYOuOUVwqnxO7uyHQ71ska2YYOw4+/BpqzINre+DXYnDrlK2bpN5RkcqpFCU3six3YBxgD0wXkRHPvN8Tk984BPAD2orIxZeVqWugVVzyEOgAzANK+wWy5/c2hNRdTK27LfktyTTiB9yDFaXg2l+Qxwkq/ALxmz8p4PFjk6hj7Fj4IzzLq729eWpp2xaqVYN48WzQMhUbxXSujNhK44SylYcPoUMHmDcPGjY0ibxdXZ++ZjPQmBBG8gXtGM9WnGgZZoenZ3eKfrfJ9E4BJLCDTp2hzwCzY6hSUUDjhKFxQsUGM2Y8GTdesQLy5QMffKhPfY5ylKEyjDxbXKjVrzeOe0PMh9KnN8n83PPAusYQGgS1FsN7VW3bGPXWeONE3ZZllRWRHeE/v3D9s4hsiURl7IEzQBXAF9gPeIjIiQjXVAD2ikigZVmdgfIi0vRl5WoQUHGNAD8AfYBsD4Urs4YT0Lk/uf3d2JRoOelCU8Cez2HfJEgAVKkB7y/BvIhg3z4YNw4WL4aQ8MCSJg20agVt2mhibxUjDwtRGSeii8YJZUsiMGaMmViaN695UMiS5elrjgN1AHemMp6uXCQe1XhERxnJF5sLYg3sBX+Ebx/t5AhtP4Hevf9dkFL/kcYJQ+OEii3++MMMQvj7w5w50KABBBBAe9qzgAXUpS71ZBhl19QlS//zWOF7+5AlC3zZFRxnwd2TUGEcFOpqy6aot0RUdCodE5F84T+ff8HnRUSyRqIyJYDBIlIt/HXf8A9/94LrCwE/iUipl5WrQUDFVeuBZoB9KFjjPbnVviVJ7ROxPoEnbrjBjb2wvhb8/TfkSQwV1kD85/xzuHrVRJ2ZM+HMmSfnixWDjz82mQCTJImhVqnYJIYeFqIsTkQXjRMqNtiwAZo2NUlZFy+GSpWefv9voDEgbGMNDQnjAXUI5n0+YRKTcFy3APp2hD+DzAfixTMzVPv3h4wZY7o56i2hccLQOKFik6tXTWfS3r0wYAAMHgyWnTCe8fSiF1nIwmDm4RI2irpLl/B4YCIcTvmbD+fIDrUSQpqDUKC96VxySPDS+yn1Mm+8+9s/ASD85ywvOCIb
ANIDlyO89g0/9yKfAOue94ZlWR0sy/K2LMvbz88vkrdXKnZxB/YBKe3hXo96pF/8B3euO1EqpCyzmIWkdoOWV6B4czh5D2aXBp9OmLlOEaRLB199ZXIt/f47tG8PCROaSNS5M6RNC82bmyeaUE32qqJWFMcJpd5aVavC/v1mQmm1amaiacQxvRTABiAX5fmA/dwhG5uwIx4zqEpVblWvBTvOwrfZobBlkndPnQrZssGnn5qnEKViIY0TSv036dLBtm1m4cGwYVCvHtz3t+hOd7awhfvcpz0V8LVryE+Nh+FwzJ+7c7KaPEtnzsKYgzAlFSycBvOLwq0Tr7ynUm8qsom635T1nHPPnSJlWVZLoAgw6nnvi8hUESkiIkVSpkwZhVVUKmblAPYCVS240vYD0p/bT8iuErShDfWlAdftb0Op+dBiI8RPCMunwLqM8OjsvwuzLChRwjxkXL9uknhUqgSPHsGCBeYp5r33zKj2hQsx21CllFJkywZ79kDt2tCjh3lgePToyfsOwGTgC7JSgD/YjTs/A03ZSSncOJUoAL7whq+rQy+BshlNrr2JE+H996FnT7hxw0atU0opFVXixzc5liZMgLVrzQKEM2egDGU4yEEKUYhP8eA0t/nefjHxWt3g7qlgQqb0NUmZ/roJM4Fhp2FoITj6y9MjGUpFsZjqVPIFIs7PzgD8a1jNsqzKQD+gjogExVDdlLKZxMBKTI6lK1VSkDrtJhg8klWP15EnLC8LWICkrgQt/aB4NTh5BWbnAp9hLy7U2RlatDDbSVy4AEOGmLXWvr7w7beQMyd8/TUEBMRMI5VSSgFmIunSpTBoEMyeDeXKPT3JyAK6AktIRANW8hO96EwYU7lMNYqx0Wkv1F8FDb+HetdgYBqoUd70Tv34I2TNCn36mKXTSiml4izLMhNRN24EPz8oWhRWroS0pGUrW+lGN37iR9YwgZ9ZxV0HJ0I7jOHu2WEmHqRIAT4hMD4Ymn4Ck2pCkL+tm6XeUjHVqbQfyG5ZVhbLshwx6WRWRrwgPI/SFEyH0s0YqpdSNmcPjADmA/dy2JPki95Ih8M8OpKD5jSnAQ24YX8XSq2HFgshvgMsHwjr8sKj6y8vPHNmGDgQzp0zc2mbNYPgYPjuO5PMe8kSHblQSqkYZGdncmQsWwbHj0Phwk829PxHJeAP7JnIKD5hJiWx2M4jeuDOZGsKuH0JzXZBeieotBPmdDVToAIDYeRIM5DQvz/cuWOLJiqllIoiFSrAgQOQPbvZ8HnAALALdWAc45jHPLzxZgwtWMZE9lKcJPHbcL3HDfjrjHkGcHGBk8Bn66BCeti/ytZNUm+hGOlUEpEQ4FPAC/PXerGIHLcsa6hlWXXCLxsFuAK/WZZ12LKslS8oTqm3UnNgtwWpEgIzcxG2fRdOg0eyJnQdecgTPmupCbS8AcULw8kTMDsTnBkLEvbywu3szJD4ggUm91KhQnD5MjRubJJ9nDoVE01USikVrn59sxzO2dl8PU+Z8nQff3ZgD3CVjynHFpKRiL3Y4UkXOtKRgLT5oPVhyNEIbk6EVoGwfR1Urw4PHpiZqVmywNChcO+erZqplFLqDWXODLt2mf0ZvvkGataE27ehBS3Ywx6cceYr6rKO+sylI2kYwfVEDZEhH8Fff8Fnn5kNHv54ACXqQPNycFPncKioE1MzlRCRtSKSQ0TeF5Fvw88NFJGV4T9XFpHUIlIw/Kjz8hKVevt8CBwEPrEgqLs9do1787jWYVyuZKc5zWlIQ27YP4JS3tBiDMQPg1Wfw6xkcGw0hAa/+iYlSpiMsZMmQdKkZplc/vxmycSDB9HdRKWUUuHy5TNfx5UqQadOZq+FiHmWEgOrgRKU4gO8uUEe1mGRjKkUJj+7nI5CzQVQdTpc/R2OtYaJn5nBg8qVTWfSoEFm2fPChTozVSml4qj48WH6dDMAsWULFCkChw5BfvLjjTfuuDOCHqwkgOFMwhlvgviAgNQLYfyPcOo0
NGsEYcCCHZA5HfTvA/fv27pp6i0QY51KSqnIcQGmAUuBBHnAfnkuLg/aTb65I1kra8lLXhayEEndA1pdhBruYO8PXr1hejI4MBiCX9E5ZG9vdoc7cwbatYOQELNkIlcuWLRIHzyUTViW5W5Z1mnLss5ZlvXVc97vaVnWCcuyjliWtdmyrMy2qKdSUSlZMli9Gvr1M4lZy5Y1E0n/YQ+MBgaTmSLsxouGfAcsx5felKG39SWPPmgBLQ+Aa1pYVgOClsD6NbB9O5QsaRJ4e3hAjRpw/kW7uiullIrNLAs6dICdO002i5IlYc4cSEISVrCCYQxjKfNZyM+MZRVbKI8LPbhDGcgaDAt+g4OHoFQeeBQK346ELJlMRvDgSAxMK/UC2qmkVCzVADhiQXknYLo9x1x6k77VITI+yoYHHjSiETfs4kHudaZzqUENSBoI24bAtBSw+wsI9Hv5TVKkgGnTzBqMIkXgyhWTd6lSJZPsQ6kYYlmWPTARqA7kATwsy8rzzGWHgCIikh9YAoyM2VoqFT3s7c2ShuXLzWrkwoVNGryI2gCrceVjFuPBUjKQgt1AVkZTngLsT/4Amu+Fgl3hwBhYUAo+SGeePqZNgyRJYP16yJsXRo0yO8cppZSKc4oVg4MHoXhx+Ogjk9A7JNiO/vRnLWvx5TLfUpVlFKQP0wjjNI8pSDDfQcG8sOs4LJ8GWePDrbvQrZsZWF6wAMJekVJDqefQTiWlYrH0wAbLJByLVw/Oj8zNWY/dtDvzPWtYQ17yMoMZBFmpIMsaaPIXNK8FGYJgzxiYlg42t4d7F15+Izc307E0ZYoZNt+6FQoWhC++AH/dKULFCDfgnIj4iEgwsBCoG/ECEdkqIoHhL/dgdhJV6q1Rrx7s2wfJk5vVa2PGPD1xtBRwEAt/GpCOUyzhczpixyrOMY5iDIz3LcGVxkCdZXDvL5hbCE4vMDNST52C5s3h4UP48kuzldC+fTZrq1KvQ2e0KmWkSmV2hvviC5g40ST0vnoV3HHnOMdpQhNmMJwFDKM7I/GkNo58TQDFgMNQrx2cuAH9y0EqzCzW5s3NM8HmzbZunopjtFNJqVjODugF7LODrCkgYKk901d8SZdZh8gu2WlHOzKRiUEM4joJIO0qqHsaPq4JuULgyHSYkRXWNgW/Iy++kb29mVN75oxJ7hEaap5oPvhAHzxUTEgPRFj0g2/4uRf5BFgXrTVSygZy5YK9e6FOHfOw0Lw5BAQ8eT89Js/SeBLSnjGUxpsQCjIPoQzf0JiCHM2eDVodhlQFYW1LWO0BrgLz55vZSlmywJ9/mmHuzz7TwQMVJ+iMVqWeFi8ejB5tMlf8+Sd8+KGZnJqWtMxlLjvYQRISM592fM89PmEc97lKKEUJZQA4OcHQrbBxFngkMIn8Dhwwoxru7qZQpSJBO5WUiiMKAUcc4ZNQoDf8mD83yXr/zspHGyhKUYYylExkohWt8MYfkq+GakehXXX4UODcYphTAJZWgQsbXpw3KXlymDzZZI8tXBguXYLSpWH8eM21pKKT9Zxzz/0LZ1lWS6AIZhLf897vYFmWt2VZ3n5+r1gCqlQslCgRLF0Kw4ebh4USJcwGPv+wMMvhjgEJKUQG9jGOiZTDmUWcZBkFGZloNiFNNkLJoXBuGczMBYcnQ5XKcOyY2ZzBzg5++gny5DFr75SK3XRGq1LP0aSJGYxIlAgqVnzyK3sZynCQg4xnPOfYx1x60YCmzKMx9nxDEB+CtQ/yfwRT/4Kfq5su2wT24OVldotu3RouXrR1E1Usp51KSsUhzsB0B1gu4JwL1g61+Hh8FaotWs2JkDN0ohOeeFKUopSiFIs5QUjClVD+IHSoatZO+G2GpdVgdnYzi+nxw+ffrHBhs4NQt24m90b37tC4sW5NraKLL5AxwusMwNVnL7IsqzLQD6gjIkHPK0hEpv6PvfsOj6raGjj8W2kkkACh1xB6lSIdxIIKihUbCldFrhVR/NSLei0oVtRr
QVEvelUUUbHSBaRZQDoioXcSIJSQkJ7MzP7+2CcSehKSmcxkvT7nyZQze/bZhrNy1tnFGNPJGNOpevXqJVJZpUqaCDzxBMycCfHxdtq7mcf1zasP/AS8TzBPMZRWbGEH/RmJh2t4huFBHdjY/Sa47S+o2QnmDoUve0DqRnjlFTspR9eudj69666z4+/yzxKuVOlSbD1a9eaDCjStW9v7wVdcYf9k79/f3hcOIYQHeICNbGQQg1jMGB7mF/rzGAc4goceGB6ByEowYDq8/hk8UwEuCIbgIPj8c2jWDB59FJKSfH2YqpTSpJJSfuhagS3loXMGJI2AB6+DTvOb4pk7ho258bzJm+xjHwMYQEMa8gqzOBQ+EbotgTsHwmWhELQV5twFH9aA3x+D9H0nflFYGLz9NnzzDURF2VvnHTvaNUxVyXGnQML78Mfl4DlF0i/wLAOaikhDEQkDbgam5N9BRDoA/8UmlPb7oI5KeV3fvrB8OTRoYC8Wnn/+2HlUBbgb+AtoQG1a8j1PMJNa1GAsG1hBK8ZUGU/mDVOg3xdwZAd80QnmPwTNY+H3321vpagomDzZ9lp66y27KqhSpUux9WjVmw8qEFWqBN9/b4fEzZkDLVvaxZ1zcqAmNfmET1jEIhpQgx8ZzXk04r/ciPAGLpqBfAatBsGwDTC8H/zLDT2q2QL+8x9o3NgWmFlm/jZVBaRJJaX8VG1gaTX4y0D/nZDTAcZeDPVTKzF17UNMyN3Ej0ymKU15gieoT33u5iOWhjyIp/VeuHUM3NQQ6qTBH6/Ch3Xhpyth/+oTv+yGG+wd7fbt7RiM7t1h3DgdDlcs3GD+gqSXYFVP+LES5r3K8NVQ+P0ncg/96OsKeoUxxgUMA2YB64FJxpg4ERklIlc7u70GRALfiMhqEZlyiuKUCiiNGtmOowMHwjPPQL9+J45GiAXmYCecGcNlNGMHf/AQN2IYwkv8T2ryRcuDuO5YC23vgZVj4NOWsPUHGDoU1q+H66+HtDT4v/+zNxB+/937B6vUqRVbj1alAlVQkJ2Pb9066NPHjnTu0AEWLrTvd6c7y1jGe7zHEf5iGN9xIQNYRU1gMC46QuR6uHYy/OMzuMUFj4RBl2aQnGwLbNYMJk7U6wD1NzF+/MvQqVMns3z5cl9XQ6lSIdfAS8thTAok9QLKQd2DMLwydApZw0TeYQITyCKL6lTnci6nH5dzOVWpePi/sHIyxLkgF6jfDDo+B41uAsmXe87KgocesqvEAQwaBB98AJGRvjhkP2SABGApZMyDnfMwOzcju1yQavfIrATrG8CsBvBl/VC+jFhFa1oX+ptEZIUxplOxVt8PaZxQgcQYO+XdiBH28Qsv2Hm2Q0KO3W8rMAT4BbiTTTzF/dTnZ9KAL6hGA97m8r2NkTn3woHVEHsZXDwWKjeCadNsoTt22MIGD4bRo+1SQyqg+FucEJEQYBNwMTaYLgMGGmPi8u3TATtB92XGmM0FKVfjhApk+U/p//iH7cVUs6Z97yAH+Tf/5iM+IoxK9OdSRrOEGHbh5kqCeRXSKsOce2DbVDjYCqYb+Gu9LeDSS21QatzYZ8enSlZB44QmlZQKMMbAd/NhxErYfj7QBYLc0A+4MTgJNzP5mRn8xE8kkUQQQfSgB9fSmwFZqdT962tk1R6b5KgcBR3vgdbPQWj5o1/yxRdwzz12SaIWLeDbb+1gbnUSOcB8cE/C7P4R2ZkEOwFnCoescFgTA1Nj4LcG9ahT+Xy6Of+1ox1hhBXpW/3tYqGkaJxQgWjXLtu5aPp026Howw/tnej8PMC7wOPYdPZI1jKAu2nIYpKBb4jhHM8HdFu9CX57CowLuj4FnR6FXA+8/LJNJuXkQOXKdtbwu++2K4WqgOCPcUJE+gFvAcHAx8aYF0VkFLDcGDNFRH4GzgH2Oh/ZZYy5+hTFARonVODLyLCn9FdfhYgIePFFu9Bz3ul8FasYyUimMpVIormPLjzFYiJJB+4myIyE9XNg
/oOQkwGHr4GxP9s5lsLDbRfaRx6x02aogKJJJaXKOGNg7lx4bDysbANBg8FTE4INNBNojZvKLCWZGaxnBnGsBKAOdRjq7shdm3dQfWUcstcD5UPg3Cug3TsQ7vQ837DBDouLi7MR6v334fbbfXfApUomMBvc3+DZ9QNBGzMwW0CyITcY/qwDMxuEsrdBO6rUuISuQT3oSldqcLQnQAawC2gChJziW07HHy8WSoLGCRWojLHT3T34IBw8CA8/DM8+C+XLH7vfduB54DMgDBjFcq7lTprwJ4eA6bSke+prNF3wKWz6Fqq0gN7vQoOLYfNme4t71ixbWKdO8N570LmzF49UlRSNE5bGCVVWbNwIw4bBzz/bGxLvv3/s6Xw5y3mWZ5nOdGoSzZO04D6WAuUJ5gkkbQDMecj2WopoB/PrwLfOXPitW9uRDD17+uTYVMnQpJJSCrAXHgsWwLMvwC+hwHlQvgvQBjJq8ffMauFmLzXlJ2AGicwmiyNUNkGMi6/PNUsTCduRZa9I2rWHc1+HyIttT6X774fx420hQ4bAO++ceFVTJqQBMzCeSXh2TyV4Yw6ezRCUBelhMLNJOXY160F0zHV0Cr2AOrQinmB2wjHbDo7pyMQ2oGERaqMXC5bGCRXoDh+2U1x8+CE0bGhHJPfpc+J+m7HJpS+ACOB5fqUfd9KcTewH5tOFS7bdRdV5L0HKdmh8NVzwOlRuYmd+feghuwydiO2x9NJLUKWKV49VFS+NE5bGCVWWGAOTJtmp8/btsz2WXnwRoqOP7rOUpTzLc8xkBi2ozCvU5hrWk019ypkXYWMwLHjULvLj7gfj1sHW7fbDd99tVxfNX6DyW5pUUkqd4PffYd4827koLg427AZXU6AN0BYiOoGnFWRXzQUWA9MJNt/ilm3cuj+IV5aWo/amTDvNUusadphE9DD45Cs7FiMrC845x94+b97cp8fqHYeBqeR6viQofg7Bm9x4NkFQJmSEwq9NqrCrWV9SYodxOKQbawn6O2mUclxJ4UCDk2xXApWLUDO9WLA0Tqiy4pdf7N/yGzfaeTPeeANOtqjVBmAU8BV21vsXmMkl3EsrdrEXWOG6iN4rulF+yTvgzoYOD0C3p8EVYpeee+MNuzJc1ap2eNwdd9iZYZXf0ThhaZxQZdGRIzByJIwZY0/nTzxhp9DLnwtawhKe4Vlm8xMXUpHXiaIjCWTRkfDsZ2DxQlj5NgRVgvU94JNZkJtr5+B76y24+WZ7I0L5LU0qKaXOKDcXtmyxK0TkJZri4mDjIXA1A9oC3Q1y6UpM9UnAJDok7+CdZdA9DoI8YJqFIp0HwP6b4IZHYdMmO3H3uHFwyy0+PsKSkA3MIM28R0TCPII3enBvguAMyAyF3xrFsKh5f+bFPsyq0Ji8+bcJBloAjTg2aRTr/KzOyddKLiq9WLA0TqiyJCvLzpvx8ssQFWXzP7fddvK/6eOA54BvgErA83xNbx6kNfvZh7A2vSs9fqtH+bXfQURV6DEK2t4FGzbZHqoLFtiCune3Q+Lat/facSpHVjLsWQSN+hXp4xonLI0Tqiz780/bEXXBAjubxaBB9hSf/5S+mMU8xbPMZzYDqchLBBFDMhn0pvzBITB3HMT/Au5z4PtgWOKsJN23r40PjRr55NgUkLIDDq6FxlcW6eOaVFJKFVlurp1KIy7OJp22boO4TNhUx5B0wXLoOYmaoRP5z8o93LgawnKAWNharQ3hI6Oou3CxLWfIPYSOfctO4ufXDLCUVDOW0MRJhG/Ixr0RgtMgK0SY3ag5E5oPYlrDB8kMrUgk0A5oD3RwfrbG9kbyFr1YsDROqLJo3Trba+n336F3b7vaT/v2J08urQGeBX4AojE8x0dcwDO0ZR/JwNrEZnRcUJ6I+NVQtTVc+CY0uAS+/NJOzLpvn+2pNHw4PPeczWapkuPOhR2zYN1nsHUyxuNC7t0L5Qu/Op/GCUvjhFKwerXN/3zxhZ3Yu0cPm1y6/noo
V87us4hFPMFIlvAz91CBJ4EapJNsrqLy+m6wcAykJcK+C+GT1XA42V4DjBxp40VoqC8PsezIOACbvoH1X9gbD+UqwX37IbjwE6lrUkkpVSJyc+3KQ4v3GL6MWML6Gp8zZM9nDF+VRlQGEA3u9QJjIDjHEFehLaOaf0d4myY0bswxW7Vqpb1X7E7S+AD3oXFU2pCEZwMEJUNuEMxs2JIvm9/H0sZ30DwskvYcTSI19MChA7B3r9327bM/k5Ls9VZ0tJ2KJDr6xC0vcJ8tvViwNE6ossrjsR1GH3vMDnNo3hxuuslurVufeO5diU0uTcUmwO9lMtfzCD3YSo6B9Vtq0nohhKUkQqOr7HxLIbXg6afh3XftF9ata8dS9O9f2k/u/sUYSFwB6z7Cs2EiQZmpeCKEoBYGTytw10wgVOoUuliNE5bGCaWOSk6GTz+1CabNm+1Itrvusos+13fW6vmd33mWl1nMdIYTxr8QKpLDgezrqLEoEln1OeRWhl+bwfQ/7IfOOQf+9z9d6KGk5KTB1smwfqK98WDcmGr1yGlRkYwWIURX+rNIxWpSSSnlNR48zHAtZOu6F+i14Vfax+cSFA9MAA6AKzKYsU89wL+PvEDGrxVgBZBhEyx5CaZGjY5NONWvDyFFWfbsrB3hCONJSXmT+hu2w0Z7DB6BhfVjmdr4LnZXuo+aidHU2gTZW48mjfK2/fvB7T6x5IgIyMw8/beXL39skunzz6FBg8IfhV4sWBonVFl36BB8952dmHX+fJv7adnSJpcGDLCP81sJ/A/4EjtrXHcWMYL7uZzVhLhg+8pIGi5xEeRyIXnzLa3bbmd7XbbMFnLFFTbRFBvr1WMNOEe2k7Z+JCHrfiQ8KRUTDNIYMlrBnFhhVnAjZtGXubxCLIXvIaZxwtI4odSJPB67StzYsTBtmn3tmmts76Xeve19gz/5k+d5hfl8zSMEMRyhHIbE/VdTZ+5uZM9yONgSvkyFnfG2V+tDD8GoUVChgm8PMBC4c2DHbNgwEbZMBlcGrqho0ltEUqFlPCHVDbsJYTIVGMoBgih8TzFNKimlfMKDh5UZs9i25U2arV5AuzdyESc57r4WFr7enfdjH2bVoSuI2BSBWQ2pf8DenyE38Wg5ISE2mZI/4dSokU1EhYRAcPCxP0/2ODjYJnHS049uaWnHPk9Ng7TIQ1TrOImOTT/iov2rCNtoYK+tx4oatfg0dzDfLBhO4txasOfY4w0Kgpo1oVYtqF3bbnmPj38tIsLOb5ucbFdsytuSko59nv+1r76yny0svViwNE4odVRiol3IbdIkWLjQdoBp0+ZoD6b86ytkA9OA8cAMoAYbeJp7+Qe/EJVu2LsojFprciG8CtL139DmLvj4czvb65Ej9oT3zDPw8MMQVvgu92WRwbA75yeyN71AzfUridqVhQCmLmxvJXzXLIbJ4Zeyk4FcQnf6Ek5voPAD3yyNE5bGCaVOb+dOu7LoRx/BwYPQogUMHAhXXmmHVm+VLbzMq8ziU0bg4l4EY4I4sK43dReuQI4kwfJW8MN6m61q2NB2pb3kEl8fmv8xHkj4HTZMxGz8Bsk6RG54BGnNw4lucRjqwnoRfsAw2YQSn3IBzTb2ZXbXhwil8HfrNamklPI5g2FN5gLM88No+9o6glxAPeAeyO0WxO/NujGx7m0sCe7GOloR7Q4lJgWi90LoJshZBYd+gZ1rbILlrNUBWoO08dDxot+4ovN/ud41izY7DyHbwSTaybK3VI5mimsg27Y+QrinIZUrH9t7KP/zatVs8qrYpaXZiVG6dCnSx/ViwdI4odTJ7d17tAfTb7/ZUsGEUgAAIABJREFUBFO7dja51KePHamQNxx3PzARm2DawV6Gch8PMZXq+z0cWRhExV0eTPkaSOcRUONaePxpO+cS2LF2778PvXr56EhLv0T3SvbtHEqD9cupvMUNLjtyZE3LSnzSqjfTKt9PZy7gUkK4BLvgA9gbFLt2Qdu2Rfte
jROWxgmlCiYryy7w/N//wqJFNm7UrWuTS1ddBc17JzA24g2m8z6PkclgICsrnCN/tKPWn38iO3JgSmXYlmQLvOMOO+lflSq+PCz/kLId4j4jZ914wlK24woJIaNxCBVbZkEsLA22cyPOSWtC4u9XkfJRX1Kn94LM8tSqBQkJRVuoVZNKSqlSxSxbSu6N1xK2cy+uCsAACGkFhAJ1wFVP2F2vNstqdWd+yCUsphtxtMZFKHWBZrlQOwnCsm2S3ngADxi3velh8m9uu7mApGjYWwXSoxLpFfoB16VNos+OjVTc7sbsAsmyQ9sSatciK/YqGjd7kKCqbXzZVNZjj8Frr8Grr8Kjjxb643qxYGmcUOrMEhLg229tgmnRIvtaaKhNLHXsCJ062Z/nnAMbwmxy6VtSuYKH+BcTaBifQ+piIWqXwZSvjnQaAQebw4MP29UeAIYMgdGjbSZekWkOsXnvfdRYP5WaG7OQTHCFw9JmsXzQaij76/wfF5sQ2iRCuY2wfSts2wZbt9pt2zbbo1XE9sgtynx8GicsjRNKFV5iIsyYYYfGzZple/9HRNjORxfdcIjd/d9hYdSbjOAIA4C09HAOL21NvVVrkbk5MCcIct22u/8778ANN+hcfMfLTYdN35EZ9ykRu+djgMyYEMq3duFqAgvCYEpueeYsu5iN/+uPmXUpJNSjZUu7MGu3bvZny5ZFvwGuSSWlVOlz+LC9KzF5MgBbBrdg34B0GifupfZBFwAmBKQ2UA9y6gnba9diQ2gblnM+c+jLAaoQhMfZchFyCSLnJFsuESTT1/0d1+35jeY70pDtwAFblcwK5UiO7U6VhkMoF3MFRJSiuyRr10KHDnZipiVLijSpoV4sWBonlCqchARYvBhWrIDly+3PvJ6iYWE2sdSpE7TvBDkXwm8Nc4gMfown+ICmCVk2ubTT4ImoRlDb/4NZafDqfyAnB6pWtcnywYPL5MWDwc2mpJGErX+fmA1JBCeDJwQ2NKrK583uY03u05SbH0b6HNi5CbZvt82WJ29YeP4h4Y0b2ymsNKlUdBonlDo72dmwYIFNME2daofLAbTvlUr1J8eR0fsV7g89yE1AbloIictaUn/OBoK+zoVtTiHXXGMncKpb10dHUUoYAwm/kbn2E4I3fUNYbhoZlUMIb+0iqBXMi4IJO5vw4xc3cvjH/lTcei7dOgf/nUTq2tWOpCgumlRSSpVOxsBbb8GIEXaCofbtYdw4ctrUY2/8JLLifyI6fjnV9x9EAE8weGpBSD3s0LlQINfZXKd4nLdlg0kAyQFPEKTUaUhY7EAqNLwBqrcrnRc1xsAFF8Cvv8J999nlN4pALxYsjRNKnR1jbHIjf5JpxQo79ApsoqlDH6j8QC6NL3yWYWFv0zIhnbTFELkTPBFVCapxB7y3HOYvsB/q0wcmTIDq1X12XN60O/0rUjf+mybrthOWCAbYExPBdw2v5vNN77HtwyokLbD7RkVBkyYnJo5KYgELjROWxgmlio8x9t5oXoLpjz/AhGUR/cgn1H/8eR6I2sttRpB0IeGPpsSM24JMdSNZQMVIePV1u9xcUcZq+bMju8iOG0/2uvFUTN5KTmgQNPcQ1hq214FPE2IZP+4OcmbcxWXtatOjx9FeSCXZVJpUUkqVbn/8AbfcAjt22OTOsGHwwgtQsaJ9PysZEn4jN/5ncuJ/JiJxPUHGc9oijQju0BDcoaG4Q8PwhIZhavUgquFtENMbylUq+eM6W599Brffbi+2Nm4s8u0GvViwNE4oVfyMscOvVqywC74tWGAfGwPlL3UzcOxohjd5hTZ7U0lfBBV2gis8mpCkvjBmDhw8ZO9GT5oEPXr4+nBKRHruFjZsuYOmcYupuMsNBlJqBDMjpgsj533A5k/bwi47X+1550HPnvZnSV8g5KdxwtI4oVTJOXAApk+3c/jNmp9D7k0TaDBqFA/V28ndRghPNyRMb0C9F3Yha528RM+u8OkEm2EPZFnJuLb8
yOH1E6i6ax5BGNJjhAqtDelN4NuUWoyfeDOu30ZwVZfaXH65nabQm/fENamklCr90tPhuefgjTfsUK+6de246muvPfGMmZMK+5aBJxdCKkBoBQgtb3+GOD+Dw0pn76OCOnzYLr904ACMHw+33VbkovRiwdI4oZR3JCXZ5NLcuXbblOih/3/H8PR1o2i//zAZi6H8DsjOrEjY1xWRtfG2281rr8Hw4f597s5jXPy1/1kq//Uu9TakINmQXREW1m/M84tfZtF7N3JurE0g5W116viuuhonLI0TSnlHSortvTTpexczK31F9BPPMbzZFoaZICqlejjwejWqvXkQSQPCQ+C5h+DR0YHVayknDbZOJXXjV4TvmEmoO5f0SkJEa0NQK/hFKvPNnKvIXf0sl3dqRO/etgerr2hSSSnlP/78E+6+G5Yutc+vvtoml2JifFsvbxs61K6SdP759ursLC6y9GLB0jihlG/Ex8O8eTB3rsHVcRwP3fU0nZMPkPEHhG8BmQ6y0Nn5uv7w8SdQyQ96k55EQsavHFx7P63WryX0oMEEw8aGUby59y42TXuVi7oEc955djHPyEhf1/YojROWxgmlvC81FabOcDMm/ns29H2OoW3i+D8TRPVdHjKGRFB+XiYApkMk8trj0PN+CK/s41oXUW4m7JhJ7oavYdtUQl2ZpEUGEdLcQ3hz2F0ljOl/XoJryzNc3KErLVqUnvssmlRSSvkXtxs++ACeeMJGmgoV4Pnn4YEHincSidJq2TI7u15wMKxebfu3ngW9WLA0Tijle8bAxo2GZQkTaNLl33Q38XjWguszCBsPZEF2jWhyPv2MqMuv9HV1C+RAZhJr1j9C+11fU3V7JnjgcE3h++hOLF71P65pdw69e9tQVlppnLA0TijlW6npHl5YNY0JdZ7l+kareNQTTMwEN2YYSCoQBZ5bwgi67h/Q7l6oVfgFbLzOnQM7f8Zs/ArX5h8JzU0lLSKY7GZuqraAzDowL7497B3BBa1vIjKyiMuzlTBNKiml/FNCgh0K8d139vm558K4cXY960DldtuE0ooVdgLz0aPPuki9WLA0TihV+mS7N7DoyCgaRk4m9tcMzJ3Y1TlDYPM1TVnU5gVqd72WnueHlZqkjNsNK9a7WXPwc3q4RtJiyy6C0sEVAX80rsLP5lFujB1Bq+bBpeYO85lonLA0TihVOhgMU7JmM/LIaOrUmM/wHdBn8NFere7zggju58FdvyPBbe+FlrfY6S9Ki5xU2DUftk3FtfF7QnKSSCsXwqGmbuo1NwTHwKqM2qQk/pPu9R+mXFgxLtNWQjSppJTyb1On2sm7d+2yY6kfeMD2XPLlwOKSMnasPdb69WHdumIZH6EXC5bGCaVKM8NhZrAp/Xk6DF1C2GfOy50h6cYKfLzzHlZsvYdaLZrRpg2cc47txOmNRNOhQ/DHEpi9NZX6tcbQJ/JjWiVuJ+SQwQjsig1iTv0L6dniA1pGNS35CpUAjROWxgmlSp/tbOej3PHMyPqIl99PoO9TILngjhWO3F6R6KgUXGEVCW51G9LuXqh2dj38i8R4YP9q2DELz/ZZsGcRQSaX9JBQdjeB2i1yqRQL+0w421KupFP0SMKC2ni/nmdBk0pKKf+XlgbPPgtvvgkeD9SrZ3vxDBhgh4kFgn37oEULO3vh999D//7FUqxeLFgaJ5TyF9nsH387Ve+bRHCmgQZgbgGpBlvC6zB7/xUs+PMSFv5yAZHRNTnnHJtkyks2NWtW9JHSLhfExcHixfDLatgTuZaBHd+gX+gU6iYcQlxggiGxLiyLqUNQqyfpE3UXoYQWbxN4mcYJS+OEUqWXBw8LWcjPy/7DPbfPIGa9wYSAPAKJPatSZUsKoR4XmZExhNU7j+B6vaBuL6jaEqQEJvhOT4SdszHbZ+HeNoeQnP0A7IiuyJHGWdRsmEPNOpAdLMRld6RZuceIlGsB/5zKQ5NKSqnAsWqVncg779978+bw1FNw883+
P9/SrbfChAnQrx9Mm1ZsM/PpxYKlcUIpP/PXX5gbrkU2bcMdBcH/BpqBJx6Ccu0uWyvWYYHrcmbF9WXBwgs4sLYGYWHQsqXdwsPtfQi32255j0/2My0d4jyZXHL5NO5s+T49UhYRdTgbAHcl2BQbxOqGLShX/yEuCRtARSr6rm2KmcYJS+OEUv7hSGYiux8bSOt35gGQeR5EjIP0rDAOxVenUnwqlTKOAJAeUoXsqj2p3LQXQfV7Qc1z7SrRBWUM5KZBxgFI2Q4755CzeRZhyasBSA6pQEJsKBWaHCE21gMVINmEsM3TmvrBA6jOPUCV4m4Cr9OkklIqsLjd8Omn8MILsGOHfa1pU5tcGjjQP5NLCxbARRfZK6C4OGjUqNiK1osFS+OEUn7oyBG46y6YNAmA/Q+1ZecrmTRN2kbl3W6IPzbJtKlKbRZX7MPy5CtYuKInue4QoioeISryCJHljxAVcYSocilEhh2hYuhhaoTuo3rQfqpwgNquXTTZt51gl4EQyKwHKxuGsi62O/WiH6G39KUc5XzYGCVH44SlcUIpP/PTT7juuI2QfQfIiBJWjDU0GgR1BUiBg/FRJCVUJzIhgzqH9wGQRQQJId2QqudRsWkncGXhSj2AyTiIyThIUPYBgrMPEuI6SJjrAOHmIMHk/P2VLhPMzirReFpkEtM4nXI1IAdYZaqTFXQhHbibilyIv/ZIOhVNKimlAlNuru3Z8+KLsHWrfa1JE3jySRg0CEL9ZDhCTg60bw/r18OoUfD008VavF4sWBonlPJTxsC778Ijj9jz/nnnwaSvyaidwG7G4/H8TP3ELUQ6SSZ3PATnFrDsICAMKGe3pHrwW2x5dtS7gg6hw+hBT4IJkCHWp6FxwtI4oZQfOnjQ3nz48UcA/ry1Le9+EEb58qvphYuLgKoA6bA7vgqH9tQmMiGLhvu3E2w8xxSVHFyJ1NAo0stFkBMheCq4kQo5hFbIJDwym2pR6VSs64EwWGtCWUsrqsqNXMB9hAVAb6TT0aSSUiqwuVzwxRe259KWLfa1Ro3g3/+G224r/cml0aPh8cdtb6s1a2xvpWKkFwuWxgml/NzixXDDDbBnD9SqZXsv9erlvOkml+XsZQJuz2xqJ24hfK/n76RRThgcKRfKkbAwUsIiSCkXSWpYJdKDK5MtVcilKobadOF62tIWwU+WbSsmGicsjRNK+Slj4JNP7GI+GRnQti3Z33/JssZJLGAeCUynCqs4j1zOByoA7mzYfaAaYWE5RJdPI6K8EzPycQGJwD5gvwljv1RiPz3pxL1cSJ8yFSs0qaSUKhtcLvjyS5tc2rTJvhYba5NLt98OYYUYP+0tO3dCq1Y2AM6eDZdeWuxfoRcLlsYJpQJAYqKdQ2/BArtIw+uvw/DhJ5mDLgeIw1461AKioAz98V9YGicsjRNK+bm1a+G662DzZqhUCT7/HK66CoAccljBCn7hZ/YzjSqs4hxySQaSCCGbaAy1CKUu5WlEZVpQgxbEEEs96hFBhG+Pzcc0qaSUKlvcbvj6a3j+ediwwb4WE2PnXBo8uHT1XLr2Wpg82a5i99VXJfIVerFgaZxQKkC4XPZmwWuv2ecDBsBHH0FkpG/r5cc0TlgaJ5QKACkp9mby5Mn2+VNP2RWkj1stOpdctrGNalSjClXKVK+joihonCiBdfaUUsoHgoPthN1r19qeS61awa5ddtW4Vq1g4kS71I+vTZ1qA15UFLzxhq9ro5RS/iEkBF59Fb791iaSvv4aunaFjRt9XTOllFK+VqkSfP89vPwyBAXZEQz9+sGhQ8fsFkoozWlOVapqQqkYaVJJKRVYgoPtMIm//rLJpWbN7JxLgwZBu3Z2Qj9f9dDMyIAHH7SPR42COnV8Uw+llPJX118Py5ZBy5awbh107mwvJJRSSpVtQUF2vtLZs6FaNfvz3HNBeyKWOK8llUTkMhHZKCJbROTxk7x/voisFBGXiNzgrXoppQJUUJBNLsXFwf/+
Z4fCrV0L/fvbu9uzZ3s/ufTSS7BjB7RtC8OGefe7lVIqULRoAUuWwI03QmqqTTQ99pgdIqeUUqpsu/hiWLkSunSxoxZ69rTDpVWJ8UpSSUSCgbHA5UAr4BYRaXXcbruAwcBEb9RJKVVGhITAkCF2Eu933oGaNe1d7r594cIL4bffvFOPDRvs0A2A99+39VJKKVU0UVF2CNx//mN7qL76KvTpA/v3+7pmSimlfK1+ffjlF7jvPsjJgbvugn/+EzIzfV2zgOStnkpdgC3GmG3GmBzgK+Ca/DsYY3YYY9YApWDSE6VUwClXzvYO2rYNRo+GKlVssOnVCy6/HFasKLnvTkyEq6+G3Fyb4OrRo+S+SymlygoRePhhmDfP3jCYP98OdZg/39c1U0op5WvlysF778H48RAeDh9/DOedZ0cNqGLlraRSXWB3vufxzmuFJiJ3i8hyEVl+4MCBYqmcUqoMKV8eRoywyaWRI+2Erz/9BJ062SEUu3YV7/clJ8Nll9llTtu318m5lVKquJ1/vh3q0KMHJCRA7952FSDttaSUUuq222DxYmjUyMaKc8+1PV19NcdqAPJWUulkU6sX6f+iMWacMaaTMaZT9erVz7JaSqkyq1Ilu9To9u3w6KP2Dsb330O3brBmTfF8R0YGXHUVrF4NTZva5FWlSsVTtlJKqaPq1LE9lEaNsnenP/vMzr304YelY+VPpZRSvtO+vZ2w+8or4fBhO+/qFVdor6Vi4q2kUjxQP9/zesAeL323UkqdWrVq8NprtufShRfC3r32rvevv55dubm5dhLZ336DunVhzhw7PEMppVTJCAuDp5+2izJceqm9cLj7bjvM+a+/fF27M8vMtDchUlN9XROllAo80dEwebK92VC5MsycCa1bw+uv60IPZ8lbSaVlQFMRaSgiYcDNwBQvfbdSSp1Z7do2uNxwA6Sk2AuSH38sWlkejx16MWMGVK1qE0oNGhRvfZVSSp1ckyYwaxZ8+aVN5i9aZIc7PPYYpKf7unYnMga++872rOrQwfZobdkSbr0V3nrL3pxIS/N1LZVSyv8FBcGdd8L69ba3UkYG/Otf0LmzXchHFYlXkkrGGBcwDJgFrAcmGWPiRGSUiFwNICKdRSQeuBH4r4jEeaNuSin1t/Bw+Ooru1JEdradY+nDDwtXhjHwwAP2YiZvvqaWLUumvkoppU5OxF4wbNgAQ4eC221XiGvdGqZN83Xtjlq/3q5ad8MNdk6/2rXt6qAbNsCECfB//2d7WlWsCK1a2blB3n4bfv+9dCbIlFLKH9SqZf9WnzEDYmNtL9Fu3WD4cO0tWgRi/HiCqk6dOpnly5f7uhpKqUBjDDz/vJ3IG+zjJ5+0Fyln8vTT8MILdk6PmTPhootKtq6nICIrjDGdfPLlpYjGCaUUAEuXwj332AsHgP79YcwYqFfPN/VJTbXzP731lh12UaUKvPSSvYPuctnheitW2DlAVqywz48fnhEUZHs3vfWW7V1bSP4YJ0TkMuBtIBj4yBjzynHvnw+8BbQFbjbGfHumMjVOKFXGpafDc8/ZxXTcbhsX3n0XrrnmzJ/1towMOw9UTo7dcnNP/zPv8a232iF/hVTQOKFJJaWUOpX//tfe4fZ44P777d3h4OBT7//GG/DII3af777zaTDSiwVL44RS6m8ul71QePppO5wsMtImdoYOtTcCvMEYmDjRDrfYu9ferLj7bnjxRTtc+lSysmxiKS/JtHw5xMXZY1q82N5hLyR/ixMiEgxsAi7Fzte6DLjFGLMu3z6xQEXgUWCKxgmlVIGtXm3Px3nD4Pr3h3fesXOj+srhw7Zn6q+/wi+/2PN/bm7hy9mwAZo3L/THChonQgpfI6WUKiPuuQeqV4eBA2HsWLs89eefn/zi45NPbEIJ4H//K513N0ox52JhLPkuFkRkSv6LBWAXMBh7saCUUoUTEgIPPWSHmg0fblf8fPhhm9D5xz9gyBBo27bkvn/NGhg27OhCEF27
2tjSseOZPxsebuf86Nz56GtZWbbMkqxz6dIF2GKM2QYgIl8B1wB/xwljzA7nPV3yTylVOO3b2yT92LF2hMIPP8DPP9sYMWQIVKhQ8nXYu/doAunXX+3NhPydgETsvIHly0NoqF2gIizs6OOTvRYWZodQlyBNKiml1Olcd52d8PXqq+Gbb+DQIRtk8p+cf/jBDlkAOwzh9tt9U1f/phcLSinvqFfP9iadNg2eegr+/NP2RH37bZvgGTIEbrnFrhRUHJKT4Zln7IWKx2NvVrz6qp0fKegspjcND4cuXYqnjv6hLrA73/N4oGtRChKRu4G7AWJiYs6+ZkqpwBAcDA8+aHspPfCAXS3uwQdt79KLL4arrrJbcfReMgY2bz62J9LWrcfuExZmbyacf76dX69HD7uYQymjSSWllDqTCy6wJ/rLLoN58+DCC+18STVrwty5djJYj8deNAwf7uva+iu9WFBKedeVV8IVV8CqVfDxx/DFF3ZowYoVtgfTddfZBFPv3kVL/qSk2OTV44/DgQP2YmX4cHj22SLNbaE42cSGRZrHwxgzDhgHdvjb2VRKKRWA6te3q0D/8AOMHm3n5Zsxw2733WdXFL36aru1b1+weVeTk205f/xhtyVLICnp2H0iI23iqFcvm0jq3BkiIkrmGIuRJpWUUqog2rWzy1L36WMvQHr2tJOq/vOfdhK8YcPshYIqKr1YUEp5n4i9ODj3XHj9dXsR8fHHdsjDl1/aLSYG7rgDBg+2qwTlSU62E6bmbTt3Hvs8Ofnovuefb+fmKDtD1UpCPFA/3/N6wB4f1UUpVRb072+3xESYPh2mTIHZs2HlSrs9+6zt/ZrXg+mii2wvUrcb1q07mkBavNiu9nm8WrWge3ebROrVyyaoQvwvReN/NVZKKV9p2NB2Ub3iCjtJ6oAB9vVBg+ywiYLcpVCnohcLSinfCg+3PU9vvtkmiMaPt/Pl7dhhVwZ67jk7IXZGhn0/JeX05ZUvD82awYgRtkyNEWdrGdBURBoCCcDNwEDfVkkpVSbUrGl7rg4ZApmZduTClCkwdSrEx8P779utQgU45xxYu9YuCJFfWJi9gdGtm00kdetme0QFQGzQ1d+UUqqwUlPh+uthzhw7fOL77+1keKWIH67qE4Jd1edi7MXCMmCgMSbuJPt+CkzTVX2UUiXO44EFC2zvpe++s5Nj56lQwfZcytsaNDj2ebVqpfpiwd/iBICI9MOuAhoMfGyMeVFERgHLjTFTRKQz8AMQDWQB+4wxrU9XpsYJpVSReTy2x9LUqTbJtHr10fcaNDiaPOrWzfZC8tZKo8WkoHFCk0pKKVUUubl23o1OnUplN1W9WLA0Tiilik1ysh3CUL26TRpVrVqqk0Zn4o9xoiRonFBKFZtdu2DDBttbqXZtX9fmrBU0TpS+KyGllPIHoaH2roMqNsaYGcCM4157Jt/jZdhhcUop5X2VK8Pll/u6FkoppUqrmBi7lTFnsY6pUkoppZRSSimllCqrNKmklFJKKaWUUkoppQpNk0pKKaWUUkoppZRSqtA0qaSUUkoppZRSSimlCk2TSkoppZRSSimllFKq0DSppJRSSimllFJKKaUKTZNKSimllFJKKaWUUqrQNKmklFJKKaWUUkoppQpNjDG+rkORicgBYKev63ES1YCDvq5EKabtc3raPqen7XN6ee3TwBhT3deV8bVSHCcKQn/XT6RtciJtkxNpm5zoZG2icYJCxYlA+L3y92Pw9/qDHkNp4O/1B+8dQ4HihF8nlUorEVlujOnk63qUVto+p6ftc3raPqen7RM49P/libRNTqRtciJtkxNpm5y9QGhDfz8Gf68/6DGUBv5efyh9x6DD35RSSimllFJKKaVUoWlSSSmllFJKKaWUUkoVmiaVSsY4X1eglNP2OT1tn9PT9jk9bZ/Aof8vT6RtciJtkxNpm5xI2+TsBUIb+vsx+Hv9QY+hNPD3+kMpOwadU0kppZRSSimllFJKFZr2
VFJKKaWUUkoppZRShaZJJaWUUkoppZRSSilVaJpUKiIRqSIic0Rks/Mz+hT7/SQiySIy7bjXPxWR7SKy2tnae6fm3lEM7dNQRJY4n/9aRMK8U3PvKET73O7ss1lEbs/3+gIR2Zjv96eG92pfMkTkMueYtojI4yd5v5zzu7DF+d2IzffeE87rG0Wkrzfr7S1FbR8RiRWRzHy/Kx94u+6qYApyXhCRBiKywvl/GSci9/qirt5SwDZpLyKLnfZYIyIDfFFXbznb+BooziZmBKoCtMn5IrJSRFwicoMv6uhPCvFvzZ0vxk7xdj1Pp6DH4OxbUUQSRORdb9bxdAIhLgZCHPPXuBMIccJfzuuaVCq6x4G5xpimwFzn+cm8Btx6ivf+ZYxp72yrS6KSPnS27TMaeNP5/GHgnyVSS985Y/uISBVgJNAV6AKMPO4kPijf789+b1S6pIhIMDAWuBxoBdwiIq2O2+2fwGFjTBPgTezvCM5+NwOtgcuA95zyAsbZtI9ja77flVL1x5Y6RkHOm3uBHsaY9thzw+MiUseLdfS2grRJBnCbMSbvHPCWiFT2Yh29rTj+/vBrxXBODDgFbJNdwGBgondr57cK+m8tM1+Mvdp71SuQgh4DwPPAQq/UquACIS4GQhzzu7gTCHHCn87rmlQqumuA8c7j8cC1J9vJGDMXSPVWpUqRIrePiAjQG/j2TJ/3YwVpn77AHGNMkjHmMDAHG2gCURdgizFmmzEmB/gK20b55W+zb4GLnd+Va4DvaDjwAAAgAElEQVSvjDHZxpjtwBanvEByNu2j/McZzwvGmBxjTLbztByBH8cL0iabjDGbncd7gP1Ada/V0Pv07w89J57MGdvEGLPDGLMG8Piign6oQP/WSrkCHYOIdARqArO9VK+CCoS4GAhxzB/jTiDECb85r5e2f3T+pKYxZi+A87Mow49edLo4viki5Yq3ej53Nu1TFUg2xric5/FA3WKun68VpH3qArvzPT++HT5xuvo+XcpOgEVxpmM9Zh/ndyMF+7tSkM/6u7NpH4CGIrJKRBaKSK+SrqwqsgKdN0Wkvoiswf7/Hu38ARqoChVLRKQLEAZs9ULdfKU4/v7wd2d7TgxEZSEWeltB/62Fi8hyEflDREpb4umMxyAiQcB/gH95uW4FEQhxMRDimD/GnUCIE35zXg/xdQVKMxH5Gah1kreeLIbinwD2YU8a44DHgFHFUK7XlGD7nCxBYs6yTK8rhvY5XTsMMsYkiEgU8B22q+lnha9lqVGQ/+en2icgfl/O4GzaZy8QY4w55NyJ/FFEWhtjjhR3JdWZFcd50xizG2jrdO//UUS+NcYkFlcdva24YomI1AY+B243xvh1T4wS/vsjEJzNOTFQlbXjLRbF9G8txhizR0QaAfNE5C9jjNcSAsVwDEOBGcaY3b64RxkIcTEQ4lgAxp1AiBOlvX5/06TSaRhjLjnVeyKSKCK1jTF7nRNAoea0ycv2Atki8gnw6FlU1SdKsH0OApVFJMTJGtcDStMdhwIphvaJBy7M97wesMApO8H5mSoiE7HdI/05qRQP1M/3/GT/z/P2iReREKASkFTAz/q7IrePMcYA2QDGmBUishVoBiwv8VqrExTnedO5iIkDenF0uLDfKY42EZGKwHTgKWPMHyVUVa8pyb8/AsTZxIxAVRZiYbErjn9reb1ijDHbRGQB0AEv9jIphmPoDvQSkaFAJBAmImnGmNPNv1RsAiEuBkIcC8C4Ewhxwm/O6zr8reimAHmrcd0OTC7Mh51/kHnzB10LrC3W2vlekdvHuQieD+TNYF/o9vUDBWmfWUAfEYl2JujuA8wSkRARqQYgIqHAlfj/788yoKnYVf/CsBNvH7+CSv42uwGY5/yuTAFuFruCQ0OgKbDUS/X2liK3j4hUdyb6w7mL2hTY5qV6q8I543lBROqJSITzOBroCWz0Wg29ryBtEgb8AHxmjPnGi3XzlbP6+yNAnE3MCFQFaRNVOAU5
/0TnTWHh/G3WE1jntRqe2RmPwRgzyBgTY4yJxd7k/sxbCaUCCIS4GAhxzB/jTiDECf85rxtjdCvChh1vORfY7Pys4rzeCfgo336/AgeATGy2sa/z+jzgL2wyYAIQ6etjKmXt0wibGNgCfAOU8/Ux+ah9hjhtsAW4w3mtArACWAPEAW8Dwb4+pmJok37AJuzdvSed10YBVzuPw53fhS3O70ajfJ990vncRuByXx9LaWof4Hrn9+RPYCVwla+PRbdT/j8+43kBuNT5t/+n8/NuX9e7FLTJP4BcYHW+rb2v6+7LNnGenzS+Bsp2NjEjULcCtEln53chHTgExPm6zqV5K+D5pwf27/k/nZ//9HW9C3sMx+0/GHjX1/Uu5P+DUh0XAyGO+WvcCYQ44S/ndXEqo5RSSimllFJKKaVUgenwN6WUUkoppZRSSilVaJpUUkoppZRSSimllFKFpkklpZRSSimllFJKKVVomlRSSimllFJKKaWUUoWmSSWllFJKKaWUUkopVWiaVFKllojsEJFLfF0PpZRSpZPGCaWUUqeiMUIp79CkklJKKaWUUkoppZQqNE0qKaWUUkoppZRSSqlC06SS8gsiUk5E3hKRPc72loiUy/f+CBHZ67x3p4gYEWlyirIWiMgLIrJIRNJEZKqIVBWRL0TkiIgsE5HYfPv3cF5LcX72OK6s50XkdxFJFZHZIlIt3/vdnO9JFpE/ReRC5/UbRWTFcfV6RER+dB5/KiJjRWS6U+4SEWmcb98WIjJHRJJEZKOI3JTvvX4iss75XIKIPOq8Xk1Epjl1SRKRX0VEzwFKqYCgcULjhFJKnYrGCI0RqgQZY3TTrVRuwA7gEufxKOAPoAZQHVgEPO+8dxmwD2gNlAc+BwzQ5BTlLgC2AI2BSsA6YBNwCRACfAZ84uxbBTgM3Oq8d4vzvGq+srYCzYAI5/krznt1gUNAP2wC91LneXWgHJAEtMxXr1XA9c7jT533uzjf+wXwlfNeBWA3cIfz3rnAQaC18/5eoJfzOBo413n8MvABEOpsvQDx9f9n3XTTTbeibhonNE7opptuup1q0xihMUI372yaWVT+YhAwyhiz3xhzAHgOe3IGuAl74o4zxmQ4753JJ8aYrcaYFGAmsNUY87MxxgV8A3Rw9rsC2GyM+dwY4zLGfAlsAK46rqxNxphMYBLQ3nn9H8AMY8wMY4zHGDMHWA70M8ZkA187+yAirYFYYFq+cr83xix16vRFvnKvBHYYYz5x6rQS+A64wXk/F2glIhWNMYed9/Nerw00MMbkGmN+NcaYArSVUkr5A40TGieUUupUNEZojFAlRJNKyl/UAXbme77TeS3vvd353sv/+FQS8z3OPMnzyFN8b9531833fF++xxn5PtsAuNHpIposIsnAediTMcB4YKCICDaoTXICREHK7XpcuYOAWs7712PvaOwUkYUi0t15/TXsXZXZIrJNRB5HKaUCh8YJjRNKKXUqGiM0RqgSEuLrCihVQHuwJ8A453mM8xrYLpr18u1bvwS+N78Y4KcCfHY38Lkx5q6TvWmM+UNEcrBdRwc6W0HsBhYaYy49RbnLgGtEJBQYhr3jUd8Ykwo8Ajzi3M2YLyLLjDFzC/i9SilVmmmcOLZcjRNKKXWUxohjy9UYoYqN9lRS/uJL4CkRqe5MXvcMMMF5bxJwh4i0FJHyznvFZQbQTEQGikiIiAwAWnFs19JTmQBcJSJ9RSRYRMJF5EIRyR+0PgPeBVzGmN8KWKdpTp1uFZFQZ+vsHH+YiAwSkUrGmFzgCOAGEJErRaSJczcj73V3Ab9TKaVKO40TR2mcUEqpY2mMOEpjhCpWmlRS/uIF7BjiNcBfwErnNYwxM4ExwHxsl8zFzmeyTyymcIwxh7Djjh/BTow3ArjSGHOwAJ/dDVwD/Bs4gL0r8C+O/Xf3OdDG+VnQOqUCfYCbsXc/9gGjsRP2ge3+ukNEjgD34oy1BpoCPwNp2DZ6zxizoKDfq5RS
pZzGiaPlapxQSqljaYw4Wq7GCFWsROfWUoFGRFoCa4FyzsR0pZaIRAD7sasqbPZ1fZRSqizQOKGUUupUNEYoVTjaU0kFBBHp73TXjMZm2qeW9iDguA9YVpAgICKfisgLXqiTUkoFHI0TSimlTqUsxAjQOKFKhk7UrfySiKTle1oeMBxNki4Hri5gOTdjlw2the3iOhN4wBhzpPhqe8rv3gEIcO1J3hsM3GmMOa+k66GUUoGouOKEU1Yj7NCIC7Cx4mNjzIhiqurpvncHGieUUqpEHBcnKuR77AaGF6KccsArwAAgAjt/03BnTqISc7oY4bw/GI0Tygu0p5LyS8aYyLwN2AX0NcaIs3U2xuwtYFG/Az2NMZWARthEq1ey98aYWGNMA2PMKm98n1JKlSXFFSdEJAyYA8zD3oCox9HJXUuUxgmllCo5x8WJncClTowIMcaMLURRjwOdsHMbNQPOBZ4q/hofS2OEKi00qaTKNGPM7uMmynMDTU61v4gYERkqIptFJFVEnheRxiKyWESOiMgk5wIkb/+7RGSLiCSJyBQRqXNcWfc6ZR0WkbFitQQ+ALqLSJqIJOerQrSITHe+e4mINHbKEhF5U0T2i0iKiKwRkTbF1lBKKVV2DQb2GGPeMMakG2OyjDFrTrWzxgmllCpzrgLGGGOSjDEHsD1bh5xqZ40TKtBoUkkFJLHLdiafZovJt+95IpICpALXA2+dofjLgI5AN+wKDuOAQUB97B2KW5xyewMvAzcBtbF3QL46rqwrgc5AO2e/vsaY9diVFhY7d08q59v/FuxwvWjs6hQvOq/3Ac7H3h2pjO1+e6gATaWUUmVSIeJEN+wqODNF5KCILBCRc85QvMYJpZTyc4WIE+Js5HteT0QqnaZ4jRMqYGhSSQUkY8xEY0zl02y78u37mzP8rR7wGrDjDMWPNsYcMcbEYVeGmG2M2WaMScHOydTB2W8Qdt6NlcaYbOAJ7N2C2HxlvWKMSXbqMx9of4bv/t4Ys9SZOPCLfPvnAlFAC+yqjusLMQRQKaXKnELEiXrYZZfHAHWA6cDk/HeRT0LjhFJK+blCxImZwHARqS4itYAHndfLn6Z4jRMqYGhSSSmHMSYB+IkTs//HS8z3OPMkzyOdx3WwdxPyyk/DZvvr5tt/X77HGfk+eyon3d8YMw94FxgLJIrIOBGpeIaylFJKnVkm8JsxZqYxJgd4HagKtDzNZzROKKVU2fEisApYDSwCfsQmaPaf5jMaJ1TA0KSSCkgiMsgZP3yqLeYUHw0BGhdTNfYADfLVqQL2QiShAJ81hf0yY8wYY0xHoDW22+q/CluGUkqVFYWIE2sowjm5gDROKKVUKVXQOGGMyTTGDDPG1DXGNMImfVYYY9zFUA2NE6rU06SSCkjGmC/yr+hwkm0X/B0sYpyJ6Rpg7zTMLaZqTPx/9u47rOryfeD4++EwVdwjlEpEKzQTw9ziQFTEiYr6rTRnbk3DUea2NG2ZZqmVmubEvXCg4sh+SdkwKyypQBy5U5w8vz8eUoRjOeCcA9yv6/K6OOc8n4f7Q98vN5/7WUAXpZS/MkeNvg58qbWOv4trj2PWYv/b8oqblFLPKKWqKaVcgIvAZcym40IIIay42zyBOemtulKqoVLKAgwC/gIOZUIYkieEEMJB3cPzRCmlVMnU54nqwGvA6EwKQ/KEcHhSVBK5XXnMNNW/gT3Az0CPzOhYa70Nk1QigSTMDKgOd3l5NHAQOKaU+uu/GgP5gdnAGcwU2VOYJRpCCCEegNb6Z+A5zCk6Z4CWQIvUpXAP2rfkCSGEyP58Mc8TF4F5wHCt9ebM6FjyhMgOlNZZNaNbCCGEEEIIIYQQQuRUMlNJCCGEEEIIIYQQQtwzKSoJIYQQQgghhBBCiHsmRSUhhBBCCCGEEEIIcc+kqCSEEEIIIYQQQggh7pmzvQN4EEWLFtWlS5e2dxhCCOFwYmNj/9JaF7N3HPYmeUIIIayTPGFInhBCCOvuNk9k
66JS6dKl2b9/v73DEEIIh6OU+t3eMTgCyRNCCGGd5AlD8oQQQlh3t3lClr8JIYQQQgghhBBCiHsmRSUhhBBCCCGEEEIIcc+kqCSEEEIIIYQQQggh7lm23lNJCJH7XLt2jYSEBC5fvmzvUByCu7s73t7euLi42DsUIYRwCJInbid5QgghbpEckdGD5gkpKgkhspWEhAQ8PT0pXbo0Sil7h2NXWmtOnTpFQkICPj4+9g5HCCEcguSJWyRPCCHE7SRH3C4z8oQsfxNCZCuXL1+mSJEikgQApRRFihSRkRYhhEhD8sQtkieEEOJ2kiNulxl5wmZFJaVUE6XUz0qpw0qp4XdoE66U+lEpdVAp9bmtYhNCZC+SBG6Rn4UQQmQkvxtvkZ+FEELcTn4v3u5Bfx42Wf6mlLIAM4BgIAH4Sim1Rmv9Y5o25YARQC2t9RmlVHFbxCaEEEIIIYQQQggh7p2tZipVBQ5rrX/TWl8FFgMt07XpAczQWp8B0FqfyLpwJgCTsq57IYSw4tSpU9SvX598+fLRr1+/2z6LjY2lYsWKlC1blgEDBqC1tlOU9uMwM1q1hq194cfPsqR7IYSwRnJENnLjKqxtD/Fb7B2JECIXcdQ8YauiUingzzSvE1LfS+sx4DGl1B6l1D6lVBNrHSmleiql9iul9p88efI+QkkBDmEmRY0CJCkLIWzD3d2d8ePHM3Xq1Ayf9e7dm1mzZhEXF0dcXBybNm2yQ4T2k2ZGawhQHuiolCqfrk3aGa0VgEFZEsyNK3DmJ9jYGb7/JEu+hRBCpCc5Ihu5+rfJE6uaQ3yUvaMRQuQSjponbFVUsrZIL301xxkoB9QDOgJzlFIFM1yk9SytdRWtdZVixYrdRyhOwHygKzAeiLASihBC3Fl8fDx+fn706NGDChUq0KhRI5KTk//zurx581K7dm3c3d1vez8pKYnz589To0YNlFJ06tSJVatWZVX4jspxZrQ6u0OrtfBoMGzuBt/NypJvI4TImSRH5AIehaFdNBT2g1Ut4chGe0ckhMhGclqesMmeSpiZSQ+nee0NHLXSZp/W+hpwRCn1M6bI9FXmh2MBZgN5gLeAZOB95DA8IbKXQYPgwIHM7dPfH95997/bxcXFsWjRImbPnk14eDiRkZEkJSWxcOHCDG0DAwOZNm3aHftKTEzE29v75mtvb28SExPvK/5szNqM1mrp2jwGoJTag/lFPkZrnWEYRinVE+gJ8Mgjj9xfNC55oNVqWNMGtrwIN65B5b7315cQwm7slSckR+QCHkWg3TZYHgyrW0HzSPBtZu+ohBD3QJ4lMoetikpfAeWUUj5AItAB+F+6NqswM5TmKqWKYh4efsu6kJyAaYAHMAW4BMzBPKcIIcS/8/Hxwd/fH4CAgADi4+MZOXIkERER99yXtTXPufBUinud0eoN7FJKPam1PnvbRVrPAmYBVKlS5f6nojq7Q4sVsC4covtByjUIyJoVd0KInEVyRC7hURjabYXljWBNGDRfDmVb2DsqIUQ2kJPyhE2KSlrr60qpfkAUpmrzidb6oFJqHLBfa70m9bNGSqkfgRtAhNb6VNZGpoDJQF5gDGbG0meAS9Z+WyFEpribUYCs4ubmdvNri8VCcnIyU6ZMua/RBW9vbxISEm6+TkhIoGTJkpkbsONzqBmt8fFQvDjkyeMGzZfB+o6w4yVTWHrm3pO9EMI+7JUnJEfkIu6FoO0WiGwEa9tCs6VQrpW9oxJC3AV5lsgctpqphNZ6A7Ah3Xuj0nytgcGp/2xIAaMxM5aGAZeBJYDbv10khBAZRERE3NfogpeXF56enuzbt49q1aoxf/58+vfvnwUROjSHmdGanAzVRkXhc+0xts72IV8+VwhdDBueg5ihprBU7ZXM/rZCiBxOckQO5l4wtbDUGNa1g2ZLoFyYvaMSQmQz2TVP2Kyo5PiGYvZY6o/ZG3ZF6mshhMg8pUuX5vz581y9epVVq1axefNmypcvz8yZM3nhhRdI
Tk4mJCSEkJAQe4dqUw41o9UjmasfduHL007U6b6NHR89ToECLhC6EJycYferkHIdaoz6776EEOIeSI7IxtwKQJvNENkE1oZDs8XwWFt7RyWEyGEcMU8oa+vvsosqVaro/fv33/uFp34CiysULGPlw48xBwzVBdYAng8UoxAicx06dAg/Pz97h+FQrP1MlFKxWusqdgrJYdxvnujDd8y7GsylM1B+wFZiPqhIkSJAyg1zItzBeVB9JNQcB7K3iRAORfJERjklTyilmgDvYQYf5mitJ1lpE47Z10ID32qt0896vc19P0/cydULEBkCSfug6UJ4on3m9S2EeGCSI6x7kDyR+4470ymwvj0srg1/HbTSoBuwANgFNAbOWmkjhBAip0oGvuQprrruJF8hZ378oB7V+uzn+HHAyQKNP4GK3WHfBNg1ArLx4IwQQmQXSikLMAMIAcoDHZVS5dO1KQeMAGpprSsAtj9dwdUT2myEkjVhw//g0CKbhyCEELaU+4pKysmMGgAsCYSkL600+h+wFNgPBAF/2Sw8IYQQ9uUB7OBbgijBRdddFPDMz6+zgnhm0B4SEjB5JPgjqNQbvpoMO4dIYUkIIbJeVeCw1vo3rfVVYDFmz4q0egAztNZnALTWJ7IiEI3ZkdXa8DRgCkthG6BUHdj4HPy4ICvCEEIIh5D7ikoARZ+EDrvBrSAsC4Lft1lpFIbZE/ZHoD5wzKYhCiGEsJdkPAlhPfUIIy/nXGMo5PEQf85pxDPDojlyBFNYCpoBlftD7DuwfaAUloQQImuVAv5M8zoh9b20HgMeU0rtUUrtS10ul4FSqqdSar9Sav/JkyfvOZATwGzMZhlf36mRaz4IWw/edWFjJzg4/56/jxBCZAe5s6gEZj+lDruhgA+sbApxq6w0agqsB45g0kaClTZCCCFyFg9gHhYOs5RAuqA547qTIi5lOPZxU54ZvYFffsHspVT/PQgYDN+8D9v6mCXWQgghsoK1DezSV/OdgXJAPcxpoXOUUgUzXKT1LK11Fa11lWLFit1zICUwG2Xkwww9771TQ5e80HodPNIANr0AP8y95+8lhBCOLvcWlQDyeUH4TiheGda2vcMIQgPMgUPHgEBMgUkIIUTOFgxsxoljfEwdBnCBU647KGqpwKk5rag6OZIffsAUlupOharD4dsPYXNPKSwJIUTWSAAeTvPaGzhqpc1qrfU1rfUR4GdMkSnTrZwKn8WbAlMwYG3dAwAueaDVWni0IUR1he8/zopwhBDCbnJ3UQnAozC03QoP14NNneHr9600qoVJFWcxhaVfbBmhEEIIu6gF7EBxiXepw0iO8pfLNoqpKpz7qD01Zizg668xhaXar0P11+CHj2FTF3NKnBBCiMz0FVBOKeWjlHIFOmCOak5rFWbyEEqpopjlcL9ldiDHjsGkSRBeA2b9BGWAUGDdnS5w8YCWq6F0Y9jcHb6bldkhCSGE3UhRCcya59broGwr2D4AvhhnZW+MKsAO4AqmsPSDraMUQmRzW7ZsISAggIoVKxIQEEB0dPTNz2JjY6lYsSJly5ZlwIABaNmfx0FUBmJQODOOukzhF066bKY4gfw9oxO1581i3z5MYanWOKg5Fn6cb/bPSLlu7+CFENmI5Ih/p7W+DvTDLCE4BCzVWh9USo1TSrVIbRYFnFJK/QhsByK01qcyO5aHHoKYGHBygja14O2voSLQGnPUj1UuHtByJfg0hS0vwoGZmR2WECKHc9Q8IUWlfzi7Q/NlUKEz7B0NO16ysoThKSAGsGCWan9j6yiFENlY0aJFWbt2Ld9//z3z5s3j+eefv/lZ7969mTVrFnFxccTFxbFp0yY7Ripu5wfsQlGIIQTxEfs54byeEilNSH7vRequeI+dO1Ob1hhlZi399DmsfxZuXLNn4EKIbERyxH/TWm/QWj+mtfbVWk9MfW+U1npN6tdaaz1Ya11ea11Ra704q2J5vPwNdu+GggUhrC6M2QXVMRs5zb3T
Rc7u0GIFlGlu9uH7ZnpWhSeEyIEcNU/kyqLSamLZbu0QUCdnaPwJPD0Qvn4PorpZGWl+AlNYyouZXbsvy+MVQjiW+Ph4/Pz86NGjBxUqVKBRo0YkJyf/53WVK1emZMmSAFSoUIHLly9z5coVkpKSOH/+PDVq1EApRadOnVi1ytrhAcJ+fDCFpUfoSQiL2c5x55WUuNGaq28OomH060RFpTatNsLss/TLUljfAW5ctWfgQggbkxyR853nPDWpyXafT9i1Cx55BNo2gkGbIAjoAsy408XObtBiuVkhEd0fYt+1XeBCCIeQ0/KEs82+k4O4Tgod6MZVfmcB6+hIrdsbKCeo9w64FYIvxsCVcxC6yCSAm3wxZz40wGzNtx6zJE4IYUuDgAOZ3Kc/cDd/3sXFxbFo0SJmz55NeHg4kZGRJCUlsXDhwgxtAwMDmTZt2m3vRUZGUrlyZdzc3EhMTMTb2/vmZ97e3iQmJj7gnYjMVxLYCTShPS3xZCGhlqWUuNGZ42Nfpekbl1hxZTwtWyioMsQMVGwfBGvbQbOl6fKIEMIW7JUnJEfkbBYsFKYw3ejGlJKn2bnzZZo0gQ7N4dPPIU87s07vb2CY1Q5cTV5Y3yF1dcQNkzeEEDYlzxKZI9cVlZxxYjEraUNj/kdDjrKUITS/vZFSUHM0uBc0DwQrm5k10K750jR6BDNjqSHQBFiNKTAJIXIDHx8f/P39AQgICCA+Pp6RI0cSERHxn9cePHiQYcOGsXnzZgCra56VsnZysrC/opiDG5rRlI7sZDZ1LfMpkZKH4yMm0vq9i3y+5G06tFdm1quTC2zrC2vCoEWkWfoghMjxJEfkbHnJy2pW04lORBDB6aKn2RY9kRbNFZ3aw4xzkKc7DMcUlsYBGf6LWVwgdDFseBZ2vmwOeKg61Ob3IoSwj5yUJ3JdUQnA9wcflhTcw/+8m/IyrUlkFm/TNWPDpweCW0GzDG55Q2i9wZwWd1NJzObdjYBmwHJIX6ASQmQZe04Yd3O7NevEYrGQnJzMlClT/nN0ISEhgdatWzN//nx8fX0BM5qQkJBws31CQsLNqa3CERXA7AUbRiDdiOU8zzh9RLEUD04OfJeOHyWTPO8DunR2Av8+prC05UVY1RJarjKbtQohbMJeeUJyRM7niisLWUhBCvIGb3A6/2nWbZxB+7YW+vSAKRcg70swAbgIvMWdCkufg7LArmFmxlK1ETa/FyFyK3mWyBy5rqikNTz3HBw9WozFW7fT6ak2vEM3EjjGEkag0v+6r9AZXAvA+vawtC602Qz5vNI0KA5EY2YrhQGfA+1sdTtCCAcSERHxr6MLZ8+eJTQ0lDfeeINatW4tvfXy8sLT05N9+/ZRrVo15s+fT//+/W0RsrhveTAzVJ/laV7iBy5QyeldiqXk5eSLk+g6/xKXPvqEvi86w1M9TGEpqiusagat1oBLXnvfgBDCxiRH5DwWLMxkJoUpzBu8wdk8Z1m6aj5dn3MlYjC8dhYGjIF3lJmxNBNz3M9tnJyh6WfgZIHdr5j9XGu8ZvN7EULYX3bNE7luo26lYMkSyJMHnq+ZjznRaynOsyzjVQIZSArpT3wDyrWC1uvh3BFYUgfOxadrUBjYijnzoQOwIKtvQwiRDU2fPp3Dhw8zfvx4/P398ff358SJEwDMnDmT7t27U7ZsWXx9fQkJCbFztOK/uQGLgc74MSXEJ4AAACAASURBVIpfGMoFp4kUSRkPnT6jX+GOTHkvdZPuJ1+AkPnw5w5Y0RSuXrBf2EIIhyQ5Iru5BIBC8TqvM4UpLGEJ7VxbMmfRRbp2hfHjgJfgFQ2zgU6A1TNBnZyhyTwo3wn2joK9Y8xIuBBCpOGoeUJZW3+XXVSpUkXv37//vq5NSoKQEDh4ED74NIUpz0UQx9s8TjjfMB8PrGyoenQfrGwKzh7QdgsUKZ+uwUWgJWbm0kdAj/uKTQhxZ4cOHcLPz8/eYTgUaz8TpVSs1rqKnUJyGA+SJ+5e
CjAQmE4SPfHjA5z0NM6owbAulNE/LGfM8NS9lH5aDBueA69qELYR3PJncWxC5D6SJzKSPHFn95cnTgE1gc7ACP5Z2PYxH9OTnlSnOmtS1jFhSCHefRe6dIGyc+BVJ/OksASsPWmYfZU2d4eDc6H6SKg5zoyICyEyjeQI6x4kT+S6mUr/8PKCmBioWxd6Pu9Elzffooaews8spQyhnOJ8xotKVofwnaBTYHEgHEufgPIC64AQoCcwLUMXQgghchonzO/7V/BiFr/yPC6qHwX1TGi2nrFVmhEx5qIZdH6iAzRbDMf+DyIbweWzdo5dCCHEvcsPVAVexQwqmJUO3ejGUpayn/00cKrH0LePMXo0fPopfNMe3rluFk634J95Tuk4WaDxx1CxO+ybYJbDZeMJAEKI3CHXFpUA8ueHDRugY0d4ZRgEDHyZ1inzOMYOylCP3zie8aJiFaHDbnD1hGUN4M+d6Rq4Aysx+ysNBCZn+X0IIYSwNwVMBCZRhEXE0Yb86gU89TxosJ2pDRvT95Vz5tngsbbQfDkc/xqWB0PyaTvHLoQQ4t64APOAwcD7wP+AKwC0oQ3rWMev/EodVZvOY47w1luwfDlENYeZV8ymGU3A2hA2KCcI/gieehH+bxLEDJPCkhDCoeXqohKAqyssWABDhsD098HSvhN9r6/lPD9Tnlp8xa8ZLyroawpL+bxhRRP4dV36XjETWztiDhMdA0gyEEKInG8YMIP8rOUQoXipMPKoxagaXzIzrCFdhpwmJQUo2xJarIC/voNlQZB8yt6BCyGEuCdOmDPd3sT83d8MMPvlBRPMVrZymtPUohaNBx9k9myIioLPg+GTi/AFEIRZSJeBcoKGH0ClPrB/CuwcIoUlIYTDyvVFJQAnJ5g6lZujCD80DGHspWiucJYa1GQ9X2e8yLMUtI+BIk/CmtZwaFG6Bs7AZ0AXYCymuCTJQAghcr4+wHzysJNvCeZx1RA3p5U4Vf6eeS/Uo/2A41y/Dvg2g5ar4fQhWFofLp2wd+BCCCHuWQQwF9gO1AfM7/LqVCeGGAACCaRi9y9ZtAi++AKmBcK8c/A9UA84Zq1b5QRB06HyAIh9B7YPksKSEMIhSVEpjcGD4fPPYe9eWFatGu+f3kMK7jSnLrPZlvGCPEWh3TYoVRs2PAvffpiugQWYA/TGjGLcWnMthBAiJ3seWIY7X/Ml9XhGPYPFeR3O5X9leb+6tOqXwNWrgE8TaL0Ozh42haWLVh8thBBCOLTOmN2SfgRqAUcAeJIn2c1uClKQIIIo0n4rq1fDjz/ChBow/6RpWQf4w1q3SkH9dyHgJfhmGmzrZ/Z2FUIIByJFpXQ6doSNG+H33+FN/8eZE78XC6XpSQhjWZrxArf80HoDlGkGW3vDl5PSNXACZnBrzXUvpLAkhBC5QWtgLS7EEU0gDXkM7RyFi+9R1g8NJKTPES5fBh5tCGEb4Fw8LKkHfx+1b9hCCCHuQyiwjVsnw30LQBnKsJvdlKEMoYSS3DSSTZsgIQGGVYVPE+AkprAUZ61bpaDuW1AlAr79ALb2kcKSEMKhSFHJiqAgczLctWvw8tOlmPN1DB5UZwwd6Mn0jBe4eECLSHjif7B7BMQMTzc9VQFTMSdEzAZeAK7b4E6EEI4kPj4eDw8P/P398ff3p1evXjc/i42NpWLFipQtW5YBAwagZYp7DtEI2Iwzx1hHHcIowQ2Xbbg+fJbo0YEE9f6FS5eAh+tBm03wdyIsrQcXEuwbthDC5iRH5AQ1gN2YbTACAXOgjxde7GQnAQQQTjiH635MdDScPw8Dq8Lsw+Y0uEDgB2vdKgWBk6HqcPjuI9jcUwpLQuRCjponpKh0B/7+ZhlcsWLQq1Yhpm2MohAtmE1/mvEaOv3+SBYXaPqZ2VDvq8mwtRek3EjTQAETUv99hjkl4pqtbkcI4SB8fX05cOAABw4c4MMPby2Z7d27N7NmzSIuLo64uDg2bdpkxyhF5qoNbMeJSyym
Dl1w56rLDtwfusLeNwIJ7PMDFy4A3nWg7Wa4eByW1IXzv9s7cCGEjUmOyAnKA3uBUkBjYAUAhSjEFrYQTDDd6c72KlOIMVsu0asaTP/ePC3UBfZb61YpqP06VB8JP3wMUd3SPWsIIXIDR8wTUlT6Fz4+sGcPVKoELzbzYOTHy/GmO+uZwDP05Fr62Ub/bKhX7RX4bhZseA5upC8cvYo5KWIZ0JZ/jh8VQmQf8fHx+Pn50aNHDypUqECjRo1ITk6+7/6SkpI4f/48NWrUQClFp06dWLVqVSZGLOzvaSAGJyzMpi4vcZnLLjF4FLYQ+1ZdavaL5cwZoGQNaLsFLp82haVzR+wduBDiHkmOEPAwZsbS05i/982DX17ysoY1tKc9QxnKggoj2LVbU6AAdK8JU/aBJ9Ag9eoMlIJa46HGGDg4F6K6SGFJiGwop+UJZ5t9p2yqaFHYtg3at4ch3Z0Z8ecs1o1+iFg1gcc5yXcsIh8ety5QCmpPBNcCsGsYXLsAzZaZJXI3DQY8MCcEtQBWAnlseVtC5AiDGMQBDmRqn/748y7v/me7uLg4Fi1axOzZswkPDycyMpKkpCQWLlyYoW1gYCDTpk0D4MiRI1SuXJn8+fMzYcIE6tSpQ2JiIt7e3jfbe3t7k5iYmHk3JRyEH7AbRUPeIoj8rGWsawx5PYP4YVoDqg3cyJ4pNSnmVdUcArG8ISwOhPDtUKisvYMXIluyV56QHCGgMLAVCMcc2nMcGIUrrixkIQUpyCQmcabMGXbsmkFIIwvd68PMdfBGkFk8vRoIttZ1zdHgZIE9r5miUsg8cJLHOiHulTxLZA757XMX8uaFVavgxRfhjbGKrgnjKTarBNFOAyhNI35gDQ9R6PaLqg4F94KwpResaAKt1ppNvW/qDbgB3TEb+60F8tnqloQQD8jHxwd/f38AAgICiI+PZ+TIkURERNzxGi8vL/744w+KFClCbGwsrVq14uDBg1bXPCulsix2YU8+wC4UwYwmhPxEMsR1F/lUEHHTG/HMgDV8MbEBXl5PQ7vtprC0tC60i4bCj9s7eCHEXZIcIYw8mMHjHsAYTGHpfSxYmMlMilCE13mdM6XOsGXnZ7Ro4kqPJvD+UpjZGpoBS4GW1rquPhKUBXa/AinXoekCsx2HECJbyEl5QopKd8nZGebMgVKlYPx4aHa8Hx1XFmeR83OUJZD9bOIJSt1+0VM9wTU/bHweljWAsE2Qp2iaBl0xM5aex4xHbAQK2OqWhMj27mYUIKu4ubnd/NpisZCcnMyUKVP+dXTBzc3t5nUBAQH4+vryyy+/4O3tTULCrY2ZExISKFmyZNbfhLCTksBOFE14iZYUYCHdXWLwpCG/zwilSv8V7BsdwsMPVzKzlJYFmc27222DIuXtHbwQ2Yq98oTkCHGLC/ApUAJ4EzgBLEDhzkQmUpjCvMzLnCt6jrXRkXRonpc+beCtT2FxZ2gDzMfsxppBtRFmhlLMUNA3IHSRFJaEuAfyLJE5pKh0D5SCceOgZEno2xeq1glnwLYiTMvTmqeoyTaiqMMTt1/0RAdw9YS1bWFJoNkrwzNt8akjZsZSB6AhEIWZLiuEyG4iIiL+dXTh5MmTFC5cGIvFwm+//UZcXBxlypShcOHCeHp6sm/fPqpVq8b8+fPp37+/DSMXtlcU2IaiGV3pSH5mE+6yg/w05ugHLQkYsJh9Q8MoU+ZJCN9hBiaW1IPwaCj6pJ1jF0LcD8kRuZkCJgMPYbbBOAWsAgowhCEUohA96EGb/MEs27ieHm0LMfgFGH8WPAbCc8BFzHynDJ6JMIWlHYNhXTg0WwIWVxvdlxAiM2XXPCEbdd+HXr1g+XL45huIejqIMad2cI3L1KM2kfxfxgvKhJpZSn8nwOLacPbXdA3CMCdDfAfUx4xgCCFympiYGJ566ikqVapE27Zt+fDDDylc2BSRZ86cSffu3Slbtiy+vr6EhITYOVqR9QoAUSga0o5uRLGA8y7byO9U
hZPTw6ny9uf89BNQxM8UliwuprB04lv7hi2EyBKSI3KDl4AFmG246wJJAHSlK8tYRiyxhOSpy8xVSYSHw2uDoNpYaKKhJ/DOnboNeAnqT4PDq2BNW7guBwEJkRM5ap5Q1tbfZRdVqlTR+/dbPXTTJnbvhubNwd0dXt57mKE+jUnhGO8TST+aZLzg2H6IbGIeDNpusTLavAWzaro0sA3wyupbECLbOXToEH5+fvYOw6FY+5kopWK11lXsFJLDsHeeuDtXMAsbVrCXcdRiEAWut+Cc0048h8xmT9duVKwInDlsZixdu2hySImn7Ry3EI5J8kRGkifuzD55IgqzsK04sBkwhzFsYxstaUkJSrDpxhbe7FWGOXOg90A4/g6sUDAOGImZ+5TBgQ9gW1/waQotIsHZ3Ub3I0T2ITnCugfJEzJT6QHUrm0KS87OMM6/LG/H7sGFx+hPc0awIOMFD1WBDjGgnMxSuKT0s5qCMfsq/QEEAn9m+T0IIYSwNzdgCdCJmoziAOM457yeAroxF97pTvWF7xMbizkBrv1Os6R6WRAc+8rOcQshhLg/jYFo4AJQE4gFIIggoonmLGcJtNSi/6zvGTIEZr4HHl3guRQYBQwDrE4L8O8DwR/BkQ2wuhVcu/8jyoUQ4m5JUekBVagAX3wBjzwCETUeYtKGneQlkEk8TyfeznhBkfLQYTe4FTIPBX9sT9egLmbE4gSmsHQky+9BCCGEvTljNnLtSyWm8guDuWiJpMCNVlyaNIBaayazdy9QwMcUltwLw7KGcPQLO8cthBDi/lQF9mBOiKsHbE19tyq72IUFC3VVIGFTvmD8eFg4Dy60hRdvwBSgD5BirdunekKjjyF+M6xqAdcu2eZ2hBC5ls2KSkqpJkqpn5VSh5VSw618/oJS6qRS6kDqv+62iu1BeXtDTAxUrw5DQvMz7KMNFKEtnzGEYIah048lFPAxhaX8j8KKEDi8Jl2PNTHL384BdYBfbHIfQggh7MkJeB8YQTk+4jd6cNWykPw3OnBl7HDq7RzN9h3a5I72OyFPcVjeCBJ22ztwIYQQ9+UxYC/gAzQFFgNQnvLsZjfFKEawasgzI6OYNg1Wr4TDTeClq/Ah8AJw3Vq3FbtCk0/hj22wsplZNi2EEFnEJkUlpZQFmAGEAOWBjkopa+ciL9Fa+6f+m2OL2DJLoUKweTO0aQOjernx7KuLeVT3Zitv4k8XrnLt9gvyeZmHgmKVYE0YHEp/dGAVYDtwFTNj6aBN7kMIIewlJw8+3D0FvA68wcN8TjwdUZY5eKZ05dqIcQQfiGBTlAZPb5ND8pWCFU3gz532DlwIIcR9KQnEADUwp0JPA6A0pdnFLh7jMZrTnOL9lzB3LmyPhr114dVk+Axoj9mZL4MKnaHpZ5CwE1Y0hat/2+Z2hBC5jq1mKlUFDmutf9NaX8WU4Vva6HvbjLs7LFkCffvCtNct1Ooyg8opY/mOeZSlNedJN/3Uowi02wregbDheTgwM12PlYAdmP9M9YADNrgLIYSwvdww+HBvhgMzKM4ajtCSPE7v4JnSjxuD3iI0vi8rV6dAvpLQfsetWa+/b7N30EIIIe5LQczm3a2BgcCrgKYEJdjBDqpTnY505HLnj1i2DL75GtZWh/HnzfnRrSD9U4bh9yw0/RwS95jDgq5esNkdCSFyD1sVlUpx+67TCanvpddGKfWdUmq5Uuphax0ppXoqpfYrpfafPHkyK2J9IBYLvP8+TJwIn89TFGs6iuDrM/mTDfgQzFFO336Bqye0Xg9lmsG2PvDlpHQ9lgd2Ah5AfUA2ZhVC5Ei5YvDh3vQB5lGI7cTRhMJOY8mnh5Ly4kzanOnGoqU3IO9DEL4dCpaFVc0gPsreQQshhLgv7sAyoCdmxmp34DoFKEAUUYQSSi968VPY66xdpzl8GOZXgcmnTDkqBDhvrdsn2kOzxXDsS1jeGK6cs9kdCSFyB1sVlaydepn+0IK1QGmt9VOYnermWetI
az1La11Fa12lWLFimRxm5lAKXnkFPvkEtm2FMzV60TF5GafZTznqcJCE2y9w8TDHfj7REXaPgJjhoNP+eMphpsUWAoIwm/oJIbKbU6dOUb9+ffLly0e/fv1u+yw2NpaKFStStmxZBgwYgE79HXD69GmCg4MpV64cwcHBnDlzxh6h20KuGXy4N52AZXiyn0M04GE1mDwpY9EvzOV/+lnmzLtm9lZqFw2FnjCbsv62wd5BCyHug+QIARbMbkmjgU+AMOASHniwghU8x3O8yqtEBb/M5i2akydhemV466jZmakhpB++Nh5rC82WwvGvzF58l8/a7I6EEJnHUfOErYpKCUDaP/69gaNpG2itT2mt/1kSPBsIsFFsWaZLF1i9Gg4ehP+r2Ia+pzdxiT+pTE1iOHR7Y4sLhHwGT70IX02GbX1Bpz3ToTSmsPQQ5hjS9KfGCSEcnbu7O+PHj2fq1KkZPuvduzezZs0iLi6OuLg4Nm3aBMCkSZMICgoiLi6OoKAgJk1KP5sxx8hVgw/3JgxYiwe/8A11Ke/UFQ/9JrRfQo8C7Zj20RXIUxTabYMiT5pjpDMcACGEcHSSI4ShgDHAB8A6oBFwGhdcmMc8+tOft3mbOTW7snXHda5cgTf8Yepv8B1mw4zj1rot1xqaR8KJb2B5Q0i2Wn4SQjgwR80TtioqfQWUU0r5KKVcgQ7AbX/xKqW80rxsAemrLtlTaChER8PZs7DMrz7D/9jJda5Sn9osY9/tjZ0s0HAmPDMUvp0JGzvBjbQbfHtjCkuPYk6IkGUOQthDfHw8fn5+9OjRgwoVKtCoUSOSk5P/87q8efNSu3Zt3N3db3s/KSmJ8+fPU6NGDZRSdOrUiVWrVgGwevVqOnfuDEDnzp1vvp8D5crBh7vXGIjCjST2UpuqKgw3PR1arWZg6RZMfv8SeBQ2+/QV94e1bSBuhb2DFiJXkhwhMkdvYCnmMaoOkIATTrzHe4xlLHOZy4RK7di6+zLu7jD6aZj8A/ya2voPa12WbQEtV8Jf36cWlk7Z7naEEDfltDzhnOk9WqG1vq6U6oepgliAT7TWB5VS44D9Wus1wAClVAvMyZinMadk5gjVq8OePdC4MUyvUJlRu/Yw3r8x4QTxLssZSMitxkpBnUngVgB2v2o21Gu2BJz/+R/OQ5jNu4MxtbdIoJmN70gIRzGIzN/A3h949z9bxcXFsWjRImbPnk14eDiRkZEkJSWxcGH6kxwhMDCQadOm3bGvxMREvL29b7729vYmMTERgOPHj+PlZWruXl5enDhx4h7vJ9u4OfgAJGIGH/6XtoFSyktrnZT6MscMPty9OkA0LjRmG3VopbYQpT/mWqPuDHcP4eKUdYyLKARtt0BkCKwNh9BF8Hg7ewcuhB3ZJ09IjsgaSqkmwHuY54k5WutJ6T5/AZiCySMA07P3oQ5tgaKYLQZrAlEo/BjFKApTmP7051y5pmzas5rWDT15pRpMiIax1UzG2AaUTd9lmVBotcbMal3WANpuhTw5YVavEPdDniUyg02KSgBa6w3AhnTvjUrz9QhghK3isbXHH4e9eyEkBCY+48vIdXuY1DiEQbTgKJ8wmedvNVYKqr0CrgUguh+sDIWWq8E1X2qDYkA0Zjpsa2AJZnmEEMJWfHx88Pf3ByAgIID4+HhGjhxJRETEPfeldfpVXqCUtdVgOVduH3y4ewFADBYasppAnlNRLNULuVHneca7BfP3+I28NbIQqm2UOUJ6fUdIuQ5+He0duBC5iuSIzJfmlNBgzOzWr5RSa7TWP6ZrukRr3S9DB9lWPcxKhSZAbcySuBr0ox+FKERnOtP54Qas2LWBZxsVY1gdmLgB3mxoCktbgCfTd1m6MbRaC6uam8JSu21mfz4hhM3kpDxhs6KSgJIlISYGWrWCcU1KMHTODmZ0a82bdCKREyxgyO0XVO4LbvlhUxczRbX1BrO8AYDCmPGHECAcWIAZ2BciN/nvUYCs4ubmdvNri8VC
cnIyU6ZMua/RBW9vbxISbm3gn5CQQMmSJQEoUaIESUlJeHl5kZSURPHiOfePvtw++HD3ygO7cSKIhTQgv1rHbJajn2nPO24NuDRqMzPHFUOFbTQPDBufA30dyj//nz0LkfPYJ09IjsgSN08JBVBK/XNKaPqiUg5UCbMVdyPMoT1LgWY8y7MUpCBtaUub4nVYtmMLLzZ9mOGNYdwy+CAM6mJGa6qk7/LRhuYE6pXNYUk9CI82J4oKkavIs0RmsNWeSiJVgQKwaRO0awdvds/Pc6M2UFS3ZSEv05Ch6PT70pZ/HpovM5vqLa0HF4+l7Q2TJmoBz3KHPWuFEDYSERHBgQMHMvz7tyQAZiqqp6cn+/btQ2vN/PnzadmyJQAtWrRg3jzz/+158+bdfF/kdmWAXShKMZMmDFHuaKc1OFX8iY861KPz8CRSnPOZB4aH68PGzvD9J/YOWohcTXLEA8u0U0KzJx/MCdDlgVbAXABCCWUzm0kiidCCtZix9WeCgmBkG+j6KeQHGgC7rHX5SAMI2wAX/jCFpb+PWmslhLCR7JonpKhkB25usHgx9O8PH413o17XxZTWvdnGFPzpyjWu335BudbQah2c/RUW14Hzv6f50BPYiEkXXYBZNrsPIcS9K126NIMHD2bu3Ll4e3vz449mgHXmzJl0796dsmXL4uvrS0iI2Wtt+PDhbNmyhXLlyrFlyxaGDx9uz/CFQ/EGdqJ4nMm0YLz6mxTnjVge/53PugcS/vIf3HDKY5Y4PBoMm7vBd5IjhHBkkiP+VaadEqqU6qmU2q+U2n/y5MlMDjMrFcecAP3P3/2TAU0d6rCTnVzhCo3y1GbsuljCwmBCV2jzDpTS/xz3YMXDdSFsE/ydaAawLyRYayWEcBCOmCeUtfV32UWVKlX0/v377R3GfdMaJk+GESOgQWPNqXXj+NZ5DI/QjB9Ygid5br8gca/ZX8klnznhp/DjaT68DLTBrBx5H8hBS8mFSOPQoUP4+fnZOwyHYu1nopSK1VpnmO2e22T3PHF3zgKhaPbxPp8wkMdwvhbC9cSChL6zjZVTfXFRl2FNGziyAYJmgH8fewctRJaRPJFRTsgTSqkawBitdePU1yMAtNZv3KG9BTittS7wb/1mzzxxFbOt4CJgIPA24EQccQQTzGlOs/LGGhZ0r8fcufDia/DlWDioYDF32In16BcQ2QQ8ikL4dsj/iK1uRgibkhxh3YPkCZmpZEdKwfDhMHcu7NyqcKo2mvqXP+AP1lOaRiRx5vYLStWE8B2QctXMWDqRdqd6d2AFZml5f+AtG92FEEII+yoIbEbRgAG8wFxiue4SjUupv1k/NJAmg37iyg13aLECfFvAtr7w9b9PoxZCCAd085RQpZQrZjPRNWkbKKW80rzMwaeEumL2Ux2IOQzvOeAq5SjHHvbwMA8TamlCi49XM2AAfDQeKvSHp/WtnVgzKFnDnB56+RQsqQvn4m12N0KI7E2KSg6gc2dYswZ+/gl+r9CbsHNLOM1XlCOQQzdPRE1VvBK0jwGLm5mimrg3zYduwDKgHfAyMNFWtyCEEMKu8mJWfbSkM/1ZQRTXXHbgUvwG0aMDqT/oWy5ddTN79JULg+0DYb8MPgghsg+t9XXMVPwoTLFo6T+nhKaeDArmlNCDSqlvgQHk6FNCnYB3gEmYGUuhwAVKUYoYYvDHn3ZObaj87jxGjYKFM8CrE9RJgU7Ah9a69KpqToK7es4Uls7+arvbEUJkW1JUchBNm0J0NJw7B7sfa0eXoxu5SDyVqMVufr69ceHHoeNu8CgGy4Ph961pPnQBPseMWIwEXiPjcnMhhBA5jztmYOF/tOYVtrKAay47cS3kxhcT61Nn8Ff8newKoYvhsXaw82X4cpK9gxZCiLumtd6gtX5Ma+2rtZ6Y+t4orfWa1K9HaK0raK0raa3ra61/sm/EWU0Bw4BPMXst1QdOUIQibGUr9alPF/UCBce+w1tvwaoF4NwSmtyA3sAU
a12WCIB20XDtotm8+0ycze5GCJE9SVHJgVSrBnv2gIcHLHu8AX0P7eAal6hLbVby1e2N8z8KHXZBQV+zz9Lh1Wk+dMacCNENmIBJNlJYEkKInM8F+Ax4kSAm8wXvc81lB26eBfn6zSBqROzm3N8uEPo5PNERdo+AfRPsHbQQQogH8gKwGvgRcyr0b+QjH+tYR1vaMpjBnB48ktlzNNEb4FwQhF2FocAorDwlFPeH8Gi4cdmsjDj9c/oWQghxkxSVHMzjj8PeveDjA7MqBTBw1x4gH22oz0y23N4470Nmj6Vi/mYD1h/TrpC2YE6C64MZhxiEFJaEECI3cAJmAi9TnRl8yziuu0TjnseLH6Y25pnh2zh1xhlCPoPyz8Oe12DvGHN6hBBCiGwqFNgGnAZqAt/ghhuLWUx3ujORicR268Pni2/w1V74rSZ0TIbxwGCsPCUUe8ps2J1y3RSWTv1o07sRQmQfUlRyQCVLQkwM1KwJ7wWWo9fivTjjSx9CGcWS2xt7FDYnwXkHwsZO8G3aFdJOwHTgJWAapsCUYqvbEEIIYTcKeBMYT0Xm8zNDSHHegrtbGeLeCSXgtQ2c+MsCjT+FCl3gi7GwZ6QUloQQIlurAezGbORdF4jGgoVZzGIYw/iQ49vyzQAAIABJREFUD1nZ7lki117l5x8h9mnoegHeBXoCN9J3V/RJM4ANsLQ+/PWDze5ECJF9SFHJQRUsCJs2QZs28EFHL56dspM8ujrj6Uh3pt/e2NUTWq+HMqGwtTf83+Q0HyrMSXDDMVvy9cBKyhBC2EB8fDweHh74+/vj7+9Pr169bn4WGxtLxYoVKVu2LAMGDECnPtyfPn2a4OBgypUrR3BwMGfOnLlT90KkozB7672DLyv4lRdxct6Au3MFfn+3FZUnRJKYZIHGc6BiD/jyddg1XApLQtiJ5AiROfyAvcAjQAiwFIViEpN4kzdZwhJmNG7Bqi0XOXYUtjwJfU7BHMyOrNfSd1fEzxSWnJxNYenkdza9GyHELY6aJ6So5MDc3WHJEujTB+YOLUiTAVEU0i34mP6EMgqddqKqi4c5LvrxDuahYPeraR4MFPA6MBr4BLPu+rqN70YIAeDr68uBAwc4cOAAH354a2Zh7969mTVrFnFxccTFxbFp0yYAJk2aRFBQEHFxcQQFBTFpkmysLO7VIGAO3kRxhGdxd16Jh1MVjr7dnspTF/L7H04Q/CFU6g1fvWk28JbCkhB2ITlCZA5vYBfwDNABUgekI4hgDnPYwhbG1ApmVcxpki9BZAUYmASLgTbA5fTdFX4cwneCxd0Ulo5/Y8N7EUKk5Yh54q6KSkqpb5RSg5RSJTI9AvGvLBaYPh0mTIAV0z2oHLacUind2MB4qtGH62lnHVlcoOmCWyPO0QNA/7PcTQFjgInAAuBZrIxFCCHuQnx8PH5+fvTo0YMKFSrQqFEjkpOT77u/pKQkzp8/T40aNVBK0alTJ1atWgXA6tWr6dy5MwCdO3e++b6jkTzh6LoBiyjOF/xGG/I7L8JD1eHkW89TecYcDv/qBEEzoHJ/iH0btg+SwpIQ90lyhHWSJ2ytELAFaA70x8xc1XSjG8tYRiyxDKhUl8i9STg7w7zyMOQ3WAs0A/7O0F1ZaL8TXPLBsgZwbL9N70aInCSn5Qnnu2w3AVOFmKiUisEcLbNSa33/dy7umlLw6qvw0EPQs6czlWrOxnNXcb5yeYMn+IvvWYAHbqaxkwWCPwK3ArB/Kly7AI3mmCmrALyCWWcdgSkqLU59LUQ2tH0QnDiQuX0W94f67/5ns7i4OBYtWsTs2bMJDw8nMjKSpKQkFi5cmKFtYGAg06ZNA+DIkSNUrlyZ/PnzM2HCBOrUqUNiYiLe3t4323t7e5OYmAjA8ePH8fLyAsDLy4sTJ05kxl1mBckTDq89kJdCtOUwoTxlWcOxG30582YPAkYms+/Z/vjVf8/ki9h3zOasQe+DkknNIhuzU56QHGGV5Amb8wAi
gd6YgeVjwIeEEcZ61tOKVrxQrhYL922he31fZlaEof8HUytAY2A9UDBtdwXLmMLS0vqwvCG02QxeVW1+V0JkGnmWyBR3VVTSWkcCkUqpwkA4ZsfnD5RSK4AFWuvoTI9MZNCtGxQvDu3bK0pWeJ3q3xZjn8dgSnOag6ykKPlNQ6Ug8E1TWNrzGly9AE0/B+fUwhMvYwpJA4EwYDngbo9bEiLb8vHxwd/fH4CAgADi4+MZOXIkERERd7zGy8uLP/74gyJFihAbG0urVq04ePDgzTXPaSmlsiz2rCB5IrtoBmwkH805SGMCLOs5kjKM8xMG8My4ZPa0HEqlum+Bcob9U0Bfh4YzpbAkxD2SHJGR5Al7ccacCP0Qpq53ElhEQxoSTTRNaUoH79os/CKKQQ2e4t2nYUgMvFsNGgBRQLG03RUoDe13wNIGsDwY2myCkjVsfE9CZH85KU/c7UwlALTWp5VS8zEzIodilt0GKqVSgD5a661ZEKNIo3lz2LoVmjWDi2VeovHBYkQV7kIZ6vMtG/GhuGmoFFQfaTbx3j4IVrc0ey655EntaQDgBvQCWgIrgTzWvqUQjusuRgGyipub282vLRYLycnJTJky5V9HF9zc3G5eFxAQgK+vL7/88gve3t4kJCTcbJ+QkEDJkiUBKFGiBElJSXh5eZGUlETx4sWz+M4ejOSJ7KA+sBUPQjhAELWcNvF9ijsXRw2j+uRLxFwbzTOBk82Mpf97w8xYajRbCksie7JTnpAccWeSJ+xBAeMxhaX+QCNgDVWpyi52EUwwYUUDWbR7PeMa1+LtWjBoE8xoaM6Q2wKUSttd/kfTzFhqBGEbwbu2ze9KiAcmzxKZ4m73VHJSSjVWSi0AjmKmrk4CHtJalwVGYDbqETZQsybs3g3OzvCFz3O0+XM1FziEH7WI5cjtjZ8eCI0/gd+3QGQTuHIuzYcvYjbu3oIZvb5os3sQIieKiIi4uXFe2n//TFc9efIkN26YfdB+++034uLiKFOmDF5eXnh6erJv3z601syfP5+WLVsC0KJFC+bNmwfAvHnzbr7vaCRPZDfVgR24cpW9NKC601DcUrpwedhYau8ezp4vgNoTofpr8MMnENUVUuTkUCEeRG7OESB5wjH0BZYAXwF1gAT88GMPeyhOcdrkDyYieiP16sFbwdAzEhJSWx5J35Wntyks5SsJK/6fvfuOjqraAjj8O5kEEgidIL33JggC0kvovYbeiyAgoCioqBRFEQXkSRGk9xJqICTUFARBQXpHASnSpZNy3h8nmICUQJK5k2R/a2W9MHPnzr4xb3buKXvXgbPb7HolQiRE8TVPRHfa8TymL/0+oLDWuq7WesGjPdARy1kPx3p04pkKF4bt2yFrVliTtx7t9m3iAVcpSwX82P/4wUW7QP2FcOFnWFoD7l2N8mQXYA6wDdN29JbdrkGIxCYgIIDixYvz+uuv06JFC6ZMmULatGkBmDx5Mt27dydv3rzkyZOHunXrAjBkyBD8/f3Jly8f/v7+DBkyxMpLeB7JE/HO60AALiRhE9Wp6dSDJOF9eDhgDFX39WfzNg0VRkD54XBwNvh2MquWhBBxIoHnCJA84SBaAr7AWaA8cJgc5CCIIApSEC+3RnRYt5DGjeH7FtD2J7ihoSJP+Y/jnhlabYUU2cG7LpyRHYxCxCVHzRPqafvv/nOQUqW11g5X4r906dJ6926HC8uurl2DRo3MAFN7v4PM86wN3GE2a+jAE8tQT/nAmhaQKg+08Af3TFGeXAK0xbQeXc8TZfmEcBiHDx+mUKFCVofhUJ72M1FK/aq1Lm2vGCRPxGd/AjUI5xJtWY239iFEfYttRndWZ5lCvdo22PEFBH8CBbxMl1Gnl9o9L4RdSZ74L8kTz5Z488QezIRyCLAWeIub3KQRjQgkkAlh/2NXlz7MnQsdv4EN70GYAj+g5JOnunPJTFzfPAmNV0POmna+FiGiT3LE08UkT0R3pZLf0x5USjl0i4nE
IG1a8PMzNZbm1ixC6znBOOnX6EhNvmHN4wfnrm/2PP/zJyyuBDf/iPJkK0zB7l8BT+CavS5BCJEwSJ6It3IAgTiRg4XUo7OqhrMeRljX6TS82okVa0Kh3MdQ6Ws4uhh82kBYiNVBCyHiH8kTDqUksB1IC9QA1pKKVPjiS0Ma0t/2DnlnjeSdvpo5g6HyMHDTpirfz0+eKvlr0GoLpM4HKxvCaV87X4sQwkrRHVRyefIBpZQLYIvdcMSrSJYMvL1Nd7iFnXLQcFQQSXUxPqAp7zH78YOzVYWWG80WuEWV4NrRKE82wRTsPoDp93DFXpcghIj/JE/Ea5mArSiKMJWmvKuKYdNfEt52Ps0ftmbhsodQ5gOo8i0cWwZrvSDsodVBCyHiF8kTDic3EAwUxtwHzMQNN5aznI505DOnT7F9P5CPh4Wz9Aso3hfSa6gJ/GejWzIPaLkZ0hYyDYJO+dj5WoQQVnnuoJJSKlApFQC4KqUCon4BRzHD28IBODvDtGnwySew8tP0VOq1mRS6Ot/RmTaMffzgTGVNYb3wh2Zg6e/fozxZH1iF+c9bDZDJI+F4orNtN7Gw+mcheSIhSQ9sQlGWb2jNMJUJJz0e3Xw5bd2aMWPBfSg9CKpNgBMrYHULCH1gddBCPJXVn42OxOqfheQJR5cB2IKZUO4KfIUzNmYykwEM4Hs1gTMjOvPVtyH4TILs7SFHONTDbJp7TLL00HITpC8Gq5rCiVV2vhYhosfqz0VHE9Ofx4uKIkzH9KB8E/gp6vsCl3jKILWwjlIwciRkzAj9+rnz5vk1nFrdiUVOg7nEJTYxBoUyB3sUB69AWOYJS6qabXGZy0WcqTbgAzQEqgKbMLPYQljP1dWVq1evki5dOpRSVodjKa01V69exdXV1cowJE8kKKkAXxTN+IwupFITGaSnouu9TbeNDbk7cyV9u/QHZYPNfWFNc2i4DJwt/R0U4jGSJyJJnhDRkwIzRNQZ04TvAk6M4zu+Ix3pGMYwGg66wf9SL6ZfdzfK3oTCK6Gps2nX5xX1VG5pocVGWF7b1HJtsATyNbX/JQnxDJIjHhcbeSK6hboLaq2PvPK7xJHEW1jvxZYuhfbtIXfRcO7u6McZl0kUpzO7mYZL1LHEf/40hfXuXIQmqyF79ShnCcDMQ2TB5Pssdr0GIZ4mJCSEc+fOcf/+fatDcQiurq5kzZoVF5fHdxVYUIBV8kSC8gBoA6xgBl/SjSwQ3gWCKjDm8FoG90oJv0+FjW9DzjrQeIUMLAmHIXnicZInnk/yRFThwPvAOKA1MAtIymQm8w7vUIlKdPZeTU+vVBR5C9w2ws4kMA3o9uSpHtyE5XXg4i7ThbpAS7teiRDPIjniv2KaJ545qKSU6qC1nhvxfddnnUBrPePlQo49kgSeb8sWaNwYUqbVpDo8gkNun5ODRhxgEe64RR54+wIsrwXXj5sZ5zwNopwlGNMZIgNmYCm7Xa9BCPFq7HGzIHkioQsFugDzWMYQWlISwtvBrjf4fKcvn/VPA/umg39PyOEJjVeBi9uLTiqEcBCSJwzJE0/SwFjgA0zzHm8gBYtYRAc6UIxivL/Rl24NM5C9IGT6Gba5wnjg3SdP9eAf8K4HF3aYzqEFW9v3UoQQMRIbg0rrtNb1Ir7f8ozXa6119Wc8F+ckCbzY3r1Qpw48DIH8J35gZ5p+pKcSB1jNa6SKPPDeVTObcHkv1J0HBaMuZN2J2RKXBjOwlMuu1yCEeHl2ulmQPJHghQPvAFPwpS/1qAFhXuh9hflwsx+jB3mgDs6EDd3MStcmq8ElmdVBCyGiQfKEIXniWeZgaiy9DqwDXsMXX5rRjKxkZeROP3rUzEmajFDwN/Bzh5HAx8BjG4oe3oYV9eGvIKg7Bwq1s/+lCCFeSYwHleIDSQLRc+oU1K4Nf/0Fbx1bxOasHXGnML/hSz4yRh744B9Y0cB86NeaBsWiLmT9FdPrwR0zsJTXrtcghHg59t7W4KgkT8QG
jZmxHksQnalKK8LDmqOP5KLf6o1MGJIJdXgu+HaGrJWh6VpwSW5xzEKIF5E8YUieeJ51QEtMbdUNQB6CCaYBDUhOcsYd9KN3lcLYkkKZA7A2jckWX/HEwFLIHVjREM5uhTozoUgnu1+JEOLlRTdPPLP7m1LKKTpfsRu2iAu5c0NwMBQuDNtytqbe/rXc5jjFqMgvnIo8MGlKaO4LOWuDX3f4bUKUs5TCDCbdBapgmnUIIRIzyROJhQLGACOoyCx2MQNn22pUwT+Z2KIyPUeeRRfqAHXnwrkAWF7XzEwLIRI9yRPxXT1Mw57rQAVgDxWowDa2EUYYbxepxP9++QVnICgvNLtoskVfzDrXf7kkNxMO2WuAbxfYb9luRyFEHHjeh3goEPKcr0fPi3ggQwZTY6lqVVhXvBYNNm/mAdcpTwXWsy/yQJdk0Hgl5G0KWwbAji/g39VsJYCtmP/0VYFD9r0IIYSjkTyRaChgGDCOkixjH9+R1LYWp1x/M71DZTp8eorwAm2h3gI4v91sp354y+qghRDWkzwR75XD1FhNiplY3kxxihNEEKlJTffc1flq90bSpQHfvOD1B0zCVOQLjXoal2Rmi3TOWuDXDfb9aPcrEULEjecNKuUCcj/n69HzIp5IkQJ8fMDLC9bWKEvdeYGEa2fqU5nZBEYe6JwUGi6Bwh0g+BMIHBplYKkoZmAJzMDSfnteghDCsUieSHQGANMogC+H+ZxkzmtwyvYP83tVpuUnRwnL5wUNFsHFnbCstun+I4RIzCRPJAgFge1ADqAOsIQ85CGIIHKRi+6Z6jNk13Jy5wLvAtDusKnI5IXpJfovFzczeZ2rHvj3gr2T7X8pQohY98xBJa31n9H5smewIuaSJoUFC6BfP1jfoTDVvw7GpjPSmVqMYU3kgU7OUGcWvP427PoaNvcD/WghayFgG5AEqAbstfdlCCEcgOSJxKo7ivnkJIhjDCaV80psGR/i3b8KjT46QGjuFtBgCVzaBctrw/0bVgcshLCI5ImEJAsQgFm51BqYSCYyEUAApShFrzSt6L5jOiVLwKJi0H6X6RvXBFM841/OrtDIG3I3hE194LeJ9r8UIUSscn7WE0qpH7XWPSO+n4up1PkfWuuOcRSbiCNOTjBhAmTMCB8PzU65c4HsmViPD1VTLjCdcXQ2ByonqDEJXNxh91gIuQ21ppsBJ/JjBpaqR3z5AYm+1qMQiYrkicSsDZCMTLTiGH0p4uzN1fRerBtclTpD/Vn3RVOSNFwOa1rAsprQwg9c01gdtBDCziRPJDRpMAW72wL9gYukYRT++NOCFgxI3oMRAddwr/cB88pAu02woDrUBdYAKR+dxjkpNFoGa71gS3/QoVBqoDWXJISIsedtfzsd5fsTwMlnfIl4SCn46COYNg1+mexBgWabcQ+vyni60Iaxjx9YeQyUHw4HZ4NPGwh7GPFkHszAUirAE/jF7tchhLCU5IlErTGwlvSc4BjdyOS8GOfUydn0cTWqD93J/ayNzGz0lX2w1BPuXbM6YCGE/UmeSHDcgKVAT+BLoDvJScoqVuGFF58m/ZASGz6kYSPN/BrgtRK2a3On8FgWsCUxq1rzt4Ctg2DX2Ke8lxAiPnjmSiWt9ego/5yqtb745DFKqYxPPibil+7dwcMDWrdOQZbyPiQJbs8i22AucwV/RqNQZmDprU/NiqVt70HIXWi4zOyLJidmYKkaUBPwBd6y8IqEEPYieUKYz30/UlGPw3TgDee5nHbvSvDnnlT62IdtIxuQrPFKWNUUltaAlhvBLZ3VQQsh7ETyRELlDEwBMgIjgMskYRHzmU8a0vCd8xi6rrhGm05TWNjURrOZ4NMJqijwj3gVADYXqL8QlDMEDIbwUCg7xKJrEkK8qui28Dz2jMej3f5LKVVHKXVUKXVCKfXMTwulVAullFZKyV4qO2ncGPz84OrRpCQpsIgsD3qxia95gx6ERO3bUHoQ1JwKp9fDivpROvtkxwws
ZQBqAUF2vwYhhOUkTyRaFYDNuHOLfbSmsPM0nJNlZffIOrw1bCO30teFJqvg+hFYWh3uXrE6YCGENWKcJ4QjUcBwTK+3tUBNbNxkEpP4hE+Y4TSdB3O86NnvAd5dwHM8nNJQGTgT9TROzlBvLhRsC0FDYccoC65FCBET0R1UUv95QKmUQPhTjv3vi5WyAT9gttQWBtoopQo/5bgUmA26O6MZl4gllSpBQAA43bNxK8tk8t7+hL38RH5acYf7kQcW72k++M8FmDoZ969HPJEVM7CUBdMVYpvdr0EIYSnJE4laKWAbrmh20YrStok4J83Lvi8aUGa4DzfT1IYma+D6sYiBpctWByyEsL8Y5QnhqHoDS4DdQCUU5xjJSMYxDm+1nFMTGjDo09v4DILyn8PfGiph9kL+y8kZ6s6J6Dw9DLYPt+A6hBCv6rmDSkqps0qpM4CbUupM1C/gArAymu9TBjihtT6ltX4ILMIUY3jSSGAMRB3FEPZSrBhs3w4Z0ynOeoyk+OVx/MEKclGPy/wTeWChdmb72997YEk1uPt3xBOZga2YlUt1gU32vgQhhJ1JnhCRigCBJCEZgTSnsu1rnF2KcuTLprwxyptrKTyhqQ/cOGFyx51LVgcshLCDWMwTwmG1wBTwPgeUBw4xgAHMYhZb1BaChtfgk3FX2TgCig2AuxEDSweinsLJBrVnQpEu8PPnEPwp6KfWdRdCOJgXrVRqD3QEHgIdony1B97QWneP5vtkAc5G+fe5iMf+pZQqCWTTWq993omUUj2VUruVUrsvX37Fmc7z5yFcJkWeJkcOCAqC4sXgYKYBlDk5h8sEkIfqnCbKzztfk8hZ50WV4da5iCcyYgaW8gANMF3hhBAJmMPlCWGlvEAQzryGHy2oaxuGs600p0a3osSYBVx2qw7N1sHN0xEDS/8pryKESHhiK08Ih1YVCABCgYrAdjrRCW+8+Z3f8R5QmdFz/iJ4IuTqAiocqmDWN/3LyQa1p0Ox7rBjJAR9LANLQsQDzx1U0lpv01pvBdJHfP/oK0BrffQl3uc/y12J0lJUKeUEjAPee9GJtNY/aq1La61Le3h4vEQIEUJDoWZNePNN2CQraZ7GwwM2b4bq1eGXvB2osHslt/RBClOR36Pugs5ZC5pvgDvnYXFlc5MAmNpKm4H8QCNgvd2vQQhhH46YJ2Jl8kHEQDYgABu5WYkXLW0DcHaqyNkv21Ni/EwuOFeF5uvh1hkzsHT7gtUBCyHiUCzmCeHwXge2A+kx/d7W0IhG+OLLWc4ypUMFxq46zm/zIH0zcA+D6kBg1FMoJ1PDtXgv+GU0BHwoA0tCOLho1VTSWt9VSpVQSvVTSg1XSo149BXN9zmH+SvzkazA+Sj/TgEUBbYqpf4AygGr46QI66lTcOMG/PYbeHpCnTrw+++x/jbxnbs7rF0LrVtD8JsNqLDOj/v6EqWpwBYORx6YtRK03AQPbsCiSnDt0d8GHpiBpcJAE0wBPyFEQuVIeSLGkw8A338Ph6R27KvLCGzDieLMpx1dnbrijCfnv+hKiSlTOEdlaLberHJdUhVu/WV1wEKIOBYLeUIaOsQLuYBgTMpuCsykKlXZwhbucIevG1bk2017OeYLSWtChhCozRN7G5QTeE6GEu/A7m9g2/sysCSEA4vWoJJSqifm06E68CFQDDNbnDea77MLyKeUyqWUSgK0BlY/elJrfVNrnV5rnVNrnRPYATTSWu9++uliIH9+OH4cvvgCUqaEDRugZEno1AnOnHnx6xORJElg/nzo1w+CG1Si3PRthOkQPKnEUn6JPDDjm9BqK4SHmBVLl/dFPJEOU1epONAM2TIvRMKVoPLE4cPw7rtQpAg0bAiBgfLH7CtJC2xEUZ4pdOZdp+bYwuvz9/DevD5zPH+EVoLmvnDnQsTA0rkXnVAIEY/FNE9IQ4f45NHkcg2gKzCaUrxBIIEkJSmfVqnC19sDubALQitAzgfQEFgR9RRKQfWJULI/
/PodbBkguVgIBxXd7m8fAHW01k2BexH/2wIIic6LtdahQF9MBbfDwBKt9cGI2YlGrxB3zCRLBh99BCdPmhsHZ2eYM8cMOA0eDNevv/gciYSTE0yYAKNGwY6er1NyeDDolLSiOpPYGHmgR3HwCgAnF3NzcHFXxBNpAH/gDaAlsNzelyCEsI+EkydSpIDevcHV1SzZrFwZypeHFSsgLMyuocR/KYH1KGoxlrf5xKkGNt2Ma8MGUmLRV5y4V8Fso757yeSOf86+6IRCiPgrRnkCaegQz7gDa4B2wEfAAAqSnyCCyEQmhrxRi+G7fbh1Eq6XhEJ3zZ3C/KinUAqqjYdSA2HP97CpL2ipiyuEw9Fav/AL+CfK91cBp4jvr0Xn9XH1VapUKR0rTp7Uuk0brc34t9Zp0mj9zTda37sXO+dPIH78UWsnJ60Ldz+vXcKLabSL/kwvffygG6e0npZL6+9TaH02IMoTN7XW5bXWNq31IvsFLUQiBezWdvw8TpB54tIlrYcNMznhUX7In1/rqVMlP7y0+1rrJlpr9Ld6lHYKb6PRaPexn+lDh8O1Pr9D6+9Tmvxx8w+rgxUiUYhveQIzADU9yr87AP974piSwPKI77cCpZ9xrp6YGtG7s2fPHts/WvGYMK31QG1+9K211vf13/pvXUqX0jZt06PPzNMZM2qdNofWpW5qrbTWU588RXi41lsHaz0Wrf16aR0eZt9LECKRim6eiO5KpXNKqZwR3x8DGiulKmG6OMR/uXPDggWwe7epTn39ulmxVKCAWcEkM9MA9OgBy5bBybmZyNpwG67hbzKcVrzN1MiDUuUCr0BwzwLLa8Mf/hFPpAR8gbeAtsACu8cvhIhTCS9PZMgAI0aYrdETJpj2mMeOQa9ekDMnfPmlrGyNtqTAEqAtg/iEKSoHTroTt98bTmn/oey/WgZaboT712BxVbj5h7XhCiHiQkzzhOM0/hEvwQn4FrN4bBHQAA9c2cxmKlOZodna03PfRFIqOJoXyl6FXpj/kP9SCip/DWWGwr6p4N9LViwJ4UCiO6g0BigU8f0IYB5mo+zwuAjKMqVKwcaNsH49FC9ubiQ6dYI33jCjKXfvWh2h5Zo2NWWorgamIXVZf1KE1WEqb9OML9GP8nqKLOC1DdLkg5UN4MSjsigpMJ3gKmMml+ZZcg1CiDiRcPOEuzv07w8nTpgJiJIl4dIl+PhjyJYNBg6UmnzR4gLMAXrQg6+Yq1KhdC/u9vuaMsED+O1CaWix0TR+WFwFbpyyOmAhROyKaZ5wnMY/YDpKi2hSwGBgNrAFqEpK7rGOdTSmMSM8+tPs9+FkTa/ZkwsqXIBBmF+SKKOGUPELKPcJ7J8OG7pBuEz8C+EIlFnV9JIvMkVUk2itb8d+SNFXunRpvXt37NdoBczqpHnzYNgwOBtR48HNzXSLa9YMGjSA1Knj5r3jgb17zY/igVsItqOduZpkAZUYxDbGoh5NJN27Bt514dKvUG8eFGwd8eo7mHJ8W4GZQCcrLkGIBE0p9avW2rKuNwk6T2gNmzbBmDHgH7Ea02aDFi1MLabKlc0fv+IZNOZ2YTyr6UaN8CUCAAAgAElEQVRTnYxwNZEkM3uxrcgkyuXYC8tqgkty0wQidW6L4xUiYYpveUIp5YxZ4VQD+AvT4KGt1vrgM47fCryvX9DQ4ZXyxPXrULgweHmZjjZ58rzc6xO1dZjqSZkAP0LJTg96MItZdL/bj18rj2ffAScqHIaAXPA+ZjTysay6fTj8/DkU7gC1Z4KTzf6XIUQiEN088cyVSkopp2d9AaHA3YjvEyabzaxSOnYMxo+HsmXh3j1TqLVDB/DwgNq1YepUuHjR6mjtrkQJ2L4d0tlcuPPaXDLf6Usg31GcroQQMXPjlhZa+EOWCuDTFg7MjHh1cmAtpvlHF2CGJdcghIiZRJsnlAJPT/Dzg99+g7ZtzeOLF0PVqlC0KEycCDdvWhqm41LAd8AwGvETfuoKznowD7tM
pdLxrgSceB1aboKQO6Z4942TFscrhHhVsZkntCM1dPDxMX//T5gA+fJBo0awebN0J4uWepju0NeB8jizn5/4iYEMZHqyieTb3olyb4UQkBeqHYSxQB/gsc1u5T+D8iPg0Fzw7QThsmpMCCs9c6WSUiqcKCsOn3YIoLXWlg0Nx+lKpac5dw5WrgRvbwgIiKy1pBRUqGD2hjVtCrly2S8mi126ZFYs7TukyXF+OKfTDScHTTjIQpLjag4KuQurmsKfflD9f1DynYhX38M07fAHpgHdLbkGIRIie8xAS56I4swZmDYNpk+PnGhIlswMOPXubbZRi6f4GhjCdhpTjWI8ZBS2pa3xzTAHz0KHYGkNcHaDVlsgTbS6jgshoknyhPHKeWLPHvj+e7Mt+mFEWaiiRU1n6XbtzA4H8RyHgdrADWAlmmqMZjQf8zF1wxoQ2mwJ/qvdqLYTtpQxhTNmAM5RT7FzNAR9BAVaQ7254OT8lPcRQryq6OaJ5w0q5YjOG2mt/3zJ2GKN3QeVorpyBdasMQNMfn6RyQTMMp4uXaBnT9OSOoG7eROaNIGtW6HAH99zNMe7eFCNg6zEg5TmoNAHsNYLTq6CSl9DmQ8iXn0PaIqZdJqKacYhhIgpO90sSJ54UkgIrFoFkyebWetH3nzTDC55eZnBJhHFD0Bf9lCLClTkHp/itKopq5Mton6JI2ZgyZY0YmApn9XBCpFgSJ4wYpwn/v7b7FyYNClyUiFdOnMf0KcPZM0aO4EmSOeAOsBxYC7QislM5h3eoWJ4JdJ1Wc3KOamo5g9bPKEZptVP0qin+GUMBH4I+VuZchs2F/tfhhAJVIwHlZ5xUifgNeCS1taX3Ld0UCmqW7dMcW9vb7Mc9nbE1vBs2eDzz6FjR3BO2CPn9++bCfkVK6DInnkcLNGZFJTgd9aTi4iuGmEhsL4jHF0E5T6F8p9H1B25DzTH7LGeBPS26jKESDCsqpUheSKKo0dhyhSYNQtu3DCPpU4NnTubDnIFC1oTl0OahaYbR6lAGepziyGodfXxdlpGk1LHYWl1sCWBllsgbX6rgxUiQZA8YcRannj4EJYsMVviHp3P2dnU23v3XShXLubvkSBdBxoBwcBE4B0WsYgOdKCYLkbBgb4snJCBysshoJkZgloOPDY9s/tb2PY+5GsO9RfKwJIQsSTGNZWeOFlKpdQczN3/X8A9pdRspVSqGMaZMKRIAa1awaJFcPkyLF1qlr+ePQvdukGxYrB8eYLeZ+3qavJo9+5wsGR7ivqv5JY+SGEqsY+IQuc2FzODULQr7BgBAR9E/ExcAW+gAWbX9A+WXYcQ4tVInniKAgVg3Dg4fx5mzoQyZczg0vjxUKgQ1KhhJiOkgxDQGcVCCvAze1lOGsai6/nQzKkxi3fkMauUwkJMjaVrR60OVgjxChJ8nkiSBNq3h19+geBgc2+gtbk/eOstU5911qzIyWcRIQ3gh2ni0xcYRmu8WM1qjqgj/DquEt1G/ElAcyg/AzZoU5XpVtRTlH4Pqn4Hx5ebnRFhD5/yPkKIuBLdAqrfY6orFwXcgGKYAeLv4yiu+MvV1cxI7N0Lc+ea+kpHjpjHypQxnYIS6OCSszP8+CN89BEcqNWAwvM3cF9foBQV2MYRc5CTDWpNgxLvwO6xsKkv6HDMQtZlmJmKvsivlhDxjuSJZ3FzM6uTdu6EX3+FHj3MFrjNm6F5c8iZE0aOTJRNHx7XCsUKcrGP/cwmA+PQNf1pnaI+cwNymoElHQZLqsHVI1YHK4R4eYkjTygF5cub5g2nT8OQIZA2rRls6tIFMmY0OWHbNgi3fKGWg3DDrD/qBowCelGXmvjjzyV1iQ2fVOSd/x1mezd4czwEa/AErkU9RamBUG0CnFgBa1qa0htCCLuI1vY3pdRFILfW+m6Ux9yBk1rr1+IwvudymO1vz/PwoSncGvWGoVo1GD3azFgkUOPHw8CBkP+bPRx/rw5KhbMUX5pRyhygNQR8CLu/
gSJdzECTkw14CLQGVmC6Aw207BqEiM/sva1B8sRLunkTZs82NTiORqy8cXExg0zvvGOaPyj1/HMkWJvQNOIyWXmDd/hLD4Ttb/HjuXX08DwHS6qbn03LzZCukNXBChFvSZ4w7JIn7t41K5ZmzoSgoMjHc+Uy3aY7dTITDImeBoYBXwBNgAX8zjFqU5tQQum4wJdx7UpT4lM49DkUVGaN02O/PHt+gM19IXd9aLgcnJP+922EENESq9vfMMtUPZ54LD0gQ8AvkiSJKdJ34oQZSEqdGrZsMfuqmzSBgwetjjBODBhgFmqdGlqSXO8FgU5OC6oxja3mAKWg8tfw1mdwcCasa2+2NpAEWIypsTQI00hUCBEPSJ54GalSQf/+cPiwWcHapInpKLpoEVSqZBo+/Pgj3LljdaQWqIHCDw8usp9x5FIT4K2d9MxVk//5ZopYsaQjViwdsjpYIUT0Jd48kSwZdO0KgYFw/Dh88ompvXr6tKm/misXVK8Oc+Yk0s/9RxRmpdL3wCqgNq+TgyCCSEEKprWtxmCfLewbBfkGwgkNVTDlvv9V8h3wnAynfGB1Uwi9b8F1CJG4RHdQaTrgr5R6WylVVyn1NqZd149xF1oCkzy5Wf566hQMHWqSy6pVpt5Sx45w7JjVEca69u3NJV6Yko+MbYOx6Wz0pA5fssocoJQp1l3pK1O8e22riKWqLsBCoCUwGNNyWgjh4CRPvAqlwNPTdDk4fdrsH/bwgH37TDHvLFnMKH0CzBHPVwHFJtLwD3v5igJO46H0HvoVrsG3azJAq63mZ7ekGlxJmJMzQiRAkicA8uY1OxhOnzaTCm3bmvIZW7aYFUsZM5qarIGBCbZkxov1w9wL7AAqk5dkBBFEDnLwfb26vBe4kmOTIEs3OB8OlYBTUV/++ttQ80c4vR5WNYGQe1ZchBCJRnS3vymgC9AWyAycx/w/fYZ+mfZxscxhtzVEx8WLMGqUmYkOCTGPVatmam00bWqSSwLx889Qvz7YSl7lln89Hjj9yrv8xHg6RR7020TY0h9y1oFGy8ElGRAKtMesXBoFfGxJ/ELERxZsa5A8EVsePIBly8zWuO3bIx+vUQPefhsaNzZb5RKF/UBN7hJOOT5nf/ggOJifL3Zu5KNm182gUngotNoM6YtaHawQ8YrkCcMh8sTNm6bjzcyZ5g/nR/LnN/cGnTqZCYdEZyPQFEgHbOAaHtSjHrvYxbv7fmJy2c5krAc3lkAymzn6sU3R+2eAX3fI4QmNV0bcXwghoiu6eSK6g0o2rXVYrEQWixwiCcTUqVPw5ZewYAHcixhFT5vWrF7q0QMKF7Y2vlhy8CDUqgW30t4mbE8T7jpvoj3jmcu7kQftmw7+PSFbFWiyBpK4YwaWOgPzgc+BTzFLY4UQz2PBzYLkibiwZ48ZXJo/PzJHPJrF7tEDcuSwNj67OAbU4D53qMRIdocPhmM5GLZ1EyNa3Yal1Uynn5abwKO41cEKEW9InjAcLk8cPWq6xM2ZY7qHgplIaNIEevY02+ScorvZJCH4FagLhAPruE1hmtKUjWykz8lvmVtyEO7lIGQdKGdTY6lE1JcfnA2+XSB7NWiyGlySW3ERQsRLsV1T6aJSapJSqkIM4xJPyp3bFPK+cMHcOJQoAdeumUrXRYpAxYqmmOvduy8+lwMrUsRMuGd66E5oTh9SPmjKPAZQj8/RRAxsFu8O9ebCuUBYVgvu3wCcgdlAJ8yg0jAgsS4FFsKhSZ6ICyVLwrRp5sbi++/NRMPFi/DFF6YGR4MGsHatqceUYOUHAnElNcEMpaL6GpXvHCM9q/DBfDezFc7malYtXdpjdbBCiGeTPBEdBQqYOqx//mnqSDRoYD7jly6FmjUhXz7z/IULVkdqJ6WAYCAlUB13glnLWlrQgkl53qPlkU948JtGVQHbQ6iG2TT3ryKdoO4cOLsVlteFh7csuAYhErboDirVAm4DC5VSfyilRiulisVhXIlPqlTQuzf8
9hvs2mVmItzdITjYtB3NnNl0BNq71+pIX1mOHKbhRbGMSbnlsYS0dzqznuGU513CiGipWqgdNFgMl3bD0hpw7ypgA2YA3THdIIYgA0tCOBzJE3EpdWro1w8OHDB1Ntq1MzPXPj7QsKEZYBo5MnJWO8HJCQSShCxsUR9S02kUKtfffNOoMu/Odka32gYu7rC0OlzcZXWwQoinkzzxMpydoVEjWLPGDDANH26Ke586ZervZcsGzZrB+vUJfGIBIB+wHcgLNCApy1jEIrrTnRmZv6DmsT7Y/gzj3puQ4j54wqPWQEbh9lBvAZzfHmXiWggRW6K1/e2xFyhVBWgDNAMuaq0tW2vucMtVY9vt27B4sam79MsvkY+XLm0Gndq0MQNP8cytW2YF7+aAcF67+D6X0o2jEB3Yw08kJaJOyCkfWN0c0uSDFhsh+WuYZa99gcnAQOBbZCucEE9n720NT7y35Al7uHLFbJGYOtV0GAWw2UzNpV69TAHwBLdF4m+gJmEcobkezerwUegL7vRatJnJPVxQS6uZyYjmGyBzOauDFcKhSZ4w4lWeCAsDPz9zb7BmTeRgUvbsZlt0587m+wTrJtAEM2T0HZoBDGUoX/M19W97cbDUHC6HJ8FjL1xMDt6YjXP/Or4C1nqZrdLN/cAtrQXXIET8Edvb36I6ChwGzmKmDkVccXc3CWLnTvj9d+jb16xo2r3bDCplzmxWN8Wz1UspUpjJ9WaNnLjk8S1ZzoziMHPJRwtuE9H2M3d9aOoDN07B4spw6xzm1/UHTEeIccC7yIolIRyS5Al7SJ8e3n/f1N/w94cWLUxHNG9vqF3bdBj68ssEtkUiA7AFG6/jrT6kjdOHqEz3mNq+Mp2m3DYrlpJlgOW14FyQ1cEKIZ5N8sSrsNmgbl3TMfTsWfMZnysXnDkDn30GOXOaLXILFsT70hlPlwpYDzQHBqEYwleMZgxj8HFfTM7fG5M15R0uFIAsN6ExsDzqy/M1hcYr4MoBU4/v7mUrLkKIBCdag0pKqdRKqW5KqU3ASaAqps97hjiMTURVvDhMnGhuDmbPhgoVzJKfKVNMzY2yZeGnn+DOHasjjRZXV9Pkons3xV85Pibb/v9xltXkoi5/8485KEcNM9t85wIsqgQ3TmJWJk0ABgETgT7waOucEMIykics5ORkViUtXWpuLEaNMjPVp0/Dxx+bLRJNmsC6dQlki0RaYCNOvMU89RHdnAagPGBujyp4TblEeMttkDwzeNcxNTSEEA5B8kQsy5QJhg41K1X9/cHLC5IkgY0bzRbpTJnMJPTPP4N1zfXigCumM3RvYAzQmcEMYDrTCXD1I+WOWhTIe50/ckOuK9AKmBv15bnrm4Ld14/DkqpwOyFNvAhhjeh2f7uL2ci6EFimtb4Z14FFR7xarhoXDhwwy1/nzDGtSAFSpoT27U0Sef11a+OLBq3NtvCvvoLsW+dzpkonUlCSfawnJ+nNQRd3w/La4OxqtsKlK4RZoTQU87dId2Aqr7bwToiEyYKuPpInHElYmLmx+PFHWL0aQkPN49myQdeu5iveb5G4i9kG4c9A/RkTwmej716l/uS1rO6dH6flNeDmaXPzkMPT6mCFcDiSJ4wElSeuX4dFi8zW6KilMwoUMFvjOnSALFmsii6WaWAUpjN0XWAp3mygDW3IG1YA9+Yb2LUpEwWPwOEspnjG21FffnYrrGgA7pmh5WZIkdX+lyCEg4tunojuoFImrbXDDeMmqCQQE3fvmhnqqVPNbMQjZcuauhpeXpAsmXXxRcO335pdHNmWruVs85a4qpz8gj/FiPiAv3IAlnqCDjN7oF8riUkmn2ISSifgJ0xRbyGEBTcLkicc1cWLZoXrtGlw8qR5TCmoUwd69DCdhVxcrI3xld0HvIDVjNQf8Hn4KsJD/qTGpOVs6PEmNu8acOM4NFoBuepYHawQDkXyhJFg88TBg+azf+5ckwfArGytWRO6dDH191xdrY0xVvyIWbX0
JuDDRvbQhCZkCH+NbN38CViYm8KH4FBu+Ar4MOpL/woG77rglt4MLKXKaUH8QjiuWK2p5IgJQESRLBl06gTbt8O+fZG1l3buNLPRmTPDwIFmK4SDeu89mDkTzrduQJbJvtzXf1GKCgRy3ByQvii0DgRnN7MH+vzPmK1wI4HhwGzMwFKoVZcgRKImecKBZcwIH34Ix47Bpk3QurUZRFq/3nQOyp7dLBl14BzxbK7AMqA9w9QY/meris2lIJv6NabKjK2ENtsCaQvBqsZwcq3VwQqRqEmesLMiRWDMGFN7ae1aU3fPZoMNG0weyJQJ+vQxXafj9fa4npjKSXuBiniSn81s5qbTDY7NqIBnn/0cKgBF95v+0UOJUpE1SwWzC+L+dVhcBa6fsOgahIjfZL9QQlOsmKm9dP48zJgB5cqZrXHjx5uirc2bQ1CQQyaPzp1h+XK4MqgKr322lRB9l6pUZDURhcjT5DMDS24esKwmnNkS8cpPgS+A+UB7IMSK8IUQwrE5OUH16rBwIfz1F3z3HRQsaGawR4+GPHmgXr3HOwrFCy6YiYX+9GYqC2wFSeJUhuC+rSk3dxUPG2+C9MVhdTM4vtLqYIUQwr6cnaF+fbOr4cIFc59QqhTcuAGTJ0OZMqZ263ffwd9/Wx3tK2oC+AEXgLcoQzICCcSmbOz+tjL1Pt/OgRJQLNisVnqsImumMmaVUsgdWFIFrh6x6BqEiL9kUCmhSpbMLG39+Wf47Tfo2NHMTnh7Q6VKJoEsWAAhjjUA07ixmUC5N+EN0vUOROukNKEqswg0B6TMAV4BkDInrKgHp9ZFvPIjTLG+xZgOtQ+tCF8IIeKH9OnNCtZDhyAw0NTZSJLErF5q1Mh0Exo1KnLLhMNzAsYDI2jFItbaUpNMVefXPt14Y8lsHjTaCK+VgrUt4dgyq4MVQghrpEtndjTs3m12NwwcaPLBgQNm20CWLNC0qanF52D3CC9WGQjErEOqRGGuEkwwHsqDLR950vgHX/ZXhGI+MAXoQJRp6NdKgtdWCA81A0tXDlhzCULEUzKolBiULGn2VP/5p+kElC6dSSbt2pkbh6++gmvXrI7yX1WqwNatYFtRkBTNgnDSGelCLb7BxxzgnglabYW0hWFVkyg3CIMxNxXLgUZA/OiEJ4QQllEKKlY0DR/OnYNvvjErls6ehWHDTGHvVq1gyxaHXOH6OAUMA37Ak/VscbpPqvBGHOw+kKKrxnO3vi9kLAtrW8PhhVYHK4QQ1ipWzKxO+usvM+ncsKH5nF+50szyZstmCp4ePGh1pC+hGKYWfAagFjnYSyCBFFAF8OnVkGbL57O/ARSZBwuAZsC9Ry9NXxRabQNlg8VV4dIeay5BiHjomYW6lVJdo3MCrfWMWI3oJSTYwnpx7e5dmDfPbIk7fNg89qgu04ABkD+/tfFFOH4catWCS5kvExpQlxDbXt5jFmNpbw54cBO868OFn6H2DCjSKeKVP2H2V5cD1gJpLIlfCCvZowCr5IkEKjzcdI6bMsXMVj/aClegALz9tskVaRz9c3Uhmo4cpijVwgvwt9Nisi0byCHP4bhvaAjnAsBzMrzey+pAhbCM5AlD8kQUFy+awt4zZ0beI4DZ4dCli6nFlDq1dfFF2xWgPrAbmMpNWtKUpmxhC40DxrKqynsUHA9H+0NlBauBlI9eev0ELK0OD/+BZush81tWXYQQlotx9zel1Jao/wQqABeBs0A24DUgWGtdLebhvhpJAjEUHg5+fjBunPnfR+rXNzcOtWtb3hHo/HkzsHQ05BbOBxpz32ULHZjAHPqbA0LuwMomcGYj1PgBSvSJeOUyoC1QCNgAZLQkfiGsYqebBckTCd25czB9uukcd/68eczNzdxY9OxpuowqZW2Mz+SLphlnyEpVXZ4/1Gwyru3G4QrjSL21DZzygYpfQJmhDnwNQsQdyROG5Imn0No0/Jk5ExYtgn/+MY+7upqC3127mq0FTo686eUO0ALwBUby
gPfpSCeWsITaBwbhV+Ib8n7sxOnPoYSC9UD6Ry/950/TdfrOBWi8EnJ4WnQNQlgrxoNKT5xsInBSaz0+ymPvAnm01v1jFGkMSBKIRQcPmpVLc+fCgwfmMQ8PaNPG1NooVcqyP7qvXTPjXDtO3cf9z7bcdl1BHYaxjuEoFITeh7VecHI1VPoaynwQ8Up/oCmQKeL7nJbEL4QVLGgVLXkiIQsJMd2DJk8Gf//Ix4sWhe7dTZ5Im9a6+J5pO5r6XCEZVXVDDqmppNvYisOlZuDxcy84PB9KDYIq34By5JsjIWKf5AlD8sQL3L0LK1aYBkCbN0c+niuXWb3UqZPpIuqQQoBuwFzgHcIZxwDeYyITqXSmLTsKzyRLlyRc/B5yK1PqO8ujl965aBoDXT8GDZZA3sZWXYQQlontQaXrQHqtdViUx2zAFa21ZWvgJQnEgcuX4aefzODSoUORjxcqZG4a2rWzJHHcvm06X/sHh5L6/NvcSPUTpenNz0zEGRuEhcD6jnB0EZQbBuWHRwyC7QDqAW6YgaXCdo9dCCtYcLMgeSKxOH7crFyaNcvkDICkSc2HdPfuULWqg81e7yec2tzmATVpxy9MJGVwXfYXWEz2fR/Dnolm+3St6eDkbHWwQtiN5AlD8sRLOH3afPbPnGlq74H5e7tmTejWzdRiSprU0hD/KxwYAnwDNESzgK+YyEd8xBtXa3K48HLSeKbg5lzwcIKNQJ5HL713DbzrwqVfoc4sKNzeomsQwhrRzRPR/avvIqbycVQNgfjad1I8i4cHDBliukDs3g3vvmseO3wYPvoIcuY0LalnzYJbt+wWlru76XLdop4zN9JNw+Pih+xmMoVpx30egs0F6s2Dot1gx0jY3A/CwzB1lbZhEkolYJfdYhYikZE8kVjkywdjxpitccuWQZ068PAhLFwINWqYunyjR5vW1Q6hGE4E4U4atjIDTwbwz1sbKHixOvsKfwTlR8DB2bC6OYTce/HphBCvSvJEfJcrFwwfbgaX/PzMVmgXF/O9lxdkzgz9+8PevVZHGoUTpkP0D4APiioMpTMzmcnv6TaT/WRVbu+6RPKGcCMUKgL/9n5zSwstN0LWyrC+A+ydZNVFCOHQortSqSamSM1BzB7o7JglHy211n7Pe21ckpkFOwkJMcli7lzTEeLR9jg3N2jSBDp2NDMUNluchxIWBr17w7TpkPHIN1zM/wGZqM0hlpOa5GYPeOAQ2DUG8reCunPAOSlwCvAELmPK8Vm2dV8Iu7BgBlryRGL2559m5nrGjMjZa5vN7F3u0cMMPDlbvQroIuHUIZxDtGEgy0Im4nwhExuc1lP9ykbY1NfcODRZBUlTWRyrEHFP8oQheSKGrl2DBQvM5/+eKB3TSpY02+PatjWdpx2CD+AFpAV8WMdZWtKStA8ycq/KBsJseXHZAqFJTI2lso9eFnof1rSCU2ug4mgoO8SqCxDCrmJ1+1vECdMDdYHMwAXAR2t9NUZRxpAkAQvcvGlmpufMgYCAyMfz5oWBA6FzZ9NJLg5pbRZNffUVZNkxg7/K9iA1ZdiPD1mJqOmxaywEDIbsNaDxCkiSAjgP1AaOA4uAJnEapxBWsvfNQsR7Sp5I7MLCzCTE9Ommc1xoqHk8c2ZT2LtnT8iUycIAb6BpiCaYvrzH5HuzcLqnmffPGtrc+wN8O0L6YtDcF5JlsDBOIeKe5AlD8kQs2rPHTDDMmwfXr5vHkiQxk9BduthtEvr59mA6w90BlrGTlNSnPoTacGmyjttnSpFyJ9xwMyOgdR+9LCwEfDvBkYVQZghU/FKaPIgEL9YHlSJOmg3IorXeEZPgYoskAYv98QfMn29qMJ0+bR5Lmxb69IG+feG11+L07b/5Bj74ALKuXsm5Bq1JpvKyiw0UflRi7+Ac2NAVMpSAZusibhCuYWos7QJmAJ3iNEYhrGLFzULE+0qeEMbFi2YCYvp0U4cJzGqlFi1Mjihf3qI/yO+haY1iNSPpw2e3
fdG284w9v5D3nFxhdTNIkRWa+0GqnBbEJ4R9SJ4wJE/Egfv3YdUqM8Dk52dmhAGyZjU7HLp0MRPSljmLGVg6DEzhKBWpTW2uhF8lbQ9v/t5Uk6y/wx+pYBrQ5dHLwsNgUx/Y9yO83gdqTJQmDyJBi9WaSkqp7EqpYOAIpn4ZSqkWSqnpLxFQHaXUUaXUCaXUf9YMKqXeVkrtV0rtVUoFKaWkorKjy5kTPv7Y3CwsXWpaS1+7BqNGmWLe3bs/Xuw7lg0eHNHlukkTsv60nrv6DCWoyHYibl6KdDTbGK4egkUV4eZpzHLXjUB1oDMwIc7iEyIxiY08IRKYjBnNyP/Ro7BxIzRtCuHhpj11xYrwxhtmUuLuXTsH5oZiOdCdYUxiqnt5nB8W4/1czRgUfhJabIS7l03euBp3OUyIxEbyRCLi6mpqLPn6mu3Ro0ZBnjymFt+XX5rafFWqmBqtt29bEGA2IAioAXSnALPZThC5nXJxYXo9MrWfzx85odAF6AqMAjSAkw08p+/DuQIAACAASURBVEDpwfD7JFjfCcJDLYhfCMcS3aHVqZhNqCkwvRnBtNKqGZ0XR3R2+AGzgrAw0OYpg0YLtNbFtNYlMNXUvotmbMJqNpuZef75ZwgKMjcOISHmZqFIEVNTY/PmyFmKWNS9OyxZAn+/U42MX2whRN+mMhXx5hdzQO765gbh3hVYWAEu7wfcgbVAc2AA8BkRqUII8epilCdAJh8SLKVMAW9vb7OqdehQSJ/eFHLt3t3MXA8eDKdO2TEoZ+BH4BN6MI9lKTOQ/GZdxuXpS6uHqwj32go6HBZVggs77RiXEAlajPOEiIeyZYuchN62LbJURkCAWbGUKZPpHBccHCf3Cs+WElgD9ARGk5n3CcCPCqoCf4xqT87RYzmQQ1PqMAwD+gBhYHJa5a+hwig4PA9WtzA1l4RIzLTWL/wCrgJOEd9fi/L4jWi+/i1gQ5R/DwWGPuf4NsD6F523VKlSWjioY8e07t1bazc3rU2K0LpkSa3nzdP64cNYfzt/f62TJ9fao88R7RSeS6Pd9DfaO/KAywe0npJF64mptT4bGPFgiNa6qza/Tn201qGxHpcQVgF262h8PsfWVyzkCRtwEsgNJAF+Bwo/cUzKKN83AnxfdF7JEw7q3j2t58zRukyZyByhlNb162u9fr3WYWF2DOYHHa6VDg5/S6e/3kWj0RX/bKPvXz+k9fQ8Wk9IrvXpDXaMRwj7iG95Iq6+JE9Y4J9/tP7pJ60rVIjMAaB1oUJaf/ed1leu2DGYcK3119r8OlTQ9/RZ3UK30Gh07vW9NbYQXW6zebax1vpO1Jf++r3WY9F6SQ2tH9yyY8xC2Ed080R0VypdAh7b+BoxQ3wmmq/Pgtm8+si5iMceo5R6Ryl1ErNSqX80zy0cUb58MGkSnDkDI0aAh4cp3te+PeTODSNHwl9/xdrbeXrCpk0QtqgAqRvtwCX8dQbTnF58h0ZD+iLQJhiSvQbLa8KJ1ZiZ6unAh8AkoC3wINZiEiKRiWmeKAOc0Fqf0lo/xFTTbxz1AK31P1H+mRxZYhh/ubpChw6wcyf88gt06mSKufr4QN26UKAAfPstXLlih2D6oFhCOfUr21LtJOfNIQRlX0iJe3242doHUueFFQ3g6FI7xCJEghbTPCESihQpoGtXs8PhyBEYMsRsmT58GAYNMs0d2raFrVvtsHpJAR8AS4DduFKNxYxiMIM5VWcy2X5vyI7G/1BuPqzWppf0v5Xl3+gHdWbB2S2wrCbcuxbHsQrhmKI7qDQWWKuU6gI4K6XaAIuBr6P5+qdV4vzPJ4TW+getdR7MXf4nTz2RUj2VUruVUrsvX74czbcXlkmfHoYNM4NL06ZBwYJmP/Wnn5q6Sw0bPt4hKAbKloXAQEi2NwMur28meUgzfuQ9atKXUEIhZQ5oHWQ6+6xuBgdmYn41v8L8ii/BFO27FeNYhEiEYponYm3yQfJEPPPmm6auxtmzMHq0
yQ0nTsD770OWLGYyIigojm8sWuDEBvKrcwSlnEfxO19xJH0wBe8351yreZCpLKz1gt+nxGEMQiR4Mc0Tsk06ISpQwHz2nzkDK1aYiYWQ/7N339FRF9ECx7+z6aTQe2ihhd6bARWRDqEISJeioIIIClJUmnSV3nvT0HuTKr0jPfQWek9IIWV33h8TnzyfaIAkG8L9nJMj2fz2tzPnCDe/mbn3RkNAAFSubH7+449w924CD6QxsBV4jAU/RuDPVKZys+AmMpyvyL7e1yg1HI5o8AOu/Pm2Qh9B3cVw9wgsegdCbyXwOIVIguJynMmcfKI+sA44BWwA6r/Ae180/c0CBP/XfeW46mvIatX6t9+0btRIa0fHv467Zsmi9XffaX358it/RFCQ1oULa+2QzqrTPumh0egCurYO0bHHUiOfaL24qjmuun+Y1jZb7DvnaK0dtNaltNZ3X3kcQtgTiZzWoF89TjQGpj/zfStg3L9c3xyY81/3lTjxGoqO1nrlSq1r1TIpcX/GiUKFtB47VutHjxLww4/qKJ1JP7Sl1m9HjNE88dKewVn0sch9Wi+rY+LG3h+eiRtCvL5ewzghadJviitXtO7bV+usWf+KAU5OWjdubGpeJGiK9AWtdT6ttbPW+he9UW/UXtpLe4Vm0pQ+qIt8rnVKq9aZtNZ//J8xbzbp0tN8tH50MQHHJ0TiiWuciHMPRK31Cq11La11Ia11Da31ijivXJn+7XmVUrmUUs5AU2DVsxcopfI+821t+LOFl0hWLBaoVs10i7txA0aMMKlyN2+azhA+PlC9OixZAlFRL/UR3t7mxFLFQhYepB1B1tuTCWQDualEEDfA2QMarIH8TWFnL9j+tSnISmtgJXAaqMgzexBCiDh4xThxHdOO5U/ewM1/uX4B5uFEJDeOjuDvb1LhLl2CPn0gY0Y4dQq6dDFpEe3ambQ5Hd+nl4rhxF7cVXp+c+lJPZcfeBKpKGOryuY6naFgK9j9PfzeLTZuCCFexCvGCUmTflPkyAEDBsCVK7B6tclssFrN80PVqubZYehQuJUQp4JyA3sxZyJaUJU97GE3qd1dcN77NqfvrCB9Q1BWeBvY8r9jrgKNt0LkY9M99N6JBBibEEmT0s/5hUwp1UprPS/2z+2edwOt9cw4fZBStYDRmF2GmVrrwUqpgZjVr1VKqTGYNNVo4BHQWWt96t/uWbp0aX3o0KG4fLxIyrQ23SCmTYOlSyEytq5RhgymQ8THH5vg8YKePoXWrWHxEvA5uoFLRRvjRkq2s5YyFDMPBNu6wh/jwLcZVJ8Fji7AbqAOkAL4DSgcb1MVIrEopQ5rrUsn8GfEW5xQSjkC5zD9fW9gNiOaPxsHlFJ5tdbnY/9cF+j3X3OUOJFMREWZVOnJk00BvT+VKAEdO5raG56e8fiB94iiDg76EJ/ahjH9+lwsWc8yS8+k9a7DcGQ0FGgJ1WeCg1M8fq4Qiec1jBONgBpa64//vDdQTmvd+W/XdQK+wpxmeu/PuPG3azpg2n6RPXv2UlevXo37pIR9XL8Os2bB9OkmVQ5MB+q6daFDB7Np7eAQjx8YhflfZA7QgjsMwZ/GHNQHcfr2R9Jt/QqP7YrLLjALaPHn2+6fgqXVIDocGq6DLBXicUxCJK64xol/W1Rap7WuFfvnbc95v9Zav/fyw3w18rCQDD18CPPmmQWmU8+sKdaoAV27moCh/qlE1z+z2Uy9vzFjIM/qY1yoUxsHglnIIj6gplnQOjAcdvWG7O+B/3Jw8QJOAtWBcGANJntaiNdHIj0sxGuckM0HESfnz8PUqebh4kFsudQ/i7526WJOvMaLUKJojDMb+E5/y+DzuyDfdgZFDqfP0WjUru/ApzbUWQROKeLpM4VIPK9bnFBKNQaq/21RqazW+ovnXN889vqP/u2+EideM1YrbNxonhVWrTLfg6nH166d+cqW7d/vEWcaGAp8C1QknF/4iK9ZwhJc53TEbfB4fA45ctjLFAbr
QWwh4eArpnB36E2otxxyVoun8QiRuF55Uel1IEEgGdMa9u0zAWPBAoiIMK8XKABffmm6BqWI2y/xWpsmQj16gM+YG1z6og6oEwxnAt/Q0Vx0ai5sbA9pC5ldBY8smPS3apisnMWYrEwhXg+J8bDwOpA4kYw9fWpOt06ebAp5g0mxrlcPunWDihVfaBPin0UTzcc4MZcptKfT2RCs+RfzccQXTDlXAMvmTpDlLZNS7ZrqlackRGJ63eKEUqoC0F9rXT32+94AWuuhz7neAjzSWqf8t/tKnHiN3bplGj1Mn27SpcHEgZo14ZNPoHZtk1L9yhZhymRkxcZqvmUewxiG6/bqWD5aRNl9XvyeCT4HxmD6SxN2B5ZWhwenodYvkL9xPIxDiMQV1zjx3JpKSilLXL7id9hCxFIKKlSAmTP/6giUNatpNfrpp2YHondvcxQ2Drfq3h1++QWCumfF+8sdWHR1evIp7emBDRsUam0eCh5fgIC34MEZICewCyiISdmfm6BTFuJ1I3FC2JWrK7RoYYro/fGHyXd2cDDdg95+23SV++WXl67PZzjhxGxs9KYjM1ieP5wUF7ow3W0cdfJsIarOPLh9QDr+CPEc8RwnpEar+L8yZzbPA+fPw6ZN0KSJiQNr10L9+ub00nffweXLr/hBTYDfgVAs+DGUqkxnOjFvb8G6yY+dla5SNxAmAg2AMAD3jNDkd8hU1nQPPT7tFccgRNL1b/+Ix2BSDJ739efPhUhYadNCr14mIAQEQNmyJk1u2DDImROaNYP9+//zNs2bw/r1EDzbk7T1VuJi+5yZ/MT7NCGccMhZHT7cDjERsMAPbuwBMgDbgHeBj4CfE3KmQrxuJE6IpKF4cZgzB65eNQ8Q6dLB4cPQsiXkygVDhvyVKvfCFBaGoJlIbdazLfdeMgQNZL3bMsp7TyakQQA8vmgKsz6+GK/TEiIZiLc4obWOATpjCl4GAou01qeUUgOVUv6xl3VWSp1SSh3F1FX619Q3kUxYLPD++7BwoWkE9NNPkC+fOck0eDDkzm0aAa1c+Ve63AsrD+wHsgDVaY9ig9qAS54gLPvKsbr1QRpshnXaPDXcAXOCtdFGyFUDNnUwJTeESIb+raZSjrjcQGttt8p2clz1DbZvH4webbrE/Rkcypc3dZcaNgSn5xdOPXrUnIoNzaJR+0bxxKk7uSnFDlaShSzw+JI5rhp6HWovhDz+QCSmw/li4BtgGLFZ00IkSYlUK0PihEiaIiJg/nwTJ06fNq+5uZnTTF27gq/vS954FTE05ZrOTI2HXTjv+Q05YnKzN3gImZe1B4sTfPAbZCgWb1MRIqFInDAkTiRTWpuTrFOnmueFPxsBZc9ush4+/hjSp3+JGwcDjYFNQE9O05JatroExdzG9tFMar3TjN87QgYF6wFfAGsUbGgDZwKgdA94e3g8pGcLkfASpKaSUkoB6YD7OgkUY5IgIAgKggkTTMB49Mi85u0NX3xhOkGk+ucaF1eumNrfl8Ig3alV3PJqjhep2MZqSlICwu/B8tpw5zBUmQjFOgJW4AtgEvBJ7H/js8uEEPHHXrUyJE6IJEVrU9B11Cj47be/Xq9Z09Tnq1rV7HC/kP1EU4cQrakbPpC9ug+pHVKwJ3w8vku6QHQo1F8N3pXidSpCxDeJE4bEiTfAw4em9tLEiXAx9kSps7NJl+vUCcqVe8FFnmjMM8EU4APu8TMf6FbsVDthSG8qPhjE2Z8sxChYAbwNpuv0li/g2EQo3B6qTgGLPEeIpO2Vayr97WaplFLzgKeY03wRSql5Sqk0rzhOIV5NtmwmDS4oCCZNMrvP169Dz57mZ127/mMedc6csHs3lMkOtzL5k/fybkKwUI6KLGYFpEgPTbZBzhqw+VPY3Re0BZiA6QAxDWiGaTcqhJA4IZIkpUzKw4YNpqNohw6mFtP69WZnoVAh85ARGvoCNy2HE3vxVKnYkqI7/i4DeRTmQAnPNuxuNgJSZDTtpC+t
TbBpCfE6kjgh7CZNGtMO+tw5WLfOFPCOjjYnWitUgNKlTR3XPxsD/ScnzObySGAZ6WnMZjWfT/gE+gxl17v1Sd8shHQxUBVYAKAsUGU8lP8OTs6ANU0g5mkCTViIxBXX7blZgBtQHPAASgAuwMwEGpcQL8bd3RxlPXXKBIsqVcxDwpgxkCcPNG5sUuaekTYtbN4MH/rD+dzFKPjbAWIoTBMa0o/haKcUUH8lFG4H+36AjZ+AzQoMAn7CpML5E1uOT4g3ncQJkbQVLAhTpphNiCFDTPOHM2fMLrW3N3z99V/dg/5THpzZg6MqwjLHbnyW7lOePsjBO+5tWPxhV9NJdEU9OD0/QackxGtG4oSwrz87w61ZY04s9ehhFpyOHIH27U0s6NEjjrFAAd2A5cApnPFjCh0YxzgstdcR2PctQutdomiY2YYeDmilwO8HeHcUnF8GS2vA08cJOWMhEkWc0t+UUo+BzFrriGdeSwHc1FrbrYeuHFcV/+roURg50hT3jokxr731ltmpqF/fdIcAbDYYMAAGDoS8QyO42LMtNrWQhrQhgCk4ayfY088sLPnUhjoLwckdmAF0ACoAawBpJy2SjsROa5A4IV470dGmU9zYseboKpiTTXXrmtS4ypXjkA4RhpVmOLCaYXxJn+tH0d7b+SmiP1+t3o4K2gaVx0DJLgk+HSFelMQJQ+LEGy4iwhT4njAB/vz/QCmz+NS5sznt+p9p0n9gOkXfA2ayhQw0jGnMkxBFio+XUG5yZbZmgE+BcYAjQGAAbPgI0vjCBxvAI0vCzVGIlxSv6W/AWUx/9Wdlj31diKSpeHGYO9cUUOrVy9RX2rMHGjUyHSHGjYPQUCwWs6j0yy9wrb8bGT8MwNXWj2XMpgxVua8egN9AeH8yXF4Pi96D8PtAe2AhcACoTGyfByHeVBInxOvFycnU09i1yzxItG5tXlu1ypx2LVoUpk2D8PB/uYk7DixD8ym9GMM870w432hMd7f+dPX3ReepB9u+jE2htnvpGCHsTeKESHrc3KBNGzh40HSTbt3a1Ftatw5q1TLPDKNG/VW79R+VAA4BZYDmVGEzRxz3kdsjI2GLqvL7oIn4n4bJQH0gFKBAM2iwFoIvQ8Bb8FD+GojXV1xPKg3BtL6aBwQB2YCWsd//b/9crXWiHl+VnQXxQkJDYdYs0w3oz2OtqVJBx46msHfWrOzdaw4xRWQG530BPHBtS3qysp01FKAAXFgJa5uCZw5o9Bt45cB0tm0AeAObMb8fCWFfdtiBljghXn937pgUuUmT4PZt81rq1KYW05dfQubMz3mjxnQF7cNWXZkGdwsSknECDSIbsuh3TxxPzoFin0OVcaauhhBJgMQJQ+KE+H/u34cZM0wsuBrbmDBFCmjZ0qRMFy36nDdGAV0wBbxrEcJkGkd9xkbntTDpU+r5jGV1NSeKK1gLZALTFGhpTVPIu+FayFwuESYoRNzEa/c3pdS2OHym1lq/F5fBxRcJAuKlWK2wciX8/LM5uQTg6AhNm8JXX3EldQnq1oXTNyH76X1cyVgfF56yksVUpypc3wUr6oKjmzmumr4osBuoDXhiWoy+bLtqIeKHHR4WJE6I5CMqyrSgHjMGDhwwr7m4mN3sHj0gd+7nvHEeNtpxWvtSK7geQakGUyHybbbuK4broXGQvynUnAMOzok1EyGeS+KEIXFCPJfVCmvXwvjxsGnTX69XqmRS4xo0MCdc/5/JmO5wubGyjF4xc/nJcTj8/g7vXVjC/vbpSBu7sFQY4NEFWFodwm6D/xLIVTMxZifEf4rXRaWkSoKAeGX79pm6S0uXmuJKAJUrE/7pVzSeVYt1myz47r3GmTJ1UZziZ8bSjc/h/klTXC86FOqthGzvAEeB6oANc3qppN2mJYS9WkUnNRInxCvbtw9+/NHUX9La1NZo0sR0GS1e/B/esJkYPuCedqd2RCf+cB5APmt+DhzzJ+XOIZCzOvgvja3NJ4T9SJwwJE6I
ODlzxnQLnT0bnjwxr2XJYjIeOnSATJn+9oYdQCPM6aUA5tke0tbaHuv1zBRfsIrbPYsQboElmA5xhN02J5YenIRqM6BQ68SbmxDPEd81lf68qZdSKsuzXy8/RCGSgPLlYdEi0wGia1fw8IBt20jxYV3WXC7IwnemcLVsOvLP3IXWNfmKTrTlC2LS+UKzPeCe2ewsnF+GaWayE0iBqbG0065TE8IeJE6IZKd8ebPxcPo0tG1rFpUWLIASJUy9jR07/lYv6X0c2U165chOt6HU0v04Z7tK/uLzuFltIFzdBIurQsRDu01JCHuSOCFeS76+prHDjRumqHeBAnDzJvTrB9mzQ/PmfxX6BuBt4CCQC6hNK8sN9jrtIGWGKI5+UQGnQSvIEg01gWkA7pngw+3g/bYp4H3wR6nFJ14bcVpUUkpVVUpdAh4D15/5CkrAsQmReHLmNEX4rl+Hn36CbNlQZ8/SZOunPHDPTquPR1Cq3WQc9dfMZjx+1OCBlzs03QUZSsDqxnBsMpAP2AVkBqoB6+w5KyESjcQJkez5+sLMmaYmX9eupr7G+vXwzjtQsSKsXv3XiVcK48h+XFR+Vjn25TO3ztyJjMbXdxSBdQfC3cOw6B0IvWnXKQmRmCROiGTB0xM+/xxOnYKtW6FhQ5MmFxAAZcqYmLBqVWw8yIEpkdEE6EkZRhPovoN8MYUI6tuA65MGUjbERgegJ2Bz8YIG6yBfE9jxDWz/2tRaEiKJi+tJpenAEMALcHrmS4oCiOQlZUr4+mtzcikgAEqXxi3sAd/qQeye7cPsLA8p/8cgDrCTQpTllNttaLzF5D5v/gz2DADtjTmlVBDTXjTAvnMSInFInBBvhmzZzCbEtWtmhzpNGlOfz98fihWD+fMhJgbIjCPbQdViIkMZ4VmX0LB0FPMZxM6G30LwFQjwM7U0hHgzSJwQyYdSULmyOcl6+TJ07w5eXub0ar165iTT5MkQDuZZYAiwgMw04Viq+dR51IrQLv04sK0JVa+GMQKz9BTh6AJ1AqBEFzg8Cta1BGuUPWcqxH+K66KSKzBLax2qtbY++5WQgxPCbpycTOHuAwdg506oXx9nFU2L27PYW/I71tcpT5YjwZSkPCucNoH/cijUBvb2N4tLtjTAVuAtoDnwM6Y7kBDJlsQJ8WZJmxb69zedgUaOhKxZ4eRJaNUK8uY16RERDjiwAk0nejCNRany4xBShHe8B7CkSSdTl29BRbh71N6zESIxSJwQyVP27Kb2XlCQ2XTIkQPOnYPPPjM/+74v3G4LrALO40pFVqX+hL4hP2Gts5xNj9+i6v4rLMMU0LijLFB5NFQcCmcCYHkdiHpi3zkK8S/iuqg0CvhGKaUScjBCJDlKmbSG5ctR587xtH0nIi2u1Fi7gyOlHrCkrgODD9anr8MwdPUZULY3HJ8Ca5pAjAuwAVOkrzumC4T83iSSLYkT4s3k4QHdupm0uBkzIF8+uHLFdAbKkQOGDEc9/gEYyQd6LVtTKdI9qULjTMMZ9eEHaAdns7B0bqm9ZyJEQpM4IZI3Ly+THn3hAixcCGXLwoMHMGiQiQftlsHJ+UBKFO8xwMuJFdFrccxzlU0+ZSi7ejvHNZQHTisF5XpB9VlwbauJE8GX7T1DIf5RXBeVlgKfAMFKqUvPfiXg2IRIWvLkwXX6eLh0mfUFvyYcN+quCeZgWShfqy899r1PWKVvzc7C+WWmgPfTSGAhZlFpAtAACLPrNIRIIBInxJvN2RnatTMFvZcsgVKl4N49+PZbyJ4DvrmFuj2Nshxnr9dF8oV+wFdpp/BVUz9s6QrD6kaw61uwyeaDSLYkTog3g6Oj6RK6b5/JeGjQAKKjYdYsKFIPamSHTWVAf0k917mcdNtGGls69td4n8yLJxFhM7kOmwEKt4GGayHkGswvDVe32HduQvwDpeNQVV4pdQzTL30xEPHsz7TWdvs/W1qACnvRGsZ9f5fwwSP5wmk87tFmoWhXVQ/y9J1F
pnQxsL41pPGFDzaARxbMolIXoCSwBshovwmIZC+xW0VLnBDib7Q2RVyHDoUtsX8FnJ2hTS2iu+/gSR5N/Sh/drrMoWZkVZZvz4LLiTmmRl+tX8E1lX3HL5I9iROGxAmRKC5cgNGjzcJSeLh5rXhm6H4bmhTgidNc/K7240SOtaRc25GMVcdyydmZyUB7MPX3VtaHh4Hwzk9QsqvJqBAiAcU1TsR1USkESKV10io/L0FA2NvSpdC15X26Zx1F27uj8XpigsTjyiVI1aEF3O0Prqmh7hLIXBaTS90Us6C0HvC129hF8maHhwWJE0I8z8GDMHw4LFtmFpssFqyN3bF+E0Gbom0IULPJrX3Ydao5mbYOBq8cUG8FpCtk75GLZEzihCFxQiSqhw9hyhQYMwbu3DGvZbdANyesH0+nxd1TLPQZhtPBShTNv4TDXhnoBQwGLFFPYP1HcGE5FGgJVaeCk5s9ZyOSubjGibimv60E3nu1IQmR/HzwASzbkY5hYYMpnC6IsT078TilhVTb/oBm3SEgHwRGmQ4/RyeCrgtsx7SCeAvYYd8JCBF/JE4I8TxlypiUuMBAkyLn4IDDwic4l4rh1zrTmb67KpejH5C38Cj2NhliCnj/Ws6kUguRfEicECJNGujd29TemzYN8ueHazboFolDtlYsmLGdCYfHEVPkIIcfl6H8jaMMw3SGC3X2BP8l4PcDBM43dZZCrtl5QkLE/aTSIqAOpk/6nWd/prVunTBD+2+ysyCSiqAgqFsXjp8Dv11XeXdtVbqNOk+aR7EX5EsFFR9Dg2ZQfRo43QVqApeBOZjTS0LEHzvsQEucECKurl83HYKmTIEwkz79oEoqWvVOxYb3rjI+rA+frdqCurUPyn0Lbw0Ai4OdBy2SG4kThsQJYVc2G6xZAyOGwe695jVnxc0Pa1C94xFOlgih5KXZHC3UhAIKVgB5AC6uhnUtwcEF6i6GbO/YcRIiuYrvk0qngOHAHuDi376EeONlywa7dkGd92FXqRxsKh5Izivd6DMYHqd1hHOPYSbwaQB084V7EZi/TuWBZpi/Xv+9wCtEEiZxQoi48vaGn3+Ga9egfz9I6UbaLY9Z9/4VdtdMzYyzg2nXOBcxRdrA/sGwwh+ePrb3qIV4VRInhPg7iwX8/WHXHti9G+qXhGhNlnnrOVHxDmv93XAN/pDsB7/hpo6iDKaABrnrQvP94JoGlrwPf4w36dVC2EGcTiolVbKzIJIaqxW++QZGjoQSg+BknwBcwtrx1RRnvv3REec7D82FmSzQvTN0HQoO7YEFQEdgPOBovwmIZCOxd6CTKokT4rXw6BGM7IkePR0Van4vW14fFvQryGTVitRb+0qdJRHvJE4YEidEknN2BYxsDXOeQKR5aZcfDGufhzPNV3HJpQCDgN6Aigw2J5YurYFCbeH9ieDoas/Ri2TklQt1K6Xe1lrviP3zjQUz9QAAIABJREFUc/OftdZbX3qUr0iCgEiqpk6FTp0gez0IDTjGXacGuEdcZ8PMD6g4bAdcv2ku9E4FA0ZAq/Pg9CNQC1gIeNhx9CI5SIyHBYkTQsSz+1eIGl4T5wlnIAJsClY2dqXQ573Id3oSRIdBzTmQt6G9RyqSAYkThsQJkTQ9gjuNYdwWmOgMj6IA2F9aMb37Z0xvMo6GysJswFPbYE9/2PcDZCoL/svAM6s9By+SifhYVDqptS4c++fLz3m/1lr7vPwwX40EAZGUbdliCnk7pAOf/Q85lLY58BvtotoyeXZJnPp/C7dCzMXZvaH329A2AFyKAUsBu/3VEslAIj0sSJwQIt5pom//jGVILyxTrKgosFog6MPS5CwTDbZjULY3+A0Ei5xsFS9P4oQhcUIkXTZgEDzpBxPTEfNjNI4PggE4ViQl/QeM5ny91iy3WMgLcH45rG8NTu6mzpJ3JXsOXiQDr7yo9DqQICCSurNnoX59OHcBamy1sq5iX1BDKEJZ1sUsxHvkTzByItyJ/XuY
JS30DIeOLuDyK6aYtxAvTtIaDIkT4nWlOcbDKw1IPewyegY4xIDVQWGpmhdV6hwU8oPav4JXdnsPVbymJE4YEidE0rcZ+BjCrmKd6EfwkGOkeRwKwImiufjpu+F8+MEH1LJY4P4pWFkfgi+ZRg8V+soGhHhp8V2oWwjxEvLnhwMHoL4/rHvbgco/DCalbRknOE0hx3Js/6YxHD8On2WFzMDNB/BlBPiGwy+1wNYPs0shhBDiTaIoRtqcx7g+tjkO5+BGG3c0GrXhHHqEI0w+ABOKwoWV9h6qEEKIBPU+cBLcu+DQYw9pbqbmULcPuZ7BkSLHLzOnSRNyFCnM8oAAdGpfaHkYCrQ06XALKsHjS/aegEjmZFFJiATm6QlLlsDQofB7f8hUpQFFnh4ghNRUpgo/ZdiCHh0IU5tCGyCbB1yJgpZA6YGwqTzw0K5zEEIIYQ+eZHeez+1s00gzw8qD0x5saqbQMTGwIxoGPIFP68OqjhATae/BCiGESDAewBhgJ7i5U3rkQlyPNeHbtpW4mh0KnQ6kQfPm3CxYkIiFK6DqDKgdAA8DYV5xOD3f3hMQyZgsKgmRCJSCXr1gwwa4dxyuZi1AvYsH0NShB11p4PwZ4bWnw5cToFs0tEgBGVPDH0C1g1AtK/wRYO9pCCGESHSKTI4fo9V+InJm4b1fFZMPebKqHhBpgy1A06nQ3AeuH7P3YIUQQiQoP8wDwreky7SQH6afY+aw3rT/KSUXfSDruXO4ffQRUb6+cCgGWhyB9MVgfStY2wIig+09AZEMyaKSEImoWjU4dAh8ssOqvF40W7AMV/0DK/mVwsqPS8VrwEfHoFZR6PoIWheGlO6w6SmUbA4tKsDl59W5FEIIkVylUEXJ4XyYAyEt+LzkE9IvS8sHe+GPyqkgAlh8EwqWgO/aQKScWhJCiOTLFRgEHMJiycKAZkPp3qAi74yuTus5cC6vG84XL0KrVmi/OqA6Qfn+cHYhzC0ON/bYefwiuZFFJSESWa5csHs3tGwJAc0svNXhO3JZ13KZqxSiNKvTXISmO6HyUChxDvq4Qtvq4Kzg133gmwe6dYH79+09FSGEEIlI4UEFr7mcCQuggLYyp7wL4zeHU/83L4JL+cATDYPnQPZ0MGMKWK32HrIQQogEUxw4AAyjgM9mLtfcj4dzGwoe0rSb4cqlXOlQgYHQtBl8vgzSDDFvW1gJ9vQHW4wdxy6SE1lUEsIOUqSAOXNg3DjYMRssxWpS8/EhnuKNP7Wpa/mI2+XamEJ7mbNDod9gcl1o5gvRNhg9DnL7mEJN4eH2no4QQohE5OvelBSW41wK8WOGJYqP3oc8By8xc2ktdK50cDcUPv4UCuaDpUvhNe70K4QQ4t84Aj2BYzg5FmZi09nsuVaKrb7F8T1zn88m5ee6d2bTGKhtT5iaBsLehT0DYOE7EHzFvsMXyYIsKglhJ0pB586wbRuEPYAd3rn57MB+XOnLGpaQE1/GpNuOtfkeqNAPHq2Fd0Jg9cdQ3QFCnkCfPpA3L8yeLTvSQgjxBnFW2SjqtYlTj36mFk85oVKwuOE6ip3x5sGoDpDWAc5dgkaNoFw52CPpDkIIkXzlB7YDEyhT6Bjnyxyn69pGTGt5g7xnH9NlTEMeZMwIh45A360QkA/2HIW5xSDwV3sPXrzmEm1RSSlVQyl1Vil1QSnV6x9+/pVS6rRS6rhSaotSKkdijU0Ie6pYEQ4fhmLFYFI5N1r3HUCNmBNEUpqudKawQyUOvVUHWuwHl9RwZjp81RDWe0NJ4OZNaNsWSpeGrVvtPR0hXprECSFelIVCqb/CZjtIWGhu1gMdnQLJ/cV8Fp0fjv6kAHgBBw+Cnx80bw5BQfYetBBCiARhAT5HqVM4Ob3HiAZLOH0vJwWOlGBcl2XkuJiL3j/2ICJtWjhyDsaHw2QFE1rAmmYQLqU1xMtJlEUlpZQDMAGoCRQEmimlCv7tsj+A0lrrosASYERijE2IpCBLFnNiqXNn
mPoDnM2Xj4GHN5GOAM5wnTKU5ZOMswhuuQXKfAMnlkCQAyytDPMAb2c4ehSqVIG6dSEw0N5TEuKFSJwQ4uW5ORYlt8cBTt79ik4qkn0OmuGpu9N4UjGervoWqlrACQgIgPz5oX9/SZ0WQohkKzuwClhM3pz3OfTWPkZvqoGOPsPwr8aS4XIXZgwaiDVVKjgdbH77+mYhDM0P55fbeezidZRYJ5XKAhe01pe01lHAAqDesxdorbdprf/8DWcf4J1IYxMiSXB2NjWWtm8HFxfoW1pRuW1T2oadAToxnUnkdCzOr28XRzfdCRYnWLoNMleDE24w2AE8XGDNGihSBD7/HO7etfe0hIgriRNCvBJXCmf4meDwzaSOSMM+bSGfZSE5/H7lWMB8+LEYFAMiImDAALO49MsvUm9JvFbkRKsQcaWARigViMXyCV9W3cClGA+q7y1JqGc/OvRZifflNRz+/nu0hwcEahj8EFo2hDn15NSSeCGJtaiUFXj2vPX12Neepz2w/p9+oJTqoJQ6pJQ6dO/evXgcohBJw9tvm0NHAwbAyl9huXdKvl0+jvz6AI/xpgXNqZS1H+dbL4GSX8LRTbDABRoUg/OR0DGjeUiYNAny5IFhw8xDhBBJm8QJIeJByhRVyOh2ggt3GzNEaZY63qRhmuZ0+PxdoqYNgs5OkM0Brl83bUj9/Ex6nBBJnJxoFeJlpAImAzvIkNaD9X57WbTPj3SPrnA7ZWVKD7TQ+GIgIZ07g4OjaSb3ySpomAOOzLPz2MXrIrEWldQ/vPaPW2NKqZZAaeDHf/q51nqq1rq01rp0+vTp43GIQiQdLi7Qt69p1FC8OAxuCOkql+Lr2/twYgK7OUQBp7J8V9mLpy13gkd2WHcEdhaEIeFwwglqFYQnT6B3b/D1NTvSNpu9pybE80icECLepKZAxgDuPJxP0WgXTuJAFqcxeBebzs7+U2FIaWgCpHKBvXuhbFn46CNTo0+IpEtOtArx0iqh1FGgP43KHeSCi5W2+0oBA1iWvhbpx7Vg1OnTRDduDNHAb+FQqTV0KAmPJTaIf5dYi0rXgWzPfO8N/L//O5VS7wPfAv5a68hEGpsQSVb+/Kb29qxZEHgCxmZ3oMPPn/OO9QxWGjOYH/DJ2IJ5zTthqzIO7tyAuZHwyBtWnIZNRaGoL1y7Znaky5eHXbvsPS0h/onECSHilSJjmhZ4Op7m+p0m9Af2O1/n57RtqVi3IOE9foBeQFVXcHKEuXMhXz4YPFhOt4qkSk60CvFKXIB+KHUUT/fCzKywjx2ni5Dn3h2iqMBXeX8g26LRrN+7F12xIoQD0/6AvNlg9NeSLi2eK7EWlQ4CeZVSuZRSzkBTTPWw/6WUKgFMwTwoSCEYIWIpBW3awJkz0LQpTOgO130z0efkfFKylVuko7WlLQWKT2Fj27HofI1h30WYkx5yn4cj12BmK8ic2aQ4VKoErVvDnTv2npoQz5I4IUQCUCoreTP9SnT0NpxC8rMC+C7lLEoVG8vU5iOhTQnoHgPlM0NYGHz3HRQsCKtX23voQvydnGgVIl4UALYDU6hY4BqnUgczcPdbeEQt4K4tH7XKb6Xcjo2cXbkS8uaC+zboNhLyp4PfVtp78CIJSpRFJa11DNAZ+A0IBBZprU8ppQYqpfxjL/sR8AAWK6WOKqVWPed2QryR0qc3G8mbNgEahhSBGp9V5suwAziziHNEUt39I8rVusKJxqPBkgaWRcBaD2g0D877Qt8vTW7dvHlmR3rcOIiJsffUhJA4IUQCc3J6F2+vPwgJG41fjDvHHO/zMFsnfKpl4pb/AGj6CLp4QL5scOUK+PtDnTpw8aK9hy7En+REqxDxxgJ0QKlAnJwa8L3fHq5GeNHxeB4c+ZbDuhC+/lZanT7Lo0kTIa0HnH8INerDe6Wl07T4P5R+jY+xlS5dWh86dMjewxAi0UVEmAyF4cPB1RVafwMRX0czN8UsrPQHblE7pibTD+ch075poDS8
paGEC1zpCV12w7rYE+HFi8PEiVChgj2nJOKZUuqw1rq0vcdhbxInhPgnd7gV3JPMKedwQ8PX2h2XU32ZdXIllqA9cLYALA6CJ6FmI6JXL+jZE9zc7D1wEY9etzihlHIEzgFVgBuYE67NtdannrmmBKZAdw2t9fm43FfihBBgKnR3B3Zy6bY3X0UrVmYLQukquKjR9A3LRfdBPXEaPQme2sBBQYc2MGQkpEpl57GLhBLXOJFY6W9CiHjk5gaDBplC3nXrwuT+EJDWiZb9O1A/7AKKYax13EvmcuPp0qYG4dnegu1RMN8GLt/BmoewYiTkyGFazb31FrRvD1JXQAgh3gAZyZxyNrAXIgqywBJG2yI9KV1XsaV4PygWBN2joEYxiIw07UgLFZKUOGFXcqJViIRUFpMSt5JcGT1YkS2I3Vd8KBt6gKe6OH3ce5Fr6EBWXrqGblAObBomzYJcWWHKRLBa7T0BYUdyUkmIZOD8eRg2zKTHKQX+veDRN4/Y6jEcGIODjmHiheq033YUhyc3ILczVIoGt89gSAr4cSxERUHq1DBkCHzyCTg42Hta4hW8bjvQCUXihBD/xUq0dRrhfI27JZwJyon5Z79i+dFLeF9fDHezwEpnOHvFXF6nDowZAz4+dh21eHUSJwyJE0L8XQwwE5utLxbLHZZcyk2vHJe4RGq0w0D86MiMXZvJ37EVnL5v3lI4D0yZYzaqRbIhJ5WEeIPkzQszZsCFC9ChA6wZAb+nTE2NvsMoFXYBq2pHx7wbSNf2Mb9VrIwOcoY5wM6J0HMOnBgKVavCo0fw2WemS9zBg/ae1pst/D6cWSCdNoQQCcwBJ4dPSelwjfvRLfhCR/Nb/uGMabKZ5mV7EJbLA9pdgY+LgZcnrFljCnn37y9d4uwp5Crs7wlLi0mcEELEM0egAxbLBaA/DXPe5gwOjL7lRlo6s9dWAt+KiqYn7/BoTB9I7QgnL4CfH3zYAG7+v1JnIpmTRSUhkpEcOWD8eLh8Gbp1g50j4bBHVir1m0K+sNM8dqpNjXLbyPqxZnvJ0uhAJ5h5H25/DSsjYPFo8PaGQ4egXDn49FN48MDe03ozaA0PTsOB4RBQESZnhLXN4N4xe49MCPFGSEsm5/lY1B+ERpfhR/WIEZV+5Ot2T/kxVxusBc9CjxioWfL/psStWWPvgb85IoPhxFhYmB+m5YRdI9DRxyFCCuYKIRKCB9APi+U8jg7t+SLrba48TcE3j2/gRg0W6fdJ/4U/fa7cJaptFbMWtWgF5MkJg38wsULYh7bBjd2wrRusbpTgHyfpb0IkY/fvmyyFceMgOBhK9oOQr05ywWswsJA8wa78sjsnZQLPoFyBsgrydoGhCkbGdobLkAEmTIBGCf8P0hvHGgXXd8DF1XBpDQRfAiAygwtOPpFYcsO9jOtJr2q88K0lrcGQOCHEy7GxjZvWT/B2uMgZYOzDgnywJB1VnuyA+zlhhQXOmH+zqF/fNHzInNmeQ06erNFwZRWc/hl9cT/KaoPUcL+ghTkFbMxI6cJ2gkhP+he+tcQJQ+KEEHEVCPQCVvEg3IMRWjPJPYwnuiEeDGbSiYc0//RDLHuvm8uzZ4JxU0wBWKXsOfA3g80KN3bCuaVwfjGE3UE7KCJzOuNa9wk4OL3wLeMaJ2RRSYg3QHCwWRcaOdIcPMpYH1KMCORyniGgfqXkXSfm7sxAoStB4An4pQX6QaelsH27uUmjRuYmGTLYcyqvv4gHcHmdWUi68htEhWBzcCAkuwspcofj7ANXPGGxzsAc7c8v/EAxS6YX/hh5WDAkTgjxKjRRLOGB7QsyW+5wEFh5pgSfbbhH1qjrcLEMBASaLnGpU8PYsdCihTw8vCqt4fZeOP0j+ux6VEQkuEF4flhREMZkcCT0cm30oaY8uFaHW909sLxE7oHECUPihBAvajcwCNhAcKQro5SVsU4xPFLtyWTtx+oFCyj1zXeom7Enlaq8DROnQb589hx08mSNhqDf
4fxSuLAMwu9hc1SQS2PJCxd9YINLSjpwEydSvPDtZVFJCPH/RETAypUwZw5s3Ai2nJB2/HkeVh+MtsynyjULc3a4k/XOY0gH+JWBjbWh508QGgpp05pjT02bykPDi4gON/WRTs2Gm7tB24hwT8FtH0VanzC8csADR1ikcvILjdkT2Rl9MDvshY21oGqhF/9IeVgwJE4IER+shDKJp7oP6dQTtkQrru4sQqs/AlFP3HDcmBn2njWX1q0LkydDliz2HfLrKOQqnBqODlyAevQIHCA6N2wvCCO9HdhzvCYhgU3RaetCJS/wAmWDC0/AJ+WLf5zECUPihBAv6xAwGFhBaIwT45WVUcqJu5YuFH/ShQ0DviDjxBUQATg5QJ8e0GcAODvbedyvOWsUXNsC55bAheXw9BE2Jwv42LDkg6u5YK2TOxepy00+x9HqxzyHl6t6JItKQoh/desW/PqrWWA6cQMs31+Ez4ainWfT9CxM2u1IyseRkAXIUsFsSGzea95crx5MmiSpDv/l4Vk4NtksJkU+5laalFzNF0n23E/JkhHClWIVeQkIacH6jd2I3uWJxwmokhbefxuqVAFf35dbv5OHBUPihBDx6SkPGYKDHkFKFcn6B054bPSm0o3LRB/xwnF1FCr0KaRKZXKvW7WSDYj/YouBS4vQJ4bA5VMoDTZvOF4QRvsoFp2vRkRIc/DxB+9UAGQMg2oW8HeD94A0L/nREicMiRNCvKqTwBC0XkikTTHFYuVH7ckNS29an6nC1C9a4LL5grk0VzqYOQ/effHSDm80reHOITg+Dc4tgshgbM4O6NxWHPLBjRyw2tGdA9G12Bnehcseb2F1tEAUuB2EJ37wMn29ZVFJCBEnWsOxYzB3LsxfAveaXUH1Hoaj5wy+OmHju4MOeIREm99ar+WCkfcgJDbVYfRoeWj4O2s0XFwJxybBta3EWCwE5nXGrfhT8mQFK7Al0pd5B9qxYuqnxOzz5O3cZgGpShUoXhwcXuZf/b+RhwVD4oQQCSGEm3QnlZ6Js7Ky8bozabZnpPyZIKxLXXAIjE15qF0bpkyBrFntO9ykKOQqnOiH9eRCHEKfgjvcLgITCsKkkMoEO7UiJks9IA1eMfC+A1RTUBXwiachSJwwJE4IEV/OAcOw6XnEaCuzLJrhtnRc5gd+XuhI16+7YbkVCgpoUBomLYUM2e096KQtMhgCf4HjU+HeMWyOFmz5NI75NXeyw3KVglUhtdjk2IUYTz/AYtb4NkGmE1AuEsoUhD59EnaTWhaVhBD/KzrapMXNmQfLPIKwDhiOY6bpfHI2kh8OOpP2fhREAWs8Yc8T86ZatcxDg7e3Xcdud0+uw/Fp6BPTUGG3CPZ0JrhoFNmLAO6w90lOAgI6sGRoO3J7Z6RKFXjvPdNkz8Ul/ocjDwuGxAkhEo7mNtf4nAx6JW7Kxq4gC+7b01Ji7T30SoWK0JAyJYwaBW3ayAaELQYu/crTowNwuXoJBUTngs1FoH+qkhz26ojVpSFOOh1+sQtIVYGSvNwO83+ROGFInBAivl0FRmDV09FE8auCn3Qazke0Z/HXQdSethBl1ZBKQe/m0HUSOHvae9BJh9Zwcw+cmIY+uxAV85SYDArHoprHvrDEIQW/htVge8ovsVER7lhIfRgK34KqCt7NC0WLmvD7qmRRSQjxSh4/hkWLYNSpO5zpMBYKjKfJ1RB+2u9CtuuRcAzT/SfUBl5epgp4u3Zv1kODtsHVzXBsEvriatBWHuS0kLq4DYdccCw6LQFbmrPr524U881FrVrwzjvg4ZHwQ5OHBUPihBCJ4T436IOnnoMXURy9Bm6rPMk/6YlpFgRQvRpMmw7Zstl1pHYRcpnHR7qSInAdzuEx4A7XisDYnDmZlq4zIS4tSKkzUV9BI6Ay4J4Iw5I4YUicECKh3ETzE1Y9EUcVyXYN41BcPFaJRR9dJe/xq+ayYs4wpAdU7QNOL15MOtmIeACn56FPTEY9OIvNWWHx
1ViLwsb0Tsy2vsMqp2+IjKxCtssWKoTCB15QNye4JlCZKllUEkLEm1On4Md1IfxSaTIxZUfx3p3bjN/viu+Rp6jlmGOWANWqwbRpkD2ZH2UNvQWn5hBzciqOjy8T6aZwKKxxLAYXPNwIOFGD07O/p2TWEtSuDQUKJP5amzwsGBInhEhModxjOOjRpCeUi1fAfYQzmeZEQQTYUrhhGT0aPv4k+W9AWKO5c2os+uRwMt66hwIic8G6/Knon/tjjrt2wVNno56CJkA1IAEOrf4riROGxAkhEtojYCZRtjE4W4IIslmYrG3YJqShb+8nuIVHgytQ3xO+HgDFPwNHV3sPOnFoDde3o49PQZ9fgsUagy0TWIrC6XyKWU7F+cXSjfshTalwz4mOGaG+By/Rx+3lyKKSECLeRUXB/M1P+TblXG6XG0HJ4ItM2e9MqV+jzOJSOODpCuMnQ6vWyeuhwRoFl9aiT85AX16PRduwZgWHYnA9j4XFd8txaWMvKrnXpVo1RapU9h2uPCwYEieEsIcoQpjCEwaSVd/nzgHw/EyR4g/zO2dYiUKkWLEKlT2+qgMlHWF3T3Bpbwfy3TiAS4QN7QGXCjgxqlBNpqYdhrMugH/sQlINzHOUvUicMCROCJFYrMBaYmxjcLRs5alWrLquydhJ8c7q2DWJnEDrDFCnOxRuD24v24ogCdMa7hyGswuJPjsbpyf3sbmApQA8KgLz0vswR3Xg3L3OVA115/MsUNklYdKg/4ssKgkhEtSFmzG0urWU/YWG4RN9lNmbnPDrH406EXtBVW+YswYyF7PrOF/Zg9NwYibRp2bi9PQRVndwKAQhhWCxsw+BJz6noeVLypV2jJcC2/FFHhYMiRNC2JONCBZzi1742K4QOgZSfA+WMLC5K843bkCa70eR3uf1Pt1qi47gwPZ+eN+YjPf9J6DgqQ+sKuhL7zwjuaVqUkfBh0BNEm+H+b9InDAkTghhD4FoPZ5o2yycLRGcXaLI0EWT+jZoB1BVgaquULgVlPgC0hex94BfjdZw/wScXYj17HwcHl/DZgFLTojJB+vypWOWU1MO3OxLbWt6OmWFohZT09yeZFFJCJEorFrT6cpGZqUfiqfjdpYPt1BxmA31FHRKoGdWVJuhkKnl63NyKTIEzi4k6o8pON8/bP7Rzw3WwrAhmwe/32yEf8wgKuVOuh2N5GHBkDghRFKgiWYbF/kK36Bj2JqDZZf5ia087H+rAbpCd8rVr4CD42sSJ4BjxzYRFtiV0vdP4xwJpITzhT0YXrgdi92HUUO58QFQm8SpkfSiJE4YEieEsKdgYDZPIsfiGX6JsJ4K92lmfeJRXldSNrJhSRsF2d41i0u5/cHiaNcRv5AHgXB2IfpsAOrhObQCnR0s+eFIXldmOddg160fqO1YmA4ZIaltsciikhAi0c16up/vnk7A5f4C1jePJv/B2B9UBFtrNyxlmkCBEeCSwa7j/Ee2GLixi8gjM3C4tAhHWxS2tGApDCcLKBbFlCdryPd0yFYDZfd9g/8mDwuGxAkhkpoTnLMNIvuopbj2sUIU2DKApRn8kbIYFxx6ULpFY3LlTqCqo6/o1p177NzxNX6hi8j6MBIc4GkexfKipRjhPZmillJ8gOna5mbvwf4HiROGxAkhkgIb8BsPI8aRZs96dFtQQRDlCls/TkXZ/A6kiXwAntmh+OdQ5GNwS2vvQf+zxxfh7ELzde84GojxBidfuJFXMfd/2Lvv+Krq+4/jr0922MgSCHuIDMsIIlNZCg5cOHDbqhWrVKuordZWrbWOtv4cteKqKI4qiogIyHIBClhFATGojACyZUPW9/fH90QCJJBxc0fyfj4e55Hce84993NvkvvJ9/MdJ7UbUzbcTn93LtcdDVHYKvqZikoiEjHr2MzNu56j1T/+xl33bCExB/IaQdwF4JpBTqvjSOz2Zzj6rMiNXnIOtn6LW/E+u5dOJmnDbBLz9pCXBHHtYHNHeLFOI9bvvJbb
av+OWhaN/cxFU2PBU54QiVa72f3l/SRc/CBJi7NwCbBvGKScAGvtKN774bcc1WskQ8+tR0okFx7Kzeb7ZdP4+uunabV3Nsds3UZCDlAHvj2uNo8d8ztyq97KWSRxEhCdpbDCKU94yhMi0eZ79m59Gn77GCkv7gJgXX8Yd0cSfVfVp8eWTL+Qd7uL/eil+hFcaiM3CzZ8Aevmwtp5sG4ebF8BwN6GRko7x6628EbVJry95Wra7h7NjWkpHB39/dOAikoiEgXyyGP6widpdfHttFq2k5xE2HFJArWOy8FyIatmDXKaDyC1zflYo2GQWM6Fm90bYdUM9mVMghVTSc7aBPhpetYM9jWDic2S+DD3DM5P/TN9rWP5xlOO1FjwlCdEotyePXD7VfDoy/6UTQqpAAAgAElEQVR2d9j9S6iyC/bGxfPf9eeSueVKjurUl579qtKxI+W7fl1uFrk/fso3S18kLnMyrbatJSkn+F+5DuxuEs+kY/rxTaPHOSmuPb2JzOKpoaA84SlPiEQrBxPuJ++ae4nbuBeqw6b/g/GnQ+25dTnzu+0k52VB4z7QYig06gVHdy/f9sSOTFg71xeP1s3zC27n7gNgT9VEshvlUSMtl7zWMKN6dd7YcRZVtt7D6CbNaRRXfmGVFxWVRCR67N7Nztt+Q7XH/wPAgt6w4w9J9N6aQ9KPeQA4g93V67KrVjdSWp1DjXbnQJW6ZXvenL2w5mNyf5hIdsYkUrb/4O9PBppCVjOY3wym1KjJDzkDOCHpCq5hKEkklu15o4AaC57yhEiMmDoVrrgcflxPXo044h7II7cVuCWQkANZFsdc14YPlwxl6+7hNDjueHr3TSQ9nbKNZMrNgnWfsTPzXbZ89xZHb8wgKdfnJeqCS4Nv0+rwUYPTcDWvo4t1pytxxGDb4BDKE57yhEiU27ABrr0a3poIgBsGNgY+qwmrvkyk8+fVaL17q99n8Vi9X0Cjnr7I1Kgn1GhespkRzsHerbBjFWxfCVsz4MdPfTFp5xoA8uLj2Vk/ifhG+6jaKA8awpbq8LGrxdzdXdmx6Q/c1nggTWJo+afCqKgkItFn6lTclVdi69axs3o81z+WS8YFcMm6KgxYE0/LzF0k/pjnrzgK7EquyaaUzuytM4yG6WdTI625Twp5ObB7A3k71rF38xqytv5A3rbl2O5VxO9ZS8K+DSTu20pC1k4MB3FAI8hqCv9rDu80gI84Djifq+POYgTtiY+BdZJKQo0FT3lCJIZs3AhXXw1vvw3A3hEtyfq/NdTYtQ9WQvYqSNjgr4azKyGe2bltmbHoFDZsuYwmHTrTt6/RsydUrQq5uX7LyYHcrCzY9gNu23L27fiK7J1fE7czg6o7V1Jz50YS8oLOjXpgabCtSSLTG3RjVcIvaVvlHPpShxoRfFvKi/KEpzwhEgOcgxdfhBtugO3bya1blU3/qkKD8zaSBXy7BzasA7cylQarUmi9ZRcpeVn+sVWPhoY99xea6v0C9m6G7UHRaMeqQ7/P3nnA02fXrM7uox3JjXaR0shBPVgZDx9Tm092dmXl2rPpnXcRN7StTfWK0OsQUFFJRKLT5s3w61/D+PEAZFyQzk1PpzGr+kfsYTNdcuDi9bUZnGkcs2YbSWtzwY8qZU98KgYk5+4pvARUBX+JnWr+654qxtJGjolNYFpSdb5yp9HThjGKUziVoypET3NR1FjwlCdEYoxz8MwzcOONsHs3NG0K4+4mu88uVjGZanvm0mD1VlgFeSsh7if/sK0pCczIacmMb06iRsp22tZdzrE11tDStlA/ax9xBf7dzUsAakFcLf81txEsbNSIT3KHUY0rOalGOq2Jq2BdDYdSnvCUJ0RiyOrV8MtfwvTpAORcPJRlj7ZiV435NI77hsZx2/xxebBjE6xfY+StTKHuOuOo3buLPG1OagpZ1RPJqm7kVc8hsWY2VapnE18TqAFUga+BjziKj3d358tlwzn2x3P5XZva9Gxd7q86
YlRUEpHo5RyMHQvXXw87d8Kxx5I3/nW+PDaL95nBBKazkI/IZjcd8mDE5oYMycyj3bqt5MXFsb1aEluqJrK+mrGmWg4rqu3i+yrZrIuHDcB6YBMNyKYVqe5EBttpXM8JDCA+Zte+KCk1FjzlCZEYlZEBF18M8+f7RZQefBBuuimYwrCWn5jDCqZRdfs0WqxaRcJKB6uA/DZDKhAUjajpv26qlcKqGnVZldyYTTkt2L6nLdm7O9GxZn8G1ahDcoReaqQoT3jKEyIxJi8PnnwSRo/26/I1buzbFQMGsJ2tfMNXZGTPIs99SPPEpXSy9dQiD3YB62DXRiOlivMFo+r4olGiTx+ZwGriyaQqq6lJJvVYubcpn311JkcvOp1Lq9flyhOhQYNIvgHho6KSiES/Zcvg3HNh8WKoVg2efRbOPx+AfexjHvOYzgwmMZ1FfEZe/rw4IJ4UatOSOrSkPi1pSEvSaElTWtKC5tShKtWA9lABVkgqOTUWPOUJkRiWnQ1/+AM8/LC/fe658NxzUOPAyWh57GY5n7HazabK9qnsSG3AvqSOJNCGZJpSnabUIY1GJBPJC8lFG+UJT3lCJEZ9+y1cfjnMm+c7HO64A/70J0jYv5BRNvAljrm5y/g26y2qJ35As4Tv2EhN1mU3Yv2+ZmzZ1ZqdO9qTsK0NDXYcTf29KdTNhnrZ0CAP6tSA3r391OrKRkUlEYkNu3b5NTReecXfvvFG3yOdeGApaDvbmcc8qlKVlrTkaI7GKvzkhNJTY8FTnhCpAN56C664ArZvhzZt4M03oWPsXp0zWihPeMoTIjEsJwfuvddvzkHfvvDyy5CWVujhDtiEH8CaFM44Y1Rx80RFXlJERGJB1aowbhw89pjvWXjkEejfH9auPeCwGtTgZE6mN71pSEMVlEREKouzz4YFC6BTJz8trkcPeOmlSEclIiKRlpAAd98NM2ZAw4bw0Ufwi1/AO+8UergB9VBBKdRUVBKRyDPz6yt9+KGfF/3JJ9C1K3zwQaQjExGRaNCmjZ/icNllfgHvSy+F666DffsiHZmIiERa//7wxRcwZAhs2QLDhvl1+LKyIh1ZpaCikohEj5494fPPfWJYvx4GDvRracTwNF0REQmRKlXgP/+Bp56CpCS/UGvfvrByZaQjExGRSKtfH9591y+jkT/7oVcv+O67SEdW4amoJCLRpX59mDYNbr8dcnP9lR2GD/draYiISOVmBtdc40e0Nmvmrw7XtStMmRLpyEREJNLi4nzb4aOPoHlzWLgQunSBV1+NdGQVmopKIhJ9EhLg/vthwgR/lZ8334Tu3f1V4kRERNLT/cjWoUP9VIdTT4U//9l3RoiISOV2wgnwv//5q4bu2AEjRvgOid27Ix1ZhaSikohErzPP9D0Mxx3nLxt6/PH+ctKVZTpcXl6kIxARiV5HHQWTJvmr/oBfrPW002DjxsjGJSIikVerFrz+OvzrX5CcDE8/7dsS6qQOORWVRCS6tW4Nc+f6RVl374Zf/QpOPhm+/z7SkZWfrCwYMwbattU8cBGRw4mLgzvvhKlToU4d/7V9ez/VoSJ3QOTl+asbnXkm7N0b6WhERKKTGYwcCZ9+Cscc4wtK3bv79ZY0sjVkVFQSkehXpQq88AK8+KJvNEyfDh07wt//Djk5kY4udLKz4ZlnfDHp17/2BaVnnol0VCIi0W/wYD/VoX9/2LTJT3U46yxYsybSkYVWdjaMHQudOvmrG02c6G+LiEjRfvELWLAALr8c9uzxV4bLnyJX0YWhg0VFJRGJDWZwySWwdClcdJFPCLfc4q8Yt2hRpKMrm+xsP63vmGPg6qv9lYyOPdb3tN93X6SjExGJDU2awIwZfqRnjRq+4NKhgy/Ox/qopV274NFH/ejdyy+HJUsgLQ3++U+fE0VE5PCqVfNXEJ040eeLBQv8qKXRo/1nbEWydatvR1x2GbRoUe4jWlVUEpHYUq8ejBvn19HITwjdusEdd8TeFICcHJ/c
2rXz0/p++MEXll5+Gb76Ci64wE/tEBGR4jHzxfnFi+H002HbNn978ODYnDa9eTPcc4+/0t1vfwurVvmc8fzzfjTrjTf6hlIlYWZDzGyZmS03s9sL2d/PzD43sxwzGx6JGEUkyp1xhs8Rv/2tn0r88MN+BkQsX0XUOd92eOAB6NfPt5dGjPCzPFau9FdMLUdha60oCYhISJ12mk8I11/v50T/9a/QubO/hGi0y8nx0/natYMrr/QNnbZt4aWX/GsaMQLi4yMdZdgpT4hIyKSl+d7ol1+GunX9CKZOnWJnHY3Vq/30jKZN4U9/8sWl44+Ht97yeeKKKyApKdJRhpWZxQNPAEOB9sAIM2t/0GGrgCuAl8MbnYjElOrVfT749FM/NW7FCn810YsugvXrIx1d8eza5dfWGznSdzwcdxzcfrtvC5n56eAPP+xHtg4YUK6hhKWopCQgIuWienV47DH4+GM/XWzZMl+dv+462L490tEdKjfX9xi0b+8bBN9956cyjB3rGwkXX1wpi0mgPCEi5cDMF+mXLIELL/QXe7jpJujb10+ljkZLl/rOhpYtfYNn92445RSYNQvmzfPrRFXeEazHA8udc98757KAV4EzCx7gnFvhnFsE6PKpInJk3bvD/Pnw4IOQmgqvvOLbFM8+G33TpvPyfD577DFfAKtTx6+t9+9/+46IBg18/nj9db+24MyZcPPN/vWYlWtoCeV69v1+TgIAZpafBJbkH+CcWxHsUxIQkZLp1csvtPfXv8L998OTT/oe6ief9ENcw23XLv/hXnBbtcr3HGRk+GNatYI//tEXkhLC9VEc1ZQnRKR81KvnGwojRvge3blz/cjWu+6CW2+FxMTwx7RrF3zzjS8iLVnit6VL4dtv/f64OF8Iu/VW6NIl/PFFp8bA6gK3M4EeEYpFRCqKxES/rtLw4T5HTJ0KV13lO4KfesovTREJP/3kR1LNnes7FT791N+Xz8yPYD31VD+Do2vXiHU6hKslE7IkYGbXANcANG3atOyRiUjFkJwMd98N553nE8Gnn/rqfYcOMHCg3048EWrWLPtz5eb6UVFffOHnKR9cPNq6tejHtmjhi0mXXBKZhkz0Up4QkfI1bJgfzTp6tF+8+847fY/uVVf5zonjjgt9kX/btgMLR/nFoxUrCj8+Odn3NN9yi+98kIIK62ov1VAC5QkROUSLFvDee74T4sYb4YMP9k8pu/BCX1wqr6JNXp7vaJg7d/+2dOmho6XS0qB3bz9SaehQqF+/fOIpoXAVlUKWBJxzY4AxAOnp6VE2Jk1EIq5jR78Y3eOP++LN4sV+e/RRP7UsPR0GDfJFpp49ISXl8Odzzi+gPX++3xYsgIULYefOoh+TlOQ/9Js08ethNGnitxYt/JxmFZMKozwhIuWvVi14+mnfQLj6avjyS7jhBr+valXo0cP/w96rl7/cdK1axTtvdrYfZbRokV8sddEiv61eXfjxCQl+Lb327f3UhPbt/da27ZHzUuWVCTQpcDsNWFuaEylPiEihzPy6SkOG+A6I557zF0u45x6/7Ea3bn7KXP7WrFnJppY5B+vW+ZkL337rvy5adOgoJPDtia5dfXslf0tLC+3rDZFwFZVClgRERI4oPt5f0eHaa/2H9IwZfvv00/3bfff5f9z79Nk/kqlrV784X8EC0oIFfoHUgzVp4hNLq1aHFo/q1avMa16UlvKEiITPwIG++PPyy35q8pw5fp27mTP9Br6h0KHD/iJT795+raP16/cXjfILSEuWQFbWoc+TkuIvynBw8ahVK3UwlNx8oI2ZtQDWABcCF0U2JBGpkI46yq+rdOmlfg2jzz6DzEyYPdtv+erW9cWl9PT9haajj/Zth/zCUX7xKP/rrl2FP2fjxgcWkLp29aNXY4C5MCxAZWYJwLfAQHwSmA9c5JxbXMix/wEmOefeONJ509PT3YIFC0IcrYhUWDt2+MZDfpHpyy8P3J+cDPv2Hfq4evUO7JVIT/eL
4UUxM1vonEuPdBzFpTwhIhH3449+ysGcOX7E68KFhxaKUlNhz57CH9+ihZ8qkb916uQvxhClF2CItTwBYGanAo8A8cBzzrn7zOweYIFzbqKZdQfeAmoDe4EfnXMdDndO5QkRKZYff9zf8Zy/FdbxXLVq0YUj8AWrtm2hTRv/tV07P0q2SZOiHxMhxc0TYSkqgZKAiEShjRv9FXWmT/dFpu+/92supacf2OPQpEm5XzUh1NRY8JQnRKTU9u71haX8ItOcOT5v1Ky5v2iUX0Dq2NFPjYghsZgnyoPyhIiUinN+fbyCRab8JTKqVj2wcJT/fZs2/qptMSLqikrlQUlAREJq61bfWKgAU9fUWPCUJ0QkZJyDLVt8L3OMdTQURnnCU54QkZDJzfXtiTp1KlWe0HWsRUTy1a4d6QhERCRamcVUD7OIiIRZfLxfZ6mSif3ueBERERERERERCTsVlUREREREREREpMRUVBIRERERERERkRJTUUlEREREREREREpMRSURERERERERESkxFZVERERERERERKTEVFQSEREREREREZESU1FJRERERERERERKzJxzkY6h1MxsI7CylA+vC2wKYTihpvhKL5pjA8VXVtEcXzTF1sw5Vy/SQURaGfNEaUXT70F50OuLbRX99UHFf42hen3KE4QlT8Tq76PiDi/FHT6xGDNEJu5i5YmYLiqVhZktcM6lRzqOoii+0ovm2EDxlVU0xxfNsUn4VPTfA72+2FbRXx9U/NdY0V9fRROrPy/FHV6KO3xiMWaI7rg1/U1EREREREREREpMRSURERERERERESmxylxUGhPpAI5A8ZVeNMcGiq+sojm+aI5Nwqei/x7o9cW2iv76oOK/xor++iqaWP15Ke7wUtzhE4sxQxTHXWnXVBIRERERERERkdKrzCOVRERERERERESklFRUEhERERERERGREqs0RSUzO8rM3jezjOBr7SKOa2pm08xsqZktMbPmURZfrpl9EWwTwxFbSeILjq1hZmvM7PFoic3MmpnZwuB9W2xm14YjthLE19nM5gaxLTKzC6IpvuC4KWb2k5lNCkNMQ8xsmZktN7PbC9mfbGavBfs/DdffaQni62dmn5tZjpkND2dsEn4l+Bt6MPgbX2pmj5qZhTvW0oj2/FlW0ZzfQiHac1BpRXueCIVivMbfBX9ri8xshpk1i0ScciAzOy/4W8ozsyIv/32kn2+4xWoui9UcFau5J5ZySqzmiVj87K80RSXgdmCGc64NMCO4XZixwEPOuWOB44ENURbfHudc52AbFqbYoPjxAdwLfBCWqLzixLYO6OWc6wz0AG43s0ZRFN9u4DLnXAdgCPCImdWKovgAHgIuLe9gzCweeAIYCrQHRphZ+4MO+xWw1TnXGvgn8EB5x1XC+FYBVwAvhysuiagj/g2ZWS+gN3Ac0BHoDpwYziDLINrzZ1lFc34LhWjPQSUW7XkiFIr5Gv8HpDvnjgPeAB4Mb5RShK+Bc4APizqgmD/fcIvVXBarOSpWc09M5JRYzROx+tlfmYpKZwIvBN+/AJx18AHBDyzBOfc+gHNup3Nud7TEF2HFis/MugENgGlhiguKEZtzLss5ty+4mUx4f/eLE9+3zrmM4Pu1+ERXL1riC+KaAewIQzzHA8udc98757KAV4MYCyoY8xvAwDD2lB0xPufcCufcIiAvTDFJZBXnb8gBKUAS/jMoEVgflujKLtrzZ1lFc34LhWjPQaUR7XkiFIqTa2YV+DubB6SFOUYphHNuqXNu2REOK87vcLjFai6L1RwVq7knVnJKrOaJmPzsr0xFpQbOuXUAwdf6hRzTFvjJzN40s/+Z2UNBtTBa4gNIMbMFZjbPzMJZeDpifGYWB/wdGB3GuIoVG4CZNTGzRcBq4IHgQy5q4stnZsfjk/V3YYgNShhfGDTG/4zyZQb3FXqMcy4H2AbUCUt0xYtPKpcj/g055+YCs/CjJtcBU51zS8MaZelFe/4sq2jO
b6EQ7TmoNKI9T4RCSXPNr4D3yjUiCaVo/F8iVnNZrOaoWM09sZJTYjVPxORnf0KkAwglM5sOHF3IrjuKeYoEoC/QBT995TX8FJZnoyQ+gKbOubVm1hKYaWZfOedC8kcagviuAyY751aHusgbivfOObcaOC6Y9jbBzN5wzoWkdyVEP1vMrCHwInC5cy5ko1xCFV+YFPbL40pxTHmJ5HNLhJT1b8jMWgPHsr836X0z6+ecK3J6RDhFe/4sq2jOb6EQ7TmoHER7ngiFYsdvZpcA6UR+GlKlcbi/Oefc28U5RSH3lfvvZ6zmsljNUbGaeypITonVPBGTn/0VqqjknBtU1D4zW29mDZ1z64Jf8MLm0WYC/3POfR88ZgJwAiH6wAlBfPlDCHHOfW9ms/EfjiEpKoUgvp5AXzO7DqgGJJnZTudcmRcfDMV7V+Bca81sMT65vFHW2EIVn5nVAN4F7nTOzQtFXKGML4wygSYFbqcBB48qyz8m08wSgJrAlvCEV6z4pIIJwd/Q2cA859zO4DHv4fNLVBSVoj1/llU057dQiPYcVA6iPU+EQrFyjZkNwjf0TiwwzV/K2eH+5oopIv9LxGoui9UcFau5p4LklFjNEzH52V+Zpr9NBC4Pvr8cKKwXYT5Q28zy53wOAJaEITYoRnxmVtvMkoPv6+IXyoua+JxzFzvnmjrnmgO3AGPD9A93cd67NDNLDb6vjX/vjjTfPZzxJQFv4d+z18MUV77i/G2E03ygjZm1CN6XC/ExFlQw5uHATOdcuHoWihOfVC7F+RtaBZxoZglmlojvVYr0lIHiivb8WVbRnN9CIdpzUGlEe54IhSO+RjPrAjwFDHPORbpDSEomGv+XiNVcFqs5KlZzT6zklFjNE7H52e+cqxQbfn7kDCAj+HpUcH868EyB4wYDi4CvgP8ASdESH9AriOvL4Ouvou39K3D8FcDj0RJbgZ/rl8HXa6LpvQMuAbKBLwpsnaMlvuD2R8BGYA++in5KOcZ0KvAtfhTeHcF99+A/PMEvEvk6sBz4DGgZrp9nMePrHrxHu4DNwOJwxqctvFsx/8bj8f8ALMX/I/uPSMcdytcX3I5I/gzX6ytwfNjyW7heXyRzUBleV1TniTC9xun4RZLzf2YTIx2zNgd+NE8msC/4+UwN7m+En8pU5M83wnHHZC6L1RwVq7knlnJKrOaJWPzstyAwERERERERERGRYqtM099ERERERERERCREVFQSEREREREREZESU1FJRERERERERERKTEUlEREREREREREpMRWVRERERERERESkxFRUkqhlZivMbFCk4xARkeikPCEiIkVRjhAJDxWVRERERERERESkxFRUEhERERERERGRElNRSWKCmSWb2SNmtjbYHjGz5AL7bzWzdcG+q8zMmVnrIs4128z+YmZzzGynmb1jZnXMbJyZbTez+WbWvMDxvYL7tgVfex10rnvN7BMz22Fm08ysboH9JwTP85OZfWlmJwX3n2dmCw+K62YzmxB8/x8ze8LM3g3O+6mZtSpwbDsze9/MtpjZMjM7v8C+U81sSfC4NWZ2S3B/XTObFMSyxcw+MjN9BohIhaA8oTwhIlIU5QjlCClHzjlt2qJyA1YAg4Lv7wHmAfWBesAc4N5g3xDgR6ADUAV4EXBA6yLOOxtYDrQCagJLgG+BQUACMBZ4Pjj2KGArcGmwb0Rwu06Bc30HtAVSg9t/C/Y1BjYDp+ILuIOD2/WAZGALcGyBuP4HnBt8/59g//HB844DXg32VQVWA1cG+7oCm4AOwf51QN/g+9pA1+D7+4F/A4nB1hewSP+ctWnTpq20m/KE8oQ2bdq0FbUpRyhHaAvPpsqixIqLgXuccxuccxuBu/EfzgDn4z+4Fzvndgf7juR559x3zrltwHvAd8656c65HOB1oEtw3GlAhnPuRedcjnPuFeAb4IyDzvWtc24P8F+gc3D/JcBk59xk51yec+59YAFwqnNuH/BacAxm1gFo
DkwqcN43nXOfBTGNK3De04EVzrnng5g+B8YDw4P92UB7M6vhnNsa7M+/vyHQzDmX7Zz7yDnnivFeiYjEAuUJ5QkRkaIoRyhHSDlRUUliRSNgZYHbK4P78vetLrCv4PdFWV/g+z2F3K5WxPPmP3fjArd/LPD97gKPbQacFwwR/cnMfgL64D+MAV4ALjIzwye1/wYJojjn7XHQeS8Gjg72n4vv0VhpZh+YWc/g/ofwvSrTzOx7M7sdEZGKQ3lCeUJEpCjKEcoRUk4SIh2ASDGtxX8ALg5uNw3uAz9EM63AsU3K4XkLagpMKcZjVwMvOueuLmync26emWXhh45eFGzFsRr4wDk3uIjzzgfONLNE4Hp8j0cT59wO4Gbg5qA3Y5aZzXfOzSjm84qIRDPliQPPqzwhIrKfcsSB51WOkJDRSCWJFa8Ad5pZvWDxuruAl4J9/wWuNLNjzaxKsC9UJgNtzewiM0swswuA9hw4tLQoLwFnmNkpZhZvZilmdpKZFUxaY4HHgRzn3MfFjGlSENOlZpYYbN2D159kZhebWU3nXDawHcgFMLPTzax10JuRf39uMZ9TRCTaKU/spzwhInIg5Yj9lCMkpFRUkljxF/wc4kXAV8DnwX04594DHgVm4Ydkzg0es+/Q05SMc24zft7xzfiF8W4FTnfObSrGY1cDZwJ/ADbiewVGc+Df3YtAx+BrcWPaAZwMXIjv/fgReAC/YB/44a8rzGw7cC3BXGugDTAd2Il/j/7lnJtd3OcVEYlyyhP7z6s8ISJyIOWI/edVjpCQMq2tJRWNmR0LfA0kBwvTRS0zSwU24K+qkBHpeEREKgPlCRERKYpyhEjJaKSSVAhmdnYwXLM2vtL+TrQngcBIYL6SgIhI+VKeEBGRoihHiJSeFuqWiuLXwH/w83o/AK4zs50F9lfBD2HNn/f7a+fcuOKc2Mw6An8HugF1nHMWioDNbAVgwFmhOJ+IiOxXSA5w7O9MWwAMK+Z5SpQDzMzhr7KTPxT8VefcVSUIveC5VqA8ISISDgXbEjWAfUEeKUsb4nJgFH7a2HbgZeAPoSpWKUdItND0N6kUgg/dq5xz00vx2GPwl+/cBEwIVVGpLMwsIUZ6T0REIi6cOSAoKrVxzi0vTayhFiyoas65vEjHIiISa8qYP0bip9F9CtQDJgKvO+f+FtIgSxaT2hAScpr+JnIEzrllzrln2X8J0iKZ2RNm9veD7nvHzG4Mvm9kZuPNbKOZ/WBmowocd7yZzTWzn8xsnZk9bmZJBfY7M/uNmWUAGuIqIhIGJckBJWFm55nZwoPuu9nMJgTfJ5vZw2a2yszWm9m/g7UzMKbCiSgAACAASURBVLPaZjYpyCVbg+/TCpxntpndZ2af4EdNtTSzK8zsezPbEeSfi0P5ekRE5EDOuSedcx8557Kcc2uAcUDvwo4NZxvCvH+a2QYz22Zmi4JRuSKloqKSVFrBpT1/OszWtBSnfQEYYWZxwXPUBQYCrwT3vQN8CTQO7r/RzE4JHpsL3ATUBXoG+6876PxnAT3wlyIVEZFSKqcckO9DM/vRzN40s+ZFHDMRaBEsCJvvEvZfwecBoC3QGWiNzxv5l7mOA54HmgFNgT34S0oXdClwDVAdf9WgR4GhzrnqQC/gi9K+OBGRyqwM+aMfRXdQhLMNcXIQS1ugFnAB/sp0IqWiopJUWs65l51ztQ6zrSrFOT8DtuE/zMFfqnO2c2490B2o55y7J+ix+B54OjgG59xC59w851yOc24F8BRw4kFPcb9zbotzbk/pXrWIiED55IDAiUBzoB3+Us2TzOyQNSydc/uA1wgu1WxmHYLHTQqmrF0N3BR85u8A/sr+fLHZOTfeObc72Hcfh+aL/zjnFgfTHHKAPKCjmaU659Y550I68kpEpLIoTf4wsyuBdODhIs4ZzjZENr7DoR1+evRS59y6sr0rUpmpqCQSei8QNBI4sNe5GdCoYE8G8AegAYCZtQ2mMPxoZtvxDYi6
B517dfmHLyIipeWc+zD4p/8n4LdAC+DYIg5/AbgoKCJdCvw3KDbVwy8Ou7BAvpgS3I+ZVTGzp8xsZZAvPgRqmVl8gXP/nC+cc7vwPdHXAuvM7F0zaxfK1y0iIoUzs7OAv+FHi246zKFhaUM452biR7c+Aaw3szFmVqNsr1IqMxWVpNIys4vNbOdhttJOfXgJONPMfoFvSEwI7l8N/HBQT0Z159ypwf4ngW/wC7zWwCeLgxeE1cr6IiIhUI454GCOQz/L/Q7n5gFZQF/gIvY3IDbhp7R1KJAvajrnqgX7bwaOAXoE+aJf/ss66HkLPtdU59xgoCE+1zxd5lcmIlIJlSR/mNkQ/OftGc65r45w6rC1IZxzjzrnugEd8NPgRpf4jRAJqKgklZZzbpxzrtphtlXgr5xjZilAUnA7xcySD3PeTGA+vnEwvsBUtc+A7WZ2m5mlmlm8mXU0s+7B/ur4y43uDHqQR5bPKxcRkfLIAWbWwcw6B5/v1YC/A2uApYcJZSy+xzjHOfdxEFsevhHyTzOrH5y7cYH1M6rji04/mdlRwJ8O91rNrIGZDTOzqvhLY+9k/+WxRUSkBEqQPwbgF+c+N5jedqTzhqUNYWbdzayHmSUCu4C9KCdIGaioJHJkzfD/vOevP7EHWHaEx7wAdGJ/rzPOuVzgDPyiqz/ge6KfAWoGh9yC76negW9MvBaa8EVEpAwOmwPM7D0z+0NwswH+s3s78D1+jaTTnXPZhzn/i0BHCuSLwG3AcmBeMJ1hOn50EsAjQCo+j8zDT407nDj86Ka1wBb8WhsHL+IqIiKh9Uf8//mTC4xieu8IjwlHG6JGcNxWYCV+ke5C13oSKQ5zTrNpRELNzPrhh7A2D3qcRUREDmFmqcAGoKtzLiPS8YiISOSoDSGxSCOVREIsGEr6W+AZJQMRETmCkcB8FZRERCo3tSEkVqmoJBJCZnYs8BN+IdRHIhyOiIhEMTNbgW9A3BzhUERigpkNMbNlZrbczG4vZP8/zeyLYPs2uEqWSNRTG0Jimaa/iYhIVAiukPJ/QDy+l+5vB+3/J9A/uFkFqO+cqxXeKEVEJBLMLB74FhgM5C9oPMI5t6SI428Aujjnfhm+KEVEKp+ESAcgIiISNBaeoEBjwcwmFmwsOOduKnD8DUCXsAcqIiKRcjyw3Dn3PYCZvQqcCRRaVAJGcIQrI4qISNnFdFGpbt26rnnz5pEOQ0Qk6ixcuHCTc65epOMogXJpLChPiIgULgbzRGNgdYHbmUCPwg40s2ZAC2BmEfuvAa4BqFq1ard27dqFNlIRkQqguHkipotKzZs3Z8GCBZEOQ0Qk6pjZykjHUELl0lho2rSp8oSISCFiME9YIfcVtY7HhcAbwaXYD32Qc2OAMQDp6elOeUJE5FDFzRNaqFtERKJBSBsLzrl051x6vXqx1AkvIiKHkQk0KXA7DVhbxLEXAq+Ue0QiIqKikoiIRAU1FkRE5HDmA23MrIWZJeFzwcSDDzKzY4DawNwwxyciUimpqCQiItFAjQURESmScy4HuB6YCiwF/uucW2xm95jZsAKHjgBedbrEtYhIWMT0mkoiUvlkZ2eTmZnJ3r17Ix1KVEhJSSEtLY3ExMRIh1ImzrkcM8tvLMQDz+U3FoAFzrn8ApMaCyJyWMoTB6ooeQLAOTcZmHzQfXcddPvP4YxJRGKLcsShyponVFQSkZiSmZlJ9erVad68OWaFLcNTeTjn2Lx5M5mZmbRo0SLS4ZSZGgsiEgrKE/tVtDwhIlJWyhEHCkWe0PQ3EYkpe/fupU6dOkoCgJlRp04d9bSIiBSgPLGf8oSIyIGUIw4UijyhopKIxBwlgf30XoiIHEqfjfvpvRAROZA+Fw9U1vejkhaVsoNNRESkENl7QMs2iYhIkfZEOgARkahQCYtKefh1Xi8GciIci4hUJps3b6Z///5Uq1aN66+//oB9CxcupFOnTrRu3ZpRo0ahdagjKHsPjD8ZZv9OhSURCRvl
iFiyCegGPBLpQESkEonWPFEJi0pxQE/gdeAiVFgSkXBJSUnh3nvv5eGHHz5k38iRIxkzZgwZGRlkZGQwZcqUCEQoACSkQINu8Pkj8OFtKiyJSFgoR8SSWrCrJbibgKcjHYyIVBLRmicq59Xf3O8ABzYaMGAclfWtEIllN94IX3wR2nN27gyPHKHjccWKFQwdOpQ+ffowZ84cGjduzNtvv01qauphH1e1alX69OnD8uXLD7h/3bp1bN++nZ49ewJw2WWXMWHCBIYOHVqm1yKlZAYn/RNys2HBQxCfCL3/4u8XkZgSiTyhHFEJ7NoEL34O7ZrDideAVcV3VotILFFbIjQqXyXFOfj4DxCXCL0eBLsVX1h6icr4dohI6WRkZPDKK6/w9NNPc/755zN+/HjWrVvHuHHjDjm2X79+PProo0Wea82aNaSlpf18Oy0tjTVr1pRL3FJMZjDwMcjLhk//CnFJ0OtPkY5KRGKEckQFV6UBtD0PFj4Kic2g92VAKnB2pCMTkRhRkfJEJayiONizCb56Btwd0PsBsNvwhaUXqZRviUiMOlIvQHlq0aIFnTt3BqBbt26sWLGCO++8k9GjR5f4XIXNedZVKaKAxcHgf0NeDsz9sx+x1OMPkY5KREogUnlCOaKCM4P+j0DOHpj3NCQ0gR4XABOBIZGOTkSKSW2J0Kh8FRSLg8FPAQaf3gfuduiTX1gCFZZEpDiSk5N//j4+Pp49e/bw0EMPlap3IS0tjczMzJ9vZ2Zm0qhRo9AGLKVjcXDy0+By4OM7/CjX7iVP9iJSuShHVHzbbDs1Bz3pC0sfvwQJjaDb2cAU4MRIhyciUa4i5YnKWT3J7322OPjsb+Buhb5/A7sdP2JpLJX1rRGR0hs9enSpehcaNmxI9erVmTdvHj169GDs2LHccMMN5RChlEpcPJzyvF9j6cNbIS4But0U6ahEJMYoR1QcG9jACZzAlXFX8schz/vC0uzxkHA0/OJ0YAZwfKTDFJEYE6t5olJWTvYAcRZH8qB/+cLS/AfB3QL97gf7Pb6w9AKV9O0RkXLUvHlztm/fTlZWFhMmTGDatGm0b9+eJ598kiuuuII9e/YwdOhQLcAabeLi4dQX/Yil2b/zI5a6XH/kx4mIlIByRGyovq8ueXP7cddJd5EYl8jtp70ME8+B6ZMhoS50OAWYDfwiwpGKSEUTjXmi0lVN8oBz8WWj8RZHysAnwOJhwcP+sqAn/hUsf80MFZZE5FDNmzfn66+//vn2LbfcUuzHrlixotD709PTDzinRKG4BDj1Zb/G0swbfGHpF7+OdFQiEmWUIyq+vbvjiB/3LHGbsvn98N+TGJ/IzWe8AW+dAVNnQkItOGYw8CHQLtLhikiUqWh5Ii4izxpBcfjrMrwHnAHsNoMBj0KXUbDwnzB7Pbi/Ai8DlwO5EYxWRESiSnwinP4atDwdpl8LXz0b6YhERCTM9tWGtWPiSW7xAnFvn8ct3MJjCU/DWROgUS+YvB2+ywIGAT9EOlwRkXIVtqKSmQ0xs2VmttzMbi/imPPNbImZLTazl8srlhE74aksmAmcBuzMv4JD1xvh8/+DWevA3YcKSyIicoj4JDjjDWg+BKZdDYtfiHREIiISRkcD/zHY1zWB5EbjiJt0NqMYxb8TX4Sz34X6XeCd3bBiGzAQCN+lvUVEwi0sRSUziweeAIYC7YERZtb+oGPaAL8HejvnOgA3lkcseXlw1lnw6lB4Zi98BJwCbDODk/4B3X4H/3sMZq4F9xdgHHAFKiyJiMjPEpJh2JvQdCBMuRKWlls/iIiIRKELgJcM9qUnklzvVeKnns5IRvJs8utwzhQ4qj28nQ2ZP+JHLG2IcMQiIuUjXCOVjgeWO+e+d85lAa8CZx50zNXAE865rQDOuXL55I2LgyuugNmz4dlB8Pxu+AwYDGw1gxMfhvTR8MUTMGMNuHuAl1BhSUREDpCYCme9DU1OhPcuhW9ei3REIiISRiOAsQb7jk8i
scYbxM8YwtXuasamToLh06BGS3gTWPcDcDKwNbIBi4iUg3AVlRoDqwvczgzuK6gt0NbMPjGzeWY2pLATmdk1ZrbAzBZs3LixVMFccgm89hp8+ik8eiL8Zwd8iR+cuskM+j0A3W+DL5+E6SosiYhIERKrwFnvQKPeMPli+PaNSEckIiJhdDHBVLgTkklMeZO4jwZwpbuSV6rMgPOmQ9VGMD4R1i8GhgDbIxyxiEhohauoZIXc5w66nQC0AU7CF/6fMbNahzzIuTHOuXTnXHq9evVKHdDw4fDWW7BoETzYB174CZYAA4ANZtD3fjj+97DoKXh/lQpLIiJSuKRqcM670PAEeHcEZLwV6YhERCSMLgWeM9jXK5UEm4jN7cul7lJer/YJnDcDko6CN1Jh00L8iq67IhyxiEjohKuolAk0KXA7DVhbyDFvO+eynXM/AMvwRaZyc/rpMGkSZGTA3T3hhU2wHF/VWmcGfe6DE+6Er56BaStUWBKRMnn//ffp1q0bnTp1olu3bsycOfPnfQsXLqRTp060bt2aUaNG4dzBdXeJaknV4ZzJ0KA7TDoflr8d6YhEJMYoR8S2K4BnDPb1rUJCziSYfwIXuYuYUON/vrAUXw1erwZbPsGvArInsgGLSMyJ1jwRrqLSfKCNmbUwsyTgQmDiQcdMAPoDmFld/HS478s7sMGDYcoUyMyEO06A53+EVcCJQKYZ9LoHTrgLvn4Opn4HeSosiUjp1K1bl3feeYevvvqKF154gUsvvfTnfSNHjmTMmDFkZGSQkZHBlClTIhiplEpyDTj3PajfFd45D76bFOmIRCSGKEfEvl8CY4B9/aqRsGsy/K8b5+Wdz7u1l8F5M4FkeL0m/DQDGA5kRTReEYkt0ZonEsLxJM65HDO7HpgKxAPPOecWm9k9wALn3MRg38lmtgRfrRntnNscjvj69YPp02HIELi1Bzz3CVydBv2AmWY07303xMXDnD+BuxROuRfi/oifwfdC8JJEJNxuBL4I8Tk7A48c4ZgVK1YwdOhQ+vTpw5w5c2jcuDFvv/02qamph31cly5dfv6+Q4cO7N27l3379rFlyxa2b99Oz549AbjsssuYMGECQ4cOLeOrkbBLrgnnToU3BsM758Kwt6DlqZGOSqTSikSeUI6o3K4G8oBr+9cgaeYU8r4axNmdzuGdOhM55bwZ8NpJ8N+j4MLJUGME8BphapKJyEHUlgiNcI1Uwjk32TnX1jnXyjl3X3DfXUFBCef9zjnX3jnXyTn3arhiA+jRA2bOhF274Mbj4ekf/PUZTgS+A+h5F/T+Cyx5Ed5bGoxYGgdcjkYsiVQ+GRkZ/OY3v2Hx4sXUqlWL8ePH89BDD9G5c+dDtlGjRh3y+PHjx9OlSxeSk5NZs2YNaWlpP+9LS0tjzZo14Xw5EkoptfxVf+p0hInnwIqpkY5IRMJMOaJy+zXwBJA1oBZxG6fhlrTnzLyzmFF3PQx/H7Ly4L91YMebwGWoLSFS+VSkPKGyeAFdusAHH8DAgXB9D/j3x/CbtsGIJeCYE+4Ai4ePfw/uAjj1Hoi7K3i0RiyJhNuRegHKU4sWLejcuTMA3bp1Y8WKFdx5552MHj36iI9dvHgxt912G9OmTQModM6zWWHXN5Bw+ZYcGpFAtdKeIKW2bzi8PhAmnOmvENd8cChDFJFiiFSeUI6Q6/Ajlm4YdBRJU98nN6E/p7c9g8kN3qX/uVPhjUHwel244BWomgI8Qxj7+0UEtSVCRZ9cB+nQAT78EJKTYeQJ8NjXkIMfsbQYoMft0O9BWPYavLsIcjViSaQySk5O/vn7+Ph4cnJyitW7kJmZydlnn83YsWNp1aoV4HsTMjMzDzimUaNG4XsxcoA9ZLGW03mNP7P1kAuVlkDqUf5y0kcdA28Pg5UzQhekiEQ15QgBuB7faM06pS62YgY5y1twWt7pfNBwD5zzHuzcA6/Xg93PAzdw6MWxRaSiqkh5QiOVCtG2rS8sDRwIv+4F
/5oFt3bzV4V7H+jcfTTEJcDs30FeLpx+D8TfhU8EY9GIJZHKafTo0YftXfjpp5847bTTuP/+++ndu/fP9zds2JDq1aszb948evTowdixY7nhhhvCEbIUIpU4mtOIk7ibcfzEIP5Bg9L2waTWgeHT4fUBMOEMOPtdaNo/tAGLSExQjqicfosfsfS7IfVJfG8m2dafU1udxpTG79H37Enw5lB4ox6c9y9ITQUeAjQSTaQyitU8oZFKRWjRwheWGjaEX/eDv82BVGAA/lJ2dLsJ+j8Cy9+CdxYGI5ZeRvOiRaQojz/+OMuXL+fee+/9uedhw4YNADz55JNcddVVtG7dmlatWmkB1ohKYO2cZ1i290Yu5v/4mF+xipzSn65KPX856Zot4K3TYfUHoQtVRCoM5YiK6ybgYSB7aANs+UyyfkhjaN6pzGmSBGe+DVu2wfj6sO/vwF1HOJuIVFbRmiessPl3sSI9Pd0tWLCgXJ9j/XoYNAgyMuBfk+EvA2Az8B7QC+B/T8DM66Hl6XDG8ZBwF3ARGrEkUj6WLl3KscceG+kwokph74mZLXTOpUcopKhRmjyxZw+0bAlVqzre/vweOtT4M5M5lzaMow3JRz5BUXath//2hx2r4JwpkNan9OcSkSIpTxxKeaJo4WhP5HsYGA0kvrcW1+4kUpr9yPtx0zjhu03+wg4NasPwDZB0H/CHsMQkUtkoRxSuLHmiUo5UWstaNrO5WMc2aACzZ0PHjnDtELhtMjQATgY+AOjyGxj0JHw/CSbOg5y72T9iqQw92yIiEhGpqfDWW7Bli3HysX/iy03/5FTGk8kwvmJX6U9ctQGcPxOqpfnpDmvmhC5oEZFKwMyGmNkyM1tuZrcXccz5ZrbEzBab2cvhjvFwbsFPbsse2gi+mcXeVfU5Oe8UPmtVH05/DX7cDG/Vh+w7iOwSwiIixVfpikp55HEmZ9KXvqxmdbEeU6cOzJgB3bvDdWfAqPHQFBgKTAf4xbUw+Cn4YTK8PReyC06FU2FJRKQ4oqmx8MkJf+fhhbNwDgYccyPzVz9LP6azi1P4jJ9Kf+KqRweFpYbw5hBYOzd0QYuIVGBmFg88gf8XvD0wwszaH3RMG+D3QG/nXAfgxrAHegT5haWcoY1h6Sz2rK7LoLyTWdCmCZw6DtZsggn1IPsm4N8RjlZE5MgqXVEpjjge4iHWsIZe9GIpS4v1uJo1YepUOOkkGHUe/HIstAFOByYDHHcNnPwsrJgKb38UFJZeAS5FhSURkcOLpsbCHvbwHM8xssUQ/vj5W9SqBf2P/SWfLH2NrnxGMv35gA2lf4JqjeC8Wb7ANP4UWDsvdMGLiFRcxwPLnXPfO+eygFeBMw865mrgCefcVgDnXBk+rMvPLcCDQM7QJrBkFrvX1GZg3mA+b9cGTnkeVm2CifUgZyTwXISjFRE5vEpXVAI4iZP4gA/IJps+9GEexfuHvlo1mDQJhg6F0ZfDBU9BR+AsYAJAp1/CkOdh5XSYMBuy/4LPd5egwpKIyGFFTWMhlVQ+5EO60pXrjx7OdQufpWVLGNx5ODM/m0hblnE0/ZhazNGuhare2BeWqjTwhaV1n4buBYiIVEyN4YAP3szgvoLaAm3N7BMzm2dmQwo7kZldY2YLzGzBxo0byyncwxtNfmGpKfb1LHatq8GA3EF80eE4GDwGVmyESfUg91fASxGJUUSkOCplUQmgM52ZwxxqU5uBDOQ93ivW4/LX2jjnHLjjWhj6d+gGDAdeA+hwOQwdC6tnw1vTIesvwZ6LUWFJRKRIUdVYqEMdpjOdwQzmllpXcc68B+jazXFazyG8O3UajVlHO/owgYxSnR/whaXzZ/mrw71xsgpLIiKHZ4Xcd/AVhxLwkwlOAkYAz5hZrUMe5NwY51y6cy69Xr16IQ+0uPYXlprDolnsXF+NAbmDWHTc8TDgcfhuI7xbD3IvI2hpiIhEnUpbVAJoSUs+4ROO4RiGMYyXitkLkJQEr70GF18M
f7kF+twDvZy/5tuLAO0vgaEvQeaH8ObUoLD0X/wR2eX2ekREYljUNRaqUpWJTGQEI7i7yu0c/8FoBg7O47whfXjtlVnUZDcn0JfXWFTq56B6mh+xlFo3KCx9VvpziYhUbJlAkwK304C1hRzztnMu2zn3A7AMnzei1mjgASB3aEv4chbbN6bQP3cgX3c5Efo/AhkbYXIdyLsIeCvC0YqIHKpSF5UAGtCA2cymL325lEv5B/8o1uMSEuCFF+Cqq+DhP8Fxt0N/B5cDzwAcOwJOewXWzoHxk2HffcDrqLAkUnmtWLGC1NRUOnfuTOfOnbn22mt/3rdw4UI6depE69atGTVqFM4dXE+p8KKysZBEEi/xEtdzPY8m/p2j3/0l516YzVUXdeWZxz4iwSVwMifyAvMOqYAVW40mcP5sX1gafzL8OD+Er0BEYoVyxBHNB9qYWQszSwIuBCYedMwEoD+AmdXFj3D9PqxRlsKt5BeWWsEXs9i2KYmTcgewuOsg6PcQfLsJ3qsNeecDkyIcrYhESrTmiUpfVAKoQQ0mM5nhDOdmbuY2bsMVo3kQHw9jxsCoUfDEg9B8FJzigkU/AI45P7g86GcwfmJQWHoD38GuwpJIZdSqVSu++OILvvjiC/797/1XdRk5ciRjxowhIyODjIwMpkyZEsEoIyJqGwtxxPEoj3I3d/Ni/AvsffkcrrxuD6NHtePhP37MPleXcxnE08woY2FpFqQcBW8Mhh8XhPIliEiMUI4omnMuB7gemAosBf7rnFtsZveY2bDgsKnAZjNbAswCRjvnNkcm4pL5ubA0pA18PpOftsRzUu4AlnQ/Ffr8Fb7ZDFNrQN45wLQIRysikRKNeSIhbM8U5VJI4VVe5QZu4EEeZCMbGcMYEo7wFpnBI49A1apw//1w0S4Y9gxcHwd7gZvbngtxb8A758HrufD/7N13XJbV+8Dxz+ERcO+FYoriwImrEgVUREBcIGpuK7NMM9NMK7OhlmmW+c00K3OWC7cMUWaQ5UgrXJR7b83UZJzfHzf5s2Q8IvAwrvf39byEh3Of5zq9/HJ5X/cZAdOg6JsY90srAOtcGJ0QBdMYxrCXvdnapzPOzGZ2hm2OHTuGj48P7dq1Iy4ujurVq7NhwwaKFSuWpc88e/YsN27coE2bNgAMHjyY9evX4+Pjk6X+8iOtdZJS6p+bBROw8J+bBWCX1npj6s86p94sJJOLNwsKxWQmU4lKjFQjaftZZ8ZU3sSH79TiyqUYJs/rzBDVhfmsYDh+mLLyIaUfMwpLK9sbhaXe26BKy2weiRCFiyXyhOSInKO1DiL14OX73pt839caGJv6yndew1j3PdGnPqbgCK626kD78h2JeiICp5REiHsbrMpD5+6ggkl9ziKEyCK5l8geMlPpPiZMzGUu7/AO3/AN/vhzi1uZXqcUvP8+TJ0K334DRfpDQLJxXOhUAMce0H0tXPoFVq+B29OAtUBf4G6OjkkIkTMSEhIYOXIk8fHxlC1blsDAQGbOnHlvOur9r9GjR9+77ujRozRv3hx3d3diYmIAOH36NPb29vfa2Nvbc/r06Vwfk6VprYO01vW01nW01tNS35ucWlBCG8ZqrRtqrZtorVfkdowjGMEKVvCj+pHwt91587OzfPlFVcb0ieRcSguGE8AXLM76b/bSNaFvJNiWNQpL5/dkY/RCiNwiOUJk1QRgOpDs0wB2R3DlCrgnd+BAm97w5CT47QpsKw7aF/jewtEKIbKqIOUJman0HwrF27xNZSozkpF0pjOb2EQ5ymV67ZtvQvHiMHYs+NyC/uvgLZMxY2lKna6o7utgoz+sXmXMWCr+JkZhaSVgk8MjE6LgyewpQE5ycHDA2dkZgJYtW3Ls2DEmTZrE+PHj073Gzs6OEydOUKFCBXbv3k3Pnj2Jj49Pc82zUmntWy3ygj70oRzl8MOP70a2Y1rFrbzVvw7XOofxRag/L5qG8gXXGMTLFM/KB5SuacxYWtUe1nSC
gG1QpUX2DkKIQsJSeUJyhHgUEzBOr5jg3QDT1kiuNG+Pe/kORLmE45ScCDs/BKsy0NEH1DbgCQtHLET+JPcS2UNmKqVjBCNYyUp2shM33DiNeZW+V16BefMgeBOc84ahiTAN42QHXbsL9NwAVw/B6hVwaxrGFiF9kBlLQuQvtra29742mUwkJSVl+nTB1taWChUqAEbyqFOnDocPH8be3p5Tp07d6+/UqVNUq1YtdwckHoonnkQQwXWuM6dvW6aH7CUmpiR9Ht/E4bv+PM8Yf35XZQAAIABJREFUVvAO17K6y1KZWsbm3daljMLS+Z+zM3whRA6THCEe1WvADCC5cwPYG8GVq+Ce0pGDrkOg5VjYex0iTaA7A7stHK0Q4mEVpDwhRaUM9KY3wQRznOO44MJBDpp13QsvGCfDRYbD4Q4w/G+YBbwEpNTygp6b4NrvsGo5/DUN2AD0RgpLQuRv48ePv7dx3v2vOXPmAHDx4kWSk5MBOHLkCAkJCdSuXRs7OztKlSrFjh070FqzZMkSevToYcmhCDO0pjXf8z022DDV050PYqM5dMgW30Yr2X/raZ7hXYIYwwVSsvYBZWoZS+GsSxqFpQvZu+ZfCJG7JEeIhzUe+AhI9nSCfeFcuaqNwpL7MGg+GvZch2hAewL7LBusEOKR5dc8IUWlTHSkI5FEcoc7tKMdP/GTWdcNHgwrV8JPP8KutjDqtnEi3HAguWYn8A+C68dg1VK4+T7GIUcBwN85NhYhhGVFR0fTtGlTmjVrRkBAAPPnz6d8+fIAzJs3j2HDhuHo6EidOnUK5Qas+VEDGhBLLNWpzhutOvP2ng1cvVoEjzpfs/faWPozh1ie5gRJWfuAMg6pM5ZKwGoPKSwJUYBJjhBpGQd8DCR7NIRfIrh8NQX3lI4c6jACmo2AXTcgNgm0BxBv4WiFEDkpr+YJldb6u/yiVatWeteu3Dl2+Xd+xwsvznGOQALxxtus67ZsgV69wLEueP0AH5eEAcAioMipGFjrAyWrQ++hUOoNwBcIBGwz6FWIwuvAgQM4OTlZOow8Ja3/Jkqp3VrrVhYKKc/IrTxxmcv44stOdvLumQXMf/xZbt7UhB6YxhN2bxFKDxxYQT2KZu0Drh0x9lhK/At6b4fKztkavxAFieSJB0meSF9u3k88ik+BMYApYj806UCFciairbZTP+xj+PUraFMSXEoAkUADi8YqRF4mOSJtj5InZKaSmRxxJJZY6lGPbnRjOcvNus7X1ygsHT0Cm1vCa9dgOdAPuGvvCr1C4a+zsOpruPE+sAXwx9jeWwghRH5QgQpsZzueePJWtWEM+HU6latAhzqTiPr9f3ixgXP4so8/s/YBZWvLjCUhhCjEXgb+ByR3aAjx4Vy6loRbigeHPcdBo6Hww03YcRPoCCRYNFYhROEiRaWHUJWqRBGFK64MZCCf8IlZ13l4wNatcO4crHSGNy7BGozFbneqt4VeW+HWRVj1JVyfBgQhhSUhhMhfSlCCjWykP/2ZUe51Ov48lgYNU+jkNIote5biQhSJdOIHLmftA/5bWJLNu4UQolAZBXwGJLs3QsVHcOlaEq4pHTnc+TVoOAhi/4IfbwAdgD8sG6wQotCQotJDKk1pgggigADGMpYJTECbcbpP27YQHg5//gmLmsHkc7AJ6AHcqvYk9N4Gd67Cyi/g2vtAMNATKSwJIUT+YYMNS1nKy7zMF8VnUzduMC7t79K15UDWbFtHE/ZRGnfCOZO1D7hXWCopp8IJIUQhNBL4HEh2a4Q6kDpjSXciwesNaNAfvv8LfrqKUVg6atlghRCFghSVsqAoRVnBCkYwghnM4GmeJpHETK9r2RKioiA5GeY1hXdOQhjGLko3q7Y29slIvAkrP4er7wNbMcpOt3N2QEIIIbKNFVZ8wie8z/ussllO0ZAedO37F/08u7HwuxBq6eM40JbNWX2KXLY29ImQwpIQQhRSI4D5QHK7xqiD4Vy8loir9iDBZxLU
fwpibsHOSxhL4U5YNlghRIEnRaUsMmFiLnN5l3dZzGL88OMWtzK9rnFjiI4GW1uY3RTe+R1iAC/gepUW0Dscku/Aqs/gylSMspMUloQQIj9RKF7ndb7kS7aZtnLhOw/6jbrMi/3bM3tOBOX0n7SkHav5JWsfIIUlIYQo1J4HFgDJbRvDoXAuXr+La0onDnd5C+r1gejbsPscxoylU5YNVghRoElR6REoFJOZzHzmE0wwHnhw2Yy9MurVg5gYKF8eZjaHt/fDTqATcKVyM+NGISUJVv0PLk8FtgHdwYyilRBCiLxjGMMIJJB9ai9757gyfMpJJo1pxbtvxGCtTXTCnSXEmbGIOg3/KizJHktCCFHYPAd8Beg2jeFwBBevJ+Ka0olDXd6CegEQeQf2nMYoLGVx2bUQQmRCikrZ4HmeZzWr2cMeXHHlJCczvaZWLaOwVKMGfNAKJu2GXzF+5V+o2NjYMwNg5Wy4NBXYDnRDCktC5F+XL1+mQ4cOlCxZklGjRv3rZ7t376ZJkyY4OjoyevRotDbKDFeuXMHT05O6devi6enJ1atXLRG6eAQ96UkooZxWpwma5MLYLw8we7oTrzwTy21diQA68TUhWS8s9Y0E61JSWBIin5McIbLiWeArBfqJxqjfI7h0LRk33ZlDXd6Guv4Q8Tf8fAJjKdw5C0crhHgUeTVPSFEpm/jjb9w0cBoXXNjP/kyvqVbN2GOpXj2Y5gKvxxoHgLYHzlRwMgpLJmtY9QlcmAJEAl2Bv3JwJEKInFK0aFGmTJnCRx999MDPRowYwYIFC0hISCAhIYGQkBAApk+fjoeHBwkJCXh4eDB9+vTcDltkA3fciSKKRBJZNKwdE9fv4NslNRnW9XsupdRnMN35gpUkZaXzMg5SWBKiAJAcIbLqGeAbBSmPN0IdMwpLrtqTg75vQ50eEH4X9h4FPIALFo5WCJFVeTVPSFEpG7WnPdFEk0QS7WhHHHGZXlOpEkREQPPmMMUdXt0GJwE34ET5+tAnCkxFYfXHcP5dIApja28pLAkBYzDKsNn5GpPppx47dgwnJyeee+45GjVqROfOnbl9O/N9z0qUKEG7du0oWrTov94/e/YsN27coE2bNiilGDx4MOvXrwdgw4YNDBkyBIAhQ4bce1/kP844E0cc5SjHnB4evB4dzPZtlentEsnJpCcZTj++4gv+zkrnUlgSIh25nyckRwhLGAIsUaBbNkSdiODyVY2r7syBbu9C7W6w/S7sS8DYcOOShaMVIq+Qe4nsIEWlbNaMZsQRRwUq0IlObGZzpteUKwdhYeDqClM7w6gNxq96N+CPco7QN8rYM2P1LDj3DsbW3l2Amzk6FiFE+hISEhg5ciTx8fGULVuWwMBAZs6cibOz8wOv0aNHZ9jX6dOnsbe3v/e9vb09p0+fBuD8+fPY2dkBYGdnx4UL8oQxP6tNbWKJpR71+LBtd8b9vIwD+8vQpVEoh/725QVeYBkfcDMri+EeKCztyfb4hRDmkRwhLGEgsFwBzRuiTkdw5Sq4aS8OdHsPavvCtkT45QCpO7laNlghCrmClCeKZHuPAgcciCWWLnShJz35ki95mqczvKZUKQgKgl69YHpPGLccFvU3Ckvby9amwVPRsKoDrP4Ier0D1d4FfIAgoFTOD0qIPGm2xT7ZwcEBZ2dnAFq2bMmxY8eYNGkS48ePf+i+/lnzfD+l1CPHKPKmKlQhiih60pMPGg5izP7zLG8xDg+HtYQcfppnS77Bci7jw0zK85B/D/4pLK3qYJwKFxAGVVrmyDiEyB8skyckRwhLeQoooqBfMyf4NZIrdMC1nBfR3UJouDEFwoJBxUMTT4zDgMpZOGIhLEnuJbJDrs1UUkp5K6UOKaV+V0pNTOPnQ5VSF5VSe1Nfw3IrtpxQmcpEEEFHOvIMzzCd6ehMnjwXKwbr1oG/P8waAAO+hGTAHfi1dE1jKVzxShA4E05PBn7AKCzdyPkBCSH+xdbW9t7XJpOJ
pKSkLD9dsLe359Sp/z/u99SpU1SrVg2AKlWqcPbsWcCY2lq5cuUcGI3IbaUpTTDBBBDAbPtX6XZwPNa2JlyrL+GnS6MYwCwiGcaZrOyyVMbB2JPPpjSs7gTndmV7/EKIjEmOEJYUAKxSoJo0QJ2P5OoVE654Ed99CtTygq3J8Ns+oDNwzcLRClE4FaQ8kStFJaWUCZiLUQFpCPRTSjVMo+lKrbVz6uur3IgtJ5WiFJvZTD/68Tqv8wqvkEJKhtfY2sLKlTBwIMwZDj0+BmttrM7cU7qGUVgqYQeBM+DkW8AOwBspLAlheePHj2fv3r0PvObMmZPhdXZ2dpQqVYodO3agtWbJkiX06NEDgO7du7N48WIAFi9efO99kf/ZYssKVvAiL7Kw/Ec8Hv809g7JuFafQ+TxyfizkF/owxHuPHznZWoZS6dtyxozls7tzPb4hRAPR3KEyE1+QKAC1bA+6nIE164UwQ0f4ntMg5qdIDQF4n8GvIDrFo5WCAH5N0/k1kylx4HftdZHtNZ3gRVAoch6NtiwjGWMYQyf8ikDGchd7mZ4TZEisHgxPP88LBgHnaZAaW0cBPpDqerGE+hSj8HaD+H4JGAnkhCEyB9q1arF2LFjWbRoEfb29uzfb5wUOW/ePIYNG4ajoyN16tTBx8cHgIkTJxIWFkbdunUJCwtj4sQHJnqKfMyEic/4jPd4jzXFl1D9p540b3cLj9rvsvmX2XizjtP48ht/PnznpWsahaWi5Y0ZS2d/zP4BCCGyleQIkZ26A+utwKp+fdTVSK5dtsYVb37rMQ0e84CQFIjfjTygFiL/yIt5QqW1/i7bP0SpAMBbaz0s9ftBwBNa61H3tRkKfABcBA4Dr2itT6bR13BgOMBjjz3W8vjx4zkef3bQaGYwg4lMpBOdWMtaSmWyF5LW8Oqr8PHH0Gcc7JkJ5xRsBtxvXYDVHnDtd+jxGtR6H2gBhAJlc2FEQljGgQMHcHJysnQYeUpa/02UUru11q0sFFKe0apVK71rV/5Y/rWABYxgBC2TW1O6/xa2r6rAsvAl9G3/DPtUC5IJ4nEqPnzHN07CqvZw+xL0CoVqT2Z77ELkJZInHiR5In35KU9kVSjQPQWSjySgS3agTMW/idKbabL+TTgRDj4KGj6R2lL2ahUFm+SItD1KnsitmUpp7RL132rWJqCW1ropxq5xi9PqSGu9QGvdSmvdqlKlStkcZs5RKCYwgW/4hggi6EAHLpDxzutKwUcfweTJsGoWNHoRamhjDWFY8crQOwLK1Yf10+HIROBnwBO4mgsjEkIIkZ2GM5w1rOEX015OrWhHz9EnGNhxMJ+vXEtj/QulcCOCU5l39F+laxgzXItXgsDOcOaHbI9dCCFE3uUFbLGCIrXrov6K5PrForgpX37pORUe6wjBGvb/iHGXkYWZsUKIQi23ikqngBr3fW8PnLm/gdb6stb679RvvwQK5HE1QxnKBjawn/20pS1HOJJhe6Xg3XdhxgzYMB8eGwx1U6ArsKl4RegdDhWbwIYP4ffxwD6MwpIcEyqEEPmNH35sZSvn1Fl2znZh4AfxvNyvO9Nmh1JDn6I2bdnMoYfv+F5hqQqs6Qyn47I9diGEEHlXJyDICmwcHFF/R3LjYnHclC97e74Hj3VILSzFAV2AmxaOVgiRn+RWUWknUFcp5aCUssE47XLj/Q2UUnb3fdsdOJBLseU6X3zZznaucIW2tGUvezO9Zvx4mDsXQpdB+d7QJBn8gdXFykPANqjcHDbNgMOvAr9ipA4pLAkhRH7jhhvRRJOsktk8oR3PLYplylh3xr8WRWl9m8dxZTW7H77jUvZGYamkHQR6wenYbI9dCCFE3tURCLEC28fqoO5GcuNCSdyUL3vuFZaA/bGAL/CXZYMVQuQbuVJU0lonAaMwFuoeAFZpreOVUu8ppbqnNhutlIpXSu0DRgNDcyM2S2lDG77ne6yxxh13IojI9JoXX4RFiyB6PVh3gdZJRnVu
WdGyELAVqrSGzTPg4CvAfsADuJyzAxFCCJHtmtKUOOKopCqxdEgnXgzexIKPmzN88PdoXRwvOrCYyAfWkWfqn8MeSlYzCkunYnIgeiGEEHmVG7DVCorVqI1VShQ3z5XFXfmyq+fbRmEpBNgfg7Eu4pZlgxVC5Au5NVMJrXWQ1rqe1rqO1npa6nuTtdYbU79+XWvdSGvdTGvdQWt9MLdisxQnnIgjDnvs8cabNazJ9JohQ2DlStgVDrfbQ9tEGAx8aVsGAkKhmgsEzYT9ozHqdx7ApZwdiBBCiGzngAOxxNKYxnzh7ccLPy1k46p6PNU5luspNeiLN1+znpSH7bhkNegTASXtYa0PnIrOifCFEELkUW2BMCsoXq0WVlaR/HW2Au1VN37q+RbYtzcKSweigG5IYUkIkZlcKyqJtNljTwwxtKY1fejDXOZmek1AAKxfDwd2weU20PGOcRzeHJtS0CsY7N0h+CP4bRRwCGOy68UcHokQQojsVolKRBCBBx583vJZBh14n10/VqNL8xhOJznzNL34im9IfNiO/ykslaoBgT5wMionwhdCCJFHPQmEW0HJKjWxso7k1pnKdFDd2eE3ySgsBQMHwjF2Jblt0ViFEHmbFJXygPKUJ4wwutGNUYziLd5CZ7KowdcXgoLg+EE41gK8b8HLwIfWJcBvM9TsBKEfwy8jgASMwlLGp80JIXLWsWPHKFasGM7Ozjg7O/PCCy/c+9nu3btp0qQJjo6OjB49Gq2N3wFXrlzB09OTunXr4unpydWrcrpjYVOSkmxiEwMYwNe138T3j9Gcv1CGjo7bOHSnE8N5hqV89PD/5C9pZxSWSteEtV3gRObLsIUQOUdyhMhtrYBIE5SuVANVLJLbp+zoqHoQ5/d66kNqBQe2Az2QwpIQlpdX84QUlfKIYhQjkECGMYypTGU4w0kiKcNrOnaEsDC4dAbiG0O3GzAReNu6OLrnRqjlBWGfwN7hwB9AB+B8LoxGCJGeOnXqsHfvXvbu3cv8+fPvvT9ixAgWLFhAQkICCQkJhISEADB9+nQ8PDxISEjAw8OD6dOnWyp0YUE22LCEJYxlLCsrfUarw/2wKmpNW7tN7LnRh2cYz1omcu1hd1kqUdUoLJWpBet84fj2HIlfCGEeyREZU0p5K6UOKaV+V0pNTOPnQ5VSF5VSe1NfwywRZ37iDESboHz56qhSkdw5aY+H8iPGb8J9haUwwA+4Y+FohRB5MU8UMaeRUupnYDHwndZaqhI5pAhFWMACqlKVqUzlAhf4ju8oTvF0r2nTBiIioHNn+MkJ/H6D98rB7SJF+bDHetSm3rB9DqSMhBbfYBSWwoGquTUsIXJOxBi4kPnpiQ+lsjN0mJ1hk2PHjuHj40O7du2Ii4ujevXqbNiwgWLFimXpI8+ePcuNGzdo06YNAIMHD2b9+vX4+PiwYcMGIiMjARgyZAjt27fnww8/zNLn5KTsyBNKKW/gU8AEfKW1nv6fnw8FZgKnU9/6TGv9VZaDzmessGIWs6hGNV4t9Spt912iRMd1uFT+lm1HyzPA7kPWchkX5lMVk/kdl6hiFJZWe8D6rtBjI9TyzLmBCJGbLJAnJEek7VHzhFLKBMwFPIFTwE6l1Eat9f7/NF2ptR71yAEXIo2BGBO0L2vHRRXB3yc86FyzFyF+q3FfBwRHAaHg5AesA4paNF4hso3cS2QLc2cqTcU4LOCIUipYKdVfKZW1EYsMKRRTmMJnfMYmNuGJJ1e4kuE1zZtDVBRYaYisC70vGHddLxWxJaX7GnD0g4i5sHMwcAJoD5zJ+cEIUYAlJCQwcuRI4uPjKVu2LIGBgcycOfPedNT7X6NHj7533dGjR2nevDnu7u7ExBgnb50+fRp7e/t7bezt7Tl92qibnD9/Hjs7OwDs7Oy4cCHPLmN9pDxx382CD9AQ6KeUaphG05Vaa+fUV6EpKN1vHONYylJ+tI1BRbvj7HMBt+qfE3xoEv58xV76cpS/H67T
4pWhdziUqwfru8Gx0JwJXohCQnJEmh71fuJx4Het9RGt9V1gBca6LJENGmAUlqqWqYqqEMGdo7Xxsgog3G8sVHdLnbEUgsxYEiJ7FKQ8YdZMJa11IBColCoP9AFeBD5XSq0Flmmtw7M9skJuJCOpQhUGMABXXAkhhBrUSLd9w4YQEwMeHhBSF/rth7nV4Y7Jhi+6rsQUNACi50PKcHjiW4zCUgRQPZdGJEQOyOQpQE5ycHDA2dkZgJYtW3Ls2DEmTZrE+PHj073Gzs6OEydOUKFCBXbv3k3Pnj2Jj4+/t+b5fkqpHIs9J2RDnrh3swCglPrnZuG/T6AFMJCBVKQiAaYAKga60HFUKF0aTOHbuAr0a/MKMVzjNutoSCnzOy1eCQK2wxpPWN8DeqwDB5+cG4QQucFCeUJyxIOyIU9UB07e9/0p4Ik02vVSSrkBh4FXtNYn02gj0lCX1BlLJStz2iqcv490wqd2H4L8V+KxTqXOWAoBJ39gLTJjSeR7ci+RLR5qTyWt9RVgCTAfY8pLL2CBUuqwUqpTDsRXqAUQQAghnOIULriwP5N7qzp1jMKSXVVYXw8GHoGvgcEma5J8v4UG/eH7BRDXB/RZjMLSqVwYiRAFj62t7b2vTSYTSUlJmT5dsLW1pUKFCoCRPOrUqcPhw4ext7fn1Kn////iqVOnqFatGgBVqlTh7NmzgDG1tXLlyrk1xCx5hDyR1s1CWlXvXkqpX5RSa5RSaVbalVLDlVK7lFK7Ll4suCdfeuNNBBH8ZXWTfXPb0u29nfR3GcP/1i2hjY7kDh35iUsP12nxitB7O1RoCBt6wh+bcyZ4IQo4yRHpe4Q8kdYd0n/vpDYBtbTWTYFtGMvtHuyokOSJrKiNUVh6rHgllN127v7RAB+rPmz1G516KpyC/cGAPzJjSYisK0h5wqyiklLKSinlpZRahrFuagAwHaiqtXYEXgeWZXt0gg50IIookkiiHe2IIy7D9jVqQHQ0ONaBVU4w+AB8C/SxKsJdnyXQaAj8sBBie4E+h1FYkgc4QmSH8ePH39s47/7XnDlzALh48SLJyckAHDlyhISEBGrXro2dnR2lSpVix44daK1ZsmQJPXoYM/q7d+/O4sXGv4kXL1587/28JhvyRLbdLGitF2itW2mtW1WqVOlhh5KvtKY1scRSUpUkfFIH+i4MZbT/IKbOX0dD/RtlaEcEJx6u02LljcJSxSaw0R9+35gzwQtRyBTmHAHZkidOwb+m7dvzn/0ctNaXtdb/rP/9EmiZVkeFKU9kRU2MwlLtYhVR9ttJ/KMRvlZPEeT3olFYCvmnsNQLKSwJkX3ya54wd6bSGWAW8AvQUGvto7X+Vmt9G+5NZz2Q7dEJAJxxJo44KlIRDzzYxKYM21epApGR0KwZfNsUhuwxttTzszJx22shNBkGPy6GmJ6gLwDuwPFcGIkQhVt0dDRNmzalWbNmBAQEMH/+fMqXLw/AvHnzGDZsGI6OjtSpUwcfH2PZ0cSJEwkLC6Nu3bqEhYUxceIDh93kFY+aJ7LtZqGwqUc94ojDUTkS+HRXBgQv490XuzF2Uhh2+hyOtGXLw6boouUgYBtUbg6bAiBhfc4EL4S4p4DnCHj0PLETqKuUclBK2QBPAf+qeiul7O77tnsm/YkM2GMUluralkc9to2khGZ0t+rPRr/hUKODMWMpPgijsPSQ+/gJIbIkr+YJldb6uwcaKdVKa70r2z/9EbVq1Urv2pXnwsoxF7lIF7rwMz/zBV/wLM9m2P7GDejWzVgSNygGlrY1zn7bqFMosX0U7JsHLZ6C9kGgymPssVQrF0YiRNYdOHAAJycnS4eRp6T130QptVtr3Sq3YnjUPKGUKoKx/4UHxuluO4H+Wuv4+9rYaa3Ppn7tB0zQWj+ZUb+FKU9c5zp++BFBBP32zGT1E6/SfcAvfPGNF0rdJZwgeqe5/UgG/r4Ogd5wfhf4roB6vXImeCGykeSJBxWEPJHaRxdgNsYp
oQu11tOUUu8Bu7TWG5VSH2AUk5KAK8AIrfXBjPosTHkiK84DHZPhQNJ19B8+mBrsZFXyN/ivXwQnwsFbQ6MuGHss2WbcmRB5gOSItD1KnjB3ptLWtN5USuXpIyYKmkpUIoIIPPBgGMOYxjT0A6tD/l/p0hAcDJ07w5J2MGArRAJeyoobHnOhxcuwZwWEe4G+ijFj6WgujUYIUcA8Up7QWicBo4BQjCfLq7TW8Uqp95RS3VObjVZKxSul9gGjgaGPHnbBUYYyBBNMb3rzXYvxdD88juA1jfHvHMudlHL44MEStmaQNdJgWwZ6hULVx2FzXzi0OqfCF0IUfI98P6G1DtJa19Na19FaT0t9b7LWemPq169rrRtprZtprTtkVlASmasCRJmgSZEy4BhK8oEn6W0ayqqeg+AxD2Mp3G9BGHssyYwlIQojc4tK1v99QylljfGUQOSikpRkE5sYwAAmMYmXeIlkktNtX7w4bNgAfn6wzAv6rIUfAQ+luNL+E2j1KuxdDds8QN/AKCz9kVvDEUIUHI+cJ+Rm4dHZYst3fMcoRrHW4WNcjw0k/jd7vFp8z4UkR56iKwtZScpDdVoaeoVAtTawpR8cXJlT4QshCja5n8inKgKRJmhZpBTUDyZlvytPmZ7h255PQc1OEPpPYUmWwglRGBXJ6IdKqRiMjVKLKqWi//Nje8hk12iRI2ywYQlLqEpVZjGL85xnKUspms6xnra2sGoVPP00LOsFAYtg42DooBRhbjOobGUNP30AyT2gcwxYtcdYCueYa2MS4mForfPlcco5wZwlzDlJ8kTeY8LEHOZQneq8XvF12vx+kROtA3FziCQsoTtPF+3HN1xmEC9iY26nNqXAPxjW+UJQf9DJ4NQ/J4chxCORPPH/JE+I7FAOCLcCb0ryg9MWdHwPBjZ+jsQenzNkg4LQMNBboEkvIBBZCifyMskR//aoeSLDohLwFcaJPK0xTqe/97kYS2zDH+nTRZZZYcVHfIQddrzKq1ziEutZTxnKpNm+SBFYvBhKlIAvhkL3PyFsJLgrxfZ206hmsoEf3oUUX/DeAVbuGIWlerk5LCEyVbRoUS5fvkyFChUKfTLQWnP58mWKFk27oJxLJE/kQQrFRCZSlaoMKzEMp73tuesZxJNVQgk/8RTPlhnJEi7Si8mUSPPgvTTYlAT/IFjXFYIHGYWlhoNydiBCZIHkif8neULOrX2wAAAgAElEQVRkp9LAVivollKcyEYb0b/5M7TJCBJ7zGHYRgVbQ4Et0CQAWIMUlkReJDni37IjT5i7UXeDvLjMQDbWMyxnOUMZSiMaEUwwdtil21ZrGD8eZs0C7/fh+4lQRcF2oOaOaRA7Cep7g88uMFlj5PkGuTUUITKVmJjIqVOnuHNHjrAFIzHa29tjbf3vVQUW2IBV8kQeFUwwAQRQKbkK5fuH8utaB7afeA43u0WsZhQd+ZQKZq+GBxJvwfruxgatXguh8dAci12IrJA88W+SJzImeeLh3QZ6psBWqzvwSwA03cLnSR8zYsNWOBYCnkDTrkhhSeRFkiMe9Kh5It2ZSkqpQVrrpanfuiilXNJqp7Ve+DABi+w3gAFUohL++OOCC6GEUi+dGUZKwcyZxibeb78BHf+EPdPAVUH4k2/iaLKB6NcgxRN894KpPcaMJdkhX+QN1tbWODg4WDoMgeSJ/MIHHyKIwNfky18rXHhyXBDu1RaycX9Fejt9RBCXaMpi7M1dDGddHHpugg09IfQZSEmCpsNydhBCPATJE3mH5ImCqRiwwQp6pxRlc9O18GsfXmwylrvdP+TlTQrCgoHN0FSWwom8R3JE9sto+Vs/4J8kkN78dg1IEsgDOtOZSCLpQhfa0pYtbOFxHk+zrVIwebKxFO7VV6Hdn3BwDrgp2NZ6PA2trCHyFdjUAbruhyLtMWYsNcrNIQkh8j7JE/nE4zxOLLF4KS/2zGqPp30g3RvO5JvoSgx1nUAkV7lFIPUoYV6H1sWg
5wbY4AdhzxlL4Zo9n7ODEELkR5InCqiiQKAV9NM2rG2yGn7pz5imE0jsNpVXNykICzL2WGomhSUhCjqzlr/lVTJd9UEJJOCFF+c5zxrW4INPhu2/+AJGjIDWQ+H4V5BsBWGA889zIXwUOLhB98NQJBljkVyTXBiFEOJR5fayhrxK8sS/neMcPvjwm/4Nr9Vfs6XvYGasXMjY3s/xs2qNiS00p4L5HSb9DZt6wZEt0PEzaD4y54IXQmQryRMGyROPJgkYrOE7lQS/DoImK3g/8R1e37zTyA0egHMXjMKSRff2EkI8JHPzRLqbKCilrMx5ZW/Y4lHVpS5xxFGPenSnO0tYkmH755+HJUtg9xKwewpsU6AD8FPzkeC5AI7GwPo6kGgCOgL7cmMYQoh8QPJE/lOVqkQRhbtyZ0ufIfhGf8hrfZ9m4uxAGuu9FMOV7zlpfodFbKFbINTpYTyI2PNpzgUvhMh3JE8UfEWApQqG6iLQZCn8NpA3rN/h3a7N0LW7Gc+kfw4C/AHZw0aIgiijX+JJQGIGr39+LvKYf24a3HBjCEOYyUw06c9IGzgQVq+G/RugTFcokwydgJimzxmbsB6Pg3U14a4NRmFpb24NRQiRt0meyIdKU5ogguhHP7a4TqTzgTF8MqE7L7waSnV9msdoSwgPsZduEVvotgrq+kPEGNj1cc4FL4TIbyRPFAIm4GsFw3URaLwIfnuad6zf561uTmjHnsYuGnuCAT+ksCREwZPRnkqye1U+9s9NwxCG8BqvcYYzzGIWVunUEf38YONG489qHaHKNvCyho2Nh9LJZA3Bg2FtC/A/CzYdgW1Ai1wdkxAiz5E8kU/ZYMMyllGVqnzS4BNcT5xldcOlnD8VxdIV3rRS7VhLEP7p7M33AJMN+K6AoAEQNc7YvPvx13J2EEKI/EDyRCFhBcxXUFSbmNP4K4i3YVqjGdzu8jIfBfujItZCSgi06gmsR5bCCVFwpFtU0lofz81ARPazxZZv+ZaqVGU2sznPeRaxCJt0Tvjx8oKQEOjaFcq5QM3voastrHEaQFcra9jSH9Y0hV6XwNYDY/elQr8UX4hCS/JE/maFFR/zMdWpzqtVX6XFsYv85LweH7dY1kd2prOpI8tZS386o8zp0GQNvt+CMkHMBEhJhCffzOlhCCHyMMkThYsCZisohhUfNpoH+234uOGn3PUewRwVgIpaAzoUWvfAKCwVs3DEQojskG5RSSm1QGs9PPXrpZD2+imt9eAcik1kAyus+IRPsMOOiUzkIhdZy1pKUSrN9m5usH07eHtDYkuouxP8isG39fvQ28oaNveF1U4QABTtBGwFc59kCyEKFMkTBcM4xlGVqgwtPZTaB1w56xaMa8NYwn7zprd1VxaxhCE8leF6+XusikCXpcafsZOMGUsub+f0EIQQeZTkicJHAR8AxVC80/BTOGDLZ04fccf7WRaovqjolZCyFZ7oAWxACktC5H8Z/Rvx6H1f/w78kc5L5HEKxQQmsIhFRBBBe9pznvPptm/dGiIjIeUSnG0MjW/CU8DSun7QPRAuHYRVpeBWWcAT2JFLIxFC5DGSJwqIAQwgiCDO2B6FWBesna7yeLUoDt1qwxD6s5jPuGtuZ1ZFwHsRNBoCP7wDsZMhH580K4R4JAU3T8jvtXQp4G1gOgqcZsDBN/nK+muGdrYhpUE/+B7YEQb0AG5bNFYhxKNT2oxfiEqpqlrrc+a+n1vkCNCHF0wwAQRQlaqEEoojjum2TUgADw+4kQyO+2FPGZgPDD8aAht6QjkH6H0bil8BQgCX3BqGECITuX1UtOSJgmEPe+hCF/5OuUvNUZs5uLAFEaf70abCepYxmZ68Q0nzFsOBToGtw+G3r+HxidDufVBmXiuEyHGSJwxZyhO3b4OvL4waBf7+ORNYAfE/YDTA4SlQbzJ97vblu+1FsNq/3Lh1aNMJY8ZScUuGKYRIg7l5wtwjPA+n8/5+80MSeYEPPoQTznWu44IL
u0g/idatCzExUKk4HKgDj1+G54FPHbzBbzNcOw4rbeBmRcAL47GDEKKQkjxRALSgBXHEUcmqIofmetD8nVBcq6wm9OizDOQ9QhjJJZLN60xZQecF0PR5+Gk6RL8mT/aFKNwKTp5YuhQiIqBXL3jnHUhJsXREedZLwAJA1X0Lfp/OKpuVBHS8TXKjwRAHxG0D3RW4ZdlAhRBZZm5R6YFHi0qp0oD8Bs2HnuAJYomlOMVpT3u2sjXdtjVrQnQ0OFSFvQ7gchbGAB/U7AT+QfDnGVil4M/KgDcQnVvDEELkLZInCoja1CaWWJqqpvw0wR+XxV/jXftLvt01kQDmsZN+nOBv8zpTVtBpHjiPhF0fQeQrUlgSovAqOHniuedgxgxj9uW77xrFpT//tHRUedZzwBIFqvYEOPox62zX0q3jFZIbD4EfgLiI1MLSXxaOVAiRFRkWlZRSJ5VSJ4BiSqkT97+Asxjb9ot8qD71iSMORxzxxZflLE+3rZ0dREVB43rwowO0Ow5vAG/VaI/2D4GbF2BVCtyoCvgAkbk0CiGEpUmeKJgqUYlwwvFW3sQMeAG3yLcZ0Pp9Pt08Cx9WcxxfDmHmDZRS0PF/0GIM7PkUwl8ylsYJIQqFApknlILx4yEoCMqUgfXroU0b+CN/bg+VGwYCK63AVPMVODaXYJvNeLc/R1KTp43tWb+PAO2LFJaEyH/SPf0t1UCMpwpBwKD73tfAea31oZwKTOS8alQjiih60pOBDOQ85xnL2DTbVqhgnArXtSvEOoJbPEytB7fs2/FRQBgq0AtWloE+1aBMF2Az0DFXxyOEsAjJEwVUCUqwgQ08z/MsdJ+Cy/4zvNJsPufnVOK955/mN9WB3QTRksqZd6YUtP8YrKxh10xISTRmMClzJ0wLIfKxgpsnvL3hp5+gRw+IjzdOu1m5Ejw9LR1ZntQbKGoF/jVeJOmkDduqD8fDvQPbrJ7F+qevISUK3HxABQElLR2uEMJMGRaVtNZRAEqpilprWehaAJWhDCGEMIhBjGMcZzjDDGZglcYktjJlICQE/PwgrD6474WPm8Gtak8yt/d2rNZ4wsoU6G0P5XyBTUCnXB+TECL3SJ4o2IpQhK/4impUY6rTVFoeP8esRis5f7ICn00N4JRqRzShuOGQeWdKgduHYLKGH9+HlCTwXABWppwfiBDCYgp8nqhXD378EQYOhE2bjELTRx/BmDFyOEEaugFBJuhWbRh/n7MhusrTuLVLJEo9h82uL0HHgLs3qGCglKXDFUKYIbOZSgBorW8ppZwBV6Ai962J1lpPzqHYRC6xxZbv+I4qVGEWszjHORayEBtsHmhbooSRL/v2hQ3O4B4H89vA7aqt+Lp3OKY1nrAqGXo/BuW7YZzm0DnXxySEyF2SJwouhWIKU6hOdUbajaT+MQ9WO2/m/KntLFvkSwnVliBC6EJTMzpT0HaqMWPph3eNGUte30hhSYhCoEDnidKljSVwb78NU6fC2LGwdy988QUULWrp6PIcTyDUBD6VB3P7og07Kg7EpW0isVYvYLt7PqTEQQcvUCFAaUuHK4TIhFnzzpVSw4FYjPVME4AmwDjI4Dz6B/vwVkodUkr9rpSamEG7AKWUVkrl2hGnAkyYmMMcpjGN5SynG924yc0029rawurV0L8/RLmA2zZYDAyo0pzEPhGQnAyrrsHlWkB3ICQXRyKEsITsyBMib3uBFwgkkKOl91H+QFt+OlgVb88YTClWuODGGmLM60gpcHkH2k6B/UsheJAxa0kIUaAV+DxhZQVTpsCqVVC8OCxZAm5ucPq0pSPLk9yB7UWgZMWn4Moqdpt207rNTm63HAk/a9i+A7QXcN3SoQohMmHuZgavAd5aaz/gduqfAUCiORcrpUzAXIxdnBsC/ZRSDdNoVwoYDfxoZlwiGykUb/AGC1nIdrbTgQ5c4EKaba2tjVz53HMQ7Qlt18FKIKBSE+72jQSsYOUluOgA9AC25N5AhBCW8Eh5
QuQPPenJNrZxw/YixLpwrtRd2jaJ41qiHb50ZikbMftstycnget0OPgdbOkPyfJXRYgCrnDkid69IS7OOEJ5505o1Qp++MHSUeVJbYCoIlCmnD/cWMuvVr/S4okYbj0+GvZpCPsRdGeksCRE3mZuUamy1vqfR5ApSikrrXUwxrJYczwO/K61PqK1vguswKg0/NcUYAZwx8x+RQ54mqdZz3riiactbTnCkTTbmUzGrN6xYyHWH55cChuBbhUacrtPFJhsYdUFOF8H8MPYY0kIUUA9ap4Q+URb2hJLLMWK2HAp0A1rj0O0fiyGI7eb0R8/FrMQs+cdPT4B3GfB4dWwuS8k383J0IUQllV48kSzZrBrF7RvD+fOGX8uXGjpqPKkFsD31lChdFe4tYmDKoGmrcL488kx8KuG0J2Q4glcs3SoQoh0mFtUOqWUqpX69WGgh1LKFTD3X3/VgZP395f63j1KqeZADa31ZjP7FDmoK13ZznaucIU2tGEPe9Jsp5SxF+Hbb8OOwdD6c9imwad8PW72jQLrkrD6DJxzBHph7LEkhCiAHjVPiHzECSd+4AfqWNXm90+7UG1sCE9U2sauK54M5Vm+Yzp3zJ2z1GosdPgUfl8Hm3pD0t85G7wQwlIKV56oWBG2boWXXoK7d+HZZ2HECLgjz87/qzEQZw1VS3SGxCD+4ASNW27huss4iNcQugtSOgFXLR2qECIN5haVZgBOqV+/BywDwoF3zbw+raMP7v1rUyllBXyCsa46446UGq6U2qWU2nXx4kUzP/4/hg6F996Dq/KLKSNtaMP3fE9RiuKOO9vYlmY7peCdd2DmTNg5EprNhO81eJatw42+UWBbDlafhjN1MWY5r83NYQghcsej5gmRz1SjGtFE46pc+WX8IBp8/TmuVTew9Xg/BvE6mxjLdVLM66zFaPD4HP7YCBv9IUluuoQogApfnrC2hjlz4OuvwcYG5s+HJ56AgwctHVmeUw/4wRpqFG0PKVs5kXyehs3XcrXtONivIfhnSPEArlg4UiHEf5lVVNJaL0qdnkrqn+WAclrreWZ+zimgxn3f2wNn7vu+FEaROlIpdQx4EtiY1mbdWusFWutWWutWlSpVMvPj73P4MCxebEytqVUL3nwTslqcKgSccCKOOGpRiy50YQUr0m376qswbx7snQgNJ8MeDe3L1OJK32goXgnWnIBT9YE+wJpcG4MQIudlQ54Q+VAZyhBMME/xFLv7TqBp5Hi86yzmuz0v05vZfM9gzpk7CcF5BHgugKPBsL4HJN7O2eCFELmqUOeJZ54x9lVydIRffjH2WVq82NJR5Tm1MApLtW1cwLSdM4nXaOC8ikuu4+FgCmzZB8kdgcsWjlQIcb90i0pKKav0XkAScCv1a3PsBOoqpRyUUjbAUxjb7wCgtb6uta6ota6lta4F7AC6a613ZXlk6alXD6KioFMnuHED3n/fKC6NGwdnz2b7xxUE1alODDG0oQ396MdsZqfb9oUXjA28938AtcfCAQ1upWtwvk8UlKwOgUfhpBPGX4GVuTYGIUT2y+Y8kbfcLZirMXKCLbYsZzljGctul//R9EB/+rt8wOyg9/FlOYfpzhH+Mq+zps+B10I4Hgbru0KimdcJIfKkAp0nHlaLFrBnj3F88l9/GSsnhgyBm2mftlxYVcdYCudkagVFI7jw923qN1vGeffX4HAKbPkVkjsAlywdqhAiVUa/xJMwTmNI7/XPzzOltU4CRgGhwAFgldY6Xin1nlKqe9bDzyI3NwgLM54Y+PrCrVvw8cfg4ACjRsGJE7keUl5XlrKEEoo//rzCK0xgAinpLGsYOBBWr4Y/5kL15+CoBtdS1TndNxLK1IK1f8CxhkB/4NtcHIUQIptlW57IUxITjafIY8bIMmkzWWHFrNT/7au7hgYnvBg/9AVeX/AlbXUYV/HgF3NvABoPBZ8lcDIS1naBu3/mZOhCiJxVMPNEVpUqBcuWGcvhihUznsS2agX7
9lk6sjylCsbm3c40g+JRXLmlqd/kG053eA0SUmBTPCS1h3ROqRZC5C6lddobaSqlaprTgdb6eLZG9BBatWqld+3KhslMP/8MU6fC2tS9fqytjScHEydCnTqP3n8BkkwyL/ES85jHIAbxNV9jjXWabUNDwc8PKveAK8uhvBVE3rpIrTWd4Moh6FEPHOKBRcCg3ByGEAWeUmq31vqBJcTZ/BkFM0+Eh4OnJ6SkGButTp0Kw4YZR16KTK1gBUMYgt1NRy61DqFP7918/u5THFcOXCGUNjxmXkcHV0LQALB7AvyDwbZ0zgYuRCEjecKQbfcTD2v/fujbF377DWxtjQfcI0YYm5UKAK4D3omww+owXPegZIlbxB94jse2fwi1raBbPSgSiVGGEkJkN3PzRLozlbTWx//7wjjB7S5w8r738r/mzSEw0Pil3r8/JCfDV19B/foweDAcOGDpCPMMEybmMpcpTGEpS+lGN26S9rRdLy/4P/buO6zK8g3g+PdhCYhbxAXiwpVb3LhRXIg4MUfubVla2fiVZZmpZZp7j9xbERcOHGhirtwLBfceKLKe3x8PpiniSTjnMJ7PdZ0reM8577nfynP7PuO+N26Eu/6QyQcexEINe0fOtNkGOUrCmtNw/j2gC2pgSdO01CS584QQwksIcVoIcU4I8Xkir2sthJAJ1d1LFvXqwcGDULs23L6t9vVWrKi2Tmtv1Z72bGQj9xzCyXikGmuCCuPbdTN55DWcqc4Wjht2ouLtoNkSuP4nLPeESN1OWtNSmzSbJ5JDyZKwfz/07AnPnkH//tCmDdzX33XPZQG2WkOdODfIHsTjR5kpUXIK5z0/hQtxsOYMRNcCdAkTTTMng/YwCyEyCyHmAZHAFeCpEGKuECKLUaMztVKl4I8/1CBS165qpmD+fHW8ZUsICFADTumcQPAVXzGd6WxhC/Woxy0SLnZeqxYEBsLT3WDTEKJiwMMuB8fbBIJjWVh7As6WAboBM0x6HZqmJZ+k5gkhhCUwEWgMlAT8hBAlE3hdJmAQsD/Zgk9IuXKwfTssXQouLmprQp060LYtXEob8ynGVJe67GIXVjaS6EAPjkZL6nsGkSEulop4sIa9hp3IrRU0XwE3D8Gy+vBUd/3RtNQqzeWJ5GBvD9OmweLFamvcihVqsnt/yg/dVDICAdbgFVMQcgbx5IEjpYpP4nSjzyBUwurzEO2B6gulaZo5GFoYbzzqz/R7gB1QGrCPP572uLnBrFlw9qyaoba2htWroUkTVXfp22913SWgBz1YxSqOcYwa1OAiFxN8nbu7muAXxyHWA0QU1LLNxqHWW8DJHdb9DafLAD2BqSa9Bk3Tkk1S80Rl4JyU8oKUMgpYDLRI4HXfo9pSG7/nvBBq1vjUKfjuO1X/YtkyKF5cdRB98sToIaRmZShDMMHkt8zLrQUNuVPpFFXL7OVhdE48acBi1pPwBvxXFPGGFqvhznFYVg+e6I6tmpZKpb08kVzatVPlOCpWhNBQqFkTRo9W27A1bIE1VuAb4wyOQTy770zpYuM51ngohElYGQpRHoC+P9M0czB0UMkL6CSlPCOlfCalPAN0jT+edrm6wuTJ6sv9xx+hUCEIC4Phw9VzjRurOkzR6ae+4Ku88WYrW7nNbapTncMcTvB1pUtDUBA4XIEn7pAhEupkyMK+1psgbzXVyeFkOaAPMMmk16BpWrJIap7Ih9oS8Vx4/LF/CCHKA85SyvXJEbDB7Ozg66/h9Gnw84PISDXIVKyYml1+Q21CDVxwYTe7cRfuhI5sj/WANVQusJvQJyVpjQ8LmINB638LNQGfdXDvNCytCxE3jB26pmnJL8XkCSFELyFEiBAi5NatFDJQXbgw7N0LgwdDTAx8+ik0bKjuQzRsgCVW0CE2D+TaQfRdN8q7jeNgk4/VurcVYfDMAwg1b6Calg4ZOqgUCTi+ciwn8Cx5w0mh8uSBYcPUyqXAQGjfXq1e2rgRWrWC/Pnhs8/U8+lQ
DWqwm91YYUUtarGNbQm+zs0Ndu0CpydwvwxkfgINbDKxo9VGyF8bNhyB4+WA/qTVRXCaloYlNU8kVJn0n9Ga+JbTvwKfvPVExrpZcHaGhQvVF1n58hAergaZatVSbaK1BGUnO1vYQkvRktN9BpNzxs9UyRNIyJ26dKIri/mZZ4asWXL1hJYb4MFFWFoHHusaGpqWyqSYPCGlnCalrCSlrOTo+GpIZmRjowp2r1unGkUEBsJ778HEiXrVEmAFzLeEXrG5wGkbsXfKUrnoOPY0/RCuAyuuQGRN4LyZI9W09MXQQaUZwBYhRB8hRGMhRB9gEzDNeKGlQBYWqoDrokVw9Sr8+qsqsnfzJvz8sxo1qVNH1WWKTD0rbpNDSUoSTDAuuNCYxixlaYKvK1BArVgqZAO3SkLOR9DYOiObWq6HAg1g4xE4Wh74EPX3Ak3TUomk5olwwPml3/MDV1/6PRNqy8QOIUQoUBVYm1ARVqPfLNSsCQcOwPTp4OgIu3erltBdu+oZ5Teww46lLKU//TnZZCz5d/ahtusKAi62430+Yy1DeIQBN0wudaHVRngUDktrq39qmpZapJg8keI1a6a6w7VtCxERMGCAugc5rwdLLIApljA4Ljs4bSXuVjU8io4jsFl/uCFg+XV46gGcMXeompZ+SCnf+kDNDHQDtgIn4v/ZHRCGvN9Yj4oVK0qzi4uTcu9eKbt2ldLeXkq1EULKHDmk/PJLKa9cMXeEJnVX3pU1ZU0ppJDj5fg3vu7WLSkrVJDSKreUBe9KaS2lXB39VMoVjaUcg5SHykn1n3m0qULXtDQFCJEm/D5Oap5ATUBeAAqiVrkfAUol8vodQKW3ndfoeeL+fSk/+URKKyv13W9tLWX//lJevWrcz02l4mSc/En+JJHIIpfqSpHlrpx/YICUEukvO8obMsqwE4XvkXJ8ZimnF5LyQahRY9a0tErniRR0P5GY5culzJVL5Rh7eynHjZMyNtbcUZldnJTym1gpkY8l1zwlErnuXH8pf7WRcq6llBFOUsqT5g5T01I1Q/OEoSuVLKSUs6SUDaSUJeP/OTP+g9I3IaBaNVXY+9o1mDJFbYu4cwd++EEtzXn/fTWrnQ5kIxub2UwLWjCIQXzBF8gEtjXkzAnbtkGVwhBaGFxuQisrW5Z4r4JCzSHwMPxVHhgK/GTy69A07T9LUp6QUsYAA1Cz1ieBpVLK40KI74QQ3sYMPEmyZIExY1Qx744dVR2MiRNVDb6hQ+H2bXNHmKIIBJ/xGfOZT6jLLvKH1qZr688YvXYETVjASbwJJeLtJ8pXHVpvgci7sKQ23L9g/OA1TUuq9JknkqpVKzh+HDp0UA0iPvoIatdOt2U3nhPAtxYwWmaE3GvhRnOaF57IshYfwD0rWHYHIjyA42aOVNPSPmHI97gQ4hawDPhDSrnH6FEZqFKlSjIkJMTcYbxOSlVob9w4Vcj7+R7oatVUIvD1BSsr88ZoZLHE0p/+TGUqH/AB05iGNdavvS4iAnx8YOs+KHoKzuWDWbFRfODvB2dXQu3yUOkQ8E38I6Ht9JqmvUoIcVBKabIl/zpPxDt+HP73P/XdD6pF9ODB8PHHagBK+8cWtuCLL3aR2XjsEUDfrnv5uW8fDgt3bPGnFDnefpIbf8FyT7CyhzaBkN3N+IFrWhqh84SSYu8nErJmjepMff062NrCiBHq3sLS0tyRmdVUCX2Ihpvvg9My5oR2pfOaxYjMUdAmCzhsB8qYO0xNS3UMzROGrlRqCDwGFgkhQoUQI4UQpZMUYVomBNSooVpPX7igZquzZoXgYNUytGBBGDUK7t41d6RGY4klk5nMt3zLHObggw8RCcw+Z8yoahF614OzRaHIRehqacPkpouhWDvYeQj2VwSGA5+CYQ2oNU0zPZ0nAEqVghUrICQEvLzg0SPVKa5gQfjpJzWSrgHgiSdBBGFpG41VcE1mbypO52HLKSUPY0VN9hvSGtqpArTdDrHPYEktuP23
8QPXNO1d6TyRVC1aqMmLTp1U/dYhQ1Sdv1OnzB2ZWfUWsEBYIxwXwq3OfOA6m8k+LZGPMsDSh/CoNryhQ7WmaUln0KCSlPKQlPJTKaUL0AXIBgQKIY4aNbq0oEABVcQ7PBwmTVItqMPD4fPPVde4Pn3g5DgaHKoAACAASURBVElzR2kUAsE3fMMUprCRjdSnPrd5fSuIrS0sXw7tW8DZ4uB2EvpZWjO2yQIo8T7sPgh7KoEcg1r1rLtfaFpKo/PEKypWhIAA1SmuVi24d091ES1cGMaPh2fpo3nq25SnPMEEk9fKiYhVnmyLiKXZ+5vJLa+Rj+oEGrJtwbEMtNsJwgKW1IEbh4wet6Zp/53OE8kke3aYN0/NyubNC/v2QblyasI6Jsbc0ZnN+8AKCysss8+G273pX2AhY32aIiMywJLH8LA2kD7KkWiaqRm6Uullp1H7mMMA12SNJi3LmBH69lWdHDZsgEaN4OlTmDpVdZCrV08liDQ4i92b3qxgBYc5TE1qEkroa6+xtoYFC6BHZzhTBooegiEWVnzvNRf5XjfYFwJB7iAnoWo6xpr6MjRNM5zOE8/VrAk7dsDmzeDuDjduwIcfQtGi6vtfDy7hiit72ENFiwrcGN+WMxUOU6tOEDaxsVSgJuswYJdMjhLQLgisM8KyunB1n/ED1zQtKXSeSKpmzdSqpa5dVS75/HOVZ4KDzR2Z2bQE/C0tsM46Ge4MZqjLMr5rUR8ZaQdLnsD9usBuc4epaWmOQYNKQoisQojuQohA4DxQBxgF5DJibGmThQU0bgwbN6oBpj59wM4Otm+HLl0gd27o1g2Cgl7UYkoDfPBhK1u5wQ2qU52jvD4pZWkJ06bBh/3hbCUouhf+Z2HJFw2nI8sNgJADEOgOcg7QAYg29WVomvYGOk8kQgjw9IT9+2H1aihdGsLC1Pd/oUKq/l4anFD4L3KQg0ACaSFaEDbkQx4OWECVMru5H+VIAxqwlHVv3/ycrQi0DwK7nKrOUniQKULXNM1AOk8YQdasqllQQIDaHXH4MFSvDt27w61b5o7OLBoBgVYC28xj4d5XfOuyls98aiCjMsGSZ3DHE9hm7jA1LU0xdKXSVcAPWAjklVK2lFIulVJGGi+0dKBECZg8Ga5eVTPW1avD48cwe7bq6lCkCAwfDhcvmjvSZFGTmuxmN5ZY4oEHO9jx2muEgF9/ha++gLM1ochW+ElYMKDeeGSloXDkAGx2h7ilQCtA/y+oaSmEzhNvI4Sqh3H4MCxeDGXKqO//wYPB1RV+/BEePDB3lGZjhx3LWU5f+hLaZjTM+JrKRbdx7vF7+NKShcx6++bnzAXUiqVM+WGFF4RuMUXomqYZRucJY/HyUpPVX3yhlv/PmqVKbkydCrHpb3W/BxBkLXDI+D08+JHR+TcyoGUFZFw2WBINt7yAAHOHqWlphqGDSoVfavuZfv/GayxZs0KvXrBnD5w+rRJC/vxqMOnbb9VMdt26MGeOGnRKxUpRir3sJT/5aUQjlrHstdcIAd9/Dz+NhHOeUGQVTBKCzrVGEVftG/j7AARUgth1QHMwpP20pmnGpvOEoSwsVNOGw4dh7VqoXBlu34Yvv1QzzV9/rX5PhyyxZCIT+ZEfCa22CPvAzlQrtprgm/V5n+4sYSTP3rZmySGvqrGUrSisbgbn15kmeE3T3kbnCWOyt4cffoC//1arY+/dUytiq1ZVzSPSGXcg2Aay2Q6Dx+OYlDeQLj7FiLN0hKVxcL05sMrcYWpammBooe5rxg5Ei+fmphJCaChs2QLvv6+2x+3YofZM584NH3ygir/K1NkJzRlndrGLylSmHe2YwIQEX/fZZ/D773DOFwrPggVC4Fv9W6I9foJTIbC+IsQEAl7AQ5Neg6Zp/6bzxDsQApo3V0VWt2yBOnXUSqURI9TKpSFD4Fr6+9cqEAxjGPOYx9Uiu8h8uDH1K09hzZkO+PEF/nzEo7etWbLPBW22Q84ysNYXTr8+gaFp
mmnpPGEibm6waRMsXQr58qkBpcqVVW3XNNx5OiHvAfszQC6rD+HJDOY77cG3pTNxNnlgGXClNbDYzFFqWur3LoW6NVOwtIQGDVT16mvXYPp0VfA1IgLmzlXdhEqXVqMuqXC7RHays5nNtKAFgxjEF3yBTGD2uX9/tUDrYk9wHQtrgMaVP+NZ3fFw7iCsLQvRwUB9IH0lSk3T0ggh1Pf99u2we7equxcRAWPHQsGC0K+fmmhIZzrRiQ1s4JHjRTKdqEWbdp8zbfdgfBnPHt7nJm8pcm6XHdpshdxVwL89nFhgmsA1TdPMTQho00Z1mB4yRN1XTJmitsTNnp2m6ra+TVHggC24iO4Qs4g12Q/h2TIbsRldYDlw2Q+YY94gNS2V04NKqUGWLNCjh1qddOaM2h7n5KQ6PgwcqNqJ9ugBBw+aO9L/5Hn9jN70ZiQj6UpXohMovt2lCyxbBleGgfNXsENCnQoDifCcBhePwKqSEHUEVe/xhqkvQ9M0LfnUqKE6hIaEgK+v6ugzebLqFtexoyr2nY544skudmFjH4PNnx589IM3wxePwovFnKUZoTxK/AQZskCrjeBcBwI6w9HpJolb0zQtRciUCUaPVtuta9dWW6u7dQMPDzhyxNzRmYwLEGIHJWPagVjNtixnqNHCmugshWGVBVzsCkw2d5ialmrpQaXUpmhRtT0uLEwta61XD548gZkzoVIltbx11ix1LBWwxJLJTGY4w5nLXFrQgogEaiT5+sK6dXD7F3AaCH9JqFamJ/cbz4PwE7CiKDw7B9QGwk1+HZqmacmqYkVYsUJNHnTsqGaV//hD1caoUkX9HBVl7ihNohzl2Mc+nK3zEOPfiF+3udJn9ByqyO08oC7HuZn4CWwcwGc9FPSCLb3gr/GmCVzTNC2lKFVKrYZdsEBNTO/dCxUqQM+e6p4iHXAE9tlD1cimYB3AfocrVGwRzbPsxWG1gLP9gF/NHaampUpCvqEujxCimyEnkFLOStaI/oNKlSrJkHRYeO41p0+r7g5z5qiifKBWN3XpAr17Q8mSZg3PUNOZTh/6UJGK+OOPI46vvWb3bmjaFGybwqMFkM8C9p5ZgaN/e3AsDK2uqHbSBAKFTH4NmpZSCCEOSikrGfkzdJ4wlUuXYNIktRX6+fe8k5Mqwtq7N+TJY974TOAud2lBC3azm5w//kKNa24sHN+GayIft9lElbd958c8A38/OLcKPH6Cyp+ZJnBNS6F0nlDSTJ4w1IMH8L//wcSJqjNchgxqm/WwYeD4+t+905pngE8EbMy4DyIbU/iZPcf8c2J3/Rg0llBiBPClucPUtBTB0DyR2KDS9pd/BWoA14EwwBlwAvZIKesmPdx3k+6SwNs8fapWL02Zogq/Plerltoe5+sLGTOaLz4DrGUt7WiHM85sZCOFErhJ+OsvaNQI4qpA9BrIYgnBF/zJv7YVZHOGNrfB3g7YCqSOATVNS24mulnQecLUnjyBhQth/Hg4dkwds7ZWtTMGDVKrmNKwSCLpSEdWsAKnhYNxXdaaDSuaE2VhzVH8aUjFxE8QGw0bu8CpRVDlC6gxQtUe0bR0SOcJJc3lCUOdPg3ffANLlqjfHRzg44/VI0sW88ZmZLFAlwj4I+NReOpJvphYTm0ogEP4X9AQKP0l8D3qf1lNS78MzRNv3P4mpaz7/AEcA4ZKKZ2llNWllM7A0PjjWkphZ6dWJwUHw6FDavY6Y0YICoLOnVXnuG7d1O8ptECfN94EEshtblOd6hzi0GuvqVBBXYLtIaAORERDhUJNOddyPdy/AouzwsNooBbwl4mvQNPSD50nzMDeXk0SHDmiuoL6+qqZ5oULX2yNW7BA1WJKg2yxZQlLGMQgbnT4lcuDf6NGnUCiY2ypTm2WsyGBlg8vsbSGxvOhdE/Y/yNsGwgyZeZDTUsLdJ5IwYoVg8WL1T1Dkybw+DF89x0UKgRjxqjJ6jTKEpifET56UgbsgrhiaYdrs/PcL1AZNgOH
fgCGQOIZRdO0eG9cqfSvFwlxD8gppYx96ZglcFtKmc2I8SUq3c4s/BcPH8KiRapjXHDwi+MFC6qBps6dVfJIYU5ykkY04j73WclKGtDgtddcvKgaJl3PDA7B8MwWdoXvovSqZpDBAVpLyB4B+AM1TX4NmmZOppiBfuXzdJ4wl0uXVDHv6dNftIt2coJevdQjf37zxmcEEslYxjKUoTidroVoM4UtwR0pnvEIS5hEO3phlegJJAR9CiFjoERH8JoNFom+Q9PSHJ0nlHSRJwyxe7dqBrRrl/o9b161Ta5bN7UiNo368Sl8aXcJntQnk7jG+U1lcTwfrOam3bsDU1HDUJqW/iR5pdIrrgPerxxrDm+rjqmZXebMasXS3r1w6pRKFvnzqxGZ4cOhcGHVDWLWLHj0li46JlSCEgQTTAEK0IQmzEmg1WfBgirvuUbBgzLg8Biq5Pdgb9vtEBMNi6PgRlbUOtbNpr4ETUtvdJ4wlwIF4KefVLHV6dOhTBm4cQO+/14917IlbNmSYleovguBYAhDWMhC7roFE7O1NVXd57PrWiPepzcr+YLHJHK9QkCtn9X2t5MLYG1riIk03QVoWvqk80RKVrMm7NwJAQFQvjxcvarq9hUvrppDxMa+/Ryp0Bd2MO1ZAciwi0cUxKXxQa4U84AgYPdMkO2B9NEYQ9PelaGDSoOAOUKIvUKIJUKIYGAuMNB4oWnJrlgx1TkuNFTdYHTsqLbMBQVB9+5qe1ynThAYmCJuPvKRj13soja16UpXPuVTYvl3QsubV+W/0lngejHIdQ/qOFXAv/0usLKHpfcgPC/q7yyrzHIdmpZO6Dxhbs+3xh0+rL4Y27UDCwtYvRoaNlQ3Br/88mI1Uxrghx+bxCaic13BMqQeDVt9xaLDPWnLSHbRiWsksg1QCKj6JdSbAOfXwKpmEPXYdMFrWvqj80RKJwR4eUFICCxbpu4dLlxQ9wzlyqljaXBwqWcGWBmXB2Gxk8i4Mrg22sv50nVhP7BtOUhvIHV01tY0czBo+xuAECIn0BjIC1wD/KWUd4wY21vp5arJ4NEjWL5cdY4LCnpx3NVVtRnt2tXsXYWiieZDPmQyk/HGmwUsIBOZ/vWahw+heXMIOgpFT8DZPDDjYRjdl3vCo0vQvAAUOgfMBjqZ5To0zZRMva0h/jN1nkhprl+HmTNVh9DnbaNtbcHPD/r2BXd388aXTE5wgqY05WrMDWSHPxjud5JhLb8kmDpkYxXFyZr4CY7Pg03dILc7tPQHu+ymCVzTzEjnCSXd54nExMTA/Pnw7bdw+bI6Vry42vng5wdWaWvb8I4Y8Hz2kBir5mAdxF9BDSh/cCuUABpVB8sNQNouYq5pL0vu7W9IKW8DO4CdUsp55k4AWjLJlEkNHO3cCefPq6Th6qpWM335JTg7q0KwGzeabfWSNdZMYhITmMB61lOTmlzi0r9ekzmzWq3buBqcLQTvnYEemZ35rv0uZPaSsOY8nCwGdAYmmeU6NC2t03kiBcqdW32XX7gAa9ao1pmRkTB7NlSurAaVZs9WXeVSsZKUZB/7KGdVmpglrRhx0I5eI+dRUe5BUoO9XE78BKU6Q/NlcPMvWFoHIq6bJG5NS290nkhlrKzUfcKZM6p2X4ECqpxG587g5qYmLNJQY4g6VnDALjN2kQEQ04wKtbeytXo9OAmsC4aY2ujdmpr2OoMGlYQQLkKIPcApVJ92hBCthRAzjBmcZmKFCqnWoufPq0EkX1+1DHbVKmjcWD3/ww9qj7UZDGAAAQRwiUtUpjLBBP/reXt7tcujvQ/8XQLKBsM39o70abuduLw1YMNJOFwa6A+MMss1aFpapfNECmdlBd7e6rv97FkYMgSyZ1dbHLp1U7X2Bg1SW+dSKSec2M52fIUvT0Z8zOJ8+2nebQN55RVcqYp/At1E/6VoS7VK6f55WOwBDy8l/npN0/4TnSdSsQwZVH2ls2fV7gY3N1WftU8fVZ/1t99S/eTEc+Us
4Fhme7I9WgmxXfCsto1FdWrDeQkrj0FUTSDM3GFqWopi6EqlqagWWpmA6PhjWwBPYwSlmZmFhZrNXrFCLXX98UdVFfvSJfjqK3BxUYVfAwJMvq+6IQ0JJphMZKIudfmDP/71vI2NqiXYvy8cqQ7lV8K0DJnx8Q0gplAzCDwG+0qD/Bz4Et0qVNOSjc4TqUWRIjB6NISHq5uDKlXg3j2YMEEVZ61QASZOVMdSGXvsWcpShjCER50nsq/bOGp4boFYK2pRi4VsTPxbv0ADaLMVnt6GRTXhzilTha5p6YHOE6mdtTV06QInTsDixaoxxJUr8NFHaqfDTz+pmhSpXGEBp3JY43JnFsR9QoeKO/nNsyoyHFh2Hp5WA86aO0xNSzEMHVSqDPwkpYwj/i5cSvkAvak07cuTB4YNg3PnYNMmaNVKrV5avRqaNFGzEyNGqJsTEylBCfazn6pUpSMd+YqviHupy4+Fhbo3+uYbONQKyv0O/lZ2eHivILJER9hzDHa+B/JHVM1I8xcl17Q0QOeJ1MbOTt0c7NsHhw7BwIGQLZv6ecAA9f3v55fqOsdZYMFoRjOZyUTU3MjlKb2oVH0VV54WoS3NmMeMf+5mE5S3GrTdAXFRsKQW3HjLCidN0wyl80RaYWmpmkEcPgxr16rt1LduqXuGAgXUX8LvpO6djbmA404WlLs5BhjFR2X28WXjsshbVrDkOjyuBhwxc5SaljIYOqh0Ayjy8gEhREl4W5ECLc2wsFDdg5YvV8VeR45U2+EuXYKvv1YJpEkT9bwJ9lbnIAeb2UwPevADP9CGNkQQ8c/zQqjyUBMmwJFBUOJLOGxhTdnGc3lQfiAc/Bs2lYK434FuQIzRY9a0NE7nidSsXDkYP15tb168WH3fR0W9+LlgQfWlGhpq7kgN1oc+rBfrofB5HgS0wL3eRP682YAu9GQZX/EwsTVLucpCu11gZQvL6sKVPaYLXNPSLp0n0hohVKecfftg82aoXRvu34fvvlP3BoMGqbIaqZQD8GduaHjtU2AmI4sdoWezQsiHGWDxfbjvAew1c5SaZn6GDiqNAdYLIboCVkIIP2AJujBN+pQ7N3z+udpXvWULtGmj6nUEBKif8+WDwYPh2DGjhmGDDdOYxi/8wmpWU4tahPPvFVMDBqjtcKdHg3NXuCktKFb3N65V+xaOH4d1xSFmLtAaeGrUeDUtjdN5Ii2wtVWzz5s2qQGk4cPVlobLl9XPBQtCgwawcCE8TfnfmV54sVvsJns2QdSORtTu3o+Vx7vTgR/YRUcuEfnmN2d3g/a7wd4JlnvChQ2mC1zT0iadJ9IqIcDTE3bsgF27VBmNiAg1u1u0KPj4qKZABnYdT0msgY15oOv1bmCxgpkFL+LjnYu4Zxlh8RO4XR+1i1PT0i8hDfzDLYTwAXoBBVDVyaZIKVcb/EFCeAG/AZbADCnlT6883wdVQTkWeAz0klKeSOycugVoCnLnjhq9mTkTjh59cbxSJVUE1s8Psr6lpXMS+OOPH3444MAa1uDOv9tkb9yodu7l9IDo9fDYCkL++g237R+Bixu0OAM2tYE16FXYWlpgplbRScoTxqDzRDKIi1M3CjNnqlp7z1ejZskC7dur73h3d3VTkUJd5SrNaMbhuCMwcAJjGj3gY+8v2EdNbFhFBXK++c1PbsIKL7h9DLzmQokOpgtc04xI5wlF5wkjOXoUxo1T9wdRUepY+fKq/lL79qoQairzv9vwffadEOdN9Rv27FwXjVXsffAVkGcJ4GvuEDUtWRmaJwweVEpiMJbAGVQhvnDgAOD38qCRECKzlPJh/M/eQD8ppVdi59VJIAWSUtXjmDlTJZEHD9RxW1s1qtOtG9Spo7bTJbO/+ZvmNOc615nFLPzw+9fzwcHQtClYu0LmvXDJFoJOzKfqxq6QywV8L4N9aWAj4JTs8WmaKZnjZiEl0nkimd27p7bEzZqlOsc9V7KkajvdsaNazZoCPeYxfvixnvVY/fYx
3R9XYtwXXQkXzpxnA40o+uY3P3sIq70hfCfUHQ8VBpoucE0zEp0nFJ0njOzGDZg8GSZNUnWXQOWJ/v1V97iciQzqp0DT7kPvjIdAeFHqbhQH19qS4clN8JHgMhXoae4QNS3ZJHlQSQjRSUo5P/7nbm86gZRylgHBVAO+lVI2iv99WPx7R77h9X5AZyll48TOq5NACvf0qSroPXMmBAa+OF6wIPTooQaYkvnm4xa3aE1rgghiGMMYwQgsXtrlefy4Kg/y2BoKHoEjWWDN+XV4r28LmXJA69uQ2RnYDBRM1tg0zZRMcbOQnHnCWHSeMKJjx1T3uPnzX9woWFqq+npdu6pR/BQ2Ex1LLIMZzAQmYLO5GR5/DGLp7A5Iizg2sRo/PHjjequYSPD3g3OroerXUH14il6dpWlvo/OEovOEiURGqq3T48a9KJFhawudOqnVSyVLmje+/8D/CXhzjjibhrg8us7xNY443A+DJhLcvkd1mNb5QUv9kmNQaYOUskn8z9vf8H4ppaxnQDCtAS8pZY/43zsBVaSUA155XX/gY8AGqCelfK1XoxCiF2rZLC4uLhUvXbr0to/XUoLQUHXzMXu2qs0Bqg5Ty5ZqlqJu3WT7y3kUUQxkINOYRnOas4AFZCbzv0Jp2BDCbkGFk7A3N/wevpt+q5shrG2g9TPIkRE1sPRessSkaaZmopuFZMsTxqJvFkwgOhr8/dV3vL8/xMQ3PsiZU61c6tpVtZ1OQSYxiUFyEFZnS+A88He2rOtNHpuLzGMWH/A+1m96Y1wMbO4Fx2dD2b5QbwJYWJoydE1LNjpPKDpPmJiUarL5119hw0u16ho2VKuXmjZVExQp3IEo8Ii4xrNMXuR4doJTawqS89pZaACU7Q+Mx/DyxZqWMqW07W9tgEavDCpVllImuH5cCNEh/vVdEjuvTgKpUFyc6g4xdapqQfq8TbWbmxpc6tIFsmdP8sdIJJOZzCAGUYxirGUthSn8z/M3b4KXFxw9DvWOwpZiMPTWUUat8ELEPoGW1pA3BvAHqic5Hk0zNb2tQdF5wsRu3FBbn2fPhr//fnG8QgU1uOTnBzlymC++l2xlK61lG548sCKj3zwCF/5MhWw7mMNwWvA12d40yywlBH0GIaOhWDtoPA8sU9aKLE0zhM4Tis4TZnT6NPz2m5qUeN78wdkZevVSuxpS6Hbq5y7EQfnb93mYszl2Mbs5tq4khUNPQDWgWhsQ84EM5g5T096ZoXnijcOnQggLQx4GxhMOOL/0e37gaiKvXwz4GHhuLTWxsFCjOatWwaVLqkV1vnxw5gx8/DHkzQudO8PevUnqECEQ9KMfm9nMda7jjjuBvNiClyuXqjvrUR22FIcmO2C0Yxn8/PYQa5sTlkVAqB1quiEgiRetaWlTMucJLS1wclLf5UePwoED0K+fatLw118wcCDkyaPq661bp1Y4mVEDGvCn2I9rluw8XNeCqkPex/9UFz7gG4LowgWeJfxGIaD2z1DrZzi9BFY1h6jHpg1e01KJ5M4TQggvIcRpIcQ5IcTnCTzfRwhxTAhxWAixWwiRevZUpUfFiqlaS+HhMGYMFCkCYWHw9ddqcKltW9i+PcV2jStkAeccs5Lv2maeWvtS3OcE+4qXgmAgcBnENQEemTtMTTO6xL7EY4DoRB7PnzfEAaCoEKKgEMIGaA+sffkFQoiXK2Q2BV7b+qalMfnzwzffqP1oq1erwaaoKFWfo0YNKFtWJZqHD9/5I+pRjwMcIC95aUQjfud3JCoxZc4MAQGqe/aGulB/JqzMXJC67fcQla0YrLoNp5wAb2BhslyypqUxyZkn9M1CWiKE6v45cSJcu6aKezduDLGxsHIleHurHPDJJy9qa5iBG27sF/uoZ1mb6Jk9abbWkfHrhtOC+VyjEX9y981vdh8KDWfC5a2wvAE8vWO6wDUt9Ui2PBHf+Gci0BgoCfglkAcWSilLSynLAT8DvyTHRWhGlj27ygenT6sdDS1b
qoGkZcugXj1Vb+m33+D+fXNH+hpHAWfy2VEufCkxlv2p1uQ4K8qVgCPA+m0QUwu4ae4wNc2oEqupVMCQE0gpDSpqJIRoAowDLIFZUsofhBDfASFSyrVCiN9Qy0KigXvAACnl8cTOqZerpkEXLsD06aq49/PCrxkzQocOantchQrvdNpHPKIjHVnLWnrSk9/5HRvUdoW4OPjiCxg1Cqp8Dqd+hNxRDwhZ3QKH8CCoVxjKn0PtjdYdf7TUwUS1MpItT+guoenE1atq4mDOHDh16sXxChXggw/U9jgzdAKKJprBDGYiE7Hwb0bfkz6M/aQfl4QrZ9hAs5e2T7/m7Grwbw9ZCkHrzZApv8ni1rSkSIV5Qjf+SU/Cw2HGDHVfcDV+g4udncoTffuqiYsUJAbwCZf45/8Z+Jyxe4vycfBZcLaAFgUgQyC6CZCW2hicJ6SUBj9QZewdiR+MMvejYsWKUkujIiOlXLxYyjp1pFRzFerh7i7ljBlSPn78n08ZK2Pll/JLiUTWlDXlDXnjX89PmSKlhYWUJdpImSdGypzRT+WNVS2kHIOUe4pJGYeU8n9SyrjkuUZNMyLUgL3Jv5ffNU+gKhBseun3YcCwRF7vBwS87bw6T6RQcXFS7tsnZZ8+UmbJ8uI73tpaSl9fKdeulTIqyuRhTZQTpWWcpbQ8/p707LdU3o3NIW/KnHKW3J34N/+lbVKOzyTlVBcp75w2VbialiSpME+0Bma89Hsn4PcEXtcfOA+EAUXfcK5eQAgQ4uLiksz/ZrVkFRUl5YoVUjZo8O97gkqVpJw5853uCYwlTko56KqUyHmSWCs56C9nGfeLpZRzLaV85CilPGzuEDXtPzE0Txi0h1kIkVUIMR+IBG4AT4UQ84UQSa+orGkJyZBB7Uvbvh1OnFCtRrNmVTU6evRQtZcGDvx3Idi3sMCCEYxgEYsIIQR33DnM4X+e791blfm4vAFENcgWY4uL93LOvdcNgk9DYHGI+w71d5XY5L9mTUvFkiFP5EPdADwXHn/s1c/pL4Q4j9rWMOgNsfQSQoQIIUJuPV/xqKUsQkCVKjB5Mly/DkuWJLw97nl9JhPp1L03rAAAIABJREFURz82io1kLBbOtu/6U63bBCKjstGBekxmPk/f9EaXutB2B8Q8hcU14MZBk8WsaalFMuSJhKrnv7blQko5UUpZGPgM+CqhE0kpp0kpK0kpKzk6Ohr48ZpZWFuDry9s2aK2xw0eDNmyQUgIdO+uarMOGgTHE93gYhIC+C0PjLvbCeI2ML7MPVo3zk7cfWtYfBfu1QR2mjtMTUt2hhbGmw3YAeUAB6A8qpT9LCPFpWkvlCih2o5evaq2TFSrpuos/f47lC4NHh6q21BkpEGna097drObWGKpQQ2Ws/yf55o0gaAgiAuDG25Q8r4VRRvOINj9UzhyCjYUh9jJQBt48+2FpqVHSc0T+mYhvbK1VcVYN2xQBVpHjYLixVWbzl9/VfX1KlRQ9TRMMEjYgAYcsNyPa9asnJnehbKDB3Hibg360ZmlfM414hJ+o1MFaL8brDPCkjpwUTd50LRXJDVP6MY/6Z2bG/zyC1y5AnPnQtWq8OABTJgA770HtWr9p3sCY/kwO6yK9MQiKoiVRS2p5WNFdJQ9LHoKNzyBVWaNT9OS2xtrKv3rRULcB/JIKZ++dMweuCqlzGrE+BKl90CnY0eOwNSpqjbH4/iuOzlyqJocvXqppPMW17lOS1qyj318xVcMZzgW8eOsly+rAabTF8H9GAQXguUHxtAqaCgUKAbNT0OGasA6IGW0x9a0l5m6VXRS88Q71MqwAO5JKbMkdl6dJ1IpKdXK1DlzYNGiF8VZraygWTP1Xd+kiZrBNpJ73KN1XFu2WWzFauxgltR7gm/5qWzCGycWUI5MCb/x8VVY1QxuHYX6E6Fsb6PFqGlJkQrzhBWq9l594Aqq9l4H+VINViFEUSnl2fifmwPfvO0adZ5I5d50T9C1q7onKFo0
8fcb0YFo8HgSyjMHL4rfvcDBVZmxj7wHLSQUmILahalpKZehecLQlUqnAddXjrnEH9c003veGe7qVZVIypeHO3dg7FjVntTLC/z9VRXuN8hNbnawg250YwQjaEELHvAAABcX2L0balWF4MLgsRtauw9hnNccZNh5WOwCjw4C1YGLprlmTUvZkpondJdQ7QUhoHJl9T1/7RosXaoGkeLiVLdQHx+15eGjj+Dw4bef7x1kIxsbLTbQT/Yn5pNfaXXzIt/MG0UDuR5LahDAG+oKO+SFdkHg2gi29oGgz0C+ORdpWjqSpDwhpYwBBgCbgJPAUinlcSHEd/HNGwAGCCGOCyEOAx8DXZIlci3levWeoFw5dU8wZoyaZPb0hBUrINrgZrTJxt0aTmd2JfudPZzKUYkifne4ndkJVgKnegNfwJtWv2paKmLoSqUfUcXw5qNqXjgDHeN/P//8dVJKk26H0zML2j+ez2pPmaJaVz+NnwQrXBj691ezFVkTngSTSCYxiY/4iMIUZg1rKEYxAKKioGdPmDcPqs6FPztB30tbGL+uNRY2NtDyGeSyB/yBiqa5Vk0zgBlmoJOcJ3SXUO2trl1TWxtmz1b19p4rW1Z9z3fsqGaok9l0ptMvrj9xFwvQfPanzPt+KJHCBn9W8QE1Ety7SVwMbBsIR6aAWxvwmgvWdskem6a9q9SYJ4xB54k05uV7gkWLXmyFy51brXLt0UPdH5jQPcD96lPO5+1AlsjVhKzMT5Fr4VADqNIOxBzA1qQxaZohDM0Thg4qbTfgM6WUsp4hwSUXnQS0BN29CzNnwsSJcCl+JjljRujcGQYMgJIlE3zbTnbShjY84xkLWUhTmgIqNw0frh5lv4Yzw6H27WOsXdUU68g70NwBCkYAy4BEu9ZqmsmY4WZB5wnNdKSEgwdfbI+7e1cdt7FRq5i6d4f69cHSMtk+cg978Ilpxd1nERT5YhSbfx5H7gyXmMU0PqALCQ4XSQkhYyFoKOStDi3WgH3OZItJ05JC5wlF54k07N49tS1uyhQ4efLF8Xr11Kxxy5aqOZAJRAINrsSyJ98gbGImEeBfgHrnLkEJoGF1sFqLLqmhpTTJOqiUUukkoCUqNla1c5swAbZte3G8fn3VOa5Zs9duOC5zGR98OMxhRjCCYQxDxM9Bz5mj8o9La3iwAJyeXmX/qmY43DoKDfJCmavANKCbyS5R097E1DcLKZXOE+nAs2ewdq2aTNi8WQ3kgNrH/MEHagWTq2uyfNQVruAd25K/LA+Q7adPCeh6kCpOgcxnKPUZSV7eMIh1ehkEdIJMzuC7AbKZr8aHpj2n84Si80Q6IKWqazFjhtpO/Xz1Uo4catK5R483Tjonpzig4zXJojyjQA5j3E5nPjwYBnkFtHAF+y2AaVdRaVpijDKoJITIjOrW8A8pZWJdF4xKJwHNYMePq25x8+bBkyfqmKur2hrXvbtqTRrvCU/oSU8WspDWtGY2s3GI/99+61Zo1QpsioP9Tnhs8Zi/17cjz8UN4O4KHqEgvgX+R8LNrDTNNMx1s6DzhGZWYWFqBmDWLAgNVceEUJMJ3burVUy2SdtiEEkkvWUf5om5WK32ZnYeRzpWmckWmpGThZR/UwHvK3thTQt1c+OzBvLVSFIcmpZUOk8oOk+kM/fvq23U06erIt/PVa+uZo/btgV7e6OG8L/b8H225SA60/WYAzN23MciYyy0zAw5NgDVjPr5mmaoZC3ULYTwFEJcAO6j2nk+f4QlKUpNM5VSpWDyZAgPV8W8CxVSNxxDh6pir337/rMs1h57FrCAMYxhJSupTnUucAGABg0gOBgcbsLNIuD4yAEXnzUcKdsHDoTCeleI+RbVzSHGPNeqaWag84SWIjg7w9dfw/nzahagQwe1JW7rVvDzg7x51UrVJBT3tsWWOWI24xhHnLc/XTIHM2TOt9SVAVhTHX9CE35jvurQYR/YZYdl9eHUkneOQdNSI50ntBQha1Y1qXzo
kKq91KsXODjA3r1qZWuePNCvn3reSL7LCYuftMYiKojZ71nh0dKaqGgHWPQQQmsDK4z22ZpmDIZ2f5sB/AhkBqxfetgYKS5NM45s2eDjj+HMGbU1ztNTFfWeMkUte23cGDZtQkj4hE8IIIBwwnHHna1sBdTL9u2DsvnhtDOUPGtFufqTWF3rZzgTCsvyw5MZQAvgsTmvVtNMSecJLeWwsFCrk/74QxX3/v131SX03r0XP1eqpL77Hzz4z6cXCD7kQzZbbCJT0ev82nIcTUZ+j3NcOJVxZza7Eu7nk7Uw+AVDbnfwbw9/jnqxXU/T0j6dJ7SUQwiVB6ZOVXlixgyoUgUePlQT0RUqgLu7Oh4Rkewf3y4T/GlZCbsHf7I3nxtF/R5xx8ERVkbDkdbAWEDnBy11MHRQyRaYLaV8LKWMfflhzOA0zWgsLVVNpc2b1da43r3Bzg42bgQvLzVyNGUKDSNqcIAD5CUvjWjEL/yCROLkBNu3Q5tmcLQElN4qaOk+lNHNliJv3ILFueBeAFAXuGHuq9U0U9B5QkuZsmVTs9J//aUeAwaomeqDB9Uq1Tx5oEsX2LXrPw/w1Kc+h6xCKJHRhS2ffYX7rz15/DQ7HanHLH7ncUI3BHY5oPUWKNYedn0OW/uqTnGalvbpPKGlTA4Oaov0vn1w9CgMGqTyREiI2hL3fPXSy9vlkkFFazifLT95r+/iclZvCvrd4JiLC2wFtg+BuH7onQ9aamDooNKvwKdCCF0kRkt74geQCAuDkSPVdrhTp9TNhrMzhT+fzr6wZfjgwyd8Qmc684Qn2NnB4sUw7HM41hCKTYLP3NrQq8024p7GwkIHuHIMqAq8+1YLTUsldJ7QUr7y5VXzhqtX1SqmunXVatV586BWLSheHEaPhhuGTwYUpCD7rfbSWrTi7CejKb+uDEGXPOnBQLbThUs8ef1NVrbQ9A+oPAyOToVVzeDZf18xpWmpjM4TWspXujT89htcuaJq9FWrBo8eqdVL5cqp32fPflGjNYnyCDiXz4HKoSt5lOFTyvleZmlpF/gLWDMFopqhdz5oKZ1BhbqFEEWBTUBO4PbLz0kpCxkntLfThfU0o4iOhpUrYdw4NWMBYGmJbN2KOR9lpXvV6bzHeyxnOW64AaombO/ekKcf3PwVajw4S8DKJtg8ugyNM0KxSNSq7w5muywtfTFDq2idJ7TU6fx59SU+e7baAgFgZQXe3mrmulGj1zqFJkQiGclIvpJfIY6UYcT1OnzWaDwnRBkiWEkV3vDH4OgMCOwLWYuAzzrIViQZL07T3kznCUXnCe2tjh2DadNg/vwXW6azZIFOndQNwHvvJfkj4oBul2BugVkQ15thB3Pyw66biJxx4FMKMm8G8ib5czTtv0jW7m9CiCOopRbLgKcvPyelDHzXIJNKJwHN6PbtU7MVy5ZBrFqdfb9KcT4aGs5qH5hlORdffAHYtk11hhPVQK4Bx+jbhKxpQeare6GyM9QIA4uPgVGAlfmuSUsXzHCzoPOElrrFxEBAgKqf4e//z3c++fOr4q09e6pC4G+xgQ34xXTk0ZNYmi4awPxek4gTgmAW0hSvhN8UtgPWtlI/N18OLnWT55o0LRE6Tyg6T2gGi4iApUtVHab9+18cr14d+vSBNm2S3GF0zDUYmnMHWLTC+0IMKzdGYWn1DHwcIc9GoHySzq9p/0VyDyo9BLJKKROsO2kuOgloJhMeDhMnqlmKu3cBCCuSgR8+eUbWLoP43m4M1lhz6hQ0aQJX7CD7foiwfcbhbQModGwGuLpAk8tgVxdYAjia9ZK0tM0MNws6T2hpx7VrMHeuGmA6f14ds7BQtfj69oWGDdXvbxBKKL7RbTlkfYBC8z9gXZuDFLf9m/V8R2O+wDqh6gP3z8Oq5nD/LNSfCGV6GefaNC2ezhOKzhPaOzl8WN0XLFigtscB5MihJiH69IHChd/51BvuQwtxlhiHZpS6c5H9ax3I+PgeNLSBkguBVslz
DZr2FobmCUNrKq0B6iUtJE1LxfLnV/WWwsLU4FKhQjife8aUvvBxgfHM/b4I1+78TfHiauKiYha47grZbmSgcMPprPWchgy7Dn84ws09QEXgoJkvStOSlc4TWtqRJw98/rnqFLp9O7Rvr7bArV2ruoQWKQKjRsGtWwm+3RVX9lnvZkDcIC50mkOloxlYebkF3nzNn/hwmwTqJ2UtDB2CoYAnbOkN2wbpAt5aWqPzhJZ2lCsHkyapGn3Tp6tucXfuwJgxKkd4ecGaNWoV7H/UJCscsy1Kpuv7OJ7TgwId7hGaOx8EREFQa4gbju4Mp6Ukhq5UWgo0A3bxSisrKWVn44T2dnpmQTObmBhVd+nnn1UHIeCJPdzt3pL8H/9CZG5XPvgAlqyGIoFwrgZ8enUfI9e1xiLyNjR0gBKPgalAF3NeiZZGmWEGWucJLW27cUPVXZo6FUJD1TEbG7XvuW9fqFlTtah+xXKW0zm6G08fWvHJibaMrDmTMFGQaFZSjATqcMTFQtCncPAXKNAQmi0B26zGvTYtXdJ5QtF5QksWUsKBA2qgackSiIxUx52doVcv6NEDcuf+T6e8FwcVz0Vz0W0QVrFTWBGYD+9jV6Ag0NQXMiwA7JL9UjTtueReqXQcVQhmL3D+lYempT9WVtC2rUoe27bx2Ksm9k8g/4RVxBUpTIaufiwc8hdfDYVzNcF5LIzNU5U6HQ/y1KkybLgD23NB7AfAQCDazBekaUmm84SWtjk5qdVL587Bhg3QvLmaYFi0SHWOK10afv/9RRHXeK1pzRHrg7jZuzDWYyr1NrbHPu4R+anCfpa8/jkWllBnLDScAWHbYWFVuHfWRBepaUal84SWdgkBlSurjnFXrsDYsWrFUlgYfP21Glxq1w527FADUAbIZgFn3KxpeGIyMZYzaNHgNp/Wzoa8ZAELV8K9KsBVY16VphnEoJVKKZWeWdBSkoijwRwc40e1RZewfr7StUED9tb8lMZjGyDrC1gCGSyjObBzCK6Hxqttdc3Dwd4DWAr8txkMTXsTU89Ap1Q6T2hGdfmy2vYwYwZcv66O2dtDhw4wYACULfvPS5/ylF6Rg1hgO4M8B6qyunQMlW1D2MvHVGUUFgk1cAgPgjW+QFx8AW+9c0hLPjpPKDpPaEYTFweBgTB5stoKFxdfTqxECbXCtUsXyJzZoFONvgif5joAtq2oF3adAH9LbGQkNMsOrpuAdP9HWTOCJBfqFkLUklIGxf/8xr/FSCm3vXOUSaSTgJbSSCSzw77jwbjh9JwGDo/Vn6/I0pX4/OGXjBfe5Aq24JYTrDm5gKZbeiJs7cE7AvLkAFYAVc16DVraYIqbBZ0nNC1edLS6YZg8WbUCfa5mTTW45OsL1tYAzImbR6+YvnAvI7/L6vTKvYbjeJCPP8hKAt3lHlxUBbzvnoJ6E6BcXxNdlJbW6Tyh6DyhmUR4uJqEmD5dNYMAcHBQA0v9+6uBprcIuQ917t0iomB7XB/8n737Do+q+Bo4/p3NpieEACEBEmpooQZClSKIBRWxIIgiWLH3hqCovHbsvWClCFJEEBA7TXovoUlLIAEChPS2O+8fs/wIGCRlW+B8nuc+2XJ399yb3T07596Z+YNlP4RR8+hxVE9faDce1CAXb4Q43zijqLRJa93ScXn3GR6vtdYNyx9mxUgSEN5qKUu549h1XPvJYUa+E0jgITMrRHLVFjyV8zQLfh7E/l5WRhxcy0uzrsWSvR8uCoNWGcD7wJ3Av8fmEKK03NRYkDwhxOm2bTNjanz9NWRkmNtq1YK77jLjatSqxWY2c0nGAA4Eb2dY4jV80OJnipQfKYyjOdf++znzM2DujbBrDrS9D3q9A5YSzmwSogwkTxiSJ4RbnTgI8cEHsGDBydsvusgchOjXz0wMcQbZdui+poi1CSMJLhjL3DlV6bErHVoAfUaB9f+QNoRwlgoXlSoDSQLCmx3mMDdyI0tyfuPNL5pz5+vpWJPNkYmdNOL9
a5/i04lD6ayzmDdnMIF7f4XW0dArGaw3Ax8DwR7dBlF5SbcGQ/KE8JjMTDPV9AcfwJYt5jarFQYMgPvvJ6trawYdv5u5VScRu74LU1pk0866gTUMpw1v40PQqc9nt8GiEbDqDYjpBVdMgmDpMi3KT/KEIXlCeMzGjWZW6fHjISfH3Fa3Ltx7L9x+O9SoccaHPr4W3mw2FeV/C68ugSdX5EAtoH8/CJ4Mp+cQIcrB2QN1CyHKKIII5jOf14Pe54kH9hH5TyYLxw1Fx8YSyz+8O2M4O8MbkfDRBJpfOp29HUfAhmSYUgeOjgc6Als8vRlCCCHKIzTUjJmxaZPpEnfttWY8jcmToVs3Qtr14Kfpvfgg7X32NN1A55xdvJ12Ke34jGQSOMT6U5/P4gM9x8Jl30DKMvi2Lez93TPbJoQQouJatYJPPjEDe7/9thnYe98+MylEdDTceuv/Zpk+3RvxMD/tenyTVvJUt2iuv1JhO+yDnjAbDiYAye7dFnFek6KSEC5kwcL93M9GNtLWryM9b/+WSxNjODzpXYqatyQ6bz9vPP4wKxs04rNFVfil17fo9FwY7w9r9oFOAMZ7ejOEEEKUl1LQqxdMnw67d8PIkebo87p1qDvu5L4mozn87E10396SR2vM59K97fHTR6hCJ7bwPnDaGeUthsJNKyGgGky7GJY8Z85iEkIIUTlVrQoPP2y6T8+dC5dfDvn5pht1QgJ06WJmGi08dbboS2IgOSKO+ktXMK3pVbQfbOO48kdPToQtzYG5Htkccf6RopIQbtCABvzGb3zKpyyzrqLB4JF8umk4RTNmkFy7AxFHDvPSqJF07P0A8/cNpTC8B/yZBdP8IWMoZoylXE9vhhBCiIqoWxdeeslMMf3tt2b66WPHqPrGZ/zWeSUbr2nH8b2baJ2Vy58FTYnjQbZwFQUcPvV5arSAISuhxTBYNgam9YGsFM9skxBCCOewWKBvX5gzB3bsgEcegbAwWLbMzCrasCGMHQvp6f97SEQQ7OoSxpA/ZrA+/EWa3JTP+pqBMC8L5l0BBQ8CBZ7bJnFekKKSEG6iUAxnOJvYRDe6cb/lQS665h3ykiey4sVfWGztSdXjx7ns9XfIfWA5h//pB7sK4Rt/2DwOdCdgu6c3QwghREUFBMDNN8Py5WYZPBgFtJy5hmU985nXSzFu9gYeOdKchvzKcdpwgN9OfQ7fYLjsK7jsa0hZAePbwp5fPbE1QgghnC02Ft56y3SN++QTaNbMzCD35JOma9xDD8GuXYA5IXZ8bwvjE0dx5PjPJAz055VOvuhEYML7cLAt8I9HN0ec26SoJISb1aUu85jHV3zFetbTWrVh8aiNRG3/nduaLObXsD5Uycgg4uPZ5L7ig31xFMwEZm2FnHhgiqc3QQghhLN07AiTJpmucU8+CWFhJKzOYPoAeLD9Vt57HY4e10TpS0jkKf51xLnFMHPWUmAETL8UljwL9iKPbIoQQggnCw42s4du3mzOYLroIsjOhvfeg8aN4brrYMkS0JohrWGH76WEL1vHyG7tufB6yCjwR09KhNUtQE/29NaIc5QUlYTwAIXiFm5hC1voQx8e4zGGNujOwxurs/ihX+kVtICFTXoSeDwDy/d7sY0NgnF2+DQfdtwA3A/ke3ozhBBCOEtMDLz2mjkS/f770KgRDfZqnnwqn6i6qWx6JITme15nNxeQw85TH1s9Dm5aAS1vhWUvwtQ+kHXAM9shhBDC+SwWM9bSb7/BunUwbBj4+MCMGdCtmxl36fvvaRhexMEu9bhp/kIWRjxFg2H5/FkvGP7Khx8GQ84QIMfTWyPOMVJUEsKDalObH/mRiUxkG9vo6NcW/xdeZuzSzjwc9Ce92/7O0g5d8TmeAz/Z0M8peAz44UPI7wzs8vQmCCGEcKaQELj/fjNg68yZ6B7dCcuA1u9mYmsEtQeuxbK8Jbt5Byg2QLdvEFz6BfT9FlJXmtnh9sz32GYIIYRwkTZtzCDee/fC
qFFQrZrpSj1oEDRqhOWdt5jQJZffk18lN2keF10TyIO9fLHvs6C/nQh7mwObPL0V4hwiRSUhPEyhuJEb2cIW+tGPUYzi5tZteHXln1w8sDc9dy3myqfms6xTJ1RmEcwGhgHD1sG2VsB0D2+BEEIIp/Pxgf79UQsWwqpVpFzTD7tF4T/VRkDnfBpc+AgHf25Jnt5y6uPiboYhqyA4EqZfBotHSXc4IYQ4F9WqBS++CPv2wUcfme5w+/bBY49BTAy9x40gPbANFy5az/ttu9H2JjupvkHoaftgUVuwfcy/ZhgVohykqCSEl4gkkqlMZS5zKaSQS60XseHpwfy6NIUjCy6hS8RS+k+Zw+r27SETM7RSpxx4aABk9QcOengLhBBCuET79tSaMQvbjj1MHtiL41WABRDZdyt+8S05PPkGKMo7uX715nDjcmh1Byx/GSZ3hyOJHgtfCCGECwUHwz33wNatMGsW9OwJGRnw2mv4xdbnzwnPM2v+R2xRLxA7NJfvmofCChtMuReOXwmkn+0VhPhPbisqKaUuU0ptU0rtVEqNKOH+R5VSW5RSG5RSvyul6rkrNiG8SV/6solNPM/z/MAP9GvcjAFL3mbsRTZ+eexyuoxcSf+ZM9nZuhUcB94DGs6Cd+qB/VvkiIMQQpybAurX5YYpf7BiwSpGP1yPg1FgWa+JGDyFoqZhFH7yDOTmmpV9g+CSz+GK7+DYdjM73PJX5KwlIYQ4V1ks0K8f/PUXLFsG11wDhYXw+ef0uyKOnDs30Pf7j7np8lAGXe5DwREr+tu5sLUJsMTT0YtKzC1FJaWUD/Ah0BeIAwYrpeJOW20tkKC1bg1MA153R2xCeKMAAniO59jMZrrRjcctj/Ltw+34auliur6nmPVdf+IWrOPGKVPJaFQXDgOP5EOTYTArHkjy9CYIIYRwkYvbtmf0m7t4cvIbDH/fj5RGCuuuAnzveQlb/Qh45f8g3XHkudkNcMsWaHQVLB4JEzvBofWe3QAhhBCu1amTGcQ7MRFuvx2sVvxmTmfakLvZ3akh6TvjaXJzEVvDqsCcwzCnG2TfhukOIUTZuOtMpY7ATq31Lq11ATAZ6F98Ba31n1rrE0PRLwOi3RSbEF6rEY2YwxxmMIN00hkc3Z16f97Ca5cdwreDhe+rDKB64k5e+PgjiqqHwj9A//XQpR6sGQnYPb0JQgghXMBqsfBNz8e444ZE2v/Qk9umwJF4Kz6HsmHkaHTdOvDUU5CSYsZX6jfVLFnJMDEBljwHtgJPb4YQQghXatoUxo2DPXvgiScgNJT6KxYz/6pVzOpWm5d8MnmlXRXs2y3w1Vewvi7oWZ6OWlQy7ioq1eHUUyeSHbedye3AvJLuUEoNV0qtUkqtOnz4sBNDFMI7KRTXcA2JJDKCEXynJvHKLU15etlH9H7PRtF1vrx0+T2E7U3hh9FPooOssExDwitwXQ3Y85enN0GIUpFu0kKUXccaDdnf6g/CWn1KxIIARs3zJa+3FZWZA6+/DvXrw913m1mCmgwwZy01vQGWjYEJ7SF1lac3QYhSkzwhRDnVrm1ywr598MorEBlJ680HmDBEM+j2HF6w2dkQHA6/pcN3/eFwX+CQp6MWlYS7ikqqhNtKHPhFKTUESADGlnS/1vozrXWC1johIiLCiSEK4d2CCeYVXmEDG2hHO56tfh9H53TitceXEX0Z5LwUzPVPv0b07hQSb+2P9gFmHIMmveCBrpAmA3kL7yXdpIUoP4Xi7ebD2eq7mSmNehP5exEzllSDa0EXFsCnn0JsLNx5J6RmwOXj4erZkHcUJnWChSOgMNfTmyHEf5I8IYQTVK0KI0aYM5ccuaHh7iJeeALCRx9j/FFfcg/6osf/DAvqQeFnyHit4mzcVVRKBmKKXY8GDpy+klKqDzAKuEprne+m2ISoVJrRjN/4je/4jgPqAE/16EKbjdfxeGwigfGQ8nsN4r6cSa9Nm8m4pDG6CPhgKdSvBS8+CNnZnt4E
IUoi3aSFqKAmAXXZ0XgeIw58zcBWdi60IOtgAAAgAElEQVT63pfkzWFwE2h7kekC0aSJGV+D5jBsM7S8DVa+BuPjYb8M1Cq8muQJIZwlIACGDzczxk2ZAi1bEpMMN79cyLGxhazfFghL8+Cru2BnPLDb0xELL+auotJKoLFSqoFSyg+4ATils6ZSKh74FFNQknPthPgPCsUN3MA2tvECL/Cbzy+8dVtL+q25ncHrk6AHLM6PI2z+dp7/bTL21kGQreHZ96FRTRg/Duwy3pLwKtJNWggnUCierj2MvSGbObq/L02aH2fkl1Fkb/HHPlSh7Tb48kszzsbdD0ODJ+G6X8CWB5O7w58PQ6EcfBBeSfKEEM7m4wMDB8L69TBtGvbWramdAm0/yyXtbTj+hx9MXQ8/NoaMZwGZQVT8m1uKSlrrIuB+YD6QCHyvtd6slBqjlLrKsdpYIASYqpRap5SSEcKEOItQQhnNaHaxiwd5kOmBE5j+amOGTnucTs8dgbvgpfhB+K3NZNHnw9F1FRzMgaF3QtfGsGK5pzdBiBOkm7QQTlRH1WZN3Zl8lPk9n2QGE9s0n8+/qIl9GxTc4odGwzffQLNmMHo8dJ4Bbe+DNe/Cl00hcSJoOfggvIrkCSFcxWKB667DsnYt/PADOW1aU+MQhE0pIOsNhf1LG3z6IqxqAPbVno5WeBl3namE1nqu1rqJ1rqR1volx22jtTbDy2ut+2itI7XWbR3LVf/9jEKIEyKI4G3eZjvbGcxgJtR8m00zGjL4oRepfWUWtg8t9Lj1U5qt38/hx9ugQ4Hlu6BTZ7iln5kdSAjPkm7SQjiZQnFr6PUcrJ7I41kf81iOhbaxmr/GhaK22cm+rQraomD8eGjbAb48Cu0nQnAUzB0C33WFA8s8vRlCnCB5QghXs1jg6qsJWrsOZs0ipXUzQo5qLLOg8EUFzyfDuARIuh445ulohZdwW1FJCOF69ajHV3zFBjbQS/Xiu7hnKVgcS/9mHxJ4YQE71tWi5th13LTqTwquqgo+wDc/QeMYeO1ZyJffXl4h/R9Y+n8wvh0UZHo6GneRbtJCuIgvvjwecjeHquzkmszXuSrPTt9GsHccqO02jt5RC+1jgUmToPcQmNcE4l6HjH3wXReYcxNkJJ39hYRwLckTQriLUtCvH7XWbaHwpx/Z0qYevhka5oB+DLhnGkyMgP2PAoWejlZ4mBSVhDgHtaAFM5nJ3/xNU9WUHy++n4g/m9Fz50TUNXYmcyH+M4/y8exX0G2skG2DES9C81owazpomeXB7XIOwZr3YVIX+CIW/h4NfpmQ/a+DsOck6SYthOsFEcSY0Cc4GLybppnP0K6giHvqKWyfHUNvLyJ1eFO01QqTvoOrRsL6fhD7EOyYDl81hb9fgMKcs7+QEC4geUIID1AK3yuuIm7tbnb+OImVCTVQWZji0v02ePht+L4qpL6DzBJ3/lK6EjceExIS9KpVqzwdhhBeTaP5mZ95mqdZz3qa5Lekxmej+Xv7dajnLASE5bDgvcEkjJ2FOuh40MVt4J1JEHf6TL3CqQoyYedMM3bJ3t9A2yAiApppaJYGVcDMjty2zE+tlFqttU5wdsiVjeQJIc7sEId4OvNlZgR+xAiLnYcV2Pb5kv5sE2pN3ISy2yEoCO67HdonQ/IPEBINPV6FZoNBybHJykzyhCF5Qogy0Jpp096i0UvPEb/eTOqgI0BdClxdA7p/DDUHeDZG4TSlzRPya0CIc5xC0Ze+rGENk5iE8i/k7wcGEju2Ne2+m0rumwF0vO9Hmq7fSdqdjSAQ+HU9tG4BDw6FY9Jf2qlsBfDPbPjpBvg4EuYNhaOLoIMVhgFDj0PHdlDlPfLZQnkKSkIIURo1qckXoe+wzrqDNdnDaKbtzIopova3G8hcH0jKlc0gJwfGvg/3LYTj94FvhIy35E3y0j0dgRDifKIUA65/jLZrjvPNx4+zPdYPdRiYAPqRNHjmevixAaQt9HSkImMv
rP8Eljzr8peSM5WEOM/YsPE93zOGMWxlKw1zWxD45Wg2Vx0AN1kYtOIHxj99M75/ZpuzWKv7wzuvwk0Pmf7Vouy0htQVsPkr2DYZ8o5DgBWaFkFzoHZNUFeiuYLt1OVHFjKVn1nHIvawhzpElvkl5Qi0IXlCiNLbylYey3qO5ODvGaMt9LfYSV8SSsFTNai5ZLdZKSYG7rwYqs2BvIPQ/Cbo9hJUqefZ4M8XBZmQvBD2ToO9P8Pxw3BfFlgDyvxUkicMyRNClJ+tMI8v37qby98bT50DZsZQHQv0BXVpW+jyLVRr5dkgzxe2QjiwBHbNhd1z4chmAIrCo7DekgQWa5mfsrR5QopKQpynbNiYylTGMIZEEqmfEwcTR7On6QAs3eC9SSO494W3UDsdU0p3qw5ffgeNL/Zs4JVJ7hFIHA8bP4S0nWBVEKtNIaleW/C5iiwuZB7HmMB8FuifOa72OR4cB/bLmJn9GP1Da5f5paWxYEieEKLstrGN0dmvsTvgW16w2OmL5ujcKugRIVTf5BjnrVlTuLkVBM4CNLS8FTqNlOKSs9kKIXUl7JsPe3+AlM1gt4MVqAP5dcPxb7sG/OqX+aklTxiSJ4SouOyco0wYM5TrP5tLtWOO+kIr0JeB6tUTOn4M1Zt7NshzUVYK7J5nikh7f4WCDLTFh9zoEGiQTVCDIo5XgyqqEIUUlUokSUCIirNhYxrTGMMYtrCFellx5M96ltSE6wmul8OsZwfT66M5qGzAF3iwAbw4FQLaezp076TtsO8P2PgO7PwZbDaIAlpZoGlvtP8ANtCADwrWMsc2j5SAJaCKgFCgDyRdBvMvI+j3usRnwjfvQqNGZQ9DGguG5Akhyi+JJP4v5022+X3M8z4F9NJwZFIVfJ71peqeI2alDu1gcF1Qc833XwtHcSmsvkdjr7S0hqNbzTh7++ZC0l9QkGfuiwRdz0JS3ZpMrmPhI+sBkrCQRhrhhJf5pSRPGJInhHCeQ4f28uPzN3LjN38TnANagUoALgHax0P71yCmj/R+KC+7DVKWmyLS7rlwaC0AtpAAChvYCGhQCPUg1Q/+xp8DtCWbQTzOA/hIUalkkgSEcB47dqYxjRd4gS1sITqrOTm/j+Zo++uJsSQz77araTF/nVm5NvBWCxj4FagOHo3ba2Qkweb3YdMXkHEUAjBnJLVqR26Nobx2MIxx9oWkRPyM3TfF8aA2cOgy/Ff0pdn6LvTSfnRrDvHx0KBBxfKtNBYMyRNCVNxhDvNa7nts8nmbZ/2yuaAA0j4LJWCMIuRwhlnpmivh2nBIm+IoLt3iKC418GjslYK9yHRp2z4V/pkBWYcA0FVB1YXcegEsianKZ4FHmEshBfgSS1dCuZgCLmYl7bHiU+aXlTxhSJ4Qwvm271zH0jFDGDx5M36FoP1A9Qa6A9HRkPA8NL0ZfPw8HGklYCswB6x3zICdP0BuGlopimpb8W1QCA3hWA1YoBQ7acg+rmQHQ1lPW1KwEA6kUb7BtKWoJIQolxPFpTGMYTObicxpQMHyezjW6FYu2baKSbffTPWkNLNyT+DN1tDufVA9PBq3R9gKYNcU2DgW9mw0Y1DVBVrVI63RIEanV2WidTEZVX8HlQ9URWVeQo09l9F136UM9K9NrxZQq5bzQ5PGgiF5QgjnySCDd/M/Zq1+lZEB6SRkwZE3Qgh9PR+/3EK0vz/q/jugZyH8842Z0TJumCkuVW3o6fC9i70I9v0JO6bCjmmQe8x0kW6goT7sqxfGlLBCppDDGiCKFoRyMUe5mDR6ACEE2aDRIVhQi3KcpyR54gTJE0K4zt8rfif1xdu5dvZeAIpqKKxXaWgGhIZC/MPQ+mEIrObZQL1NYTbs/tkUknbNgoIstK8F3dCOJRay68PCAFhNOEn0ZgeD2cbFHDBTR1PTDnGHofoGiNwCHzwE5TlWLUUlIUSF2LEzgxm8z/ssZCG+Nn8CEweR538Hoyb8wshXXsFaaIMgYAjwRHNo
9JZjTtFz+JRWbYf9f8DWN2H7n5CbDyGgW1ZlX9ylPKMimOq/jPxAx3eTvSEBqf3pdugqXq5zAR0ifN0SpjQWDMkTQjhfHnl8WvAlq4pe4LGgQ7RNhoIRFvwmmjH4CqOi8B39BDTeDZs+NwWUFsOg06jzu7hkK4SkP2Db97BzOuQdR/sqVEONrYliZYMgvvTN5icgk0hC6cMxLiGPPkBtwrKg2mYo+AtSp4FtLWCD9HQICyt7OJInDMkTQrjeljkzKXj6XtpuNGfrZ7ewEDzQDsGA1Qotb4J2z0B4rGcD9aS8Y2aG6B3TzUQMRQXoAAsq1g6xkFgPfrJa2EwCOxjEbq4khcaAIiwfoneCdREcngoH/jj5tI0bw7Zt5esBIUUlIYTTbGITH/Mx3/ItWWQRmNqOmO3X8slz8+n11yKzUiPgDqB/DDR+Cqy3YDLFOUBrSFkK296F7XMgKxusoBv6sysunlE16zA1cAV2axKgIK8TVXdfRb8j/Xm5eXOiq7u/yCaNBUPyhBCuU0QRk4um8kvGG/SttoYBS8H3EWC5uf9Iu3hCXn4Wf9+/YMOnprgUNxQSHoMaLTwZuvvYCsz4SNunwj/TIS8T7XeykLSwvh/f+OYzGyiiG1n6KuzqUqAVYUkKtQTS5wCLgL0QEQHt2pklPt78bdjQtY2Fc53kCSHcxG7nyBdfY3vmMWoeSgcg5UoLkdfaUemg7EDsRdD+OajT7fwYdyn7oOnStmMqJC0Auw0dolCNNbbGsLAOzLVY2cCFbNe3sEddCYQRmA2hayB7DmTPBraY3dWkCbRta/LDiSUiovzhSVFJCOF0mWQygQl8yIdsZjM+uWEM/rIzbz2/goi0Y2iro7/0FUBrP2h9A1QbBTTxcOTloDUcXA3bPoVtUyHzOPiArm/hUJM4RteJ4osqK7CpDNCBcPRiam29iluyr+CJjlGEV/Vs+NJYMCRPCOEeu9jFewc+ILzKOO7/MZPqTwH7zX2bru9HyHOjqH/oO1NcKsozDYbWd0GTAWAN8GjsTleYDXvmm6PNu36E/GxTSIrVFDZW/FrfwiSrjTnanywupUj1B/uV+CfWJH8OsBD4G2JCThaOTiy1azuvnSV5wpA8IYSb5eSQ/+ob6LEvE5CXT74fbLtX0fwyje9OIA8Ij4Zmt0PzIefe2Us5h01+2DYRkpeA1uhwUI0hKxZmR8E85ccG3ZfdthvJsF4O9hD81kDBdGAu+G6DVi1OLR61bg0hIc4NVYpKQgiX0WgWsYiP+IjpejpVjhbx2iO1uWO8mWo6t0EIAQOyUJFADNCmFcSOBp9roByDibqN1nB4A2z7BraNh+NpZlS7epDTuDHv1mzJa9USOe67FXQQKmkAsesG8BAXMbRXEKGhnt6Ak6SxYEieEMK9bNiYdHA+q9PG8MD3y2k0FsgFm7/i10cHUvjQK/TdNx3rxs/g2A4IqGa6xrUaDtWbeTr88ss9Crtmw47vzbTORYXoAIVqpClooviprmaKFX7W1chQ/YCrsSRfjH1eMPwKVVZD9+ZwwQXQvn3Fjy6XhuQJQ/KEEB6yfz/2p0dhGf8NAIcjYOUL0KOzhZAddnSSY0CNqHbQfBg0HQTBkR4Nudxy0swZSdvGQ9Lik4WkprC/KUytDj8TyMbCqznocz02n0tRWUHon4HZELEKLmwBPXpAt24QFwd+bhjjXIpKQgi3SCWVcYzjw6JPabYoma9usVB/n518PyurB3ejXce1BOQeN2MvtagCre+Eqk8BLv61XFo5h8zppknzYd88OHbAZLC6UBhbi8lhPXi+2nF2VfkNVBFkdqbRptt4OmsQN3arQmCgpzegZNJYMCRPCOE5e7LS+OLPF7hp/Oc0m5oPQGGUYuarPUm8+UNuTk6lwfpPzA9texFE9zRnLzW+Fqz+Ho6+FDL3w86ZsHMiJC0HbUeHgIqFzMY+zKhj40cfmK/rkaOuhaz+8OsF8KuVyI1wYTR07wbdu0PLlmApz9Q8
FSB5wpA8IYSHrVqFfuQR1OLFAGxs6cOsD2xcHO9Lx22F6ERQhwFlgXoXQ/ObIPZq8POio7klyT1qcsS2r2HfEjMuazjQFHY1UUysoZmlQ9mSfy05gdcDfWC3P8yGGsugjx/0vsAUkpo08UxvQCkqCSHcqogi5jCH749/Ra/HZ3PHODNg69LOtfnqiTt4OGMezY+uQmkN9RS07g6NXgafrrh1YO/sg5D8FyT9ZAZMPWrOrsIXqAO2hiH8EXA5L4QEsaTOfLCkgC2C2klDeTT7Nh5uFoePF59sdYI0FgzJE0J4XpFNM/3Tz+j88RPU25RpbuwNiR9X5ddG19Ew70Yu3rQc/42fw/HdEFgDWtwCrYdDeGOPxn4KreHYdtNtYecESE00t4cDjSEpNpAJkbnMssAKGmLXN8C6gTCjNfV2KPpUhR5dTRGpfn3PDxciecKQPCGEF9Aapk1DP/EEaq+ZKe6bAdGM+3A/19fU3HLYQpWtdvRWKyqjCKyB0Kg/NL8R6l8KPm44bac0sg6Y7s/bv4K9S8BuhzCgKSQ2gfE14UdbOIl6ANp3ANh6wVJfqi2FnhlwVSxc2BPq1fN8jgApKgkhPOg4x1nx8xja3vEhEfvzyQ2AEa/WYvrNQ3nm1xRuSZtBQF6WGce7XgBENIKIjhDRF4L6UL7Jkc8gOxWS/oTkGZC0CI4eNLf7AtGgo/1ID4tnTnZXJvhY+b3ZUor8F4O2EJZxObfm386rEVfgr9wza5uzSGPBkDwhhBex29n18hfUfP0RQjKzsfsBI8EyAnb6VWVdYX/aHWhLgw0LUTtngbZB3d7QsB9Ed4eINmCxui/enDQ4uBJSl0PqH5CyDnIdRbFIsMcqNjQK48vqmcy22NhDPSgaBEsHEvlLOy4vUlzZwRSRXN2VrTwkTxiSJ4TwInl58Prr8PLLkJ9PXmg4z953ER+N3MDA0O3cZYfOB8C21YJlmy8qL990o47uAbW7miWyvXvG6tMa0ndC8iLYPx/2L4B0RzujCtAU1jeBbyJhpi2C3daBwHWQ3J3w5VY6HoMB4dC3M9Sp4/pwy0OKSkIIzzt2jNyHhhM4fhoAf/WEW7+CffW6cu3qZoxet4Km+f/gl5978jEhQEQQRNSHiASIuAzC+4GlhJHntIbcI5C1BzITIWsHZO6GrCTITIWMQ5B+zKzrB9QBYsLIi+zIjwWd+aLAyh81dmGLWARql1mtsAlXFN7GG0E305DaLtw5riWNBUPyhBBeKC0NnnwSvvoKgNRGfuz9vID4XuarOsUeyp5jfWi7I4zAzYsg/R/zOL9QqNXFFJjqdIeojuDrpD7IhTlwaC2kroCURZC6FI6nnry/OhAFBZEB/B5djS+rH2a+pZBM6kD+QPyXD6Ljpo4Mqaa4orv3NhCKkzxhSJ4Qwgvt2AH33gu//QZAUZduvHLHvbzebQENYydwlyWbIUVQZS8UbAvFN8WCSj9uHmvxhch2J4tMtbtCiBN+09ttkLYJ9i+E5Lno/X+jsjPMfQFANBTUgdXRMD4SZurapFgGQuEAQtd1IT7VwoAQGNwaalSveDjuIEUlIYT3+PFHGD4cDh2iINiPUWOr88bdKaB8gARqpLSm67oqDDqSQg8SiSr8B5/0DDO1KIAVqBEENaLBZofMY5CVDZn5YDvtO0xhzoAKdSxRURTFdGWef0feydAsqrKRwqoLQSWb1W3ViMrpQV//ntzq14MLiEe5szuei0hjwZA8IYQXW7AA7r4btm4F4K9LE5j63jF6NfmHvpiv8nR7IIeONCPyUA2qpOSjkpPgyG7zeB8/iEwwBabo7lD7AggoNvWm1lCUY8a1yD8GeUch77S/OSlwcCmk7TDjXYAjd4A9SpFSsyp/Vgvkl6AMllizMIcfoiDnemptHsQ1h7pwb1MLcY28o6tCWUieMCRPCOGltIbJk+GRR+DgQbBa4fHH2X7b49x1ZD5rYz/n+up/MURBB60IytVwAPIP+GE5EID1YA6q
qMg8V5V6p57JZLGCrRDsxZbi120F/7us89IoPPATPvvX4pNfYJ4vFIiGzDqwPBrmVoPFRLJWxVNEO/wO96fVrg5c5acY3hiinDwrm7tIUUkI4V3S0swRh6lTAci8pAsvjEtgWu31JKmV2C0nzlaKhMLOBPyTwHV7Ari9aBcdrasJytiJOnIMfC0Q4g+hIRASDqER2ENrkxtSl4PB0Wz2qcpWm2aLrZBN9nQ2+68gN3ghqEPm6QujqH6sJxfae/BoRA86+8Rhwc2jo7qBNBYMyRNCeLn8fBg7Fl580VyuVo2dz4zgvp55BDf5gstD9tIaiMOcyApALuTvD8a6Pxyf/YVw8LAZtwIF1ZoB2lE4OmoGAD8TBToQVAQQBVlRfqysGcj8kBz+thSyGsjBD2gJBfGEHI6naXp77vbtwLBYH3wreeqQPGFInhDCy6Wnw6hR8PHHptBUvz58+CFcfjnfb9/H8/nTSApfQ2yNFXTy30UnZaMT0KwILIeBA5B7wB/fA2DNyi9fDNWAaDhcBxbV8ePnsAYsogNb7d3xSW1FrZQ4WtvC6FkN+tWFZn5uHTHWZaSoJITwTlOmmOLS0aMQFgYffEDhkEFsYCM/sYyf8pexWS0j12+H4wEW0K2xHO1MzYMd8LFosoNSyQtKpTAgFZt/KlhTQKUC2f9+vfy6BKf0pP2xHjwe0ZMro2PPiTORzkYaC4bkCSEqiR074J574PffzfUePeCTT1he05cPk1eywLoGn5rLaFJtHXE+WcQBcRrisFC1yA4pwH7QqT5gtaMCNPhjuiQ4FlsAZPj7kRbgQ4q/Itm3kP2WQtYoWArsJRSIh6K2WA/EE3Mgnp4FzRkY48cl9cHnHEsdkicMyRNCVBLLl5uzW9etM9evuw7eeQeiowEzHNPvawv54vB2loZsILfuChJi/qaT/1Y6kUEnDZGZwGHH8/kAFsfiA9kWyLRAhkWR4eNLuiWQ45YgDvpWZZF/BxYf6Uvqlt7UTatJWx/oHQHXNIPaThwK1ttIUUkI4b1SU+Guu2DWLHN9yBBzxKFKlf+tkkYav+sVjM9cxjKWcjR4Odon8+RzFFXFpyAKa24tAnOjCM6JIjw3ioi8KGIKa9GwKIo4Sy0uaVODsDA3b58XkMaCIXlCiEpEa5g0yXR1OHwYfH3hqadg5EgINGMnpR7UjF+XxJSitWypuZbc+muoVX01cZYDxAGxQCZwEDjk+GsuWzhKdTQ1gQighvlrrwnJLQnfEU/8sQb0rW5hUBzERHpkD7iV5AlD8oQQlUhREbz/Pjz7LGRnQ0iIOdP1vvtM97jTVl23Dmavhp9yjrEpegO12v1FizpryCkM43huFOkZdUg/VpeMw/WwH6lFUGYNQgp8CdVmrO1wH4j0hV5R0Cfee2ZlcxcpKgkhvJvW8OWX8OCDkJMDjRrBd99Bhw4lrm7Dxj/8gz/+RBJJAG6Y1aESk8aCIXlCiEro6FEYMQI+/9xcb9rUDOrdpcu/Vk1NhT8WwqTkNJbUXEd67BbID8KSUYOArAhCciKoklODaoVVqW61UMMPIgIgKghqh0BMVWjfBoKD3byNXkDyhCF5QohKKCkJHnoIfvjBXO/Y0bQrWrQ440PsdkhMNIWm4GCoVg3Cw83fatX+d+xCFCNFJSFE5bB1KwwebL7hrVZ46SV4/HGwVPLBKjxMGguG5AkhKrHFi80kD4mJ5tDwo4/C//3ff/7yT08Hf39pHJSG5AlD8oQQldjs2eYspaQkc3brs8+agxK+vp6O7JxQ2jwhrTYhhGc1awbLlsHDD5vzVJ96Ci69FFJSPB2ZEEIIT+rWDdasMQ0EpeDNN6FtW1iy5IwPqVpVCkpCCHHe6NcPNm0yYy0VFsLo0abXw9q1no7svCJFJSGE5/n7w9tvw08/QY0a8Ntv0Lo1zJ3r6ciEEEJ4UkAAvPKKOfgQFwfbt0P37mbcpZwcT0cnhBDC06pUMTPD/fEHNGwI69ebwtIzz5hZRYXL
SVFJCOE9rrgCNmyAPn0gLc1cf/hhSQhCCHG+69DBnLU0cqTpHv3OO9CmDSxa5OnIhBBCeINevUw74qGHzABKL70E8fFm1jjhUlJUEkJ4l1q1YP58eO01M8bSu+9C585m7CUhhBDnL39/00hYtgxatoSdO6FnT9OAyM72dHRCCCE8LTjYHHRYtAiaNDFj8nXtasZrlbNbXUaKSkII72OxwJNPmnEzGjUyg3i3b29mdajEkwsIIYRwgoQEWLXKdG2wWOC990yX6QULPB2ZEEIIb3DBBab98NRT5vqbb5qzWxcu9Gxc5ygpKgkhvFfHjqa7w003maMLt98O114LycmejkwIIYQn+fubmeBWrDAFpV274MIL4YEHICvL09EJIYTwtMBAePXVf5/dev/9kJHh6ejOKVJUEkJ4typVYMIE+PZbCA2FmTOheXNzZNpm83R0QgghPKldO1i50sz4Y7XCBx+YLg/ffmvG1BBCCHF+69ABVq+G554zeeLDD6FxY9MDQvKEU0hRSQhROdx8M2zZAtdcY45CP/SQGWtpzRpPR+Y6Bw7IOCFCCHE2fn7wwgumuNShA6SkwLBh0KWLDNAqhBDC5Innnzddp7t2hUOHTA+Ijh3NcBuiQtxWVFJKXaaU2qaU2qmUGlHC/T2UUmuUUkVKqQHuiksIUYlER8OMGeZspehokxg6dIBHH6383R2ys00/77FjYcAAiImBOnXgl188HZnbSJ4QQlRI27amm8PXX0NUlOka17kzDB1qivTnktRU+PlneOUVOHrU09G4jeQJIUSFtGkDixfDxInmd/bq1dCtG9x447k7vEZBgctfwi1FJaWUD/Ah0BeIAwYrpeJOW20fcAswyR0xCSEqsf79zVlLjzxirr/9NsTFwaxZno2rtOx22LzZnHZ7112mIRQWZvp5P/kkTJ9uEluVKpCW5ulo3ULyhBDCKSwWc5bS9u3w9NPm6Ngtkl4AABYrSURBVPT48aZL3MsvQ16epyMsG7vdjAMydSqMGgWXX25mSa1VC/r2hZEjzQGW84DkCSGEUyhlikjbtsGzz0JAAHz3HTRtasbqy831dITld+gQ/PorvPGG6eXRujWEhLj84IPVpc9+Ukdgp9Z6F4BSajLQH9hyYgWt9R7HfdKxUQhxdqGh8NZbMGQIDB9ujjT0728G8n7vPXP0wVukp8Pff5vTa5ctM100MjNPXcfHxxSXOnU6uTRrZhpI5wfJE0II5wkNNUWkO+4wU0n/8IMpynz+ufmxfe21pmHhTQoLzQGHtWvNrEUn/p6eL8AcdGjbFuLjTYHp/CB5QgjhPMHBMGYM3HabOag7daoZn++LL072HPC2PHFCUZEpiq1ff+qSmvrvdZUy63bp4rJw3FVUqgMkFbueDHQqzxMppYYDwwHq1q1b8ciEEJVbu3ZmzIwPPzQNhhkzTIX+pZfg3ntNscbdkpLMqbWLF8OiRbBpE2h96joxMacWkNq3h6Ag98fqPSRPCCGcr2FDkxf++MOMxbdpk2koXHghvPuuOYrrKUeOwNKlZvn7b9NdLyfn3+vVqmWKR8WXBg28t7HjOpInhBDOV78+fP89LFhg8sT69TBwoOlB8M47poDvSScOOKxcaZY1a0wuy8//97ohIaaLX/GlZUtTQHMhdxWVSsp6uoTbzkpr/RnwGUBCQkK5nkMIcY7x8YEHHzSDeD/4oBlz6cEHzew/994LCQlmxjirC77y7HZITDTFoxOFpL17T13Hz8+M/dStmxnfo1On8+nIcmlJnhBCuE7v3ubMn88/N90d/vrLFGfuvBNuuME0GqpWdd3rn8gVJwpIf/9tjhyfrlEjc5DhRPGobVuIjHRdXJWL5AkhhOv07Gl6PowbZw5UL1hgvo9vuw2uvtpcjopybQx2O+zYcbKAtHKlyV0ldd2uX//fBaQGDTzSy8FdRaVkIKbY9WjgHBsxUQjhcTExpovDzJnwwANmnInbbjP3BQWZH+gJCSeXJk1K/8Wb
l2eKRbt2mWX3bti61TQQTu+nHBYGF1xgikjdu5vXCghw7raeeyRPCCFcy2qFe+6BQYPMbHEffgiffmoWMGc1tWtnlvh487dmzbK9RmGhGRQ8KcksO3aYPLFsmekKXVxAgJl5qGtX0y2hSxeIiHDOtp6bJE8IIVzLx8eMdzpwoOka98EHpsg0bpy5v3ZtU1xKSDB/y1tostnMuKkpKfDPPycLSKtWQUbGv9ePjTUHqDt0MK/Zpo1pb3gJdxWVVgKNlVINgP3ADcCNbnptIcT55uqr4aKLTAJYutR8Qe/ebcY0Kj5taGioaTScKDK1bg3Hjp0sHJ0oHu3aBfv3n/n16tQxxaPu3U0hqUULz3S7q9wkTwgh3KNaNdP17a67TINh1SrYsOHk9/60aSfXrV371EJTs2bmQMKJolFy8qmXU1P/3d35hDp1zAGHrl3N0qaNOZNVlJbkCSGEe4SHm4mAhg83YyytWmW6nR04YJbZs0+uW7v2qUWmJk1M9+aUlJNLauqp1w8dMoWlktSpc7KA1KGDee7wcPdsdzkpfabE5+wXUupy4B3AB/hSa/2SUmoMsEprPUsp1QH4AQgH8oBUrXWL/3rOhIQEveo8mfFCCFFBR46YU1pXrTq5JCWd/XEn+PhAvXrmtNKGDU8unTpB3bpeN7aFUmq11jrB03GUheQJIYTHFBaa7mlr15qGw9q1ZsnKKtvzKGW6N8fEQHS0yRsnzkaKiTn7491I8oQheUIIUSonZuM80Z5Yvdrki5ImUyiNGjXMWU4xMaYYdaKI5EVDZJQ2T7itqOQKkgSEEBVy8OCpRabNm03Xg4YNTy0eNWhgvvBdMSaTi1TGxoIrSJ4QQpTbiQZE8ULTjh0mT0RHm7xwonh04nKtWuDr6+nIS0XyhCF5QghRbsULTSeKTXv2mDxRq9bJJSrq1OuRkZXiTNXS5onK00ISQghni4yEK64wixBCCFGcxWK6MTRpYsZhEkIIIYornicGD/Z0NB7j/qHBhRBCCCGEEEIIIUSlJ0UlIYQQQgghhBBCCFFmUlQSQgghhBBCCCGEEGUmRSUhhBBCCCGEEEIIUWZSVBJCCCGEEEIIIYQQZSZFJSGEEEIIIYQQQghRZlJUEkIIIYQQQgghhBBlJkUlIYQQQgghhBBCCFFmUlQSQgghhBBCCCGEEGWmtNaejqHclFKHgb3lfHgNIM2J4TibxFd+3hwbSHwV5c3xeVNs9bTWEZ4OwtPKkCe86X93Jt4eo7fHB94fo7fHBxKjM3hLfJInqHB7wp285X3jjWTflEz2S8lkv5zZ6fumVHmiUheVKkIptUprneDpOM5E4is/b44NJL6K8ub4vDk28d8qw//O22P09vjA+2P09vhAYnQGb49PeCd535yZ7JuSyX4pmeyXMyvvvpHub0IIIYQQQgghhBCizKSoJIQQQgghhBBCCCHK7HwuKn3m6QDOQuIrP2+ODSS+ivLm+Lw5NvHfKsP/zttj9Pb4wPtj9Pb4QGJ0Bm+PT3gned+cmeybksl+KZnslzMr1745b8dUEkIIIYQQQgghhBDldz6fqSSEEEIIIYQQQgghykmKSkIIIYQQQgghhBCizM6bopJS6nql1GallF0pVeI0eUqpGKXUn0qpRMe6D3lTfI71LlNKbVNK7VRKjXBjfNWUUr8qpXY4/oafYb3XHduRqJR6TymlvCi2ukqpXxyxbVFK1Xd1bGWJz7FuFaXUfqXUB+6IrbTxKaXaKqWWOv63G5RSg1wc03++z5VS/kqpKY77l7vrf1mG+B51vMc2KKV+V0rVc2d8omRl+J7do5TaqJRap5RaVez2Un+WXRXff+UppdTzju+PdY7lcmfGV9oYHeuV+BlRSjVwfGZ3OD7Dfi6IsTTfab2K7ad1Sqk8pdTVjvu+VkrtLnZfW3fH51jPViyGWcVu95Z9eMa84Kp9WJHcoJR62nH7NqXUpc6Ip5wxnjE/nOl/Ls5PFc1Z57KK5qJzVUXzy7mm
IjnjXFaK/XKLUupwsffIHWd9Uq31ebEAzYGmwF9AwhnWqQW0c1wOBbYDcV4Unw/wD9AQ8APWuzG+14ERjssjgNdKWKcrsMQRpw+wFLjQG2Jz3PcXcLHjcggQ5C37rti67wKTgA/cEVsZ/rdNgMaOy7WBFKCqi+I56/scuBf4xHH5BmCKG/dXaeLrdeL9Bdzjzvhk+c//3Vm/Zx3r7QFqlHB7qT/Lrorvv/IU8DzwuKf34X99RoDvgRsclz8B7nFBjGX6PwHVgKPFPrNfAwNcuA9Lm7OyznC7V+zD/8oLrtiHFckNQJxjfX+ggeN5fFyw3yqUH870P5fl/FxK833rWG8PJeSsc3mpaC46V5eK5pdzaalIzjiXl1Lul1soY1v0vDlTSWudqLXedpZ1UrTWaxyXM4FEoI63xAd0BHZqrXdprQuAyUB/10cHjtf5xnH5G+DqEtbRQADmDeoP+AIHvSE2pVQcYNVa/wqgtc7SWue4IbZSxQeglGoPRAK/uCmuE84an9Z6u9Z6h+PyAeAQEOGieErzPi8e8zTgIqVcf1ZcaePTWv9Z7P21DIh2U2ziP5Tye/a/lOqzXF7enqccr1nuXOX4jPbGfGbBBfvQoaz/pwHAPG/LCSXxpn3o5rwAFcsN/YHJWut8rfVuYKfj+dweo+QHUVpOyFnnrErQbvIUl/5OqWS8vT3hKS75XJw3RaWycpz+Fg8s92wkp6gDJBW7noz7GhORWusUMI0aoObpK2itlwJ/Yo5WpgDztdaJ3hAb5ohqulJqhlJqrVJqrFLKxw2xlSo+pZQFeBN4wk0xFVea/fc/SqmOmMLhPy6KpzTv8/+to7UuAo4D1V0Uz+nK+jm8HZjn0oiEs2ngF6XUaqXU8GK3l+mz4mpnyFP3O7rVfHmm097d4EyfkepAuuMzW/x2Zyvr/+kG4LvTbnvJsR/fVkr5eyi+AKXUKqXUMuXomoeX7sMz5AVn78OK5AZ3/X6qaH4o6X8uxNmcKWed7zzZbvKUiuSXc423tyc8pbSfi+scOXyaUirmbE9qdVZ03kAp9RsQVcJdo7TWP5bheUKA6cDDWusML4qvpMqprlhUxZ78P+Ir5eNjMaejnjjq9qtSqofWeqGnY8O817tjGmD7gCmYU/u+qGhsTorvXmCu1jrJFQVyJ8R34nlqAeOBYVpruzNiK+llSrjt9Pe5Sz8LZ1Hq11ZKDQESgJ4ujUj8j5PywAVa6wNKqZqY77Gtzvgec2J8Z8pTHwP/h3k//h+mUH2bB2I802fEaZ9bJ3+ntQLmF7v5aSAVUyT5DHgKGOOB+Oo63ocNgT+UUhuBkn6TeMM+PD0vVHgflvRSJdxW2tzgrpxR0fzwr/+51tpVB3CEF/D2nOVJ3t5u8hRX5Zdz8LvG29sTnlKabZ4NfKe1zldK3Y05m6v3fz3pOVVU0lr3qehzKKV8MT/UJ2qtZ1Q8qpOcEF8yULxSGA0cqOBz/s9/xaeUOqiUqqW1TnH8gDxUwmrXAMu01lmOx8wDOgMVTmxOiC0ZWKu13uV4zExHbE4pKjkhvi5Ad6XUvZjxnvyUUllaa6cMKuiE+FBKVQHmAM9orZc5I64zKM37/MQ6yUopKxCGGRPFHUr1OVRK9cEk+J5a63w3xXbec0YecHTlQWt9SCn1A+ZU4YVAqT4rro7vTHlKa32w2DqfAz+V5/ldmKvSgKpKKavjiGC5c5gzvtMcBgI/aK0Liz13iuNivlLqK+BxT8RX7H24Syn1F+agyHS8aB+eKS84Yx+WoCK5waW/n8oY4xnzwxn+5+daQ08U4+KcVal5e7vJU1yYX8617xpvb094yln3i9b6SLGrnwOvne1JpftbMY4+lF8AiVrrtzwdTwlWAo2VmfnFD3PKvrtG7J8FDHNcHgaUdIRgH9BTKWV1NHp6Ysb78IbYVgLhSqkT4z30Bra4ITYoRXxa65u01nW11vUxP76/dVZByRnxOd5vPzjimurieErzPi8e8wDgD621
u44snDU+pVQ88Clwlda6zIUH4TlKqWClVOiJy8AlwCbH3aX5rnF1fGfMU44fkCdcw8m43a3Ez4jjM/on5jMLrtuHZfk/Dea0rm8n9qNjX1+N8/djab5zw090GVNK1QAuALZ40z78r7zgon1YkdwwC7hBmZl+GgCNgRVOiKnMMZ4pP5zpf+6CGMU55Cw563znyXaTp5Q7v7gtQvfx9vaEp5QmTxX/PXkVpWnPay8YhdwdC+YHdjKQjxk8er7j9tqYbkcA3TCnf20A1jmWy70lPsf1/2/v3mLmqso4jD9/aSkgkXAyIgINVEKpF0g8Ekm4aD1UDhoEtYVEosZDvKsSY4yJQILGxBCjxgsTkLZBSzQGajFQpA2K2CpaS1GoNSUNUDmU0hIItvh6sVfT+UoHZkq/Y59f8mb23mvvtVfXzOz369qHmU/3az+b6C7/HKv+Ox64G9jYXo9ry98F/LRNH0b3h9I/6A5O358obWvz89p7u57ul2kOn0jt61n/M4ztr78N8t5eAezq+V78DThnFNv0is853a0TF7fpI4Bb6R62ugY4faz6a8D2rWzf4z19ddtYts/o+74NkgdOp/sljHXAht7jbL/vyhi3r2+eorsFaX0ruw04aTz6sM3vN1e1/l3Tvru3AjNGoY2D5oSZwGPAG/bZ/netHx8ElgBHj3X76H5NdX37HK4HPjvR+pBXyQuj1Yf7+1wxYG6guzJoE/Aw8JGD3WdDtHG/+eHV3nPj0IxBjre8Ss6ayjFI37T5cfl/0zj2y+vKL1MtXk/OmMoxQL9c344n6+hOZJ31WnWmbShJkiRJkiQNzNvfJEmSJEmSNDQHlSRJkiRJkjQ0B5UkSZIkSZI0NAeVJEmSJEmSNDQHlSRJkiRJkjQ0B5U0YSXZnGTueLdDkjQxmSckSf2YI6Sx4aCSJEmSJEmShuagkiRJkiRJkobmoJImhSQzktyQ5PEWNySZ0VN+dZInWtnnklSSWX3qWpXkuiT3JXk+ye1Jjk+yNMmOJGuTzOxZ/7y27Ln2et4+dV2b5A9Jdia5M8kJPeXva/vZnmRdkgva8suS/GWfdi1K8us2fVOSHyX5Tav3T0nO6Fn3rCR3JdmW5OEkl/eUzU/yUNvusSRfbctPSLK8tWVbknuTeAyQNCWYJ8wTktSPOcIcoVFUVYYxIQPYDMxt09cA9wNvBk4E7gOubWUfBrYCc4CjgMVAAbP61LsK+BdwBnAM8BDwCDAXmAbcDNzY1j0OeBa4spV9us0f31PXJuBM4Mg2/51WdjLwDDCfbgB3Xps/EZgBbANm97Trr8ClbfqmVv6ett+lwM9b2RuBLcBVrexc4GlgTit/Aji/TR8LnNumrwd+AkxvcT6Q8X6fDcMwDjTME+YJwzCMfmGOMEcYYxOOLGqyWAhcU1VPVtVTwLfpDs4Al9MduDdU1Qut7LXcWFWbquo54A5gU1WtrKrdwK3AO9t6HwU2VtXiqtpdVbcA/wQu2qeuR6rqRWAZcE5bfgWwoqpWVNX/quou4M/A/Kp6CfhFW4ckc4CZwPKeen9VVWtam5b21HshsLmqbmxtegD4JfCJVr4LODvJm6rq2Va+Z/lJwGlVtauq7q2qGqCvJGkyME+YJySpH3OEOUKjxEElTRZvBR7tmX+0LdtTtqWnrHe6n//0TL+4n/mj++x3z75P7pnf2jP9Qs+2pwGXtUtEtyfZDnyA7mAM8DNgQZLQJbVlLUEMUu9796l3IfCWVn4p3RmNR5OsTvL+tvx7dGdV7kzy7yRfR5KmDvOEeUKS+jFHmCM0SqaNdwOkAT1OdwDc0OZPbcugu0TzbT3rnjIK++11KvDbAbbdAiyuqs/vr7Cq7k/yX7pLRxe0GMQWYHVVzetT71rgkiTTga/QnfE4pap2AouARe1sxj1J1lbV3QPuV5ImMvPEyHrNE5K0lzliZL3mCB00XqmkyeIW4JtJTmwPr/sWsKSVLQOuSjI7yVGt7GBZAZyZZEGSaUk+
CZzNyEtL+1kCXJTkQ0kOS3JEkguS9Catm4EfArur6vcDtml5a9OVSaa3eHf79x+eZGGSY6pqF7ADeBkgyYVJZrWzGXuWvzzgPiVpojNP7GWekKSRzBF7mSN0UDmopMniOrp7iP8OrAceaMuoqjuAHwD30F2S+ce2zUuvrGY4VfUM3X3Hi+gejHc1cGFVPT3AtluAS4BvAE/RnRX4GiO/d4uBd7TXQdu0E/gg8Cm6sx9bge/SPbAPustfNyfZAXyRdq818HZgJfA8XR/9uKpWDbpfSZrgzBN76zVPSNJI5oi99ZojdFDFZ2tpqkkyG3gQmNEeTDdhJTkSeJLuVxU2jnd7JOlQYJ6QJPVjjpCG45VKmhKSfLxdrnks3Uj77RM9CTRfAtaaBCRpdJknJEn9mCOkA+eDujVVfAG4ie6+3tXAl8e1NQNIshkI8LFxbookHQrME5KkfswR0gHy9jdJkiRJkiQNzdvfJEmSJEmSNDQHlSRJkiRJkjQ0B5UkSZIkSZI0NAeVJEmSJEmSNDQHlSRJkiRJkjS0/wMzpmJNh+h2mAAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "<matplotlib.figure.Figure at 0x1bf5e6a7b38>"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "T=1 week: [0.00358235 0.00276278 0.00299642 0.00420163 0.00385517]\n",
+ "T=1 month: [0.00309864 0.00467375 0.0053564 0.00473218 0.00695323]\n",
+ "T=2 months: [0.00405539 0.00635089 0.00496945 0.00623992 0.00868845]\n",
+ "T=3 months: [0.00511969 0.00558735 0.00561834 0.0090895 0.00732372]\n",
+ "T=6 months: [0.00799324 0.00743358 0.00786481 0.00704351 0.00927781]\n",
+ "T=9 months: [0.00802688 0.00870764 0.00731743 0.0061944 0.01133724]\n",
+ "T=1 year: [0.00874137 0.00945326 0.005941 0.01162977 0.01082401]\n",
+ "T=1.5 years: [0.00871327 0.00948823 0.00815824 0.00953475 0.01558524]\n",
+ "T=2 years: [0.01055309 0.01022309 0.01102695 0.01279537 0.01229152]\n"
+ ]
+ }
+ ],
+ "source": [
+ "n=[10,20,50,100,500]\n",
+ "n=np.asarray(n)\n",
+ "r=1+10*pow(n,-0.9)\n",
+ "maturities=[7/365,1/12,1/6,1/4,1/2,3/4,1,1.5,2]\n",
+ "titles=['1 week','1 month','2 months','3 months','6 months','9 months','1 year','1.5 years','2 years']\n",
+ "colors=['b','#00FFFF','#00FF00','#FFFF00','#FF8C00']\n",
+ "fig,ax=plt.subplots(3,3,figsize=(20,15))\n",
+ "MSE=np.zeros(shape=(9,5))\n",
+ "for ix in range(3):\n",
+ " for iy in range(3):\n",
+ " i_row=ix*3+iy\n",
+ " log_moneyness=np.asarray(data_log_moneyness.row_values(i_row))\n",
+ " impvol_rough=np.asarray(data_impvol.row_values(i_row))\n",
+ " strikes=S0*np.exp(log_moneyness)\n",
+ " plt.subplot(3,3,i_row+1)\n",
+ " for i_n in range(5):\n",
+ " LHM=LF.LiftedHestonMc(vov=ν, v0=V0, rho=ρ, theta=θ, lamda=λ, H=H,n=n[i_n],rn=r[i_n])\n",
+ " LHM.set_mc_params(n_path=1000, rn_seed=1234)\n",
+ " prices_lifted=LHM.mc_price(strike=strikes, spot=S0, texp=maturities[i_row], N=300, cp=1)\n",
+ " BSM_model=pf.Bsm(0.3)\n",
+ " impvol_lifted=BSM_model._impvol_newton(prices_lifted,strikes,S0,maturities[i_row]) \n",
+ " plt.plot(log_moneyness,impvol_lifted,color=colors[i_n])\n",
+ " MSE[i_row,i_n]=np.mean(np.power(impvol_lifted-impvol_rough,2))\n",
+ " plt.plot(log_moneyness,impvol_rough,color='r',linewidth=2)\n",
+ " plt.legend(['n=10','n=20','n=50','n=100','n=500'])\n",
+ " plt.xlabel('log moneyness',fontsize=12)\n",
+ " plt.ylabel('implied volatility',fontsize=12)\n",
+ " plt.title('T='+titles[i_row],fontsize=12)\n",
+ "plt.show()\n",
+ "for i in range(9):\n",
+ " print(\"T=\"+titles[i]+\": \",MSE[i,:])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The result in the paper (Figure 2) is shown below:"
+ ]
+ },
+ {
+ "attachments": {
+ "figure2.JPG": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQEAYABgAAD/4RDcRXhpZgAATU0AKgAAAAgABAE7AAIAAAAGAAAISodpAAQAAAABAAAIUJydAAEAAAAMAAAQyOocAAcAAAgMAAAAPgAAAAAc6gAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGFwcGxlAAAFkAMAAgAAABQAABCekAQAAgAAABQAABCykpEAAgAAAAMzNwAAkpIAAgAAAAMzNwAA6hwABwAACAwAAAiSAAAAABzqAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjAyMjowNDoyMiAwMzo1MzoyOQAyMDIyOjA0OjIyIDAzOjUzOjI5AAAAYQBwAHAAbABlAAAA/+ELGGh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8APD94cGFja2V0IGJlZ2luPSfvu78nIGlkPSdXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQnPz4NCjx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iPjxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDo
vL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iLz48cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0idXVpZDpmYWY1YmRkNS1iYTNkLTExZGEtYWQzMS1kMzNkNzUxODJmMWIiIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyI+PHhtcDpDcmVhdGVEYXRlPjIwMjItMDQtMjJUMDM6NTM6MjkuMzY3PC94bXA6Q3JlYXRlRGF0ZT48L3JkZjpEZXNjcmlwdGlvbj48cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0idXVpZDpmYWY1YmRkNS1iYTNkLTExZGEtYWQzMS1kMzNkNzUxODJmMWIiIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyI+PGRjOmNyZWF0b3I+PHJkZjpTZXEgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj48cmRmOmxpPmFwcGxlPC9yZGY6bGk+PC9yZGY6U2VxPg0KCQkJPC9kYzpjcmVhdG9yPjwvcmRmOkRlc2NyaXB0aW9uPjwvcmRmOlJERj48L3g6eG1wbWV0YT4NCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0ndyc/Pv/bAEMABwUFBgUEBwYFBggHBwgKEQsKCQkKFQ8QDBEYFRoZGBUYFxseJyEbHSUdFxgiLiIlKCkrLCsaIC8zLyoyJyorKv/bAEMBBwgICgkKFAsLFCocGBwqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKv/AABEIAVEDGQMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiIm
KkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/APpGiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiqt7qlhpvl/2jfW1p5hwnnzKm8+2TzRd6pp9hLFHfX1tbSTHEaTTKhf6AnnrQBaoopCwBAJAJ4HvQAtFFFABRUFtfWl5bmezuoZ4QSDJFIGUEdRkcVPQAUUVC13bJdpavcRLcSKXSEuA7KOpC9SKAJqKKKACiiigAoqGa9tbeeGC4uYYpZyRFG8gVpCOoUHr+FTUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUVUtNV07UFlNhf2t0If9YYZlfZ9cHjoadZanYamjPpt7b3aocM0EqyBT6HB4oAs0UUgIZQVIIIyCO9AC0UVDNe2ttNDDcXMMUs5KxI8gVpD6KD1/CgCaikDA5wQcHBx2paACio7i5gtLd57uaOCGMZeSRgqqPcngU9HWRFdGDKwyrA5BHrQAtFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB4Z+0ZpDa9f+E9Oiz5szXhjx/fWIMv6gV554k8QN8QNY8Pa9uLRaQNLtG54+0SuWk/H5MV9Ma54Q0/xBr+iavey3CT6LK8tukTKEcuu0hwVJIx6EVzFn8EPC9hosmmWs+opDJqqaqW82Pd5ifdTOzGwemM+9FP3Wm+9/6+Tf3IJ+8rLtb9fzS+85n4n/EPxBofiDVIdB8SW8X9nWyyrp1npL3khOMk3EhAWJfoTx1qiNR8R+IfjN4J1KPVYbVb3QheiD7PuSNSoMqAFurc4bqPeu51v4P6RrevatqLatrFlFrUYTULKzuQkVwQuAx+Un8M4P0yKmf4Vact/wCG7yz1fVLW48P2wtIZI5E3TwjHySfJg5xg4A4NKK0V9/8AgNf5BPW9v61X/BOL/wCFn+Jf+FFjxP8AaYP7TOr/AGTf5C7fL87Zjb0zjvWrL4h8ca58YNY8OaHqdjZaXpaWtxM01sHdkZQWRT6nJ5PTAq1J8B9BltbmxbWtdXTJbv7ZFYJdKIoJd24lQUOfTnPB9ea7DTvB2n6Z4w1bxJBNctearDFDOjspjURjA2gLkH1yTT7X/rRfrcJbu39a/wCR8/6le6pf/AjTbjT57bTY28StDLDb24VZD5uEJAI6EZI713WpfEfxB4IvPFWleKbyC9u7PSorzSp0txEJ2b92RtHX94Rx6V08vwf0N/AB8Jx3uox2wvTfR3PmJ50cu7dkHaBjPbFcr4m8D6j4v+JXhaxudIvpNO8OnN3rd9Ih+3INrKoC/eJYc8DvQtfd7/lypX+TQ3u5dv8A5Jv8Voep+GDqj+FdNfxBIsmpyW6PcsqBAHIyQAOmM4/CvJdI0PVNG/aX099d1qXV7280i4meRkCJEu/Cxoo6AAficmvb6wZ/CFhcePLXxY81yL+1s2s0jDL5RRjkkjbnP4/hTv8AvOb1/FMm3ucvp+aMj4j+JdT8JJoWqWkqLph1OO31NXQH9zJ
8oYHtg4/OvMYfjZ4kurTXYYhCt7eXsKeHcwj54pJ3jyR/FjYete2eKfDVj4v8M3uhat5gtLxNjtEQHXBBBUkEAggdjXNwfCDwzb6p4Zv4/tfm+GoPItAXTbIBkgyfLyQSTxjk0o7+9tf8P6X4sqW2m/6/0/wRw3if4l+KF8W67pmj6xBp48PwxqsLaU902pz7NzAsoxGD0HT/AAdrPxX13Vda0rS9Mvk8LmTR01G6lm0yS7cytwIQgBKjPViK7fxF8J9N17XrzVbfWNX0eTUY1i1GPTrgIl2q8DcCpwccZHan658K9L1S6sbzTNT1TQr6ytPsS3WnThXkg/uPuB3eueuaWvKr7/8AAf62/wAu5pfT+tv+D/W3k3iz4h30lt8OPFms6VN/aFrcXiy2kUTIZZFUKNqnkBjg+2a9j+GWrap4h8D2mt61qVvezagDMq2sQSO3XP8Aq/UkdCT3pkPww0OB/DTRTXw/4Rt5Htd0wbzWcYYyllJb14IrR8KeDtP8GxX8Gjz3X2W8umuhbSupjt2b7wjAUEKfQk1ask1/X9P8LeZLu7f13/L+tjoKKKKkYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAU1/9W30NOoIypB70pK6aBHx94T1+XwLo2uXEbMB4k066it+et1HOYwB77ZM11XgO7uvh74L8U2VjrFlpFzDrkVobm7geds+WNwjiUEu5IOB0r1I/BDwu+maPYyzahJHo9+99AzSx7nZ2DMj/JymQOBg+9P1P4NaFqIv5E1DVLW7u9VGrJdQzIHt5wMfJ8v3cdjk+9NaL5Jfc0/8/uQ3rK/m3+DX+X3s8v1Txx4q8R/DLx5p95qsjNo/kMt3Npps5p4ZM5Qx5+Qngg+n1rufB/iLxFY+NfC/hXUtQgu7W48PG8kZLYRksGwg6k8Lge+M1swfB3RIrDxDbT6lq94fEUCRX0tzcK7sydJAdvDZPuPYVHJ8H7OT+xpx4l16LUdJha2S/inRZpIWOfLY7MYHQYHFNaP7vya/OzJeq/ryf+aOMuvin4uHw/k1OyntX1E+Km0qLzIBsMXIVSB74561peKD4l03XvANhr19p2oazdXt0v28WS4hOz5Ci8Yxxn1rpbf4N+Hbbw7DosV3qf2WHVxq6M0yM/mjopJTlf1966LXfB9h4h17Q9XvZrlLjRJnmt1iZQjswwQ4KkkcdiKlLb1X5L9bjfX5/m7fhY8K8FeIvF3hP4dHxKdVivNKj8QmPUIHtgXEZk2ySbySeSVx6V614K8U6n4q8a+KHSaM+H9NnSyswqDMkoXMjbupAPH41zuv+BD4N8D+INL8O2OseJIdfaRE04yx+XZu4Y7xwCBuxzkngfWut+F/hFvBPw703R7gKLtUMt0VOcyuctz3x0z7VUdteiX32X5W/EUt9Orf3X/W/wCBx37Quh6pqHgO/wBQGtS22k2Nsrvp8SAfaZjIAC7ddoB+7616VpKSyeD7JLaTypmsYxHIRna3ljBx35pnivwzZ+MPC95oOpyTxWt4oWR7dgrgBgeCQR29K0LeBdP02K3gDyLbwhEBI3MFGB+PFStIyT6/8Ep6uL9f0PG9N+KHiTVdL8PaHFJDF4puNbl0/UT5IIjihOZHC9B8pX9ayr34veKZ7/WNV0m+hFtpmom1g0EaVJK13EjBXczqMK3Uge359J8P/CF7e/F3X/Hmq6BPocVwgis7W6ZTIXIAklIUkDO39TW3e/BzR7rWbq6h1fWLKwvroXd5pVtchLeeUHO4jG4ZI5ANNX91v5+un6J/eJ21S/rf/NL5HI/Eb4k6/pmrXx8P+IobcWdmlwumWukPeSZK7j9okICwjHoScdRVi78b+OfEXijw3o3he9sNNbWPD8epTST2/mCJ85Yr19hg+tdPrPwd0fWNc1W+/tbWLGDWIlS/srO5CRXBVdqk/KTwO2cH6ZFaej/
DjSdF13SdWt7u+luNK0saXAJnQq0QOQWAQZb3BA9qI26/1o/1sDv0/rb/AIJ1cKyJbxrM/mSBQHcDG445OKfRRQGwUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFQ/aof7/wChqasigDSW5iZgFbJPTg0n2qH+/wDoapQ/69P94VHQBo/aof7/AOho+1Q/3/0NZ1FAGj9qh/v/AKGlFzEzAB+ScDg1m0+L/XJ/vCgC99qh/v8A6Gj7VD/f/Q1nUUAaP2qH+/8AoaPtUP8Af/Q1nUUAaQuYiQA/J9jQbqEHl/0NUI/9an+8Ka33j9aAND7VD/f/AENH2qH+/wDoazqKANH7VD/f/Q0ouoScB/0NZtOT76/WgDQN1CDgv+hpPtUP9/8AQ1Qb77fWm0AaP2qH+/8AoaPtUP8Af/Q1nUUAaQuoSeH/AENBuYgSC/I9jWcv3h9adJ/rX/3jQBe+1Q/3/wBDR9qh/v8A6Gs6igDR+1Q/3/0NL9qh/v8A6Gs2lHWgDRNzErEF+R14NJ9qh/v/AKGqMv8Arn/3jTKANH7VD/f/AENH2qH+/wDoazqKANH7VD/f/Q0rXMSsQWwR14NZtSTf69/940AXftUP9/8AQ0faof7/AOhrOooA0ftUP9/9DR9qh/v/AKGs6igDSNzEpIL8j2NJ9qh/v/oaoy/61vrTKANH7VD/AH/0NH2qH+/+hrOooA0ftUP9/wDQ1KCGUEdCMismtSH/AFKf7o/lQA+iiigAooooAKZJKkWN5xnpxT6qX3/LP8f6UAS/aof7/wChpftMW3du4zjoazak/wCXf/gX9KALv2qH+/8AoaPtUP8Af/Q1nUUAaP2qH+/+ho+1Q/3/ANDWdRQBpC5iPR+2ehpPtUP9/wDQ1Rj6t/un+VMoA0ftUP8Af/Q0faof7/6Gs6igDR+1Q/3/ANDS/aof7/6Gs2lH3h9aANE3MSsQX5BweDSfaof7/wChqjL/AK5/940ygDR+1Q/3/wBDR9qh/v8A6Gs6igDR+1Q/3/0NKbmIAZfqMjg1m09/ux/7v9TQBe+1Q/3/ANDR9qh/v/oazqKANH7VD/f/AENH2qH+/wDoazqKANL7TEVJ3cDrwaT7VD/f/Q1SX/UP/vD+tR0AaP2qH+/+ho+1Q/3/ANDWdRQBo/aof7/6GlFzETgP+hrNqvcX32SaJFheZ5MhVTr0/wDr0AbP2qH+/wDoaPtUP9/9DWF9su/+gXc/98n/AAo+2Xf/AEC7n/vk/wCFAG79qh/v/oaPtUP9/wDQ1hfbLv8A6Bdz/wB8n/Cj7Zd/9Au5/wC+T/hQBu/aof7/AOhpWuYlYgtgj2NYP2y7/wCgXc/98n/Cle+u3kLf2XcjP+yf8KANz7VD/f8A0NH2qH+/+hrC+2Xf/QLuf++T/hR9su/+gXc/98n/AAoA3ftUP9/9DR9qh/v/AKGsL7Zd/wDQLuf++T/hR9su/wDoF3P/AHyf8KAN43MQAJfg9ODSfaof7/6GsM3t2VUf2Xc8f7J/wpPtl3/0C7n/AL5P+FAG79qh/v8A6Gj7VD/f/Q1gSX9xFGXk064VF5LFSAP0q5FIJYUkUYDqGAPvQBrRypLnYc468U+qlj/y0/D+tW6ACiiigAooooAKKKKACsvYP+ei/r/hWpWRQBFqEslnpl1c28kfmwwvIm8MRuCkjIAyRn0rzHw18YomZLXxbAts33RfW6loj/vL1X9foK9WiIWZSxwAckntXJeIfAOheJ4zcCNbe5kGRdW2Pnz3YdG/n71pHl2kb0qsIXjOHMn8mvR/o0dPaXFrf2qXNldwXEEgykkTblYexFTbB/z0X9f8K8IuvDPjD4dXT3uizu9pnc7QAvC4/wBuM/d+vbsa63wz8YNN1FktfEUY0u6PAlzmBz/vfw/jx70SptK61Rt9WVVc2GfN5bSXy6+quj0rYP8Anov6/wCFOjQCVPnU/MPWoI5EljWSJ1dGGVZTkEe
oNSxf65P94VmcQbB/z0X9f8KNg/56L+v+FMooAfsH/PRf1/wo2D/nov6/4UyigCWNB5i/OvUetIyDcf3i9ff/AApI/wDWp/vCmt94/WgB2wf89F/X/CjYP+ei/r/hTKKAH7B/z0X9f8KVUG8fOvX3qOnJ99frQA5kG4/vF6+9JsH/AD0X9f8ACkb77fWm0AP2D/nov6/4UbB/z0X9f8KZRQBIqDcP3i9ff/ClkQeY3zr1PrUa/eH1p0n+tf8A3jQAbB/z0X9f8KNg/wCei/r/AIUyigB+wf8APRf1/wAKNg/56L+v+FMpR1oAkkQea/zr94+tN2D/AJ6L+v8AhRL/AK5/940ygB+wf89F/X/CjYP+ei/r/hTKKAH7B/z0X9f8KfMgMz/Oo+Y+tQ1JN/r3/wB40AJsH/PRf1/wo2D/AJ6L+v8AhTKKAH7B/wA9F/X/AAo2D/nov6/4UyigCWRAZG+dRz703YP+ei/r/hRL/rW+tMoAfsH/AD0X9f8ACjYP+ei/r/hTKKAH7B/z0X9f8K0Yv9Sn+6P5Vl1qQ/6lP90fyoAfRRRQAUUUUAFVL7/ln+P9Kt1Uvv8Aln+P9KAKdebfFqXUraXR7rSL+4tJoTKQYnwv8PUd/wAeK9JrP1uKSawVDYRX1sWPmxt97pwV/WtKVudcyGqlSl79N2a8r/gebeGvjGYmSz8ZW/lN0F9AuVb3ZR0+o/IV6lZ3trqNol1YXEdxBIMrJEwZT+IrzDV/AVjqqu+hSYk/is5+GH0J4P8AnmuJtx4h8D6k76TNNaOD+9tpQTHJ9VP8+voa6ZYdS1p/cbRxGHxGlRezl3+w/wBY/ivQ+jKK898K/FvTNXdLPXVGk354y5/cyH2Y/d+h/M16ECCAQcg9CK42mnZirUKlF2mt9uz9Hsx8fVv90/yplPj6t/un+VMpGIUUUUAFKPvD60lKPvD60AOl/wBc/wDvGmU+X/XP/vGmUAFFFFABT3+7H/u/1NMp7/dj/wB3+poAZRRRQAUUUUASL/qH/wB4f1qOpF/1D/7w/rUdABRRRQAVUk/5Dunf7zfyq3VST/kO6d/vN/KgDoqKindkxtOKh86T+9QBboqqkrl1BbvSXGq6faTiG7v7aCUgERyzKrEfQmgC3RRRQAUUySeKJ40llRGkbagZgC564HqaJp4rePfcSpEmQu52CjJ4A5oAfRTEnikmkiSVGkjxvQMCVz0yO2afQAUUZBJGeR1FFAFLWf8AkDXX/XM1n2X/AB4Qf9c1/lWhrP8AyBrr/rmaz7L/AI8IP+ua/wAqANKx/wCWn4f1q3VSx/5afh/WrdABRRRQAUm4bgMjJ6ClrxvxAynxZq988hGs2usWcNiN5DCIgZCjPQ5OaFrJL+t0v1B6Js9jJCgliAB1JoLADJIA9Sa5bxgdD1bSora/vbZ83BSGOWcrBJOoOElK9h6HFebWhs7/AE3Q9O1qUtp1tbX4/eS/umnQ8FGzyAD8tK+47bHudZFZPhq416b4c6JNYLazXzQJ5hvpHUFMHByoJJ+7+tbI8vAyWz3wBVSVm0SndXGGCO6U28674pQUdc4yDwRxXGv4R1PQpDP4O1Jo485NhdHfE30Pb+fvXatGsytHHLJE7gqrqBlSe4+lcpnxLoJ7a5Zr3+7Mo/XP61tSvqk/k+plUtu180RWnjpLe4Wy8U2UukXR4DuN0L+4b/8AWPeoPEHw48P+J4TdWWy0nkG5Z7YAxye5XofqMGti31zw74jhNleDDtw1rdxgHPtngn6c1mT+C7nR5GufBmpy2hJy1lcfPC/88fr9RV2UX/K/wFGUl70Xe3Vbo86Nn42+F8xezc3GmBslDmSBvw6ofy/GvQPCHxS0XxDPDbXZ/szUCwHkTt8rnP8AC/Q/Q4P1qa28bLZyix8Z6dLpkzfKJgu+CT8e36+5rN8RfCrQPEsH2vw/MlpNLyhjw0En5fd+o49qUor7St59D0Y4yFfTELm81pJevSXzs/M9Aor
w+08ReMvhrdJZazBJf6cDhEmbPH/TOT+hzj0Feo+GPHPh/wAVxgafdPHdAZe0mAWRfXjPI9xmsZQcRzwzUfaUnzR7rp6rdfP7zfoqT916v+Qo/der/kKg5BI/9an+8Ka33j9alj8rzFwXzkdhSN5W48v19BQBFRUn7r1f8hR+69X/ACFAEdOT76/WnfuvV/yFKvlbxgv19BQAxvvt9abUreVuOS/X0FJ+69X/ACFAEdFSfuvV/wAhR+69X/IUAMX7w+tOk/1r/wC8acvlbhy/X0FLJ5XmNkvnJ7CgCGipP3Xq/wCQo/der/kKAI6Udaf+69X/ACFH7r1f8hQAkv8Arn/3jTKmk8rzWyXzk9hTf3Xq/wCQoAjoqT916v8AkKP3Xq/5CgCOpJv9e/8AvGj916v+Qp8vl+c+S+dx6AUAQUVJ+69X/IUfuvV/yFAEdFSfuvV/yFH7r1f8hQAkv+tb60yppPL8xsl859BTf3Xq/wCQoAjoqT916v8AkKP3Xq/5CgCOtSH/AFKf7o/lWf8AuvV/yFaEX+pT/dH8qAH0UUUAFFFFABVS+/5Z/j/SrdVb1d2zkDr1P0oApVHcy3EUCtBEJU3fOM89O1T+X/tL+dP2fuMbl+96+1NaCZiTW+n6v98eXcDow+Vwf61mappcj2/k6tarqdqBhZVGJYx9etdJcafDc/6zaG7MDgiqxS9seQwuoh6H5hW0Z22/r0ZlKF9zyTXfh2l1C9xoj/bYQMmLGJY/w7/UflWJoXi7xH4IlEEbNfaepwbO4J+Qf7B6r/L2r217Cy1NvOtpfst0Odyna2fcd6wde0CG7Urr1qpJ4W/tgM/8CHf+fpXVzwqe7Nf5/wDBLo4ithlyx1g94vWP+afmvvNLwj480XxYhWxn8m8CEvaTfLIOOcf3h7j8cV0VeB+IfAF3ppF/YSedEp3R3dqxyh7Z7qfrWn4a+LOpaMyWXi2J722B2i8jH7xP94fxfofrXPUw7iuaOqO2EaOK/gO0v5Xv/wBuvZ/n6ntNFVtK1Ow1uwS80q9hurd+jxt0PoR1B9jzVzy/9pfzrmOaUXF2krMZSj7w+tO8v/aX86UR/MPmX86BCS/65/8AeNMqWVMzP8y/ePem+X/tL+dADKKf5f8AtL+dHl/7S/nQAynv92P/AHf6mjy/9pfzpzp8qfMv3fX3NAEVFP8AL/2l/Ojy/wDaX86AGUU/y/8AaX86PL/2l/OgBV/1D/7w/rUdTKn7lxuXqO/1pnl/7S/nQAyin+X/ALS/nR5f+0v50AMqpJ/yHdO/3m/lV7y/9pfzqlMu3XdO5B+Zuh9qAJ/FNpqt7o5h0HUF068LArcNEJABnkYIxyK4v/hGviN/0PFv/wCC+P8A+Jr0i4AIXLY/Codqf3/0p3OmliZ0o8qSfrFP80cj4c0XxjZa3FNr3iiLUbMAhrdbRIyT2OQM8GuQ+JHhDXdc8a69eaVptvcxJosCAXWnCb7R88m9IZG4SUKcjg846V68irvX5+/pVupauTUrSqSu0l6JLrfoeJ2Vz4sivrW10z/hI4po7i3isra4tGW2/s8QqHaViuBKDuzkhtwAAxT7TxD41l8OXMsa6rBdWfhjbJLfWzQp9uWTDkGQBWfb0PQ17TUdzbQXlrJbXcMc8EqlJIpFDK6nqCD1FN3d/wCujX639dTCNlb+ut/+B6Hg1mur+J/GVpZw3viOWwsdStnWe6jK3Fqj2k25iSuUBYgZYdxjtXTavaeJNY+B2mwXcV9LrX2633mS2ZplCXQxIyYBOFAYnjjmvRtG8P6R4dtXt9C0210+GRt7pbxBAx6ZOOvFaNPr80/usJX/AAt+f+Z4tqVh410S88XXEEt5e3E9zpwkv7azaFpbcAiXygofJUYBKhiOeM1T1XUfHcOg6LcJPrkrfvysENtMkk480eUGcRMQ+3gebGqkcnBr3WiktBnh91Hr2m+Ntad28SW2m3mpxTX0llbySusBtht8sqhziUbW2ZI
AHAHNNST4jT6bdXdzPr0VxZ2NrLaRRwEee5uXB3qF+Z/K2FlHTOSK9yooWlvL/KwPX+vO5S1j/kC3P/XM1n2X/HhB/wBc1/lWhrP/ACBrr/rmao2KZ0+3+Zf9Uvf2oA0LH/lp+H9at1Vsl27+QenQ/WrVABRRRQAVSl0bTJ9Tj1GbT7aS9iGEuGiUyL9G61dooAozaHpVxZy2k+m2j280hlkiMC7Xc9WIxy3v1pk/h/R7qwhsbjS7OW1gOYoGgUpH9BjArRooAREWNFRFCqowqgYAHpWTWvWRQBJD/r0/3hUdSQ/69P8AeFR9elAGfqehadrCYvrZHbGBIOHH4isb+ytf0PnRr0ahbL/y63R+YD0Df/q+ldTRWkasoq26M5U4t32ZzMfibS9RDad4gtDZStw0F4mUb8Tx+JxVVvB13pVwLzwTqJtGYhjaTNvgk/nj9fqK6i90+01GAw31vHOno46fQ9vwrAHhzU9GmEvhe/Ozdn7FdHch+h7fp9a2jKP2XbyeqM5Rl9pX81uU18W20mdJ8caX9gkkG0mVN8EvuDzj9QPWud8RfCK3usal4Pu/Kk+/HF5nHsUkHT8fzFdU3iKxu1Om+LdN+xu3VZ03RN7hu317etVj4Vv9HP2zwRqe2FvmNlO++F/909v881VuXy/FMqlWqU5c9OWq6rRnG6R8SvEHhO8GmeNrOa5iTjziuJ0Hr6OPf9TXq+ja7pviCxF3pF3HcwnqVPKn0YdQfY1ydxrmka4o0fx1pX9n3X8JnHyZ9Uk7fy9zXJav8Odd8KXh1nwRezSoBuxEf3gXrgr0kH4fhWcqa9H+HyZ6Ma9DE/xPdl3S0/7ej09V9x7XH/rU/wB4U1vvH615l4S+MNtcXEVl4tiXT7kMF+1KD5LH/a7of0+lelrLHMolhdZI3+ZXQ5DD1BrCUXF2ZnWoVKNubZ7Nap+jFooopGAU5Pvr9abTk++v1oAG++31ptOb77fWm0AFFFFACr94fWnSf61/9401fvD606T/AFr/AO8aAGUUUUAFKOtJSjrQA6X/AFz/AO8aZT5f9c/+8aZQAUUUUAFSTf69/wDeNR1JN/r3/wB40AR0UUUAFFFFAD5f9a31plPl/wBa31plABRRRQAVqQ/6lP8AdH8qy61If9Sn+6P5UAPooooAKKKKACql9/yz/H+lW6qX3/LP8f6UAU6k/wCXf/gX9KjqT/l3/wCBf0oAjooooAr3FlDccsNr9nXg1XL3doCJl+0w92A+YD3rQoqlJ7Mlx6oxRpkNwzz6NcfZptp3R/wsPQr/AJFcdr/hCxv2ZZ4F0y8boQMwSf8AxP6j6V6HLp8U7l0JhlAJDpxVWZ5EjMOpwLPAf4wM/nXRTqtPT+v8zCdNPf8Ar/I8Il0zxD4F1b7VpUslnIeSB80cy/yYV6P4R+LOn6w6WOvqul6geAWP7mU+xP3T7H8zW9daIstqwsfLvLRuWtZuQP8AdPUH8jXnXiDwBDe720pWEq/etJeJF/3T/F/P2rZxp1ldaP8Ar7jthjXZQxa5l0f2l8/tLyf4HtNKPvD614D4e8ceIPA8ws71X1DTUO0wSnDxD/Zbt9Dx9K9m8N+KtJ8U2YuNIuhIVx5kLcSRn0Zf69PeuOpTlTdpGs6Hue1pPmh3XT1W6fr8jal/1z/7xplPl/1z/wC8aZWZzBRRRQAU9/ux/wC7/U0ynv8Adj/3f6mgBlFFFABRRRQBIv8AqH/3h/Wo6kX/AFD/AO8P61HQAUUUUAFVJP8AkO6d/vN/KrdVJP8AkO6d/vN/KgC74g1ix0LTft2qTeTbowUuELYJOBwATXL/APC0vCH/AEFG/wDAaX/4muzvoo5owk0ayLn7rLkVR/s6y/587f8A79L/AIVceS3vIh819DJ0Tx14e13VY7HS74zXD5YIYXXIHJ5KgVJrvxD0Xw7rEunX8d60kEcUs8kNsXjhSRyiszDoNwx61r29nbQzq8NtDG2fvLGAay9b8BaXr13
qlxeT3aPqdtBbTCJ1AVYpC6lcqcEk85zx6Una6tsUr8rvuZ978TrGC11FotK1QS2VvNMDPalI3Mak43e+MD61g2fxW1e/0W0Npp+kz6te3jwR26XcoSFUhMreZujDK2BgDGDkHOK7GX4feFiLl7PRLPTru5jdGvrCBILhd4+ZlkUbgeetY7/CfT5LNjJrWrtqjXX2ltWMsf2gt5flY+5s2+WduNvv1qNbP0X33/yK0/P8tPxMpPi/NdT6fcWWkRnTWhsZL55JyJYjdOUQIoXDbSOckZ7V6jXEN8KNCF9p8tvPe29vZRW8T2cci+Vci3O6EyZUklSSeCM967erdund/d0JV7/Jff1CiiipGFFFFAFLWf8AkDXX/XM1n2X/AB4Qf9c1/lWhrP8AyBrr/rmaz7L/AI8IP+ua/wAqANKx/wCWn4f1q3VSx/5afh/WrdABRRRQAUUUUAFFFFABWXvX/nmv5mtSsigCQLHMfKeMbX+U4Yjg/jWIdHuLQ7tKvHjH/PGX5l/+tW3D/r0/3hUdVGTjsS4p7mV/bNzZ8arp21R/y2hJK/z4q/a6nZ3g/wBGeNz/AHckH8utTVQutEsro7jF5Un9+L5T/hVXg91Ym0ltqaW9f+ea/madGy+an7sD5h3NYX2bV7Dm2uFvIx/yzm4b8/8A69TWuvwLcJHfxyWcm4ZEg+X86PZv7Oo+dddC/dW1rewGG7tYpoz1VxkVzcvhe60yVp/C981tk5NpMS8Tfn0/X610yOkih42V1PQqcg0tEakoaIJQjLVnJXOuWd1GNN8baKtuW4EjAtEx9VYHK/UH8arR6LrHh1RdeD71dS05vm+wXD7hj/YbP+H412Vxbw3cLQ3MSSxt1R1yDXOS+FrrTJWuPC961sSctayndE/+H+eRW0Zxem3l0/4BjKElrv8An/wTn7+z8K/ECY2t/bHQ9dztIkXaxb3zw/6NXKeT4y+FF2TB/puk7+Y2y8Df1Rvy/Gu7vrrStYdLDxtphsLnolyOB9VcdB+Y9aa0PiHw3Afs7L4l0Rhjy2+aVU9uu4Y+v0FaOPR/j+jN8PjKlG6WsXuunzX6rU0fCPxF0XxYohiAtNQA+ezmb5j/ALp/iH6+wrq96/8APNfzNeMal4K0TxYrah4IufsWoRfO1hIfLZSO6/3Tn6j6U/w98T9U8OXn9j+PLeZljO37WU/eJ/vj+Ie45+tc8qTWx3xp0sUr4fSX8r/9tfX039T2Tev/ADzX8zSqy7x+7Xr6mqdjf2up2cd3p9xHcW8gykkbZBq0n31+tYnG007MczruP7tevqaTev8AzzX8zSN99vrTaBD96/8APNfzNG9f+ea/maZRQBIrruH7tevqaWRl8xv3ank9zUa/eH1p0n+tf/eNABvX/nmv5mjev/PNfzNMooAfvX/nmv5mjev/ADzX8zTKUdaAJJGXzX/dg/Me5pu9f+ea/maJf9c/+8aZQA/ev/PNfzNG9f8Anmv5mmUUAP3r/wA81/M0+ZlEz/ID8x7moakm/wBe/wDvGgBN6/8APNfzNG9f+ea/maZRQA/ev/PNfzNG9f8Anmv5mmUUASyMvmN8gPPqabvX/nmv5miX/Wt9aZQA/ev/ADzX8zRvX/nmv5mmUUAP3r/zzX8zWjF/qU/3R/KsutSH/Up/uj+VAD6KKKACiiigAqpff8s/x/pVuql9/wAs/wAf6UAU6k/5d/8AgX9KjqT/AJd/+Bf0oAjooooAKKKKAHx9W/3T/KmHnrT4+rf7p/lTKAKcungP5lo5gk9vun8Kp3aQ3WItWg8uTok6f41sUjosilXUMp6gitFNrchwXQ4jX/DEd5CTfx/ao8fLeQj94g/2h/EP85FeZan4W1Tw3erquh3LxlG+S5tyQPow7fQ9fcV7s1nNasXsX+XvE54P0rOnsLa9kbycWV03DRuPkk9iOh/z1rqhWTVpaoinKrh5+0ouz/PyfRryZyPhL4vRXcq2Hi9VsrsnC3YGIpD/ALX
90+/T6V6arK6BkYMrDIIOQRXkfinwFFcSyKsK2lzk/uyf3b/7rdvx/MdK53QvFfiH4fXf2SdJLzTVbD2kxIaMeqHt9Ont3qKmHuuanqjvhUoYp20hU7fZfo+j8np2Z7/RWN4b8V6T4qsftOkXAcqB5kL8SRH0Zf69K2a5DGdOdOThNWaCnv8Adj/3f6mmU9/ux/7v9TQQMooooAKKKKAJF/1D/wC8P61HUi/6h/8AeH9ajoAKKKKACqkn/Id07/eb+VW6qSf8h3Tv95v5UAblwxXbtNQ+a/r+lVfEVtqF3ppi0i9WxuSwKzNGHwM8jBrlf7B8a/8AQ2Rf+Aaf4VpGCau2kRKTT0VztUkYuoJ7+lW64/Q9K8TWmrRS6vr8d9bDIaFbZUyexyBXPeOB4uPiPWZ9EuNYjgsrKzlsorSMmKaYzMJB907/AJMZUduT2qZJKSV7lJtxbtY9RqBr60S3M73UKwq20yGQBQc4xnpnPFcBpXhXxk3iCc61qlwulzLKmYNad3UMCFKp9nXBGR/Fx71x8nhy40/wLocGo6Ff3ml2Otai1zYm0knkZGMywuYyCzDLKQ3PUH3qG9L/ANblW/r5XPcWuYEmjheeNZZQTGhcBnx6DvUtfPieGNfhvtDh1LSL2fVja6QtleCBnWz8qQm4VpOkZCnnJG7pzX0HVtW+9r7iU9fkvxCiiipGFFFFAFLWf+QNdf8AXM1n2X/HhB/1zX+VaGs/8ga6/wCuZrPsv+PCD/rmv8qANKx/5afh/WrdVLH/AJafh/WrdABRRRQAVzN7470yx8QnSpYLtgkscE12kYMMMkn3FZs5yfpXTV5prHhbXJ/EGo2VvYiSw1TUre9N95qgQqmNyleueOMUL4kv66fpcH8LZ22v+IINAtoXltrm7luJRFDb2se55GxnuQB06k1kP8RNN/syyu4LDUrmS8mkgS0gtw0yvH98Fd3b2Jo8S33i2bQpYtB0Ro7x7loQ/wBqiysI/wCWqkkAFuwPTvXNX3hi6m0LSo5fBTTpaiZGtW1IGZWfGJS6sFbceTnkUuj/AK/r+uw/6/r+vzPSbC7+3WEN19nntvNXd5NwmyRPZh2NUKo6T4YNx4K0zSvFBe5ntkDOUuHUhuQBuUgnAOPTitESuAACMD2FVKyehK2Fh/16f7wqPrUqMZWEb8q3BGMcVnCweA5s7mSL/ZPK/lQknuDbWxcoqr9r1KD/AFsSTr6oADTo9ZjZtsjGJvR0xVcj6C5l1LFIYIrkrHcRrIhYZVhmnrcl1yjqw9QAakjlcyoCR94dhUbFbmG+heS5k0u5ktX/ALudyn8Kb/aOoWHGpWnmxj/ltBz+Y/8A1Vuec/qPyFHnP6j8hWntG/i1I5LfDoUbTUrS9H+jzKzf3Tww/CrVUr3SLS9be8Yjl6iSIbTn8OtVwda00fuZFv4R/C4w4/Hv+tHLGXwv7xc0lujRms7e+QQXkKTRMRlHXIrm5fDN9o8zzeFr0xLnJs7g7o2+h7f55rcsvEVvPOsU7G2l3AFJlC/rWgZn3HBGM8cCmpTp6A4wqannN+ml6xfBdYhl8Pa4hzHdxnarH13dD/nmqutq/wBmWw+IWni8tPuwazaL8yZ6ZwP07+h616NqFnbapbG31CCOeM9mUZHuD1B+lc1Jo+r6DG40WUajp7Ah9PugG47hSf5foa6I1Iy02/ro+nz0MJQlF3X9eq/yPNTpPiL4fyHWfCV8upaNIdzmP5o2HpIn8J9x+Y6V6P4M+I+k+K2jtmP2HUuN1rK33j/sH+L6dfasS0gjW+kn8H3baPqJP77Sbv8A1Up9ADx/ntXOa14c0vXb4xmAeGfECnPkSfLbzN2KN/Ac/h6etE6Sl6/1/V1oelTx0KyUMVr2kt/n/N89fM9ub77fWm15H4d+JuseFb8aF4+imKxnat4yZkQdt398f7Q5+tetWuoR31rHc2c8c8Eq7kkjIZWHsRXJKLi9R1sPKklLeL2a2f8AwfJ6jqK
k85/UfkKPOf1H5CpOcYv3h9adJ/rX/wB405Zn3DkdfQUskriRgCOp7CgCGipPOf1H5Cjzn9R+QoAjpR1p/nP6j8hR5z+o/IUAJL/rn/3jTKmklcSsAR1PYU3zn9R+QoAjoqTzn9R+Qo85/UfkKAI6km/17/7xo85/UfkKfLK4mcAj7x7CgCCipPOf1H5Cjzn9R+QoAjoqTzn9R+Qo85/UfkKAEl/1rfWmVNJK4kYAjr6Cm+c/qPyFAEdFSec/qPyFHnP6j8hQBHWpD/qU/wB0fyrP85/UfkK0Iv8AUp/uj+VAD6KKKAMPxfrs3h7w7JeWkSS3LSJDCsmdu92CgnHOBnNQeFdd1DUxqllq0dudQ0y48mRrbKxygqGUgNkjrV3xNoKeI9Cl09p2t3LLJFMq7vLdWDKcd+R0rM0nwpqOmxTSHW83t7efab6dLYASrt2+Woz8nb5uTSXW/wDW3/BB9P67/wDAKvhzxPrd54wudF1qKw3RW/nutoWzbHdgIxY4YkHORW22r2OoalNYWdwJbq1z50YUjZzjqRg/hWXo3hC/tPEMGqazrj6m1nC8FqGhCMFY5Jdsnce3auhvv+Wf4/0quiDqyv5Mn939af5T+Tjbzu/pUFSf8u//AAL+lIA8mT+7+tHkyf3f1qOigCTyZP7v60eTJ/d/Wo6KAJkicE5X+E03yZP7v60kfVv90/yplAEnkyf3f1o8mT+7+tR0UASeTJ/d/Wop9PW5XbNHn0OeRS0o+8PrTTtsG5nXdjPAjx3EX220z3+8vv8A5/Sub1bwtBqlmxgT7ZAo+5nEsX09R7f/AK67eX/XP/vGqU9gkj+bCxhl/vL3+tbQquLMZ01JHgupeGdT8N341XQLmSJ4jxNFwV9mX/HIPvXe+DPivbarJHpviUJp+oH5VmJxFMfqfun2PH8q6e/so7hiL5PInIwLhBlX/wB4d6888V+AFly8caxSNypU/u5PdT2+n8q6XGFbyZ0UsY4xVLErmgtn9qPp3Xk/k0ez+TJ/d/WnPE5VPl6L/U14J4Z8f6x4JnXTdejlvdMQ7Ru/1sA9ieo9j+BFe16Zq9hrmmQX2lXUdzbuvDIehyeCOoPsa4p05QdmbVKHLFVIPmg9mvyfZ+TL3kyf3f1o8mT+7+tR0VBzknkyf3f1o8mT+7+tR0UATiJ/Jcbecj+tM8mT+7+tC/6h/wDeH9ajoAk8mT+7+tHkyf3f1qOigCTyZP7v61SmRk13Ttwx8zfyqzVST/kO6d/vN/KgDcuADt3Nj8Kh2p/f/Sota1K10qy+1X8nlwqQCwUtyenArn/+E98O/wDP83/fh/8ACrjTnJXirkSnCLs2dMirvXD9/Srdc1pfivR9V1CO1sboyTNyFMTLwOvJFRa78Q9F8O6xLp1/HetJBHFLPJDbF44UkcorMw6DcMetKUZRdmtSlJSV09DqqK4i9+J1jBa6i0WlaoJbK3mmBntSkbmNScbvfGB9awbP4ravf6LaG00/SZ9Wvbx4I7dLuUJCqQmVvM3RhlbAwBjByDnFRfqVb+vQ9VorL8M63H4k8L6drMMRhS+t0mEZOSmRyM98GtSqaadmSndXCiiikMKKKKAKWs/8ga6/65mqVjE50+3IXgxL/Krus/8AIGuv+uZrPsv+PCD/AK5r/KgDTs0ZN+4Y6f1q1VSx/wCWn4f1q3QAUUUUAFFFFABRRRQAVkVr1l+VJ/cb8qAFh/16f7wqOpoY3EyEowGR2pnlSf3G/KgBlNkiSVcSIrD3Gal8qT+435UeVJ/cb8qAM9tMiDbrd3hb1U0qHULaRSAl0oIwOjVf8qT+435U6ONxKhKN94dqvnfXUnlXQzk1OLdtnV4G9HFWkkSRcxsGHqDmnvbmRcPEWHoVzVR9HXdugEsLeqZo91+Qe8i1RVPytTt+i/aV/wB3DUq6jGH2XKPbv6OtHI+mocy6k01lbXpVLqFZATjkcj6HtWa2k3dgxOk3Z2A/6iblfwNbUH7xkeP
5lyPmXkUrRvuPyN19KFOUdAcU9TFj1wwSCLVbd7Vz0fGUP4//AK61I5EljDxOrqejKcg06S286MpLDvU9VZcg1lyeH5YJDLpMstq/UoQSjfh/+un7kvIn3o+Y7VtB0/Wo8XsP7wD5Zk4dfof6GuY1XR7+1tfs+r2o1/S1+6+MXEA9Qev8/wAK6P8AtW6sCE1mzeIZwJ4xlDWraTR3arJbMJUz1XnFaRnOnvqv62IcYT20Z5Fe2EdxpRjYPr+iRjjoLywH9R+anHasDTb7WvAMh1Dw9dDVdCkf95EchVJ7MvWN/fofevY9X8JR3t0b3TjJp9+CSJ4QQGP+0O/+etcNqulXFhfF72IaRfyAqLqOPNrdjuJFxgZ78Y9V710pwqq39f16fcVRxNbCNreL3T2f+X6dGdp4V8Z6T4us/N06XbOg/fWsnEkf4dx7it+vn3UdDns9Uju9K3aNq6/PGiSYin/2onzjB/ukkHoCfu13Hg74qpd3C6R4uQafqKt5YnZdscjdMMD9xv0+nSuWpRcNeh6Kp08TFzw3zi916d1+K6o9LX7w+tOk/wBa/wDvGlWN9w+RuvpSyRuZWwjdT2rA4yKin+VJ/cb8qPKk/uN+VADKUdad5Un9xvyo8qT+435UAEv+uf8A3jTKlkjcyvhG+8e1N8qT+435UAMop/lSf3G/KjypP7jflQAypJv9e/8AvGk8qT+435U+aNzM5CMRuPagCGin+VJ/cb8qPKk/uN+VADKKf5Un9xvyo8qT+435UAEv+tb60ypZI3MrYRuvpTfKk/uN+VADKKf5Un9xvyo8qT+435UAMrUh/wBSn+6P5VneVJ/cb8q0Yv8AUp/uj+VAD6KKKACiiigAqpff8s/x/pVuql9/yz/H+lAFOpP+Xf8A4F/So6k/5d/+Bf0oAjooooAKKKKAHx9W/wB0/wAqZT4+rf7p/lTKACiiigApR94fWkpR94fWgB0v+uf/AHjTKfL/AK5/940ygBHRZEKyKGU9QRWdPYNFGywgTQN96CTn8q0qKqMmiXFM4DXfCdrq0DeShYqD+7I/eR/T+8PavOEi17wBqpvtDlPlMf3kR5jlHow9f1HY179c2Udx83KSDo69RWHq+kJcx7L+NVZxgTquVfn+If16/WuyFWM1yzJpVKuFk5Uuu6ez9V/XkQ+DfH+l+L4PLiP2XUUGZbOQ/N7lT/EP1HcV1VeD+JfA91pt4t9pTSQTxnfG0TYI91I610/gn4rC4lj0nxeVtrz7sd4RtSX2bsp9+n0741KLj70dUd8YU8UnLD6SW8eq813X4rr3PUaKOvSiuc5CRf8AUP8A7w/rUdSL/qH/AN4f1qOgAooooAKqSf8AId07/eb+VW6qSf8AId07/eb+VAGzeIkiKsiqy+jDNU/slt/z7xf98Cr9wxULg4qHzX9f0p3YEUNvDHMrRwxq2eqqBWTrfgLS9eu9UuLye7R9TtoLaYROoCrFIXUrlTgknnOePStxJGLrk9/SrdIOljm5fh94WIuXs9Es9Ou7mN0a+sIEguF3j5mWRRuB561jv8J9Pks2Mmtau2qNdfaW1Yyx/aC3l+Vj7mzb5Z242+/Wu7JCqSxAAGST2qD+0bI2n2oXlv8AZ8487zV2Z6YznFKwyPR9KtdD0Wz0vT0KWtnCsMQY5O1RgZPc1coByMjkUVTbbuyVZKyCiiikMKKKKAKWs/8AIGuv+uZrPsv+PCD/AK5r/KtDWf8AkDXX/XM1n2X/AB4Qf9c1/lQBpWP/AC0/D+tW6qWP/LT8P61boAKKKKACiiigAooooAKyc1rVkUASQn9+n+8KZmnw/wCvT/eFR0ALmjNJRQAuadEf3yf7wplPi/1yf7woAbmjNJRQAuaY6LKu2RQw9CM06igCoNMTzQbWR7dyeqnj8qaX1C2Y+Yq3Kg9V4NaEf+tT/eFNb7x+tXzvrqTyroVItSgkba5MT/3XGKtA5GQcimSwRTLiVFb6iqh094TmynaP/Zb
laPdfkHvIvMAylWGQeoPesyXQo/OE2nSvZzZ6x/dP1FS/bLm3/wCPu3JX+/HyKtW13BcOvlSAnPQ8GmuaOqE+WWjMw6pe6c+zV4NyZx9ohGQfqP8AP0q8TZ6rZsp8u5gcYZSMj8RVqQBmYMMgk5BrJn0NBIZ9Olazm/2Pun6ineL8mK0l5o53WfBUkdu40tVu7Mnc1hOfun1jbqD/AJ56V51reiJcxPHKksvkjBEi4ubcDsem9QPxGP4R19kXVrmxYR6xblV6C4iGVP1/z+FLqWjaZ4ithI20yD/V3MJw6H6/0NdUazjpU1Xcx5HGXPRdpI8o8GfEa98KTQ6Z4kd7vSchYbpcs0I/my+3UfpXtcF5b38K3VlOk9vMN0ckbZVh6g14z4q8HXOnF/tMSvFIeJlX93Ifcfwt/n1NYPhzxTq3gK/YQBrrS3bM1m7fd/2lPY+/Q/ymrh01z09UenTrwxr5Knu1fuUv8n+D8j6IzRmsvQPEOm+JdLS+0m4EsR4ZTw0bf3WHY1p1xGE4ShJxkrNC5oB5pKUdaCR0p/fP/vGm5p0v+uf/AHjTKAFzRmkooAXNPmP79/8AeNR1JN/r3/3jQAzNGaSigBc0ZpKKAHyn9631puadL/rW+tMoAXNGaSigBc1pw/6lP90fyrLrUh/1Kf7o/lQA+iiigAooooAKqX3/ACz/AB/pVuql8CfLwM9f6UAU6k/5d/8AgX9Kbtb+6fyp+1vs/Q/e9PagCKinbW/un8qNrf3T+VADaKdtb+6fyo2t/dP5UALH1b/dP8qZUkatk8H7p7e1N2t/dP5UANop21v7p/Kja390/lQA2lH3h9aXa390/lQFbcPlPX0oAWX/AFz/AO8aZUkqt5z8H7x7U3a390/lQA2inbW/un8qNrf3T+VADadKqvEiuAyleQR7mja390/lTnVtsfB+76e5oAxrzSsxsIlEsR5MLdvdT2NedeK/AkGoxPPajDjk5GCPqP6165tb+6fyqrdaetx84BSUdHA/nXTTrOOjMnBpqUHZrY8W8KePtT8E3KaT4kWW60tTtSQ8yW49v7y+35ele1WN9a6lZRXdhOlxbyruSSM5DCuJ8TeEYNRiaO4gEcpHykDCt9PT6fyrz3T9R174ZasxgVrjTZH/AH1q5O1vcf3W/wAnNXOiprmp/cd8K0MW+WraNTvspevaX4PyPoNf9Q/+8P61HWd4a8Sad4p0Q32kTeYm5RJGeHibB+Vh2P6HtWntb+6fyrjMJwlTk4zVmhtFO2t/dP5UbW/un8qCBtVJP+Q7p3+838qu7W/un8qpygjXdOyMfM38qANPVoria1CWk4glzw5XOPwrF/s/Wv8AoLr/AN+RXRXABC5bH4VDtT+/+lWpNIlxTMzT7TU4b1Hu9QE8XdBGBn8a43xwPFx8R6zPolxrEcFlZWctlFaRkxTTGZhIPunf8mMqO3J7V6MirvXD9/SrdS220xpWTR5TceH/ABTZnU7rxQt3rGhmCbfp1pqstxLcKQdqLEsCHJyBw/HvXIxaP5nh+zv7rwrqSWD6tLcajo66PKkcJa2ZIhHCVy6qduXA+8c8V9CUVNtLfL8blX1/rtY5/wABWd9p/wAPtCtNXV1vYbGJJlc5ZWCjg+46V0FFFXKXNJsmKsrBRRRUjCiiigClrP8AyBrr/rmaz7L/AI8IP+ua/wAq0NZ/5A11/wBczVCyVvsFvwf9Wvb2oA0bH/lp+H9at1UsQRvyMdP61boAKKKKACvNNY8V65b+INRvLa9WPT9M1G3smsfJUiZXxuYt1B54xXpdcxe+A9NvvEJ1SS4u0WSaOea0SQCGaSP7jMMZ4+tC+JP+un6XB/Cy34qOoLpSyWGrQaRDG++7u5UDMkQHO0EEZzjrXEReI/FV/Z6VYf2gbK4uLe6u/thtU3zRR/6vKEYXd1PHSut1nwRb67p32K+1fVvK+1m6G2dcgnovKH5V7DtUF38P7a8s7aKXWtXa5tvMCXr3CtM
UcYZCduNuPbilrZ/10/zHpoWNJ8UvN4J0vWbuyu7mW7jXfHZW5lYNg5O0dBx+orREeQDuUfU1b07T7fStNt7CyTZBbxiONSc4AFUauVm3YmN7ak8MeJkO5eo6GmeV/tp/31RD/r0/3hUdSMk8r/bT/vqjyv8AbT/vqo6KAJPK/wBtP++qdHHiVDuX7w/iqGnxf65P94UAL5X+2n/fVHlf7af99VHRQBJ5X+2n/fVHlf7af99VHRQBNHHiRfmTqP4qRo/mPzp1/vU2P/Wp/vCmt94/WgB/lf7af99UeV/tp/31UdFAEnlf7af99VXl0u3nYbwgbP3kbBqSnJ99frTTa2E0nuUmtL21J8idLhB/A7fN+dEWoxl/LuAbd/R+n51db77fWo5I0lXbIgYehFVzJ7oXK1sSGJZY8Fo3Rh/eBBrLm8P+XIZ9KuBay/3Q3yN9RUh04xMWspmiP908qaPtlxb8XkB2/wDPSPkVUbr4WS7P4kVl1JQ32LX7dIt/ylmG6KQVyfiv4bpNHJdaEVlj5Itw+SB/snuPau9H2XUItjbJoz1Uis6XTLvS5nfR5d8QY5tpTkfga1p1HB+7p5dDKdNSWuv5ngdtNq/g7Wft+jSNDIpxLEw+SUd1Zf8AJFe2+CvHOneMrMiFltdQiGZrORvmHuv95fft3qrq2k6T4qVo7lPsOpAYywwWPof73868o8QeF9X8JavHeWpe2uYW3wzxHhvoe49vfkVtOnGrrHSXY7qOKU4qjinpsp9V5S7r8V+B9F+V/tp/31R5X+2n/fVcP4B+INv4stvsl6FttXhX97D0EoH8af1Hau1HWuBpp2YVaM6M+Sf9ea8iWSPMrfMv3j/FTfK/20/76pJf9c/+8aZSMiTyv9tP++qPK/20/wC+qjooAk8r/bT/AL6p0seZnO5fvH+Koakm/wBe/wDvGgA8r/bT/vqjyv8AbT/vqo6KAJPK/wBtP++qPK/20/76qOigCaSPMjfMvX+9TfK/20/76pJf9a31plAEnlf7af8AfVHlf7af99VHRQBJ5X+2n/fVaEX+pT/dH8qy61If9Sn+6P5UAPooooAKKKKACql8SNmDjr/SrdVL7/ln+P8ASgCrvb+8fzp+5vI+8fvevtUVSf8ALv8A8C/pQA3e394/nRvb+8fzptFADt7f3j+dG9v7x/Om0UASRs2T8x+6e/tTd7f3j+dLH1b/AHT/ACplADt7f3j+dG9v7x/Om0UAO3t/eP50B23D5j19abSj7w+tAD5Wbzn+Y/ePem72/vH86WX/AFz/AO8aZQA7e394/nRvb+8fzptFADt7f3j+dOdm2x/Mfu+vuajp7/dj/wB3+poATe394/nRvb+8fzptFADZkWeMpMN6nsa5rXfDkV3bssyebERjcRyv19q6eitIVJQd0ROCmrM8CvNK1rwHrP8Aa/hyZlX+NOqSL/dYdx/ng16x4M8eWPjGxJhY29/EP39o7fMvuPVff860dS0KK8tpGjUdg0ZHBznp6V5B4k8H3uiaiuseHJJLe5gbcNnBU/4e1dLjGsrx3OqniFJKjin6S6ryfdfivQ913t/eP50b2/vH864bwF8RbbxTGLDUQtprEY+eLos2OrJ/UdvcV29cbTTsyatGdGXLP/h/Nd0O3t/eP51TlJOu6dkk/M38qtVUk/5Dunf7zfypGRr6hPHbQiSY7VB64zWb/bFl/wA9T/3wa1boAqoIyPeq3lp/cX8qpcvUTv0IbXUba5uUjhkLMT02kVma78Q9F8O6xLp1/HetJBHFLPJDbF44UkcorMw6DcMetbcaKJVIUA57CsfW/AWl69d6pcXk92j6nbQW0widQFWKQupXKnBJPOc8elJ2urbBrZ33IoPiFZ3N5Pa2+ja280KSMB9iIEhQE4Uk4JOOKwI/ibq134Y0y5s9Ft01fUr+6tUtbi4KxxCDzCxZlBycR444yfSuysPB3hrS9U/tLTfD+mWl8SxN1BaIkh3fe+YDPPe
sa6+GemT6JBp9vqGo2kltez3sF5DInmxvMXMgGUK7SJGGCOmOc81Lvbz/AOCVp/Xp/mb3hnW4/EnhfTtZhiMKX1ukwjJyUyORnvg1qVT0fSrXQ9Fs9L09ClrZwrDEGOTtUYGT3NXKuVuZ22JjeyuFFFFSMKKKKAKWs/8AIGuv+uZqhZMw0+3+Y/6te/tV/Wf+QNdf9czWfZf8eEH/AFzX+VAGlYknfkk9P61bqpY/8tPw/rVugAooooAKKKKACiiigArIrXrLxH/fb/vn/wCvQAsP+vT/AHhUdTQhPOTDNnI/h/8Ar0zEf99v++f/AK9ADKKfiP8Avt/3z/8AXoxH/fb/AL5/+vQAynxf65P94UYj/vt/3z/9enRhPNTDNncP4f8A69AEVFPxH/fb/vn/AOvRiP8Avt/3z/8AXoAZRT8R/wB9v++f/r0Yj/vt/wB8/wD16ACP/Wp/vCmt94/WpIwnmLhm6j+H/wCvSMI9x+Zuv93/AOvQBHRT8R/32/75/wDr0Yj/AL7f98//AF6AGU5Pvr9aXEf99v8Avn/69Koj3j5m6/3f/r0ANb77fWm1Iwj3H5m6/wB3/wCvSYj/AL7f98//AF6AGUU/Ef8Afb/vn/69GI/77f8AfP8A9egClJp0UkgaImGTPDJxTHkvbSRhOv2lAeXT735VoqI9w+Zuv93/AOvSyBPMbLN1P8P/ANer5n1J5V0MqaLT9Yj2yAFwOD0day9RsZorNrbUov7S05uuR+8j9wfb/OK357C0uOW3K/8AeVcH+dVmW7sz8jfaovQjDCtIytsZyjfc8T8UeDbjR501nw9cO0Ubho7iI4eJuwb0P6H9K7/4e/EWLxNGNN1Xbb6xEOV6LcAfxL7+o/EcdN+fT7O93y6bL5FwwIeF0GyT1BFeWeLvBUkN19u0hJLO9hbf5SEgqR0ZG7/z+tdEkqy/vHTh8RGMFQxHwdH1j/mu6+aPcpf9c/8AvGmV5/8ADz4jx6+RpHiFxb6ynCttAW5HqPRvUfiPQeh4j/vt/wB8/wD164WnF2ZdajKjLll8n0a7ryGUU/Ef99v++f8A69GI/wC+3/fP/wBekYjKkm/17/7xpMR/32/75/8Ar0+YJ5z5Zs7j/D/9egCGin4j/vt/3z/9ejEf99v++f8A69ADKKfiP++3/fP/ANejEf8Afb/vn/69ABL/AK1vrTKlkCeY2Wbr/d/+vTcR/wB9v++f/r0AMop+I/77f98//XoxH/fb/vn/AOvQAytSH/Up/uj+VZ2I/wC+3/fP/wBetGL/AFKf7o/lQA+iiigAooooAKqX3/LP8f6Vbqpff8s/x/pQBTqT/l3/AOBf0qOpP+Xf/gX9KAI6KKKACiiigB8fVv8AdP8AKmU+Pq3+6f5UygAooooAKUfeH1pKUfeH1oAdL/rn/wB40yny/wCuf/eNMoAKKKKACnv92P8A3f6mmU9/ux/7v9TQAyiiigAooooAkX/UP/vD+tUL7T4r2MhwA+OGx1+tX1/1D/7w/rUdNNp3Qmk1ZnjnjPwFLHcf2hpO63vIm3qYzg5HIII71v8Aw/8AiP8A2xIuieI8W+rx/KjsNq3OP5N7d+3pXf3FvHcxFJVyOx9K8x8dfD37WDd2I8u5Q7kkTjJH8jXVzRrKz3N6NZQj7GvrT6PrH08u6+7U9TqjdTR2+sWEszbUUtlj24rgfh/8RZbi5Xw74sbytSQ7ILh+BP6K3+179/r19MEMczASxq4AONyg44rmlFxdmFajKjKz1T1TWzXdFg61pp63Uf60n9s6Z/z9R/kap/ZLb/n3i/74FH2S2/594v8AvgVJiXP7Z0z/AJ+o/wAjS/23p3/P2n61S+yW3/PvF/3wKPslt/z7xf8AfAoAu/23p3/P2n60f23p3/P2n61S+yW3/PvF/wB8CnzWdsJmAt4gM/3BQBa/tvTv+ftP1o/tvTv+ftP1ql9ktv8An3i/74FH2S2/594v++BQBd/tvTv+ftP1o/t
vTv8An7T9apfZLb/n3i/74FH2S2/594v++BQBd/tvTv8An7T9aP7b07/n7T9aqNZ23lp/o8Xf+AU37Jbf8+8X/fAoAfqmr2E2l3EcVyrOyYAGeTUdl/x4Qf8AXNf5Uv2S2/594v8AvgVKAAMAYAoAuWP/AC0/D+tW6qWP/LT8P61boAKKKKACiisy68SaPZaoum3WoQRXrjK27ON7fQdaic4wV5Ow1FvY06KoyazYQxNJLOERAWZmBAAHc0kmvaTBp9vfXGp2kFrcqGhmmmVFkBGRgkjPFTCtTqfBJP0Y5QlHdF+sitO3uIbu3Se1mjnhkG5JI2DKw9QRwazK1JJIf9en+8KjqSH/AF6f7wqOgAooooAKfF/rk/3hTKfF/rk/3hQAyiiigAooooAfH/rU/wB4U1vvH606P/Wp/vCmt94/WgBKKKKACnJ99frTacn31+tAA332+tNpzffb602gAooooAVfvD606T/Wv/vGmr94fWnSf61/940AMooooAguLKG55dcN2deDVC7s5Wh8u6j+2Qjow4dPpWtSjrVxm0S4pnkHjHwEt6W1DRZCLiM71YfK4I55x0PuP0ra+HXxAl1WU6B4k/davCMRyNx9oA/9mH69fWu+vbGG4mdiNj7jh14NedeOfh/NfqNS0o+VqNuQ8U0Z27iORn0Poa3co1VZ7nTh6qjH2Fb4Hs/5X3Xl3XzWp6XRXE/D7x1/wkdu+m6uPs+t2Y2zRMNvmgcbwPX1Hb6V21czTTsya1KVGbhL+vNeQVJN/r3/AN41HUk3+vf/AHjSMiOiiigAooooAfL/AK1vrTKfL/rW+tMoAKKKKACtSH/Up/uj+VZdakP+pT/dH8qAH0UUUAZ+ua1a+H9Hm1G/3mKLA2xrlnYnAUDuSTVfQvEtprtjczxw3FpJaSGK5t7lAskTAZ5AJ7c1V8daPd614XeDTUEt1DNFcRxMwXzCjhtuTwMisvw9Brumyanqs2hs02rX4ka0+0xhreIJgMTnBOR0HrSXX+u3/B+4b6f13/4BoaJ45sdc1SOxjsr+1aeJpbaS6hCLcIpwSvJP54rbvv8Aln+P9K47wpb+JJfFc+p+KtAeGeVGSO5+2RPHbR5yI0jUk892710jaRZafqU1/aQmO5us+c5dm3c56E4H4VXRC6sWpP8Al3/4F/Sjzn9f0FP81/Jznnd6e1ICCipPOf1/QUec/r+goAjoqTzn9f0FHnP6/oKAEj6t/un+VMqZJXJOT/Ce1N85/X9BQBHRUnnP6/oKPOf1/QUAR0o+8PrT/Of1/QUCZ8jn9KAEl/1z/wC8aZU0kriVwD/Ee1N85/X9BQBHRUnnP6/oKPOf1/QUAR09/ux/7v8AU0vnP6/oKc8rhU56r6e5oAhoqTzn9f0FHnP6/oKAI6Kk85/X9BR5z+v6CgAX/UP/ALw/rUdTiV/Jc55yO31pnnP6/oKAI6R0WRCjqGU9Qal85/X9BR5z+v6CgDzXx58PYtWt2ubMbLhOVcdfof8AGovh38QZxfL4Z8XOYtQjzHb3Mhx53HCsf73oe/16+nGVyCCQQeoIFef+Pfh7Br9q1xZKI7qMFlYDp/8AW/lXQpKouWW/c3oVowj7KrrTf3xfdfquvqd7RXm/w9+IV210PDHixzHqUPyW9xJ/y3HZWP8Ae9D/ABfXr6Z5z+v6CsZRcXZirUZUZWeqeqa2a7ojoqTzn9f0FHnP6/oKkxI6km/1zfWjzn9f0FPllcSsAe/pQBBRUnnP6/oKPOf1/QUAR0VJ5z+v6Cjzn9f0FACN/q0/GmVM0r+WnPr2pvnP6/oKAI6Kk85/X9BR5z+v6CgCxY/8tPw/rVuqtm7Pv3HPT+tWqACiiigArwnxbCW8b6zaNCzatc6paSWX7slmjAHKnHQYOa92pCqlgxUbgMA45FYVqKrJJsuE+W55nr9ta+JPC+oiN55IrMyiURbkBkRTlG45AJBx04FZWqJaHwb4JbUdSGnRw6cWSSaxW4i
Z/KXCsG4B9AQa9ipGRXXDqGHXBGa58JglhU1GV7mtWs6rTa2ucvoQ1fVfAekyWs0OiXbRqzhbMMu3kYCZAXPB9q1QyADKZPc7q1KyK9Bu7ucyVlYnhZPOTCYOR/FTN0f/ADz/APHqIf8AXp/vCo6QyTdH/wA8/wDx6jdH/wA8/wDx6o6KAJN0f/PP/wAep0bJ5qYjwdw/iqGnxf65P94UALuj/wCef/j1G6P/AJ5/+PVHRQBJuj/55/8Aj1G6P/nn/wCPVHRQBNGyeYuI8cj+KkZo9x/d9/71Nj/1qf7wprfeP1oAfuj/AOef/j1G6P8A55/+PVHRQBJuj/55/wDj1KrR7x+77/3qipyffX60APZo9x/d9/71Juj/AOef/j1Nb77fWm0ASbo/+ef/AI9Ruj/55/8Aj1R0UASq0e4fu+/96lkZPMbMeeT/ABVEv3h9adJ/rX/3jQAu6P8A55/+PUbo/wDnn/49UdFAEm6P/nn/AOPUbo/+ef8A49UdKOtAEsjJ5rZjzyf4qbuj/wCef/j1JL/rn/3jTKAOD8feBDqLpr/hYGz120+dfLbHngdv970PfofabwL8R7XxDa/ZdWiFtqcPyTKTtDEcZx2rtq8z+I3gWdrg+KfC6FNRh+a5gjH+vUdWA/veo7j366xknpM7qclXpqhN2a+F/o/J/g/K56eHiIyEyD3DVJKyec+Uydx/iryvwT49/tC0VWcB14eNj0r0WDUob1iyna7c7Cf5etOpSlD0PPu4ycJq0lui5uj/AOef/j1G6P8A55/+PVHRWJRJuj/55/8Aj1G6P/nn/wCPVHRQBNIyeY2Y88/3qbuj/wCef/j1JL/rW+tMoAk3R/8APP8A8eo3R/8APP8A8eqOigCTdH/zz/8AHq0Iv9Sn+6P5Vl1qQ/6lP90fyoAfRRRQAUUUUAFVL7/ln+P9Kt1Uvv8Aln+P9KAKdSf8u/8AwL+lR1J/y7/8C/pQBHRRRQAUUUUAPj6t/un+VMp8fVv90/yplABRRRQAUo+8PrSUo+8PrQA6X/XP/vGmU+X/AFz/AO8aZQAUUUUAFPf7sf8Au/1NMp7/AHY/93+poAZRRRQAUUUUASL/AKh/94f1qOpF/wBQ/wDvD+tR0AFFFFABT4vv/gf5Uynxff8AwP8AKgDgvH3w/h8RWhu7ACG/h+aN14z7VX+HnjubUJj4c8TEw6zbDajycfaFH/swH5jn1r0KuE+IHgEa9CNT0c/Z9XtjvjdDtLkcjkdD6GtlJTXLI6qFWPL7Gr8D2f8AK+68u6+a1O7oriPh746bxDC+la0Ps+uWY2yow2+cBxuA7H1H4jg4Hb1k007MyrUpUZ8kv+H815BUk3+ub61HUk3+ub60jIjooooAKKKKAHt/q0/GmU9v9Wn40ygAooooAuWP/LT8P61bqpY/8tPw/rVugAooooAKKKKACiiigArIrXrL8w+i/wDfIoAWH/Xp/vCo6mhkJmQYXqP4RTPNPov/AHyKAGUU/wA0+i/98ijzT6L/AN8igBlPi/1yf7wo80+i/wDfIp0chMqDC/eH8IoAiop/mn0X/vkUeafRf++RQAyin+afRf8AvkUeafRf++RQAR/61P8AeFNb7x+tSRyEyLwvUfwikaQ7jwvX+6KAI6Kf5p9F/wC+RR5p9F/75FADKcn31+tL5p9F/wC+RSrId44Xr/dFADW++31ptSNIdx4Xr/dFJ5p9F/75FADKKf5p9F/75FHmn0X/AL5FADV+8PrTpP8AWv8A7xpVkO4cL1/uilkkIkbhep/hFAEVFP8ANPov/fIo80+i/wDfIoAZSjrTvNPov/fIo8w+i/8AfIoAJf8AXP8A7xplSySEStwv3j/CKb5p9F/75FADKKf5p9F/75FHmn0X/vkUAeO/EXwVPoV+/ivwyhWLO+9tox9z1kA9PUduvTOLPhbxdBqcKKCPN7xZxk/7J7H2r1hn3KVZUIIwQUHNeQePvhjNpV5PrXg2I+UGLz6
eg+76mMen+z27eg6qNZJcs9jrlGONiot2qLRN7SXZ+fZ/JnotjqpaHcW8+IdWH30+orVimjmTfEwYe1eMeE/Hi3DxpeTeTcLwtxjP4OO49/59vSbK+SeZdrJaXbDK4wYph6jtVVaNtUebzThJ06is1umdDRVWLVGV/KvY1hk9So2n8auCbcMrsI9QorkcWtzZNMWX/Wt9aZU0khEjcL1/uimeafRf++RSGMop/mn0X/vkUeafRf8AvkUAMrUh/wBSn+6P5VneafRf++RWjF/qU/3R/KgB9FFFABRRRQAVUvv+Wf4/0q3VS+/5Z/j/AEoAp1J/y7/8C/pUdSf8u/8AwL+lAEdFFFABRRRQA+Pq3+6f5Uynx9W/3T/KmUAFFFFABSj7w+tJSj7w+tADpf8AXP8A7xplPl/1z/7xplABRRRQAU9/ux/7v9TTKe/3Y/8Ad/qaAGUUUUAFFFFAEi/6h/8AeH9ajqRf9Q/+8P61HQAUUUUAFPi+/wDgf5Uynxff/A/yoAZRRRQBwHj/AMDz3syeI/C7G21uzO8eXx5wH9f5jg1q+BPG0Hi/Sz5ii31O2+W7tumD/eXP8J/Q8ehPVV5v458K3ukaoPGng8eXf2/zXlso+W4T+I4HXjqO/UYI50T5lZnfRnGtD2FR2/lfbyfk/wAHr3PSKkm/1zfWsDwn4qsfF2iJf2J2OPlngY5aF+4PqPQ9x+Vb83+ub61DVjjnCVOTjJWaI6KKKRAUUUUAPb/Vp+NMp7f6tPxplABRRRQBcsf+Wn4f1q3VSx/5afh/WrdABRRRQAUUV59rnxKn0vxPc2MOnLLp9pcRW1xdmTBR5AMYXuBnnmsataNJXkXGDlseg0Vx+v8AjV9A0t7yaHzm+7HFFGSXbBOPYYBOe2KhuPHtyuj6JLY6SLy/1OyN61v5/lrHGqBm+Yg5POAMVlh8XTxCbhfQupRlTaUup21ZFP0/xBYX2gWervPHa293Grp57hcEjO3J78H8qbtY9FP5V2NNOzME7q4+H/Xp/vCo6lhVhOmVP3h2pmxv7p/KkMbRTtjf3T+VGxv7p/KgBtPi/wBcn+8KTY390/lTolbzk+U/eHagCOinbG/un8qNjf3T+VADaKdsb+6fyo2N/dP5UALH/rU/3hTW+8frT41bzU+U/eHakZG3H5T19KAGUU7Y390/lRsb+6fyoAbTk++v1o2N/dP5UqI29flPX0oARvvt9abT2Rt5+U9fSk2N/dP5UANop2xv7p/KjY390/lQAi/eH1p0n+tf/eNCo24fKevpSyK3mv8AKfvHtQBHRTtjf3T+VGxv7p/KgBtKOtLsb+6fyoCN/dP5UALL/rn/AN40ypJFbzX+U/ePam7G/un8qAG0U7Y390/lRsb+6fyoAbUk3+vf/eNN2N/dP5U+ZWMz4U/ePagDzDx/8MF1R5NZ8MqsGpctLbj5UuPcejfof1rifDfjGfTpG03WIXMcblZIJPleJu5Geh9vzr6C2N/dP5VxXjv4b23iuE3lmBaavGvyT7fllx0V/wDHqPccV00qzhpLY626eKiqdd2ktpdvKXdfivTQs6brkUtkjyuL6wbhZgMvGfRh2P8AkE1dW60ljm2vmgb8QP1rw/S9c1vwZrclpdrJZ3cJ2yROOGH8iD61tXHxSvGz5sNm3+9Av+FdPsVL3ovQ82pTrUansZx97yV7rumt0ewC7mDnyb22vB/vgE1KNS2f8fMEkXvjI/OvC3+I11dTbLeyjlkbokMJyfwFadldfEK/I/szw5cwg8hpozED+LkCspUqa3aOuGCxjV3DlX95qP8A6VY9qhuYZ/8AVSKx647/AJVLXCeBdO8b2+szz+K44o7RrcqkaOrMJNy4PBPbd3rvdjf3T+Vcs0k7IU4OD5W0/TYbWpD/AKlP90fyrN2N/dP5VpQ/6lP90fyqCB9FFFABRRRQAVVvW27OAevX8KtVUvv+Wf4/0oAreZ/sL+VP3/uM7V+96e1Q1J/y7/8
AAv6UAJ5n+wv5UeZ/sL+VMooAf5n+wv5UeZ/sL+VMooAljfk/Kv3T29qb5n+wv5UR9W/3T/KmUAP8z/YX8qPM/wBhfyplFAD/ADP9hfypQ/zD5F/Ko6UfeH1oAklfEz/Kv3j2pvmf7C/lRL/rn/3jTKAH+Z/sL+VHmf7C/lTKKAH+Z/sL+VOd/lT5V+76e5qKnv8Adj/3f6mgA8z/AGF/KjzP9hfyplFAD/M/2F/KjzP9hfyplFAEyv8AuX+Veo7fWmeZ/sL+VKv+of8A3h/Wo6AH+Z/sL+VHmf7C/lTKKAH+Z/sL+VOjfL/dXoe3tUVPi+/+B/lQAeZ/sL+VHmf7C/lTKKAH+Z/sL+VHmf7C/lTKKAPLfE+kXnw88RHxf4WhDadM2NS08cIAT94egz/3yfY4Ho+j6/Y+ItLi1PS3WW3mGRkfMp7qw7EVZliSaF4pkWSN1KujjIYHqCO4rybU7S8+EPilr/S0kuPDF9JiWDOTbt6Z9ux7jg88nRe9o9z0Y/7ZDl/5eLb+8u3qundadj17zP8AYX8qPM/2F/KqWnaja6tp8V7p8yzQTLuV1NWqzeh5w/zP9hfyo8z/AGF/KmUUASs/7tPlXv2pvmf7C/lQ3+rT8aZQA/zP9hfyo8z/AGF/KmUUAXbJt2/gDp0/GrVVLH/lp+H9at0AFFFFABXmuufDnVdS8TXr291aJpOo3cN3OX3ebGYwMqoxg5wOc16VRWdSlCppJFRk47HKSeGtTvNH1G2vZ7RZZhLHb+UG27CMKWJ6H1xkVl3PgvW7XTdAfSZbJ7/TdObT5lnZhGysgUspAzwRnkc139FZUcLRoX9mrX/r9S51ZztzPb+v0MTSPDNnZeFdP0bUIIL5LONR++iDqXA5YA9Op/Opg7AYDMB9a1ayK6m7u7MUrKxLC7mZMs33h3pnmP8A32/OnQ/69P8AeFR0hjvMf++350eY/wDfb86bRQA7zH/vt+dPidzKmWb7w71FT4v9cn+8KAE8x/77fnR5j/32/Om0UAO8x/77fnR5j/32/Om0UASRyOZVyzfeHekaR9x+ZuvrRH/rU/3hTW+8frQAvmP/AH2/OjzH/vt+dNooAd5j/wB9vzpySPvX5m6+tR05Pvr9aAFaR95+ZuvrSeY/99vzob77fWm0AO8x/wC+350eY/8Afb86bRQA9ZH3D5m6+tLJI4lbDN9496Yv3h9adJ/rX/3jQAnmP/fb86PMf++3502igB3mP/fb86XzH/vt+dMpR1oAkkkcSvhm+8e9M8x/77fnSy/65/8AeNMoAd5j/wB9vzo8x/77fnTaKAHeY/8Afb86fM7iZ8M33j3qKpJv9e/+8aAG+Y/99vzo8x/77fnTaKAMLxR4O0nxfDEurxP5sJzHcRNtkQemeePY1naf8LvCOn4ZdJS5cdXuXaTP4E4/Suuop3Z0xxeIhD2cZtLtcjt7K103dFp1tDaR5+5BGEH5CpvMf++350sv+tb60ykc7bbux3mP/fb86PMf++3502igQ7zH/vt+daUX+pT/AHR/KsutSH/Up/uj+VAD6KKKACiiigAqpff8s/x/pVuql9/yz/H+lAFOpP8Al3/4F/So6k/5d/8AgX9KAI6KKKACiiigB8fVv90/yplPj6t/un+VMoAKKKKAClH3h9aSlH3h9aAHS/65/wDeNMp8v+uf/eNMoAKKKKACnv8Adj/3f6mmU9/ux/7v9TQAyiiigAooooAkX/UP/vD+tR1Iv+of/eH9ajoAKKKKACnxff8AwP8AKmU+L7/4H+VADKKKKACiiigAqPVLG21K3uLO/hWe3nUpJG44Yf571JUk3+ub60DTad0eLsNV+D+v8ebe+GbyT5GPJiPofRh+TDn1A9Y0rVrPWbCO80+ZZopBkFT0qS+sLXU7GWy1CBLi2mXbJG4yGH+e9eT6h4W8R/DjUJNS8JNLqGkE7pLU/M8Q75Hce4/EcZOqamrPc72o4z3lpU+5S/yl+D9d/YKK4fw
38VdB1uNUup1sbnukxwCfY12sU8U6BoJEkU91bNRKEo7nDOEqcuWas+z0Jm/1afjTKe3+rT8awtS8Y+HdID/2hrNnEyHDRiUO4/4AuW/SpHCE5u0Fd+RtUVz3hrxvo3iy7vINGklc2gUs8kewODnlc88Y5yB1FdDTtYKlOdKXJNWZcsf+Wn4f1q3VSx/5afh/WrdIgKKKKACiiigAooooAKy/Lb1X/voVqVnfZZv7n6igAhjImQ/L1H8Qpnlt6r/30Kg1K7XQ9MudUvkf7NZxmaXZgttUZOBmrX2Wb+5+ooAZ5beq/wDfQo8tvVf++hT/ALLN/c/UUfZZv7n6igBnlt6r/wB9CnRxsJU+794fxCl+yzf3P1FOS2mWRSU4BB6igCLy29V/76FHlt6r/wB9Cn/ZZv7n6ij7LN/c/UUAM8tvVf8AvoUeW3qv/fQp/wBlm/ufqKPss39z9RQAkcbCRfu9R/EKRo23Hlev94VHeSf2XYXF/dI3kWsTTSbcE7VG44GeuBT4Ue6gjuIkOyVQ65IzgjIoAPLb1X/voUeW3qv/AH0Kf9lm/ufqKPss39z9RQAzy29V/wC+hSrG28cr1/vCnfZZv7n6ilW2mDAlOh9RQAxo23H7vX+8KTy29V/76FSNazFidnf1FJ9lm/ufqKAGeW3qv/fQo8tvVf8AvoU/7LN/c/UUfZZv7n6igBqxtuH3ev8AeFLJGxkb7vU/xCmzo9rbyXEqHZCpdsEZwBk0yzk/tOxgv7ZG8m6jWaPdgHawyMj6GgB/lt6r/wB9Cjy29V/76FP+yzf3P1FH2Wb+5+ooAZ5beq/99Cjy29V/76FP+yzf3P1FH2Wb+5+ooASSNjK33fvH+IU3y29V/wC+hUr20zSMQnBJPUU37LN/c/UUAM8tvVf++hR5beq/99Cn/ZZv7n6ij7LN/c/UUAM8tvVf++hT5oyZnPy/eP8AEKDbTAElP1FVtNul1zTLbVLFH+z3kazRb8BtrDIyM9aAJvLb1X/voUeW3qv/AH0Kf9lm/ufqKPss39z9RQAzy29V/wC+hR5beq/99Cn/AGWb+5+oo+yzf3P1FACSRsZG+71/vCm+W3qv/fQqV7aZpCQnBPqKb9lm/ufqKAGeW3qv/fQo8tvVf++hT/ss39z9RR9lm/ufqKAGeW3qv/fQrRi/1Kf7o/lVH7LN/c/UVfjBWJAeoUA0AOooooAr3t9badatc3sywwp96RzgL9T2qnZ+JNK1C3FxY3kdzCxwJIjuU/iK534tQyzeApPLjeSOO5hedUUn92G+bI9K5TwjdWsF3qtxDmPTtR1JYbApEwWVygyFwPUfpXlYvE16Umqav8vT/P8AA6qVOnJJyf8AWv8Al+J6paavY313Pa2twkk9uFaWMH5kDdMjtnBp19/yz/H+lcF8PNKXRvHnia1j88gQ2rM87FndiHJJJ712a2OorqU81zfC7tnz5Nv5Kx+Tz/eBy3413YecqlKM5bswqJRm4roNqT/l3/4F/Spfs7f88P8Ax+qpvYRq40jyX+0tAbkDI27AwXr65PStyBaKs/Z2/wCeH/j9H2dv+eH/AI/QBWoqz9nb/nh/4/R9nb/nh/4/QBDH1b/dP8qZVtYGBP7jsR9+m/Z2/wCeH/j9AFairP2dv+eH/j9H2dv+eH/j9AFalH3h9asfZ2/54f8Aj9NkTyYnleA7UUscP2FAEcv+uf8A3jTKXT7mPV9NttStIGMF5Ck8RdgDtcBhkdjg1Y+zt/zw/wDH6AK1FWfs7f8APD/x+j7O3/PD/wAfoArU9/ux/wC7/U1N9nb/AJ4f+P05oGIX9x0GPv8AvQBUoqz9nb/nh/4/R9nb/nh/4/QBWoqz9nb/AJ4f+P0fZ2/54f8Aj9AES/6h/wDeH9ajpJb2GDVbfS3hf7RdxSTRgEbdsZUNk9j+8X9atfZ2/wCeH/j9AFairP2dv+eH/j9H2dv+eH/j9AFanxff/A/yqb7O3/PD/wAfpywMGz5
HY/x0AVKKs/Z2/wCeH/j9H2dv+eH/AI/QBWoqz9nb/nh/4/R9nb/nh/4/QBWqSb/XN9al+zt/zw/8fqrpl7DremwalZQuYLld6FyFOPcUALRVn7O3/PD/AMfo+zt/zw/8foA5LXfh/wCG/EUjTahpqLcMDm4gJjcn1JHDH6g1y8vwS0xMf2dreqW3rudW/kFr1X7O3/PD/wAfo+zt/wA8P/H6pSktmdcMZiIR5VN27b/meWJ8EdMdQ2pa3qd1n0ZV/mGrTsPhF4SscFrOW7Yd7iUn9BgfpXoRgYqo8jp/t037O3/PD/x+nzy7hPGYia5XN27XsvwMrTtI0/SYzHplnDaqeoiQDNXKs/Z2/wCeH/j9H2dv+eH/AI/Utt6s5B9j/wAtPw/rVuoLaMx7spszjvnNT0gCiiigAooooAKKKKAGySJFE0kjBURSzMewHeuT1P4keHbfR72403VbW8uobd5I4I3yXKqTiuuIBBBGQeorM1fQLHV9GvNOlhSJLqFoi6IAy5GMipldxdio2urnEeHfGOtapfXOleII7JpDax3K+RGdjRuPusGzyKuN4z1cfEDStFFpssrmR0kuJVA8wiMvhMenGT+FJpfw3v8AT1ubhvEO7UZYooI7gWY2xxIemwtySOpzW3c+EFn8Q6RqgvCv9mySP5Rjz5m5CuM54xnPQ149OjjFiIyk/d66nVKdJ02lv6GHpvju+1Lx5PpK3OkQ2kV49usUqTCeUKOSrfcznt19q9ArkP8AhC72bxBBc32uSXOnWt4b23tXhG9ZD0Bkzyoz0xXX17S+FdzjfxMKKKKACiiigBskiRRNJIwVEUszHsB3rAi8c+HLzfFp+sWtxOI3dY43yx2qScfgK6EgEEEZB6iq0mn2skMkf2eJRIhQlUAOCMGplfldhq11c808PeP9c1bUbS31aKz+y6rbST2yxIcqqnBV88HIrUfxnq0fj3SNESz8uxuJGjkuJEAEmIy2I8dhxk/hUek/C240ucSf28ZDa20lvp5+yj9wGOdzfN85HTtW4/g1pdY0PUZtRLy6WzM/7gDz2ZNpPX5fXvXjxo4xV4ybfL6+v6WOtzpcjXX08v8AMydP8c6pdeIbYzW9oNHvdQm0+DaG85WQHDk5wQSDxiu+rjLHwA1n4iiu21MyabbXct7b2XkgFJZBzl88gZJAxXZ17S+Fd/6/W5xv4nb+v6VgooooAKKKKACuek8eeGEmeAazatOpK+UH+YsP4frmuhqvJp9pKjq9tF84IJ2DPNJ3toNW6nGeFvGWq6vq1lb6zbWa2ur2kl1aCANujVWxtfJwcjnIxR4p8X6po2qXdpodtZG20mxS7uhOGy6lsBE2kAcDOTUmi+ALzSLgSjXPMaztJLXTSbUf6MrnO5hn5yOnan694DutZuzOmteQ13ZpZ6ifswP2hVOdy8/ITz69ab6W/re36XEut/62v+tjOvPiHeN4yXS7G40m1tMQfNfJMXcyKGIDJ8oOCPvY5r0WuN1XwLd6ldtANbkTRpXheSxaEMw8vGAj5+UHAzxXZU9LC1uFFFFIYUUUUAFc74t1y+0pdNtNISBr7UroW8TXAJjjGCSxAwTwK6KsPxP4ek12Gzks7z7Fe2FwLi3mMfmKGAIIK5GQQaTGZth4ylk+H17rt9bxi6sBMk0UZOxpIyRxnnB4/OsFfHusaRp13aX9np/29ILaSyW2VkixMdoVgTn5fbrW9b+BpIfDg0VtWZrWdJ/tw+zruuJJOd4Ofk2nnHOaoL8NpbjT71dU1k3F9PHBFBcpbhBCsJyny5OTnrzT66+X/BsLpp5/8A2/CWuX+qNqdlrKW4vtMufIke2BEcgKhgwBJI610VYXhjw9Locd7Le3gvb6/n8+4mWPy1JwAAFycAAVu02IKKKKQwooooAKKKKACiiigAooooAOvWkVQihUAUDoAMYpaKACmySJFE0kjBURSzMewHenUEAggjIPUUAc9F468PXZePTNSiv7kIz
pbWx3SS7QThR3PFQ+C9cvPEI1O41SxitLi0vHtUjAy6IArbWbucnnHFdElrbxuGjgiRh0KoARWPp3h2fS4da+yahtn1O6kuY5TAD5DMoA+Un5sYz2zR1fp+qD/MyPEPiPxJo2tRMtrYf2dLdxW0EDMWuLrd95lwcDHoR2rtK4mTwTrsnihNbPimMzIixqr6YrbFAG4Ll/lLc5IHeu2oXwg9wooooAKKKKACiiigDL1bxNo2hSxx6xqUFm8oLIsrY3D1rnte8ZXM81ha+EHsbo3dvPcme4DNGUi6qApHJPFdjJbwzEGaKOQjoWUHFc54l8Iy6vdWl5pF+umXVvFLBu8gSK0cgww25GD3BpO9hq1yvc+LJYfhfD4i020hR2gjYRPny4QSFJwOSq88D0q14I8RXXiLTLqa7a2mFvctDHc2ysiTqADuCsSR1xVS88Cyz+H4NFt9XaGxtYIlgjNurETRtu8xjn5gcYK9PetPw14fm0Vr+5vrxbu91CcTTvHF5aAgYAVcnsOuearS7/AA/r7/62nWyN2iiikMKKKKACiiigArnpPHnhhJngGs2rTqSvlB/mLD+H65roaryafaSo6vbRfOCCdgzzSd7aDVupxXhXxfqer65p66xaWSxapZy3Ni0Ct5kSqwBRiTzkAHjHSr/jnxpJ4Zgii02BLq+cq7q4JWGIsF3tgjqSAPf6VW0XwBeaRcCUa55jWdpJa6aTaj/Rlc53MM/OR07VJ4h+G9j4hhkmubyaPU5kiSa7RmCybMc+UGA5wfpmqdrq39a/5WuLv/X9dbHZiiorW3S0tIreLcUiQIu5ixwBjknk1LSAKKKKACiiigAqO4mW3tpZ3+7Ghc49AM1JTJYlmheKQZR1KsPUGk720GrX1OI8KeM9W1XWLGHV7e0S21a0kurP7OG3RhWxtfJwTjnIxTfFHi3U9E1S7s9BtbL7NpNkt5dLMrAupbGxNpAHGTk5q34b8CTaHqcNzc6qbyOxt3trCPyAhhRmySxz8x7dqr6r8P8AUNVkMsviDEt3ai01GQWaj7RGH3DaAfkOOO9N9Lf1vb9LiXW/9bX/AFO0tbhbqzhuI+FmjV1z6EZqWmQQpb28cMQwkahFHoAMCn03a+gle2oUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAVxPh3xDqd/8WfF2i3dz5mn6bFaNaw+Wo8suhLfMBk5I7k121ee3/w98Rr451fxH4Z8YxaQdVSFZoJNJW5x5a7R8zSD3PTvQtJfeH2X/XUl1/4pRaPq+qWtjoGoatb6Iivqt3bNGEtQw3YAZgXIXkgdKfqnxQtYNUtrHQdHvddaTT11OdrRkUQWzdG+cjcT/dHNZd98K9bmk1T7F4vEEevwxx60DpykzsqbGki+b92WGQR8wGaw/Gmg3fhfxPaSeF7bX7eBtEGmyT6ZpwvlmROEjI6xv/tkEc9Kl3S8/wDgP8L2t1+ZSs3/AF3X42v5fI6GD49eBE0uxudZ1N9LmvYBcLbS20sjKhYgElEI/hPenaj8ZNPS42eHtHvddjOmf2os1uViXyAxVmPmbSMbTxjJ7Ctr4W6Pe6B8L9C0zVITBdwW372JjyhJLYPvzTdW8B/2p4s1TWv7S8r+0NFbSfJ8jd5eWJ8zduGev3cD61VRNNqPn+Tt+NiYNNJvy/NX/C5R8OfFW01/XLSxl0a+02DULBr+xurlo9s8S43HarErjPfqKg034vWl/d6fJNoWo2mjarcNbadqsxj8u4kGcAoG3KGwcEjmrOl/DJNP1Dw3cS6n9oj0TSJNLaL7Pt+0BwAXzuO3p0569azNN+Ed5atpOnX/AIkN54d0S6+12Fj9jCSBxkoHl3fMF3HGAM05b6f1q/xta34iV+XX+tP89/wJdD+MdvrN1ohbw7qNpp+t3D2ttfSvGUMy5yu0Nuxx97HX6U5/jFZf2q8UOi30um/bW06
LUg8YjluhnCBc7gCw2hsYzUmm/Cn+z9C8J6b/AGz5n/COai195n2XH2jJc7Mb/l+/1yenSs21+DMGleKG1KG7sZNNjvm1EQHR4pLssTu8oTtk7M8jAz2B70tL+X/Df8H8Pm35f1v/AMD8flu6V8UdO1n/AIRxbCyuHm11ZnEWVzarD98yfQ8cd6y9J+NFpqUmjz3Hh/ULHS9Xlkt4NQmeMp5qZyu0Hdj5T82P5Vn/AAr8JSL408T+JpdPv9OsbmV4dKttQhMUkaO2+Vgh5UF+lalt8IY4vC/hnRZ9Y86PQr6S6d/su37SH35TG/5Pv9cnp0o1sn6fj/kvxDTVf1/VybQPi7Y63qdpDNpF9Y2epJNJpt5MyMt2IwS3yqSyHAJAI5p3g/4rweL7o/Z9Eu4bFoZJo7xZ4pgAnaREYtEx7BhzWd4R+DS+F9Zt7j+0bGS1sUkS0MOkQxXR3ggNJPyzFQeCMZ7+lGgfB+40vxnBr19rdvLLapKscllpqWs05kBG6Z1JDkA8fKOeaP8AL/P/AIHkBqeCfilF421FIrPRLqG0mjeSG8+0QyqApxiRUYtET2DCu9rzHw38Ip9H8c2niLUNYtbmSyEoja001LWW53jGZ3VsPgHsoyea9Op6WQtbsKKKKQwooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA4nw74h1O/wDiz4u0W7ufM0/TYrRrWHy1Hll0Jb5gMnJHcmoNf+KUWj6vqlrY6BqGrW+iIr6rd2zRhLUMN2AGYFyF5IHSor/4e+I18c6v4j8M+MYtIOqpCs0Emkrc48tdo+ZpB7np3qnffCvW5pNU+xeLxBHr8McetA6cpM7KmxpIvm/dlhkEfMBmkr8q72/H/L8R6cz+X/B+f4GpqnxQtYNUtrHQdHvddaTT11OdrRkUQWzdG+cjcT/dHNUIPj14ETS7G51nU30ua9gFwttLbSyMqFiASUQj+E9657xpoN34X8T2knhe21+3gbRBpsk+macL5ZkThIyOsb/7ZBHPSu9+Fuj3ugfC/QtM1SEwXcFt+9iY8oSS2D781WjTa7/q/wBLE6qyfb9F+tzF1H4yaelxs8PaPe67GdM/tRZrcrEvkBirMfM2kY2njGT2FWvDnxVtNf1y0sZdGvtNg1Cwa/sbq5aPbPEuNx2qxK4z36ir2reA/wC1PFmqa1/aXlf2horaT5PkbvLyxPmbtwz1+7gfWqel/DJNP1Dw3cS6n9oj0TSJNLaL7Pt+0BwAXzuO3p0569albff/AO3f/a/e/k3v/X93/wC2+4rab8XrS/u9Pkm0LUbTRtVuGttO1WYx+XcSDOAUDblDYOCRzUWh/GO31m60Qt4d1G00/W7h7W2vpXjKGZc5XaG3Y4+9jr9Ki034R3lq2k6df+JDeeHdEuvtdhY/YwkgcZKB5d3zBdxxgDNW9N+FP9n6F4T03+2fM/4RzUWvvM+y4+0ZLnZjf8v3+uT06VStfXy+7rfz/wCAJ3s7f09f+B+JG/xisv7VeKHRb6XTftradFqQeMRy3QzhAudwBYbQ2MZq/pXxR07Wf+EcWwsrh5tdWZxFlc2qw/fMn0PHHesK1+DMGleKG1KG7sZNNjvm1EQHR4pLssTu8oTtk7M8jAz2B71B8K/CUi+NPE/iaXT7/TrG5leHSrbUITFJGjtvlYIeVBfpUq7j52v+H6v8F8xy0enf+vuRoaT8aLTUpNHnuPD+oWOl6vLJbwahM8ZTzUzldoO7Hyn5sfyq1oHxdsdb1O0hm0i+sbPUkmk028mZGW7EYJb5VJZDgEgEc1DbfCGOLwv4Z0WfWPOj0K+kunf7Lt+0h9+Uxv8Ak+/1yenSq3hH4NL4X1m3uP7RsZLWxSRLQw6RDFdHeCA0k/LMVB4Ixnv6U+r/AK6f57fiD8v61/y3/A0fB/xXg8X3R+z6Jdw2LQyTR3izxTABO0i
IxaJj2DDmn+CfilF421FIrPRLqG0mjeSG8+0QyqApxiRUYtET2DCsvQPg/caX4zg16+1u3lltUlWOSy01LWacyAjdM6khyAePlHPNP8N/CKfR/HNp4i1DWLW5kshKI2tNNS1lud4xmd1bD4B7KMnmmrXV+3+f/AE72dj06iiikMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//2Q=="
+ }
+ },
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can find that, although in our simulation, the results of lifted Heston model and rough Heston model differ a lot, the trend of implied volatility is similar. Meanwhile, the result of rough Heston model is closer to the result in the paper. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We believe the error in rough Heston model is due to choosing fewer steps in Adams Predictor-Corrector scheme(only 60) and cosine methods(less than 160), as well as fewer intervals to calculate the integral(only 70). "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The error in lifted Hestion model, is probably due to fewer MC simulations(only 1000) and fewer steps when simulating the price and volatility(only 300)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/pyfeng/heston_mc.py b/pyfeng/heston_mc.py
index 0c3c0f5..283d54c 100644
--- a/pyfeng/heston_mc.py
+++ b/pyfeng/heston_mc.py
@@ -40,7 +40,7 @@ def phi_exp(self, texp):
phi = 4*self.mr / self.vov**2 / (1/exp - exp)
return phi, exp
- def var_mean_var(self, var_0, dt):
+ def var_mv(self, var_0, dt):
"""
Mean and variance of the variance V(t+dt) given V(0) = var_0
@@ -49,7 +49,7 @@ def var_mean_var(self, var_0, dt):
dt: time step
Returns:
- (mean, variance)
+ mean, variance
"""
expo = np.exp(-self.mr * dt)
@@ -112,9 +112,9 @@ def var_step_ncx2_eta(self, var_0, dt):
chi_df = self.chi_dim()
phi, exp = self.phi_exp(dt)
chi_nonc = var_0 * exp * phi
- nn = self.rng_spawn[0].poisson(chi_nonc / 2, size=self.n_path)
- var_t = (exp / phi) * 2 * self.rng_spawn[0].standard_gamma(shape=chi_df / 2 + nn, size=self.n_path)
- return var_t, nn
+ pois = self.rng_spawn[0].poisson(chi_nonc / 2, size=self.n_path)
+ var_t = (exp / phi) * 2 * self.rng_spawn[0].standard_gamma(shape=chi_df / 2 + pois, size=self.n_path)
+ return var_t, pois
@abc.abstractmethod
def cond_states(self, var_0, dt):
@@ -127,17 +127,17 @@ def cond_states(self, var_0, dt):
dt: time step
Returns:
- (var_final, var_mean)
+ (var_final, var_avg)
"""
return NotImplementedError
def cond_spot_sigma(self, var_0, texp):
- var_final, var_mean = self.cond_states(var_0, texp)
+ var_final, var_avg = self.cond_states(var_0, texp)
- spot_cond = ((var_final - var_0) - self.mr * texp * (self.theta - var_mean)) / self.vov \
- - 0.5 * self.rho * var_mean * texp
+ spot_cond = ((var_final - var_0) - self.mr * texp * (self.theta - var_avg)) / self.vov \
+ - 0.5 * self.rho * var_avg * texp
np.exp(self.rho * spot_cond, out=spot_cond)
- sigma_cond = np.sqrt((1.0 - self.rho**2) / var_0 * var_mean) # normalize by initial variance
+ sigma_cond = np.sqrt((1.0 - self.rho**2) / var_0 * var_avg) # normalize by initial variance
# return normalized forward and volatility
return spot_cond, sigma_cond
@@ -167,7 +167,7 @@ class HestonMcAndersen2008(HestonMcABC):
array([44.31943535, 13.09371251, 0.29580431])
"""
psi_c = 1.5 # parameter used by the Andersen QE scheme
- scheme = 2
+ scheme = 4
def set_mc_params(self, n_path=10000, dt=0.05, rn_seed=None, antithetic=True, scheme=4):
"""
@@ -178,7 +178,7 @@ def set_mc_params(self, n_path=10000, dt=0.05, rn_seed=None, antithetic=True, sc
dt: time step for Euler/Milstein steps
rn_seed: random number seed
antithetic: antithetic
- scheme: 0 for Euler, 1 for Milstein, 2 for NCX2, 3 for NCX2 with Poisson, 4 for 2 for Andersen (2008)'s QE scheme
+ scheme: 0 for Euler, 1 for Milstein, 2 for NCX2, 3 for Poisson-mixture Gamma, 4 for Andersen (2008)'s QE scheme
References:
- Andersen L (2008) Simple and efficient simulation of the Heston stochastic volatility model. Journal of Computational Finance 11:1–42. https://doi.org/10.21314/JCF.2008.189
@@ -187,7 +187,7 @@ def set_mc_params(self, n_path=10000, dt=0.05, rn_seed=None, antithetic=True, sc
self.scheme = scheme
def var_step_qe(self, var_0, dt):
- m, s2 = self.var_mean_var(var_0, dt)
+ m, s2 = self.var_mv(var_0, dt)
psi = s2 / m**2
zz = self.rv_normal(spawn=0)
@@ -258,9 +258,9 @@ def cond_states_old(self, var_0, texp):
n_dt = len(tobs)
var_paths = self.vol_paths(tobs)
var_final = var_paths[-1, :]
- var_mean = spint.simps(var_paths, dx=1, axis=0) / n_dt
+ var_avg = spint.simps(var_paths, dx=1, axis=0) / n_dt
- return var_final, var_mean
+ return var_final, var_avg
def cond_states(self, var_0, texp):
@@ -274,45 +274,46 @@ def cond_states(self, var_0, texp):
weight /= weight.sum()
var_t = np.full(self.n_path, var_0)
- var_mean = weight[0] * var_t
+ var_avg = weight[0] * var_t
if self.scheme < 2:
milstein = (self.scheme == 1)
for i in range(n_dt):
# Euler (or Milstein) scheme
var_t = self.var_step_euler(var_t, dt[i], milstein=milstein)
- var_mean += weight[i + 1] * var_t
+ var_avg += weight[i + 1] * var_t
elif self.scheme == 2:
for i in range(n_dt):
var_t = self.var_step_ncx2(var_t, dt[i])
- var_mean += weight[i + 1] * var_t
+ var_avg += weight[i + 1] * var_t
elif self.scheme == 3:
for i in range(n_dt):
var_t, _ = self.var_step_ncx2_eta(var_t, dt[i])
- var_mean += weight[i + 1] * var_t
+ var_avg += weight[i + 1] * var_t
elif self.scheme == 4:
for i in range(n_dt):
var_t = self.var_step_qe(var_t, dt[i])
- var_mean += weight[i + 1] * var_t
+ var_avg += weight[i + 1] * var_t
- return var_t, var_mean # * texp
+ return var_t, var_avg # * texp
class HestonMcGlassermanKim2011(HestonMcABC):
"""
- Exact simulation using the gamma series based on Glasserman and Kim (2011)
+ Exact simulation using the gamma series based on Glasserman & Kim (2011)
References:
- Glasserman P, Kim K-K (2011) Gamma expansion of the Heston stochastic volatility model. Finance Stoch 15:267–296. https://doi.org/10.1007/s00780-009-0115-y
"""
antithetic = False
- KK = 1 # K for series truncation.
+ scheme = 3 # Poisson mixture gamma
+ kk = 1 # K for series truncation.
- def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, KK=1):
+ def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, kk=1):
"""
Set MC parameters
@@ -321,168 +322,241 @@ def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, KK=1):
dt: time step
rn_seed: random number seed
scheme: simulation scheme for variance
- KK: truncation index
+ kk: truncation index
"""
super().set_mc_params(n_path, dt, rn_seed, antithetic=False)
self.scheme = scheme
- self.KK = KK
+ self.kk = kk
- def gamma_lambda(self, dt, KK=None):
+ def laplace(self, aa, var_0, var_t, dt):
"""
- gamma_n and lambda_n in Glasserman and Kim (2011)
+ MGF of the average variance given the initial and final variance
+
+ Args:
+ aa: dummy variable in the transformation
+ var_0: initial variance
+ var_t: final variance
+ dt: time step
+
+ Returns:
+ Conditional MGF at dummy variable aa
+ """
+
+ vov2dt = self.vov**2 * dt
+ mrt = self.mr * dt
+ iv_index = 0.5 * self.chi_dim() - 1
+
+ gamma = np.sqrt(mrt**2 + 2 * vov2dt * aa)
+
+ var_mean = np.sqrt(var_0 * var_t)
+ phi_mr, _ = self.phi_exp(dt)
+ cosh_mr = np.cosh(mrt / 2)
+
+ phi_gamma = 2 * gamma / vov2dt / np.sinh(gamma / 2)
+ cosh_gamma = np.cosh(gamma / 2)
+
+ part1 = phi_gamma / phi_mr
+ part2 = np.exp((var_0 + var_t) * (cosh_mr * phi_mr - cosh_gamma * phi_gamma) / 2)
+ part3 = spsp.iv(iv_index, var_mean * phi_gamma) / spsp.iv(iv_index, var_mean * phi_mr)
+
+ ch_f = part1 * part2 * part3
+ return ch_f
+
+ def cond_avgvar_mv_numeric(self, var_0, var_t, dt):
+ """
+ Mean and variance of the average variance conditional on initial var, final var.
+ It is computed from the numerical derivatives of the conditional Laplace transform.
+
+ Args:
+ var_0: initial variance
+ var_t: final variance
+ dt: time step
+
+ Returns:
+ mean, variance
+
+ See Also:
+ cond_avgvar_mv
+ """
+ # conditional Cumulant Generating Fuction
+ def cumgenfunc_cond(aa):
+ return np.log(self.laplace(-aa, var_0, var_t, dt))
+
+ m1 = derivative(cumgenfunc_cond, 0, n=1, dx=1e-5)
+ var = derivative(cumgenfunc_cond, 0, n=2, dx=1e-5)
+ return m1, var
+
+ def gamma_lambda(self, dt, kk=None):
+ """
+ gamma_n and lambda_n below Eq. (2.8) in Glasserman & Kim (2011).
+ gamma_n is the original value * dt to make x1, x2, and x3 the average variance.
Args:
dt: time step
- KK: number of terms
+ kk: number of terms
Returns:
- gamma, lambda
+ gamma_n, lambda_n
"""
- if KK is None:
- KK = self.KK
+ if kk is None:
+ kk = self.kk
mrt2 = (self.mr * dt)**2
- vovn = self.vov**2 * dt
+ vov2dt = self.vov**2 * dt
- n_2pi_2 = (np.arange(1, KK + 1) * 2 * np.pi)**2
- gamma_n = (mrt2 + n_2pi_2) / (2 * vovn * dt)
- lambda_n = 4 * n_2pi_2 / vovn / (mrt2 + n_2pi_2)
+ n_2pi_2 = (np.arange(1, kk + 1) * 2 * np.pi)**2
+ gamma_n = (mrt2 + n_2pi_2) / (2 * vov2dt) # / dt
+ lambda_n = 4 * n_2pi_2 / vov2dt / (mrt2 + n_2pi_2)
return gamma_n, lambda_n
- def x1star_mean_var_asymp(self, dt, KK=0):
+ def x1star_avgvar_mv_asymp(self, dt, kk=0):
"""
- Mean and variance of the truncated terms of X1 in Lemma 3.1 in Glasserman & Kim (2011)
- (v_0 + v_t) dt and (v_0 + v_t) vov^2 dt^3 need to be multiplied afterwards.
+ Asymptotic mean and variance of the truncated terms of X1/dt.
+ (v_0 + v_t) need to be multiplied to mean and variance afterwards.
+ This is NOT used for pricing, but for verification purpose.
Args:
- dt:
- KK:
+ dt: time step
+ kk: number of gamma expansion terms
- Returns:
+ References:
+ - Lemma 3.1 in Glasserman & Kim (2011)
+ Returns:
+ mean, variance
"""
+ vov2dt = self.vov**2 * dt
# remainder (truncated) terms
- rem_mean = 2 / (np.pi**2 * KK)
- rem_var = 2 / (3 * np.pi**4 * KK**3)
+ trunc_mean = 2 / (np.pi**2 * kk)
+ trunc_var = 2 * vov2dt / (3 * np.pi**4 * kk**3)
- return rem_mean, rem_var
+ return trunc_mean, trunc_var
- def x1star_mean_var(self, dt, KK=0):
+ def x1star_avgvar_mv(self, dt, kk=0):
"""
- Mean and variance of the truncated terms of X1 in p 281-282 Glasserman & Kim (2011)
- (v_0 + v_t) dt and (v_0 + v_t) vov^2 dt^3 need to be multiplied afterwards.
+ Mean and variance of the truncated terms of (X1^*/dt).
+ (v_0 + v_t) need to be multiplied to mean and variance afterwards.
Args:
- dt:
- KK:
+ dt: time step
+ kk: number of gamma expansion terms
- Returns:
+ References:
+ - p 281-282 in Glasserman & Kim (2011)
+ - Proposition 3.1 in Tse & Wan (2013)
+ Returns:
+ mean, variance
"""
mrt_h = self.mr * dt / 2
+ vov2dt = self.vov**2 * dt
csch = 1 / np.sinh(mrt_h)
coth = np.cosh(mrt_h) * csch
- x1_mean = (coth/mrt_h - csch**2) / 2
- x1_var = (coth / mrt_h**3 + csch**2 / mrt_h**2 - 2 * coth*csch**2 / mrt_h) / 8
+ mean = (coth/mrt_h - csch**2) / 2
+ var = vov2dt * (coth / mrt_h**3 + csch**2 / mrt_h**2 - 2 * coth*csch**2 / mrt_h) / 8
- if KK > 0:
- n_2pi_2 = (np.arange(1, KK + 1) * 2 * np.pi)**2
- term = 8 * n_2pi_2 / (4*mrt_h**2 + n_2pi_2)**2
- x1_mean -= np.sum(term)
- x1_var -= np.sum(4 * term / (4*mrt_h**2 + n_2pi_2))
+ if kk > 0:
+ gamma_n, lambda_n = self.gamma_lambda(dt, kk)
+ mean -= np.sum(lambda_n/gamma_n)
+ var -= 2*np.sum(lambda_n/gamma_n**2)
- return x1_mean, x1_var
+ return mean, var
- def x2star_mean_var_asymp(self, dt, KK=0):
+ def x2star_avgvar_mv_asymp(self, dt, kk=0):
"""
- Mean and variance of the truncated terms of X2 (with delta=1) in Lemma 3.1 in Glasserman & Kim (2011)
- (vov * dt)^2 and (vov * dt)^4 need to be multiplied afterwards.
+ Asymptotic mean and variance of the truncated terms of X2/dt (with shape=1 or delta=2).
+ This is NOT used for pricing, but for verification purpose.
Args:
- dt:
- KK:
+ dt: time step
+ kk: number of gamma expansion terms
- Returns:
+ References:
+ - Lemma 3.1 in Glasserman & Kim (2011)
+ Returns:
+ mean, variance
"""
- rem_mean = 1 / (4 * np.pi**2 * KK)
- rem_var = 1 / (24 * np.pi**4 * KK**3)
+ vov2dt = self.vov**2 * dt
+ trunc_mean = vov2dt / (2 * np.pi**2 * kk)
+ trunc_var = vov2dt**2 / (12 * np.pi**4 * kk**3)
- return rem_mean, rem_var
+ return trunc_mean, trunc_var
- def x2star_mean_var(self, dt, KK=0):
+ def x2star_avgvar_mv(self, dt, kk=0):
"""
- Mean and variance of the truncated terms of X2 (with delta=1) in p 284 in Glasserman & Kim (2011)
- (vov * dt)^2 and (vov * dt)^4 need to be multiplied afterwards
+ Mean and variance of the truncated terms of X2/dt (with shape=1 or delta=2)
+
+ X2 = sum_{n=1}^kk standard_gamma(1) / gamma_n
Args:
- dt:
- KK:
+ dt: time step
+ kk: number of gamma expansion terms
- Returns:
+ References:
+ - p 284 in Glasserman & Kim (2011)
+ - Proposition 3.1 in Tse & Wan (2013)
+ Returns:
+ mean, variance
"""
mrt_h = self.mr * dt / 2
+ vov2dt = self.vov**2 * dt
+
csch = 1 / np.sinh(mrt_h)
coth = np.cosh(mrt_h) * csch
- mean = (mrt_h * coth - 1) / (8 * mrt_h**2)
- var = (mrt_h * coth + mrt_h**2 * csch**2 - 2) / (32 * mrt_h**4)
+ mean = vov2dt * (mrt_h * coth - 1) / (4 * mrt_h**2)
+ var = vov2dt**2 * (mrt_h * coth + mrt_h**2 * csch**2 - 2) / (16 * mrt_h**4)
- if KK > 0:
- term = 1 / (4*mrt_h**2 + (np.arange(1, KK + 1) * 2 * np.pi)**2)
- mean -= np.sum(term)
- var -= 2 * np.sum(term**2)
+ if kk > 0:
+ gamma_n, _ = self.gamma_lambda(dt, kk)
+ mean -= np.sum(1/gamma_n)
+ var -= np.sum(1/gamma_n**2)
return mean, var
- def draw_x1(self, var_sum, dt):
+ def draw_x1(self, var_0, var_t, dt):
"""
- Simulation of x1 using truncated Gamma expansion in Glasserman and Kim (2011)
+ Samples of x1/dt using truncated Gamma expansion in Glasserman & Kim (2011)
- Parameters
- ----------
- var_t : an 1-d array with shape (n_paths,)
- final variance
- dt: float
- time-to-expiry
+ Args:
+ var_0: initial variance
+ var_t: final variance
+ dt: time step
- Returns
- -------
- an 1-d array with shape (n_paths,), random variables x1
+ Returns:
+ x1/dt
"""
# For fixed k, theta, vov, texp, generate some parameters firstly
- vovn = self.vov**2 * dt
- gamma_n, lambda_n = self.gamma_lambda(dt, self.KK)
+ gamma_n, lambda_n = self.gamma_lambda(dt, self.kk)
# the following para will change with VO and VT
- Nn = self.rng_spawn[3].poisson(lam=var_sum * lambda_n[:, None]) # (KK, n_path)
+ pois = self.rng_spawn[3].poisson(lam=(var_0 + var_t) * lambda_n[:, None]) # (kk, n_path)
- rv_exp_sum = self.rng_spawn[1].standard_gamma(shape=Nn)
+ rv_exp_sum = self.rng_spawn[1].standard_gamma(shape=pois)
x1 = np.sum(rv_exp_sum / gamma_n[:, None], axis=0)
- rem_mean_x1, rem_var_x1 = self.x1star_mean_var(dt, self.KK)
- rem_mean_x1 *= dt
- rem_var_x1 *= vovn * dt**2
-
- rem_scale = rem_var_x1 / rem_mean_x1
- rem_shape = rem_mean_x1 / rem_scale * var_sum
+ trunc_mean_x1, trunc_var_x1 = self.x1star_avgvar_mv(dt, self.kk)
+ trunc_scale = trunc_var_x1 / trunc_mean_x1
+ trunc_shape = trunc_mean_x1 / trunc_scale * (var_0 + var_t)
- x1 += rem_scale * self.rng_spawn[1].standard_gamma(rem_shape)
+ x1 += trunc_scale * self.rng_spawn[1].standard_gamma(trunc_shape)
return x1
- def draw_X2_AW(self, mu_X2_0, sigma_square_X2_0, ncx_df, texp, num_rv):
+ def draw_x2_aw(self, mu_X2_0, sigma_square_X2_0, ncx_df, texp, num_rv):
"""
- Simulation of X2 or Z from its CDF based on Abate-Whitt algorithm from formula (4.1) in Glasserman and Kim (2011)
+ Simulation of X2 or Z from its CDF based on Abate-Whitt algorithm from formula (4.1) in Glasserman & Kim (2011)
+ Currently NOT used for pricing.
Parameters
----------
@@ -536,8 +610,19 @@ def draw_X2_AW(self, mu_X2_0, sigma_square_X2_0, ncx_df, texp, num_rv):
return X2
- def eta_mean_var(self, var_0, var_t, texp):
- phi, exp = self.phi_exp(texp)
+ def eta_mv(self, var_0, var_t, dt):
+ """
+ The mean and variance of eta RV.
+
+ Args:
+ var_0: initial variance
+ var_t: final variance
+ dt: time step
+
+ Returns:
+ eta (n_path, 1)
+ """
+ phi, exp = self.phi_exp(dt)
zz = np.sqrt(var_0 * var_t) * phi
iv_index = 0.5 * self.chi_dim() - 1
@@ -550,17 +635,19 @@ def eta_mean_var(self, var_0, var_t, texp):
return mean, var
- def draw_eta(self, var_0, var_t, texp):
+ def draw_eta(self, var_0, var_t, dt):
"""
- generate Bessel random variables from inverse of CDF, formula(2.4) in George and Dimitris (2010)
+ generate Bessel RV from p 285 of Glasserman & Kim (2011)
Args:
- zz: Bessel RV parameter (n, )
+ var_0: initial variance
+ var_t: final variance
+ dt: time step
Returns:
eta (integer >= 0) values (n, )
"""
- phi, exp = self.phi_exp(texp)
+ phi, exp = self.phi_exp(dt)
zz = np.sqrt(var_0 * var_t) * phi
iv_index = 0.5 * self.chi_dim() - 1
@@ -573,48 +660,45 @@ def draw_eta(self, var_0, var_t, texp):
return eta
- def draw_x2(self, ncx_df, dt, size):
+ def draw_x2(self, shape, dt, size):
"""
- Simulation of x2 (or Z) using truncated Gamma expansion in Glasserman and Kim (2011)
- Z is the special case with ncx_df = 4
+ Simulation of x2/dt (or Z/dt) using truncated Gamma expansion in Glasserman & Kim (2011)
+ X2 is the case with shape = delta / 2 and Z is the case with shape = 2
Args:
- ncx_df: ncx2 degree of freedom
+ shape: shape parameter of gamma distribution
dt: time-to-expiry
size: number of RVs to generate
Returns:
- Random samples of x2 (or Z) with shape (n_path,)
+ x2/dt (or Z/dt) with shape (n_path,)
"""
- vovn = self.vov**2 * dt
gamma_n, _ = self.gamma_lambda(dt)
- gamma_rv = self.rng_spawn[1].standard_gamma(ncx_df / 2, size=(self.KK, size))
+ gamma_rv = self.rng_spawn[1].standard_gamma(shape, size=(self.kk, size))
x2 = np.sum(gamma_rv / gamma_n[:, None], axis=0)
# remainder (truncated) terms
- rem_mean, rem_var = self.x2star_mean_var(dt, self.KK)
- rem_mean *= ncx_df * vovn * dt
- rem_var *= ncx_df * (vovn * dt)**2
- rem_scale = rem_var / rem_mean
- rem_shape = rem_mean / rem_scale
-
- x2 += rem_scale * self.rng_spawn[1].standard_gamma(rem_shape, size=size)
+ trunc_mean, trunc_var = self.x2star_avgvar_mv(dt, self.kk)
+ trunc_scale = trunc_var / trunc_mean
+ trunc_shape = trunc_mean / trunc_scale * shape
+ x2 += trunc_scale * self.rng_spawn[1].standard_gamma(trunc_shape, size=size)
return x2
- def cond_intvar_mean_var(self, var_0, var_t, dt, eta=None, KK=0):
+ def cond_avgvar_mv(self, var_0, var_t, dt, eta=None, kk=0):
"""
- Mean and variance of the integrated variance conditional on initial var, final var, and eta
+ Mean and variance of the average variance conditional on initial var, final var, and eta
Args:
var_0: initial variance
var_t: final variance
eta: Poisson RV
dt: time step
+ kk: number of gamma expansion terms
Returns:
- (integarted variance / dt)
+ mean, variance
"""
# x = np.arange(1, 10) * 0.02
@@ -626,23 +710,18 @@ def cond_intvar_mean_var(self, var_0, var_t, dt, eta=None, KK=0):
# y2 - y1
if eta is None:
- eta_mean, eta_var = self.eta_mean_var(var_0, var_t, dt)
+ eta_mean, eta_var = self.eta_mv(var_0, var_t, dt)
else:
eta_mean, eta_var = eta, 0.0
- vovn = self.vov**2 * dt
+ x1_mean, x1_var = self.x1star_avgvar_mv(dt, kk=kk)
+ x1_mean *= (var_0 + var_t)
+ x1_var *= (var_0 + var_t)
- x1_mean, x1_var = self.x1star_mean_var(dt, KK=KK)
- x1_mean *= (var_0 + var_t) * dt
- x1_var *= (var_0 + var_t) * vovn * dt**2
-
- z_mean, z_var = self.x2star_mean_var(dt, KK=KK)
- z_mean *= 4 * vovn * dt
- z_var *= 4 * vovn**2 * dt**2
-
- x23_mean = (eta_mean + self.chi_dim()/4) * z_mean
- x23_var = (eta_mean + self.chi_dim()/4) * z_var
- x23_var += eta_var * z_mean**2
+ x2_mean, x2_var = self.x2star_avgvar_mv(dt, kk=kk)
+ x23_mean = (2*eta_mean + self.chi_dim()/2) * x2_mean
+ x23_var = (2*eta_mean + self.chi_dim()/2) * x2_var
+ x23_var += eta_var * (2*x2_mean)**2
return x1_mean + x23_mean, x1_var + x23_var
@@ -651,14 +730,14 @@ def cond_states(self, var_0, texp):
var_t, _ = self.var_step_ncx2_eta(var_0=var_0, dt=texp)
# sample int_var(integrated variance): Gamma expansion / transform inversion
- # int_var = X1+X2+X3 from formula(2.7) in Glasserman and Kim (2011)
+ # int_var = X1+X2+X3 from formula(2.7) in Glasserman & Kim (2011)
# Simulation X1: truncated Gamma expansion
- x123 = self.draw_x1(var_0 + var_t, texp)
- x123 += self.draw_x2(self.chi_dim(), texp, size=self.n_path)
+ var_avg = self.draw_x1(var_0, var_t, texp)
+ var_avg += self.draw_x2(self.chi_dim()/2, texp, size=self.n_path)
eta = self.draw_eta(var_0, var_t, texp)
- zz = self.draw_x2(4, texp, size=eta.sum())
+ zz = self.draw_x2(2.0, texp, size=eta.sum())
total = 0
for i in np.arange(eta.max()):
@@ -666,13 +745,12 @@ def cond_states(self, var_0, texp):
count = eta_above_i.sum()
if count == 0:
continue
- x123[eta_above_i] += zz[total:total+count]
+ var_avg[eta_above_i] += zz[total:total+count]
total += count
assert eta.sum() == total
- var_mean = x123 / texp
- return var_t, var_mean
+ return var_t, var_avg
class HestonMcTseWan2013(HestonMcGlassermanKim2011):
@@ -693,9 +771,9 @@ class HestonMcTseWan2013(HestonMcGlassermanKim2011):
>>> # true price: 44.330, 13.085, 0.296
array([12.08981758, 0.33379748, 42.28798189]) # not close so far
"""
- dist = 0
+ dist = 'ig'
- def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, dist=0):
+ def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, dist=None):
"""
Set MC parameters
@@ -704,92 +782,54 @@ def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, dist=0):
dt: time step
rn_seed: random number seed
scheme: simulation scheme for jumping from 0 to texp
- dist: distribution to use for approximation. 0 for inverse Gaussian (default), 1 for lognormal.
+ dist: distribution to use for approximation.
+ 'ig' for inverse Gaussian (default), 'ga' for Gamma, 'ln' for LN
"""
super().set_mc_params(n_path, dt, rn_seed, scheme=scheme)
- self.dist = dist
-
- def mgf(self, aa, var_0, var_t, dt):
- """
- MGF of the integrated variance given the initial and final variance
+ if dist is not None:
+ self.dist = dist
- Args:
- aa: dummy variable in the transformation
- var_0: initial variance
- var_t: final variance
- dt: time step
-
- Returns:
- Conditional MGF at dummy variable aa
- """
-
- vov2 = self.vov**2
- mrt_h = self.mr * dt / 2
- iv_index = 0.5 * self.chi_dim() - 1
-
- gamma = np.sqrt(self.mr**2 - 2 * vov2 * aa)
- #decay = np.exp(-self.mr * texp)
- #decay_gamma = np.exp(-gamma * texp)
-
- var_mean = np.sqrt(var_0 * var_t)
- phi_mr, _ = self.phi_exp(dt)
- cosh_mr = np.cosh(mrt_h)
-
- phi_gamma = 2 * gamma / vov2 / np.sinh(gamma * dt / 2)
- cosh_gamma = np.cosh(gamma * dt / 2)
-
- #part1 = gamma * np.exp(-0.5 * (gamma * texp - self.mr * texp)) * (1 - decay) / (self.mr * (1 - decay_gamma))
- part1 = phi_gamma / phi_mr
-
- #part2 = np.exp((var_0 + var_final) / vov2
- # * (self.mr * (1 + decay) / (1 - decay) - gamma * (1 + decay_gamma) / (1 - decay_gamma)))
- part2 = np.exp((var_0 + var_t) * (cosh_mr * phi_mr - cosh_gamma * phi_gamma) / 2)
+ def cond_states(self, var_0, texp):
- part3 = spsp.iv(iv_index, var_mean * phi_gamma) / spsp.iv(iv_index, var_mean * phi_mr)
+ tobs = self.tobs(texp)
+ n_dt = len(tobs)
+ dt = np.diff(tobs, prepend=0)
- ch_f = part1 * part2 * part3
- return ch_f
+ var_0 = np.full(self.n_path, var_0)
+ var_avg = np.zeros_like(var_0)
- def cond_intvar_mean_var_numeric(self, var_0, var_t, dt):
- # conditional MGF function
- def mgf_cond(aa):
- return self.mgf(aa, var_0, var_t, dt)
+ for i in range(n_dt):
- # Get the first 2 moments
- m1 = derivative(mgf_cond, 0, n=1, dx=1e-5)
- m2 = derivative(mgf_cond, 0, n=2, dx=1e-5)
- return m1, m2 - m1**2
+ var_t, eta = self.var_step_ncx2_eta(var_0, dt[i])
+ # m1, var = self.cond_avgvar_mv_numeric(var_0, var_t, dt[i])
+ m1, var = self.cond_avgvar_mv(var_0, var_t, dt[i], eta=None)
+ var_0 = var_t
- def cond_states(self, var_0, texp):
+ if self.dist.lower() == 'ig':
+ # mu and lambda defined in https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
+ # RNG.wald takes the same parameters
+ lam = m1**3 / var
+ var_avg += self.rng_spawn[1].wald(mean=m1, scale=lam)
+ elif self.dist.lower() == 'ga':
+ scale = var / m1
+ shape = m1 / scale
+ var_avg += scale * self.rng_spawn[1].standard_gamma(shape=shape)
+ elif self.dist.lower() == 'ln':
+ scale = np.sqrt(np.log(1 + var/m1**2))
+ var_avg += m1 * np.exp(scale * (self.rv_normal(spawn=1) - scale/2))
+ else:
+ raise ValueError(f"Incorrect distribution: {self.dist}.")
- var_t, eta = self.var_step_ncx2_eta(self.sigma, texp)
- #m1, var = self.cond_intvar_mean_var_numeric(var_0, var_t, texp)
- m1, var = self.cond_intvar_mean_var(var_0, var_t, texp, eta=None)
-
- if self.dist == 0:
- # mu and lambda defined in https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
- # RNG.wald takes the same parameters
- lam = m1**3 / var
- var_mean = self.rng_spawn[1].wald(mean=m1, scale=lam) / texp
- elif self.dist == 1:
- scale = var / m1
- shape = m1 / scale
- var_mean = scale * self.rng_spawn[1].standard_gamma(shape=shape) / texp
- elif self.dist == 2:
- scale = np.sqrt(np.log(1 + var/m1**2))
- var_mean = self.rv_normal(spawn=1)
- var_mean = m1 * np.exp(scale * (var_mean - scale/2)) / texp
- else:
- raise ValueError(f"Incorrect distribution: {self.dist}.")
+ var_avg /= n_dt
- return var_t, var_mean
+ return var_t, var_avg
class HestonMcChoiKwok2023(HestonMcGlassermanKim2011):
- dist = 0
+ dist = 'ig'
- def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, KK=0, dist=0):
+ def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, kk=0, dist=None):
"""
Set MC parameters
@@ -798,53 +838,48 @@ def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, scheme=3, KK=0, dis
dt: time step
rn_seed: random number seed
scheme: simulation scheme for jumping from 0 to texp
- dist: distribution to use for approximation. 0 for inverse Gaussian (default), 1 for Gamma, 2 for LN
+ dist: distribution to use for approximation.
+ 'ig' for inverse Gaussian (default), 'ga' for Gamma, 'ln' for LN
"""
- super().set_mc_params(n_path, dt, rn_seed, scheme=scheme, KK=KK)
- self.dist = dist
+ super().set_mc_params(n_path, dt, rn_seed, scheme=scheme, kk=kk)
+ if dist is not None:
+ self.dist = dist
- def draw_x123(self, var_sum, dt, eta_sum):
+ def draw_x123(self, var_sum, dt, shape_sum):
"""
- Simulation of X1 + X2 + X3 using truncated Gamma expansion in Glasserman and Kim (2011)
+ Samples of (X1 + X2 + X3)/dt using truncated Gamma expansion improved in Choi & Kwok (2023)
Args:
- var_sum: initial + final variance. (n_paths,)
- eta_sum: Bessel RV
+ var_sum: sum of v_t at the observation times. (n_paths,)
+ shape_sum: sum of gamma shape parameters
dt: time step
Returns:
- X1 + X2 + X3 (n_paths,)
+ (X1 + X2 + X3)/dt (n_paths,)
"""
- vovn = self.vov**2 * dt
gamma_n, lambda_n = self.gamma_lambda(dt)
- Nn = self.rng_spawn[3].poisson(lam=var_sum * lambda_n[:, None])
- x123 = np.sum(self.rng_spawn[1].standard_gamma(shape=Nn + eta_sum * 2) / gamma_n[:, None], axis=0)
-
- # The approximated mean and variance of the truncated terms
- #rem_mean_x1 = 2 * dt / (np.pi**2 * self.KK) * var_sum
- #rem_var_x1 = 2 * self.vov**2 * dt**3 / (3 * np.pi**4 * self.KK**3) * var_sum
- #rem_mean_x23 = (self.vov * dt)**2 / (4 * np.pi**2 * self.KK) * ncx_df
- #rem_var_x23 = (self.vov * dt)**4 / (24 * np.pi**4 * self.KK**3) * ncx_df
-
- m1, var = self.x1star_mean_var(dt, self.KK)
- m1 *= var_sum * dt
- var *= var_sum * vovn * dt**2
-
- m1_x23, var_x23 = self.x2star_mean_var(dt, self.KK)
- m1 += m1_x23 * 4 * eta_sum * vovn * dt
- var += var_x23 * 4 * eta_sum * vovn**2 * dt**2
-
- if self.dist == 0:
- lam = m1**3 / var
- x123 += self.rng_spawn[1].wald(mean=m1, scale=lam)
- elif self.dist == 1:
- rem_scale = var / m1
- rem_shape = m1 / rem_scale
- x123 += rem_scale * self.rng_spawn[1].standard_gamma(rem_shape)
- elif self.dist == 2:
- scale = np.sqrt(np.log(1 + var / m1**2))
- x123 += m1 * np.exp(scale * (self.rv_normal(spawn=1) - scale / 2))
+ pois = self.rng_spawn[3].poisson(lam=var_sum * lambda_n[:, None])
+ x123 = np.sum(self.rng_spawn[1].standard_gamma(shape=pois + shape_sum) / gamma_n[:, None], axis=0)
+
+ trunc_mean, trunc_var = self.x1star_avgvar_mv(dt, self.kk)
+ trunc_mean *= var_sum
+ trunc_var *= var_sum
+
+ mean_x23, var_x23 = self.x2star_avgvar_mv(dt, self.kk)
+ trunc_mean += mean_x23 * shape_sum
+ trunc_var += var_x23 * shape_sum
+
+ if self.dist.lower() == 'ig':
+ lam = trunc_mean**3 / trunc_var
+ x123 += self.rng_spawn[1].wald(mean=trunc_mean, scale=lam)
+ elif self.dist.lower() == 'ga':
+ trunc_scale = trunc_var / trunc_mean
+ trunc_shape = trunc_mean / trunc_scale
+ x123 += trunc_scale * self.rng_spawn[1].standard_gamma(trunc_shape)
+ elif self.dist.lower() == 'ln':
+ scale = np.sqrt(np.log(1 + trunc_var / trunc_mean**2))
+ x123 += trunc_mean * np.exp(scale * (self.rv_normal(spawn=1) - scale / 2))
else:
raise ValueError(f"Incorrect distribution: {self.dist}.")
@@ -862,15 +897,16 @@ def cond_states(self, var_0, texp):
var_t = np.full(self.n_path, var_0)
var_sum = weight[0] * var_t
- eta_sum = np.zeros_like(var_t)
+ shape_sum = np.zeros_like(var_t)
for i in range(n_dt):
var_t, eta = self.var_step_ncx2_eta(var_t, dt[i])
var_sum += weight[i+1] * var_t
- eta_sum += eta
+ shape_sum += 2*eta
- eta_sum += self.chi_dim() / 4 * n_dt
- var_mean = self.draw_x123(var_sum, dt[0], eta_sum) / texp
+ shape_sum += 0.5 * self.chi_dim() * n_dt
+ # self.draw_x123 returns the average by dt. Need to convert to the average by texp
+ var_avg = self.draw_x123(var_sum, dt[0], shape_sum) / n_dt
- return var_t, var_mean
+ return var_t, var_avg
| Revert "Asp risk-parity-project-fully-version-1.0"
Reverts PyFE/PyFENG#107
| 2022-04-22T05:31:10 | 0.0 | [] | [] |
|||
ThePorgs/Exegol | ThePorgs__Exegol-223 | b7a3609f6e0d39563cfa5f48f16f72941fb75811 | diff --git a/exegol-resources b/exegol-resources
index 833f0359..314c14d8 160000
--- a/exegol-resources
+++ b/exegol-resources
@@ -1,1 +1,1 @@
-Subproject commit 833f035933eec193fc8a32cc31c44eee80564a6b
+Subproject commit 314c14d8e275f6d9111b7f434b3f846444fdbf60
diff --git a/exegol/config/ConstantConfig.py b/exegol/config/ConstantConfig.py
index 5bd0e4c0..c4519dde 100644
--- a/exegol/config/ConstantConfig.py
+++ b/exegol/config/ConstantConfig.py
@@ -5,7 +5,7 @@
class ConstantConfig:
"""Constant parameters information"""
# Exegol Version
- version: str = "4.3.4"
+ version: str = "4.3.5"
# Exegol documentation link
documentation: str = "https://exegol.rtfd.io/"
diff --git a/exegol/console/TUI.py b/exegol/console/TUI.py
index 893d4cbe..0364bc73 100644
--- a/exegol/console/TUI.py
+++ b/exegol/console/TUI.py
@@ -7,6 +7,7 @@
from rich.prompt import Prompt
from rich.table import Table
+from exegol.config.EnvInfo import EnvInfo
from exegol.console import ConsoleFormat
from exegol.console.ConsoleFormat import boolFormatter, getColor, richLen
from exegol.console.ExegolProgress import ExegolProgress
@@ -17,7 +18,6 @@
from exegol.model.ExegolContainerTemplate import ExegolContainerTemplate
from exegol.model.ExegolImage import ExegolImage
from exegol.model.SelectableInterface import SelectableInterface
-from exegol.config.EnvInfo import EnvInfo
from exegol.utils.ExeLog import logger, console, ExeLog
@@ -437,7 +437,10 @@ def __buildContainerRecapTable(container: ExegolContainerTemplate):
recap.title = "[not italic]:white_medium_star: [/not italic][gold3][g]Container summary[/g][/gold3]"
# Header
recap.add_column(f"[bold blue]Name[/bold blue]{os.linesep}[bold blue]Image[/bold blue]", justify="right")
- container_info_header = f"{container.getDisplayName()}{os.linesep}{container.image.getName()}"
+ container_status = container.getTextStatus()
+
+ container_info_header = (f"{container.getDisplayName()} {'(' + container_status + ')' if container_status else ''}{os.linesep}"
+ f"{container.image.getName()}")
if "N/A" not in container.image.getImageVersion():
container_info_header += f" - v.{container.image.getImageVersion()}"
if "Unknown" not in container.image.getStatus():
diff --git a/exegol/console/cli/actions/ExegolParameters.py b/exegol/console/cli/actions/ExegolParameters.py
index cbe0addb..2c8e10bb 100644
--- a/exegol/console/cli/actions/ExegolParameters.py
+++ b/exegol/console/cli/actions/ExegolParameters.py
@@ -289,4 +289,4 @@ class Version(Command):
"""Print current Exegol version"""
def __call__(self, *args, **kwargs):
- return ExegolManager.print_version
+ return lambda: None
diff --git a/exegol/manager/ExegolController.py b/exegol/manager/ExegolController.py
index ad36116a..3c90ca4f 100644
--- a/exegol/manager/ExegolController.py
+++ b/exegol/manager/ExegolController.py
@@ -1,3 +1,5 @@
+import logging
+
try:
import docker
import requests
@@ -69,8 +71,16 @@ def main():
logger.info("Exiting")
except git.exc.GitCommandError as git_error:
print_exception_banner()
+ # Printing git stderr as raw to avoid any Rich parsing error
+ logger.debug("Full git output:")
+ logger.raw(git_error, level=logging.DEBUG)
+ logger.empty_line()
error = git_error.stderr.strip().split(": ")[-1].strip("'")
- logger.critical(f"A critical error occurred while running this git command: {' '.join(git_error.command)} => {error}")
+ logger.error("Git error received:")
+ # Printing git error as raw to avoid any Rich parsing error
+ logger.raw(error, level=logging.ERROR)
+ logger.empty_line()
+ logger.critical(f"A critical error occurred while running this git command: {' '.join(git_error.command)}")
except Exception:
print_exception_banner()
console.print_exception(show_locals=True, suppress=[docker, requests, git])
diff --git a/exegol/manager/ExegolManager.py b/exegol/manager/ExegolManager.py
index ab93d90f..627f8cc9 100644
--- a/exegol/manager/ExegolManager.py
+++ b/exegol/manager/ExegolManager.py
@@ -362,7 +362,13 @@ def __loadOrCreateContainer(cls,
# Return cache
return cls.__container
container_tag: Optional[str] = override_container if override_container is not None else ParametersManager().containertag
- container_tags: Optional[Sequence[str]] = ParametersManager().multicontainertag
+ container_tags: Optional[List[str]] = None
+ if ParametersManager().multicontainertag:
+ container_tags = []
+ for tag in ParametersManager().multicontainertag:
+ # Prevent duplicate tag selection
+ if tag not in container_tags:
+ container_tags.append(tag)
try:
if container_tag is None and (container_tags is None or len(container_tags) == 0):
# Interactive container selection
@@ -438,51 +444,55 @@ def __interactiveSelection(cls,
@classmethod
def __prepareContainerConfig(cls):
"""Create Exegol configuration with user input"""
- # Create default exegol config
- config = ContainerConfig()
- # Container configuration from user CLI options
- if ParametersManager().X11:
- config.enableGUI()
- if ParametersManager().share_timezone:
- config.enableSharedTimezone()
- config.setNetworkMode(ParametersManager().host_network)
- if ParametersManager().ports is not None:
- for port in ParametersManager().ports:
- config.addRawPort(port)
- if ParametersManager().my_resources:
- config.enableMyResources()
- if ParametersManager().exegol_resources:
- config.enableExegolResources()
- if ParametersManager().log:
- config.enableShellLogging(ParametersManager().log_method,
- UserConfig().shell_logging_compress ^ ParametersManager().log_compress)
- if ParametersManager().workspace_path:
- if ParametersManager().mount_current_dir:
- logger.warning(f'Workspace conflict detected (-cwd cannot be use with -w). Using: {ParametersManager().workspace_path}')
- config.setWorkspaceShare(ParametersManager().workspace_path)
- elif ParametersManager().mount_current_dir:
- config.enableCwdShare()
- if ParametersManager().privileged:
- config.setPrivileged()
- elif ParametersManager().capabilities is not None:
- for cap in ParametersManager().capabilities:
- config.addCapability(cap)
- if ParametersManager().volumes is not None:
- for volume in ParametersManager().volumes:
- config.addRawVolume(volume)
- if ParametersManager().devices is not None:
- for device in ParametersManager().devices:
- config.addUserDevice(device)
- if ParametersManager().vpn is not None:
- config.enableVPN()
- if ParametersManager().envs is not None:
- for env in ParametersManager().envs:
- config.addRawEnv(env)
- if UserConfig().desktop_default_enable ^ ParametersManager().desktop:
- config.enableDesktop(ParametersManager().desktop_config)
- if ParametersManager().comment:
- config.addComment(ParametersManager().comment)
- return config
+ try:
+ # Create default exegol config
+ config = ContainerConfig()
+ # Container configuration from user CLI options
+ if ParametersManager().X11:
+ config.enableGUI()
+ if ParametersManager().share_timezone:
+ config.enableSharedTimezone()
+ config.setNetworkMode(ParametersManager().host_network)
+ if ParametersManager().ports is not None:
+ for port in ParametersManager().ports:
+ config.addRawPort(port)
+ if ParametersManager().my_resources:
+ config.enableMyResources()
+ if ParametersManager().exegol_resources:
+ config.enableExegolResources()
+ if ParametersManager().log:
+ config.enableShellLogging(ParametersManager().log_method,
+ UserConfig().shell_logging_compress ^ ParametersManager().log_compress)
+ if ParametersManager().workspace_path:
+ if ParametersManager().mount_current_dir:
+ logger.warning(f'Workspace conflict detected (-cwd cannot be use with -w). Using: {ParametersManager().workspace_path}')
+ config.setWorkspaceShare(ParametersManager().workspace_path)
+ elif ParametersManager().mount_current_dir:
+ config.enableCwdShare()
+ if ParametersManager().privileged:
+ config.setPrivileged()
+ elif ParametersManager().capabilities is not None:
+ for cap in ParametersManager().capabilities:
+ config.addCapability(cap)
+ if ParametersManager().volumes is not None:
+ for volume in ParametersManager().volumes:
+ config.addRawVolume(volume)
+ if ParametersManager().devices is not None:
+ for device in ParametersManager().devices:
+ config.addUserDevice(device)
+ if ParametersManager().vpn is not None:
+ config.enableVPN()
+ if ParametersManager().envs is not None:
+ for env in ParametersManager().envs:
+ config.addRawEnv(env)
+ if UserConfig().desktop_default_enable ^ ParametersManager().desktop:
+ config.enableDesktop(ParametersManager().desktop_config)
+ if ParametersManager().comment:
+ config.addComment(ParametersManager().comment)
+ return config
+ except CancelOperation as e:
+ logger.critical(f"Unable to create a new container: {e}")
+ raise e
@classmethod
def __createContainer(cls, name: Optional[str]) -> ExegolContainer:
diff --git a/exegol/model/ContainerConfig.py b/exegol/model/ContainerConfig.py
index 9a2c672e..c34ba06b 100644
--- a/exegol/model/ContainerConfig.py
+++ b/exegol/model/ContainerConfig.py
@@ -24,8 +24,12 @@
from exegol.model.ExegolModules import ExegolModules
from exegol.utils import FsUtils
from exegol.utils.ExeLog import logger, ExeLog
+from exegol.utils.FsUtils import check_sysctl_value
from exegol.utils.GuiUtils import GuiUtils
+if EnvInfo.is_windows_shell or EnvInfo.is_mac_shell:
+ from tzlocal import get_localzone_name
+
class ContainerConfig:
"""Configuration class of an exegol container"""
@@ -38,6 +42,12 @@ class ContainerConfig:
__static_gui_envs = {"_JAVA_AWT_WM_NONREPARENTING": "1", "QT_X11_NO_MITSHM": "1"}
__default_desktop_port = {"http": 6080, "vnc": 5900}
+ # Verbose only filters
+ __verbose_only_envs = ["DISPLAY", "WAYLAND_DISPLAY", "XDG_SESSION_TYPE", "XDG_RUNTIME_DIR", "PATH", "TZ", "_JAVA_OPTIONS"]
+ __verbose_only_mounts = ['/tmp/.X11-unix', '/opt/resources', '/etc/localtime',
+ '/etc/timezone', '/my-resources', '/opt/my-resources',
+ '/.exegol/entrypoint.sh', '/.exegol/spawn.sh', '/tmp/wayland-0', '/tmp/wayland-1']
+
# Whitelist device for Docker Desktop
__whitelist_dd_devices = ["/dev/net/tun"]
@@ -126,6 +136,9 @@ def __parseContainerConfig(self, container: Container):
"""Parse Docker object to setup self configuration"""
# Reset default attributes
self.__passwd = None
+ self.__share_timezone = False
+ self.__my_resources = False
+ self.__enable_gui = False
# Container Config section
container_config = container.attrs.get("Config", {})
self.tty = container_config.get("Tty", True)
@@ -133,14 +146,6 @@ def __parseContainerConfig(self, container: Container):
self.__parseLabels(container_config.get("Labels", {}))
self.interactive = container_config.get("OpenStdin", True)
self.legacy_entrypoint = container_config.get("Entrypoint") is None
- self.__enable_gui = False
- envs_key = self.__envs.keys()
- if "DISPLAY" in envs_key:
- self.__enable_gui = True
- self.__gui_engine.append("X11")
- if "WAYLAND_DISPLAY" in envs_key:
- self.__enable_gui = True
- self.__gui_engine.append("Wayland")
# Host Config section
host_config = container.attrs.get("HostConfig", {})
@@ -158,8 +163,6 @@ def __parseContainerConfig(self, container: Container):
logger.debug(f"└── Load devices : {self.__devices}")
# Volumes section
- self.__share_timezone = False
- self.__my_resources = False
self.__parseMounts(container.attrs.get("Mounts", []), container.name.replace('exegol-', ''))
# Network section
@@ -173,6 +176,15 @@ def __parseEnvs(self, envs: List[str]):
logger.debug(f"└── Parsing envs : {env}")
# Removing " and ' at the beginning and the end of the string before splitting key / value
self.addRawEnv(env.strip("'").strip('"'))
+ envs_key = self.__envs.keys()
+ if "DISPLAY" in envs_key:
+ self.__enable_gui = True
+ self.__gui_engine.append("X11")
+ if "WAYLAND_DISPLAY" in envs_key:
+ self.__enable_gui = True
+ self.__gui_engine.append("Wayland")
+ if "TZ" in envs_key:
+ self.__share_timezone = True
def __parseLabels(self, labels: Dict[str, str]):
"""Parse envs object syntax"""
@@ -401,6 +413,11 @@ def enableGUI(self):
# TODO support pulseaudio
for k, v in self.__static_gui_envs.items():
self.addEnv(k, v)
+
+ # Fix XQuartz render: https://github.com/ThePorgs/Exegol/issues/229
+ if EnvInfo.isMacHost():
+ self.addEnv("_JAVA_OPTIONS", '-Dsun.java2d.xrender=false')
+
self.__enable_gui = True
def __disableGUI(self):
@@ -419,37 +436,37 @@ def __disableGUI(self):
def enableSharedTimezone(self):
"""Procedure to enable shared timezone feature"""
- if EnvInfo.is_windows_shell:
- logger.warning("Timezone sharing is not supported from a Windows shell. Skipping.")
- return
- elif EnvInfo.isMacHost():
- # On Orbstack /etc cannot be shared + we should test how Orbstack handle symlink
- # With docker desktop, symlink are resolved as full path on container creation. When tzdata is updated on the host, the container can no longer be started because the files of the previous package version are missing.
- # TODO Test if env var can be used as replacement
- logger.warning("Timezone sharing on Mac is not supported (for stability reasons). Skipping.")
- return
if not self.__share_timezone:
logger.verbose("Config: Enabling host timezones")
- # Try to share /etc/timezone (deprecated old timezone file)
- try:
- self.addVolume("/etc/timezone", "/etc/timezone", read_only=True, must_exist=True)
- logger.verbose("Volume was successfully added for [magenta]/etc/timezone[/magenta]")
- timezone_loaded = True
- except CancelOperation:
- logger.verbose("File /etc/timezone is missing on host, cannot create volume for this.")
- timezone_loaded = False
- # Try to share /etc/localtime (new timezone file)
- try:
- self.addVolume("/etc/localtime", "/etc/localtime", read_only=True, must_exist=True)
- logger.verbose("Volume was successfully added for [magenta]/etc/localtime[/magenta]")
- except CancelOperation as e:
- if not timezone_loaded:
- # If neither file was found, disable the functionality
- logger.error(f"The host's timezone could not be shared: {e}")
- return
+ if EnvInfo.is_windows_shell or EnvInfo.is_mac_shell:
+ current_tz = get_localzone_name()
+ if current_tz:
+ logger.debug(f"Sharing timezone via TZ env var: '{current_tz}'")
+ self.addEnv("TZ", current_tz)
else:
- logger.warning("File [magenta]/etc/localtime[/magenta] is [orange3]missing[/orange3] on host, "
- "cannot create volume for this. Relying instead on [magenta]/etc/timezone[/magenta] [orange3](deprecated)[/orange3].")
+ logger.warning("Your system timezone cannot be shared.")
+ return
+ else:
+ # Try to share /etc/timezone (deprecated old timezone file)
+ try:
+ self.addVolume("/etc/timezone", "/etc/timezone", read_only=True, must_exist=True)
+ logger.verbose("Volume was successfully added for [magenta]/etc/timezone[/magenta]")
+ timezone_loaded = True
+ except CancelOperation:
+ logger.verbose("File /etc/timezone is missing on host, cannot create volume for this.")
+ timezone_loaded = False
+ # Try to share /etc/localtime (new timezone file)
+ try:
+ self.addVolume("/etc/localtime", "/etc/localtime", read_only=True, must_exist=True)
+ logger.verbose("Volume was successfully added for [magenta]/etc/localtime[/magenta]")
+ except CancelOperation as e:
+ if not timezone_loaded:
+ # If neither file was found, disable the functionality
+ logger.error(f"The host's timezone could not be shared: {e}")
+ return
+ else:
+ logger.warning("File [magenta]/etc/localtime[/magenta] is [orange3]missing[/orange3] on host, "
+ "cannot create volume for this. Relying instead on [magenta]/etc/timezone[/magenta] [orange3](deprecated)[/orange3].")
self.__share_timezone = True
def __disableSharedTimezone(self):
@@ -627,9 +644,8 @@ def enableVPN(self, config_path: Optional[str] = None):
skip_sysctl = False
if self.__network_host and EnvInfo.is_linux_shell:
# Check if IPv6 have been disabled on the host with sysctl
- with open('/proc/sys/net/ipv6/conf/all/disable_ipv6', 'r') as conf:
- if int(conf.read()) == 0:
- skip_sysctl = True
+ if check_sysctl_value("net.ipv6.conf.all.disable_ipv6", "0"):
+ skip_sysctl = True
if not skip_sysctl:
self.__addSysctl("net.ipv6.conf.all.disable_ipv6", "0")
# Add tun device, this device is needed to create VPN tunnels
@@ -872,17 +888,18 @@ def __removeCapability(self, cap_string: str):
# When the capability is not present
return False
- def __addSysctl(self, sysctl_key: str, config: str):
+ def __addSysctl(self, sysctl_key: str, config: Union[str, int]):
"""Add a linux sysctl to the container"""
if sysctl_key in self.__sysctls.keys():
logger.warning(f"Sysctl {sysctl_key} already setup to '{self.__sysctls[sysctl_key]}'. Skipping.")
return
- if self.__network_host:
+ # Docs of supported sysctl by linux / docker: https://docs.docker.com/reference/cli/docker/container/run/#currently-supported-sysctls
+ if self.__network_host and sysctl_key.startswith('net.'):
logger.warning(f"The sysctl container configuration is [red]not[/red] supported by docker in [blue]host[/blue] network mode.")
logger.warning(f"Skipping the sysctl config: [magenta]{sysctl_key}[/magenta] = [orange3]{config}[/orange3].")
logger.warning(f"If this configuration is mandatory in your situation, try to change it in sudo mode on your host.")
return
- self.__sysctls[sysctl_key] = config
+ self.__sysctls[sysctl_key] = str(config)
def __removeSysctl(self, sysctl_key: str):
"""Remove a linux capability from the container's config"""
@@ -992,28 +1009,25 @@ def addVolume(self,
# Docker Desktop for Windows based on WSL2 don't have filesystem limitation
if EnvInfo.isMacHost():
# Add support for /etc
- # TODO check if path_match + replace really useful , path_match rever used
- path_match = host_path
- if path_match.startswith("/opt/") and EnvInfo.isOrbstack():
- msg = f"{EnvInfo.getDockerEngine().value} cannot mount directory from [magenta]/opt/[/magenta] host path."
- if path_match.endswith("entrypoint.sh") or path_match.endswith("spawn.sh"):
+ if host_path.startswith("/opt/") and EnvInfo.isOrbstack():
+ msg = f"{EnvInfo.getDockerEngine().value} cannot mount directory from /opt/ host path."
+ if host_path.endswith("entrypoint.sh") or host_path.endswith("spawn.sh"):
msg += " Your exegol installation cannot be stored under this directory."
logger.critical(msg)
+ else:
+ msg += f" The volume {host_path} cannot be mounted to the container, please move it outside of this directory."
raise CancelOperation(msg)
- if path_match.startswith("/etc/"):
- if EnvInfo.isOrbstack():
- raise CancelOperation(f"{EnvInfo.getDockerEngine().value} doesn't support sharing [magenta]/etc[/magenta] files with the container")
- path_match = path_match.replace("/etc/", "/private/etc/")
if EnvInfo.isDockerDesktop():
match = False
# Find a match
for resource in EnvInfo.getDockerDesktopResources():
- if path_match.startswith(resource):
+ if host_path.startswith(resource):
match = True
break
if not match:
logger.error(f"Bind volume from {host_path} is not possible, Docker Desktop configuration is [red]incorrect[/red].")
- logger.critical(f"You need to modify the [green]Docker Desktop[/green] config and [green]add[/green] this path (or the root directory) in [magenta]Docker Desktop > Preferences > Resources > File Sharing[/magenta] configuration.")
+ logger.critical(f"You need to modify the [green]Docker Desktop[/green] config and [green]add[/green] this path (or the root directory) in "
+ f"[magenta]Docker Desktop > Preferences > Resources > File Sharing[/magenta] configuration.")
# Choose to update fs directory perms if available and depending on user choice
# if force_sticky_group is set, user choice is bypassed, fs will be updated.
execute_update_fs = force_sticky_group or (enable_sticky_group and (UserConfig().auto_update_workspace_fs ^ ParametersManager().update_fs_perms))
@@ -1268,9 +1282,13 @@ def addRawVolume(self, volume_string):
def addUserDevice(self, user_device_config: str):
"""Add a device from a user parameters"""
- if EnvInfo.isDockerDesktop() and user_device_config not in self.__whitelist_dd_devices:
- logger.warning("Docker desktop (Windows & macOS) does not support USB device passthrough.")
- logger.verbose("Official doc: https://docs.docker.com/desktop/faqs/#can-i-pass-through-a-usb-device-to-a-container")
+ if (EnvInfo.isDockerDesktop() or EnvInfo.isOrbstack()) and user_device_config not in self.__whitelist_dd_devices:
+ if EnvInfo.isDockerDesktop():
+ logger.warning("Docker desktop (Windows & macOS) does not support USB device passthrough.")
+ logger.verbose("Official doc: https://docs.docker.com/desktop/faqs/#can-i-pass-through-a-usb-device-to-a-container")
+ elif EnvInfo.isOrbstack():
+ logger.warning("Orbstack does not support (yet) USB device passthrough.")
+ logger.verbose("Official doc: https://docs.orbstack.dev/machines/#usb-devices")
logger.critical("Device configuration cannot be applied, aborting operation.")
self.__addDevice(user_device_config)
@@ -1381,12 +1399,9 @@ def getTextCreationDate(self) -> str:
def getTextMounts(self, verbose: bool = False) -> str:
"""Text formatter for Mounts configurations. The verbose mode does not exclude technical volumes."""
result = ''
- hidden_mounts = ['/tmp/.X11-unix', '/opt/resources', '/etc/localtime',
- '/etc/timezone', '/my-resources', '/opt/my-resources',
- '/.exegol/entrypoint.sh', '/.exegol/spawn.sh', '/tmp/wayland-0', '/tmp/wayland-1']
for mount in self.__mounts:
# Not showing technical mounts
- if not verbose and mount.get('Target') in hidden_mounts:
+ if not verbose and mount.get('Target') in self.__verbose_only_mounts:
continue
read_only_text = f"[bright_black](RO)[/bright_black] " if verbose else ''
read_write_text = f"[orange3](RW)[/orange3] " if verbose else ''
@@ -1412,7 +1427,7 @@ def getTextEnvs(self, verbose: bool = False) -> str:
result = ''
for k, v in self.__envs.items():
# Blacklist technical variables, only shown in verbose
- if not verbose and k in list(self.__static_gui_envs.keys()) + [v.value for v in self.ExegolEnv] + ["DISPLAY", "WAYLAND_DISPLAY", "XDG_SESSION_TYPE", "XDG_RUNTIME_DIR", "PATH"]:
+ if not verbose and k in list(self.__static_gui_envs.keys()) + [v.value for v in self.ExegolEnv] + self.__verbose_only_envs:
continue
result += f"{k}={v}{os.linesep}"
return result
diff --git a/exegol/model/ExegolContainer.py b/exegol/model/ExegolContainer.py
index c3238278..e49f4800 100644
--- a/exegol/model/ExegolContainer.py
+++ b/exegol/model/ExegolContainer.py
@@ -77,10 +77,10 @@ def getTextStatus(self) -> str:
if status == "unknown":
return "Unknown"
elif status == "exited":
- return "[red]Stopped"
+ return "[red]Stopped[/red]"
elif status == "running":
- return "[green]Running"
- return status
+ return "[green]Running[/green]"
+ return f"[orange3]{status}[/orange3]"
def isNew(self) -> bool:
"""Check if the container has just been created or not"""
@@ -341,9 +341,9 @@ def __applyXhostACL(self):
with console.status(f"Starting XQuartz...", spinner_style="blue"):
os.system(f"xhost + localhost > /dev/null")
else:
- logger.debug(f"Adding xhost ACL to local:{self.config.hostname}")
+ logger.debug(f"Adding xhost ACL to local:{self.config.getUsername()}")
# add linux local ACL
- os.system(f"xhost +local:{self.config.hostname} > /dev/null")
+ os.system(f"xhost +local:{self.config.getUsername()} > /dev/null")
def __updatePasswd(self):
"""
diff --git a/exegol/model/ExegolContainerTemplate.py b/exegol/model/ExegolContainerTemplate.py
index b0f3b678..2b4b9fed 100644
--- a/exegol/model/ExegolContainerTemplate.py
+++ b/exegol/model/ExegolContainerTemplate.py
@@ -46,3 +46,6 @@ def getDisplayName(self) -> str:
if self.container_name != self.config.hostname:
return f"{self.name} [bright_black]({self.config.hostname})[/bright_black]"
return self.name
+
+ def getTextStatus(self) -> str:
+ return ""
diff --git a/exegol/utils/FsUtils.py b/exegol/utils/FsUtils.py
index 2e1267c7..5d81f58e 100644
--- a/exegol/utils/FsUtils.py
+++ b/exegol/utils/FsUtils.py
@@ -90,3 +90,17 @@ def setGidPermission(root_folder: Path):
logger.raw(f"sudo chgrp -R $(id -g) {root_folder} && sudo find {root_folder} -type d -exec chmod g+rws {{}} \\;", level=logging.WARNING)
logger.empty_line()
logger.empty_line()
+
+
+def check_sysctl_value(sysctl: str, compare_to: str) -> bool:
+ sysctl_path = "/proc/sys/" + sysctl.replace('.', '/')
+ try:
+ with open(sysctl_path, 'r') as conf:
+ config = conf.read().strip()
+ logger.debug(f"Checking sysctl value {sysctl}={config} (compare to {compare_to})")
+ return conf.read().strip() == compare_to
+ except FileNotFoundError:
+ logger.debug(f"Sysctl file {sysctl} not found!")
+ except PermissionError:
+ logger.debug(f"Unable to read sysctl {sysctl} permission!")
+ return False
diff --git a/exegol/utils/GuiUtils.py b/exegol/utils/GuiUtils.py
index 607a912e..47ccc741 100644
--- a/exegol/utils/GuiUtils.py
+++ b/exegol/utils/GuiUtils.py
@@ -41,10 +41,9 @@ def isWaylandGuiAvailable(cls) -> bool:
:return: bool
"""
if EnvInfo.isWindowsHost():
- return False # TODO To Be defined (WSLg works fine for now)
- # elif EnvInfo.isMacHost():
- # return False
- # Linux or Mac, rely on var env settings
+ return False
+ elif EnvInfo.isMacHost():
+ return False
return EnvInfo.isWaylandAvailable()
@classmethod
@@ -211,16 +210,20 @@ def __windowsGuiChecks(cls) -> bool:
logger.debug("Testing WSLg availability")
# WSL + WSLg must be available on the Windows host for the GUI to work through X11 sharing
if not cls.__wsl_available():
+ if sys.platform != "win32" and os.getuid() == 0:
+ logger.critical("You are running exegol as [red]root[/red]! The root user cannot be used to run Exegol on a Windows environment.")
logger.error("WSL is [orange3]not available[/orange3] on your system. X11 sharing is not supported.")
return False
+ logger.debug("WSL is [green]available[/green] on the local system")
# Only WSL2 support WSLg
if EnvInfo.getDockerEngine() != EnvInfo.DockerEngine.WLS2:
+ logger.debug(f"Docker current engine: {EnvInfo.getDockerEngine().value}")
logger.error("Docker must be run with [orange3]WSL2[/orange3] engine in order to support X11 sharing (i.e. GUI apps).")
return False
- logger.debug("WSL is [green]available[/green] and docker is using WSL2")
+ logger.debug("Docker is using [green]WSL2[/green]")
# X11 socket can only be shared from a WSL (to find WSLg mount point)
if EnvInfo.current_platform != "WSL":
- logger.debug("Exegol is running from a Windows context (e.g. Powershell), a WSL instance must be found to share WSLg X11 socket")
+ logger.debug("Exegol is running from a Windows context (e.g. Powershell), a WSL instance must be found to share the WSLg X11 socket")
cls.__distro_name = cls.__find_wsl_distro()
logger.debug(f"Set WSL Distro as: '{cls.__distro_name}'")
# If no WSL is found, propose to continue without GUI (X11 sharing)
@@ -252,12 +255,16 @@ def __wsl_test(path, name: Optional[str] = "docker-desktop") -> bool:
if EnvInfo.isWindowsHost():
wsl = shutil.which("wsl.exe")
if not wsl:
+ logger.warning("wsl.exe seems to be unavailable on your system.")
return False
if name is None:
+ logger.debug(f"Running: wsl.exe test -f {path}")
ret = subprocess.run(["wsl.exe", "test", "-f", path])
else:
+ logger.debug(f"Running: wsl.exe test -d {name} -f {path}")
ret = subprocess.run(["wsl.exe", "-d", name, "test", "-f", path])
return ret.returncode == 0
+ logger.debug("Trying to run a WSL test without Windows?")
return False
@classmethod
@@ -282,11 +289,17 @@ def __wsl_available(cls) -> bool:
if EnvInfo.isWindowsHost():
wsl = shutil.which("wsl.exe")
if not wsl:
+ logger.debug("wsl.exe not found on the local system.")
return False
+ logger.debug("running: wsl.exe --status")
ret = subprocess.Popen(["wsl.exe", "--status"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret.wait()
if ret.returncode == 0:
return True
+ else:
+ logger.debug(f"wsl.exe --status return code {ret.returncode}")
+ logger.debug(str(ret.stdout))
+ logger.debug(str(ret.stderr))
logger.debug("WSL status command failed.. Trying a fallback check method.")
return cls.__wsl_test("/etc/os-release", name=None) or cls.__wsl_test("/etc/os-release")
@@ -300,10 +313,12 @@ def __wslg_installed(cls) -> bool:
if (Path("/mnt/host/wslg/versions.txt").is_file() or
Path("/mnt/wslg/versions.txt").is_file()):
return True
+ logger.debug("Unable to find WSLg locally.. Check /mnt/wslg/ or /mnt/host/wslg/")
else:
if (cls.__wsl_test("/mnt/host/wslg/versions.txt", name=cls.__distro_name) or
cls.__wsl_test("/mnt/wslg/versions.txt", name=cls.__distro_name)):
return True
+ logger.debug(f"Unable to find WSLg.. Check /mnt/wslg/ or /mnt/host/wslg/ on {cls.__distro_name}")
logger.debug("WSLg check failed.. Trying a fallback check method.")
return cls.__wsl_test("/mnt/host/wslg/versions.txt") or cls.__wsl_test("/mnt/wslg/versions.txt", name=None)
@@ -318,14 +333,15 @@ def __wslg_eligible() -> bool:
return True
try:
os_version_raw, _, build_number_raw = EnvInfo.getWindowsRelease().split('.')[:3]
+ os_version = int(os_version_raw)
except ValueError:
logger.debug(f"Impossible to find the version of windows: '{EnvInfo.getWindowsRelease()}'")
logger.error("Exegol can't know if your [orange3]version of Windows[/orange3] can support dockerized GUIs (X11 sharing).")
return False
# Available for Windows 10 & 11
- os_version = int(os_version_raw)
if os_version >= 10:
return True
+ logger.debug(f"Current version of Windows doesn't support WSLg: {os_version_raw}.?.{build_number_raw}")
return False
@classmethod
@@ -333,6 +349,7 @@ def __find_wsl_distro(cls) -> str:
distro_name = ""
# these distros cannot be used to load WSLg socket
blacklisted_distro = ["docker-desktop", "docker-desktop-data"]
+ logger.debug("Running: C:\\Windows\\system32\\wsl.exe -l")
ret = subprocess.Popen(["C:\\Windows\\system32\\wsl.exe", "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Wait for WSL process to end
ret.wait()
@@ -387,6 +404,7 @@ def __find_wsl_distro(cls) -> str:
@classmethod
def __create_default_wsl(cls) -> bool:
logger.info("Creating Ubuntu WSL distribution. Please wait.")
+ logger.debug("Running: C:\\Windows\\system32\\wsl.exe --install -d Ubuntu")
ret = subprocess.Popen(["C:\\Windows\\system32\\wsl.exe", "--install", "-d", "Ubuntu"], stderr=subprocess.PIPE)
ret.wait()
logger.info("Please follow installation instructions on the new window.")
@@ -402,6 +420,7 @@ def __create_default_wsl(cls) -> bool:
docker_settings = EnvInfo.getDockerDesktopSettings()
if docker_settings is not None and docker_settings.get("enableIntegrationWithDefaultWslDistro", False):
logger.verbose("Set WSL Ubuntu as default to automatically enable docker integration")
+ logger.debug("Running: C:\\Windows\\system32\\wsl.exe -s Ubuntu")
# Set new WSL distribution as default to start it and enable docker integration
ret = subprocess.Popen(["C:\\Windows\\system32\\wsl.exe", "-s", "Ubuntu"], stderr=subprocess.PIPE)
ret.wait()
diff --git a/exegol/utils/WebUtils.py b/exegol/utils/WebUtils.py
index c3da213d..d6da2580 100644
--- a/exegol/utils/WebUtils.py
+++ b/exegol/utils/WebUtils.py
@@ -132,6 +132,9 @@ def __runRequest(cls, url: str, service_name: str, headers: Optional[Dict] = Non
https_proxy = os.environ.get('HTTPS_PROXY') or os.environ.get('https_proxy')
if https_proxy:
proxies['https'] = https_proxy
+ no_proxy = os.environ.get('NO_PROXY') or os.environ.get('no_proxy')
+ if no_proxy:
+ proxies['no_proxy'] = no_proxy
response = requests.request(method=method, url=url, timeout=(10, 20), verify=ParametersManager().verify, headers=headers, data=data, proxies=proxies if len(proxies) > 0 else None)
return response
except requests.exceptions.HTTPError as e:
diff --git a/exegol/utils/argParse.py b/exegol/utils/argParse.py
index ca3aa63a..31a5e900 100644
--- a/exegol/utils/argParse.py
+++ b/exegol/utils/argParse.py
@@ -1,8 +1,9 @@
import argparse
-import argcomplete
from logging import CRITICAL
from typing import IO, Optional, List, Union, Dict, cast
+import argcomplete
+
from exegol.console.cli.actions.Command import Command, Option
from exegol.utils.ExeLog import logger
@@ -19,8 +20,9 @@ def _print_message(self, message: str, file: Optional[IO[str]] = None) -> None:
class Parser:
"""Custom Exegol CLI Parser. Main controller of argument building and parsing."""
- __description = "This Python script is a wrapper for Exegol. It can be used to easily manage Exegol on " \
- "your machine."
+ __description = """This Python script is a wrapper for Exegol. It can be used to easily manage Exegol on your machine.
+
+[bold magenta]Exegol documentation:[/bold magenta] [underline magenta]https://exegol.rtfd.io[/underline magenta]"""
__formatter_class = argparse.RawTextHelpFormatter
def __init__(self, actions: List[Command]):
@@ -53,8 +55,11 @@ def __set_action_parser(self) -> None:
# Each action has a dedicated sub-parser with different options
# the 'help' description of the current action is retrieved
# from the comment of the corresponding action class
+ if action.__doc__ is None:
+ action.__doc__ = "Unknown action"
sub_parser = self.subParser.add_parser(action.name, help=action.__doc__,
- description=action.__doc__,
+ description=action.__doc__ + f"""\n
+[bold magenta]Exegol documentation:[/bold magenta] [underline magenta]https://exegol.rtfd.io/en/latest/exegol-wrapper/{action.name}.html[/underline magenta]""",
epilog=action.formatEpilog(),
formatter_class=self.__formatter_class)
sub_parser.set_defaults(action=action)
diff --git a/exegol/utils/imgsync/entrypoint.sh b/exegol/utils/imgsync/entrypoint.sh
index d5a2262b..cc700c38 100755
--- a/exegol/utils/imgsync/entrypoint.sh
+++ b/exegol/utils/imgsync/entrypoint.sh
@@ -93,6 +93,7 @@ function desktop() {
if command -v desktop-start &> /dev/null
then
echo "Starting Exegol [green]desktop[/green] with [blue]${EXEGOL_DESKTOP_PROTO}[/blue]"
+ ln -sf /root/.vnc /var/log/exegol/desktop
desktop-start &>> ~/.vnc/startup.log # Disable logging
sleep 2 # Waiting 2 seconds for the Desktop to start before continuing
else
diff --git a/requirements.txt b/requirements.txt
index b18d4f68..af0af7db 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
-docker~=7.0.0
-# Request holdback: temp fix for https://github.com/docker/docker-py/issues/3256
-requests~=2.31.0
+docker~=7.1.0
+requests~=2.32.3
rich~=13.7.1
GitPython~=3.1.43
-PyYAML>=6.0.1
-argcomplete~=3.3.0
\ No newline at end of file
+PyYAML>=6.0.2
+argcomplete~=3.5.0
+tzlocal~=5.2
diff --git a/setup.py b/setup.py
index 07dec25f..89d4c500 100644
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,9 @@
for k, v in data_files_dict.items():
data_files.append((k, v))
+with open("requirements.txt", "r", encoding="utf-8") as f:
+ requirements = [x.strip() for x in f.readlines()]
+
setup(
name='Exegol',
version=__version__,
@@ -54,14 +57,7 @@
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
],
- install_requires=[
- 'docker~=7.0.0',
- 'requests~=2.31.0',
- 'rich~=13.7.1',
- 'PyYAML',
- 'GitPython~=3.1.43',
- 'argcomplete~=3.3.0'
- ],
+ install_requires=requirements,
packages=find_packages(exclude=["tests"]),
include_package_data=True,
data_files=data_files,
| Mount /etc/localtime on macOS with Orbstack
# Description
This PR is to delete line that block Orbstack to mount /etc/localtime. Orbstack on macOS can mount this folder proprely now. I have tested several time (no option, VPN and Desktop) and no issue, the time is sync with the host !
# Related issues
No related issue
# Point of attention
Hope that my modification is correct :)
| 2024-06-23T17:54:25 | 0.0 | [] | [] |
|||
wtclarke/spec2nii | wtclarke__spec2nii-121 | 2b90b292d2431e4bc8357071529b42d7d5107478 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index b6442ec..f8904a7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,10 @@
This document contains the Spec2nii release history in reverse chronological order.
-0.7.2 (WIP)
----------------------------------
+0.7.2 (Thursday 7th December 2023)
+----------------------------------
- SpectralWidth now added to header extension automatically to match bids specification.
- NIfTI-MRS V0.8 now generated.
+- Better handling of philips spar/sdat tags and singleton dimensions.
0.7.1 (Tuesday 7th November 2023)
---------------------------------
diff --git a/spec2nii/Philips/philips.py b/spec2nii/Philips/philips.py
index e04fce0..fe1be0b 100644
--- a/spec2nii/Philips/philips.py
+++ b/spec2nii/Philips/philips.py
@@ -12,6 +12,8 @@
from spec2nii.nifti_orientation import NIFTIOrient, calc_affine
from spec2nii import __version__ as spec2nii_ver
+default_tag_order = ['DIM_DYN', 'DIM_EDIT', 'DIM_USER_0']
+
def read_sdat_spar_pair(sdat_file, spar_file, shape=None, tags=None, fileout=None, special=None):
"""Read and convert SPAR/SDAT pairs from Philips scanners
@@ -55,9 +57,19 @@ def read_sdat_spar_pair(sdat_file, spar_file, shape=None, tags=None, fileout=Non
meta = spar_to_nmrs_hdrext(spar_params)
meta.set_standard_def('OriginalFile', [sdat_file.name])
- for idx, tag in enumerate(tags):
+ # Sort dimension tags
+ # First ensure user defined tags has length of 3.
+ while len(tags) < 3:
+ tags.append(None)
+
+ # Set user defined tags, or the default if the dimension exists
+ # and is larger than 1
+ for idx, (tag, default) in enumerate(zip(tags, default_tag_order)):
+ npdim = idx + 4
if tag is not None:
meta.set_dim_info(idx, tag)
+ elif data.ndim > npdim and data.shape[npdim] > 1:
+ meta.set_dim_info(idx, default)
# Orientation
if spar_params["volume_selection_enable"] == "yes":
diff --git a/spec2nii/spec2nii.py b/spec2nii/spec2nii.py
index 9ea7897..86f78d9 100644
--- a/spec2nii/spec2nii.py
+++ b/spec2nii/spec2nii.py
@@ -108,8 +108,10 @@ def add_common_parameters(subparser):
"-t", "--tags",
type=str,
nargs='+',
- default=["DIM_DYN", None, None],
- help="Specify NIfTI MRS tags used for higher (5th-7th) dimensions.")
+ default=[None, None, None],
+ help="Specify NIfTI MRS tags used for higher (5th-7th) dimensions. "
+ "Defaults to DIM_DYN if more than one spectrum is present. "
+ "Can be used to create singleton higher dimensions.")
parser_philips.add_argument(
"-s", "--shape",
type=int,
| Singleton dimension generated for single transient SPAR/SDAT
See https://forum.mrshub.org/t/numpy-axis-error-fsl-mrs/1569/4
| 2023-12-07T09:46:20 | 0.0 | [] | [] |
|||
wtclarke/spec2nii | wtclarke__spec2nii-120 | 5d166e35661c1b648d11b6bbc661d45c525157a0 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 62e8984..b6442ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
This document contains the Spec2nii release history in reverse chronological order.
+0.7.2 (WIP)
+---------------------------------
+- SpectralWidth now added to header extension automatically to match bids specification.
+- NIfTI-MRS V0.8 now generated.
+
0.7.1 (Tuesday 7th November 2023)
---------------------------------
- The --anon flag can be passed with any call to anonymise after writing files.
diff --git a/requirements.yml b/requirements.yml
index 09d716f..4e0f419 100644
--- a/requirements.yml
+++ b/requirements.yml
@@ -6,4 +6,4 @@ dependencies:
- scipy
- brukerapi>=0.1.8
- pandas
- - nifti-mrs>=1.0.2
+ - nifti-mrs>=1.1.1
diff --git a/spec2nii/spec2nii.py b/spec2nii/spec2nii.py
index 56742e0..9ea7897 100644
--- a/spec2nii/spec2nii.py
+++ b/spec2nii/spec2nii.py
@@ -13,6 +13,7 @@
import json
from nibabel.nifti2 import Nifti2Image
from spec2nii import __version__ as spec2nii_ver
+from numpy import isclose
# There are case specific imports below
@@ -291,6 +292,8 @@ def add_common_parameters(subparser):
if self.imageOut:
self.implement_overrides(args)
+ self.insert_spectralwidth()
+
if args.anon:
from spec2nii.anonymise import anon_nifti_mrs
for idx, nifti_mrs_img in enumerate(self.imageOut):
@@ -325,6 +328,19 @@ def implement_overrides(self, args):
nifti_mrs_img.header.extensions.clear()
nifti_mrs_img.header.extensions.append(new_ext)
+ def insert_spectralwidth(self):
+ """Ensure that the correct spectral width is inserted into the header extension"""
+ for nifti_mrs_img in self.imageOut:
+ if 'SpectralWidth' in nifti_mrs_img.hdr_ext\
+ and not isclose(
+ nifti_mrs_img.hdr_ext['SpectralWidth'],
+ 1 / nifti_mrs_img.dwelltime,
+ atol=1E-2):
+ nifti_mrs_img.remove_hdr_field('SpectralWidth')
+ nifti_mrs_img.add_hdr_field('SpectralWidth', 1 / nifti_mrs_img.dwelltime)
+ else:
+ nifti_mrs_img.add_hdr_field('SpectralWidth', 1 / nifti_mrs_img.dwelltime)
+
def validate_output(self):
"""Run NIfTI MRS validation on output."""
import nifti_mrs.validator as validate
| "spectral width" field not stored in nii header extension
Hi Will,
One more thing: my Philips SPAR/SDAT data have the dwell time correctly stored in the 5-th pixdim element, however, the "spectral width" metadata field is not written out explicitly in the header extension or the json sidecar, although it is a mandatory metadata field according to the BEP (https://bids-specification.readthedocs.io/en/bep022/modality-specific-files/magnetic-resonance-spectroscopy.html#mrs-specific-fields).
Thanks :)
Georg
| Ugh, this is a conflict between the BIDs and the NIfTI-MRS standard. `SpectralWidth` isn't a defined parameter in the NIfTI-MRS standard because I always argued that we should just use the pixdim field. I guess I'll just align with BIDs and make it so, but automatically populate it from the pixdim value.
Yeah, I think that's the best solution. Thanks! | 2023-12-06T21:20:08 | 0.0 | [] | [] |
||
wtclarke/spec2nii | wtclarke__spec2nii-115 | ba2010813d295c2892fc8724846a7b25709be28c | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e75b23..84797b7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ This document contains the Spec2nii release history in reverse chronological ord
--------------------------------
- The --anon flag can be passed with any call to anonymise after writing files.
- The Siemens enhanced dicom filetype pathway now handles CSI data.
+- Fixed issue with RDA files having latin1 encoding. Thanks to gaunab on github. Fixes Issue #96.
0.7.0 (Saturday 5th August 2023)
--------------------------------
diff --git a/spec2nii/Siemens/rda.py b/spec2nii/Siemens/rda.py
index a3cc25e..294948f 100644
--- a/spec2nii/Siemens/rda.py
+++ b/spec2nii/Siemens/rda.py
@@ -44,18 +44,33 @@ def convert_rda(rda_path, fname_out, verbose):
with open(rda_path, 'rb') as fp:
for line in fp:
- if hdr_st.search(line.decode()):
- pass
- # print('header found')
- elif hdr_end.search(line.decode()):
- # print('header end')
- break
- else:
- match = hdr_val.search(line.decode())
- if len(match.groups()) < 2:
- hdr[match[1]] = None
+ try:
+ if hdr_st.search(line.decode()):
+ pass
+ # print('header found')
+ elif hdr_end.search(line.decode()):
+ # print('header end')
+ break
+ else:
+ match = hdr_val.search(line.decode())
+ if len(match.groups()) < 2:
+ hdr[match[1]] = None
+ else:
+ hdr[match[1]] = match[2]
+ except UnicodeDecodeError:
+ print('Trying latin-1 encoding.')
+ if hdr_st.search(line.decode('latin-1')):
+ pass
+ # print('header found')
+ elif hdr_end.search(line.decode('latin-1')):
+ # print('header end')
+ break
else:
- hdr[match[1]] = match[2]
+ match = hdr_val.search(line.decode('latin-1'))
+ if len(match.groups()) < 2:
+ hdr[match[1]] = None
+ else:
+ hdr[match[1]] = match[2]
if verbose:
print(hdr)
| converting RDA results in UnicodeDecodeError
When I try to convert a Siemens RDA-File I get the following Error:
`UnicodeDecodeError: 'utf-8' codec can't decode byte 0xf6 in position 27: invalid start byte`
The Error is raised by:
`spec2nii/Siemens/rda.py", line 47, in convert_rda`
| 2023-10-12T09:59:01 | 0.0 | [] | [] |
|||
wtclarke/spec2nii | wtclarke__spec2nii-11 | f259a0d2d0d29a7a06cff71fd5f99db71e9187b3 | diff --git a/spec2nii/twixfunctions.py b/spec2nii/twixfunctions.py
index e2b16f6..325e156 100644
--- a/spec2nii/twixfunctions.py
+++ b/spec2nii/twixfunctions.py
@@ -41,8 +41,14 @@ def process_twix(twixObj, base_name_out, name_in, dataKey, dim_overides, quiet=F
n_voxels = twixObj.hdr.Meas.lFinalMatrixSizeSlice \
* twixObj.hdr.Meas.lFinalMatrixSizePhase \
* twixObj.hdr.Meas.lFinalMatrixSizeRead
- else:
+ elif twixObj.hdr.Meas.lFinalMatrixSizeSlice:
n_voxels = twixObj.hdr.Meas.lFinalMatrixSizeSlice
+ else:
+ # If lFinalMatrixSize{Slice,Phase,Read} are all empty
+ # Either unlocalised or unusually filled in headers.
+ # Assume 1 voxel for either SVS or unlocalised case.
+ # RM's SPECIAL sequence hits this. See https://github.com/wexeee/spec2nii/issues/6.
+ n_voxels = 1
if n_voxels > 1:
return process_mrsi(twixObj, base_name_out, name_in, dataKey, quiet=quiet, verbose=verbose)
| TypeError: '>' not supported between instances of 'str' and 'int' during TWIX conversion
I'm triyng to convert a TWIX file from a 7T scanner, but `spec2nii` fails with the error in the object.
```
spec2nii twix -v sub-701_nuc-1H_loc-pcc_spec-lr-special.dat
pymapVBVD version 0.4.1
Software version: VB
Scan 1/1, read all mdhs: 0%| | 0.00/64.8M [00:00<?, ?B/s]Contents of file: sub-701_nuc-1H_loc-pcc_spec-lr-special.dat
The file contains these evalinfo flags with dimensions and sizes as follows:
image : Col, Cha, Set [4096 32 64]
Scan 1/1, read all mdhs: 98%|██████████████████████████████████████████████████████████ | 63.7M/64.8M [00:00<00:00, 744MB/s]
```
```
spec2nii twix -e image sub-701_nuc-1H_loc-pcc_spec-lr-special.dat
pymapVBVD version 0.4.1
Software version: VB
Scan 1/1, read all mdhs: 0%| | 0.00/64.8M [00:00<?, ?B/s]Converting twix file sub-701_nuc-1H_loc-pcc_spec-lr-special.dat.
Looking for evalinfo flag image.
Traceback (most recent call last):
File "/home/orco/.conda/envs/mrs/bin/spec2nii", line 10, in <module>
sys.exit(main())
File "/home/orco/.conda/envs/mrs/lib/python3.7/site-packages/spec2nii/spec2nii.py", line 401, in main
spec2nii(*args)
File "/home/orco/.conda/envs/mrs/lib/python3.7/site-packages/spec2nii/spec2nii.py", line 188, in __init__
args.func(args)
File "/home/orco/.conda/envs/mrs/lib/python3.7/site-packages/spec2nii/spec2nii.py", line 267, in twix
args.verbose)
File "/home/orco/.conda/envs/mrs/lib/python3.7/site-packages/spec2nii/twixfunctions.py", line 47, in process_twix
if n_voxels > 1:
TypeError: '>' not supported between instances of 'str' and 'int'
Scan 1/1, read all mdhs: 98%|██████████████████████████████████████████████████████████ | 63.7M/64.8M [00:00<00:00, 702MB/s]
```
If more informations are needed just tell me.
| Hi @0rC0 ,
Thanks for reporting this. Are you able to share this data file with me? I can then have a look at it in the debugger.
What sort of data is it? I guess from the name it's a SPECIAL sequence. Does it have localisation apart from the ISIS module? And what sort of 7T is it? I haven't seen much from the Terra.
BW,
Will
Hi @wexeee,
and thanks for the fast answer!
It's a Magnetom scanner, not Terra and yes, it is SPECIAL sequence, as in (1).
I cannot post the TWIX here on GitHub, but I can send you a TWIX, that gives the same error, acquired with the same sequence on a phantom.
I've sent the download link to the email address found on your profile.
BW
Andrea
(1) Mekle, R., Mlynárik, V., Gambarota, G., Hergt, M., Krueger, G., & Gruetter, R. (2009). MR spectroscopy of the human brain with enhanced signal intensity at ultrashort echo times on a clinical platform at 3T and 7T. Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine, 61(6), 1279-1285. | 2021-04-16T14:59:51 | 0.0 | [] | [] |
||
imi-bigpicture/wsidicomizer | imi-bigpicture__wsidicomizer-55 | 68fd31605ede2bd55da3c7f186b1a3f28f29c8a9 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 187a4cb..caa61ff 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] -
+## [0.7.0] - 2023-02-13
+
+### Added
+
+- Parameter to change label to given image.
+
+### Changed
+
+- Refactored to enable re-use of instance creation methods.
+
## [0.6.0] - 2023-01-25
### Changed
@@ -108,7 +118,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Initial release of wsidicomizer
-[Unreleased]: https://github.com/imi-bigpicture/wsidicomizer/compare/0.6.0..HEAD
+[Unreleased]: https://github.com/imi-bigpicture/wsidicomizer/compare/0.7.0..HEAD
+[0.7.0]: https://github.com/imi-bigpicture/wsidicomizer/compare/0.6.0..0.7.0
[0.6.0]: https://github.com/imi-bigpicture/wsidicomizer/compare/0.5.1..0.6.0
[0.5.1]: https://github.com/imi-bigpicture/wsidicomizer/compare/0.5.0..0.5.1
[0.5.0]: https://github.com/imi-bigpicture/wsidicomizer/compare/0.4.0..0.5.0
diff --git a/poetry.lock b/poetry.lock
index 01b6ce7..d2a2d4b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -148,7 +148,7 @@ python-versions = "*"
[[package]]
name = "numpy"
-version = "1.24.1"
+version = "1.24.2"
description = "Fundamental package for array computing in Python"
category = "main"
optional = false
@@ -391,7 +391,7 @@ watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "wsidicom"
-version = "0.6.0"
+version = "0.7.0"
description = "Tools for handling DICOM based whole scan images"
category = "main"
optional = false
@@ -405,7 +405,7 @@ pydicom = ">=2.1.0,<3.0.0"
[metadata]
lock-version = "1.1"
python-versions = ">=3.8,<3.12"
-content-hash = "be808d5f70bee4914f13153d4d092f1a73fc5ad31294f0eb146113044e5399a2"
+content-hash = "b16cb4c8e9f9d4114fbbb13ff1f8a6d73accd6730d4158a418bebbb09e6605d6"
[metadata.files]
attrs = [
@@ -572,34 +572,34 @@ mccabe = [
{file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
]
numpy = [
- {file = "numpy-1.24.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:179a7ef0889ab769cc03573b6217f54c8bd8e16cef80aad369e1e8185f994cd7"},
- {file = "numpy-1.24.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b09804ff570b907da323b3d762e74432fb07955701b17b08ff1b5ebaa8cfe6a9"},
- {file = "numpy-1.24.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b739841821968798947d3afcefd386fa56da0caf97722a5de53e07c4ccedc7"},
- {file = "numpy-1.24.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e3463e6ac25313462e04aea3fb8a0a30fb906d5d300f58b3bc2c23da6a15398"},
- {file = "numpy-1.24.1-cp310-cp310-win32.whl", hash = "sha256:b31da69ed0c18be8b77bfce48d234e55d040793cebb25398e2a7d84199fbc7e2"},
- {file = "numpy-1.24.1-cp310-cp310-win_amd64.whl", hash = "sha256:b07b40f5fb4fa034120a5796288f24c1fe0e0580bbfff99897ba6267af42def2"},
- {file = "numpy-1.24.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7094891dcf79ccc6bc2a1f30428fa5edb1e6fb955411ffff3401fb4ea93780a8"},
- {file = "numpy-1.24.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e418681372520c992805bb723e29d69d6b7aa411065f48216d8329d02ba032"},
- {file = "numpy-1.24.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e274f0f6c7efd0d577744f52032fdd24344f11c5ae668fe8d01aac0422611df1"},
- {file = "numpy-1.24.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0044f7d944ee882400890f9ae955220d29b33d809a038923d88e4e01d652acd9"},
- {file = "numpy-1.24.1-cp311-cp311-win32.whl", hash = "sha256:442feb5e5bada8408e8fcd43f3360b78683ff12a4444670a7d9e9824c1817d36"},
- {file = "numpy-1.24.1-cp311-cp311-win_amd64.whl", hash = "sha256:de92efa737875329b052982e37bd4371d52cabf469f83e7b8be9bb7752d67e51"},
- {file = "numpy-1.24.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b162ac10ca38850510caf8ea33f89edcb7b0bb0dfa5592d59909419986b72407"},
- {file = "numpy-1.24.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26089487086f2648944f17adaa1a97ca6aee57f513ba5f1c0b7ebdabbe2b9954"},
- {file = "numpy-1.24.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caf65a396c0d1f9809596be2e444e3bd4190d86d5c1ce21f5fc4be60a3bc5b36"},
- {file = "numpy-1.24.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0677a52f5d896e84414761531947c7a330d1adc07c3a4372262f25d84af7bf7"},
- {file = "numpy-1.24.1-cp38-cp38-win32.whl", hash = "sha256:dae46bed2cb79a58d6496ff6d8da1e3b95ba09afeca2e277628171ca99b99db1"},
- {file = "numpy-1.24.1-cp38-cp38-win_amd64.whl", hash = "sha256:6ec0c021cd9fe732e5bab6401adea5a409214ca5592cd92a114f7067febcba0c"},
- {file = "numpy-1.24.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:28bc9750ae1f75264ee0f10561709b1462d450a4808cd97c013046073ae64ab6"},
- {file = "numpy-1.24.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84e789a085aabef2f36c0515f45e459f02f570c4b4c4c108ac1179c34d475ed7"},
- {file = "numpy-1.24.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e669fbdcdd1e945691079c2cae335f3e3a56554e06bbd45d7609a6cf568c700"},
- {file = "numpy-1.24.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef85cf1f693c88c1fd229ccd1055570cb41cdf4875873b7728b6301f12cd05bf"},
- {file = "numpy-1.24.1-cp39-cp39-win32.whl", hash = "sha256:87a118968fba001b248aac90e502c0b13606721b1343cdaddbc6e552e8dfb56f"},
- {file = "numpy-1.24.1-cp39-cp39-win_amd64.whl", hash = "sha256:ddc7ab52b322eb1e40521eb422c4e0a20716c271a306860979d450decbb51b8e"},
- {file = "numpy-1.24.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed5fb71d79e771ec930566fae9c02626b939e37271ec285e9efaf1b5d4370e7d"},
- {file = "numpy-1.24.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad2925567f43643f51255220424c23d204024ed428afc5aad0f86f3ffc080086"},
- {file = "numpy-1.24.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cfa1161c6ac8f92dea03d625c2d0c05e084668f4a06568b77a25a89111621566"},
- {file = "numpy-1.24.1.tar.gz", hash = "sha256:2386da9a471cc00a1f47845e27d916d5ec5346ae9696e01a8a34760858fe9dd2"},
+ {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"},
+ {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"},
+ {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"},
+ {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"},
+ {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"},
+ {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"},
+ {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"},
+ {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"},
+ {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"},
+ {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"},
+ {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"},
+ {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"},
+ {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"},
+ {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"},
+ {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"},
+ {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"},
+ {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"},
+ {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"},
+ {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"},
+ {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"},
+ {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"},
+ {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"},
+ {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"},
+ {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"},
+ {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"},
+ {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"},
+ {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"},
+ {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"},
]
openslide-python = [
{file = "openslide-python-1.2.0.tar.gz", hash = "sha256:f484f833566dbe5788d03d6dbab6fafeadabefa9de8d497715527d2c0cbd986e"},
@@ -818,6 +818,6 @@ watchdog = [
{file = "watchdog-2.2.1.tar.gz", hash = "sha256:cdcc23c9528601a8a293eb4369cbd14f6b4f34f07ae8769421252e9c22718b6f"},
]
wsidicom = [
- {file = "wsidicom-0.6.0-py3-none-any.whl", hash = "sha256:6e77a9cd61584b3ebe2f16881abcf9b80f76d755254cc9038518ca8837401fa2"},
- {file = "wsidicom-0.6.0.tar.gz", hash = "sha256:0b6ec232964c391637b4a414ab21dffb63acb275f01bc0c4b673691760640a9d"},
+ {file = "wsidicom-0.7.0-py3-none-any.whl", hash = "sha256:549f37ff089ef8ef23f54ca330c15710596967a32f765cac4f4ea0bb4466aa2e"},
+ {file = "wsidicom-0.7.0.tar.gz", hash = "sha256:c55f3da1ee55d76f0b9dccc796769cda727b5a24ccd73c2546b50b7c6a7d570c"},
]
diff --git a/pyproject.toml b/pyproject.toml
index 1ee8442..3dea492 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "wsidicomizer"
-version = "0.6.0"
+version = "0.7.0"
description = "Tool for converting wsi-files to DICOM"
authors = ["Erik O Gabrielsson <[email protected]>"]
license = "Apache-2.0"
@@ -18,7 +18,7 @@ classifiers = [
[tool.poetry.dependencies]
python = ">=3.8,<3.12"
-wsidicom = "^0.6.0"
+wsidicom = "^0.7.0"
opentile = "^0.6.0"
openslide-python = "^1.1.2"
numpy = "^1.22.0"
diff --git a/wsidicomizer/__init__.py b/wsidicomizer/__init__.py
index 55e2a85..f1db44e 100644
--- a/wsidicomizer/__init__.py
+++ b/wsidicomizer/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 SECTRA AB
+# Copyright 2021, 2022, 2023 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +16,6 @@
create_device_module, create_patient_module,
create_sample, create_specimen_module,
create_study_module)
-from wsidicomizer.interface import WsiDicomizer
+from wsidicomizer.wsidicomizer import WsiDicomizer
-__version__ = '0.6.0'
+__version__ = '0.7.0'
diff --git a/wsidicomizer/base_dicomizer.py b/wsidicomizer/base_dicomizer.py
new file mode 100644
index 0000000..3ae0c39
--- /dev/null
+++ b/wsidicomizer/base_dicomizer.py
@@ -0,0 +1,230 @@
+# Copyright 2021, 2022, 2023 SECTRA AB
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABCMeta, abstractmethod
+from pathlib import Path
+from typing import List, Optional, Sequence, Union
+from PIL.Image import Image as PILImage
+from opentile.metadata import Metadata
+from pydicom import Dataset, config
+from pydicom.dataset import Dataset
+from wsidicom import (WsiDicomLabels, WsiDicomLevels,
+ WsiDicomOverviews, WsiInstance)
+from wsidicom.dataset import ImageType
+
+from wsidicomizer.dataset import create_base_dataset, populate_base_dataset
+from wsidicomizer.encoding import Encoder
+from wsidicomizer.image_data import DicomizerImageData
+
+config.enforce_valid_values = True
+config.future_behavior()
+
+
+class BaseDicomizer(metaclass=ABCMeta):
+ """Metaclass for Dicomizers. Subclasses should implement is_supported() and
+ open().
+ """
+ def __init__(
+ self,
+ filepath: Path,
+ encoder: Encoder,
+ tile_size: int = 512,
+ modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
+ include_confidential: bool = True,
+ ) -> None:
+ self._filepath = filepath
+ self._encoder = encoder
+ self._tile_size = tile_size
+ self._modules = modules
+ self._include_confidential = include_confidential
+ self._base_dataset = populate_base_dataset(
+ self.metadata,
+ create_base_dataset(modules),
+ include_confidential
+ )
+
+ @staticmethod
+ @abstractmethod
+ def is_supported(path: Path) -> bool:
+ """Return True if file in filepath is supported by Dicomizer."""
+ raise NotImplementedError()
+
+ @property
+ @abstractmethod
+ def metadata(self) -> Metadata:
+ """Return metadata for file."""
+ raise NotImplementedError()
+
+ @property
+ @abstractmethod
+ def pyramid_levels(self) -> List[int]:
+ """Return pyramid levels (scalings) for file."""
+ raise NotImplementedError()
+
+ @property
+ @abstractmethod
+ def has_label(self) -> bool:
+ """Return True if file has a label image."""
+ raise NotImplementedError()
+
+ @property
+ @abstractmethod
+ def has_overview(self) -> bool:
+ """Return True if file has a overview image."""
+ raise NotImplementedError()
+
+ @abstractmethod
+ def _create_level_image_data(self, level_index: int) -> DicomizerImageData:
+ """Return image data instance for level."""
+ raise NotImplementedError()
+
+ @abstractmethod
+ def _create_label_image_data(self,) -> DicomizerImageData:
+ """Return image data instance for label."""
+ raise NotImplementedError()
+
+ @abstractmethod
+ def _create_overview_image_data(self) -> DicomizerImageData:
+ """Return image data instance for overview."""
+ raise NotImplementedError()
+
+ def create_levels(
+ self,
+ include_levels: Optional[Sequence[int]] = None,
+ ) -> WsiDicomLevels:
+ """Return levels from file
+
+ Parameters
+ ----------
+ include_levels: Optional[Sequence[int]] = None
+ Optional list indices (in present levels) to include, e.g. [0, 1]
+ includes the two lowest levels. Negative indicies can be used,
+ e.g. [-1, -2] includes the two highest levels.
+
+ Returns
+ ----------
+ WsiDicomLevels
+ Created levels.
+ """
+ level_instances = [
+ WsiInstance.create_instance(
+ self._create_level_image_data(level_index),
+ self._base_dataset,
+ ImageType.VOLUME
+ )
+ for level_index in range(len(self.pyramid_levels))
+ if self._is_included_level(
+ self.pyramid_levels[level_index],
+ self.pyramid_levels,
+ include_levels
+ )
+ ]
+ return WsiDicomLevels.open(level_instances)
+
+ def create_labels(
+ self,
+ include_label: bool,
+ label: Optional[Union[PILImage, str, Path]] = None
+ ) -> Optional[WsiDicomLabels]:
+ """Return labels from file
+
+ Parameters
+ ----------
+ include_label: bool
+ Include label(s).
+ label: Optional[Union[PILImage, str, Path]] = None
+ Optional label image to use instead of label found in file.
+
+ Returns
+ ----------
+ Optional[WsiDicomLabels]
+ Created labels.
+ """
+ if include_label:
+ if label is not None:
+ label_instance = WsiInstance.create_label(
+ label,
+ self._base_dataset,
+ )
+ elif self.has_label:
+ label_instance = WsiInstance.create_instance(
+ self._create_label_image_data(),
+ self._base_dataset,
+ ImageType.LABEL
+ )
+ else:
+ return None
+ return WsiDicomLabels.open([label_instance])
+ return None
+
+ def create_oveviews(
+ self,
+ include_overview: bool
+ ) -> Optional[WsiDicomOverviews]:
+ """Return overviews from file
+
+ Parameters
+ ----------
+ include_overwiew: bool
+ Include overview(s).
+
+ Returns
+ ----------
+ Optional[WsiDicomOverviews]
+ Created overviews.
+ """
+ if include_overview and self.has_overview:
+ overview_instance = WsiInstance.create_instance(
+ self._create_overview_image_data(),
+ self._base_dataset,
+ ImageType.OVERVIEW
+ )
+ return WsiDicomOverviews.open([overview_instance])
+ return None
+
+ @staticmethod
+ def _is_included_level(
+ level: int,
+ present_levels: Sequence[int],
+ include_indices: Optional[Sequence[int]] = None
+ ) -> bool:
+ """Return true if pyramid level is in included levels.
+
+ Parameters
+ ----------
+ level: int
+ Pyramid level to check.
+ present_levels: Sequence[int]
+ List of pyramid levels present.
+ include_indices: Optional[Sequence[int]] = None
+ Optional list indices (in present levels) to include, e.g. [0, 1]
+ includes the two lowest levels. Negative indicies can be used,
+ e.g. [-1, -2] includes the two highest levels. Default of None
+ will not limit the selection. An empty sequence will exluded all
+ levels.
+
+ Returns
+ ----------
+ bool
+ True if level should be included.
+ """
+ if level not in present_levels:
+ return False
+ if include_indices is None:
+ return True
+ absolute_levels = [
+ present_levels[level] for level in include_indices
+ if -len(present_levels) <= level < len(present_levels)
+ ]
+ return level in absolute_levels
diff --git a/wsidicomizer/cli.py b/wsidicomizer/cli.py
index ae0b76b..52869c7 100644
--- a/wsidicomizer/cli.py
+++ b/wsidicomizer/cli.py
@@ -20,7 +20,7 @@
from pydicom.dataset import Dataset
-from wsidicomizer.interface import WsiDicomizer
+from wsidicomizer.wsidicomizer import WsiDicomizer
def main():
diff --git a/wsidicomizer/common.py b/wsidicomizer/common.py
deleted file mode 100644
index a11466d..0000000
--- a/wsidicomizer/common.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# Copyright 2021 SECTRA AB
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from abc import ABCMeta, abstractmethod
-from copy import deepcopy
-from typing import Optional, Sequence, Union
-
-import numpy as np
-from pydicom import Dataset, config
-from pydicom.dataset import Dataset
-from pydicom.sequence import Sequence as DicomSequence
-from pydicom.uid import (JPEG2000, JPEG2000Lossless, JPEGBaseline8Bit,
- generate_uid)
-from pydicom.valuerep import DSfloat
-from wsidicom import ImageData, WsiDicom, WsiInstance
-from wsidicom.image_data import ImageOrigin
-from wsidicom.instance import WsiDataset
-
-from wsidicomizer.encoding import Encoder
-
-from .dataset import get_image_type
-
-config.enforce_valid_values = True
-config.future_behavior()
-
-
-class MetaImageData(ImageData, metaclass=ABCMeta):
- _default_z = None
-
- def __init__(
- self,
- encoder: Encoder
- ):
- """Metaclass for Dicomized image data.
-
- Parameters
- ----------
- encoded: Encoder
- Encoder to use.
- """
- self._encoder = encoder
-
- @property
- @abstractmethod
- def pyramid_index(self) -> int:
- """Should return pyramid level for image data."""
- raise NotImplementedError()
-
- @property
- def image_origin(self) -> ImageOrigin:
- """Return a default ImageOrigin."""
- return ImageOrigin()
-
- def create_instance_dataset(
- self,
- base_dataset: Dataset,
- image_flavor: str,
- instance_number: int,
- image_data: ImageData
- ) -> WsiDataset:
- """Return instance dataset for image_data based on base dataset.
-
- Parameters
- ----------
- base_dataset: Dataset
- Dataset common for all instances.
- image_flavor:
- Type of instance ('VOLUME', 'LABEL', 'OVERVIEW)
- instance_number: int
- image_data:
- Image data to crate dataset for.
-
- Returns
- ----------
- WsiDataset
- Dataset for instance.
- """
- dataset = deepcopy(base_dataset)
- dataset.ImageType = get_image_type(
- image_flavor,
- self.pyramid_index
- )
- dataset.SOPInstanceUID = generate_uid(prefix=None)
- shared_functional_group_sequence = Dataset()
- if self.pixel_spacing is None:
- if image_flavor == 'VOLUME':
- raise ValueError(
- "Image flavor 'VOLUME' requires pixel spacing to be set"
- )
- else:
-
- pixel_measure_sequence = Dataset()
- pixel_measure_sequence.PixelSpacing = [
- DSfloat(self.pixel_spacing.width, True),
- DSfloat(self.pixel_spacing.height, True)
- ]
- pixel_measure_sequence.SpacingBetweenSlices = 0.0
- # DICOM 2022a part 3 IODs - C.8.12.4.1.2 Imaged Volume Width,
- # Height, Depth. Depth must not be 0. Default to 0.5 microns
- pixel_measure_sequence.SliceThickness = 0.0005
- shared_functional_group_sequence.PixelMeasuresSequence = (
- DicomSequence([pixel_measure_sequence])
- )
- dataset.SharedFunctionalGroupsSequence = DicomSequence(
- [shared_functional_group_sequence]
- )
- dataset.ImagedVolumeWidth = (
- self.image_size.width * self.pixel_spacing.width
- )
- dataset.ImagedVolumeHeight = (
- self.image_size.height * self.pixel_spacing.height
- )
- dataset.ImagedVolumeDepth = pixel_measure_sequence.SliceThickness
- # DICOM 2022a part 3 IODs - C.8.12.9 Whole Slide Microscopy Image
- # Frame Type Macro. Analogous to ImageType and shared by all
- # frames so clone
- wsi_frame_type_item = Dataset()
- wsi_frame_type_item.FrameType = dataset.ImageType
- (
- shared_functional_group_sequence.
- WholeSlideMicroscopyImageFrameTypeSequence
- ) = (
- DicomSequence([wsi_frame_type_item])
- )
-
- dataset.DimensionOrganizationType = 'TILED_FULL'
- dataset.TotalPixelMatrixColumns = self.image_size.width
- dataset.TotalPixelMatrixRows = self.image_size.height
- dataset.Columns = self.tile_size.width
- dataset.Rows = self.tile_size.height
- dataset.NumberOfFrames = (
- self.tiled_size.width
- * self.tiled_size.height
- )
-
- if image_data.transfer_syntax == JPEGBaseline8Bit:
- dataset.BitsAllocated = 8
- dataset.BitsStored = 8
- dataset.HighBit = 7
- dataset.PixelRepresentation = 0
- dataset.LossyImageCompression = '01'
- dataset.LossyImageCompressionRatio = 1
- dataset.LossyImageCompressionMethod = 'ISO_10918_1'
- dataset.LossyImageCompression = '01'
- elif image_data.transfer_syntax == JPEG2000:
- # TODO JPEG2000 can have higher bitcount
- dataset.BitsAllocated = 8
- dataset.BitsStored = 8
- dataset.HighBit = 7
- dataset.PixelRepresentation = 0
- # dataset.LossyImageCompressionRatio = 1
- dataset.LossyImageCompressionMethod = 'ISO_15444_1'
- dataset.LossyImageCompression = '01'
- elif image_data.transfer_syntax == JPEG2000Lossless:
- # TODO JPEG2000 can have higher bitcount
- dataset.BitsAllocated = 8
- dataset.BitsStored = 8
- dataset.HighBit = 7
- dataset.PixelRepresentation = 0
- dataset.LossyImageCompression = '00'
- else:
- raise ValueError("Non-supported transfer syntax.")
-
- dataset.PhotometricInterpretation = (
- image_data.photometric_interpretation
- )
- dataset.SamplesPerPixel = image_data.samples_per_pixel
-
- dataset.PlanarConfiguration = 0
-
- dataset.InstanceNumber = instance_number
- dataset.FocusMethod = 'AUTO'
- dataset.ExtendedDepthOfField = 'NO'
- return WsiDataset(dataset)
-
- def _encode(
- self,
- image_data: np.ndarray
- ) -> bytes:
- """Return image data encoded in jpeg using set quality and subsample
- options.
-
- Parameters
- ----------
- image_data: np.ndarray
- Image data to encode.
-
- Returns
- ----------
- bytes
- Jpeg bytes.
- """
- return self._encoder.encode(image_data)
-
-
-class MetaDicomizer(WsiDicom, metaclass=ABCMeta):
- """Metaclass for Dicomizers. Subclasses should implement is_supported() and
- open().
- """
- @staticmethod
- @abstractmethod
- def is_supported(path: str) -> bool:
- """Return True if file in filepath is supported by Dicomizer."""
- raise NotImplementedError()
-
- @classmethod
- @abstractmethod
- def open(
- cls,
- filepath: str,
- modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
- tile_size: Optional[int] = 512,
- include_levels: Optional[Sequence[int]] = None,
- include_label: bool = True,
- include_overview: bool = True,
- include_confidential: bool = True,
- encoding_format: str = 'jpeg',
- encoding_quality: int = 90,
- jpeg_subsampling: str = '420'
- ) -> WsiDicom:
- """Open file in filepath as WsiDicom object. Note that created
- instances always has a random UID.
-
- Parameters
- ----------
- filepath: str
- Path to tiff file
- modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
- Module datasets to use in files. If none, use default modules.
- tile_size: Optional[int] = 512
- Tile size to use if not defined by file.
- include_levels: Sequence[int] = None
- Optional list of level indices to include. If None include all
- levels, if empty sequence exlude all levels. E.g. [0, 1]
- includes only the two lowest levels. Negative indicies can be used,
- e.g. [-1, -2] includes only the two highest levels.
- include_label: bool = True
- Inclube label.
- include_overview: bool = True
- Include overview.
- include_confidential: bool = True
- Include confidential metadata.
- encoding_format: str = 'jpeg'
- Encoding format to use if re-encoding. 'jpeg' or 'jpeg2000'.
- encoding_quality: int = 90
- Quality to use if re-encoding. Do not use > 95 for jpeg. Use 100
- for lossless jpeg2000.
- jpeg_subsampling: str = '420'
- Subsampling option if using jpeg for re-encoding. Use '444' for
- no subsampling, '422' for 2x1 subsampling, and '420' for 2x2
- subsampling.
-
- Returns
- ----------
- WsiDicom
- WsiDicom object of file in filepath.
- """
- raise NotImplementedError()
-
- @staticmethod
- def _create_instance(
- image_data: MetaImageData,
- base_dataset: Dataset,
- image_type: str,
- instance_number: int
- ) -> WsiInstance:
- """Create WsiInstance from MetaImageData.
-
- Parameters
- ----------
- image_data: ImageData
- Image data and metadata.
- base_dataset: Dataset
- Base dataset to include.
- image_type: str
- Type of instance to create.
- instance_number: int
- The number of the instance (in a series).
-
- Returns
- ----------
- WsiInstance
- Created WsiInstance.
- """
- instance_dataset = image_data.create_instance_dataset(
- base_dataset,
- image_type,
- instance_number,
- image_data
- )
-
- return WsiInstance(
- instance_dataset,
- image_data
- )
-
- @staticmethod
- def _is_included_level(
- level: int,
- present_levels: Sequence[int],
- include_indices: Optional[Sequence[int]] = None
- ) -> bool:
- """Return true if pyramid level is in included levels.
-
- Parameters
- ----------
- level: int
- Pyramid level to check.
- present_levels: Sequence[int]
- List of pyramid levels present.
- include_indices: Optional[Sequence[int]] = None
- Optional list indices (in present levels) to include, e.g. [0, 1]
- includes the two lowest levels. Negative indicies can be used,
- e.g. [-1, -2] includes the two highest levels. Default of None
- will not limit the selection. An empty sequence will exluded all
- levels.
-
- Returns
- ----------
- bool
- True if level should be included.
- """
- if level not in present_levels:
- return False
- if include_indices is None:
- return True
- absolute_levels = [
- present_levels[level] for level in include_indices
- if -len(present_levels) <= level < len(present_levels)
- ]
- return level in absolute_levels
diff --git a/wsidicomizer/czi.py b/wsidicomizer/czi.py
index 4f21e8c..68fb361 100644
--- a/wsidicomizer/czi.py
+++ b/wsidicomizer/czi.py
@@ -1,4 +1,4 @@
-# Copyright 2021 SECTRA AB
+# Copyright 2021, 2022, 2023 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,22 +21,20 @@
from typing import Dict, List, Optional, Sequence, Tuple, Type, TypeVar, Union
from xml.etree import ElementTree
-from dateutil import parser as dateparser
import numpy as np
from czifile import CziFile, DirectoryEntryDV
+from dateutil import parser as dateparser
from opentile.metadata import Metadata
from PIL import Image
+from PIL.Image import Image as PILImage
from pydicom import Dataset
from pydicom.uid import UID as Uid
-from wsidicom import (WsiDicom, WsiDicomLabels, WsiDicomLevels,
- WsiDicomOverviews)
from wsidicom.geometry import Point, Region, Size, SizeMm
-from wsidicom.wsidicom import WsiDicom
-from wsidicomizer.common import MetaDicomizer, MetaImageData
+from wsidicomizer.base_dicomizer import BaseDicomizer
+from wsidicomizer.image_data import DicomizerImageData
from wsidicomizer.config import settings
-from wsidicomizer.dataset import create_base_dataset
-from wsidicomizer.encoding import Encoder, create_encoder
+from wsidicomizer.encoding import Encoder
ElementType = TypeVar('ElementType', str, int, float)
@@ -254,10 +252,10 @@ class CziBlock:
size: Size
-class CziImageData(MetaImageData):
+class CziImageData(DicomizerImageData):
def __init__(
self,
- filepath: str,
+ filepath: Path,
tile_size: int,
encoder: Encoder
) -> None:
@@ -332,7 +330,7 @@ def optical_paths(self) -> List[str]:
return self._metadata.channel_mapping
@cached_property
- def blank_decoded_tile(self) -> Image.Image:
+ def blank_decoded_tile(self) -> PILImage:
return Image.fromarray(self._create_blank_tile())
@cached_property
@@ -464,7 +462,7 @@ def _get_decoded_tile(
tile: Point,
z: float,
path: str
- ) -> Image.Image:
+ ) -> PILImage:
"""Return Image for tile.
Parameters
@@ -478,7 +476,7 @@ def _get_decoded_tile(
Returns
----------
- Image.Image
+ PILImage
Tile as Image.
"""
if (tile, z, path) not in self.tile_directory:
@@ -617,75 +615,57 @@ def _size_to_numpy_shape(self, size: Size) -> Tuple[int, ...]:
return size.height, size.width, self.samples_per_pixel
-class CziDicomizer(MetaDicomizer):
- @classmethod
- def open(
- cls,
- filepath: str,
+class CziDicomizer(BaseDicomizer):
+ def __init__(
+ self,
+ filepath: Path,
+ encoder: Encoder,
+ tile_size: int = 512,
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
- tile_size: Optional[int] = None,
- include_levels: Optional[Sequence[int]] = None,
- include_label: bool = True,
- include_overview: bool = True,
include_confidential: bool = True,
- encoding_format: str = 'jpeg',
- encoding_quality: int = 90,
- jpeg_subsampling: str = '420'
- ) -> WsiDicom:
- """Open czi file in filepath as WsiDicom object. Note that created
- instances always has a random UID.
-
- Parameters
- ----------
- filepath: str
- Path to tiff file
- modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
- Module datasets to use in files. If none, use default modules.
- tile_size: Optional[int]
- Tile size to use if not defined by file.
- include_levels: Sequence[int] = None
- Levels to include. Not implemented.
- include_label: bool = True
- Inclube label. Not implemented.
- include_overview: bool = True
- Include overview. Not implemented.
- include_confidential: bool = True
- Include confidential metadata. Not implemented.
- encoding_format: str = 'jpeg'
- Encoding format to use if re-encoding. 'jpeg' or 'jpeg2000'.
- encoding_quality: int = 90
- Quality to use if re-encoding. Do not use > 95 for jpeg. Use 100
- for lossless jpeg2000.
- jpeg_subsampling: str = '420'
- Subsampling option if using jpeg for re-encoding. Use '444' for
- no subsampling, '422' for 2x1 subsampling, and '420' for 2x2
- subsampling.
-
- Returns
- ----------
- WsiDicom
- WsiDicom object of czi file in filepath.
- """
- if tile_size is None:
- raise ValueError("Tile size required for czi")
- encoder = create_encoder(
- encoding_format,
- encoding_quality,
- jpeg_subsampling
+ ) -> None:
+ self._imaga_data = CziImageData(
+ filepath,
+ tile_size,
+ encoder
)
- base_dataset = create_base_dataset(modules)
- base_level_instance = cls._create_instance(
- CziImageData(filepath, tile_size, encoder),
- base_dataset,
- 'VOLUME',
- 0
+ self._metadata = self._imaga_data.metadata
+ super().__init__(
+ filepath,
+ encoder,
+ tile_size,
+ modules,
+ include_confidential
)
- levels = WsiDicomLevels.open([base_level_instance])
- labels = WsiDicomLabels.open([])
- overviews = WsiDicomOverviews.open([])
- return cls(levels, labels, overviews)
+
+ @property
+ def has_label(self) -> bool:
+ return False
+
+ @property
+ def has_overview(self) -> bool:
+ return False
+
+ @property
+ def pyramid_levels(self) -> List[int]:
+ return [0]
+
+ @property
+ def metadata(self) -> Metadata:
+ return self._metadata
@staticmethod
- def is_supported(filepath: str) -> bool:
+ def is_supported(filepath: Path) -> bool:
"""Return True if file in filepath is supported by CziFile."""
- return CziImageData.detect_format(Path(filepath)) is not None
+ return CziImageData.detect_format(filepath) is not None
+
+ def _create_level_image_data(self, level_index: int) -> DicomizerImageData:
+ if level_index != 0:
+ raise ValueError() # TODO
+ return CziImageData(self._filepath, self._tile_size, self._encoder)
+
+ def _create_label_image_data(self) -> DicomizerImageData:
+ return super()._create_label_image_data()
+
+ def _create_overview_image_data(self) -> DicomizerImageData:
+ return super()._create_overview_image_data()
diff --git a/wsidicomizer/dataset.py b/wsidicomizer/dataset.py
index a7ba335..93aa0a5 100644
--- a/wsidicomizer/dataset.py
+++ b/wsidicomizer/dataset.py
@@ -31,29 +31,6 @@
from opentile.common import Metadata
-def get_image_type(image_flavor: str, level_index: int) -> List[str]:
- """Return image type.
-
- Parameters
- ----------
- image_flavor: str
- Image flavor ('VOLUME', 'LABEL', 'OVERVIEW')
- level_index: int:
- Pyramidal level index of the image.
-
- Returns
- ----------
- List[str]
- Image type.
- """
- if image_flavor == 'VOLUME' and level_index == 0:
- resampled = 'NONE'
- else:
- resampled = 'RESAMPLED'
-
- return ['ORIGINAL', 'PRIMARY', image_flavor, resampled]
-
-
def create_base_dataset(
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
) -> Dataset:
diff --git a/wsidicomizer/encoding.py b/wsidicomizer/encoding.py
index 4350fbf..a241a98 100644
--- a/wsidicomizer/encoding.py
+++ b/wsidicomizer/encoding.py
@@ -44,7 +44,41 @@ def encode(
data: np.ndarray
) -> bytes:
"""Should return data as encoded bytes."""
- raise NotImplementedError
+ raise NotImplementedError()
+
+ @classmethod
+ def create_encoder(
+ cls,
+ format: str,
+ quality: float,
+ subsampling: Optional[str] = None
+ ) -> 'Encoder':
+ """Creates an encoder with specified settings.
+
+ Parameters
+ ----------
+ format: str
+            Format for encoder, either 'jpeg' or 'jpeg2000'.
+ quality: float
+ The encoding quality.
+ subsampling: Optional[str] = None
+ Subsampling setting (for jpeg).
+
+ Returns
+ ----------
+        Encoder
+ Encoder for settings.
+ """
+ if format == 'jpeg':
+ return JpegEncoder(
+ quality=int(quality),
+ subsampling=subsampling
+ )
+ elif format == 'jpeg2000':
+ return Jpeg2000Encoder(
+ quality=quality
+ )
+ raise ValueError("Encoder format must be 'jpeg' or 'jpeg2000'")
class JpegEncoder(Encoder):
@@ -179,36 +213,3 @@ def encode(
level=self._quality,
codecformat='J2K',
)
-
-
-def create_encoder(
- format: str,
- quality: float,
- subsampling: Optional[str] = None
-) -> Encoder:
- """Creates an encoder with specified settings.
-
- Parameters
- ----------
- format: str
- Format for encoder, either 'jpeg' or 'jpeg2000.
- quality: float
- The encoding quality.
- subsampling: Optional[str] = None
- Subsampling setting (for jpeg).
-
- Returns
- ----------
- Enocer
- Encoder for settings.
- """
- if format == 'jpeg':
- return JpegEncoder(
- quality=int(quality),
- subsampling=subsampling
- )
- elif format == 'jpeg2000':
- return Jpeg2000Encoder(
- quality=quality
- )
- raise ValueError("Encoder format must be 'jpeg' or 'jpeg2000'")
diff --git a/wsidicomizer/image_data.py b/wsidicomizer/image_data.py
new file mode 100644
index 0000000..3b7b403
--- /dev/null
+++ b/wsidicomizer/image_data.py
@@ -0,0 +1,69 @@
+# Copyright 2021, 2022, 2023 SECTRA AB
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABCMeta, abstractmethod
+
+import numpy as np
+from pydicom import config
+from wsidicom import ImageData
+from wsidicom.image_data import ImageOrigin
+
+from wsidicomizer.encoding import Encoder
+
+
+class DicomizerImageData(ImageData, metaclass=ABCMeta):
+ _default_z = None
+
+ def __init__(
+ self,
+ encoder: Encoder
+ ):
+ """Metaclass for Dicomized image data.
+
+ Parameters
+ ----------
+        encoder: Encoder
+ Encoder to use.
+ """
+ self._encoder = encoder
+
+ @property
+ @abstractmethod
+ def pyramid_index(self) -> int:
+ """Should return pyramid level for image data."""
+ raise NotImplementedError()
+
+ @property
+ def image_origin(self) -> ImageOrigin:
+ """Return a default ImageOrigin."""
+ return ImageOrigin()
+
+ def _encode(
+ self,
+ image_data: np.ndarray
+ ) -> bytes:
+ """Return image data encoded in jpeg using set quality and subsample
+ options.
+
+ Parameters
+ ----------
+ image_data: np.ndarray
+ Image data to encode.
+
+ Returns
+ ----------
+ bytes
+ Jpeg bytes.
+ """
+ return self._encoder.encode(image_data)
diff --git a/wsidicomizer/openslide.py b/wsidicomizer/openslide.py
index fb19ec5..1a4913f 100644
--- a/wsidicomizer/openslide.py
+++ b/wsidicomizer/openslide.py
@@ -1,4 +1,4 @@
-# Copyright 2021 SECTRA AB
+# Copyright 2021, 2022, 2023 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
# limitations under the License.
import ctypes
+from enum import Enum
import math
import os
import re
@@ -23,17 +24,15 @@
import numpy as np
from opentile.metadata import Metadata
from PIL import Image
+from PIL.Image import Image as PILImage
from pydicom import Dataset
from pydicom.uid import UID as Uid
-from wsidicom import (WsiDicom, WsiDicomLabels, WsiDicomLevels,
- WsiDicomOverviews)
from wsidicom.errors import WsiDicomNotFoundError
from wsidicom.geometry import Point, Region, Size, SizeMm
-from wsidicom.wsidicom import WsiDicom
-from wsidicomizer.common import MetaDicomizer, MetaImageData
-from wsidicomizer.dataset import create_base_dataset, populate_base_dataset
-from wsidicomizer.encoding import Encoder, create_encoder
+from wsidicomizer.base_dicomizer import BaseDicomizer
+from wsidicomizer.image_data import DicomizerImageData
+from wsidicomizer.encoding import Encoder
# On windows, use find_library to find directory with openslide dll in
# the Path environmental variable.
@@ -77,6 +76,11 @@
from openslide.lowlevel import _read_region, get_associated_image_names
+class OpenSlideAssociatedImageType(Enum):
+ LABEL = 'label'
+ MACRO = 'macro'
+
+
class OpenSlideMetadata(Metadata):
def __init__(self, slide: OpenSlide):
magnification = slide.properties.get(
@@ -99,7 +103,7 @@ def scanner_manufacturer(self) -> Optional[str]:
return self._scanner_manufacturer
-class OpenSlideImageData(MetaImageData):
+class OpenSlideImageData(DicomizerImageData):
def __init__(
self,
open_slide: OpenSlide,
@@ -196,7 +200,7 @@ class OpenSlideAssociatedImageData(OpenSlideImageData):
def __init__(
self,
open_slide: OpenSlide,
- image_type: str,
+ image_type: OpenSlideAssociatedImageType,
encoder: Encoder
):
"""Wraps a OpenSlide associated image (label or overview) to ImageData.
@@ -205,17 +209,20 @@ def __init__(
----------
open_slide: OpenSlide
OpenSlide object to wrap.
- image_type: str
+ image_type: OpenSlideAssociatedImageType
Type of image to wrap.
encoded: Encoder
Encoder to use.
"""
super().__init__(open_slide, encoder)
self._image_type = image_type
- if image_type not in get_associated_image_names(self._slide._osr):
- raise ValueError(f"{image_type} not in {self._slide}")
+ if (
+ image_type.value
+ not in get_associated_image_names(self._slide._osr)
+ ):
+ raise ValueError(f"{image_type.value} not in {self._slide}")
- image = self._slide.associated_images[image_type]
+ image = self._slide.associated_images[image_type.value]
no_alpha = Image.new('RGB', image.size, self.blank_color)
no_alpha.paste(image, mask=image.split()[3])
self._image_size = Size.from_tuple(no_alpha.size)
@@ -258,7 +265,7 @@ def _get_decoded_tile(
tile: Point,
z: float,
path: str
- ) -> Image.Image:
+ ) -> PILImage:
if tile != Point(0, 0):
raise ValueError("Point(0, 0) only valid tile for non-tiled image")
return self._decoded_image
@@ -360,7 +367,7 @@ def stitch_tiles(
region: Region,
path: str,
z: float
- ) -> Image.Image:
+ ) -> PILImage:
"""Overrides ImageData stitch_tiles() to read reagion directly from
openslide object.
@@ -375,7 +382,7 @@ def stitch_tiles(
Returns
----------
- Image.Image
+ PILImage
Stitched image
"""
if z not in self.focal_planes:
@@ -444,7 +451,7 @@ def _get_blank_encoded_frame(self, size: Size) -> bytes:
self._blank_encoded_frame_size = size
return self._blank_encoded_frame
- def _get_blank_decoded_frame(self, size: Size) -> Image.Image:
+ def _get_blank_decoded_frame(self, size: Size) -> PILImage:
"""Return cached blank decoded frame for size, or create frame if
cached frame not available or of wrong size.
@@ -469,7 +476,7 @@ def _get_blank_decoded_frame(self, size: Size) -> Image.Image:
def _get_region(
self,
region: Region
- ) -> Optional[Image.Image]:
+ ) -> Optional[PILImage]:
"""Return Image read from region in openslide image. If image data for
region is blank, None is returned. Transparent pixels are made into
background color
@@ -481,7 +488,7 @@ def _get_region(
Returns
----------
- Optional[Image.Image]
+ Optional[PILImage]
Image of region, or None if region is blank.
"""
if region.size.width < 0 or region.size.height < 0:
@@ -555,7 +562,7 @@ def _get_decoded_tile(
tile_point: Point,
z: float,
path: str
- ) -> Image.Image:
+ ) -> PILImage:
"""Return Image for tile. Image mode is RGB.
Parameters
@@ -569,7 +576,7 @@ def _get_decoded_tile(
Returns
----------
- Image.Image
+ PILImage
Tile as Image.
"""
if z not in self.focal_planes:
@@ -584,123 +591,75 @@ def _get_decoded_tile(
return tile
-class OpenSlideDicomizer(MetaDicomizer):
- @classmethod
- def open(
- cls,
- filepath: str,
+class OpenSlideDicomizer(BaseDicomizer):
+ def __init__(
+ self,
+ filepath: Path,
+ encoder: Encoder,
+ tile_size: int,
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
- tile_size: Optional[int] = None,
- include_levels: Optional[Sequence[int]] = None,
- include_label: bool = True,
- include_overview: bool = True,
include_confidential: bool = True,
- encoding_format: str = 'jpeg',
- encoding_quality: int = 90,
- jpeg_subsampling: str = '420'
- ) -> WsiDicom:
- """Open openslide file in filepath as WsiDicom object. Note that
- created instances always has a random UID.
-
- Parameters
- ----------
- filepath: str
- Path to tiff file
- modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
- Module datasets to use in files. If none, use default modules.
- tile_size: Optional[int]
- Tile size to use if not defined by file.
- include_levels: Sequence[int] = None
- Optional list of level indices to include. If None include all
- levels, if empty sequence exlude all levels. E.g. [0, 1]
- includes only the two lowest levels. Negative indicies can be used,
- e.g. [-1, -2] includes only the two highest levels.
- include_label: bool = True
- Inclube label.
- include_overview: bool = True
- Include overview.
- include_confidential: bool = True
- Include confidential metadata. Not implemented.
- encoding_format: str = 'jpeg'
- Encoding format to use if re-encoding. 'jpeg' or 'jpeg2000'.
- encoding_quality: int = 90
- Quality to use if re-encoding. Do not use > 95 for jpeg. Use 100
- for lossless jpeg2000.
- jpeg_subsampling: str = '420'
- Subsampling option if using jpeg for re-encoding. Use '444' for
- no subsampling, '422' for 2x1 subsampling, and '420' for 2x2
- subsampling.
+ ) -> None:
+ self._slide = OpenSlide(filepath)
+ self._pyramid_levels = self._get_pyramid_levels(self._slide)
+ self._metadata = OpenSlideMetadata(self._slide)
+ super().__init__(
+ filepath,
+ encoder,
+ tile_size,
+ modules,
+ include_confidential
+ )
- Returns
- ----------
- WsiDicom
- WsiDicom object of openslide file in filepath.
- """
- if tile_size is None:
- raise ValueError("Tile size required for open slide")
- encoder = create_encoder(
- encoding_format,
- encoding_quality,
- subsampling=jpeg_subsampling
+ @property
+ def has_label(self) -> bool:
+ return (
+ OpenSlideAssociatedImageType.LABEL.value
+ in self._slide.associated_images
)
- slide = OpenSlide(filepath)
- pyramid_levels = cls._get_pyramid_levels(slide)
- metadata = OpenSlideMetadata(slide)
- base_dataset = populate_base_dataset(
- metadata,
- create_base_dataset(modules),
- include_confidential
+
+ @property
+ def has_overview(self) -> bool:
+ return (
+ OpenSlideAssociatedImageType.MACRO.value
+ in self._slide.associated_images
)
- instance_number = 0
- level_instances = [
- cls._create_instance(
- OpenSlideLevelImageData(
- slide,
- level_index,
- tile_size,
- encoder
- ),
- base_dataset,
- 'VOLUME',
- instance_number+level_index
- )
- for level_index in range(slide.level_count)
- if cls._is_included_level(
- pyramid_levels[level_index],
- pyramid_levels,
- include_levels
- )
- ]
- instance_number += len(level_instances)
- if include_label and 'label' in slide.associated_images:
- label_instances = [cls._create_instance(
- OpenSlideAssociatedImageData(slide, 'label', encoder),
- base_dataset,
- 'LABEL',
- instance_number
- )]
- else:
- label_instances = []
- instance_number += len(label_instances)
- if include_overview and 'macro' in slide.associated_images:
- overview_instances = [cls._create_instance(
- OpenSlideAssociatedImageData(slide, 'macro', encoder),
- base_dataset,
- 'OVERVIEW',
- instance_number
- )]
- else:
- overview_instances = []
- levels = WsiDicomLevels.open(level_instances)
- labels = WsiDicomLabels.open(label_instances)
- overviews = WsiDicomOverviews.open(overview_instances)
- return cls(levels, labels, overviews)
+
+ @property
+ def metadata(self) -> Metadata:
+ return self._metadata
+
+ @property
+ def pyramid_levels(self) -> List[int]:
+ return self._pyramid_levels
@staticmethod
- def is_supported(filepath: str) -> bool:
+ def is_supported(filepath: Path) -> bool:
"""Return True if file in filepath is supported by OpenSlide."""
return OpenSlide.detect_format(str(filepath)) is not None
+ def _create_level_image_data(self, level_index: int) -> DicomizerImageData:
+ return OpenSlideLevelImageData(
+ self._slide,
+ level_index,
+ self._tile_size,
+ self._encoder
+ )
+
+ def _create_label_image_data(self) -> DicomizerImageData:
+ return OpenSlideAssociatedImageData(
+ self._slide,
+ OpenSlideAssociatedImageType.LABEL,
+ self._encoder
+ )
+
+ def _create_overview_image_data(self) -> DicomizerImageData:
+ return OpenSlideAssociatedImageData(
+ self._slide,
+ OpenSlideAssociatedImageType.MACRO,
+ self._encoder
+ )
+
@staticmethod
def _get_pyramid_levels(slide: OpenSlide) -> List[int]:
"""Return list of pyramid levels present in openslide slide."""
diff --git a/wsidicomizer/opentile.py b/wsidicomizer/opentile.py
index 43f9bbc..ad0e667 100644
--- a/wsidicomizer/opentile.py
+++ b/wsidicomizer/opentile.py
@@ -1,4 +1,4 @@
-# Copyright 2021 SECTRA AB
+# Copyright 2021, 2022, 2023 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,24 +13,23 @@
# limitations under the License.
from pathlib import Path
-from typing import List, Optional, Sequence, Tuple, Union
+from typing import List, Optional, Sequence, Union
+from opentile import OpenTile
+from opentile.common import OpenTilePage
+from opentile.metadata import Metadata
from PIL import Image
from pydicom import Dataset
from pydicom.uid import JPEG2000, UID, JPEG2000Lossless, JPEGBaseline8Bit
-from wsidicom import (WsiDicom, WsiDicomLabels, WsiDicomLevels,
- WsiDicomOverviews, WsiInstance)
+from tifffile.tifffile import COMPRESSION, PHOTOMETRIC
from wsidicom.geometry import Point, Size, SizeMm
-from tifffile.tifffile import COMPRESSION, PHOTOMETRIC
-from opentile import OpenTile
-from opentile.common import OpenTilePage, Tiler
-from wsidicomizer.common import MetaDicomizer, MetaImageData
-from wsidicomizer.dataset import (create_base_dataset, populate_base_dataset)
-from wsidicomizer.encoding import Encoder, create_encoder
+from wsidicomizer.base_dicomizer import BaseDicomizer
+from wsidicomizer.image_data import DicomizerImageData
+from wsidicomizer.encoding import Encoder
-class OpenTileImageData(MetaImageData):
+class OpenTileImageData(DicomizerImageData):
def __init__(
self,
tiled_page: OpenTilePage,
@@ -261,162 +260,54 @@ def get_transfer_syntax(self) -> UID:
)
-class OpenTileDicomizer(MetaDicomizer):
- @classmethod
- def open(
- cls,
- filepath: str,
+class OpenTileDicomizer(BaseDicomizer):
+ def __init__(
+ self,
+ filepath: Path,
+ encoder: Encoder,
+ tile_size: int,
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
- tile_size: int = 512,
- include_levels: Optional[Sequence[int]] = None,
- include_label: bool = True,
- include_overview: bool = True,
include_confidential: bool = True,
- encoding_format: str = 'jpeg',
- encoding_quality: int = 90,
- jpeg_subsampling: str = '420'
- ) -> WsiDicom:
- """Open tiff file in filepath as WsiDicom object. Note that created
- instances always has a random UID.
-
- Parameters
- ----------
- filepath: str
- Path to tiff file
- modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
- Module datasets to use in files. If none, use default modules.
- tile_size: int = 512
- Tile size to use if not defined by file.
- include_levels: Sequence[int] = None
- Optional list of level indices to include. If None include all
- levels, if empty sequence exlude all levels. E.g. [0, 1]
- includes only the two lowest levels. Negative indicies can be used,
- e.g. [-1, -2] includes only the two highest levels.
- include_label: bool = True
- Inclube label.
- include_overview: bool = True
- Include overview.
- include_confidential: bool = True
- Include confidential metadata.
- encoding_format: str = 'jpeg'
- Encoding format to use if re-encoding. 'jpeg' or 'jpeg2000'.
- encoding_quality: int = 90
- Quality to use if re-encoding. Do not use > 95 for jpeg. Use 100
- for lossless jpeg2000.
- jpeg_subsampling: str = '420'
- Subsampling option if using jpeg for re-encoding. Use '444' for
- no subsampling, '422' for 2x1 subsampling, and '420' for 2x2
- subsampling.
-
- Returns
- ----------
- WsiDicom
- WsiDicom object of tiff file in filepath.
- """
- encoder = create_encoder(
- encoding_format,
- encoding_quality,
- subsampling=jpeg_subsampling
- )
- base_dataset = create_base_dataset(modules)
- tiler = OpenTile.open(filepath, tile_size)
- level_instances, label_instances, overview_instances = cls._open_tiler(
- tiler,
+ ) -> None:
+ self._tiler = OpenTile.open(filepath, tile_size)
+ self._metadata = self._tiler.metadata
+ super().__init__(
+ filepath,
encoder,
- base_dataset,
- include_levels=include_levels,
- include_label=include_label,
- include_overview=include_overview,
- include_confidential=include_confidential
+ tile_size,
+ modules,
+ include_confidential
)
- levels = WsiDicomLevels.open(level_instances)
- labels = WsiDicomLabels.open(label_instances)
- overviews = WsiDicomOverviews.open(overview_instances)
- return cls(levels, labels, overviews)
+
+ @property
+ def has_label(self) -> bool:
+ return len(self._tiler.labels) > 0
+
+ @property
+ def has_overview(self) -> bool:
+ return len(self._tiler.overviews) > 0
+
+ @property
+ def metadata(self) -> Metadata:
+ return self._metadata
+
+ @property
+ def pyramid_levels(self) -> List[int]:
+ return [level.pyramid_index for level in self._tiler.levels]
@staticmethod
- def is_supported(filepath: str) -> bool:
+ def is_supported(filepath: Path) -> bool:
"""Return True if file in filepath is supported by OpenTile."""
- return OpenTile.detect_format(Path(filepath)) is not None
+ return OpenTile.detect_format(filepath) is not None
- @classmethod
- def _open_tiler(
- cls,
- tiler: Tiler,
- encoder: Encoder,
- base_dataset: Dataset,
- include_levels: Optional[Sequence[int]] = None,
- include_label: bool = True,
- include_overview: bool = True,
- include_confidential: bool = True
- ) -> Tuple[List[WsiInstance], List[WsiInstance], List[WsiInstance]]:
- """Open tiler to produce WsiInstances.
-
- Parameters
- ----------
- tiler: Tiler
- Tiler that can produce WsiInstances.
- encoder: Encoder
- Encoder to use for re-encoding.
- base_dataset: Dataset
- Base dataset to include in files.
- include_levels: Optional[Sequence[int]] = None
- Optional list indices (in present levels) to include, e.g. [0, 1]
- includes the two lowest levels. Negative indicies can be used,
- e.g. [-1, -2] includes the two highest levels.
- include_label: bool = True
- Include label(s), default true.
- include_overwiew: bool = True
- Include overview(s), default true.
- include_confidential: bool = True
- Include confidential metadata.
+ def _create_level_image_data(self, level_index: int) -> DicomizerImageData:
+ level = self._tiler.levels[level_index]
+ return OpenTileImageData(level, self._encoder)
- Returns
- ----------
- Tuple[List[WsiInstance], List[WsiInstance], List[WsiInstance]]
- Lists of created level, label and overivew instances.
- """
- base_dataset = populate_base_dataset(
- tiler.metadata,
- base_dataset,
- include_confidential
- )
- instance_number = 0
- level_instances = [
- cls._create_instance(
- OpenTileImageData(level, encoder),
- base_dataset,
- 'VOLUME',
- instance_number+index
- )
- for index, level in enumerate(tiler.levels)
- if cls._is_included_level(
- level.pyramid_index,
- [level.pyramid_index for level in tiler.levels],
- include_levels
- )
- ]
- instance_number += len(level_instances)
- label_instances = [
- cls._create_instance(
- OpenTileImageData(label, encoder),
- base_dataset,
- 'LABEL',
- instance_number+index
- )
- for index, label in enumerate(tiler.labels)
- if include_label
- ]
- instance_number += len(level_instances)
- overview_instances = [
- cls._create_instance(
- OpenTileImageData(overview, encoder),
- base_dataset,
- 'OVERVIEW',
- instance_number+index
- )
- for index, overview in enumerate(tiler.overviews)
- if include_overview
- ]
+ def _create_label_image_data(self) -> DicomizerImageData:
+ label = self._tiler.labels[0]
+ return OpenTileImageData(label, self._encoder)
- return level_instances, label_instances, overview_instances
+ def _create_overview_image_data(self) -> DicomizerImageData:
+ overview = self._tiler.overviews[0]
+ return OpenTileImageData(overview, self._encoder)
diff --git a/wsidicomizer/interface.py b/wsidicomizer/wsidicomizer.py
similarity index 80%
rename from wsidicomizer/interface.py
rename to wsidicomizer/wsidicomizer.py
index 782edb4..ff96d73 100644
--- a/wsidicomizer/interface.py
+++ b/wsidicomizer/wsidicomizer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 SECTRA AB
+# Copyright 2021, 2022, 2023 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,34 +18,38 @@
from pydicom.dataset import Dataset
from pydicom.uid import UID, generate_uid
from wsidicom import WsiDicom
+from PIL.Image import Image as PILImage
-from wsidicomizer.common import MetaDicomizer
+from wsidicomizer.base_dicomizer import BaseDicomizer
from wsidicomizer.czi import CziDicomizer
+from wsidicomizer.encoding import Encoder
from wsidicomizer.openslide import OpenSlideDicomizer
from wsidicomizer.opentile import OpenTileDicomizer
# List of supported Dicomizers in prioritization order.
-SUPPORTED_TILE_SOURCES: List[Type[MetaDicomizer]] = [
+SUPPORTED_DICOMIZERS: List[Type[BaseDicomizer]] = [
OpenTileDicomizer,
CziDicomizer,
OpenSlideDicomizer
]
-class WsiDicomizer:
+class WsiDicomizer(WsiDicom):
"""Interface for Dicomizing files."""
- @staticmethod
+ @classmethod
def open(
- filepath: str,
+ cls,
+ filepath: Union[str, Path],
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
- tile_size: Optional[int] = 512,
+ tile_size: int = 512,
include_levels: Optional[Sequence[int]] = None,
include_label: bool = True,
include_overview: bool = True,
include_confidential: bool = True,
encoding_format: str = 'jpeg',
encoding_quality: int = 90,
- jpeg_subsampling: str = '420'
+ jpeg_subsampling: str = '420',
+ label: Optional[Union[PILImage, str, Path]] = None
) -> WsiDicom:
"""Open data in file in filepath as WsiDicom.
@@ -55,7 +59,7 @@ def open(
Path to file
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
Module datasets to use in files. If none, use default modules.
- tile_size: Optional[int] = 512
+ tile_size: int = 512
Tile size to use if not defined by file.
include_levels: Optional[Sequence[int]] = None
Optional list indices (in present levels) to include, e.g. [0, 1]
@@ -76,34 +80,44 @@ def open(
Subsampling option if using jpeg for re-encoding. Use '444' for
no subsampling, '422' for 2x1 subsampling, and '420' for 2x2
subsampling.
+ label: Optional[Union[PILImage, str, Path]] = None
+ Optional label image to use instead of label found in file.
+
Returns
----------
WsiDicom
WsiDicom object of file.
"""
- selected_tile_source = next(
+ if not isinstance(filepath, Path):
+ filepath = Path(filepath)
+
+ selected_dicomizer = next(
(
- tile_source for tile_source in SUPPORTED_TILE_SOURCES
- if tile_source.is_supported(filepath)
+ dicomizer for dicomizer in SUPPORTED_DICOMIZERS
+ if dicomizer.is_supported(filepath)
),
None
)
- if selected_tile_source is None:
+ if selected_dicomizer is None:
raise NotImplementedError(f"{filepath} is not supported")
+ encoder = Encoder.create_encoder(
+ encoding_format,
+ encoding_quality,
+ subsampling=jpeg_subsampling
+ )
- return selected_tile_source.open(
+ dicomizer = selected_dicomizer(
filepath,
- modules,
+ encoder,
tile_size,
- include_levels,
- include_label,
- include_overview,
- include_confidential,
- encoding_format,
- encoding_quality,
- jpeg_subsampling
+ modules,
+ include_confidential
)
+ levels = dicomizer.create_levels(include_levels)
+ labels = dicomizer.create_labels(include_label, label)
+ overviews = dicomizer.create_oveviews(include_overview)
+ return cls(levels, labels, overviews)
@classmethod
def convert(
@@ -111,7 +125,7 @@ def convert(
filepath: str,
output_path: Optional[str] = None,
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,
- tile_size: Optional[int] = 512,
+ tile_size: int = 512,
uid_generator: Callable[..., UID] = generate_uid,
include_levels: Optional[Sequence[int]] = None,
include_label: bool = True,
@@ -122,7 +136,8 @@ def convert(
encoding_format: str = 'jpeg',
encoding_quality: int = 90,
jpeg_subsampling: str = '420',
- offset_table: Optional[str] = 'bot'
+ offset_table: Optional[str] = 'bot',
+ label: Optional[Union[PILImage, str, Path]] = None
) -> List[str]:
"""Convert data in file to DICOM files in output path. Created
instances get UID from uid_generator. Closes when finished.
@@ -135,7 +150,7 @@ def convert(
Folder path to save files to.
modules: Optional[Union[Dataset, Sequence[Dataset]]] = None
Module datasets to use in files. If none, use default modules.
- tile_size: Optional[int] = 512
+ tile_size: int = 512
Tile size to use if not defined by file.
uid_generator: Callable[..., UID] = generate_uid
Function that can gernerate unique identifiers.
@@ -166,6 +181,8 @@ def convert(
offset_table: Optional[str] = 'bot'
Offset table to use, 'bot' basic offset table, 'eot' extended
offset table, None - no offset table.
+ label: Optional[Union[PILImage, str, Path]] = None
+ Optional label image to use instead of label found in file.
Returns
----------
@@ -182,7 +199,8 @@ def convert(
include_confidential,
encoding_format,
encoding_quality,
- jpeg_subsampling
+ jpeg_subsampling,
+ label
) as wsi:
if output_path is None:
output_path = str(Path(filepath).parents[0].joinpath(
@@ -191,7 +209,7 @@ def convert(
try:
os.mkdir(output_path)
except FileExistsError:
- ValueError(f'Output path {output_path} already excists')
+ ValueError(f'Output path {output_path} already exists')
created_files = wsi.save(
output_path,
uid_generator,
| Label-manipulation
First of all: Thank you very much for this great and easy-to-use tool.
May I ask where the WSI-label is retrieved in your class? The background to my question is that we want to manipulate the label.
With kind regards
Option to replace or mask label and overview images
It would be nice to be able to replace or mask/replace part of a wsis label and/or overview image during conversion.
| Hi @catweis and thanks for the feedback.
Can you tell me more on how you want to manipulate the label? Do you want to mask or replace parts in order to anonymize before conversion?
Thanks
Dear Erik,
thank you for your response.
Exactly, we want to change the label to remove our lab-IDs. Or we want to replace it with a scientific ID.
This can be done before conversion or in the course of conversion.
With kind regards
> On 27 Jan 2023, at 07:45, Erik Gabrielsson ***@***.***> wrote:
>
>
> Hi @catweis <https://github.com/catweis> and thanks for the feedback.
>
> Can you tell me more on how you want to manipulate the label? Do you want to mask or replace parts in order to anonymize before conversion?
> Thanks
>
> —
> Reply to this email directly, view it on GitHub <https://github.com/imi-bigpicture/wsidicomizer/issues/50#issuecomment-1406091770>, or unsubscribe <https://github.com/notifications/unsubscribe-auth/ABY4ZSXDWXQHRRVOWLNQPX3WUNVIBANCNFSM6AAAAAAUFCWQUI>.
> You are receiving this because you were mentioned.
>
Thanks for the details.
So in this case I understand it would be enough to add a optional parameter, specifying a image in some format (either path to a file or a pillow image), that the converter would use instead of any label image found in the original file? I.e. we don´t have to modify the original label image?
Thank you for your idea.
You are right, we do not need to modifiy the image, but we just need to exchange it.
May I ask where in you code we should add our image path and not the path to the original label?
With kind regards
Gesendet: Montag, 30. Januar 2023 um 08:50 Uhr
Von: "Erik Gabrielsson" ***@***.***>
An: "imi-bigpicture/wsidicomizer" ***@***.***>
Cc: "catweis" ***@***.***>, "Mention" ***@***.***>
Betreff: Re: [imi-bigpicture/wsidicomizer] Label-manipulation (Issue #50)
Thanks for the details.
So in this case I understand it would be enough to add a optional parameter, specifying a image in some format (either path to a file or a pillow image), that the converter would use instead of any label image found in the original file? I.e. we don´t have to modify the original label image?
—
Reply to this email directly, view it on GitHub, or unsubscribe.
You are receiving this because you were mentioned.Message ID: ***@***.***>
Hi catweis and thanks for the clarification,
The use case for providing a external image as label is not only relevant for wsidicomizer but also for wsidicom. So I would start with adding a classmethod to the [`Instance class`](https://github.com/imi-bigpicture/wsidicom/blob/686dbb8685552019c65affcfa9ab2f3d8a13b39d/wsidicom/instance.py#L37) to create a label instance from file. We can then use [this instance](https://github.com/imi-bigpicture/wsidicomizer/blob/main/wsidicomizer/opentile.py#L333) and create the wsi-object (which is of WsiDicom-class).
Is this something you want to work with? Otherwise I can do it.
Thank you very much for your tips. So, the instance class should get a new classmethod to load an image from another path? And then for the wsi-dicomizer file there should be a new option like "no label" to load another image as label?
I will try to implement that. However, to be honest, if you would do it, it would be much more efficient.
Meanwhile, I tried to modify the label. However, it is hard for me to find where the pixel data is (in your code)? My idea would be to modify directly or exchange the pixel data for the label images.
I also thought about changing the dcm files after creation. However, I need help understanding the system behind several dcm files in the folder and, finally (when uploading to Omero) to images (one overview and the WSI).
Hi catweis,
I have started working on adding a parameter to replace the label, and it seems to work. I hope to get it done during next week.
> Meanwhile, I tried to modify the label. However, it is hard for me to find where the pixel data is (in your code)? My idea would be to modify directly or exchange the pixel data for the label images.
Depending on what format your original file is the pixel data will come from different classes.
> I also thought about changing the dcm files after creation. However, I need help understanding the system behind several dcm files in the folder and, finally (when uploading to Omero) to images (one overview and the WSI).
There is one dcm file for each pyramid level, label image, and overview image. So to modify the label image you would have to find the dcm file with [ImageType](https://dicom.innolitics.com/ciods/vl-whole-slide-microscopy-image/whole-slide-microscopy-image/00080008) 'LABEL'. You could then change the pixel data (most likely only one frame) and the image size (if not the same).
| 2023-02-07T12:24:02 | 0.0 | [] | [] |
||
imi-bigpicture/wsidicomizer | imi-bigpicture__wsidicomizer-13 | a7686765b72949cd8b91584c22d7dc1bbfe0048a | diff --git a/wsidicomizer/common.py b/wsidicomizer/common.py
index b3c7a3d..e7c903b 100644
--- a/wsidicomizer/common.py
+++ b/wsidicomizer/common.py
@@ -17,6 +17,7 @@
from typing import Optional, Sequence, Union
import numpy as np
+from PIL import Image
from pydicom import Dataset, config
from pydicom.dataset import Dataset
from pydicom.sequence import Sequence as DicomSequence
@@ -147,14 +148,17 @@ def create_instance_dataset(
dataset.ExtendedDepthOfField = 'NO'
return WsiDataset(dataset)
- def _encode(self, image_data: np.ndarray) -> bytes:
+ def _encode(
+ self,
+ image_data: np.ndarray
+ ) -> bytes:
"""Return image data encoded in jpeg using set quality and subsample
options.
Parameters
----------
image_data: np.ndarray
- Image data to encode, in BGRA-pixel format.
+ Image data to encode.
Returns
----------
diff --git a/wsidicomizer/encoding.py b/wsidicomizer/encoding.py
index 4954fca..2c0996c 100644
--- a/wsidicomizer/encoding.py
+++ b/wsidicomizer/encoding.py
@@ -14,55 +14,88 @@
from abc import ABCMeta, abstractmethod
from typing import Union, Optional
-
import numpy as np
from imagecodecs import jpeg2k_encode, jpeg8_encode
from pydicom.uid import UID, JPEGBaseline8Bit, JPEG2000Lossless, JPEG2000
class Encoder(metaclass=ABCMeta):
+ """Abstract class for an image encoder."""
+
@property
@abstractmethod
def transfer_syntax(self) -> UID:
+ """Should return correspodning transfer syntax for encoded data."""
raise NotImplementedError
@property
@abstractmethod
- def quality(self) -> int:
+ def quality(self) -> Union[int, float]:
+ """Should return quality setting of encoder."""
raise NotImplementedError
@abstractmethod
- def encode(self, data: np.ndarray) -> bytes:
+ def encode(
+ self,
+ data: np.ndarray
+ ) -> bytes:
+ """Should return data as encoded bytes."""
raise NotImplementedError
class JpegEncoder(Encoder):
+ """Encoder for JPEG."""
+
def __init__(
self,
quality: int = 90,
- subsampling: Optional[str] = '422',
- colorspace: Optional[Union[int, str]] = None
+ subsampling: Optional[str] = '422'
) -> None:
+ """Creates a JPEG encoder with specified settings.
+
+ Parameters
+ ----------
+ quality: int = 90
+ The encoding quality. To not use higher than 95.
+ subsampling: Optional[str] = '422'
+ Subsampling option.
+
+ """
self._quality = quality
self._subsampling = subsampling
- self._colorspace = colorspace
self._outcolorspace = 'YCBCR'
@property
def transfer_syntax(self) -> UID:
+ """Transfer syntax of encoder."""
return JPEGBaseline8Bit
@property
def quality(self) -> int:
+ """Quality setting of encoder"""
return self._quality
- def encode(self, data: np.ndarray) -> bytes:
+ def encode(
+ self,
+ data: np.ndarray
+ ) -> bytes:
+ """Encodes data as JPEG. Converts data to uint8 before conversion.
+
+ Parameters
+ ----------
+ data: np.ndarray
+ Data to encode.
+
+ Returns
+ ----------
+ bytes:
+ JPEG bytes.
+ """
if data.dtype != np.dtype(np.uint8):
data = (data * 255 / np.iinfo(data.dtype).max).astype(np.uint8)
return jpeg8_encode(
data,
level=self._quality,
- colorspace=self._colorspace,
outcolorspace=self._outcolorspace,
subsampling=self._subsampling
)
@@ -71,40 +104,81 @@ def encode(self, data: np.ndarray) -> bytes:
class Jpeg2000Encoder(Encoder):
def __init__(
self,
- quality: int = 90,
+ quality: float = 20.0
) -> None:
+ """Creates a JPEG2000 encoder with specified settings.
+
+ Parameters
+ ----------
+ quality: float = 20.0.
+ The encoding quality as peak signal to noise (PSNR). Use < 1 for
+ lossless quality. Up to 60 gives acceptable results.
+
+ """
self._quality = quality
- if self.quality == 100:
+ if self.quality < 1:
self._transfer_syntax = JPEG2000Lossless
else:
self._transfer_syntax = JPEG2000
@property
def transfer_syntax(self) -> UID:
+ """Transfer syntax of encoder."""
return self._transfer_syntax
@property
- def quality(self) -> int:
+ def quality(self) -> float:
+ """Quality setting of encoder"""
return self._quality
- def encode(self, data: np.ndarray) -> bytes:
+ def encode(
+ self,
+ data: np.ndarray
+ ) -> bytes:
+ """Encodes data as JPEG2000.
+
+ Parameters
+ ----------
+ data: np.ndarray
+ Numpy array of data to encode.
+
+ Returns
+ ----------
+ bytes
+ JPEG2000 bytes.
+ """
return jpeg2k_encode(
data,
- level=self._quality
+ level=self._quality,
+ codecformat='J2K',
)
def create_encoder(
format: str,
- quality: int,
- subsampling: Optional[str] = None,
- colorspace: Optional[Union[int, str]] = None
+ quality: float,
+ subsampling: Optional[str] = None
) -> Encoder:
+ """Creates an encoder with specified settings.
+
+ Parameters
+ ----------
+ format: str
+ Format for encoder, either 'jpeg' or 'jpeg2000.
+ quality: float
+ The encoding quality.
+ subsampling: Optional[str] = None
+ Subsampling setting (for jpeg).
+
+ Returns
+ ----------
+ Enocer
+ Encoder for settings.
+ """
if format == 'jpeg':
return JpegEncoder(
- quality=quality,
- subsampling=subsampling,
- colorspace=colorspace
+ quality=int(quality),
+ subsampling=subsampling
)
elif format == 'jpeg2000':
return Jpeg2000Encoder(
diff --git a/wsidicomizer/openslide.py b/wsidicomizer/openslide.py
index 07ff44c..93250b3 100644
--- a/wsidicomizer/openslide.py
+++ b/wsidicomizer/openslide.py
@@ -25,9 +25,9 @@
from pydicom.uid import UID as Uid
from wsidicom import (WsiDicom, WsiDicomLabels, WsiDicomLevels,
WsiDicomOverviews)
-from wsidicom.geometry import Point, Size, SizeMm, Region
-from wsidicom.wsidicom import WsiDicom
from wsidicom.errors import WsiDicomNotFoundError
+from wsidicom.geometry import Point, Region, Size, SizeMm
+from wsidicom.wsidicom import WsiDicom
from wsidicomizer.common import MetaDicomizer, MetaImageData
from wsidicomizer.dataset import create_base_dataset
@@ -47,21 +47,18 @@
os.environ['PATH'] = (
openslide_dir + os.pathsep + os.environ['PATH']
)
+"""
+OpenSlideImageData uses proteted functions from OpenSlide to get image data as
+numpy arrays instead of pillow images. The proteted function _read_region is
+used to get raw data from the OpenSlide C API. We consider this safe, as these
+directly map to the Openslide C API and are thus not likely to change.
+"""
from openslide import OpenSlide
from openslide._convert import argb2rgba as convert_argb_to_rgba
-from openslide.lowlevel import (ArgumentError, _read_associated_image,
- _read_region, get_associated_image_dimensions,
+from openslide.lowlevel import (ArgumentError, _read_region,
get_associated_image_names)
-"""
-OpenSlideWrapper uses private functions from OpenSlide to get image data as
-numpy arrays instead of pillow images. The private functions
-(_read_associated_image, _read_region) are used to get raw data from the
-OpenSlide C API. We consider this safe, as these directly map to the Openslide
-C API and are thus not likely to change.
-"""
-
class OpenSlideImageData(MetaImageData, metaclass=ABCMeta):
def __init__(
@@ -79,47 +76,21 @@ def __init__(
Encoder to use.
"""
super().__init__(encoder)
-
- self._open_slide = open_slide
+ self._slide = open_slide
@property
def files(self) -> List[Path]:
- return [Path(self._open_slide._filename)]
+ return [Path(self._slide._filename)]
@property
def transfer_syntax(self) -> Uid:
"""The uid of the transfer syntax of the image."""
return self._encoder.transfer_syntax
- @staticmethod
- def _make_transparent_pixels_white(image_data: np.ndarray) -> np.ndarray:
- """Return image data where all pixels with transparency is replaced
- by white pixels. Openslide returns fully transparent pixels with
- RGBA-value 0, 0, 0, 0 for 'sparse' areas. At the edge to 'sparse' areas
- there can also be partial transparency. This function 'aggresively'
- removes all transparent pixels (instead of calculating RGB-values
- with transparency for partial transparent pixels) as it is much,
- simpler, faster, and the partial transparency is at the edge of the
- ROIs.
-
- Parameters
- ----------
- image_data: np.ndarray
- Image data in RGBA pixel format to remove transparency from.
-
- Returns
- ----------
- image_data: np.ndarray
- Image data in RGBA pixel format without transparency.
- """
- transparency = image_data[:, :, 3]
- image_data[transparency != 255, :] = 255
- return image_data
-
def close(self) -> None:
"""Close the open slide object, if not already closed."""
try:
- self._open_slide.close()
+ self._slide.close()
except ArgumentError:
# Slide already closed
pass
@@ -145,23 +116,15 @@ def __init__(
"""
super().__init__(open_slide, encoder)
self._image_type = image_type
- if image_type not in get_associated_image_names(self._open_slide._osr):
- raise ValueError(f"{image_type} not in {self._open_slide}")
+ if image_type not in get_associated_image_names(self._slide._osr):
+ raise ValueError(f"{image_type} not in {self._slide}")
- width, height = get_associated_image_dimensions(
- self._open_slide._osr,
- image_type
- )
- buffer = (width * height * c_uint32)()
- _read_associated_image(self._open_slide._osr, image_type, buffer)
- image_data: np.ndarray = np.frombuffer(buffer, dtype=np.uint8)
- image_data.shape = (width, height, 4)
- image_data = self._make_transparent_pixels_white(image_data)
- self._encoded_image = self._encode(image_data)
- convert_argb_to_rgba(image_data)
- self._decoded_image = Image.fromarray(image_data).convert('RGB')
- (height, width) = image_data.shape[0:2]
- self._image_size = Size(width, height)
+ image = self._slide.associated_images[image_type]
+ no_alpha = Image.new('RGB', image.size, self.blank_color)
+ no_alpha.paste(image, mask=image.split()[3])
+ self._image_size = Size.from_tuple(no_alpha.size)
+ self._decoded_image = no_alpha
+ self._encoded_image = self._encode(np.asarray(no_alpha))
@property
def image_size(self) -> Size:
@@ -191,7 +154,7 @@ def _get_encoded_tile(
path: str
) -> bytes:
if tile != Point(0, 0):
- raise ValueError
+ raise ValueError("Point(0, 0) only valid tile for non-tiled image")
return self._encoded_image
def _get_decoded_tile(
@@ -201,7 +164,7 @@ def _get_decoded_tile(
path: str
) -> Image.Image:
if tile != Point(0, 0):
- raise ValueError
+ raise ValueError("Point(0, 0) only valid tile for non-tiled image")
return self._decoded_image
@@ -228,23 +191,43 @@ def __init__(
Encoder to use.
"""
self._tile_size = Size(tile_size, tile_size)
- self._open_slide = open_slide
+ self._slide = open_slide
self._level_index = level_index
self._image_size = Size.from_tuple(
- self._open_slide.level_dimensions[self._level_index]
+ self._slide.level_dimensions[self._level_index]
)
self._downsample = int(
- self._open_slide.level_downsamples[self._level_index]
+ self._slide.level_downsamples[self._level_index]
)
self._pyramid_index = int(math.log2(self.downsample))
- base_mpp_x = float(self._open_slide.properties['openslide.mpp-x'])
- base_mpp_y = float(self._open_slide.properties['openslide.mpp-y'])
+ base_mpp_x = float(self._slide.properties['openslide.mpp-x'])
+ base_mpp_y = float(self._slide.properties['openslide.mpp-y'])
self._pixel_spacing = SizeMm(
base_mpp_x * self.downsample / 1000.0,
base_mpp_y * self.downsample / 1000.0
)
+ # Get set image origin and size to bounds if available
+ bounds_x = self._slide.properties.get('openslide.bounds-x', 0)
+ bounds_y = self._slide.properties.get('openslide.bounds-y', 0)
+ bounds_w = self._slide.properties.get('openslide.bounds-width', None)
+ bounds_h = self._slide.properties.get('openslide.bounds-height', None)
+ self._offset = Point(int(bounds_x), int(bounds_y))
+ if None not in [bounds_w, bounds_h]:
+ self._image_size = (
+ Size(int(bounds_w), int(bounds_h)) // self.downsample
+ )
+ else:
+ self._image_size = Size.from_tuple(
+ self._slide.level_dimensions[self._level_index]
+ )
+
+ self._blank_encoded_frame = bytes()
+ self._blank_encoded_frame_size = None
+ self._blank_decoded_frame = None
+ self._blank_decoded_frame_size = None
+
@property
def image_size(self) -> Size:
"""The pixel size of the image."""
@@ -293,70 +276,137 @@ def stitch_tiles(
Image.Image
Stitched image
"""
- if path not in self.optical_paths:
- raise WsiDicomNotFoundError(f"Optical path {path}", str(self))
if z not in self.focal_planes:
- raise WsiDicomNotFoundError(f"Z {z}", str(self))
- if region.size.width < 0 or region.size.height < 0:
- raise ValueError('Negative size not allowed')
+ raise WsiDicomNotFoundError(f'focal plane {z}', str(self))
+ if path not in self.optical_paths:
+ raise WsiDicomNotFoundError(f'optical path {path}', str(self))
+ image_data = self._get_region(region)
+ if image_data is None:
+ image_data = self._get_blank_decoded_frame(region.size)
+ return image_data
- location_in_base_level = region.start * self.downsample
+ def _detect_blank_tile(self, data: np.ndarray) -> bool:
+ """Detect if tile data is a blank tile, i.e. either has full
+ transparency or is filled with background color. First checks if the
+ corners are transparent or has background color before checking whole
+ data.
- buffer = (region.size.width * region.size.height * c_uint32)()
- _read_region(
- self._open_slide._osr,
- buffer,
- location_in_base_level.x,
- location_in_base_level.y,
- self._level_index,
- region.size.width,
- region.size.height
- )
- tile_data: np.ndarray = np.frombuffer(buffer, dtype=np.uint8)
- tile_data.shape = (region.size.width, region.size.height, 4)
- tile_data = self._make_transparent_pixels_white(tile_data)
- convert_argb_to_rgba(tile_data)
- return Image.fromarray(tile_data).convert('RGB')
+ Parameters
+ ----------
+ data: np.ndarray
+ Data to check if blank.
- def _get_tile(self, tile: Point, flip: bool = False) -> np.ndarray:
- """Return tile as np array. Transparency is removed. Optionally the
- pixel format can be flipped to RGBA, suitable for opening with PIL.
+ Returns
+ ----------
+ bool
+ True if tile is blank.
+ """
+ TOP = RIGHT = -1
+ BOTTOM = LEFT = 0
+ CORNERS_Y = (BOTTOM, BOTTOM, TOP, TOP)
+ CORNERS_X = (LEFT, RIGHT, LEFT, RIGHT)
+ TRANSPARENCY = 3
+ background = np.array(self.blank_color)
+ transparency = data[:, :, TRANSPARENCY]
+ if np.all(transparency[CORNERS_Y, CORNERS_X] == 0):
+ if np.all(transparency == 0):
+ return True
+ if np.all(data[CORNERS_Y, CORNERS_X, 0:TRANSPARENCY] == background):
+ if np.all(data[:, :, 0:TRANSPARENCY] == background):
+ return True
+ return False
+
+ def _get_blank_encoded_frame(self, size: Size) -> bytes:
+ """Return cached blank encoded frame for size, or create frame if
+ cached frame not available or of wrong size.
Parameters
----------
- tile: Point
- Tile position to get.
- flip: bool
- If to flip the pixel format from ARGB to RGBA.
+ size: Size
+ Size of frame to get.
Returns
----------
- np.ndarray
- Numpy array of tile.
+ bytes
+ Encoded blank frame.
"""
- tile_point_in_base_level = tile * self.downsample * self._tile_size
- if self._tile_size.width < 0 or self._tile_size.height < 0:
+ if self._blank_encoded_frame_size != size:
+ frame = np.full(
+ size.to_tuple() + (3,),
+ self.blank_color,
+ dtype=np.dtype(np.uint8)
+ )
+ self._blank_encoded_frame = self._encode(frame)
+ self._blank_encoded_frame_size = size
+ return self._blank_encoded_frame
+
+ def _get_blank_decoded_frame(self, size: Size) -> Image.Image:
+ """Return cached blank decoded frame for size, or create frame if
+ cached frame not available or of wrong size.
+
+ Parameters
+ ----------
+ size: Size
+ Size of frame to get.
+
+ Returns
+ ----------
+ bytes
+ Decoded blank frame.
+ """
+ if (
+ self._blank_decoded_frame is None
+ or self._blank_decoded_frame_size != size
+ ):
+ frame = Image.new('RGB', size.to_tuple(), self.blank_color)
+ self._blank_decoded_frame = frame
+ return self._blank_decoded_frame
+
+ def _get_region(
+ self,
+ region: Region
+ ) -> Optional[Image.Image]:
+ """Return Image read from region in openslide image. If image data for
+ region is blank, None is returned. Transparent pixels are made into
+ background color
+
+ Parameters
+ ----------
+ region: Region
+ Region to get image for.
+
+ Returns
+ ----------
+ Optional[Image.Image]
+ Image of region, or None if region is blank.
+ """
+ if region.size.width < 0 or region.size.height < 0:
raise ValueError('Negative size not allowed')
- buffer = (self._tile_size.width * self._tile_size.height * c_uint32)()
+
+ location_in_base_level = region.start * self.downsample + self._offset
+ buffer = (region.size.width * region.size.height * c_uint32)()
_read_region(
- self._open_slide._osr,
+ self._slide._osr,
buffer,
- tile_point_in_base_level.x,
- tile_point_in_base_level.y,
+ location_in_base_level.x,
+ location_in_base_level.y,
self._level_index,
- self._tile_size.width,
- self._tile_size.height
+ region.size.width,
+ region.size.height
)
- tile_data: np.ndarray = np.frombuffer(buffer, dtype=np.uint8)
- tile_data.shape = (self._tile_size.width, self._tile_size.height, 4)
- tile_data = self._make_transparent_pixels_white(tile_data)
- if flip:
- convert_argb_to_rgba(tile_data)
- return tile_data
+ tile_data: np.ndarray = np.frombuffer(buffer, dtype=np.dtype(np.uint8))
+ tile_data.shape = (region.size.height, region.size.width, 4)
+ if self._detect_blank_tile(tile_data):
+ return None
+ convert_argb_to_rgba(tile_data)
+ image = Image.fromarray(tile_data)
+ no_alpha = Image.new('RGB', image.size, self.blank_color)
+ no_alpha.paste(image, mask=image.split()[3])
+ return no_alpha
def _get_encoded_tile(
self,
- tile: Point,
+ tile_point: Point,
z: float,
path: str
) -> bytes:
@@ -365,7 +415,7 @@ def _get_encoded_tile(
Parameters
----------
- tile: Point
+ tile_point: Point
Tile position to get.
z: float
Focal plane of tile to get.
@@ -377,9 +427,16 @@ def _get_encoded_tile(
bytes
Tile bytes.
"""
- if z not in self.focal_planes or path not in self.optical_paths:
- raise ValueError
- return self._encode(self._get_tile(tile))
+ if z not in self.focal_planes:
+ raise WsiDicomNotFoundError(f'focal plane {z}', str(self))
+ if path not in self.optical_paths:
+ raise WsiDicomNotFoundError(f'optical path {path}', str(self))
+ tile = self._get_region(
+ Region(tile_point*self.tile_size, self.tile_size)
+ )
+ if tile is None:
+ return self._get_blank_encoded_frame(self.tile_size)
+ return self._encode(np.asarray(tile))
def _get_decoded_tile(
self,
@@ -403,10 +460,16 @@ def _get_decoded_tile(
Image.Image
Tile as Image.
"""
- if z not in self.focal_planes or path not in self.optical_paths:
- raise ValueError
- tile = self._get_tile(tile_point, True)
- return Image.fromarray(tile).convert('RGB')
+ if z not in self.focal_planes:
+ raise WsiDicomNotFoundError(f'focal plane {z}', str(self))
+ if path not in self.optical_paths:
+ raise WsiDicomNotFoundError(f'optical path {path}', str(self))
+ tile = self._get_region(
+ Region(tile_point*self.tile_size, self.tile_size)
+ )
+ if tile is None:
+ return self._get_blank_decoded_frame(self.tile_size)
+ return tile
class OpenSlideDicomizer(MetaDicomizer):
@@ -459,12 +522,10 @@ def open(
"""
if tile_size is None:
raise ValueError("Tile size required for open slide")
- JCS_EXT_BGRA = 9
encoder = create_encoder(
encoding_format,
encoding_quality,
- subsampling=jpeg_subsampling,
- colorspace=JCS_EXT_BGRA
+ subsampling=jpeg_subsampling
)
base_dataset = create_base_dataset(modules)
slide = OpenSlide(filepath)
| WsiDicomizer stores DICOM with NumberOfSamples = 3, but should be 4 when converting from OpenSlide
Took me a while to figure this out, but when converting from OpenSlide and encoding to JPEG2000, the DICOM Tag SamplesPerPixel is set to 3, however, OpenSlide is returning ARGB and this is also stored in the JPEG2000-encoded. This makes it challenging to allocate the correct buffers in third-party applications. There are two solutions, which is why I didn't fix it myself:
1. Strip the alpha part from the returned openslide tile
2. Store the SamplesPerPixel as 4
As I don't know very well if DICOM handles an alpha channel very well, I thought stripping it might be better?
| Thanks for the bug report. Yes the openslide data comes as ARGB. The jpeg-encoder can handle (ignore) the extra channel by setting the input color space. I have to test if the jpeg2000 does the same (there is a colorspace flag). Otherwise we must strip it before sending it to the encoder. It is not allowed to have anything else than 1 or 3 SamplesPerPixel. | 2021-12-16T13:56:01 | 0.0 | [] | [] |
||
inveniosoftware/invenio-accounts | inveniosoftware__invenio-accounts-381 | 4e2656b6b91cd17e0ef566ae5a6e72009f9f01ec | diff --git a/invenio_accounts/cli.py b/invenio_accounts/cli.py
index f8c90437..cc537c0b 100644
--- a/invenio_accounts/cli.py
+++ b/invenio_accounts/cli.py
@@ -52,7 +52,7 @@ def users_create(email, password, active):
"""Create a user."""
kwargs = dict(email=email, password=password, active='y' if active else '')
- form = ConfirmRegisterForm(MultiDict(kwargs), csrf_enabled=False)
+ form = ConfirmRegisterForm(MultiDict(kwargs), meta={'csrf': False})
if form.validate():
kwargs['password'] = hash_password(kwargs['password'])
| Check if Flask-WTF version 1.0.0 is breaking the CLI
In `invenio-accounts` package version `1.4.8` it was not possible to create a new user with invenio CLI:
- `invenio users create [email protected] --password=123456 --active`
```
...
File "/dir/venv/lib/python3.8/site-packages/invenio_accounts/cli.py", line 55, in users_create
form = ConfirmRegisterForm(MultiDict(kwargs), csrf_enabled=False)
File "/dir/venv/lib/python3.8/site-packages/wtforms/form.py", line 208, in __call__
return type.__call__(cls, *args, **kwargs)
File "/dir/venv/lib/python3.8/site-packages/flask_security/forms.py", line 94, in __init__
super(Form, self).__init__(*args, **kwargs)
File "/dir/venv/lib/python3.8/site-packages/flask_wtf/form.py", line 73, in __init__
super().__init__(formdata=formdata, **kwargs)
File "/dir/venv/lib/python3.8/site-packages/wtforms/form.py", line 274, in __init__
self.process(formdata, obj, data=data, **kwargs)
File "/dir/venv/lib/python3.8/site-packages/wtforms/form.py", line 131, in process
field.process(formdata)
File "/dir/venv/lib/python3.8/site-packages/wtforms/csrf/core.py", line 43, in process
self.current_token = self.csrf_impl.generate_csrf_token(self)
File "/dir/venv/lib/python3.8/site-packages/flask_wtf/csrf.py", line 147, in generate_csrf_token
return generate_csrf(
File "/dir/venv/lib/python3.8/site-packages/flask_wtf/csrf.py", line 52, in generate_csrf
if field_name not in session:
...
```
Checking the CLI code (`cli.py`) on [line 55](https://github.com/inveniosoftware/invenio-accounts/blob/4e2656b6b91cd17e0ef566ae5a6e72009f9f01ec/invenio_accounts/cli.py#L55):
- `form = ConfirmRegisterForm(MultiDict(kwargs), csrf_enabled=False)`
The second argument in `ConfirmRegisterForm` is breaking the code for `Flask-WTF version 1.0.0` but works fine with `Flask-WTF 0.15.1`. In version 1.0.0 of Flask-WTF the argument as `meta={'csrf': False}` worked.
| 2021-12-04T17:05:29 | 0.0 | [] | [] |
|||
Deltares/imod-python | Deltares__imod-python-743 | 2ae9f55fa2fb6049e82c39e4dbbe704b6287e3b8 | diff --git a/docs/api/changelog.rst b/docs/api/changelog.rst
index d4943d349..72c3e505e 100644
--- a/docs/api/changelog.rst
+++ b/docs/api/changelog.rst
@@ -14,6 +14,9 @@ Fixed
- iMOD Python now supports versions of pandas >= 2
- Fixed bugs with clipping :class:`imod.mf6.HorizontalFlowBarrier` for
structured grids
+- Improved performance for merging structured multimodel Modflow 6 output
+- Bug where :function:`imod.formats.idf.open_subdomains` did not properly support custom
+ patterns
Changed
~~~~~~~
@@ -22,6 +25,7 @@ Changed
advice doing development installations with pixi from now on. `See the
documentation. <https://deltares.github.io/imod-python/installation.html>`_
+
[0.15.1] - 2023-12-22
---------------------
diff --git a/imod/formats/idf.py b/imod/formats/idf.py
index 1a8953b74..8b6f017f2 100644
--- a/imod/formats/idf.py
+++ b/imod/formats/idf.py
@@ -5,21 +5,22 @@
:func:`imod.idf.save`, though lower level functions are also available.
"""
-import collections
-import functools
import glob
-import itertools
import pathlib
-import re
import struct
import warnings
+from collections import defaultdict
+from collections.abc import Iterable
+from pathlib import Path
+from re import Pattern
+from typing import Any
-import dask
import numpy as np
import xarray as xr
from imod import util
from imod.formats import array_io
+from imod.typing.structured import merge_partitions
# Make sure we can still use the built-in function...
f_open = open
@@ -252,194 +253,69 @@ def open(path, use_cftime=False, pattern=None):
return array_io.reading._open(path, use_cftime, pattern, header, _read)
-def _merge_subdomains(pathlists, use_cftime, pattern):
- das = []
- for paths in pathlists.values():
- headers = [header(p, pattern) for p in paths]
- das.append(array_io.reading._load(paths, use_cftime, _read, headers))
-
- x = np.unique(np.concatenate([da.x.values for da in das]))
- y = np.unique(np.concatenate([da.y.values for da in das]))
-
- nrow = y.size
- ncol = x.size
- nlayer = das[0].coords["layer"].size
- if "species" in das[0].dims:
- has_species = True
- nspecies = das[0].coords["species"].size
- out = np.full((nspecies, 1, nlayer, nrow, ncol), np.nan)
- else:
- has_species = False
- out = np.full((1, nlayer, nrow, ncol), np.nan)
-
- for da in das:
- ix = np.searchsorted(x, da.x.values[0], side="left")
- iy = nrow - np.searchsorted(y, da.y.values[0], side="right")
- ysize, xsize = da.shape[-2:]
- if has_species:
- out[:, :, :, iy : iy + ysize, ix : ix + xsize] = da.values
- else:
- out[:, :, iy : iy + ysize, ix : ix + xsize] = da.values
-
- return out
+def _more_than_one_unique_value(values: Iterable[Any]):
+ """Returns if more than one unique value in list"""
+ return len(set(values)) != 1
-def open_subdomains(path, use_cftime=False, pattern=None):
+def open_subdomains(
+ path: str | Path, use_cftime: bool = False, pattern: str | Pattern = None
+) -> xr.DataArray:
"""
Combine IDF files of multiple subdomains.
Parameters
----------
- path : str, Path or list
+ path : str or Path
+ Global path.
use_cftime : bool, optional
- pattern : regex pattern, optional
+ pattern : str, regex pattern, optional
+ If no pattern is provided, the function will first try:
+ "{name}_c{species}_{time}_l{layer}_p{subdomain}"
+ and if that fails:
+ "{name}_{time}_l{layer}_p{subdomain}"
+ Following the iMOD5/iMOD-WQ filename conventions.
Returns
-------
xarray.DataArray
"""
- if isinstance(path, pathlib.Path):
- path = str(path)
- paths = glob.glob(path)
- n = len(paths)
- if n == 0:
- raise FileNotFoundError(f"Could not find any files matching {path}")
+ paths = sorted(glob.glob(str(path)))
if pattern is None:
- pattern = re.compile(
- r"[\w-]+_(?P<time>[0-9-]+)_l(?P<layer>[0-9]+)_p(?P<subdomain>[0-9]{3})",
- re.IGNORECASE,
- )
- pattern_species = re.compile(
- r"[\w-]+_c(?P<species>[0-9]+)_(?P<time>[0-9-]+)_l(?P<layer>[0-9]+)_p(?P<subdomain>[0-9]{3})",
- re.IGNORECASE,
+ # If no pattern provided test if
+ pattern = "{name}_c{species}_{time}_l{layer}_p{subdomain}"
+ re_pattern_species = util._custom_pattern_to_regex_pattern(pattern)
+ has_species = re_pattern_species.search(paths[0])
+ if not has_species:
+ pattern = "{name}_{time}_l{layer}_p{subdomain}"
+
+ parsed = [util.decompose(path, pattern) for path in paths]
+ grouped = defaultdict(list)
+ for match, path in zip(parsed, paths):
+ try:
+ key = match["subdomain"]
+ except KeyError as e:
+ raise KeyError(f"{e} in path: {path} with pattern: {pattern}")
+ grouped[key].append(path)
+
+ n_idf_per_subdomain = {
+ subdomain_id: len(path_ls) for subdomain_id, path_ls in grouped.items()
+ }
+ if _more_than_one_unique_value(n_idf_per_subdomain.values()):
+ raise ValueError(
+ f"Each subdomain must have the same number of IDF files, found: {n_idf_per_subdomain}"
)
- else:
- pattern_species = None
- # There are no real benefits to itertools.groupby in this case, as there's
- # no benefit to using a (lazy) iterator in this case
- grouped_by_time = collections.defaultdict(
- functools.partial(collections.defaultdict, list)
- )
- count_per_subdomain = collections.defaultdict(int) # used only for checking counts
- timestrings = []
- layers = []
- numbers = []
- species = []
-
- # Check if species are present:
- if pattern_species and pattern_species.search(paths[0]) is not None:
- has_species = True
- pattern = pattern_species
- else:
- has_species = False
-
- for p in paths:
- search = pattern.search(p)
- timestr = search["time"]
- layer = int(search["layer"])
- number = int(search["subdomain"])
- grouped_by_time[timestr][number].append(p)
- count_per_subdomain[number] += 1
- numbers.append(number)
- layers.append(layer)
- timestrings.append(timestr)
- if has_species:
- species.append(int(search["species"]))
-
- # Test whether subdomains are complete
- numbers = sorted(set(numbers))
- first = numbers[0]
- first_len = count_per_subdomain[first]
- for number in numbers:
- group_len = count_per_subdomain[number]
- if group_len != first_len:
- raise ValueError(
- f"The number of IDFs are not identical for every subdomain. "
- f"Subdomain p{first} has {first_len} IDF files, subdomain p{number} "
- f"has {group_len} IDF files."
- )
-
- if has_species:
- pattern = r"{name}_c{species}_{time}_l{layer}_p\d+"
- else:
- pattern = r"{name}_{time}_l{layer}_p\d+"
- timestrings = list(grouped_by_time.keys())
-
- # Prepare output coordinates
- coords = {}
- first_time = timestrings[0]
- samplingpaths = [
- pathlist[first] for pathlist in grouped_by_time[first_time].values()
- ]
- headers = [header(path, pattern) for path in samplingpaths]
- subdomain_bounds = [(h["xmin"], h["xmax"], h["ymin"], h["ymax"]) for h in headers]
- subdomain_cellsizes = [(h["dx"], h["dy"]) for h in headers]
- subdomain_coords = [
- util._xycoords(bounds, cellsizes)
- for bounds, cellsizes in zip(subdomain_bounds, subdomain_cellsizes)
- ]
- coords["y"] = np.unique(
- np.concatenate([coords["y"] for coords in subdomain_coords])
- )[::-1]
- coords["x"] = np.unique(
- np.concatenate([coords["x"] for coords in subdomain_coords])
- )
- coords["layer"] = np.array(sorted(set(layers)))
-
- times = [util.to_datetime(timestr) for timestr in timestrings]
- times, use_cftime = util._convert_datetimes(times, use_cftime)
- if use_cftime:
- # unique also sorts
- coords["time"] = xr.CFTimeIndex(np.unique(times))
- else:
- coords["time"] = np.unique(times)
-
- if has_species:
- coords["species"] = np.array(sorted(set(species)))
- shape = (
- coords["species"].size,
- 1,
- coords["layer"].size,
- coords["y"].size,
- coords["x"].size,
- )
- dims = ("species", "time", "layer", "y", "x")
- time_axis = 1
- else:
- shape = (1, coords["layer"].size, coords["y"].size, coords["x"].size)
- dims = ("time", "layer", "y", "x")
- time_axis = 0
-
- # Collect and merge data
- merged = []
- dtype = headers[0]["dtype"]
- sorted_order = np.argsort(times) # get time ordering right before merging
- sorted_timestrings = np.array(timestrings)[sorted_order]
- for timestr in sorted_timestrings:
- group = grouped_by_time[timestr]
- # Build a single array per timestep
- timestep_data = dask.delayed(_merge_subdomains)(group, use_cftime, pattern)
- dask_array = dask.array.from_delayed(timestep_data, shape, dtype=dtype)
- merged.append(dask_array)
- data = dask.array.concatenate(merged, axis=time_axis)
-
- # Get tops and bottoms if possible
- headers = [header(path, pattern) for path in grouped_by_time[first_time][first]]
- tops = [c.get("top", None) for c in headers]
- bots = [c.get("bot", None) for c in headers]
- layers = [c.get("layer", None) for c in headers]
- _, unique_indices = np.unique(layers, return_index=True)
- all_have_z = all(map(lambda v: v is not None, itertools.chain(tops, bots)))
- if all_have_z:
- if coords["layer"].size > 1:
- coords = array_io.reading._array_z_coord(coords, tops, bots, unique_indices)
- else:
- coords = array_io.reading._scalar_z_coord(coords, tops, bots)
+ das = []
+ for pathlist in grouped.values():
+ da = open(pathlist, use_cftime=use_cftime, pattern=pattern)
+ da = da.isel(subdomain=0, drop=True)
+ das.append(da)
- return xr.DataArray(data, coords, dims)
+ name = das[0].name
+ return merge_partitions(das)[name] # as DataArray for backwards compatibility
def open_dataset(globpath, use_cftime=False, pattern=None):
diff --git a/imod/mf6/simulation.py b/imod/mf6/simulation.py
index 22581d3d8..cfb648cc2 100644
--- a/imod/mf6/simulation.py
+++ b/imod/mf6/simulation.py
@@ -34,7 +34,7 @@
from imod.mf6.write_context import WriteContext
from imod.schemata import ValidationError
from imod.typing import GridDataArray, GridDataset
-from imod.typing.grid import concat, is_unstructured, merge, merge_partitions
+from imod.typing.grid import concat, is_unstructured, merge, merge_partitions, nan_like
OUTPUT_FUNC_MAPPING = {
"head": (open_hds, GroundwaterFlowModel),
@@ -502,7 +502,7 @@ def _open_output(self, output: str, **settings) -> GridDataArray | GridDataset:
return self._open_output_single_model(modelname, output, **settings)
elif is_split(self):
if "budget" in output:
- return self._merge_fluxes(modelnames, output, **settings)
+ return self._merge_budgets(modelnames, output, **settings)
else:
return self._merge_states(modelnames, output, **settings)
elif output == "concentration":
@@ -528,26 +528,26 @@ def _merge_states(
)
return merge_partitions(state_partitions)
- def _merge_and_assign_exchange_fluxes(self, cbc: GridDataset) -> GridDataset:
+ def _merge_and_assign_exchange_budgets(self, cbc: GridDataset) -> GridDataset:
"""
- Merge and assign exchange fluxes to cell by cell budgets:
+ Merge and assign exchange budgets to cell by cell budgets:
cbc[[gwf-gwf_1, gwf-gwf_3]] to cbc[gwf-gwf]
"""
exchange_names = [
key for key in cbc.keys() if ("gwf-gwf" in key) or ("gwt-gwt" in key)
]
- exchange_flux = cbc[exchange_names].to_array().sum(dim="variable")
+ exchange_budgets = cbc[exchange_names].to_array().sum(dim="variable")
cbc = cbc.drop_vars(exchange_names)
# "gwf-gwf" or "gwt-gwt"
exchange_key = exchange_names[0].split("_")[0]
- cbc[exchange_key] = exchange_flux
+ cbc[exchange_key] = exchange_budgets
return cbc
- def _merge_fluxes(
+ def _merge_budgets(
self, modelnames: list[str], output: str, **settings
) -> GridDataset:
if settings["flowja"] is True:
- raise ValueError("``flowja`` cannot be set to True when merging fluxes.")
+ raise ValueError("``flowja`` cannot be set to True when merging budgets.")
cbc_per_partition = []
for modelname in modelnames:
@@ -558,14 +558,24 @@ def _merge_fluxes(
# https://github.com/Deltares/xugrid/issues/179
cbc_list = [da.rename(key) for key, da in cbc_dict.items()]
cbc = merge(cbc_list)
- # Merge and assign exchange fluxes to dataset
- # FUTURE: Refactor to insert these exchange fluxes in horizontal
+ # Merge and assign exchange budgets to dataset
+ # FUTURE: Refactor to insert these exchange budgets in horizontal
# flows.
- cbc = self._merge_and_assign_exchange_fluxes(cbc)
+ cbc = self._merge_and_assign_exchange_budgets(cbc)
if not is_unstructured(cbc):
cbc = cbc.where(partition_domain, other=np.nan)
cbc_per_partition.append(cbc)
+ # Boundary conditions can be missing in certain partitions, as do their
+ # budgets, in which case we manually assign an empty grid of nans.
+ unique_keys = set([key for cbc in cbc_per_partition for key in cbc.keys()])
+ for cbc in cbc_per_partition:
+ missing_keys = unique_keys - set(cbc.keys())
+ present_keys = unique_keys & set(cbc.keys())
+ first_present_key = next(iter(present_keys))
+ for missing in missing_keys:
+ cbc[missing] = nan_like(cbc[first_present_key], dtype=np.float64)
+
return merge_partitions(cbc_per_partition)
def _concat_concentrations(
diff --git a/imod/typing/grid.py b/imod/typing/grid.py
index ade7c2e76..4e23f102c 100644
--- a/imod/typing/grid.py
+++ b/imod/typing/grid.py
@@ -6,7 +6,7 @@
from fastcore.dispatch import typedispatch
from imod.prepare import polygonize
-from imod.typing import GridDataArray, GridDataset
+from imod.typing import GridDataArray, GridDataset, structured
@typedispatch
@@ -30,13 +30,13 @@ def ones_like(grid: xu.UgridDataArray, *args, **kwargs):
@typedispatch
-def nan_like(grid: xr.DataArray, *args, **kwargs):
- return xr.full_like(grid, fill_value=np.nan, dtype=np.float32, *args, **kwargs)
+def nan_like(grid: xr.DataArray, dtype=np.float32, *args, **kwargs):
+ return xr.full_like(grid, fill_value=np.nan, dtype=dtype, *args, **kwargs)
@typedispatch
-def nan_like(grid: xu.UgridDataArray, *args, **kwargs):
- return xu.full_like(grid, fill_value=np.nan, dtype=np.float32, *args, **kwargs)
+def nan_like(grid: xu.UgridDataArray, dtype=np.float32, *args, **kwargs):
+ return xu.full_like(grid, fill_value=np.nan, dtype=dtype, *args, **kwargs)
@typedispatch
@@ -105,7 +105,7 @@ def merge_partitions(
objects: Sequence[GridDataArray | GridDataset], *args, **kwargs
) -> GridDataArray | GridDataset:
return _type_dispatch_functions_on_grid_sequence(
- objects, xu.merge_partitions, xr.merge, *args, **kwargs
+ objects, xu.merge_partitions, structured.merge_partitions, *args, **kwargs
)
diff --git a/imod/typing/structured.py b/imod/typing/structured.py
new file mode 100644
index 000000000..76b0c5040
--- /dev/null
+++ b/imod/typing/structured.py
@@ -0,0 +1,252 @@
+# %%
+
+import itertools
+from collections import defaultdict
+from typing import Any, DefaultDict, Dict, List, Set, Tuple
+
+import dask
+import numpy as np
+import xarray as xr
+
+# %%
+
+
+def check_dtypes(das: List[xr.DataArray]) -> None:
+ """Check whether the dtypes of all arrays are the same."""
+ dtypes = set(da.dtype for da in das)
+ if len(dtypes) != 1:
+ raise TypeError(f"DataArrays do not match in dtype: {dtypes}")
+ return
+
+
+def _is_nonunique_dimsize(sizes: Set[int]) -> bool:
+ return len(sizes) != 1
+
+
+def check_sizes(sizes: DefaultDict[str, Set[int]], attribute: str) -> None:
+ """Utility for checking a dict of dimension names and sizes. Skips x and y."""
+ sizes.pop("x", None)
+ sizes.pop("y", None)
+ conflicting = {k: v for k, v in sizes.items() if _is_nonunique_dimsize(v)}
+ if conflicting:
+ message = (
+ f"DataArrays do not match in {attribute} along dimension(s):\n"
+ + "\n".join([f" {k}: {v}" for k, v in conflicting.items()])
+ )
+ raise ValueError(message)
+ return
+
+
+def check_dims(das: List[xr.DataArray]) -> None:
+ all_dims = set(da.dims for da in das)
+ if len(all_dims) != 1:
+ raise ValueError(
+ f"All DataArrays should have exactly the same dimensions. Found: {all_dims}"
+ )
+ last_dims = das[0].dims[-2:]
+ if not last_dims == ("y", "x"):
+ raise ValueError(f'Last dimensions must be ("y", "x"). Found: {last_dims}')
+ check_dim_sizes(das)
+
+
+def check_dim_sizes(das: List[xr.DataArray]) -> None:
+ """Check whether all non-xy dims are equally sized."""
+ sizes = defaultdict(set)
+ for da in das:
+ for key, value in da.sizes.items():
+ sizes[key].add(value)
+ check_sizes(sizes, "size")
+ return
+
+
+def check_coords(das: List[xr.DataArray]):
+ def drop_xy(coords) -> Dict[str, Any]:
+ coords = dict(coords)
+ coords.pop("y")
+ coords.pop("x")
+ return xr.Coordinates(coords)
+
+ first_coords = drop_xy(das[0].coords)
+ disjoint = [
+ i + 1
+ for i, da in enumerate(das[1:])
+ if not first_coords.equals(drop_xy(da.coords))
+ ]
+ if disjoint:
+ raise ValueError(
+ f"Non x-y coordinates do not match for partition 0 with partitions: {disjoint}"
+ )
+ return
+
+
+def check_chunk_sizes(das: List[xr.DataArray]) -> None:
+ """Check whether all chunks are equal on non-xy dims."""
+ chunks = [da.chunks for da in das]
+ iterator = (item is None for item in chunks)
+ allnone = all(iterator)
+ if allnone:
+ return
+ if any(iterator) != allnone:
+ raise ValueError("Some DataArrays are chunked, while others are not.")
+
+ sizes = defaultdict(set)
+ for da in das:
+ for key, value in zip(da.dims, da.chunks):
+ sizes[key].add(value)
+ check_sizes(sizes, "chunks")
+ return
+
+
+def merge_arrays(
+ arrays: List[np.ndarray],
+ ixs: List[np.ndarray],
+ iys: List[np.ndarray],
+ yx_shape: Tuple[int, int],
+) -> np.ndarray:
+ """
+ Merge the arrays in the last two (y, x) dimensions.
+
+ Parameters
+ ----------
+ arrays: list of N np.ndarray
+ ixs: list of N np.ndarray of int
+ The i-th element are the x indices of the i-th array into the merged
+ array.
+ iys: list of N np.ndarray of int
+ The i-th element are the y indices of the i-th array into the merged
+ array.
+ yx_shape: tuple of int
+ The number of rows and columns of the merged array.
+
+ Returns
+ -------
+ merged: np.ndarray
+ """
+ first = arrays[0]
+ shape = first.shape[:-2] + yx_shape
+ out = np.full(shape, np.nan, dtype=first.dtype)
+ for a, ix, iy in zip(arrays, ixs, iys):
+ ysize, xsize = a.shape[-2:]
+ # Create view of partition, see:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#slicing-and-striding
+ out_partition_view = out[..., iy : iy + ysize, ix : ix + xsize]
+ # Assign active values to view (updates `out` inplace)
+ out_partition_view[...] = np.where(~np.isnan(a), a, out_partition_view)
+ return out
+
+
+def _unique_coords(das: List[xr.DataArray], dim: str) -> xr.DataArray:
+ """Collect unique coords in list of dataarrays"""
+ return np.unique(np.concatenate([da.coords[dim].values for da in das]))
+
+
+def _merge_nonequidistant_coords(
+ das: List[xr.DataArray], coordname: str, indices: List[np.ndarray], nsize: int
+):
+ dtype = das[0].coords[coordname].dtype
+ out = np.full((nsize,), np.nan, dtype=dtype)
+ for da, index in zip(das, indices):
+ coords = da.coords[coordname]
+ out[index : index + coords.size] = coords.values
+ return out
+
+
+def _merge_partitions(das: List[xr.DataArray]) -> xr.DataArray:
+ # Do some input checking
+ check_dtypes(das)
+ check_dims(das)
+ check_chunk_sizes(das)
+ check_coords(das)
+
+ # Create the x and y coordinates of the merged grid.
+ x = _unique_coords(das, "x")
+ y = _unique_coords(das, "y")
+ nrow = y.size
+ ncol = x.size
+ # Compute the indices for where the different subdomain parts belong
+ # in the merged grid.
+ ixs = [np.searchsorted(x, da.x.values[0], side="left") for da in das]
+ iys = [nrow - np.searchsorted(y, da.y.values[0], side="right") for da in das]
+ yx_shape = (nrow, ncol)
+
+ # Collect coordinates
+ first = das[0]
+ coords = dict(first.coords)
+ coords["x"] = x
+ coords["y"] = y[::-1]
+ if "dx" in first.coords:
+ coords["dx"] = ("x", _merge_nonequidistant_coords(das, "dx", ixs, ncol))
+ if "dy" in first.coords:
+ coords["dy"] = ("y", _merge_nonequidistant_coords(das, "dy", iys, nrow)[::-1])
+
+ arrays = [da.data for da in das]
+ if first.chunks is None:
+ # If the data is in memory, merge all at once.
+ data = merge_arrays(arrays, ixs, iys, yx_shape)
+ else:
+ # Iterate over the chunks of the dask array. Collect the chunks
+ # from every partition and merge them, chunk by chunk.
+ # The delayed merged result is stored as a flat list. These can
+ # be directly concatenated into a new dask array if chunking occurs
+ # on only the first dimension (e.g. time), but not if chunks exist
+ # in multiple dimensions (e.g. time and layer).
+ #
+ # dask.array.block() is capable of concatenating over multiple
+ # dimensions if we feed it a nested list of lists of dask arrays.
+ # This is more easily represented by a numpy array of objects
+ # (dask arrays), since numpy has nice tooling for reshaping.
+ #
+ # Normally, we'd append to a list, then convert to numpy array and
+ # reshape. However, numpy attempts to join a list of dask arrays into
+ # a single large numpy array when initialized. This behavior is not
+ # triggered when setting individual elements of the array, so we
+ # create the numpy array in advance and set its elements.
+
+ block_shape = das[0].data.blocks.shape[:-2]
+ merged_blocks = np.empty(np.prod(block_shape), dtype=object)
+ dimension_ranges = [range(size) for size in block_shape]
+ for i, index in enumerate(itertools.product(*dimension_ranges)):
+ # This is a workaround for python 3.10
+ # FUTURE: can be rewritten to arr.blocks[*index, ...] in python 3.11
+ index_with_ellipsis = tuple(index) + (...,)
+ # arr.blocks provides us access to the chunks of the array.
+ arrays_to_merge = [arr.blocks[index_with_ellipsis] for arr in arrays]
+ delayed_merged = dask.delayed(merge_arrays)(
+ arrays_to_merge, ixs, iys, yx_shape
+ )
+ dask_merged = dask.array.from_delayed(
+ delayed_merged,
+ shape=arrays_to_merge[0].shape[:-2] + yx_shape,
+ dtype=first.dtype,
+ )
+ merged_blocks[i] = dask_merged
+
+ # After merging, the xy chunks are always (1, 1)
+ reshaped = merged_blocks.reshape(block_shape + (1, 1))
+ data = dask.array.block(reshaped.tolist())
+
+ return xr.DataArray(
+ data=data,
+ coords=coords,
+ dims=first.dims,
+ )
+
+
+def merge_partitions(
+ das: List[xr.DataArray | xr.Dataset],
+) -> xr.Dataset:
+ first_item = das[0]
+ if isinstance(first_item, xr.Dataset):
+ unique_keys = set([key for da in das for key in da.keys()])
+ merged_ls = []
+ for key in unique_keys:
+ merged_ls.append(_merge_partitions([da[key] for da in das]).rename(key))
+ return xr.merge(merged_ls)
+ elif isinstance(first_item, xr.DataArray):
+ # Store name to rename after concatenation
+ name = first_item.name
+ return _merge_partitions(das).to_dataset(name=name)
+ else:
+ raise TypeError(
+ f"Expected type: xr.DataArray or xr.Dataset, got {type(first_item)}"
+ )
diff --git a/imod/util.py b/imod/util.py
index f69918e3b..c71ff4bde 100644
--- a/imod/util.py
+++ b/imod/util.py
@@ -57,18 +57,28 @@ def to_datetime(s):
return time
-def _groupdict(stem: str, pattern: str) -> Dict:
+def _custom_pattern_to_regex_pattern(pattern: str):
+ """
+ Compile iMOD Python's simplified custom pattern to regex pattern:
+ _custom_pattern_to_regex_pattern({name}_c{species})
+ is the same as calling:
+ (?P<name>[\\w.-]+)_c(?P<species>[\\w.-]+)).compile()
+ """
+ pattern = pattern.lower()
+ # Get the variables between curly braces
+ in_curly = re.compile(r"{(.*?)}").findall(pattern)
+ regex_parts = {key: f"(?P<{key}>[\\w.-]+)" for key in in_curly}
+ # Format the regex string, by filling in the variables
+ simple_regex = pattern.format(**regex_parts)
+ return re.compile(simple_regex)
+
+
+def _groupdict(stem: str, pattern: str | Pattern) -> Dict:
if pattern is not None:
if isinstance(pattern, Pattern):
d = pattern.match(stem).groupdict()
else:
- pattern = pattern.lower()
- # Get the variables between curly braces
- in_curly = re.compile(r"{(.*?)}").findall(pattern)
- regex_parts = {key: f"(?P<{key}>[\\w.-]+)" for key in in_curly}
- # Format the regex string, by filling in the variables
- simple_regex = pattern.format(**regex_parts)
- re_pattern = re.compile(simple_regex)
+ re_pattern = _custom_pattern_to_regex_pattern(pattern)
# Use it to get the required variables
d = re_pattern.match(stem).groupdict()
else: # Default to "iMOD conventions": {name}_c{species}_{time}_l{layer}
| imod.idf.open_subdomains is insufficiently flexible
The current implementation of imod.idf.open_subdomains contains the following code:
```python
if pattern is None:
pattern = re.compile(
r"[\w-]+_(?P<time>[0-9-]+)_l(?P<layer>[0-9]+)_p(?P<subdomain>[0-9]{3})",
re.IGNORECASE,
)
pattern_species = re.compile(
r"[\w-]+_c(?P<species>[0-9]+)_(?P<time>[0-9-]+)_l(?P<layer>[0-9]+)_p(?P<subdomain>[0-9]{3})",
re.IGNORECASE,
)
else:
pattern_species = None
```
Which is **very** specific in terms of the expected IDFs to merge: they must always have at least a time, layer, and subdomain "coordinate". The reasons we chose this is because the method is almost exclusively used to merge iMODFLOW and iMOD-WQ output which follow these naming conventions. Every now and then, a use case comes up where this is not the case. There is a workaround, which consists of renaming all the IDF files, but this is obviously unsatisfactory and quite error prone.
Ideally, the open_subdomains function has the niceties of the regular idf.open functionality in that it allows for some more room. Secondly, the function is rather long in its current form. I've just checked what's being done for MODFLOW 6, and it just calls `xr.merge`. I think I found that it was very slow in the past, but that might've changed (or I did something wrong).
If the performance is good, we could simply replace the entire open_subdomains function by a loop grouping by partition number, followed by an xr.merge.
So I think the first step would be doing some benchmarking, comparing the xr.merge with the specialized _merge_subdomains functions that's found in the idf.module.
| So to do some benchmarking, this set's up some example data:
```python
# %%
import itertools
import xarray as xr
import pandas as pd
import numpy as np
import imod
# %%
x = np.arange(1000.0) + 0.5
y = np.arange(1000.0, 0.0, -1.0) + 0.5
time = pd.date_range("2000-01-01", "2001-01-01")
# %%
shape = (time.size, 1, y.size, x.size)
data = np.random.rand(*shape)
# %%
da = xr.DataArray(
data=data,
coords={"time": time, "layer": [1], "y": y, "x": x},
dims=("time", "layer", "y", "x"),
)
# %%
starts = [0, 250, 500, 750, 1000]
slices = [slice(s, e) for s, e in zip(starts[:-1], starts[1:])]
for i, slices in enumerate(itertools.product(slices, slices)):
part = da.isel(y=slices[0], x=slices[1])
part.to_netcdf(f"part_{i}.nc")
imod.idf.save(f"idf/part_p{i}", part)
# %%
```
And some new/refactored code to merge structured partitions:
```python
# %%
from collections import defaultdict
from glob import glob
from typing import DefaultDict, List, Sequence, Set, Tuple
import itertools
import dask
import xarray as xr
import imod
import numpy as np
# %%
def check_dtypes(das: Sequence[xr.DataArray]) -> None:
"""Check whether the dtypes of all arrays are the same."""
dtypes = set(da.dtype for da in das)
if len(dtypes) != 1:
raise TypeError(
f"DataArrays do not match in dtype: {dtypes}"
)
return
def check_sizes(sizes: DefaultDict[str, Set[int]], attribute: str) -> None:
"""Utility for checking a dict of dimension names and sizes. Skips x and y."""
sizes.pop("x", None)
sizes.pop("y", None)
conflicting = {k: v for k, v in sizes.items() if len(v) != 1}
if conflicting:
message = (
f"DataArrays do not match in {attribute} along dimension(s):\n" +
"\n".join([f" {k}: {v}" for k, v in conflicting.items()])
)
raise ValueError(message)
return
def check_dim_sizes(das: Sequence[xr.DataArray]) -> None:
"""Check whether all non-xy dims are equally sized."""
sizes = defaultdict(set)
for da in das:
for key, value in da.sizes.items():
sizes[key].add(value)
check_sizes(sizes, "size")
return
def check_chunk_sizes(das: Sequence[xr.DataArray]) -> None:
"""Check whether all chunks are equal on non-xy dims."""
chunks = [da.chunks for da in das]
iterator = (item is None for item in chunks)
allnone = all(iterator)
if allnone:
return
if any(iterator) != allnone:
raise ValueError("Some DataArrays are chunked, while others are not.")
sizes = defaultdict(set)
for da in das:
for key, value in zip(da.dims, da.chunks):
sizes[key].add(value)
check_sizes(sizes, "chunks")
return
def merge_arrays(arrays: List[np.ndarray], ixs: List[np.ndarray], iys: List[np.ndarray], yx_shape: Tuple[int, int]) -> np.ndarray:
"""
Merge the arrays in the last two (y, x) dimensions.
Parameters
----------
arrays: list of N np.ndarray
ixs: list of N np.ndarray of int
The i-th element are the x indices of the i-th array into the merged
array.
iys: list of N np.ndarray of int
The i-th element are the y indices of the i-th array into the merged
array.
yx_shape: tuple of int
The number of rows and columns of the merged array.
Returns
-------
merged: np.ndarray
"""
first = arrays[0]
shape = first.shape[:-2] + yx_shape
out = np.full(shape, np.nan)
for a, ix, iy in zip(arrays, ixs, iys):
ysize, xsize = a.shape[-2:]
out[..., iy : iy + ysize, ix : ix + xsize] = a
return out
def merge_subdomains(das: Sequence[xr.DataArray]) -> xr.DataArray:
# Do some input checking
check_dtypes(das)
check_dim_sizes(das)
check_chunk_sizes(das)
# Create the x and y coordinates of the merged grid.
x = np.unique(np.concatenate([da.x.values for da in das]))
y = np.unique(np.concatenate([da.y.values for da in das]))
nrow = y.size
ncol = x.size
# Compute the indices for where the different subdomain parts belong
# in the merged grid.
ixs = [np.searchsorted(x, da.x.values[0], side="left") for da in das]
iys = [nrow - np.searchsorted(y, da.y.values[0], side="right") for da in das]
yx_shape = (nrow, ncol)
first = das[0]
coords = dict(first.coords)
coords["x"] = x
coords["y"] = y
arrays = [da.data for da in das]
if first.chunks is None:
# If the data is in memory, merge all at once.
data = merge_arrays(arrays, ixs, iys, yx_shape)
else:
# Iterate over the chunks of the dask array. Collect the chunks
# from every partition and merge them, chunk by chunk.
# The delayed merged result is stored as a flat list. These can
# be directly concatenated into a new dask array if chunking occurs
# on only the first dimension (e.g. time), but not if chunks exist
# in multiple dimensions (e.g. time and layer).
#
# dask.array.block() is capable of concatenating over multiple
# dimensions if we feed it a nested list of lists of dask arrays.
# This is more easily represented by a numpy array of objects
# (dask arrays), since numpy has nice tooling for reshaping.
#
# Normally, we'd append to a list, then convert to numpy array and
# reshape. However, numpy attempts to join a list of dask arrays into
# a single large numpy array when initialized. This behavior is not
# triggered when setting individual elements of the array, so we
# create the numpy array in advance and set its elements.
block_shape = das[0].data.blocks.shape[:-2]
merged_blocks = np.empty(np.prod(block_shape), dtype=object)
dimension_ranges = [range(size) for size in block_shape]
for i, index in enumerate(itertools.product(*dimension_ranges)):
# arr.blocks provides us access to the chunks of the array.
arrays_to_merge = [arr.blocks[*index, ...] for arr in arrays]
delayed_merged = dask.delayed(merge_arrays)(arrays_to_merge, ixs, iys, yx_shape)
dask_merged = dask.array.from_delayed(
delayed_merged,
shape=arrays_to_merge[0].shape[:-2] + yx_shape,
dtype=first.dtype,
)
merged_blocks[i] = dask_merged
# After merging, the xy chunks are always (1, 1)
reshaped = merged_blocks.reshape(block_shape + (1, 1))
data = dask.array.block(reshaped.tolist())
return xr.DataArray(
data=data,
coords=coords,
dims=first.dims,
)
def new_open_subdomains(path, use_cftime=False, pattern=None):
paths = sorted(glob(path))
parsed = [imod.util.decompose(path, pattern) for path in paths]
grouped = defaultdict(list)
for match, path in zip(parsed, paths):
key = match["subdomain"]
grouped[key].append(path)
n_idf = {k: len(v) for k, v in grouped.items()}
if len(set(n_idf.values())) != 1:
raise ValueError(
f"Each partition must have the same number of IDF files, found: {n_idf}"
)
das = []
for pathlist in grouped.values():
da = imod.idf.open(pathlist, use_cftime=use_cftime, pattern=pattern)
das.append(da)
return merge_subdomains(das)
```
Because of my naming, we can't rely on the standard imod idf name parsing, so we provide a custom pattern.
Due to improvized nature of this open_subdomains function, it doesn't take the "format" strings (which are much easier to setup):
```python
import re
pattern=re.compile(r"(?P<name>[\w-]+)_p(?P<subdomain>[0-9]+)_(?P<time>[0-9-]+)_l(?P<layer>[0-9]+)")
%timeit merged = imod.idf.open_subdomains("idf/*.idf", pattern=pattern)
# 68 ns
%timeit merged = imod.idf.open_subdomains("idf/*.idf", pattern=pattern).compute()
# 15.1 s
```
The new open_subdomains fortunately does support the format strings, since it's just a wrapper around the regular idf open:
```python
%timeit new_merged = new_open_subdomains("idf/*.idf", pattern="{name}_p{subdomain}_{time}_l{layer}")
# 9.5 s
%timeit new_merged = new_open_subdomains("idf/*.idf", pattern="{name}_p{subdomain}_{time}_l{layer}").compute()
# 13 s
```
So the thing to notice here is that open_subdomains is **much** faster without the compute. This is somewhat expected, it looks at a lot less data, it only opens a single IDF headers of a partition. It's slower during the compute, because it ends up doing slight more work (it calls searchsorted for every time slice).
Secondly, the merge_subdomains function is useful by itself:
```python
parts = []
for i in range(16):
da = xr.open_dataarray(f"part_{i}.nc", chunks={"time": 1})
da.name = "rand"
parts.append(da)
%timeit merged = merge_subdomains(parts)
# 662 ms
```
Xarray's own merge is excessively slow in this case, more than a factor 100:
```python
merged = xr.merge(parts)
# 67 s
```
Even if I reduce the number of partitions down to four, it's 210 ms versus 17.5 s (factor 80).
In terms of idf.open_subdomains:
* It's nice that the current implementation does the least amount of work, but in most cases, the function is used to open all files, merge the data, and write to a new file. So the initial performance boost is not as important, arguably (the penalty is the same as manually calling idf.open on every individual partition).
* It's a lot less flexible, because it doesn't have decent pattern and multi-dimension support. Even if you provide a pattern, it always requires time, layer, subdomain.
* There are some additional details: it detects species dimension in the name, and makes sure to cast to int, and then sort. Arguably this should/could be included in the general logic of imod.idf open as well. Currently, you get int species back if you use open_subdomains, but you'd get a str species back if you use idf.open...
Is there a way to get to maintain the current (initial) performance?
* The key here is to only look at the names of the files, and open the first IDF of every partition.
* However, you need to adjust a bunch of logic from array_io.reading._load
* Currently, it requires both the filename and the file header to build the DataArray dimensions and coordinates.
* Given the amount of work and potential for new bugs (for IDF support(!)), I don't think it's worth it.
So my conclusion so far:
* Everything except "new_open_subdomains" is a necessary addition because xr.merge is so slow.
* If we replace the current open_subdomains, we should add a line in imod.util.decompose so that species are always converted to int (if people have multiple named species in IDFs that cannot be converted to int, they can use a pattern with a different name).
* The new open_subdomains is more flexible, we just at least add test cases with multiple types of chunking (none; time only; time and layer; species, time, and layer). | 2024-01-12T17:11:26 | 0.0 | [] | [] |
||
Deltares/imod-python | Deltares__imod-python-741 | 3d1746986b135bf756e02ce4e1581715c73f0e4c | diff --git a/imod/mf6/gwfgwf.py b/imod/mf6/gwfgwf.py
index a580851a3..ef21fb0cc 100644
--- a/imod/mf6/gwfgwf.py
+++ b/imod/mf6/gwfgwf.py
@@ -1,10 +1,12 @@
from typing import Dict, Optional, Tuple
+import cftime
import numpy as np
import xarray as xr
from imod.mf6.auxiliary_variables import add_periodic_auxiliary_variable
from imod.mf6.package import Package
+from imod.typing import GridDataArray
class GWFGWF(Package):
@@ -90,14 +92,16 @@ def get_specification(self) -> Tuple[str, str, str, str]:
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- state_for_boundary=None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
) -> Package:
raise NotImplementedError("this package cannot be clipped")
diff --git a/imod/mf6/hfb.py b/imod/mf6/hfb.py
index 75dd76ea3..b1ff78867 100644
--- a/imod/mf6/hfb.py
+++ b/imod/mf6/hfb.py
@@ -4,8 +4,9 @@
import typing
from copy import deepcopy
from enum import Enum
-from typing import Tuple
+from typing import Optional, Tuple
+import cftime
import geopandas as gpd
import numpy as np
import shapely.wkt
@@ -518,15 +519,17 @@ def _get_vertical_variables(self) -> list:
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- *args,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
) -> "HorizontalFlowBarrierBase":
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -550,6 +553,9 @@ def clip_box(
x_max: optional, float
y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
diff --git a/imod/mf6/model.py b/imod/mf6/model.py
index f8b2e417b..988eec333 100644
--- a/imod/mf6/model.py
+++ b/imod/mf6/model.py
@@ -348,8 +348,8 @@ def model_id(cls) -> str:
def clip_box(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
@@ -383,21 +383,29 @@ def clip_box(
state_for_boundary :
"""
clipped = self._clip_box_packages(
- time_min, time_max, layer_min, layer_max, x_min, x_max, y_min, y_max
+ time_min,
+ time_max,
+ layer_min,
+ layer_max,
+ x_min,
+ x_max,
+ y_min,
+ y_max,
)
return clipped
def _clip_box_packages(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
x_max: Optional[float] = None,
y_min: Optional[float] = None,
y_max: Optional[float] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
):
"""
Clip a model by a bounding box (time, layer, y, x).
@@ -426,6 +434,9 @@ def _clip_box_packages(
-------
clipped : Modflow6Model
"""
+
+ top, bottom, idomain = self.__get_domain_geometry()
+
clipped = type(self)(**self._options)
for key, pkg in self.items():
clipped[key] = pkg.clip_box(
@@ -437,6 +448,9 @@ def _clip_box_packages(
x_max=x_max,
y_min=y_min,
y_max=y_max,
+ top=top,
+ bottom=bottom,
+ state_for_boundary=state_for_boundary,
)
return clipped
diff --git a/imod/mf6/model_gwf.py b/imod/mf6/model_gwf.py
index 5ff4ad0a2..3d71e88c5 100644
--- a/imod/mf6/model_gwf.py
+++ b/imod/mf6/model_gwf.py
@@ -2,6 +2,9 @@
from typing import Dict, List, Optional
+import cftime
+import numpy as np
+
from imod.mf6 import ConstantHead
from imod.mf6.clipped_boundary_condition_creator import create_clipped_boundary
from imod.mf6.model import Modflow6Model, initialize_template
@@ -61,8 +64,8 @@ def _get_unique_regridder_types(self) -> Dict[RegridderType, str]:
def clip_box(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
diff --git a/imod/mf6/package.py b/imod/mf6/package.py
index 311c3fd4d..b1e3b0104 100644
--- a/imod/mf6/package.py
+++ b/imod/mf6/package.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import abc
import copy
import numbers
@@ -401,16 +403,18 @@ def __to_datetime(self, time, use_cftime):
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- state_for_boundary=None,
- ) -> "Package":
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -433,6 +437,10 @@ def clip_box(
x_max: optional, float
y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
+
Returns
-------
diff --git a/imod/mf6/simulation.py b/imod/mf6/simulation.py
index c260c7f9d..9989c62e9 100644
--- a/imod/mf6/simulation.py
+++ b/imod/mf6/simulation.py
@@ -8,6 +8,7 @@
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
+import cftime
import jinja2
import numpy as np
import tomli
@@ -755,8 +756,8 @@ def get_models_of_type(self, modeltype):
def clip_box(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
@@ -764,7 +765,7 @@ def clip_box(
y_min: Optional[float] = None,
y_max: Optional[float] = None,
states_for_boundary: Optional[dict[str, GridDataArray]] = None,
- ) -> "Modflow6Simulation":
+ ) -> Modflow6Simulation:
"""
Clip a simulation by a bounding box (time, layer, y, x).
diff --git a/imod/mf6/wel.py b/imod/mf6/wel.py
index 75474520a..c86f91f13 100644
--- a/imod/mf6/wel.py
+++ b/imod/mf6/wel.py
@@ -3,12 +3,14 @@
import warnings
from typing import Any, List, Optional, Tuple, Union
+import cftime
import numpy as np
import numpy.typing as npt
import pandas as pd
import xarray as xr
import xugrid as xu
+import imod
from imod.mf6.auxiliary_variables import add_periodic_auxiliary_variable
from imod.mf6.boundary_condition import (
BoundaryCondition,
@@ -20,12 +22,13 @@
from imod.mf6.package import Package
from imod.mf6.utilities.clip import clip_by_grid
from imod.mf6.utilities.dataset import remove_inactive
+from imod.mf6.utilities.grid import create_layered_top
from imod.mf6.write_context import WriteContext
from imod.prepare import assign_wells
from imod.schemata import AllNoDataSchema, DTypeSchema
from imod.select.points import points_indices, points_values
from imod.typing import GridDataArray
-from imod.typing.grid import ones_like
+from imod.typing.grid import is_spatial_2D, ones_like
from imod.util import values_within_range
@@ -195,18 +198,27 @@ def is_grid_agnostic_package(cls) -> bool:
def clip_box(
self,
- time_min=None,
- time_max=None,
- z_min=None,
- z_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- ) -> Well:
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
+ The well package doesn't use the layer attribute to describe its depth and length.
+ Instead, it uses the screen_top and screen_bottom parameters which corresponds with
+ the z-coordinates of the top and bottom of the well. To go from a layer_min and
+ layer_max to z-values used for clipping the well a top and bottom array have to be
+ provided as well.
+
Slicing intervals may be half-bounded, by providing None:
* To select 500.0 <= x <= 1000.0:
@@ -220,36 +232,74 @@ def clip_box(
----------
time_min: optional
time_max: optional
- z_min: optional, float
- z_max: optional, float
+ layer_min: optional, int
+ layer_max: optional, int
x_min: optional, float
x_max: optional, float
y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
sliced : Package
"""
+ if (layer_max or layer_min) and (top is None or bottom is None):
+ raise ValueError(
+ "When clipping by layer both the top and bottom should be defined"
+ )
+
+ if top is not None:
+ if not isinstance(top, GridDataArray) or "layer" not in top.coords:
+ top = create_layered_top(bottom, top)
# The super method will select in the time dimension without issues.
new = super().clip_box(time_min=time_min, time_max=time_max)
ds = new.dataset
+ z_max = self._find_well_value_at_layer(ds, top, layer_max)
+ z_min = self._find_well_value_at_layer(ds, bottom, layer_min)
+
+ if z_max is not None:
+ ds["screen_top"] = ds["screen_top"].clip(None, z_max)
+ if z_min is not None:
+ ds["screen_bottom"] = ds["screen_bottom"].clip(z_min, None)
+
# Initiate array of True with right shape to deal with case no spatial
# selection needs to be done.
in_bounds = np.full(ds.dims["index"], True)
# Select all variables along "index" dimension
in_bounds &= values_within_range(ds["x"], x_min, x_max)
in_bounds &= values_within_range(ds["y"], y_min, y_max)
- in_bounds &= values_within_range(ds["screen_top"], None, z_max)
- in_bounds &= values_within_range(ds["screen_bottom"], z_min, None)
+ in_bounds &= values_within_range(ds["screen_top"], z_min, z_max)
+ in_bounds &= values_within_range(ds["screen_bottom"], z_min, z_max)
+ # remove wells where the screen bottom and top are the same
+ in_bounds &= abs(ds["screen_bottom"] - ds["screen_top"]) > 1e-5
# Replace dataset with reduced dataset based on booleans
new.dataset = ds.loc[{"index": in_bounds}]
return new
+ @staticmethod
+ def _find_well_value_at_layer(
+ well_dataset: xr.Dataset, grid: GridDataArray, layer: int
+ ):
+ value = None if layer is None else grid.isel(layer=layer)
+
+ # if value is a grid select the values at the well locations and drop the dimensions
+ if (value is not None) and is_spatial_2D(value):
+ value = imod.select.points_values(
+ value,
+ x=well_dataset["x"].values,
+ y=well_dataset["y"].values,
+ out_of_bounds="ignore",
+ ).drop_vars(lambda x: x.coords)
+
+ return value
+
def write(
self,
pkgname: str,
@@ -640,14 +690,17 @@ def __init__(
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -668,9 +721,12 @@ def clip_box(
layer_min: optional, int
layer_max: optional, int
x_min: optional, float
- x_min: optional, float
- y_max: optional, float
+ x_max: optional, float
+ y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
@@ -791,14 +847,17 @@ def __init__(
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -819,9 +878,12 @@ def clip_box(
layer_min: optional, int
layer_max: optional, int
x_min: optional, float
- x_min: optional, float
- y_max: optional, float
+ x_max: optional, float
+ y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
| Fix clip_box arguments in the well package
| 2024-01-10T17:44:57 | 0.0 | [] | [] |
|||
Deltares/imod-python | Deltares__imod-python-739 | e852ca52df1e7a0cbff7c5045000fb7f45f84c06 | diff --git a/imod/mf6/__init__.py b/imod/mf6/__init__.py
index f733c3667..e296bc197 100644
--- a/imod/mf6/__init__.py
+++ b/imod/mf6/__init__.py
@@ -31,7 +31,8 @@
)
from imod.mf6.ist import ImmobileStorageTransfer
from imod.mf6.lak import Lake, LakeData, OutletManning, OutletSpecified, OutletWeir
-from imod.mf6.model import GroundwaterFlowModel, GroundwaterTransportModel
+from imod.mf6.model_gwf import GroundwaterFlowModel
+from imod.mf6.model_gwt import GroundwaterTransportModel
from imod.mf6.mst import MobileStorageTransfer
from imod.mf6.npf import NodePropertyFlow
from imod.mf6.oc import OutputControl
diff --git a/imod/mf6/gwfgwf.py b/imod/mf6/gwfgwf.py
index a580851a3..ef21fb0cc 100644
--- a/imod/mf6/gwfgwf.py
+++ b/imod/mf6/gwfgwf.py
@@ -1,10 +1,12 @@
from typing import Dict, Optional, Tuple
+import cftime
import numpy as np
import xarray as xr
from imod.mf6.auxiliary_variables import add_periodic_auxiliary_variable
from imod.mf6.package import Package
+from imod.typing import GridDataArray
class GWFGWF(Package):
@@ -90,14 +92,16 @@ def get_specification(self) -> Tuple[str, str, str, str]:
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- state_for_boundary=None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
) -> Package:
raise NotImplementedError("this package cannot be clipped")
diff --git a/imod/mf6/hfb.py b/imod/mf6/hfb.py
index 56a0a6094..1f8cdbfa8 100644
--- a/imod/mf6/hfb.py
+++ b/imod/mf6/hfb.py
@@ -4,8 +4,9 @@
import typing
from copy import deepcopy
from enum import Enum
-from typing import Tuple
+from typing import Optional, Tuple
+import cftime
import geopandas as gpd
import numpy as np
import shapely.wkt
@@ -23,7 +24,7 @@
from imod.typing import GridDataArray
-@typedispatch
+@typedispatch # type: ignore[no-redef]
def _derive_connected_cell_ids(
idomain: xr.DataArray, grid: xu.Ugrid2d, edge_index: np.ndarray
):
@@ -72,7 +73,7 @@ def _derive_connected_cell_ids(
return cell_ids
-@typedispatch
+@typedispatch # type: ignore[no-redef]
def _derive_connected_cell_ids(
_: xu.UgridDataArray, grid: xu.Ugrid2d, edge_index: np.ndarray
):
@@ -526,15 +527,17 @@ def _get_vertical_variables(self) -> list:
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- *args,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
) -> "HorizontalFlowBarrierBase":
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -558,6 +561,9 @@ def clip_box(
x_max: optional, float
y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
diff --git a/imod/mf6/model.py b/imod/mf6/model.py
index 4691da699..4f2df6fbc 100644
--- a/imod/mf6/model.py
+++ b/imod/mf6/model.py
@@ -6,11 +6,12 @@
import pathlib
from copy import deepcopy
from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
import cftime
import jinja2
import numpy as np
+import numpy.typing as npt
import tomli
import tomli_w
import xarray as xr
@@ -18,7 +19,6 @@
from jinja2 import Template
import imod
-from imod.mf6.clipped_boundary_condition_creator import create_clipped_boundary
from imod.mf6.package import Package
from imod.mf6.regridding_utils import RegridderInstancesCollection, RegridderType
from imod.mf6.statusinfo import NestedStatusInfo, StatusInfo, StatusInfoBase
@@ -36,8 +36,8 @@ def initialize_template(name: str) -> Template:
class Modflow6Model(collections.UserDict, abc.ABC):
- _mandatory_packages = None
- _model_id = None
+ _mandatory_packages: Tuple[str, ...] = ()
+ _model_id: Optional[str] = None
def __init__(self, **kwargs):
collections.UserDict.__init__(self)
@@ -226,7 +226,7 @@ def __write_well(
self,
wellpackage: Well,
pkg_name: str,
- globaltimes: np.ndarray[np.datetime64],
+ globaltimes: npt.NDArray[np.datetime64],
write_context: WriteContext,
validate: bool = True,
):
@@ -298,7 +298,7 @@ def dump(
if statusinfo.has_errors():
raise ValidationError(statusinfo.to_string())
- toml_content = collections.defaultdict(dict)
+ toml_content: Dict = collections.defaultdict(dict)
for pkgname, pkg in self.items():
pkg_path = f"{pkgname}.nc"
toml_content[type(pkg).__name__][pkgname] = pkg_path
@@ -342,12 +342,14 @@ def from_file(cls, toml_path):
@classmethod
def model_id(cls) -> str:
+ if cls._model_id is None:
+ raise ValueError("Model id has not been set")
return cls._model_id
def clip_box(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
@@ -356,18 +358,54 @@ def clip_box(
y_max: Optional[float] = None,
state_for_boundary: Optional[GridDataArray] = None,
):
- raise NotImplementedError
+ """
+ Clip a model by a bounding box (time, layer, y, x).
+
+ Slicing intervals may be half-bounded, by providing None:
+
+ * To select 500.0 <= x <= 1000.0:
+ ``clip_box(x_min=500.0, x_max=1000.0)``.
+ * To select x <= 1000.0: ``clip_box(x_min=None, x_max=1000.0)``
+ or ``clip_box(x_max=1000.0)``.
+ * To select x >= 500.0: ``clip_box(x_min = 500.0, x_max=None.0)``
+ or ``clip_box(x_min=1000.0)``.
+
+ Parameters
+ ----------
+ time_min: optional
+ time_max: optional
+ layer_min: optional, int
+ layer_max: optional, int
+ x_min: optional, float
+ x_max: optional, float
+ y_min: optional, float
+ y_max: optional, float
+ state_for_boundary :
+ """
+ clipped = self._clip_box_packages(
+ time_min,
+ time_max,
+ layer_min,
+ layer_max,
+ x_min,
+ x_max,
+ y_min,
+ y_max,
+ )
+
+ return clipped
def _clip_box_packages(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
x_max: Optional[float] = None,
y_min: Optional[float] = None,
y_max: Optional[float] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
):
"""
Clip a model by a bounding box (time, layer, y, x).
@@ -396,6 +434,9 @@ def _clip_box_packages(
-------
clipped : Modflow6Model
"""
+
+ top, bottom, idomain = self.__get_domain_geometry()
+
clipped = type(self)(**self._options)
for key, pkg in self.items():
clipped[key] = pkg.clip_box(
@@ -407,6 +448,9 @@ def _clip_box_packages(
x_max=x_max,
y_min=y_min,
y_max=y_max,
+ top=top,
+ bottom=bottom,
+ state_for_boundary=state_for_boundary,
)
return clipped
@@ -466,7 +510,7 @@ def _mask_all_packages(
for pkgname, pkg in self.items():
self[pkgname] = pkg.mask(domain)
- def purge_empty_packages(self, model_name: Optional[str] = "") -> Modflow6Model:
+ def purge_empty_packages(self, model_name: Optional[str] = "") -> None:
"""
This function removes empty packages from the model.
"""
@@ -511,6 +555,10 @@ def _get_regridding_domain(
included_in_all = regridded_idomain
else:
included_in_all = included_in_all.where(regridded_idomain.notnull())
+
+ if included_in_all is None:
+ raise ValueError("No regridder is able to regrid the domain")
+
new_idomain = included_in_all.where(included_in_all.notnull(), other=0)
new_idomain = new_idomain.astype(int)
@@ -535,169 +583,3 @@ def __repr__(self) -> str:
def is_use_newton(self):
return False
-
-
-class GroundwaterFlowModel(Modflow6Model):
- _mandatory_packages = ("npf", "ic", "oc", "sto")
- _model_id = "gwf6"
-
- def __init__(
- self,
- listing_file: str = None,
- print_input: bool = False,
- print_flows: bool = False,
- save_flows: bool = False,
- newton: bool = False,
- under_relaxation: bool = False,
- ):
- super().__init__()
- self._options = {
- "listing_file": listing_file,
- "print_input": print_input,
- "print_flows": print_flows,
- "save_flows": save_flows,
- "newton": newton,
- "under_relaxation": under_relaxation,
- }
- self._template = initialize_template("gwf-nam.j2")
-
- def _get_unique_regridder_types(self) -> Dict[RegridderType, str]:
- """
- This function loops over the packages and collects all regridder-types that are in use.
- Differences in associated functions are ignored. It focusses only on the types. So if a
- model uses both Overlap(mean) and Overlap(harmonic_mean), this function will return just one
- Overlap regridder: the first one found, in this case Overlap(mean)
- """
- methods = {}
- for pkg_name, pkg in self.items():
- if pkg.is_regridding_supported():
- pkg_methods = pkg.get_regrid_methods()
- for variable in pkg_methods:
- if (
- variable in pkg.dataset.data_vars
- and pkg.dataset[variable].values[()] is not None
- ):
- regriddertype = pkg_methods[variable][0]
- if regriddertype not in methods.keys():
- functiontype = pkg_methods[variable][1]
- methods[regriddertype] = functiontype
- else:
- raise NotImplementedError(
- f"regridding is not implemented for package {pkg_name} of type {type(pkg)}"
- )
- return methods
-
- def clip_box(
- self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
- layer_min: Optional[int] = None,
- layer_max: Optional[int] = None,
- x_min: Optional[float] = None,
- x_max: Optional[float] = None,
- y_min: Optional[float] = None,
- y_max: Optional[float] = None,
- state_for_boundary: Optional[GridDataArray] = None,
- ):
- clipped = super()._clip_box_packages(
- time_min, time_max, layer_min, layer_max, x_min, x_max, y_min, y_max
- )
-
- clipped_boundary_condition = self.__create_boundary_condition_clipped_boundary(
- self, clipped, state_for_boundary
- )
- if clipped_boundary_condition is not None:
- clipped["chd_clipped"] = clipped_boundary_condition
-
- clipped.purge_empty_packages()
- return clipped
-
- def __create_boundary_condition_clipped_boundary(
- self,
- original_model: Modflow6Model,
- clipped_model: Modflow6Model,
- state_for_boundary: Optional[GridDataArray],
- ):
- unassigned_boundary_original_domain = (
- self.__create_boundary_condition_for_unassigned_boundary(
- original_model, state_for_boundary
- )
- )
-
- return self.__create_boundary_condition_for_unassigned_boundary(
- clipped_model, state_for_boundary, [unassigned_boundary_original_domain]
- )
-
- @staticmethod
- def __create_boundary_condition_for_unassigned_boundary(
- model: Modflow6Model,
- state_for_boundary: Optional[GridDataArray],
- additional_boundaries: Optional[List[imod.mf6.ConstantHead]] = None,
- ):
- if state_for_boundary is None:
- return None
-
- constant_head_packages = [
- pkg for name, pkg in model.items() if isinstance(pkg, imod.mf6.ConstantHead)
- ]
-
- additional_boundaries = [
- item for item in additional_boundaries or [] if item is not None
- ]
-
- constant_head_packages.extend(additional_boundaries)
-
- return create_clipped_boundary(
- model.domain, state_for_boundary, constant_head_packages
- )
-
- def is_use_newton(self):
- return self._options["newton"]
-
- def set_newton(self, is_newton: bool) -> None:
- self._options["newton"] = is_newton
-
-
-class GroundwaterTransportModel(Modflow6Model):
- """
- The GroundwaterTransportModel (GWT) simulates transport of a single solute
- species flowing in groundwater.
- """
-
- _mandatory_packages = ("mst", "dsp", "oc", "ic")
- _model_id = "gwt6"
-
- def __init__(
- self,
- listing_file: str = None,
- print_input: bool = False,
- print_flows: bool = False,
- save_flows: bool = False,
- ):
- super().__init__()
- self._options = {
- "listing_file": listing_file,
- "print_input": print_input,
- "print_flows": print_flows,
- "save_flows": save_flows,
- }
-
- self._template = initialize_template("gwt-nam.j2")
-
- def clip_box(
- self,
- time_min: str = None,
- time_max: str = None,
- layer_min: int = None,
- layer_max: int = None,
- x_min: float = None,
- x_max: float = None,
- y_min: float = None,
- y_max: float = None,
- state_for_boundary: GridDataArray = None,
- ):
- clipped = super()._clip_box_packages(
- time_min, time_max, layer_min, layer_max, x_min, x_max, y_min, y_max
- )
-
- return clipped
diff --git a/imod/mf6/model_gwf.py b/imod/mf6/model_gwf.py
new file mode 100644
index 000000000..48e0ad19b
--- /dev/null
+++ b/imod/mf6/model_gwf.py
@@ -0,0 +1,134 @@
+from __future__ import annotations
+
+from typing import Dict, List, Optional
+
+import cftime
+import numpy as np
+
+from imod.mf6 import ConstantHead
+from imod.mf6.clipped_boundary_condition_creator import create_clipped_boundary
+from imod.mf6.model import Modflow6Model, initialize_template
+from imod.mf6.regridding_utils import RegridderType
+from imod.typing import GridDataArray
+
+
+class GroundwaterFlowModel(Modflow6Model):
+ _mandatory_packages = ("npf", "ic", "oc", "sto")
+ _model_id = "gwf6"
+
+ def __init__(
+ self,
+ listing_file: Optional[str] = None,
+ print_input: bool = False,
+ print_flows: bool = False,
+ save_flows: bool = False,
+ newton: bool = False,
+ under_relaxation: bool = False,
+ ):
+ super().__init__()
+ self._options = {
+ "listing_file": listing_file,
+ "print_input": print_input,
+ "print_flows": print_flows,
+ "save_flows": save_flows,
+ "newton": newton,
+ "under_relaxation": under_relaxation,
+ }
+ self._template = initialize_template("gwf-nam.j2")
+
+ def _get_unique_regridder_types(self) -> Dict[RegridderType, str]:
+ """
+ This function loops over the packages and collects all regridder-types that are in use.
+ Differences in associated functions are ignored. It focusses only on the types. So if a
+ model uses both Overlap(mean) and Overlap(harmonic_mean), this function will return just one
+ Overlap regridder: the first one found, in this case Overlap(mean)
+ """
+ methods: Dict[RegridderType, str] = {}
+ for pkg_name, pkg in self.items():
+ if pkg.is_regridding_supported():
+ pkg_methods = pkg.get_regrid_methods()
+ for variable in pkg_methods:
+ if (
+ variable in pkg.dataset.data_vars
+ and pkg.dataset[variable].values[()] is not None
+ ):
+ regriddertype = pkg_methods[variable][0]
+ if regriddertype not in methods.keys():
+ functiontype = pkg_methods[variable][1]
+ methods[regriddertype] = functiontype
+ else:
+ raise NotImplementedError(
+ f"regridding is not implemented for package {pkg_name} of type {type(pkg)}"
+ )
+ return methods
+
+ def clip_box(
+ self,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ):
+ clipped = super().clip_box(
+ time_min, time_max, layer_min, layer_max, x_min, x_max, y_min, y_max
+ )
+
+ clipped_boundary_condition = self.__create_boundary_condition_clipped_boundary(
+ self, clipped, state_for_boundary
+ )
+ if clipped_boundary_condition is not None:
+ clipped["chd_clipped"] = clipped_boundary_condition
+
+ clipped.purge_empty_packages()
+
+ return clipped
+
+ def __create_boundary_condition_clipped_boundary(
+ self,
+ original_model: Modflow6Model,
+ clipped_model: Modflow6Model,
+ state_for_boundary: Optional[GridDataArray],
+ ):
+ unassigned_boundary_original_domain = (
+ self.__create_boundary_condition_for_unassigned_boundary(
+ original_model, state_for_boundary
+ )
+ )
+
+ return self.__create_boundary_condition_for_unassigned_boundary(
+ clipped_model, state_for_boundary, [unassigned_boundary_original_domain]
+ )
+
+ @staticmethod
+ def __create_boundary_condition_for_unassigned_boundary(
+ model: Modflow6Model,
+ state_for_boundary: Optional[GridDataArray],
+ additional_boundaries: Optional[List[ConstantHead]] = None,
+ ):
+ if state_for_boundary is None:
+ return None
+
+ constant_head_packages = [
+ pkg for name, pkg in model.items() if isinstance(pkg, ConstantHead)
+ ]
+
+ additional_boundaries = [
+ item for item in additional_boundaries or [] if item is not None
+ ]
+
+ constant_head_packages.extend(additional_boundaries)
+
+ return create_clipped_boundary(
+ model.domain, state_for_boundary, constant_head_packages
+ )
+
+ def is_use_newton(self):
+ return self._options["newton"]
+
+ def set_newton(self, is_newton: bool) -> None:
+ self._options["newton"] = is_newton
diff --git a/imod/mf6/model_gwt.py b/imod/mf6/model_gwt.py
new file mode 100644
index 000000000..ad8ee5f07
--- /dev/null
+++ b/imod/mf6/model_gwt.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from typing import Optional
+
+from imod.mf6.model import Modflow6Model, initialize_template
+
+
+class GroundwaterTransportModel(Modflow6Model):
+ """
+ The GroundwaterTransportModel (GWT) simulates transport of a single solute
+ species flowing in groundwater.
+ """
+
+ _mandatory_packages = ("mst", "dsp", "oc", "ic")
+ _model_id = "gwt6"
+
+ def __init__(
+ self,
+ listing_file: Optional[str] = None,
+ print_input: bool = False,
+ print_flows: bool = False,
+ save_flows: bool = False,
+ ):
+ super().__init__()
+ self._options = {
+ "listing_file": listing_file,
+ "print_input": print_input,
+ "print_flows": print_flows,
+ "save_flows": save_flows,
+ }
+
+ self._template = initialize_template("gwt-nam.j2")
diff --git a/imod/mf6/multimodel/modelsplitter.py b/imod/mf6/multimodel/modelsplitter.py
index 830d492c0..aae7968cc 100644
--- a/imod/mf6/multimodel/modelsplitter.py
+++ b/imod/mf6/multimodel/modelsplitter.py
@@ -3,7 +3,8 @@
import numpy as np
from imod.mf6.hfb import HorizontalFlowBarrierBase
-from imod.mf6.model import GroundwaterFlowModel, Modflow6Model
+from imod.mf6.model import Modflow6Model
+from imod.mf6.model_gwf import GroundwaterFlowModel
from imod.mf6.utilities.clip import clip_by_grid
from imod.mf6.utilities.grid import get_active_domain_slice
from imod.mf6.wel import Well
diff --git a/imod/mf6/package.py b/imod/mf6/package.py
index 10625c17a..6d1342cf8 100644
--- a/imod/mf6/package.py
+++ b/imod/mf6/package.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import abc
import copy
import numbers
@@ -23,7 +25,12 @@
from imod.mf6.utilities.schemata import filter_schemata_dict
from imod.mf6.validation import validation_pkg_error_message
from imod.mf6.write_context import WriteContext
-from imod.schemata import AllNoDataSchema, EmptyIndexesSchema, ValidationError
+from imod.schemata import (
+ AllNoDataSchema,
+ EmptyIndexesSchema,
+ SchemaType,
+ ValidationError,
+)
from imod.typing import GridDataArray
@@ -41,8 +48,9 @@ class Package(PackageBase, abc.ABC):
"""
_pkg_id = ""
- _init_schemata = {}
- _write_schemata = {}
+ _init_schemata: Dict[str, List[SchemaType] | Tuple[SchemaType]] = {}
+ _write_schemata: Dict[str, List[SchemaType] | Tuple[SchemaType]] = {}
+ _keyword_map: Dict[str, str] = {}
def __init__(self, allargs=None):
super().__init__(allargs)
@@ -418,16 +426,18 @@ def __to_datetime(self, time, use_cftime):
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- state_for_boundary=None,
- ) -> "Package":
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -450,6 +460,10 @@ def clip_box(
x_max: optional, float
y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
+
Returns
-------
diff --git a/imod/mf6/simulation.py b/imod/mf6/simulation.py
index d029ffc25..425e3e7ef 100644
--- a/imod/mf6/simulation.py
+++ b/imod/mf6/simulation.py
@@ -8,6 +8,7 @@
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
+import cftime
import jinja2
import numpy as np
import tomli
@@ -17,11 +18,9 @@
import imod
from imod.mf6.gwfgwf import GWFGWF
-from imod.mf6.model import (
- GroundwaterFlowModel,
- GroundwaterTransportModel,
- Modflow6Model,
-)
+from imod.mf6.model import Modflow6Model
+from imod.mf6.model_gwf import GroundwaterFlowModel
+from imod.mf6.model_gwt import GroundwaterTransportModel
from imod.mf6.multimodel.exchange_creator_structured import ExchangeCreator_Structured
from imod.mf6.multimodel.exchange_creator_unstructured import (
ExchangeCreator_Unstructured,
@@ -783,8 +782,8 @@ def get_models_of_type(self, modeltype):
def clip_box(
self,
- time_min: Optional[str] = None,
- time_max: Optional[str] = None,
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
layer_min: Optional[int] = None,
layer_max: Optional[int] = None,
x_min: Optional[float] = None,
@@ -792,7 +791,7 @@ def clip_box(
y_min: Optional[float] = None,
y_max: Optional[float] = None,
states_for_boundary: Optional[dict[str, GridDataArray]] = None,
- ) -> "Modflow6Simulation":
+ ) -> Modflow6Simulation:
"""
Clip a simulation by a bounding box (time, layer, y, x).
diff --git a/imod/mf6/wel.py b/imod/mf6/wel.py
index 6a118a9ed..c86f91f13 100644
--- a/imod/mf6/wel.py
+++ b/imod/mf6/wel.py
@@ -1,14 +1,16 @@
from __future__ import annotations
import warnings
-from typing import Dict, List, Union
+from typing import Any, List, Optional, Tuple, Union
+import cftime
import numpy as np
+import numpy.typing as npt
import pandas as pd
import xarray as xr
import xugrid as xu
-from numpy import ndarray
+import imod
from imod.mf6.auxiliary_variables import add_periodic_auxiliary_variable
from imod.mf6.boundary_condition import (
BoundaryCondition,
@@ -17,18 +19,20 @@
)
from imod.mf6.interfaces.ipointdatapackage import IPointDataPackage
from imod.mf6.mf6_wel_adapter import Mf6Wel
+from imod.mf6.package import Package
from imod.mf6.utilities.clip import clip_by_grid
from imod.mf6.utilities.dataset import remove_inactive
+from imod.mf6.utilities.grid import create_layered_top
from imod.mf6.write_context import WriteContext
from imod.prepare import assign_wells
from imod.schemata import AllNoDataSchema, DTypeSchema
from imod.select.points import points_indices, points_values
from imod.typing import GridDataArray
-from imod.typing.grid import ones_like
+from imod.typing.grid import is_spatial_2D, ones_like
from imod.util import values_within_range
-def _assign_dims(arg) -> Dict:
+def _assign_dims(arg: Any) -> Tuple | xr.DataArray:
is_da = isinstance(arg, xr.DataArray)
if is_da and "time" in arg.coords:
if arg.ndim != 2:
@@ -40,9 +44,9 @@ def _assign_dims(arg) -> Dict:
)
return da
elif is_da:
- return ("index", arg.values)
+ return "index", arg.values
else:
- return ("index", arg)
+ return "index", arg
def mask_2D(package: Well, domain_2d: GridDataArray) -> Well:
@@ -121,11 +125,11 @@ class Well(BoundaryCondition, IPointDataPackage):
"""
@property
- def x(self) -> ndarray[float]:
+ def x(self) -> npt.NDArray[float]:
return self.dataset["x"].values
@property
- def y(self) -> ndarray[float]:
+ def y(self) -> npt.NDArray[float]:
return self.dataset["y"].values
_pkg_id = "wel"
@@ -148,22 +152,22 @@ def y(self) -> ndarray[float]:
def __init__(
self,
- x,
- y,
- screen_top,
- screen_bottom,
- rate,
- concentration=None,
+ x: List[float],
+ y: List[float],
+ screen_top: List[float],
+ screen_bottom: List[float],
+ rate: List[float],
+ concentration: Optional[List[float] | xr.DataArray] = None,
concentration_boundary_type="aux",
- id=None,
- minimum_k=0.1,
- minimum_thickness=1.0,
- print_input=False,
- print_flows=False,
- save_flows=False,
+ id: Optional[List[int]] = None,
+ minimum_k: float = 0.1,
+ minimum_thickness: float = 1.0,
+ print_input: bool = False,
+ print_flows: bool = False,
+ save_flows: bool = False,
observations=None,
validate: bool = True,
- repeat_stress=None,
+ repeat_stress: Optional[xr.DataArray] = None,
):
super().__init__()
self.dataset["screen_top"] = _assign_dims(screen_top)
@@ -194,18 +198,27 @@ def is_grid_agnostic_package(cls) -> bool:
def clip_box(
self,
- time_min=None,
- time_max=None,
- z_min=None,
- z_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- ) -> Well:
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
+ The well package doesn't use the layer attribute to describe its depth and length.
+ Instead, it uses the screen_top and screen_bottom parameters which corresponds with
+ the z-coordinates of the top and bottom of the well. To go from a layer_min and
+ layer_max to z-values used for clipping the well a top and bottom array have to be
+ provided as well.
+
Slicing intervals may be half-bounded, by providing None:
* To select 500.0 <= x <= 1000.0:
@@ -219,40 +232,78 @@ def clip_box(
----------
time_min: optional
time_max: optional
- z_min: optional, float
- z_max: optional, float
+ layer_min: optional, int
+ layer_max: optional, int
x_min: optional, float
x_max: optional, float
y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
sliced : Package
"""
+ if (layer_max or layer_min) and (top is None or bottom is None):
+ raise ValueError(
+ "When clipping by layer both the top and bottom should be defined"
+ )
+
+ if top is not None:
+ if not isinstance(top, GridDataArray) or "layer" not in top.coords:
+ top = create_layered_top(bottom, top)
# The super method will select in the time dimension without issues.
new = super().clip_box(time_min=time_min, time_max=time_max)
ds = new.dataset
+ z_max = self._find_well_value_at_layer(ds, top, layer_max)
+ z_min = self._find_well_value_at_layer(ds, bottom, layer_min)
+
+ if z_max is not None:
+ ds["screen_top"] = ds["screen_top"].clip(None, z_max)
+ if z_min is not None:
+ ds["screen_bottom"] = ds["screen_bottom"].clip(z_min, None)
+
# Initiate array of True with right shape to deal with case no spatial
# selection needs to be done.
in_bounds = np.full(ds.dims["index"], True)
# Select all variables along "index" dimension
in_bounds &= values_within_range(ds["x"], x_min, x_max)
in_bounds &= values_within_range(ds["y"], y_min, y_max)
- in_bounds &= values_within_range(ds["screen_top"], None, z_max)
- in_bounds &= values_within_range(ds["screen_bottom"], z_min, None)
+ in_bounds &= values_within_range(ds["screen_top"], z_min, z_max)
+ in_bounds &= values_within_range(ds["screen_bottom"], z_min, z_max)
+ # remove wells where the screen bottom and top are the same
+ in_bounds &= abs(ds["screen_bottom"] - ds["screen_top"]) > 1e-5
# Replace dataset with reduced dataset based on booleans
new.dataset = ds.loc[{"index": in_bounds}]
return new
+ @staticmethod
+ def _find_well_value_at_layer(
+ well_dataset: xr.Dataset, grid: GridDataArray, layer: int
+ ):
+ value = None if layer is None else grid.isel(layer=layer)
+
+ # if value is a grid select the values at the well locations and drop the dimensions
+ if (value is not None) and is_spatial_2D(value):
+ value = imod.select.points_values(
+ value,
+ x=well_dataset["x"].values,
+ y=well_dataset["y"].values,
+ out_of_bounds="ignore",
+ ).drop_vars(lambda x: x.coords)
+
+ return value
+
def write(
self,
pkgname: str,
- globaltimes: np.ndarray[np.datetime64],
+ globaltimes: npt.NDArray[np.datetime64],
validate: bool,
write_context: WriteContext,
idomain: Union[xr.DataArray, xu.UgridDataArray],
@@ -316,7 +367,7 @@ def __create_assigned_wells(
def __create_dataset_vars(
self, wells_assigned: pd.DataFrame, wells_df: pd.DataFrame, cellid: xr.DataArray
- ) -> list:
+ ) -> xr.Dataset:
"""
Create dataset with all variables (rate, concentration), with a similar shape as the cellids.
"""
@@ -639,15 +690,18 @@ def __init__(
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- ) -> "WellDisStructured":
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -667,9 +721,12 @@ def clip_box(
layer_min: optional, int
layer_max: optional, int
x_min: optional, float
- x_min: optional, float
- y_max: optional, float
+ x_max: optional, float
+ y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
@@ -790,15 +847,18 @@ def __init__(
def clip_box(
self,
- time_min=None,
- time_max=None,
- layer_min=None,
- layer_max=None,
- x_min=None,
- x_max=None,
- y_min=None,
- y_max=None,
- ) -> "WellDisStructured":
+ time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
+ time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
+ layer_min: Optional[int] = None,
+ layer_max: Optional[int] = None,
+ x_min: Optional[float] = None,
+ x_max: Optional[float] = None,
+ y_min: Optional[float] = None,
+ y_max: Optional[float] = None,
+ top: Optional[GridDataArray] = None,
+ bottom: Optional[GridDataArray] = None,
+ state_for_boundary: Optional[GridDataArray] = None,
+ ) -> Package:
"""
Clip a package by a bounding box (time, layer, y, x).
@@ -818,9 +878,12 @@ def clip_box(
layer_min: optional, int
layer_max: optional, int
x_min: optional, float
- x_min: optional, float
- y_max: optional, float
+ x_max: optional, float
+ y_min: optional, float
y_max: optional, float
+ top: optional, GridDataArray
+ bottom: optional, GridDataArray
+ state_for_boundary: optional, GridDataArray
Returns
-------
diff --git a/imod/schemata.py b/imod/schemata.py
index 1405d4ca8..1e22316a6 100644
--- a/imod/schemata.py
+++ b/imod/schemata.py
@@ -34,7 +34,7 @@
import abc
import operator
from functools import partial
-from typing import Any, Callable, Dict, Optional, Tuple, Union
+from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, Union
import numpy as np
import scipy
@@ -46,7 +46,6 @@
ShapeT = Tuple[Union[int, None]]
ChunksT = Union[bool, Dict[str, Union[int, None]]]
-
OPERATORS = {
"<": operator.lt,
"<=": operator.le,
@@ -96,6 +95,9 @@ def __or__(self, other):
return SchemaUnion(self, other)
+SchemaType = TypeVar("SchemaType", bound=BaseSchema)
+
+
class SchemaUnion:
"""
Succesful validation only requires a single succes.
diff --git a/imod/util.py b/imod/util.py
index f69918e3b..e29c9882f 100644
--- a/imod/util.py
+++ b/imod/util.py
@@ -20,7 +20,7 @@
import re
import tempfile
import warnings
-from typing import Any, Dict, List, Sequence, Tuple, Union
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import affine
import cftime
@@ -1159,7 +1159,9 @@ def _replace(
)
-def values_within_range(da, min=None, max=None):
+def values_within_range(
+ da: xr.DataArray, min: Optional[float] = None, max: Optional[float] = None
+) -> xr.DataArray | bool:
"""
Find which values are within range.
Function checks which values are unaffected by the clip method, to
diff --git a/pyproject.toml b/pyproject.toml
index baee70b80..394264a77 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -138,6 +138,7 @@ module = [
"contextily.*",
"dateutil.*",
"flopy.*",
+ "fastcore.*",
"geopandas.*",
"matplotlib.*",
"mpl_toolkits.*",
| Fix clip_box arguments in the well package
Move the GroundWaterFlow a GrounWaterTransport models to separate files
In GitLab by @Manangka on Dec 7, 2023, 1:36
| 2024-01-10T15:44:21 | 0.0 | [] | [] |
|||
Deltares/imod-python | Deltares__imod-python-715 | 43aa56925f0521a9cdc5fb745f9d9046ab0b6a23 | diff --git a/imod/data/sample_data.py b/imod/data/sample_data.py
index 9569870d9..ef74b933e 100644
--- a/imod/data/sample_data.py
+++ b/imod/data/sample_data.py
@@ -64,7 +64,11 @@ def hondsrug_drainage() -> xr.Dataset:
def head_observations() -> pd.DataFrame:
fname = REGISTRY.fetch("head-observations.csv")
- return pd.read_csv(fname)
+ df = pd.read_csv(fname)
+ # Manually convert time column to datetime type because pandas >2.0 doesn't
+ # do this automatically anymore upon reading.
+ df["time"] = pd.to_datetime(df["time"])
+ return df
def fluxes() -> xr.Dataset:
diff --git a/imod/prepare/spatial.py b/imod/prepare/spatial.py
index eaff304c0..c4763818f 100644
--- a/imod/prepare/spatial.py
+++ b/imod/prepare/spatial.py
@@ -896,7 +896,9 @@ def _zonal_aggregate_raster(
# This may result in areas significantly smaller than the polygon geometry,
# but should come in handy for weighting later?
df = df[df["data"].notnull()]
- result = df.groupby(column, as_index=False).agg(["count", method]).reset_index()
+ result = df.groupby(column, as_index=False).agg(["count", method])
+ # Reset index to set "column" as column again, make sure index is dropped
+ result = result.reset_index(drop=True)
# Compute the area from the counted number of cells
result["data", "count"] *= resolution * resolution
name = raster.name if raster.name else "aggregated"
@@ -946,7 +948,9 @@ def _zonal_aggregate_polygons(
# Remove entries where the raster has nodata.
# This may result in areas significantly smaller than the polygon geometry,
# but should come in handy for weighting later?
- result = df.groupby(column_a, as_index=False).agg(["count", method]).reset_index()
+ result = df.groupby(column_a, as_index=False).agg(["count", method])
+ # Reset index to set "column_a" as column again, make sure index is dropped
+ result = result.reset_index(drop=True)
# Compute the area from the counted number of cells
result[column_b, "count"] *= resolution * resolution
result.columns = [column_a, "area", column_b]
diff --git a/pixi.lock b/pixi.lock
index 5952a1fd4..96e87266d 100644
--- a/pixi.lock
+++ b/pixi.lock
@@ -799,188 +799,188 @@ package:
timestamp: 1660065501192
- platform: linux-64
name: attrs
- version: 23.1.0
+ version: 23.2.0
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.1.0-pyh71513ae_1.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.2.0-pyh71513ae_0.conda
hash:
- md5: 3edfead7cedd1ab4400a6c588f3e75f8
- sha256: 063639cd568f5c7a557b0fb1cc27f098598c0d8ff869088bfeb82934674f8821
- build: pyh71513ae_1
+ md5: 5e4c0743c70186509d1412e03c2d8dfa
+ sha256: 77c7d03bdb243a048fff398cedc74327b7dc79169ebe3b4c8448b0331ea55fea
+ build: pyh71513ae_0
arch: x86_64
subdir: linux-64
- build_number: 1
+ build_number: 0
license: MIT
license_family: MIT
noarch: python
- size: 55022
- timestamp: 1683424195402
+ size: 54582
+ timestamp: 1704011393776
purls:
- pkg:pypi/attrs
- platform: osx-64
name: attrs
- version: 23.1.0
+ version: 23.2.0
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.1.0-pyh71513ae_1.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.2.0-pyh71513ae_0.conda
hash:
- md5: 3edfead7cedd1ab4400a6c588f3e75f8
- sha256: 063639cd568f5c7a557b0fb1cc27f098598c0d8ff869088bfeb82934674f8821
- build: pyh71513ae_1
+ md5: 5e4c0743c70186509d1412e03c2d8dfa
+ sha256: 77c7d03bdb243a048fff398cedc74327b7dc79169ebe3b4c8448b0331ea55fea
+ build: pyh71513ae_0
arch: x86_64
subdir: osx-64
- build_number: 1
+ build_number: 0
license: MIT
license_family: MIT
noarch: python
- size: 55022
- timestamp: 1683424195402
+ size: 54582
+ timestamp: 1704011393776
purls:
- pkg:pypi/attrs
- platform: osx-arm64
name: attrs
- version: 23.1.0
+ version: 23.2.0
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.1.0-pyh71513ae_1.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.2.0-pyh71513ae_0.conda
hash:
- md5: 3edfead7cedd1ab4400a6c588f3e75f8
- sha256: 063639cd568f5c7a557b0fb1cc27f098598c0d8ff869088bfeb82934674f8821
- build: pyh71513ae_1
+ md5: 5e4c0743c70186509d1412e03c2d8dfa
+ sha256: 77c7d03bdb243a048fff398cedc74327b7dc79169ebe3b4c8448b0331ea55fea
+ build: pyh71513ae_0
arch: aarch64
subdir: osx-arm64
- build_number: 1
+ build_number: 0
license: MIT
license_family: MIT
noarch: python
- size: 55022
- timestamp: 1683424195402
+ size: 54582
+ timestamp: 1704011393776
purls:
- pkg:pypi/attrs
- platform: win-64
name: attrs
- version: 23.1.0
+ version: 23.2.0
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.1.0-pyh71513ae_1.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/attrs-23.2.0-pyh71513ae_0.conda
hash:
- md5: 3edfead7cedd1ab4400a6c588f3e75f8
- sha256: 063639cd568f5c7a557b0fb1cc27f098598c0d8ff869088bfeb82934674f8821
- build: pyh71513ae_1
+ md5: 5e4c0743c70186509d1412e03c2d8dfa
+ sha256: 77c7d03bdb243a048fff398cedc74327b7dc79169ebe3b4c8448b0331ea55fea
+ build: pyh71513ae_0
arch: x86_64
subdir: win-64
- build_number: 1
+ build_number: 0
license: MIT
license_family: MIT
noarch: python
- size: 55022
- timestamp: 1683424195402
+ size: 54582
+ timestamp: 1704011393776
purls:
- pkg:pypi/attrs
- platform: linux-64
name: aws-c-auth
- version: 0.7.8
+ version: 0.7.10
category: main
manager: conda
dependencies:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- libgcc-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.7.8-hcf8cf63_3.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.7.10-h0100c56_1.conda
hash:
- md5: 8634ef2e79e9c8065fd69ead7902d27c
- sha256: b1cb74ebd72cf3b2aeb7e17697ef1404cd6911b149af8c63a6432c810943c4c0
- build: hcf8cf63_3
+ md5: 00f2ad7c8c2fb1da92d977e295db497f
+ sha256: c822e61835f090e0fc321817d722a6053faca660057c33bd95954d80c679f36a
+ build: h0100c56_1
arch: x86_64
subdir: linux-64
- build_number: 3
+ build_number: 1
license: Apache-2.0
license_family: Apache
- size: 102855
- timestamp: 1703165528993
+ size: 102789
+ timestamp: 1704305750207
- platform: osx-64
name: aws-c-auth
- version: 0.7.8
+ version: 0.7.10
category: main
manager: conda
dependencies:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-auth-0.7.8-ha83ec0a_3.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-auth-0.7.10-h5ed86db_1.conda
hash:
- md5: 2f16f127874ecb4f062cbcb6e8bd7380
- sha256: dd16042873d727c17ee66fad5f4c6c4d084dfc1802f6d05884fe2b537492f39d
- build: ha83ec0a_3
+ md5: d73780e88fdb08b65d2feac9d3146489
+ sha256: 120cf938c27b84a1c097fe5d1cecd1795f1a931c1927b4d8a660e3ae4c51c612
+ build: h5ed86db_1
arch: x86_64
subdir: osx-64
- build_number: 3
+ build_number: 1
license: Apache-2.0
license_family: Apache
- size: 90256
- timestamp: 1703166007544
+ size: 89858
+ timestamp: 1704306024343
- platform: osx-arm64
name: aws-c-auth
- version: 0.7.8
+ version: 0.7.10
category: main
manager: conda
dependencies:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.7.8-h251af53_3.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.7.10-h8e8137d_1.conda
hash:
- md5: 01906ebab8cbeb02a1e06a6440dd9cee
- sha256: 46bb596905aeb026f9221aecb7f3edb2641c2b41d18d0a8ad8de9790cf893ab9
- build: h251af53_3
+ md5: cc56487f52b54dd9c1cccdc24f0249ea
+ sha256: adfad1ddb4aad1cb2b0a83971cd80c82910cdcd092ef52e137a0c571a6d89f56
+ build: h8e8137d_1
arch: aarch64
subdir: osx-arm64
- build_number: 3
+ build_number: 1
license: Apache-2.0
license_family: Apache
- size: 88970
- timestamp: 1703165894117
+ size: 89155
+ timestamp: 1704306064106
- platform: win-64
name: aws-c-auth
- version: 0.7.8
+ version: 0.7.10
category: main
manager: conda
dependencies:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.7.8-h6bf0135_3.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.7.10-h9ca94be_1.conda
hash:
- md5: edaf1616bedf390be016227042f64bbe
- sha256: 63cba07bd9fcb3fbf6a1147527c39c3dad80ce26818c96ef3ab131de76fdf6a8
- build: h6bf0135_3
+ md5: e1eac5752ab1e8e6c7337a3d7d67d369
+ sha256: 66d6fa8cd3f2ad8525776dc1b31ce2174c05db72114c41232dba49f86a9b0ffd
+ build: h9ca94be_1
arch: x86_64
subdir: win-64
- build_number: 3
+ build_number: 1
license: Apache-2.0
license_family: Apache
- size: 98993
- timestamp: 1703166164182
+ size: 98828
+ timestamp: 1704306645080
- platform: linux-64
name: aws-c-cal
version: 0.6.9
@@ -1220,7 +1220,7 @@ package:
timestamp: 1701212989240
- platform: linux-64
name: aws-c-event-stream
- version: 0.3.2
+ version: 0.4.0
category: main
manager: conda
dependencies:
@@ -1229,67 +1229,65 @@ package:
- aws-checksums >=0.1.17,<0.1.18.0a0
- libgcc-ng >=12
- libstdcxx-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-event-stream-0.3.2-h0bcb0bb_8.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-event-stream-0.4.0-h0bcb0bb_0.conda
hash:
- md5: 21dafb60b5854f82b196f32e5857dec6
- sha256: d2855cd791a95648ac773aa6561c61f9e77450f123c8aa82eea1d66e90d5bfb1
- build: h0bcb0bb_8
+ md5: 9bbc75881d8fe9a6803a8c5a0432efaa
+ sha256: d17a3e562f3166cbff44b7e4e0682a3a62102de3f55b8ac5f947a6e9a74023d3
+ build: h0bcb0bb_0
arch: x86_64
subdir: linux-64
- build_number: 8
+ build_number: 0
license: Apache-2.0
license_family: Apache
- size: 54136
- timestamp: 1701263274039
+ size: 53782
+ timestamp: 1703907019941
- platform: osx-64
name: aws-c-event-stream
- version: 0.3.2
+ version: 0.4.0
category: main
manager: conda
dependencies:
- - __osx >=10.9
- aws-c-common >=0.9.10,<0.9.11.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- - libcxx >=16.0.6
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-event-stream-0.3.2-hbc2660c_8.conda
+ - libcxx >=15
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-event-stream-0.4.0-h451788f_0.conda
hash:
- md5: ffada75190a10b0d60271b147fd877a4
- sha256: d57c92483040d58b6653b0273a1a2570cc79ff5eb8ac2c2b18a85099e7576983
- build: hbc2660c_8
+ md5: 1ae42d84a8228e5068325cb407c0ad9f
+ sha256: a6538855dd93800c2e7235875e3616cd10dc6d0fa1b6993d2c4c0ce3b9f72b57
+ build: h451788f_0
arch: x86_64
subdir: osx-64
- build_number: 8
+ build_number: 0
license: Apache-2.0
license_family: Apache
- size: 47252
- timestamp: 1701263477904
+ size: 46862
+ timestamp: 1703907246896
- platform: osx-arm64
name: aws-c-event-stream
- version: 0.3.2
+ version: 0.4.0
category: main
manager: conda
dependencies:
- - __osx >=10.9
- aws-c-common >=0.9.10,<0.9.11.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- - libcxx >=16.0.6
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-event-stream-0.3.2-hb5e90b3_8.conda
+ - libcxx >=15
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-event-stream-0.4.0-h0ed9fda_0.conda
hash:
- md5: 43263bfb2e9a2cc4162bcf90ce9c73b9
- sha256: c9495e5133feacacef792a8f3101f1e44464144c603fccd7f1b6262975983094
- build: hb5e90b3_8
+ md5: c55732ba808815f3a03d2b5794134280
+ sha256: 2e575e82e96e4a51dac746788b020934614d852a2a835a3d7e059676fac9e14b
+ build: h0ed9fda_0
arch: aarch64
subdir: osx-arm64
- build_number: 8
+ build_number: 0
license: Apache-2.0
license_family: Apache
- size: 47429
- timestamp: 1701263443184
+ size: 46786
+ timestamp: 1703907291564
- platform: win-64
name: aws-c-event-stream
- version: 0.3.2
+ version: 0.4.0
category: main
manager: conda
dependencies:
@@ -1299,21 +1297,21 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-c-event-stream-0.3.2-h51e6447_8.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-c-event-stream-0.4.0-h51e6447_0.conda
hash:
- md5: 3baf64c4f15d5e0968b3a2325b2ffa12
- sha256: 2ee92a2b343bb455f730315f3510375b03ed65e51a8c3e130a0bed64508c1f4b
- build: h51e6447_8
+ md5: 800d13259e8dc2b433a58b0cbbe8b583
+ sha256: 2670ae79526739fc52c7c765b0de61799dc3ca8f0eee107b8e72acc929fbe1b0
+ build: h51e6447_0
arch: x86_64
subdir: win-64
- build_number: 8
+ build_number: 0
license: Apache-2.0
license_family: Apache
- size: 54737
- timestamp: 1701263880350
+ size: 55245
+ timestamp: 1703907564028
- platform: linux-64
name: aws-c-http
- version: 0.7.15
+ version: 0.8.0
category: main
manager: conda
dependencies:
@@ -1322,21 +1320,21 @@ package:
- aws-c-compression >=0.2.17,<0.2.18.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- libgcc-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.7.15-hd268abd_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.8.0-hd268abd_0.conda
hash:
- md5: 7fe29e473e2d4491610ccb51285218c9
- sha256: 8c1bafb14d320e395f27fa5d3f9f5bb41ab0bd0a1659c90c3dd4294cf562781b
+ md5: 1599ecff110d53e20423f7849eec49d5
+ sha256: 4ec9f1d427587e57dcd5777a6aa38efd3b9492023b2ea10ba3ff2b3ef288b3a5
build: hd268abd_0
arch: x86_64
subdir: linux-64
build_number: 0
license: Apache-2.0
license_family: Apache
- size: 194897
- timestamp: 1703141370645
+ size: 195138
+ timestamp: 1703907239593
- platform: osx-64
name: aws-c-http
- version: 0.7.15
+ version: 0.8.0
category: main
manager: conda
dependencies:
@@ -1344,21 +1342,21 @@ package:
- aws-c-common >=0.9.10,<0.9.11.0a0
- aws-c-compression >=0.2.17,<0.2.18.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-http-0.7.15-h1fa4523_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-http-0.8.0-h1fa4523_0.conda
hash:
- md5: d569f6aabb7ba3d3e207c61a8b9a0528
- sha256: 5a61f144e140d5f6f572aa814d51d58bb4948c523a9ff40062369ea2e1474ef2
+ md5: b1e224c2d021d01fcb7a9320ecd3c8af
+ sha256: afb48dedc10cb9cab50aee069bc0232190928f65416bad814d07d77d7305da79
build: h1fa4523_0
arch: x86_64
subdir: osx-64
build_number: 0
license: Apache-2.0
license_family: Apache
- size: 162902
- timestamp: 1703141713315
+ size: 162823
+ timestamp: 1703907372701
- platform: osx-arm64
name: aws-c-http
- version: 0.7.15
+ version: 0.8.0
category: main
manager: conda
dependencies:
@@ -1366,21 +1364,21 @@ package:
- aws-c-common >=0.9.10,<0.9.11.0a0
- aws-c-compression >=0.2.17,<0.2.18.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.7.15-hd747585_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.8.0-hd747585_0.conda
hash:
- md5: ffe1c31bad43c5b89d0b408ebf487a79
- sha256: a70aa5372100e44a38349fbe82b1528c52e9808d7113123fc672f626e00b5a6a
+ md5: 506d80c1882e0368b58e0e348469a452
+ sha256: d4757939742d600180f85827801f486ba53208b9216780e0d741cebd4cdd5546
build: hd747585_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: Apache-2.0
license_family: Apache
- size: 150982
- timestamp: 1703141639387
+ size: 151658
+ timestamp: 1703907442170
- platform: win-64
name: aws-c-http
- version: 0.7.15
+ version: 0.8.0
category: main
manager: conda
dependencies:
@@ -1391,18 +1389,18 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.7.15-h80119a0_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.8.0-h80119a0_0.conda
hash:
- md5: 66e69e2fcc21defb3ac75c1196d98ccb
- sha256: 849d1ccbe7d5e79eb68ce5c3cb8c6ee9911e98c98d048326f0e66df6c6234bef
+ md5: b6bdba53d4f89d5b74009499bddd80b2
+ sha256: 0705fde59a6f3894d53fb597c79ad0514cbd245b6f06fec71b0a801e123563de
build: h80119a0_0
arch: x86_64
subdir: win-64
build_number: 0
license: Apache-2.0
license_family: Apache
- size: 180391
- timestamp: 1703141823530
+ size: 180505
+ timestamp: 1703907512117
- platform: linux-64
name: aws-c-io
version: 0.13.36
@@ -1412,19 +1410,19 @@ package:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- libgcc-ng >=12
- - s2n >=1.4.0,<1.4.1.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.13.36-he0cd244_2.conda
+ - s2n >=1.4.1,<1.4.2.0a0
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.13.36-hb3b01f7_3.conda
hash:
- md5: c930336aa72995f1b5459b51df3ba841
- sha256: 7426f7444cd43cd7a649670c7330c163b40f40aa832e82be873d9de91e49b05e
- build: he0cd244_2
+ md5: e699c37931ecbb452a6d074c1c738b07
+ sha256: ed17d57f20e62b77680d91eec3b98ce4f208ad58deb87c1ca6114942ecb9aecd
+ build: hb3b01f7_3
arch: x86_64
subdir: linux-64
- build_number: 2
+ build_number: 3
license: Apache-2.0
license_family: Apache
- size: 156952
- timestamp: 1702039096578
+ size: 156739
+ timestamp: 1703277263617
- platform: osx-64
name: aws-c-io
version: 0.13.36
@@ -1433,18 +1431,18 @@ package:
dependencies:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-io-0.13.36-h3728bb0_2.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-io-0.13.36-h3728bb0_3.conda
hash:
- md5: 4074a589e94ea9703a7dd067c65e96b7
- sha256: 9f7e4a703b69a990f2f3719cdfb10b0dfa59b7e593e5dc1cc58ff08fb6db57e4
- build: h3728bb0_2
+ md5: 5fb516f968b8465843147e70502b3b30
+ sha256: 6e6698f3f7ccc3b7adc4eff3c1fc0f01f28960a67b7cfbd8ada2f9e1e5aae957
+ build: h3728bb0_3
arch: x86_64
subdir: osx-64
- build_number: 2
+ build_number: 3
license: Apache-2.0
license_family: Apache
- size: 136726
- timestamp: 1702039476503
+ size: 137884
+ timestamp: 1703277470793
- platform: osx-arm64
name: aws-c-io
version: 0.13.36
@@ -1453,18 +1451,18 @@ package:
dependencies:
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.13.36-h1112932_2.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.13.36-h1112932_3.conda
hash:
- md5: f1cc3b657bd5c4ba8e9f9ea0bd526478
- sha256: f4cbb1bbe3afc2391d8a15cd89549a700bb7f066bb4056283dbea3317be63925
- build: h1112932_2
+ md5: 732c739df5de131665801871650c2399
+ sha256: 8770803baa7323891abcf892e36abce13d6f67d675ebe91ab538621f90e8319f
+ build: h1112932_3
arch: aarch64
subdir: osx-arm64
- build_number: 2
+ build_number: 3
license: Apache-2.0
license_family: Apache
- size: 136652
- timestamp: 1702039441322
+ size: 136832
+ timestamp: 1703277731454
- platform: win-64
name: aws-c-io
version: 0.13.36
@@ -1476,18 +1474,18 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.13.36-ha737126_2.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.13.36-ha737126_3.conda
hash:
- md5: 283afac329a0042f08da0d2aff8d6061
- sha256: 2c993d2c10602e5e89f41017e313712fd81523c7021b1ad5c39b83011bbdfb9c
- build: ha737126_2
+ md5: 615c52e4ca99d6f05115bdcaf7989e4a
+ sha256: 909ab6d661cdea988cccc501d0920759ad436d73f2e29b0b94898812b78a61f0
+ build: ha737126_3
arch: x86_64
subdir: win-64
- build_number: 2
+ build_number: 3
license: Apache-2.0
license_family: Apache
- size: 159221
- timestamp: 1702039600056
+ size: 158048
+ timestamp: 1703277767799
- platform: linux-64
name: aws-c-mqtt
version: 0.10.0
@@ -1495,21 +1493,21 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- libgcc-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-mqtt-0.10.0-hbafccad_1.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-mqtt-0.10.0-hf5d392a_2.conda
hash:
- md5: b587eb2e3fa4cabcf5b6ea7fd06f1043
- sha256: 6ef1dce310845f3e16b83d3143acf2980a0aaf920d8ba73d2cc67a3f4e4d6307
- build: hbafccad_1
+ md5: 18eb32f275d7294045298f69fbed6ad1
+ sha256: 3fcd61a58aaeadcc6be2af3a3014647d72d78f3e0dcfa34d34d36c7363f465c9
+ build: hf5d392a_2
arch: x86_64
subdir: linux-64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 163849
- timestamp: 1703166300210
+ size: 163855
+ timestamp: 1704306435642
- platform: osx-64
name: aws-c-mqtt
version: 0.10.0
@@ -1517,20 +1515,20 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-mqtt-0.10.0-ha95a165_1.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-mqtt-0.10.0-hdd2773f_2.conda
hash:
- md5: 541826cab7e772f0c44446dae082a799
- sha256: e6a02fbc80073b80735dadb288016f403d970a0aa3f4ec3346cb375816e7f9b8
- build: ha95a165_1
+ md5: a8f39569f96c7aa29df8719bd221c37b
+ sha256: e08fa33b2a8e2d912524b9bb58a9cb243665e44735fddd885b6a1d9623168cc8
+ build: hdd2773f_2
arch: x86_64
subdir: osx-64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 138818
- timestamp: 1703166557285
+ size: 138442
+ timestamp: 1704306696839
- platform: osx-arm64
name: aws-c-mqtt
version: 0.10.0
@@ -1538,20 +1536,20 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-mqtt-0.10.0-h3e71869_1.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-mqtt-0.10.0-hefd2eba_2.conda
hash:
- md5: b7c14b80d7792cf657fe475998f702af
- sha256: 798f665778ad548f24a0cb775182522829ebc3c085032214f5fda2cc09142076
- build: h3e71869_1
+ md5: 36b6e920548d1a9862214d6b85f3cda4
+ sha256: ffd577632ab5847b678070079f1eb796706fee641e1bd123676d72ccaab4c359
+ build: hefd2eba_2
arch: aarch64
subdir: osx-arm64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 118177
- timestamp: 1703166449666
+ size: 117880
+ timestamp: 1704306343120
- platform: win-64
name: aws-c-mqtt
version: 0.10.0
@@ -1559,124 +1557,124 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-c-mqtt-0.10.0-h9e0d744_1.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-c-mqtt-0.10.0-h2889a98_2.conda
hash:
- md5: 10e6785801e8a771313dd887e97def03
- sha256: 2b120a7046855d654e948a4a3eb16b463e07eb80ea50b16b5e3eda6fb15ecdce
- build: h9e0d744_1
+ md5: e97e0269641768be849c4b7d4ac6acd1
+ sha256: b068b77bb7bda33edab25e5f97756127bd985850156068f5d879a6cbe492c72c
+ build: h2889a98_2
arch: x86_64
subdir: win-64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 158521
- timestamp: 1703166641152
+ size: 158809
+ timestamp: 1704306951855
- platform: linux-64
name: aws-c-s3
- version: 0.4.5
+ version: 0.4.7
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- libgcc-ng >=12
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.4.5-h47b1690_1.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.4.7-he8c168f_2.conda
hash:
- md5: 740dd0758ba132d614b1b07a3782fd18
- sha256: 88df08401682eaaffda5ccf45e8dd3bf4186cfeac2712f96c2c66a4840e9eda2
- build: h47b1690_1
+ md5: 8efc4a83d09d0985a1209e7db46a85df
+ sha256: 5dd6fe2c6008ecf36828364b737013bc3f577eabc087d521c739d16fa1b54e6d
+ build: he8c168f_2
arch: x86_64
subdir: linux-64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 103242
- timestamp: 1703180180170
+ size: 103912
+ timestamp: 1704321466794
- platform: osx-64
name: aws-c-s3
- version: 0.4.5
+ version: 0.4.7
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-s3-0.4.5-h222382d_1.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-c-s3-0.4.7-hfc07516_2.conda
hash:
- md5: 711a0c583136e9cbeaa5d49e7972d1b7
- sha256: baf0ac5b868ed97cff302c304ab2840cee3e3dc72c967d5989f8f7ab543e6925
- build: h222382d_1
+ md5: 86621c40b75c87e88ed649dc67baca4a
+ sha256: 1fa26b0d77f38b381376240fd264427c45e8e54f18ed894bb0d3edcdc3bfbf36
+ build: hfc07516_2
arch: x86_64
subdir: osx-64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 89283
- timestamp: 1703180422552
+ size: 90455
+ timestamp: 1704321754265
- platform: osx-arm64
name: aws-c-s3
- version: 0.4.5
+ version: 0.4.7
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.4.5-ha7f36e9_1.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.4.7-h0d871e0_2.conda
hash:
- md5: f5db669aaabae33b505c89a3d67c03d7
- sha256: 82a72e3258910556181b936a6740dfb6f620da1cf2d6c474ea276d7a50056da0
- build: ha7f36e9_1
+ md5: 6367f7720abb8c4a8d12af09686dac6b
+ sha256: d94cee3ef3842e4200cd9535f27567fa1047f843dd31d3fa3e529ccd06ca32da
+ build: h0d871e0_2
arch: aarch64
subdir: osx-arm64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 88278
- timestamp: 1703180405756
+ size: 89206
+ timestamp: 1704321752655
- platform: win-64
name: aws-c-s3
- version: 0.4.5
+ version: 0.4.7
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.4.5-he94bf34_1.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.4.7-hb620688_2.conda
hash:
- md5: c043ea31f64afa288cb3872eb65b3f4f
- sha256: eea11380cd6773e44584bfaf80b985aeebcae672d0872f677edbf674680fed0d
- build: he94bf34_1
+ md5: 414c1d6c071e7a43fe0700d2df846905
+ sha256: 7be77cb9ff14834ff360ece85c5ba0a43ac3971c20f6276cd3c5a8792fea21e8
+ build: hb620688_2
arch: x86_64
subdir: win-64
- build_number: 1
+ build_number: 2
license: Apache-2.0
license_family: Apache
- size: 100137
- timestamp: 1703180469862
+ size: 99991
+ timestamp: 1704322032884
- platform: linux-64
name: aws-c-sdkutils
version: 0.1.13
@@ -1839,119 +1837,119 @@ package:
timestamp: 1701247480992
- platform: linux-64
name: aws-crt-cpp
- version: 0.25.0
+ version: 0.26.0
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-mqtt >=0.10.0,<0.10.1.0a0
- - aws-c-s3 >=0.4.5,<0.4.6.0a0
+ - aws-c-s3 >=0.4.7,<0.4.8.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- libgcc-ng >=12
- libstdcxx-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-crt-cpp-0.25.0-h169d4cb_3.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-crt-cpp-0.26.0-h3b5eec7_3.conda
hash:
- md5: 09d6ffbbbf0a3e640a4e88ea329e0d48
- sha256: 964299a7e158457c0048d709916701e194544195a7116174ed7c9ec1fa4d0c5a
- build: h169d4cb_3
+ md5: 76a7d5e9caccda4194477b6040863f27
+ sha256: 6a4638caea971b7588842309e8ceda7e9c422ac7877e66ffb371be9499ab7f52
+ build: h3b5eec7_3
arch: x86_64
subdir: linux-64
build_number: 3
license: Apache-2.0
license_family: Apache
- size: 332456
- timestamp: 1703191929061
+ size: 332540
+ timestamp: 1704352136069
- platform: osx-64
name: aws-crt-cpp
- version: 0.25.0
+ version: 0.26.0
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-mqtt >=0.10.0,<0.10.1.0a0
- - aws-c-s3 >=0.4.5,<0.4.6.0a0
+ - aws-c-s3 >=0.4.7,<0.4.8.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- libcxx >=15
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-crt-cpp-0.25.0-h40be75d_3.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-crt-cpp-0.26.0-h88f2ebf_3.conda
hash:
- md5: 55f0390e52f193dc0caf866f8084a41a
- sha256: 822a253208e6cc2a1ce9b2074a51792be7af596ade0d5c27796ddae450fe4389
- build: h40be75d_3
+ md5: 4f21830317f110bb0c53332d55272587
+ sha256: 84d2cf38803bbdde1b4b6f9034efd9b76c14127d7f8e9e6bfabe89abeded0f88
+ build: h88f2ebf_3
arch: x86_64
subdir: osx-64
build_number: 3
license: Apache-2.0
license_family: Apache
- size: 280374
- timestamp: 1703192359296
+ size: 280692
+ timestamp: 1704352576363
- platform: osx-arm64
name: aws-crt-cpp
- version: 0.25.0
+ version: 0.26.0
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-mqtt >=0.10.0,<0.10.1.0a0
- - aws-c-s3 >=0.4.5,<0.4.6.0a0
+ - aws-c-s3 >=0.4.7,<0.4.8.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- libcxx >=15
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-crt-cpp-0.25.0-h497ef68_3.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-crt-cpp-0.26.0-hcc526ff_3.conda
hash:
- md5: 840acc8b8f1be537c74a0f6c1b0e5f14
- sha256: df46f64a6587acd05bd8ce708db6d1f35e49571b3ead20cdebb38c8856f2b3aa
- build: h497ef68_3
+ md5: 21e0476de051cf3221eebceb21e1e890
+ sha256: a8586d39c8a8c0b2b3f07b702f898c6caaad904ddeab655cfed4b818d753b627
+ build: hcc526ff_3
arch: aarch64
subdir: osx-arm64
build_number: 3
license: Apache-2.0
license_family: Apache
- size: 216223
- timestamp: 1703192297664
+ size: 217235
+ timestamp: 1704352353546
- platform: win-64
name: aws-crt-cpp
- version: 0.25.0
+ version: 0.26.0
category: main
manager: conda
dependencies:
- - aws-c-auth >=0.7.8,<0.7.9.0a0
+ - aws-c-auth >=0.7.10,<0.7.11.0a0
- aws-c-cal >=0.6.9,<0.6.10.0a0
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
- - aws-c-http >=0.7.15,<0.7.16.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
+ - aws-c-http >=0.8.0,<0.8.1.0a0
- aws-c-io >=0.13.36,<0.13.37.0a0
- aws-c-mqtt >=0.10.0,<0.10.1.0a0
- - aws-c-s3 >=0.4.5,<0.4.6.0a0
+ - aws-c-s3 >=0.4.7,<0.4.8.0a0
- aws-c-sdkutils >=0.1.13,<0.1.14.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-crt-cpp-0.25.0-h50a9806_3.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-crt-cpp-0.26.0-hda755dc_3.conda
hash:
- md5: 35e047c581355bb617d57d8cb15b958b
- sha256: 85db298a73b4b39c3f6361f97928b3a90fce8b69b3730aa4253c93ac22751f76
- build: h50a9806_3
+ md5: 37c6ae5c9f1141b62b1eae46eebc35c8
+ sha256: 9c3d6b39a80892766aae30e25f42ebe4362b860eab3383d168797a289f477421
+ build: hda755dc_3
arch: x86_64
subdir: win-64
build_number: 3
license: Apache-2.0
license_family: Apache
- size: 243163
- timestamp: 1703192507573
+ size: 242642
+ timestamp: 1704352564082
- platform: linux-64
name: aws-sdk-cpp
version: 1.11.210
@@ -1959,26 +1957,26 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- libcurl >=8.5.0,<9.0a0
- libgcc-ng >=12
- libstdcxx-ng >=12
- libzlib >=1.2.13,<1.3.0a0
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/aws-sdk-cpp-1.11.210-h0853bfa_5.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/aws-sdk-cpp-1.11.210-hac0d6e5_8.conda
hash:
- md5: b923ffd222c9bb4bbf93b0fd18beac02
- sha256: faa693932ed5d51c1d1f79385d6a63ea4b589a5be837227bec85456997e81000
- build: h0853bfa_5
+ md5: e7adaa813ace3f3b8516689716a28970
+ sha256: a9e58d64d613c075ded9fa812ce028f3d484f96e3f5c03559a004f761bced036
+ build: hac0d6e5_8
arch: x86_64
subdir: linux-64
- build_number: 5
+ build_number: 8
license: Apache-2.0
license_family: Apache
- size: 3527043
- timestamp: 1702578061165
+ size: 3547874
+ timestamp: 1704352516690
- platform: osx-64
name: aws-sdk-cpp
version: 1.11.210
@@ -1986,25 +1984,25 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- libcurl >=8.5.0,<9.0a0
- libcxx >=15
- libzlib >=1.2.13,<1.3.0a0
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/aws-sdk-cpp-1.11.210-hcffc49a_5.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/aws-sdk-cpp-1.11.210-heeba50e_8.conda
hash:
- md5: 8aff1ef8615316b2f85f4414ab5d95f8
- sha256: c104eb9b191f3d5d2705acb9b18eeb3d6254458df1b52755463d0f7123728216
- build: hcffc49a_5
+ md5: 31af5e0fb37e72362f087ca9e2748f5c
+ sha256: 7128b18540395212d4d6b4f8741f658d3900024bde9b5d3e32d6eaa4448325b3
+ build: heeba50e_8
arch: x86_64
subdir: osx-64
- build_number: 5
+ build_number: 8
license: Apache-2.0
license_family: Apache
- size: 3262242
- timestamp: 1702578712176
+ size: 3267017
+ timestamp: 1704353210503
- platform: osx-arm64
name: aws-sdk-cpp
version: 1.11.210
@@ -2012,25 +2010,25 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- libcurl >=8.5.0,<9.0a0
- libcxx >=15
- libzlib >=1.2.13,<1.3.0a0
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-sdk-cpp-1.11.210-h87406ae_5.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/aws-sdk-cpp-1.11.210-ha042220_8.conda
hash:
- md5: 606df59dc06c0081f75bd0838d2825ee
- sha256: b523c0f9405ad8275dc0a2e20d632795edd5650e6b89412117bdd0e4d12399d4
- build: h87406ae_5
+ md5: e7aa143c60437c1625019add833b143e
+ sha256: 8588fec5385ae35add918a33b9b75fd79ef3007fd5d1c7db59b047bf1cac0311
+ build: ha042220_8
arch: aarch64
subdir: osx-arm64
- build_number: 5
+ build_number: 8
license: Apache-2.0
license_family: Apache
- size: 3287128
- timestamp: 1702578622340
+ size: 3304720
+ timestamp: 1704352998014
- platform: win-64
name: aws-sdk-cpp
version: 1.11.210
@@ -2038,25 +2036,25 @@ package:
manager: conda
dependencies:
- aws-c-common >=0.9.10,<0.9.11.0a0
- - aws-c-event-stream >=0.3.2,<0.3.3.0a0
+ - aws-c-event-stream >=0.4.0,<0.4.1.0a0
- aws-checksums >=0.1.17,<0.1.18.0a0
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- libzlib >=1.2.13,<1.3.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/aws-sdk-cpp-1.11.210-h0441c79_5.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/aws-sdk-cpp-1.11.210-h79fa1a6_8.conda
hash:
- md5: 5bfdc7cd659cc8fc9402716faa97df25
- sha256: c9e9b0fd808571fd88767de44244acc2f72cd8efd73c18bbc6bdf15a08e3b9a8
- build: h0441c79_5
+ md5: f065f24d6d6276cf251eb97a0c6037ab
+ sha256: b3d98f4d88953d70a71a25c0878656e3629663377904b80630346a267c240461
+ build: h79fa1a6_8
arch: x86_64
subdir: win-64
- build_number: 5
+ build_number: 8
license: Apache-2.0
license_family: Apache
- size: 3299752
- timestamp: 1702579002364
+ size: 3334520
+ timestamp: 1704353765135
- platform: linux-64
name: babel
version: 2.14.0
@@ -2327,7 +2325,7 @@ package:
timestamp: 1680888259061
- platform: linux-64
name: black
- version: 23.11.0
+ version: 23.12.1
category: main
manager: conda
dependencies:
@@ -2338,21 +2336,21 @@ package:
- platformdirs >=2
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/linux-64/black-23.11.0-py311h38be061_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/black-23.12.1-py311h38be061_0.conda
hash:
- md5: c7faa183cb5197081521cb7fc26a2789
- sha256: 545eadf8ce937bbada29e7195fea0c3af2a28969d04de21c1cd5d15d6022f026
+ md5: cb563ab54c59917f004d4faf7a29c610
+ sha256: 90c29112da654aa4f713b03220b884aa4d11a74e642a72bf9d56e872700c7423
build: py311h38be061_0
arch: x86_64
subdir: linux-64
build_number: 0
license: MIT
license_family: MIT
- size: 374323
- timestamp: 1701335801524
+ size: 378436
+ timestamp: 1703317664029
- platform: osx-64
name: black
- version: 23.11.0
+ version: 23.12.1
category: main
manager: conda
dependencies:
@@ -2363,21 +2361,21 @@ package:
- platformdirs >=2
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-64/black-23.11.0-py311h6eed73b_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/black-23.12.1-py311h6eed73b_0.conda
hash:
- md5: 4d604785cb7620e8f9edd1a6907c9f35
- sha256: 29d08a9c1c5b3cd040cc50f33d90233df6d0f4de8afde5370fc142a352556b9c
+ md5: df285ee5615270aa4172c322dbecac3f
+ sha256: 6f82c6dbb55421163945bc4d03cc23734903e93c0ffd724d67530ce7c6d5a010
build: py311h6eed73b_0
arch: x86_64
subdir: osx-64
build_number: 0
license: MIT
license_family: MIT
- size: 373834
- timestamp: 1701336109317
+ size: 379701
+ timestamp: 1703317853109
- platform: osx-arm64
name: black
- version: 23.11.0
+ version: 23.12.1
category: main
manager: conda
dependencies:
@@ -2389,21 +2387,21 @@ package:
- python >=3.11,<3.12.0a0
- python >=3.11,<3.12.0a0 *_cpython
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-arm64/black-23.11.0-py311h267d04e_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/black-23.12.1-py311h267d04e_0.conda
hash:
- md5: c491e9bbddeac830df4f388d1498bd1c
- sha256: 3f4167650e1ace7f0ce1fb5956547e11fea3af17f8cdcfa9583b02c2ac151bb9
+ md5: c4cfd1e85e013c9a0995166897d06b73
+ sha256: b4ebb13643a6d77429f24b3fb3985d8e7eebd4804ea59b0834273ba0de451220
build: py311h267d04e_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: MIT
license_family: MIT
- size: 374740
- timestamp: 1701336245983
+ size: 379989
+ timestamp: 1703317897071
- platform: win-64
name: black
- version: 23.11.0
+ version: 23.12.1
category: main
manager: conda
dependencies:
@@ -2414,18 +2412,18 @@ package:
- platformdirs >=2
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/win-64/black-23.11.0-py311h1ea47a8_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/black-23.12.1-py311h1ea47a8_0.conda
hash:
- md5: 2dae38002774c3adc34179552161648b
- sha256: 4c595343c9260e4b5797e8e2a27f56f1ab03442c4706f254620b059076c0a376
+ md5: aa307be056896f2da4ac4fcf2f348b08
+ sha256: 79f6f11a0057c6c648e72a976b88f3bc5d14b6f0fdb4f66eafefb643c063e692
build: py311h1ea47a8_0
arch: x86_64
subdir: win-64
build_number: 0
license: MIT
license_family: MIT
- size: 390702
- timestamp: 1701336315064
+ size: 395159
+ timestamp: 1703318197279
- platform: linux-64
name: blosc
version: 1.21.5
@@ -4437,7 +4435,7 @@ package:
- pkg:pypi/colorama
- platform: linux-64
name: contextily
- version: 1.4.0
+ version: 1.5.0
category: main
manager: conda
dependencies:
@@ -4450,10 +4448,10 @@ package:
- rasterio
- requests
- xyzservices
- url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.4.0-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.5.0-pyhd8ed1ab_0.conda
hash:
- md5: e539a3ee857193310d0c4e56b75065ff
- sha256: aa467b146557712f44930eb9e73272e3bf5ad1c7dabb5b7d34ef4872ca017351
+ md5: e8fadec2cd6178f95e2dea74182e6fdf
+ sha256: 143a00a3eec47b0cf8ae931a93d9eb144f1c73567ff9ef344e2f228e56fec1c8
build: pyhd8ed1ab_0
arch: x86_64
subdir: linux-64
@@ -4461,13 +4459,13 @@ package:
license: BSD-3-Clause
license_family: BSD
noarch: python
- size: 20565
- timestamp: 1696881145234
+ size: 20757
+ timestamp: 1703886389745
purls:
- pkg:pypi/contextily
- platform: osx-64
name: contextily
- version: 1.4.0
+ version: 1.5.0
category: main
manager: conda
dependencies:
@@ -4480,10 +4478,10 @@ package:
- rasterio
- requests
- xyzservices
- url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.4.0-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.5.0-pyhd8ed1ab_0.conda
hash:
- md5: e539a3ee857193310d0c4e56b75065ff
- sha256: aa467b146557712f44930eb9e73272e3bf5ad1c7dabb5b7d34ef4872ca017351
+ md5: e8fadec2cd6178f95e2dea74182e6fdf
+ sha256: 143a00a3eec47b0cf8ae931a93d9eb144f1c73567ff9ef344e2f228e56fec1c8
build: pyhd8ed1ab_0
arch: x86_64
subdir: osx-64
@@ -4491,13 +4489,13 @@ package:
license: BSD-3-Clause
license_family: BSD
noarch: python
- size: 20565
- timestamp: 1696881145234
+ size: 20757
+ timestamp: 1703886389745
purls:
- pkg:pypi/contextily
- platform: osx-arm64
name: contextily
- version: 1.4.0
+ version: 1.5.0
category: main
manager: conda
dependencies:
@@ -4510,10 +4508,10 @@ package:
- rasterio
- requests
- xyzservices
- url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.4.0-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.5.0-pyhd8ed1ab_0.conda
hash:
- md5: e539a3ee857193310d0c4e56b75065ff
- sha256: aa467b146557712f44930eb9e73272e3bf5ad1c7dabb5b7d34ef4872ca017351
+ md5: e8fadec2cd6178f95e2dea74182e6fdf
+ sha256: 143a00a3eec47b0cf8ae931a93d9eb144f1c73567ff9ef344e2f228e56fec1c8
build: pyhd8ed1ab_0
arch: aarch64
subdir: osx-arm64
@@ -4521,13 +4519,13 @@ package:
license: BSD-3-Clause
license_family: BSD
noarch: python
- size: 20565
- timestamp: 1696881145234
+ size: 20757
+ timestamp: 1703886389745
purls:
- pkg:pypi/contextily
- platform: win-64
name: contextily
- version: 1.4.0
+ version: 1.5.0
category: main
manager: conda
dependencies:
@@ -4540,10 +4538,10 @@ package:
- rasterio
- requests
- xyzservices
- url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.4.0-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/contextily-1.5.0-pyhd8ed1ab_0.conda
hash:
- md5: e539a3ee857193310d0c4e56b75065ff
- sha256: aa467b146557712f44930eb9e73272e3bf5ad1c7dabb5b7d34ef4872ca017351
+ md5: e8fadec2cd6178f95e2dea74182e6fdf
+ sha256: 143a00a3eec47b0cf8ae931a93d9eb144f1c73567ff9ef344e2f228e56fec1c8
build: pyhd8ed1ab_0
arch: x86_64
subdir: win-64
@@ -4551,8 +4549,8 @@ package:
license: BSD-3-Clause
license_family: BSD
noarch: python
- size: 20565
- timestamp: 1696881145234
+ size: 20757
+ timestamp: 1703886389745
purls:
- pkg:pypi/contextily
- platform: linux-64
@@ -4659,7 +4657,7 @@ package:
- pkg:pypi/contourpy
- platform: linux-64
name: coverage
- version: 7.3.4
+ version: 7.4.0
category: main
manager: conda
dependencies:
@@ -4667,46 +4665,46 @@ package:
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- tomli
- url: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.3.4-py311h459d7ec_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.4.0-py311h459d7ec_0.conda
hash:
- md5: 8fbe3dd4619336c2657d3607d3823ee0
- sha256: 17e648a13e596011becd8bbc9b528fe1d9874ef60b5f7a721fdfa5b166ce6115
+ md5: bbaf0376ed2f153a90f167ad908da3d0
+ sha256: 3d1a0ae99477d91f2c7e4f5a7554e6de2eaa9bc4450a2db307005c65e394e7f2
build: py311h459d7ec_0
arch: x86_64
subdir: linux-64
build_number: 0
license: Apache-2.0
license_family: APACHE
- size: 355842
- timestamp: 1703135756833
+ size: 364870
+ timestamp: 1703727330547
purls:
- pkg:pypi/coverage
- platform: osx-64
name: coverage
- version: 7.3.4
+ version: 7.4.0
category: main
manager: conda
dependencies:
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- tomli
- url: https://conda.anaconda.org/conda-forge/osx-64/coverage-7.3.4-py311he705e18_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/coverage-7.4.0-py311he705e18_0.conda
hash:
- md5: 2b062bc06a86d0fd47f7d15d114af3a1
- sha256: d82dd23845c745621095d77ce25190a6a7c11cfea051a54dd0d34426fd272064
+ md5: 26c6acf173e93e71cb28339544abc377
+ sha256: eb603b678fa508acade2a96899c8d235095c9b6c915fb64e9a82d77bc33665c3
build: py311he705e18_0
arch: x86_64
subdir: osx-64
build_number: 0
license: Apache-2.0
license_family: APACHE
- size: 354187
- timestamp: 1703136087040
+ size: 363494
+ timestamp: 1703727528872
purls:
- pkg:pypi/coverage
- platform: osx-arm64
name: coverage
- version: 7.3.4
+ version: 7.4.0
category: main
manager: conda
dependencies:
@@ -4714,23 +4712,23 @@ package:
- python >=3.11,<3.12.0a0 *_cpython
- python_abi 3.11.* *_cp311
- tomli
- url: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.3.4-py311h05b510d_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/coverage-7.4.0-py311h05b510d_0.conda
hash:
- md5: 6b2a0b3ec6e7403e080839937a1f1cd9
- sha256: df29d89e5ac6ed85cf708ee8e3c3356717d0385b00028ae5c93b37360f7cd82f
+ md5: 7a801e12fd286ee7d3be2bf7fb1e029f
+ sha256: 78c909fcedf2aa360b95e4ea395706557df4adde27ce3b9086f7e2934c26a2b2
build: py311h05b510d_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: Apache-2.0
license_family: APACHE
- size: 354717
- timestamp: 1703136059169
+ size: 365440
+ timestamp: 1703727575915
purls:
- pkg:pypi/coverage
- platform: win-64
name: coverage
- version: 7.3.4
+ version: 7.4.0
category: main
manager: conda
dependencies:
@@ -4740,18 +4738,18 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/coverage-7.3.4-py311ha68e1ae_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/coverage-7.4.0-py311ha68e1ae_0.conda
hash:
- md5: 0af4dcb814ccb4a82b7171a39ddbb498
- sha256: 249b89463e5a3ff327b010f6612a72757c2b6a8a3a7453af960178938978af76
+ md5: 4e57a4f9db2ff69d14204ae3863686e2
+ sha256: 138dcbfb37bee8b22ba8e577933843a4da5713d68e2562de92a2e63dadd78ddb
build: py311ha68e1ae_0
arch: x86_64
subdir: win-64
build_number: 0
license: Apache-2.0
license_family: APACHE
- size: 372861
- timestamp: 1703136282091
+ size: 382380
+ timestamp: 1703727762345
purls:
- pkg:pypi/coverage
- platform: linux-64
@@ -6458,94 +6456,98 @@ package:
- pkg:pypi/fiona
- platform: linux-64
name: flopy
- version: 3.4.3
+ version: 3.5.0
category: main
manager: conda
dependencies:
- matplotlib-base >=1.4.0
- numpy >=1.15.0
+ - pandas >=2.0.0
- python >=3.8
- url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.5.0-pyhd8ed1ab_0.conda
hash:
- md5: 7434458ec6c1ac6a065159a3d24f5256
- sha256: 87eb5acb6a4448b7b2978aaf2994d5c7f75d7e62f62a3c2ecf6d29fd380b4b92
+ md5: 6e65c5bd8939bd6d1e45bf163f11aacb
+ sha256: 55c68dfd4933a50ac5d9efa6c93baeeb5ce747625ed8ae777a0568e9dfcc1d61
build: pyhd8ed1ab_0
arch: x86_64
subdir: linux-64
build_number: 0
license: CC0-1.0
noarch: python
- size: 763490
- timestamp: 1696354539510
+ size: 777108
+ timestamp: 1701006956496
purls:
- pkg:pypi/flopy
- platform: osx-64
name: flopy
- version: 3.4.3
+ version: 3.5.0
category: main
manager: conda
dependencies:
- matplotlib-base >=1.4.0
- numpy >=1.15.0
+ - pandas >=2.0.0
- python >=3.8
- url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.5.0-pyhd8ed1ab_0.conda
hash:
- md5: 7434458ec6c1ac6a065159a3d24f5256
- sha256: 87eb5acb6a4448b7b2978aaf2994d5c7f75d7e62f62a3c2ecf6d29fd380b4b92
+ md5: 6e65c5bd8939bd6d1e45bf163f11aacb
+ sha256: 55c68dfd4933a50ac5d9efa6c93baeeb5ce747625ed8ae777a0568e9dfcc1d61
build: pyhd8ed1ab_0
arch: x86_64
subdir: osx-64
build_number: 0
license: CC0-1.0
noarch: python
- size: 763490
- timestamp: 1696354539510
+ size: 777108
+ timestamp: 1701006956496
purls:
- pkg:pypi/flopy
- platform: osx-arm64
name: flopy
- version: 3.4.3
+ version: 3.5.0
category: main
manager: conda
dependencies:
- matplotlib-base >=1.4.0
- numpy >=1.15.0
+ - pandas >=2.0.0
- python >=3.8
- url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.5.0-pyhd8ed1ab_0.conda
hash:
- md5: 7434458ec6c1ac6a065159a3d24f5256
- sha256: 87eb5acb6a4448b7b2978aaf2994d5c7f75d7e62f62a3c2ecf6d29fd380b4b92
+ md5: 6e65c5bd8939bd6d1e45bf163f11aacb
+ sha256: 55c68dfd4933a50ac5d9efa6c93baeeb5ce747625ed8ae777a0568e9dfcc1d61
build: pyhd8ed1ab_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: CC0-1.0
noarch: python
- size: 763490
- timestamp: 1696354539510
+ size: 777108
+ timestamp: 1701006956496
purls:
- pkg:pypi/flopy
- platform: win-64
name: flopy
- version: 3.4.3
+ version: 3.5.0
category: main
manager: conda
dependencies:
- matplotlib-base >=1.4.0
- numpy >=1.15.0
+ - pandas >=2.0.0
- python >=3.8
- url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/flopy-3.5.0-pyhd8ed1ab_0.conda
hash:
- md5: 7434458ec6c1ac6a065159a3d24f5256
- sha256: 87eb5acb6a4448b7b2978aaf2994d5c7f75d7e62f62a3c2ecf6d29fd380b4b92
+ md5: 6e65c5bd8939bd6d1e45bf163f11aacb
+ sha256: 55c68dfd4933a50ac5d9efa6c93baeeb5ce747625ed8ae777a0568e9dfcc1d61
build: pyhd8ed1ab_0
arch: x86_64
subdir: win-64
build_number: 0
license: CC0-1.0
noarch: python
- size: 763490
- timestamp: 1696354539510
+ size: 777108
+ timestamp: 1701006956496
purls:
- pkg:pypi/flopy
- platform: linux-64
@@ -10447,7 +10449,7 @@ package:
timestamp: 1701791365837
- platform: linux-64
name: hypothesis
- version: 6.92.1
+ version: 6.92.2
category: main
manager: conda
dependencies:
@@ -10458,10 +10460,10 @@ package:
- python >=3.8
- setuptools
- sortedcontainers >=2.1.0,<3.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.1-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.2-pyha770c72_0.conda
hash:
- md5: 9955984dbdac8dc762a85ac3ad2e372b
- sha256: 56f38bd21afb049b01778a4c6831c746aecebc2658a110382693f05efa442ae4
+ md5: 02ea14cc71885b316075df33c51b666f
+ sha256: a2c48bdc2a44a1069a517f5f39c54bb594cb29a37c5436fe24633c0b9376b6d2
build: pyha770c72_0
arch: x86_64
subdir: linux-64
@@ -10469,13 +10471,13 @@ package:
license: MPL-2.0
license_family: MOZILLA
noarch: python
- size: 307471
- timestamp: 1702710295239
+ size: 307559
+ timestamp: 1703646339242
purls:
- pkg:pypi/hypothesis
- platform: osx-64
name: hypothesis
- version: 6.92.1
+ version: 6.92.2
category: main
manager: conda
dependencies:
@@ -10486,10 +10488,10 @@ package:
- python >=3.8
- setuptools
- sortedcontainers >=2.1.0,<3.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.1-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.2-pyha770c72_0.conda
hash:
- md5: 9955984dbdac8dc762a85ac3ad2e372b
- sha256: 56f38bd21afb049b01778a4c6831c746aecebc2658a110382693f05efa442ae4
+ md5: 02ea14cc71885b316075df33c51b666f
+ sha256: a2c48bdc2a44a1069a517f5f39c54bb594cb29a37c5436fe24633c0b9376b6d2
build: pyha770c72_0
arch: x86_64
subdir: osx-64
@@ -10497,13 +10499,13 @@ package:
license: MPL-2.0
license_family: MOZILLA
noarch: python
- size: 307471
- timestamp: 1702710295239
+ size: 307559
+ timestamp: 1703646339242
purls:
- pkg:pypi/hypothesis
- platform: osx-arm64
name: hypothesis
- version: 6.92.1
+ version: 6.92.2
category: main
manager: conda
dependencies:
@@ -10514,10 +10516,10 @@ package:
- python >=3.8
- setuptools
- sortedcontainers >=2.1.0,<3.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.1-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.2-pyha770c72_0.conda
hash:
- md5: 9955984dbdac8dc762a85ac3ad2e372b
- sha256: 56f38bd21afb049b01778a4c6831c746aecebc2658a110382693f05efa442ae4
+ md5: 02ea14cc71885b316075df33c51b666f
+ sha256: a2c48bdc2a44a1069a517f5f39c54bb594cb29a37c5436fe24633c0b9376b6d2
build: pyha770c72_0
arch: aarch64
subdir: osx-arm64
@@ -10525,13 +10527,13 @@ package:
license: MPL-2.0
license_family: MOZILLA
noarch: python
- size: 307471
- timestamp: 1702710295239
+ size: 307559
+ timestamp: 1703646339242
purls:
- pkg:pypi/hypothesis
- platform: win-64
name: hypothesis
- version: 6.92.1
+ version: 6.92.2
category: main
manager: conda
dependencies:
@@ -10542,10 +10544,10 @@ package:
- python >=3.8
- setuptools
- sortedcontainers >=2.1.0,<3.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.1-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/hypothesis-6.92.2-pyha770c72_0.conda
hash:
- md5: 9955984dbdac8dc762a85ac3ad2e372b
- sha256: 56f38bd21afb049b01778a4c6831c746aecebc2658a110382693f05efa442ae4
+ md5: 02ea14cc71885b316075df33c51b666f
+ sha256: a2c48bdc2a44a1069a517f5f39c54bb594cb29a37c5436fe24633c0b9376b6d2
build: pyha770c72_0
arch: x86_64
subdir: win-64
@@ -10553,8 +10555,8 @@ package:
license: MPL-2.0
license_family: MOZILLA
noarch: python
- size: 307471
- timestamp: 1702710295239
+ size: 307559
+ timestamp: 1703646339242
purls:
- pkg:pypi/hypothesis
- platform: linux-64
@@ -10895,16 +10897,16 @@ package:
timestamp: 1685625210193
- platform: linux-64
name: importlib-metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- python >=3.8
- zipp >=0.5
- url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.0-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.1-pyha770c72_0.conda
hash:
- md5: a941237cd06538837b25cd245fcd25d8
- sha256: 9731e82a00d36b182dc515e31723e711ac82890bb1ca86c6a17a4b471135564f
+ md5: 746623a787e06191d80a2133e5daff17
+ sha256: e72d05f171f4567004c9360a838e9d5df21e23dcfeb945066b53a6e5f754b861
build: pyha770c72_0
arch: x86_64
subdir: linux-64
@@ -10912,20 +10914,20 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: python
- size: 26076
- timestamp: 1701632335069
+ size: 26450
+ timestamp: 1703269427097
- platform: osx-64
name: importlib-metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- python >=3.8
- zipp >=0.5
- url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.0-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.1-pyha770c72_0.conda
hash:
- md5: a941237cd06538837b25cd245fcd25d8
- sha256: 9731e82a00d36b182dc515e31723e711ac82890bb1ca86c6a17a4b471135564f
+ md5: 746623a787e06191d80a2133e5daff17
+ sha256: e72d05f171f4567004c9360a838e9d5df21e23dcfeb945066b53a6e5f754b861
build: pyha770c72_0
arch: x86_64
subdir: osx-64
@@ -10933,20 +10935,20 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: python
- size: 26076
- timestamp: 1701632335069
+ size: 26450
+ timestamp: 1703269427097
- platform: osx-arm64
name: importlib-metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- python >=3.8
- zipp >=0.5
- url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.0-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.1-pyha770c72_0.conda
hash:
- md5: a941237cd06538837b25cd245fcd25d8
- sha256: 9731e82a00d36b182dc515e31723e711ac82890bb1ca86c6a17a4b471135564f
+ md5: 746623a787e06191d80a2133e5daff17
+ sha256: e72d05f171f4567004c9360a838e9d5df21e23dcfeb945066b53a6e5f754b861
build: pyha770c72_0
arch: aarch64
subdir: osx-arm64
@@ -10954,20 +10956,20 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: python
- size: 26076
- timestamp: 1701632335069
+ size: 26450
+ timestamp: 1703269427097
- platform: win-64
name: importlib-metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- python >=3.8
- zipp >=0.5
- url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.0-pyha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-7.0.1-pyha770c72_0.conda
hash:
- md5: a941237cd06538837b25cd245fcd25d8
- sha256: 9731e82a00d36b182dc515e31723e711ac82890bb1ca86c6a17a4b471135564f
+ md5: 746623a787e06191d80a2133e5daff17
+ sha256: e72d05f171f4567004c9360a838e9d5df21e23dcfeb945066b53a6e5f754b861
build: pyha770c72_0
arch: x86_64
subdir: win-64
@@ -10975,19 +10977,19 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: python
- size: 26076
- timestamp: 1701632335069
+ size: 26450
+ timestamp: 1703269427097
- platform: linux-64
name: importlib_metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- - importlib-metadata >=7.0.0,<7.0.1.0a0
- url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.0-hd8ed1ab_0.conda
+ - importlib-metadata >=7.0.1,<7.0.2.0a0
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.1-hd8ed1ab_0.conda
hash:
- md5: 12aff14f84c337be5e5636bf612f4140
- sha256: b9e8ed41df6c55222e3777f422e77a22a6a19ff779b2e65aa8dfdea792c1f7de
+ md5: 4a2f43a20fa404b998859c6a470ba316
+ sha256: bc362df1d4f5a04c38dff29cd9c2d0ac584f9c4b45d3e4683ee090944a38fba4
build: hd8ed1ab_0
arch: x86_64
subdir: linux-64
@@ -10995,19 +10997,19 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: generic
- size: 9534
- timestamp: 1701632342298
+ size: 9575
+ timestamp: 1703269436329
- platform: osx-64
name: importlib_metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- - importlib-metadata >=7.0.0,<7.0.1.0a0
- url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.0-hd8ed1ab_0.conda
+ - importlib-metadata >=7.0.1,<7.0.2.0a0
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.1-hd8ed1ab_0.conda
hash:
- md5: 12aff14f84c337be5e5636bf612f4140
- sha256: b9e8ed41df6c55222e3777f422e77a22a6a19ff779b2e65aa8dfdea792c1f7de
+ md5: 4a2f43a20fa404b998859c6a470ba316
+ sha256: bc362df1d4f5a04c38dff29cd9c2d0ac584f9c4b45d3e4683ee090944a38fba4
build: hd8ed1ab_0
arch: x86_64
subdir: osx-64
@@ -11015,19 +11017,19 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: generic
- size: 9534
- timestamp: 1701632342298
+ size: 9575
+ timestamp: 1703269436329
- platform: osx-arm64
name: importlib_metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- - importlib-metadata >=7.0.0,<7.0.1.0a0
- url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.0-hd8ed1ab_0.conda
+ - importlib-metadata >=7.0.1,<7.0.2.0a0
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.1-hd8ed1ab_0.conda
hash:
- md5: 12aff14f84c337be5e5636bf612f4140
- sha256: b9e8ed41df6c55222e3777f422e77a22a6a19ff779b2e65aa8dfdea792c1f7de
+ md5: 4a2f43a20fa404b998859c6a470ba316
+ sha256: bc362df1d4f5a04c38dff29cd9c2d0ac584f9c4b45d3e4683ee090944a38fba4
build: hd8ed1ab_0
arch: aarch64
subdir: osx-arm64
@@ -11035,19 +11037,19 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: generic
- size: 9534
- timestamp: 1701632342298
+ size: 9575
+ timestamp: 1703269436329
- platform: win-64
name: importlib_metadata
- version: 7.0.0
+ version: 7.0.1
category: main
manager: conda
dependencies:
- - importlib-metadata >=7.0.0,<7.0.1.0a0
- url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.0-hd8ed1ab_0.conda
+ - importlib-metadata >=7.0.1,<7.0.2.0a0
+ url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-7.0.1-hd8ed1ab_0.conda
hash:
- md5: 12aff14f84c337be5e5636bf612f4140
- sha256: b9e8ed41df6c55222e3777f422e77a22a6a19ff779b2e65aa8dfdea792c1f7de
+ md5: 4a2f43a20fa404b998859c6a470ba316
+ sha256: bc362df1d4f5a04c38dff29cd9c2d0ac584f9c4b45d3e4683ee090944a38fba4
build: hd8ed1ab_0
arch: x86_64
subdir: win-64
@@ -11055,8 +11057,8 @@ package:
license: Apache-2.0
license_family: APACHE
noarch: generic
- size: 9534
- timestamp: 1701632342298
+ size: 9575
+ timestamp: 1703269436329
- platform: linux-64
name: iniconfig
version: 2.0.0
@@ -11680,75 +11682,76 @@ package:
category: main
manager: conda
dependencies:
- - libgcc-ng >=9.3.0
- url: https://conda.anaconda.org/conda-forge/linux-64/jxrlib-1.1-h7f98852_2.tar.bz2
+ - libgcc-ng >=12
+ url: https://conda.anaconda.org/conda-forge/linux-64/jxrlib-1.1-hd590300_3.conda
hash:
- md5: 8e787b08fe19986d99d034b839df2961
- sha256: 3ffc19c2ca272e6d5b8edc7cfc5bb71763dfdfa1810dd4b8820cc6b212ecbd95
- build: h7f98852_2
+ md5: 5aeabe88534ea4169d4c49998f293d6c
+ sha256: 2057ca87b313bde5b74b93b0e696f8faab69acd4cb0edebb78469f3f388040c0
+ build: hd590300_3
arch: x86_64
subdir: linux-64
- build_number: 2
+ build_number: 3
license: BSD-2-Clause
license_family: BSD
- size: 240904
- timestamp: 1607309174409
+ size: 239104
+ timestamp: 1703333860145
- platform: osx-64
name: jxrlib
version: '1.1'
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/osx-64/jxrlib-1.1-h35c211d_2.tar.bz2
+ url: https://conda.anaconda.org/conda-forge/osx-64/jxrlib-1.1-h10d778d_3.conda
hash:
- md5: 1c2379fd9d5d4ecb151231f6282e699d
- sha256: daeb6fe1e06549117a549bd94f4fb1ac575f80c67891171307057cb83a1d74fb
- build: h35c211d_2
+ md5: cfaf81d843a80812fe16a68bdae60562
+ sha256: a548a4be14a4c76d6d992a5c1feffcbb08062f5c57abc6e4278d40c2c9a7185b
+ build: h10d778d_3
arch: x86_64
subdir: osx-64
- build_number: 2
+ build_number: 3
license: BSD-2-Clause
license_family: BSD
- size: 231009
- timestamp: 1607309184331
+ size: 220376
+ timestamp: 1703334073774
- platform: osx-arm64
name: jxrlib
version: '1.1'
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/osx-arm64/jxrlib-1.1-h27ca646_2.tar.bz2
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/jxrlib-1.1-h93a5062_3.conda
hash:
- md5: 71447f8ae7d2a2fd0f16424983cf31e6
- sha256: 448795a54fe49a15cdef110b2d094799d933f36c2d8f5bc1e1c192b3617efe5f
- build: h27ca646_2
+ md5: 879997fd868f8e9e4c2a12aec8583799
+ sha256: c9e0d3cf9255d4585fa9b3d07ace3bd934fdc6a67ef4532e5507282eff2364ab
+ build: h93a5062_3
arch: aarch64
subdir: osx-arm64
- build_number: 2
+ build_number: 3
license: BSD-2-Clause
license_family: BSD
- size: 219467
- timestamp: 1607309174654
+ size: 197843
+ timestamp: 1703334079437
- platform: win-64
name: jxrlib
version: '1.1'
category: main
manager: conda
dependencies:
- - vc >=14.1,<15.0a0
- - vs2015_runtime >=14.16.27012
- url: https://conda.anaconda.org/conda-forge/win-64/jxrlib-1.1-h8ffe710_2.tar.bz2
+ - ucrt >=10.0.20348.0
+ - vc >=14.2,<15
+ - vc14_runtime >=14.29.30139
+ url: https://conda.anaconda.org/conda-forge/win-64/jxrlib-1.1-hcfcfb64_3.conda
hash:
- md5: 69f82948e102dc14928619140c29468d
- sha256: af50c7f499a6ecb0812e7a9fb63cc2a8264a721ea28b653f811a1cc174248b60
- build: h8ffe710_2
+ md5: a9dff8432c11dfa980346e934c29ca3f
+ sha256: a9ac265bcf65fce57cfb6512a1b072d5489445d14aa1b60c9bdf73370cf261b2
+ build: hcfcfb64_3
arch: x86_64
subdir: win-64
- build_number: 2
+ build_number: 3
license: BSD-2-Clause
license_family: BSD
- size: 635245
- timestamp: 1607309452074
+ size: 355340
+ timestamp: 1703334132631
- platform: linux-64
name: kealib
version: 1.5.3
@@ -12660,7 +12663,7 @@ package:
category: main
manager: conda
dependencies:
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- aws-sdk-cpp >=1.11.210,<1.11.211.0a0
- bzip2 >=1.0.8,<2.0a0
- glog >=0.6.0,<0.7.0a0
@@ -12679,22 +12682,21 @@ package:
- re2
- snappy >=1.1.10,<2.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-14.0.2-hfb4d3a9_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-14.0.2-h84dd17c_2_cpu.conda
hash:
- md5: 3fb24292de146d04d471ab88b44630e6
- sha256: b5d3061b86ce945f81f44206e9fd4d2b41b49e0773b90f92c2a21b285c695085
- build: hfb4d3a9_0_cpu
+ md5: 571e88947e7cadb42bfacb517c346662
+ sha256: 3490212354a8dc8960eeaa8de43cb85356c5318fd49a243a7108818e09aca982
+ build: h84dd17c_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
constrains:
+ - parquet-cpp <0.0a0
- arrow-cpp <0.0a0
- apache-arrow-proc =*=cpu
- - parquet-cpp <0.0a0
license: Apache-2.0
- license_family: APACHE
- size: 22758814
- timestamp: 1703071141489
+ size: 22721490
+ timestamp: 1704354970709
- platform: osx-64
name: libarrow
version: 14.0.2
@@ -12702,7 +12704,7 @@ package:
manager: conda
dependencies:
- __osx >=10.13
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- aws-sdk-cpp >=1.11.210,<1.11.211.0a0
- bzip2 >=1.0.8,<2.0a0
- glog >=0.6.0,<0.7.0a0
@@ -12720,29 +12722,28 @@ package:
- re2
- snappy >=1.1.10,<2.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-14.0.2-h2ef8067_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-14.0.2-h1aaacd4_2_cpu.conda
hash:
- md5: 6c36c96346898ac92cd54ce542a6652f
- sha256: 5eb549b8559307562d26fecfb628d18593440acef2e0a837efd7eed56425d7b5
- build: h2ef8067_0_cpu
+ md5: ab5d6d9fc2f3304d99bbe6e50059a4f0
+ sha256: 13fde6b9ca0357272c9710a438a769a18921490051e2dbcc815853d0c17a2a1c
+ build: h1aaacd4_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
constrains:
- parquet-cpp <0.0a0
- - arrow-cpp <0.0a0
- apache-arrow-proc =*=cpu
+ - arrow-cpp <0.0a0
license: Apache-2.0
- license_family: APACHE
- size: 15796373
- timestamp: 1703071443269
+ size: 15783377
+ timestamp: 1704355600914
- platform: osx-arm64
name: libarrow
version: 14.0.2
category: main
manager: conda
dependencies:
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- aws-sdk-cpp >=1.11.210,<1.11.211.0a0
- bzip2 >=1.0.8,<2.0a0
- glog >=0.6.0,<0.7.0a0
@@ -12760,29 +12761,28 @@ package:
- re2
- snappy >=1.1.10,<2.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-14.0.2-hfcbd24e_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-14.0.2-h4ce3932_2_cpu.conda
hash:
- md5: 704cb4ae73330c6f4789df5d1f98f655
- sha256: 5af555a1e85e92d3c11ca6ad0ae16b475525cb1a5f42316daaeb4863cbafdb17
- build: hfcbd24e_0_cpu
+ md5: d61d4cee3c195a5f574b3ade7a85ef94
+ sha256: 6c176a5ece3c72c0c1b7d7be5cc0f0a8dc637e634c936730a6d744e564fb75cb
+ build: h4ce3932_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
constrains:
- - parquet-cpp <0.0a0
- apache-arrow-proc =*=cpu
- arrow-cpp <0.0a0
+ - parquet-cpp <0.0a0
license: Apache-2.0
- license_family: APACHE
- size: 14646375
- timestamp: 1703072187529
+ size: 14634457
+ timestamp: 1704355766916
- platform: win-64
name: libarrow
version: 14.0.2
category: main
manager: conda
dependencies:
- - aws-crt-cpp >=0.25.0,<0.25.1.0a0
+ - aws-crt-cpp >=0.26.0,<0.26.1.0a0
- aws-sdk-cpp >=1.11.210,<1.11.211.0a0
- bzip2 >=1.0.8,<2.0a0
- libabseil * cxx17*
@@ -12804,196 +12804,187 @@ package:
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-14.0.2-h1048771_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-14.0.2-he5f67d5_2_cpu.conda
hash:
- md5: a4d4ff48a632a78408ccd0bee161f8c9
- sha256: e402f00fef6a71065adfd22fd98c31847324265028a5ff6a791ba5ceb22a057d
- build: h1048771_0_cpu
+ md5: 4deff1889c1047ab2920e63ce527e840
+ sha256: 12f7d520b023579cf9229ed4e20206254212f6faead301077c1204e4a2ccf879
+ build: he5f67d5_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
constrains:
- parquet-cpp <0.0a0
- - arrow-cpp <0.0a0
- apache-arrow-proc =*=cpu
+ - arrow-cpp <0.0a0
license: Apache-2.0
- license_family: APACHE
- size: 4967263
- timestamp: 1703071991391
+ size: 5000205
+ timestamp: 1704355416005
- platform: linux-64
name: libarrow-acero
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
- libgcc-ng >=12
- libstdcxx-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-acero-14.0.2-h59595ed_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-acero-14.0.2-h59595ed_2_cpu.conda
hash:
- md5: 8d23c56e691ff7dfc4b3eb7422976171
- sha256: aa53355876012975d0654f9f184e5f1e4b046bde87216c73ee14ab5bd12df1c3
- build: h59595ed_0_cpu
+ md5: 4b00f12f1f3c7350a0345aa9c18798d6
+ sha256: 4b847de73614a65edd892250ccd6e617d44fe88c2f56f10dfa0370b5c84120a7
+ build: h59595ed_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 576635
- timestamp: 1703071227194
+ size: 576913
+ timestamp: 1704355059610
- platform: osx-64
name: libarrow-acero
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h2ef8067_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
- libcxx >=14
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-acero-14.0.2-h000cb23_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-acero-14.0.2-h000cb23_2_cpu.conda
hash:
- md5: ad8780ab5549b76b0f55703188185498
- sha256: 163b3e2a4a6cb6f654111eadac0a5624c9b6120bc9a40c103789101b7fc0dd5e
- build: h000cb23_0_cpu
+ md5: 4e2fc03720088ab54db18aa84fbb75fe
+ sha256: dea3527b02eaf5eede8c94d3eb10d16c08f413aa61c574a941579e1a9b0a5a59
+ build: h000cb23_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 511657
- timestamp: 1703071560970
+ size: 511952
+ timestamp: 1704355744885
- platform: osx-arm64
name: libarrow-acero
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
- libcxx >=14
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-acero-14.0.2-h13dd4ca_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-acero-14.0.2-h13dd4ca_2_cpu.conda
hash:
- md5: ab067b5484b3078d1ed40c2471df8a7b
- sha256: 3fb317eb02be28568eb8f1f0c9dc466835b50fce6059ecaab07268fd746e982b
- build: h13dd4ca_0_cpu
+ md5: b4b1760597af9889cd2f5311b0c34e7f
+ sha256: 93784ab7aec5fe72a96bb028868037fc95ee0f72a43c5cdcdc98b31b3f6b3ef6
+ build: h13dd4ca_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 494780
- timestamp: 1703072321533
+ size: 494126
+ timestamp: 1704355933680
- platform: win-64
name: libarrow-acero
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h1048771_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-acero-14.0.2-h63175ca_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-acero-14.0.2-h63175ca_2_cpu.conda
hash:
- md5: e9b6b897fd6383dd6992bce38bff31ee
- sha256: 1523ee13fb280e45e5f9ca151677909fa30e278957781997af78734b7f2d7e6c
- build: h63175ca_0_cpu
+ md5: e3425ace09ee0422e654638f21fb2a9e
+ sha256: 46e4fda9f254981875138e4dd42624806081886b62b15216c7977a5edeff79bf
+ build: h63175ca_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 429549
- timestamp: 1703072106684
+ size: 430004
+ timestamp: 1704355525077
- platform: linux-64
name: libarrow-dataset
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
- - libarrow-acero 14.0.2 h59595ed_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
+ - libarrow-acero 14.0.2 h59595ed_2_cpu
- libgcc-ng >=12
- - libparquet 14.0.2 h352af49_0_cpu
+ - libparquet 14.0.2 h352af49_2_cpu
- libstdcxx-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-dataset-14.0.2-h59595ed_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-dataset-14.0.2-h59595ed_2_cpu.conda
hash:
- md5: 6a469f0dcc01b9ca7a19aa3e07bd8ea8
- sha256: f4987d736140a0a7c00d9856cefc68c87949961934e87a750d98d08dcd66c496
- build: h59595ed_0_cpu
+ md5: 5561fbdff1a630b3768fcbcd1307bcc2
+ sha256: edd010a1edcd73dc0b1e9b0fed1bd42f2e4252e0390a7f5432ae796fc39f2144
+ build: h59595ed_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 581750
- timestamp: 1703071350329
+ size: 582023
+ timestamp: 1704355222590
- platform: osx-64
name: libarrow-dataset
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h2ef8067_0_cpu
- - libarrow-acero 14.0.2 h000cb23_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
+ - libarrow-acero 14.0.2 h000cb23_2_cpu
- libcxx >=14
- - libparquet 14.0.2 h381d950_0_cpu
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-dataset-14.0.2-h000cb23_0_cpu.conda
+ - libparquet 14.0.2 h381d950_2_cpu
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-dataset-14.0.2-h000cb23_2_cpu.conda
hash:
- md5: 038830ab57f5728724316a694c8a3b25
- sha256: 9a05c784ac1da1aa4e6052b73899285631ebc57ffd741c98a357262713bd719a
- build: h000cb23_0_cpu
+ md5: 912e66fb84d6979da1d37535691b0a1b
+ sha256: 9c7773e8068d8230bc1a7af3bca60fd23ab69b0ee58bd1327e5ea7a4349f9b5c
+ build: h000cb23_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 511767
- timestamp: 1703071786274
+ size: 511562
+ timestamp: 1704356220922
- platform: osx-arm64
name: libarrow-dataset
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
- - libarrow-acero 14.0.2 h13dd4ca_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
+ - libarrow-acero 14.0.2 h13dd4ca_2_cpu
- libcxx >=14
- - libparquet 14.0.2 hf6ce1d5_0_cpu
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-dataset-14.0.2-h13dd4ca_0_cpu.conda
+ - libparquet 14.0.2 hf6ce1d5_2_cpu
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-dataset-14.0.2-h13dd4ca_2_cpu.conda
hash:
- md5: 6701be46791c612a3729a5b122fbf1ba
- sha256: 0b67fca23fce743d8d517b4ad32fc807e5668f0e661c3ffdc9d7acaf40886921
- build: h13dd4ca_0_cpu
+ md5: bec70ec592be6a282cb06250fa5e71a4
+ sha256: 4a72d1476f49b5c234a2ef798ef33c473f8d6307aaceec65341a4f11db5ba23c
+ build: h13dd4ca_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 526762
- timestamp: 1703072641168
+ size: 526384
+ timestamp: 1704356487597
- platform: win-64
name: libarrow-dataset
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h1048771_0_cpu
- - libarrow-acero 14.0.2 h63175ca_0_cpu
- - libparquet 14.0.2 h7ec3a38_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
+ - libarrow-acero 14.0.2 h63175ca_2_cpu
+ - libparquet 14.0.2 h7ec3a38_2_cpu
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-dataset-14.0.2-h63175ca_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-dataset-14.0.2-h63175ca_2_cpu.conda
hash:
- md5: 9484ada58fb3d9aebe17f7ed73688dbc
- sha256: 24a096de976d98ae2d3681ac0b158c7daf9ce58ffa1eaa4a831892504328c248
- build: h63175ca_0_cpu
+ md5: de680262729b7a718ee3427b7af68416
+ sha256: 53ae88b6b05f80309fa1789885b8b13a435d1c0cef71d2eb635da45417db8327
+ build: h63175ca_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 428226
- timestamp: 1703072444542
+ size: 428399
+ timestamp: 1704355889001
- platform: linux-64
name: libarrow-flight
version: 14.0.2
@@ -13002,24 +12993,23 @@ package:
dependencies:
- libabseil * cxx17*
- libabseil >=20230802.1,<20230803.0a0
- - libarrow 14.0.2 hfb4d3a9_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
- libgcc-ng >=12
- libgrpc >=1.59.3,<1.60.0a0
- libprotobuf >=4.24.4,<4.24.5.0a0
- libstdcxx-ng >=12
- ucx >=1.15.0,<1.16.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-flight-14.0.2-h120cb0d_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-flight-14.0.2-h120cb0d_2_cpu.conda
hash:
- md5: 5b2207ce6f7a383a094be45e4fab8abd
- sha256: d2d0231d776ddb23ec1c5ee4afc5033ecb2edbf329b2c71d90f4b1075b6448b8
- build: h120cb0d_0_cpu
+ md5: e03930dbf4508a5153cd0fc237756a75
+ sha256: aa6ca307db6fb91cff2712a6c6091a3f9277ce054550fbe26c7bb93d65e395fb
+ build: h120cb0d_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 500536
- timestamp: 1703071259814
+ size: 500908
+ timestamp: 1704355109389
- platform: osx-64
name: libarrow-flight
version: 14.0.2
@@ -13029,22 +13019,21 @@ package:
- __osx >=10.13
- libabseil * cxx17*
- libabseil >=20230802.1,<20230803.0a0
- - libarrow 14.0.2 h2ef8067_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
- libcxx >=14
- libgrpc >=1.59.3,<1.60.0a0
- libprotobuf >=4.24.4,<4.24.5.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-flight-14.0.2-ha1803ca_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-flight-14.0.2-ha1803ca_2_cpu.conda
hash:
- md5: 71f9f37cdf68bdef865daa456b3d4bc6
- sha256: c785b623aeff70dc66f84db0b0fa94c2a2e2ade842098388407328d400591707
- build: ha1803ca_0_cpu
+ md5: ea3430af75e5aad9d123eeae096e7413
+ sha256: 2c41f0edfa2ec26ede42d17211bac938a862252cc9ffb47a1e65d88ab53bf00e
+ build: ha1803ca_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 319125
- timestamp: 1703071617154
+ size: 319741
+ timestamp: 1704355863264
- platform: osx-arm64
name: libarrow-flight
version: 14.0.2
@@ -13053,22 +13042,21 @@ package:
dependencies:
- libabseil * cxx17*
- libabseil >=20230802.1,<20230803.0a0
- - libarrow 14.0.2 hfcbd24e_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
- libcxx >=14
- libgrpc >=1.59.3,<1.60.0a0
- libprotobuf >=4.24.4,<4.24.5.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-flight-14.0.2-ha94d253_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-flight-14.0.2-ha94d253_2_cpu.conda
hash:
- md5: 11056ee855450a86cf867cf003c0405b
- sha256: 09fcfa9236137ef8f450ed12d7731601f48abaca851e6d86cba3b1b7cfcc4d94
- build: ha94d253_0_cpu
+ md5: 9acf1572e3db073db00e67d21cfb7477
+ sha256: 6e18f49f8c6b58958882d9735529ebfec402ce3443e0934b9fd89ac1d7d22c57
+ build: ha94d253_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 332327
- timestamp: 1703072400999
+ size: 331157
+ timestamp: 1704356092185
- platform: win-64
name: libarrow-flight
version: 14.0.2
@@ -13077,47 +13065,45 @@ package:
dependencies:
- libabseil * cxx17*
- libabseil >=20230802.1,<20230803.0a0
- - libarrow 14.0.2 h1048771_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
- libgrpc >=1.59.3,<1.60.0a0
- libprotobuf >=4.24.4,<4.24.5.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-flight-14.0.2-h53b1db0_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-flight-14.0.2-h53b1db0_2_cpu.conda
hash:
- md5: ad4f0ee41c58cbc8875d1cc01b9f500c
- sha256: 982cc31e210f122dbaeaa2fa1fa02dac9de3f622d2ac0b7fe3f41acb03f63d40
- build: h53b1db0_0_cpu
+ md5: 65b3299f58b837ec5dd5de3ea9a9ac73
+ sha256: d1abcb363232ab848fd556f9861af14d846d5ad8ecbd3225af36d1a9bd45f5eb
+ build: h53b1db0_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 285234
- timestamp: 1703072192743
+ size: 285319
+ timestamp: 1704355615110
- platform: linux-64
name: libarrow-flight-sql
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
- - libarrow-flight 14.0.2 h120cb0d_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
+ - libarrow-flight 14.0.2 h120cb0d_2_cpu
- libgcc-ng >=12
- libprotobuf >=4.24.4,<4.24.5.0a0
- libstdcxx-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-flight-sql-14.0.2-h61ff412_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-flight-sql-14.0.2-h61ff412_2_cpu.conda
hash:
- md5: 0c70731f58f42b47940df94cc50e6b7e
- sha256: 7e6f63daf618f36fa8d0b69e12b3e06d44167c254812cf4dd6daae764d50b9da
- build: h61ff412_0_cpu
+ md5: 1afaafe003abec8aec34f46ec418980a
+ sha256: 839f7e2682e9b1b004df3c2c28ac4c0c04e1e9bd8f1942d48a3abe24700f1437
+ build: h61ff412_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 195566
- timestamp: 1703071381232
+ size: 193226
+ timestamp: 1704355263191
- platform: osx-64
name: libarrow-flight-sql
version: 14.0.2
@@ -13125,75 +13111,72 @@ package:
manager: conda
dependencies:
- __osx >=10.13
- - libarrow 14.0.2 h2ef8067_0_cpu
- - libarrow-flight 14.0.2 ha1803ca_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
+ - libarrow-flight 14.0.2 ha1803ca_2_cpu
- libcxx >=14
- libprotobuf >=4.24.4,<4.24.5.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-flight-sql-14.0.2-h8ec153b_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-flight-sql-14.0.2-h8ec153b_2_cpu.conda
hash:
- md5: f3f7a7dd573374ab394d208b11588174
- sha256: 2d7a42c49df8b51de0b8644f565938da6a23ddeed4945947fd7816c1cb12d2aa
- build: h8ec153b_0_cpu
+ md5: 67d65eb4d9921ab25a440db879138d09
+ sha256: a961db70a5256049e8cd6f08fcfbec1f397a33e79e8ad103e57e801b4fc3675f
+ build: h8ec153b_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 153957
- timestamp: 1703071841766
+ size: 154235
+ timestamp: 1704356322848
- platform: osx-arm64
name: libarrow-flight-sql
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
- - libarrow-flight 14.0.2 ha94d253_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
+ - libarrow-flight 14.0.2 ha94d253_2_cpu
- libcxx >=14
- libprotobuf >=4.24.4,<4.24.5.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-flight-sql-14.0.2-h39a9b85_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-flight-sql-14.0.2-h39a9b85_2_cpu.conda
hash:
- md5: d4dfd1737ce7ffd3abd9f9baed4596a1
- sha256: de48dbd7afc48e850553e77a782370309504ed6be61512769df04761bb60dc3a
- build: h39a9b85_0_cpu
+ md5: c6e0c75f69f3c31b0a4f02d415e0739d
+ sha256: 3621589405facf900779210f9f49556290cee0861a9797585eb6af7933017d65
+ build: h39a9b85_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 162272
- timestamp: 1703072720594
+ size: 161406
+ timestamp: 1704356624291
- platform: win-64
name: libarrow-flight-sql
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h1048771_0_cpu
- - libarrow-flight 14.0.2 h53b1db0_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
+ - libarrow-flight 14.0.2 h53b1db0_2_cpu
- libprotobuf >=4.24.4,<4.24.5.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-flight-sql-14.0.2-h78eab7c_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-flight-sql-14.0.2-h78eab7c_2_cpu.conda
hash:
- md5: bd89da75c2f0cfae8dfb8634bd2d07c3
- sha256: aa618b8624dc1b68eca75dcc71b2d59665cb09b55ace8b0621c5acac54cc5c7c
- build: h78eab7c_0_cpu
+ md5: 27731c55915b881c92112baac7fd4f04
+ sha256: 6dd037f69f153308f6c16009d76bc75ba12adf8c7f69b3006e57b2e07528f84f
+ build: h78eab7c_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 222340
- timestamp: 1703072515211
+ size: 222701
+ timestamp: 1704355966327
- platform: linux-64
name: libarrow-gandiva
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
- libgcc-ng >=12
- libllvm15 >=15.0.7,<15.1.0a0
- libre2-11 >=2023.6.2,<2024.0a0
@@ -13201,75 +13184,72 @@ package:
- libutf8proc >=2.8.0,<3.0a0
- openssl >=3.2.0,<4.0a0
- re2
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-gandiva-14.0.2-hacb8726_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-gandiva-14.0.2-hacb8726_2_cpu.conda
hash:
- md5: 1a59387c247c08029b8ac364c672d645
- sha256: 6998f800e556c5b30bc6969f87f35eaa128365eba1677e03a59b760e426005d3
- build: hacb8726_0_cpu
+ md5: db1a164476c3b26f4425542ab9ce11df
+ sha256: bc3c180af8e842877455d1d429c28a3207da4f3f243390b0ba8e06051525ff48
+ build: hacb8726_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
size: 893895
- timestamp: 1703071291229
+ timestamp: 1704355148995
- platform: osx-64
name: libarrow-gandiva
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h2ef8067_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
- libcxx >=14
- libllvm15 >=15.0.7,<15.1.0a0
- libre2-11 >=2023.6.2,<2024.0a0
- libutf8proc >=2.8.0,<3.0a0
- openssl >=3.2.0,<4.0a0
- re2
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-gandiva-14.0.2-h01dce7f_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-gandiva-14.0.2-h01dce7f_2_cpu.conda
hash:
- md5: 49dcce64c602416552839bc72d1e19af
- sha256: 4f5f08268290eac39e203067cc129a77097fef08d0a803dc84074e51158eb653
- build: h01dce7f_0_cpu
+ md5: 3903071e62de031ce169187cdb2e32af
+ sha256: d17c1474d202bac57c590b9f2cf062c837a69991c59135b422a520bff4bbae57
+ build: h01dce7f_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 699203
- timestamp: 1703071675108
+ size: 699571
+ timestamp: 1704355988767
- platform: osx-arm64
name: libarrow-gandiva
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
- libcxx >=14
- libllvm15 >=15.0.7,<15.1.0a0
- libre2-11 >=2023.6.2,<2024.0a0
- libutf8proc >=2.8.0,<3.0a0
- openssl >=3.2.0,<4.0a0
- re2
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-gandiva-14.0.2-hf757142_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-gandiva-14.0.2-hf757142_2_cpu.conda
hash:
- md5: c2859f7f9ac0734456150376aa96a76d
- sha256: 0cad54bbaae0edc326c70ec339373be828ddc4318c0c4352f4acf9f789d93829
- build: hf757142_0_cpu
+ md5: f4285d86a247a339bf7bf43fda4ded17
+ sha256: 237007a5c35c612823762327ae355f396ba34b66b40317e87de41f36a2f839bc
+ build: hf757142_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 688682
- timestamp: 1703072482479
+ size: 689008
+ timestamp: 1704356234671
- platform: win-64
name: libarrow-gandiva
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h1048771_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
- libre2-11 >=2023.6.2,<2024.0a0
- libutf8proc >=2.8.0,<3.0a0
- libzlib >=1.2.13,<1.3.0a0
@@ -13278,42 +13258,40 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-gandiva-14.0.2-hb2eaab1_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-gandiva-14.0.2-hb2eaab1_2_cpu.conda
hash:
- md5: ae05a265c97a4eebfb9f011747093792
- sha256: b406a3cf5576c0a6c84bfa3ad38659b647564d08580b389eaac868f8caed7626
- build: hb2eaab1_0_cpu
+ md5: dfaa10cd86363d7509cd4dbe8f13f031
+ sha256: c5681de79ecc4e054f32814927d918103f1a72a762990f8745c154a36a955fc7
+ build: hb2eaab1_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 10173041
- timestamp: 1703072274240
+ size: 10173801
+ timestamp: 1704355710068
- platform: linux-64
name: libarrow-substrait
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
- - libarrow-acero 14.0.2 h59595ed_0_cpu
- - libarrow-dataset 14.0.2 h59595ed_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
+ - libarrow-acero 14.0.2 h59595ed_2_cpu
+ - libarrow-dataset 14.0.2 h59595ed_2_cpu
- libgcc-ng >=12
- libprotobuf >=4.24.4,<4.24.5.0a0
- libstdcxx-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-substrait-14.0.2-h61ff412_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libarrow-substrait-14.0.2-h61ff412_2_cpu.conda
hash:
- md5: 6d789ad04a56815daf8785f7e8517430
- sha256: 9e36f0deb390f4a7074df94bca0ad61012cf870c7551056a1012f836532f025e
- build: h61ff412_0_cpu
+ md5: 838d3d33c458225578f64e59c4410a9f
+ sha256: 675d616f8454d38f41b1833e8deffc51dbb2c2b4b48c54e8adba4795ff606bec
+ build: h61ff412_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 510828
- timestamp: 1703071409132
+ size: 508750
+ timestamp: 1704355298174
- platform: osx-64
name: libarrow-substrait
version: 14.0.2
@@ -13321,46 +13299,44 @@ package:
manager: conda
dependencies:
- __osx >=10.13
- - libarrow 14.0.2 h2ef8067_0_cpu
- - libarrow-acero 14.0.2 h000cb23_0_cpu
- - libarrow-dataset 14.0.2 h000cb23_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
+ - libarrow-acero 14.0.2 h000cb23_2_cpu
+ - libarrow-dataset 14.0.2 h000cb23_2_cpu
- libcxx >=14
- libprotobuf >=4.24.4,<4.24.5.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-substrait-14.0.2-h8ec153b_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libarrow-substrait-14.0.2-h8ec153b_2_cpu.conda
hash:
- md5: a1ec755583bab2b633005689197abafd
- sha256: 2b28f8919b1a49ad356817e80eb600d7f009fa031a5618ac91d2f68c26aaa759
- build: h8ec153b_0_cpu
+ md5: 3e279309bd00e8115283665bdbe257b8
+ sha256: eabb1705fbd324b55a2f6cc7b7451b7a15882bab25cfd05113f3f1f05f2a787e
+ build: h8ec153b_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 452777
- timestamp: 1703071898443
+ size: 453318
+ timestamp: 1704356431617
- platform: osx-arm64
name: libarrow-substrait
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
- - libarrow-acero 14.0.2 h13dd4ca_0_cpu
- - libarrow-dataset 14.0.2 h13dd4ca_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
+ - libarrow-acero 14.0.2 h13dd4ca_2_cpu
+ - libarrow-dataset 14.0.2 h13dd4ca_2_cpu
- libcxx >=14
- libprotobuf >=4.24.4,<4.24.5.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-substrait-14.0.2-h7fd9903_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libarrow-substrait-14.0.2-h7fd9903_2_cpu.conda
hash:
- md5: e448064eaa9d0bb1e209762eb67aad62
- sha256: e6e417720a9f9ee4c0f8267aa8ac2fcc13b31ef86955915594b1f895f67a9bdf
- build: h7fd9903_0_cpu
+ md5: 98b3c3e9288e0d451d9329fa784fe256
+ sha256: f7712ad3916fb83171c82ba5031f4356df7cb96cff2a6fa9ef17e44fe9940270
+ build: h7fd9903_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 473587
- timestamp: 1703072796242
+ size: 474175
+ timestamp: 1704356753787
- platform: win-64
name: libarrow-substrait
version: 14.0.2
@@ -13369,25 +13345,24 @@ package:
dependencies:
- libabseil * cxx17*
- libabseil >=20230802.1,<20230803.0a0
- - libarrow 14.0.2 h1048771_0_cpu
- - libarrow-acero 14.0.2 h63175ca_0_cpu
- - libarrow-dataset 14.0.2 h63175ca_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
+ - libarrow-acero 14.0.2 h63175ca_2_cpu
+ - libarrow-dataset 14.0.2 h63175ca_2_cpu
- libprotobuf >=4.24.4,<4.24.5.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libarrow-substrait-14.0.2-hd4c9904_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libarrow-substrait-14.0.2-hd4c9904_2_cpu.conda
hash:
- md5: 39e1fd51d79d24acfaa07eed22a87bc9
- sha256: d16c0400ee591236c5a235f98e5ec7b7dff53385e88380d45c882d778e27c5ba
- build: hd4c9904_0_cpu
+ md5: 3d267143a5827a81b607619a12e3f7cd
+ sha256: cec14db792abd05497b40bfcc4c41ec3ad463cfcceeec5bd1422da5c297b4228
+ build: hd4c9904_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 346392
- timestamp: 1703072588075
+ size: 346667
+ timestamp: 1704356049987
- platform: linux-64
name: libass
version: 0.17.1
@@ -16884,91 +16859,87 @@ package:
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
- libgcc-ng >=12
- libstdcxx-ng >=12
- libthrift >=0.19.0,<0.19.1.0a0
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/libparquet-14.0.2-h352af49_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/libparquet-14.0.2-h352af49_2_cpu.conda
hash:
- md5: 2a325960d715af08b098bf4f746bf5c4
- sha256: 21ed9c44f9b26673dae3882b7582bf05159315179308c60dababae32566ea751
- build: h352af49_0_cpu
+ md5: b265698e6c69269fe78a8cc972269c35
+ sha256: 4d0113cb62bb15c7fde3da36b4bb1862182ec5f89e0165ebcf5ae0bce155097f
+ build: h352af49_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 1160779
- timestamp: 1703071320513
+ size: 1163710
+ timestamp: 1704355184404
- platform: osx-64
name: libparquet
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h2ef8067_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
- libcxx >=14
- libthrift >=0.19.0,<0.19.1.0a0
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/libparquet-14.0.2-h381d950_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/libparquet-14.0.2-h381d950_2_cpu.conda
hash:
- md5: 9aa0fe2c2f544d3f2fbd5f4c881c0354
- sha256: 80075697afb0af5c1ecad648020a0b5990296afe0e264f1576b00e1b39a23109
- build: h381d950_0_cpu
+ md5: c9b769d78c837821bfabc4fbd3cf3936
+ sha256: 8f58f4a46ae7eb500fff04a26e6f15de111999df4017bd4c4aac8f7245c95016
+ build: h381d950_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 924802
- timestamp: 1703071729894
+ size: 928914
+ timestamp: 1704356105480
- platform: osx-arm64
name: libparquet
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
- libcxx >=14
- libthrift >=0.19.0,<0.19.1.0a0
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/libparquet-14.0.2-hf6ce1d5_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libparquet-14.0.2-hf6ce1d5_2_cpu.conda
hash:
- md5: ece537fe02aef2f54d2aaf1189f2befb
- sha256: c78231121950b872a613b24fa0efb895774a7f1aeb153333a40c8a6a1a32ba92
- build: hf6ce1d5_0_cpu
+ md5: 3ec6dde23ee1ecea255c788e5ce16c82
+ sha256: e1deb2b68a0a24c85f75831e44a59d582168b5b9481ab7138683226e702416c2
+ build: hf6ce1d5_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 920078
- timestamp: 1703072561677
+ size: 911760
+ timestamp: 1704356359375
- platform: win-64
name: libparquet
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h1048771_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
- libthrift >=0.19.0,<0.19.1.0a0
- openssl >=3.2.0,<4.0a0
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/libparquet-14.0.2-h7ec3a38_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/libparquet-14.0.2-h7ec3a38_2_cpu.conda
hash:
- md5: b8f6b924db4c58ffacec67eceda316b5
- sha256: 8e935a4de26beec50c93b9c9a0c4e11f5bc44f13c48ec40dd58e1ad549b4826d
- build: h7ec3a38_0_cpu
+ md5: 4de7b992bc7bc1a71b07fa893c80fcfb
+ sha256: 929bb7e2a0903d9442aac788969b8626017e97193929bceae991a4ddcdde3875
+ build: h7ec3a38_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
license: Apache-2.0
- license_family: APACHE
- size: 781065
- timestamp: 1703072371135
+ size: 780687
+ timestamp: 1704355806769
- platform: linux-64
name: libpciaccess
version: '0.17'
@@ -20521,15 +20492,15 @@ package:
- pkg:pypi/matplotlib
- platform: linux-64
name: mdurl
- version: 0.1.0
+ version: 0.1.2
category: main
manager: conda
dependencies:
- python >=3.6
- url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.0-pyhd8ed1ab_0.tar.bz2
+ url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_0.conda
hash:
- md5: f8dab71fdc13b1bf29a01248b156d268
- sha256: c678b9194e025b1fb665bec30ee20aab93399203583875b1dcc0a3b52a8f5523
+ md5: 776a8dd9e824f77abac30e6ef43a8f7a
+ sha256: 64073dfb6bb429d52fff30891877b48c7ec0f89625b1bf844905b66a81cce6e1
build: pyhd8ed1ab_0
arch: x86_64
subdir: linux-64
@@ -20537,21 +20508,21 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 13707
- timestamp: 1639515992326
+ size: 14680
+ timestamp: 1704317789138
purls:
- pkg:pypi/mdurl
- platform: osx-64
name: mdurl
- version: 0.1.0
+ version: 0.1.2
category: main
manager: conda
dependencies:
- python >=3.6
- url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.0-pyhd8ed1ab_0.tar.bz2
+ url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_0.conda
hash:
- md5: f8dab71fdc13b1bf29a01248b156d268
- sha256: c678b9194e025b1fb665bec30ee20aab93399203583875b1dcc0a3b52a8f5523
+ md5: 776a8dd9e824f77abac30e6ef43a8f7a
+ sha256: 64073dfb6bb429d52fff30891877b48c7ec0f89625b1bf844905b66a81cce6e1
build: pyhd8ed1ab_0
arch: x86_64
subdir: osx-64
@@ -20559,21 +20530,21 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 13707
- timestamp: 1639515992326
+ size: 14680
+ timestamp: 1704317789138
purls:
- pkg:pypi/mdurl
- platform: osx-arm64
name: mdurl
- version: 0.1.0
+ version: 0.1.2
category: main
manager: conda
dependencies:
- python >=3.6
- url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.0-pyhd8ed1ab_0.tar.bz2
+ url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_0.conda
hash:
- md5: f8dab71fdc13b1bf29a01248b156d268
- sha256: c678b9194e025b1fb665bec30ee20aab93399203583875b1dcc0a3b52a8f5523
+ md5: 776a8dd9e824f77abac30e6ef43a8f7a
+ sha256: 64073dfb6bb429d52fff30891877b48c7ec0f89625b1bf844905b66a81cce6e1
build: pyhd8ed1ab_0
arch: aarch64
subdir: osx-arm64
@@ -20581,21 +20552,21 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 13707
- timestamp: 1639515992326
+ size: 14680
+ timestamp: 1704317789138
purls:
- pkg:pypi/mdurl
- platform: win-64
name: mdurl
- version: 0.1.0
+ version: 0.1.2
category: main
manager: conda
dependencies:
- python >=3.6
- url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.0-pyhd8ed1ab_0.tar.bz2
+ url: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_0.conda
hash:
- md5: f8dab71fdc13b1bf29a01248b156d268
- sha256: c678b9194e025b1fb665bec30ee20aab93399203583875b1dcc0a3b52a8f5523
+ md5: 776a8dd9e824f77abac30e6ef43a8f7a
+ sha256: 64073dfb6bb429d52fff30891877b48c7ec0f89625b1bf844905b66a81cce6e1
build: pyhd8ed1ab_0
arch: x86_64
subdir: win-64
@@ -20603,8 +20574,8 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 13707
- timestamp: 1639515992326
+ size: 14680
+ timestamp: 1704317789138
purls:
- pkg:pypi/mdurl
- platform: linux-64
@@ -20783,7 +20754,7 @@ package:
timestamp: 1698848042467
- platform: linux-64
name: minizip
- version: 4.0.3
+ version: 4.0.4
category: main
manager: conda
dependencies:
@@ -20792,76 +20763,74 @@ package:
- libiconv >=1.17,<2.0a0
- libstdcxx-ng >=12
- libzlib >=1.2.13,<1.3.0a0
- - openssl >=3.1.4,<4.0a0
+ - openssl >=3.2.0,<4.0a0
- xz >=5.2.6,<6.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/minizip-4.0.3-h0ab5242_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/minizip-4.0.4-h0ab5242_0.conda
hash:
- md5: 3f9b5f4400be3cee11b426a8cd653b7c
- sha256: cf33c24fa8375d17fad4e1da631b4c2e8ed9a109480fa45c82fbfa2a7c5bdd41
+ md5: 813bc75d9c33ddd9c9d5b8d9c560e152
+ sha256: e25d24c4841aa85ed2153f826ae58e56ae4d12704fd9e52005a3d7edfeb3b95a
build: h0ab5242_0
arch: x86_64
subdir: linux-64
build_number: 0
license: Zlib
license_family: Other
- size: 92378
- timestamp: 1699930958451
+ size: 91696
+ timestamp: 1703874701383
- platform: osx-64
name: minizip
- version: 4.0.3
+ version: 4.0.4
category: main
manager: conda
dependencies:
- - __osx >=10.9
- bzip2 >=1.0.8,<2.0a0
- - libcxx >=16.0.6
+ - libcxx >=15
- libiconv >=1.17,<2.0a0
- libzlib >=1.2.13,<1.3.0a0
- - openssl >=3.1.4,<4.0a0
+ - openssl >=3.2.0,<4.0a0
- xz >=5.2.6,<6.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/minizip-4.0.3-h23f18a7_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/minizip-4.0.4-h37d7099_0.conda
hash:
- md5: 2facac17555d3078a0abfbe20a331086
- sha256: 779cdb3ee14c653b6094414c251164b2398e50b825ba44455c67e7deeb6e48e1
- build: h23f18a7_0
+ md5: 36eb00b2cad8e12ee18683dbd15aeba6
+ sha256: c0be39fda07d913da8dbedc15306a1452780890822a8c04dcc8f46b533ca2908
+ build: h37d7099_0
arch: x86_64
subdir: osx-64
build_number: 0
license: Zlib
license_family: Other
- size: 78841
- timestamp: 1699931181751
+ size: 78578
+ timestamp: 1703874953968
- platform: osx-arm64
name: minizip
- version: 4.0.3
+ version: 4.0.4
category: main
manager: conda
dependencies:
- - __osx >=10.9
- bzip2 >=1.0.8,<2.0a0
- - libcxx >=16.0.6
+ - libcxx >=15
- libiconv >=1.17,<2.0a0
- libzlib >=1.2.13,<1.3.0a0
- - openssl >=3.1.4,<4.0a0
+ - openssl >=3.2.0,<4.0a0
- xz >=5.2.6,<6.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/minizip-4.0.3-hd5cad61_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/minizip-4.0.4-hc35e051_0.conda
hash:
- md5: 8f1bf9ea12bca129b7a3d49eec9efd76
- sha256: 9db88831aa3485d98cad155d989d4de45edfec13e6cbe81b0093ba7e6ba8817d
- build: hd5cad61_0
+ md5: 293ad87f065d0e1dc011ccafeb1bb0be
+ sha256: 0fbf65095148cfe9dab8b32b533b3d2752a66bbf459816345773ed73844a448b
+ build: hc35e051_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: Zlib
license_family: Other
- size: 77965
- timestamp: 1699931186188
+ size: 78452
+ timestamp: 1703874960663
- platform: win-64
name: minizip
- version: 4.0.3
+ version: 4.0.4
category: main
manager: conda
dependencies:
@@ -20872,18 +20841,18 @@ package:
- vc14_runtime >=14.29.30139
- xz >=5.2.6,<6.0a0
- zstd >=1.5.5,<1.6.0a0
- url: https://conda.anaconda.org/conda-forge/win-64/minizip-4.0.3-h5bed578_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/minizip-4.0.4-h5bed578_0.conda
hash:
- md5: 958b153628ecd3bf3cfd1644e2385bb4
- sha256: 317c43e644024f4ac820468f09c49d1f8491b14650e11d5c3516116320273c4b
+ md5: 26363ae28ac1928dcf846b4d68d5f29f
+ sha256: d9073fe4159263314b25f436b99ee0ebedad12fbf518937761089a5ff17259f5
build: h5bed578_0
arch: x86_64
subdir: win-64
build_number: 0
license: Zlib
license_family: Other
- size: 84913
- timestamp: 1699931497035
+ size: 85593
+ timestamp: 1703875236297
- platform: win-64
name: mkl
version: 2023.2.0
@@ -22700,7 +22669,7 @@ package:
- pkg:pypi/numcodecs
- platform: linux-64
name: numpy
- version: 1.26.2
+ version: 1.26.3
category: main
manager: conda
dependencies:
@@ -22711,10 +22680,10 @@ package:
- libstdcxx-ng >=12
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.26.2-py311h64a7726_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.26.3-py311h64a7726_0.conda
hash:
- md5: fd2f142dcd680413b5ede5d0fb799205
- sha256: c68b2c0ce95b79913134ec6ba2a2f1c10adcd60133afd48e4a57fdd128b694b7
+ md5: 231eef4f33640338f64ef9ab690ba08d
+ sha256: e1366ff215f071077b5cba57549bd5fe91196e7621e39b7aeff9e51c2fe236dc
build: py311h64a7726_0
arch: x86_64
subdir: linux-64
@@ -22723,28 +22692,27 @@ package:
- numpy-base <0a0
license: BSD-3-Clause
license_family: BSD
- size: 8120086
- timestamp: 1700874920400
+ size: 8192083
+ timestamp: 1704280748704
purls:
- pkg:pypi/numpy
- platform: osx-64
name: numpy
- version: 1.26.2
+ version: 1.26.3
category: main
manager: conda
dependencies:
- - __osx >=10.9
- libblas >=3.9.0,<4.0a0
- libcblas >=3.9.0,<4.0a0
- - libcxx >=16.0.6
+ - libcxx >=15
- liblapack >=3.9.0,<4.0a0
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-64/numpy-1.26.2-py311h93c810c_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/numpy-1.26.3-py311hc43a94b_0.conda
hash:
- md5: 94d3dd8a8c0ad89583648b31e1a7f72a
- sha256: ea57e333df9e3067b084a1d1b5c0abb65bc8dbdca22e1ee0300e58167032cb50
- build: py311h93c810c_0
+ md5: 2b34ce65028c82008ad41281b427e06d
+ sha256: e8258dcb0fcd7fee3c346adcee32d015d751a717adae9db6fb8b007b1793da21
+ build: py311hc43a94b_0
arch: x86_64
subdir: osx-64
build_number: 0
@@ -22752,29 +22720,28 @@ package:
- numpy-base <0a0
license: BSD-3-Clause
license_family: BSD
- size: 7675285
- timestamp: 1700875144507
+ size: 7551883
+ timestamp: 1704281405307
purls:
- pkg:pypi/numpy
- platform: osx-arm64
name: numpy
- version: 1.26.2
+ version: 1.26.3
category: main
manager: conda
dependencies:
- - __osx >=10.9
- libblas >=3.9.0,<4.0a0
- libcblas >=3.9.0,<4.0a0
- - libcxx >=16.0.6
+ - libcxx >=15
- liblapack >=3.9.0,<4.0a0
- python >=3.11,<3.12.0a0
- python >=3.11,<3.12.0a0 *_cpython
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-1.26.2-py311h6d074dd_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-1.26.3-py311h7125741_0.conda
hash:
- md5: 686a22bdbc601bfe7e7243f0263c10e3
- sha256: a88e48461bea106a0ce41684625f79efe473d42cf0f0310535dcfe1d98c888ba
- build: py311h6d074dd_0
+ md5: 13f78850d667ea2761b6d77294a9793b
+ sha256: 3b219a60de76c9c6d451353ad3f284aaa58c678d490d81dd6ecbe374844f4faf
+ build: py311h7125741_0
arch: aarch64
subdir: osx-arm64
build_number: 0
@@ -22782,13 +22749,13 @@ package:
- numpy-base <0a0
license: BSD-3-Clause
license_family: BSD
- size: 6710261
- timestamp: 1700875222261
+ size: 6753221
+ timestamp: 1704281444008
purls:
- pkg:pypi/numpy
- platform: win-64
name: numpy
- version: 1.26.2
+ version: 1.26.3
category: main
manager: conda
dependencies:
@@ -22800,10 +22767,10 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/numpy-1.26.2-py311h0b4df5a_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/numpy-1.26.3-py311h0b4df5a_0.conda
hash:
- md5: 4bdfc84f67dafd5867702b1003dc8146
- sha256: a1ee243779b274afd7dfe3bba6dfc740716411990bb91e6ae126a5f481dfd92a
+ md5: 9f9218268a56328c2d0425d7f81cb1ad
+ sha256: 6553390519a01b2ba94a6cf508413f0f3a81473e6047a0cfe597c03420c3ff08
build: py311h0b4df5a_0
arch: x86_64
subdir: win-64
@@ -22812,8 +22779,8 @@ package:
- numpy-base <0a0
license: BSD-3-Clause
license_family: BSD
- size: 7081908
- timestamp: 1700875402881
+ size: 7092012
+ timestamp: 1704281575150
purls:
- pkg:pypi/numpy
- platform: linux-64
@@ -23632,7 +23599,7 @@ package:
- pkg:pypi/pandamesh
- platform: linux-64
name: pandas
- version: 1.5.3
+ version: 2.1.4
category: main
manager: conda
dependencies:
@@ -23641,95 +23608,101 @@ package:
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python-dateutil >=2.8.1
+ - python-tzdata >=2022a
- python_abi 3.11.* *_cp311
- pytz >=2020.1
- url: https://conda.anaconda.org/conda-forge/linux-64/pandas-1.5.3-py311h2872171_1.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/pandas-2.1.4-py311h320fe9a_0.conda
hash:
- md5: 6bb03bf6d4fab68174eae8b06c3b6934
- sha256: b53eda154e13fd49c494eb7ba95b22b2b7c72cbeab4ed3a2213144d75558bc9f
- build: py311h2872171_1
+ md5: e44ccb61b6621bf3f8053ae66eba7397
+ sha256: 4f40035c77c381e0151c0fb0b39d0dfc343947f7d283c2bced2123273c5d4884
+ build: py311h320fe9a_0
arch: x86_64
subdir: linux-64
- build_number: 1
+ build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 14424060
- timestamp: 1680108166917
+ size: 14990951
+ timestamp: 1702057771639
- platform: osx-64
name: pandas
- version: 1.5.3
+ version: 2.1.4
category: main
manager: conda
dependencies:
- - libcxx >=14.0.6
+ - __osx >=10.9
+ - libcxx >=16.0.6
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python-dateutil >=2.8.1
+ - python-tzdata >=2022a
- python_abi 3.11.* *_cp311
- pytz >=2020.1
- url: https://conda.anaconda.org/conda-forge/osx-64/pandas-1.5.3-py311hd84f3f5_1.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/pandas-2.1.4-py311h1eadf79_0.conda
hash:
- md5: 91c1a8abfd1e7d130941422807942991
- sha256: 288dba9a2cc0392ce435c2e63cdad5de65c7e6b4767ddf9d27ea4c32e80e8cf0
- build: py311hd84f3f5_1
+ md5: 905aff7f27a734e251526b9a7ecc20ac
+ sha256: 994d226d29293c1b118c0902af50719c5fc25c63c2c6cf7be7808b44226e19eb
+ build: py311h1eadf79_0
arch: x86_64
subdir: osx-64
- build_number: 1
+ build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 13906883
- timestamp: 1680108905350
+ size: 14378188
+ timestamp: 1702058207156
- platform: osx-arm64
name: pandas
- version: 1.5.3
+ version: 2.1.4
category: main
manager: conda
dependencies:
- - libcxx >=14.0.6
+ - __osx >=10.9
+ - libcxx >=16.0.6
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python >=3.11,<3.12.0a0 *_cpython
- python-dateutil >=2.8.1
+ - python-tzdata >=2022a
- python_abi 3.11.* *_cp311
- pytz >=2020.1
- url: https://conda.anaconda.org/conda-forge/osx-arm64/pandas-1.5.3-py311h4eec4a9_1.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/pandas-2.1.4-py311h6e08293_0.conda
hash:
- md5: b70851b2bd189e56c4100194312ecb99
- sha256: 21f25fdb1cb947097eb83cd730efcf7d6e28ddeaeaa7245f7198da719f1d2fe2
- build: py311h4eec4a9_1
+ md5: 5130445e7fe8465365dab3ed3694c770
+ sha256: fa6c79cf5252c22ef8141a405ea9650ea9df2d6436c003b279086e146eaef1e5
+ build: py311h6e08293_0
arch: aarch64
subdir: osx-arm64
- build_number: 1
+ build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 13789860
- timestamp: 1680108642752
+ size: 14300774
+ timestamp: 1702058260980
- platform: win-64
name: pandas
- version: 1.5.3
+ version: 2.1.4
category: main
manager: conda
dependencies:
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python-dateutil >=2.8.1
+ - python-tzdata >=2022a
- python_abi 3.11.* *_cp311
- pytz >=2020.1
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- - vs2015_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/pandas-1.5.3-py311hf63dbb6_1.conda
+ - vc14_runtime >=14.29.30139
+ url: https://conda.anaconda.org/conda-forge/win-64/pandas-2.1.4-py311hf63dbb6_0.conda
hash:
- md5: f9af1ae5b501548160bbcae6c324ba9c
- sha256: 3cb430806e8818a93ca650d29ef3b74c21b4368a5dccbb2e74a292e3d845784a
- build: py311hf63dbb6_1
+ md5: 5179a87c529fa08864d899f05e16345b
+ sha256: be6d27e9c7e83c4dbc40003f2d62b79a5af0a3febb3a69a496d867763d983958
+ build: py311hf63dbb6_0
arch: x86_64
subdir: win-64
- build_number: 1
+ build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 13224533
- timestamp: 1680109726099
+ size: 13785578
+ timestamp: 1702058272745
- platform: linux-64
name: pango
version: 1.50.14
@@ -24101,12 +24074,12 @@ package:
timestamp: 1698611415241
- platform: linux-64
name: pillow
- version: 10.1.0
+ version: 10.2.0
category: main
manager: conda
dependencies:
- freetype >=2.12.1,<3.0a0
- - lcms2 >=2.15,<3.0a0
+ - lcms2 >=2.16,<3.0a0
- libgcc-ng >=12
- libjpeg-turbo >=3.0.0,<4.0a0
- libtiff >=4.6.0,<4.7.0a0
@@ -24117,25 +24090,25 @@ package:
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- tk >=8.6.13,<8.7.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/pillow-10.1.0-py311ha6c5da5_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/pillow-10.2.0-py311ha6c5da5_0.conda
hash:
- md5: 83a988daf5c49e57f7d2086fb6781fe8
- sha256: 5b037243f76644fe2e565aa6a3764039dba47cddf8bbef8ef01643775a459b60
+ md5: a5ccd7f2271f28b7d2de0b02b64e3796
+ sha256: 3cd4827d822c9888b672bfac9017e905348ac5bd2237a98b30a734ed6573b248
build: py311ha6c5da5_0
arch: x86_64
subdir: linux-64
build_number: 0
license: HPND
- size: 46400469
- timestamp: 1697423839735
+ size: 41629216
+ timestamp: 1704252244851
- platform: osx-64
name: pillow
- version: 10.1.0
+ version: 10.2.0
category: main
manager: conda
dependencies:
- freetype >=2.12.1,<3.0a0
- - lcms2 >=2.15,<3.0a0
+ - lcms2 >=2.16,<3.0a0
- libjpeg-turbo >=3.0.0,<4.0a0
- libtiff >=4.6.0,<4.7.0a0
- libwebp-base >=1.3.2,<2.0a0
@@ -24145,25 +24118,25 @@ package:
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- tk >=8.6.13,<8.7.0a0
- url: https://conda.anaconda.org/conda-forge/osx-64/pillow-10.1.0-py311hea5c87a_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/pillow-10.2.0-py311hea5c87a_0.conda
hash:
- md5: ffff517d90b21a5d44ef907a5a01f695
- sha256: 44bb951ae60cc96ab9273592ede9ee94a422857e857e52c55c261ad5f1525686
+ md5: 1709b31ce50343c7a7b3940ed30cc429
+ sha256: c3f3d2276943d5bf27d184df76dcef15ad120d23f9eea92e05340093acee98fc
build: py311hea5c87a_0
arch: x86_64
subdir: osx-64
build_number: 0
license: HPND
- size: 46474850
- timestamp: 1697423945362
+ size: 42176355
+ timestamp: 1704252505386
- platform: osx-arm64
name: pillow
- version: 10.1.0
+ version: 10.2.0
category: main
manager: conda
dependencies:
- freetype >=2.12.1,<3.0a0
- - lcms2 >=2.15,<3.0a0
+ - lcms2 >=2.16,<3.0a0
- libjpeg-turbo >=3.0.0,<4.0a0
- libtiff >=4.6.0,<4.7.0a0
- libwebp-base >=1.3.2,<2.0a0
@@ -24174,25 +24147,25 @@ package:
- python >=3.11,<3.12.0a0 *_cpython
- python_abi 3.11.* *_cp311
- tk >=8.6.13,<8.7.0a0
- url: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-10.1.0-py311hb9c5795_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-10.2.0-py311hb9c5795_0.conda
hash:
- md5: 90fd1f60da9f3d5eccdece0945043037
- sha256: 9699ba6886e94e32eb949009195ed78c2c949f74450235af491cd4cbe390d7b4
+ md5: 97c499f0ac4792fb1e33295c9adfb351
+ sha256: c09ed761df062c62e83b78c66a1987a6a727fa45dd5fadde3b436ad5566c216e
build: py311hb9c5795_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: HPND
- size: 45850858
- timestamp: 1697424033737
+ size: 41593553
+ timestamp: 1704252636313
- platform: win-64
name: pillow
- version: 10.1.0
+ version: 10.2.0
category: main
manager: conda
dependencies:
- freetype >=2.12.1,<3.0a0
- - lcms2 >=2.15,<3.0a0
+ - lcms2 >=2.16,<3.0a0
- libjpeg-turbo >=3.0.0,<4.0a0
- libtiff >=4.6.0,<4.7.0a0
- libwebp-base >=1.3.2,<2.0a0
@@ -24205,17 +24178,17 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/pillow-10.1.0-py311h4dd8a23_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/pillow-10.2.0-py311h4dd8a23_0.conda
hash:
- md5: c03c410ca23313c36381a128737ac25f
- sha256: 494426ecfcbf5f177b5350dfdb5ca93a529a1873247c38beda25a70ad369fe36
+ md5: 8e4d6eed54fea0725d77c0a333e9fa51
+ sha256: 609d0106c35798345eb155605d21d8dfee0608d68bd51a97f7842d7c73ec10f8
build: py311h4dd8a23_0
arch: x86_64
subdir: win-64
build_number: 0
license: HPND
- size: 46022436
- timestamp: 1697424195333
+ size: 42099257
+ timestamp: 1704252849476
- platform: linux-64
name: pip
version: 23.3.2
@@ -25680,132 +25653,128 @@ package:
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfb4d3a9_0_cpu
- - libarrow-acero 14.0.2 h59595ed_0_cpu
- - libarrow-dataset 14.0.2 h59595ed_0_cpu
- - libarrow-flight 14.0.2 h120cb0d_0_cpu
- - libarrow-flight-sql 14.0.2 h61ff412_0_cpu
- - libarrow-gandiva 14.0.2 hacb8726_0_cpu
- - libarrow-substrait 14.0.2 h61ff412_0_cpu
+ - libarrow 14.0.2 h84dd17c_2_cpu
+ - libarrow-acero 14.0.2 h59595ed_2_cpu
+ - libarrow-dataset 14.0.2 h59595ed_2_cpu
+ - libarrow-flight 14.0.2 h120cb0d_2_cpu
+ - libarrow-flight-sql 14.0.2 h61ff412_2_cpu
+ - libarrow-gandiva 14.0.2 hacb8726_2_cpu
+ - libarrow-substrait 14.0.2 h61ff412_2_cpu
- libgcc-ng >=12
- - libparquet 14.0.2 h352af49_0_cpu
+ - libparquet 14.0.2 h352af49_2_cpu
- libstdcxx-ng >=12
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/linux-64/pyarrow-14.0.2-py311h39c9aba_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/pyarrow-14.0.2-py311h39c9aba_2_cpu.conda
hash:
- md5: 27885a87fe1250c1cfea5dd92f22fcfd
- sha256: d9c524522982acce87f40a22f7ca352481804c1f79fa0cb58a3e8bd4f97a2842
- build: py311h39c9aba_0_cpu
+ md5: 07fb7193fa96aab8e61c4483b9e61e51
+ sha256: 9117ea0cf236f9bb43cc05ec2d0744c9f789132dbc255d82f1fd2b6fd9e0b3ba
+ build: py311h39c9aba_2_cpu
arch: x86_64
subdir: linux-64
- build_number: 0
+ build_number: 2
constrains:
- apache-arrow-proc =*=cpu
license: Apache-2.0
- license_family: APACHE
- size: 4519798
- timestamp: 1703074371259
+ size: 4515359
+ timestamp: 1704356601353
- platform: osx-64
name: pyarrow
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h2ef8067_0_cpu
- - libarrow-acero 14.0.2 h000cb23_0_cpu
- - libarrow-dataset 14.0.2 h000cb23_0_cpu
- - libarrow-flight 14.0.2 ha1803ca_0_cpu
- - libarrow-flight-sql 14.0.2 h8ec153b_0_cpu
- - libarrow-gandiva 14.0.2 h01dce7f_0_cpu
- - libarrow-substrait 14.0.2 h8ec153b_0_cpu
+ - libarrow 14.0.2 h1aaacd4_2_cpu
+ - libarrow-acero 14.0.2 h000cb23_2_cpu
+ - libarrow-dataset 14.0.2 h000cb23_2_cpu
+ - libarrow-flight 14.0.2 ha1803ca_2_cpu
+ - libarrow-flight-sql 14.0.2 h8ec153b_2_cpu
+ - libarrow-gandiva 14.0.2 h01dce7f_2_cpu
+ - libarrow-substrait 14.0.2 h8ec153b_2_cpu
- libcxx >=14
- - libparquet 14.0.2 h381d950_0_cpu
+ - libparquet 14.0.2 h381d950_2_cpu
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-64/pyarrow-14.0.2-py311h54e7ce8_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/pyarrow-14.0.2-py311h54e7ce8_2_cpu.conda
hash:
- md5: 023f38e1d72e043d22fca20ea0123871
- sha256: 3882df6eb9fb74c3b9b850c6907f929534c40581d212bf2e7fb49e2fe71bdc72
- build: py311h54e7ce8_0_cpu
+ md5: a4c277c33b47c06b3c3fd2939fdb9c9a
+ sha256: fb631eb88510f59c2a5dc21764ce554e961e7970ea952e17bfb5742b54b322ba
+ build: py311h54e7ce8_2_cpu
arch: x86_64
subdir: osx-64
- build_number: 0
+ build_number: 2
constrains:
- apache-arrow-proc =*=cpu
license: Apache-2.0
- license_family: APACHE
- size: 4007213
- timestamp: 1703073037985
+ size: 4029309
+ timestamp: 1704362351224
- platform: osx-arm64
name: pyarrow
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 hfcbd24e_0_cpu
- - libarrow-acero 14.0.2 h13dd4ca_0_cpu
- - libarrow-dataset 14.0.2 h13dd4ca_0_cpu
- - libarrow-flight 14.0.2 ha94d253_0_cpu
- - libarrow-flight-sql 14.0.2 h39a9b85_0_cpu
- - libarrow-gandiva 14.0.2 hf757142_0_cpu
- - libarrow-substrait 14.0.2 h7fd9903_0_cpu
+ - libarrow 14.0.2 h4ce3932_2_cpu
+ - libarrow-acero 14.0.2 h13dd4ca_2_cpu
+ - libarrow-dataset 14.0.2 h13dd4ca_2_cpu
+ - libarrow-flight 14.0.2 ha94d253_2_cpu
+ - libarrow-flight-sql 14.0.2 h39a9b85_2_cpu
+ - libarrow-gandiva 14.0.2 hf757142_2_cpu
+ - libarrow-substrait 14.0.2 h7fd9903_2_cpu
- libcxx >=14
- - libparquet 14.0.2 hf6ce1d5_0_cpu
+ - libparquet 14.0.2 hf6ce1d5_2_cpu
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python >=3.11,<3.12.0a0 *_cpython
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-arm64/pyarrow-14.0.2-py311hd7bc329_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/pyarrow-14.0.2-py311hd7bc329_2_cpu.conda
hash:
- md5: c3721cd231033c181a914d87f2f01f70
- sha256: a9cfb9a064b3da9eac0fd37a4c904159dee848cc18d975480ef3f1b4b158a578
- build: py311hd7bc329_0_cpu
+ md5: 6a599e9f9b2ab76b1006cf60fe1730a1
+ sha256: 382cae1613f740afac2bedf7b4b3d7d19775ab7c31b419553351806d0ac9bc29
+ build: py311hd7bc329_2_cpu
arch: aarch64
subdir: osx-arm64
- build_number: 0
+ build_number: 2
constrains:
- apache-arrow-proc =*=cpu
license: Apache-2.0
- license_family: APACHE
- size: 4093748
- timestamp: 1703074691041
+ size: 4090895
+ timestamp: 1704361615592
- platform: win-64
name: pyarrow
version: 14.0.2
category: main
manager: conda
dependencies:
- - libarrow 14.0.2 h1048771_0_cpu
- - libarrow-acero 14.0.2 h63175ca_0_cpu
- - libarrow-dataset 14.0.2 h63175ca_0_cpu
- - libarrow-flight 14.0.2 h53b1db0_0_cpu
- - libarrow-flight-sql 14.0.2 h78eab7c_0_cpu
- - libarrow-gandiva 14.0.2 hb2eaab1_0_cpu
- - libarrow-substrait 14.0.2 hd4c9904_0_cpu
- - libparquet 14.0.2 h7ec3a38_0_cpu
+ - libarrow 14.0.2 he5f67d5_2_cpu
+ - libarrow-acero 14.0.2 h63175ca_2_cpu
+ - libarrow-dataset 14.0.2 h63175ca_2_cpu
+ - libarrow-flight 14.0.2 h53b1db0_2_cpu
+ - libarrow-flight-sql 14.0.2 h78eab7c_2_cpu
+ - libarrow-gandiva 14.0.2 hb2eaab1_2_cpu
+ - libarrow-substrait 14.0.2 hd4c9904_2_cpu
+ - libparquet 14.0.2 h7ec3a38_2_cpu
- numpy >=1.23.5,<2.0a0
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/pyarrow-14.0.2-py311h6a6099b_0_cpu.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/pyarrow-14.0.2-py311h6a6099b_2_cpu.conda
hash:
- md5: 2d2e32ba7d7533349566c4a10e4ba803
- sha256: 07f28f5b054093c159a8dba2534bc8bc867355b1b2678c6782efb903f73f74ff
- build: py311h6a6099b_0_cpu
+ md5: 3e9caae94be752658cbe9449440681ee
+ sha256: cd32ac9c0b740933f5c47eb3b9e731b7ff171cc648fe72434de3c95585d7af5e
+ build: py311h6a6099b_2_cpu
arch: x86_64
subdir: win-64
- build_number: 0
+ build_number: 2
constrains:
- apache-arrow-proc =*=cpu
license: Apache-2.0
- license_family: APACHE
- size: 3483222
- timestamp: 1703074974424
+ size: 3486905
+ timestamp: 1704357865002
- platform: linux-64
name: pyarrow-hotfix
version: '0.6'
@@ -26678,7 +26647,7 @@ package:
- pkg:pypi/pysocks
- platform: linux-64
name: pytest
- version: 7.4.3
+ version: 7.4.4
category: main
manager: conda
dependencies:
@@ -26689,10 +26658,10 @@ package:
- pluggy >=0.12,<2.0
- python >=3.7
- tomli >=1.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.4-pyhd8ed1ab_0.conda
hash:
- md5: 5bdca0aca30b0ee62bb84854e027eae0
- sha256: 14e948e620ec87d9e62a8d9c21d40084b4805a939cfee322be7d457379dc96a0
+ md5: a9d145de8c5f064b5fa68fb34725d9f4
+ sha256: 8979721b7f86b183d21103f3ec2734783847d317c1b754f462f407efc7c60886
build: pyhd8ed1ab_0
arch: x86_64
subdir: linux-64
@@ -26702,13 +26671,13 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 244758
- timestamp: 1698233883003
+ size: 244564
+ timestamp: 1704035308916
purls:
- pkg:pypi/pytest
- platform: osx-64
name: pytest
- version: 7.4.3
+ version: 7.4.4
category: main
manager: conda
dependencies:
@@ -26719,10 +26688,10 @@ package:
- pluggy >=0.12,<2.0
- python >=3.7
- tomli >=1.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.4-pyhd8ed1ab_0.conda
hash:
- md5: 5bdca0aca30b0ee62bb84854e027eae0
- sha256: 14e948e620ec87d9e62a8d9c21d40084b4805a939cfee322be7d457379dc96a0
+ md5: a9d145de8c5f064b5fa68fb34725d9f4
+ sha256: 8979721b7f86b183d21103f3ec2734783847d317c1b754f462f407efc7c60886
build: pyhd8ed1ab_0
arch: x86_64
subdir: osx-64
@@ -26732,13 +26701,13 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 244758
- timestamp: 1698233883003
+ size: 244564
+ timestamp: 1704035308916
purls:
- pkg:pypi/pytest
- platform: osx-arm64
name: pytest
- version: 7.4.3
+ version: 7.4.4
category: main
manager: conda
dependencies:
@@ -26749,10 +26718,10 @@ package:
- pluggy >=0.12,<2.0
- python >=3.7
- tomli >=1.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.4-pyhd8ed1ab_0.conda
hash:
- md5: 5bdca0aca30b0ee62bb84854e027eae0
- sha256: 14e948e620ec87d9e62a8d9c21d40084b4805a939cfee322be7d457379dc96a0
+ md5: a9d145de8c5f064b5fa68fb34725d9f4
+ sha256: 8979721b7f86b183d21103f3ec2734783847d317c1b754f462f407efc7c60886
build: pyhd8ed1ab_0
arch: aarch64
subdir: osx-arm64
@@ -26762,13 +26731,13 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 244758
- timestamp: 1698233883003
+ size: 244564
+ timestamp: 1704035308916
purls:
- pkg:pypi/pytest
- platform: win-64
name: pytest
- version: 7.4.3
+ version: 7.4.4
category: main
manager: conda
dependencies:
@@ -26779,10 +26748,10 @@ package:
- pluggy >=0.12,<2.0
- python >=3.7
- tomli >=1.0.0
- url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.3-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/pytest-7.4.4-pyhd8ed1ab_0.conda
hash:
- md5: 5bdca0aca30b0ee62bb84854e027eae0
- sha256: 14e948e620ec87d9e62a8d9c21d40084b4805a939cfee322be7d457379dc96a0
+ md5: a9d145de8c5f064b5fa68fb34725d9f4
+ sha256: 8979721b7f86b183d21103f3ec2734783847d317c1b754f462f407efc7c60886
build: pyhd8ed1ab_0
arch: x86_64
subdir: win-64
@@ -26792,8 +26761,8 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 244758
- timestamp: 1698233883003
+ size: 244564
+ timestamp: 1704035308916
purls:
- pkg:pypi/pytest
- platform: linux-64
@@ -27476,6 +27445,94 @@ package:
timestamp: 1658658750417
purls:
- pkg:pypi/graphviz
+- platform: linux-64
+ name: python-tzdata
+ version: '2023.4'
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2023.4-pyhd8ed1ab_0.conda
+ hash:
+ md5: c79cacf8a06a51552fc651652f170208
+ sha256: d2381037bf362c78654a8ece0e0f54715e09113448ddd7ed837f688536cbf176
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 146007
+ timestamp: 1703878849208
+ purls:
+ - pkg:pypi/tzdata
+- platform: osx-64
+ name: python-tzdata
+ version: '2023.4'
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2023.4-pyhd8ed1ab_0.conda
+ hash:
+ md5: c79cacf8a06a51552fc651652f170208
+ sha256: d2381037bf362c78654a8ece0e0f54715e09113448ddd7ed837f688536cbf176
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 146007
+ timestamp: 1703878849208
+ purls:
+ - pkg:pypi/tzdata
+- platform: osx-arm64
+ name: python-tzdata
+ version: '2023.4'
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2023.4-pyhd8ed1ab_0.conda
+ hash:
+ md5: c79cacf8a06a51552fc651652f170208
+ sha256: d2381037bf362c78654a8ece0e0f54715e09113448ddd7ed837f688536cbf176
+ build: pyhd8ed1ab_0
+ arch: aarch64
+ subdir: osx-arm64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 146007
+ timestamp: 1703878849208
+ purls:
+ - pkg:pypi/tzdata
+- platform: win-64
+ name: python-tzdata
+ version: '2023.4'
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2023.4-pyhd8ed1ab_0.conda
+ hash:
+ md5: c79cacf8a06a51552fc651652f170208
+ sha256: d2381037bf362c78654a8ece0e0f54715e09113448ddd7ed837f688536cbf176
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 146007
+ timestamp: 1703878849208
+ purls:
+ - pkg:pypi/tzdata
- platform: linux-64
name: python_abi
version: '3.11'
@@ -29102,7 +29159,7 @@ package:
- pkg:pypi/rtree
- platform: linux-64
name: ruff
- version: 0.1.9
+ version: 0.1.11
category: main
manager: conda
dependencies:
@@ -29110,42 +29167,42 @@ package:
- libstdcxx-ng >=12
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/linux-64/ruff-0.1.9-py311h7145743_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/ruff-0.1.11-py311h7145743_0.conda
hash:
- md5: 6573b3a7833e2586936b05d99f4a1a75
- sha256: ace238ba12e4eddf866ad006e0af32410c378627e9b01b820a05c172c77460b9
+ md5: 7d148942936a7a586fcd5528ba477dbf
+ sha256: cecdc23d6b179897fe155914cc461cf421c6a26a607fda0ab6e0c6cfadbcde08
build: py311h7145743_0
arch: x86_64
subdir: linux-64
build_number: 0
license: MIT
license_family: MIT
- size: 5459717
- timestamp: 1703209411721
+ size: 5483251
+ timestamp: 1704293303507
- platform: osx-64
name: ruff
- version: 0.1.9
+ version: 0.1.11
category: main
manager: conda
dependencies:
- libcxx >=15
- python >=3.11,<3.12.0a0
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-64/ruff-0.1.9-py311ha071555_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/ruff-0.1.11-py311ha071555_0.conda
hash:
- md5: 237091298be0de6ae323ae7dafd74c5d
- sha256: 37b07e24a7a9265783f175cfd0e78af3676b06dc06c072d98c78839d94bd9da7
+ md5: 108f666d4b0d67c4dfbd28769927097f
+ sha256: 1a50dd68d504ca89696ec5738b1448e5b391f470732d8a5265db8e1d747c39ce
build: py311ha071555_0
arch: x86_64
subdir: osx-64
build_number: 0
license: MIT
license_family: MIT
- size: 5279079
- timestamp: 1703216580602
+ size: 5296599
+ timestamp: 1704298924933
- platform: osx-arm64
name: ruff
- version: 0.1.9
+ version: 0.1.11
category: main
manager: conda
dependencies:
@@ -29153,21 +29210,21 @@ package:
- python >=3.11,<3.12.0a0
- python >=3.11,<3.12.0a0 *_cpython
- python_abi 3.11.* *_cp311
- url: https://conda.anaconda.org/conda-forge/osx-arm64/ruff-0.1.9-py311h8c97afb_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/ruff-0.1.11-py311h8c97afb_0.conda
hash:
- md5: f0f8d9a74f922859b289b5648ec1f325
- sha256: d18fc5cb710547ba5a30679762a40c19c2549aa6ad3369afc465df6450389294
+ md5: 31b74cd1042a05ece0949e2a584a4a57
+ sha256: c8b1ac1907df121200c18cde7f145f46d4177846de6e68f4a6ef3eec35f4a4c9
build: py311h8c97afb_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: MIT
license_family: MIT
- size: 5078688
- timestamp: 1703216392615
+ size: 5093204
+ timestamp: 1704300759924
- platform: win-64
name: ruff
- version: 0.1.9
+ version: 0.1.11
category: main
manager: conda
dependencies:
@@ -29176,38 +29233,38 @@ package:
- ucrt >=10.0.20348.0
- vc >=14.2,<15
- vc14_runtime >=14.29.30139
- url: https://conda.anaconda.org/conda-forge/win-64/ruff-0.1.9-py311hc14472d_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/ruff-0.1.11-py311hc14472d_0.conda
hash:
- md5: 3082c6ff2771eaca859404e3ba147520
- sha256: cd654198d5b40ae6bc136bb09bb764f1fd84bb239b2ce3c0fefb79e2eb172fda
+ md5: eee011b1274627300c8e89e8ca24a67e
+ sha256: ca19452220d8af043f31b2f72ffd7a5d67665af05ebb4c3e3721866b0d5a25c1
build: py311hc14472d_0
arch: x86_64
subdir: win-64
build_number: 0
license: MIT
license_family: MIT
- size: 5319266
- timestamp: 1703216601404
+ size: 5337963
+ timestamp: 1704304941343
- platform: linux-64
name: s2n
- version: 1.4.0
+ version: 1.4.1
category: main
manager: conda
dependencies:
- libgcc-ng >=12
- openssl >=3.2.0,<4.0a0
- url: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.4.0-h06160fa_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.4.1-h06160fa_0.conda
hash:
- md5: 3d1b58d2664d96f9fbc0afe5e1d04632
- sha256: f6cc2bdcb5d809bbaae218e03bdefef4a309d1fc7ccc9444fda59bd4553a83f8
+ md5: 54ae57d17d038b6a7aa7fdb55350d338
+ sha256: 6f21a270e5fcf824d71b637ea26e389e469b3dc44a7e51062c27556c6e771b37
build: h06160fa_0
arch: x86_64
subdir: linux-64
build_number: 0
license: Apache-2.0
license_family: Apache
- size: 329669
- timestamp: 1701891861649
+ size: 331403
+ timestamp: 1703228891919
- platform: linux-64
name: scikit-learn
version: 1.3.2
@@ -29552,15 +29609,15 @@ package:
- pkg:pypi/secretstorage
- platform: linux-64
name: setuptools
- version: 68.2.2
+ version: 69.0.3
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/setuptools-68.2.2-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/setuptools-69.0.3-pyhd8ed1ab_0.conda
hash:
- md5: fc2166155db840c634a1291a5c35a709
- sha256: 851901b1f8f2049edb36a675f0c3f9a98e1495ef4eb214761b048c6f696a06f7
+ md5: 40695fdfd15a92121ed2922900d0308b
+ sha256: 0fe2a0473ad03dac6c7f5c42ef36a8e90673c88a0350dfefdea4b08d43803db2
build: pyhd8ed1ab_0
arch: x86_64
subdir: linux-64
@@ -29568,21 +29625,21 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 464399
- timestamp: 1694548452441
+ size: 470548
+ timestamp: 1704224855187
purls:
- pkg:pypi/setuptools
- platform: osx-64
name: setuptools
- version: 68.2.2
+ version: 69.0.3
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/setuptools-68.2.2-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/setuptools-69.0.3-pyhd8ed1ab_0.conda
hash:
- md5: fc2166155db840c634a1291a5c35a709
- sha256: 851901b1f8f2049edb36a675f0c3f9a98e1495ef4eb214761b048c6f696a06f7
+ md5: 40695fdfd15a92121ed2922900d0308b
+ sha256: 0fe2a0473ad03dac6c7f5c42ef36a8e90673c88a0350dfefdea4b08d43803db2
build: pyhd8ed1ab_0
arch: x86_64
subdir: osx-64
@@ -29590,21 +29647,21 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 464399
- timestamp: 1694548452441
+ size: 470548
+ timestamp: 1704224855187
purls:
- pkg:pypi/setuptools
- platform: osx-arm64
name: setuptools
- version: 68.2.2
+ version: 69.0.3
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/setuptools-68.2.2-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/setuptools-69.0.3-pyhd8ed1ab_0.conda
hash:
- md5: fc2166155db840c634a1291a5c35a709
- sha256: 851901b1f8f2049edb36a675f0c3f9a98e1495ef4eb214761b048c6f696a06f7
+ md5: 40695fdfd15a92121ed2922900d0308b
+ sha256: 0fe2a0473ad03dac6c7f5c42ef36a8e90673c88a0350dfefdea4b08d43803db2
build: pyhd8ed1ab_0
arch: aarch64
subdir: osx-arm64
@@ -29612,21 +29669,21 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 464399
- timestamp: 1694548452441
+ size: 470548
+ timestamp: 1704224855187
purls:
- pkg:pypi/setuptools
- platform: win-64
name: setuptools
- version: 68.2.2
+ version: 69.0.3
category: main
manager: conda
dependencies:
- python >=3.7
- url: https://conda.anaconda.org/conda-forge/noarch/setuptools-68.2.2-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/setuptools-69.0.3-pyhd8ed1ab_0.conda
hash:
- md5: fc2166155db840c634a1291a5c35a709
- sha256: 851901b1f8f2049edb36a675f0c3f9a98e1495ef4eb214761b048c6f696a06f7
+ md5: 40695fdfd15a92121ed2922900d0308b
+ sha256: 0fe2a0473ad03dac6c7f5c42ef36a8e90673c88a0350dfefdea4b08d43803db2
build: pyhd8ed1ab_0
arch: x86_64
subdir: win-64
@@ -29634,8 +29691,8 @@ package:
license: MIT
license_family: MIT
noarch: python
- size: 464399
- timestamp: 1694548452441
+ size: 470548
+ timestamp: 1704224855187
purls:
- pkg:pypi/setuptools
- platform: linux-64
@@ -32808,132 +32865,132 @@ package:
- pkg:pypi/typing-extensions
- platform: linux-64
name: tzcode
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies:
- __glibc >=2.17,<3.0.a0
- libgcc-ng >=12
- url: https://conda.anaconda.org/conda-forge/linux-64/tzcode-2023c-h0b41bf4_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/tzcode-2023d-h3f72095_0.conda
hash:
- md5: 0c0533894f21c3d35697cb8378d390e2
- sha256: 62b0d3eee4260d310f578015305834b8a588377f796e5e290ec267da8a51a027
- build: h0b41bf4_0
+ md5: 1c63518899838477ebd497e3e3327f81
+ sha256: 0eab7ec2f4c983efb365bacc2e7bd6620f516a50d2e9b183ba1c9c243601cce3
+ build: h3f72095_0
arch: x86_64
subdir: linux-64
build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 68632
- timestamp: 1680049336647
+ size: 69778
+ timestamp: 1703250978164
- platform: osx-64
name: tzcode
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/osx-64/tzcode-2023c-hb7f2c08_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/tzcode-2023d-h10d778d_0.conda
hash:
- md5: a7ba8e96323b9d8ce4f0edc4f4dab27f
- sha256: 0d4b111314bea267454f48691debc1ff4c0ce8cb91491d2be30381de498ac59e
- build: hb7f2c08_0
+ md5: 6ae344465457da354af4d35b567fb141
+ sha256: e312a782d139884c6399c45c3218eda11ac17d12dc0d0711623d07c7751e97ab
+ build: h10d778d_0
arch: x86_64
subdir: osx-64
build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 62711
- timestamp: 1680049599804
+ size: 63744
+ timestamp: 1703251160420
- platform: osx-arm64
name: tzcode
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/osx-arm64/tzcode-2023c-h1a8c8d9_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/tzcode-2023d-h93a5062_0.conda
hash:
- md5: 96779d3be996d78411b083f99a51199c
- sha256: 0a60ff53272547a0f80862f0a1969a5d1cec16bd2e9098ed5b07d317682a4361
- build: h1a8c8d9_0
+ md5: 8f6c1eef62c660bfb43897fe14b2ca95
+ sha256: a4bed39a41b26d5f8134fd0cb0df15bc0b9e645e3ee6f88ee81c8d24651eb8cd
+ build: h93a5062_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: BSD-3-Clause
license_family: BSD
- size: 63064
- timestamp: 1680049656503
+ size: 63558
+ timestamp: 1703251392865
- platform: linux-64
name: tzdata
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023c-h71feb2d_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023d-h0c530f3_0.conda
hash:
- md5: 939e3e74d8be4dac89ce83b20de2492a
- sha256: 0449138224adfa125b220154408419ec37c06b0b49f63c5954724325903ecf55
- build: h71feb2d_0
+ md5: 8dee24b8be2d9ff81e7bd4d7d97ff1b0
+ sha256: 04f2ab3e36f2015841551415bf16bf62933bd94b7085d4be5493b388e95a9c3d
+ build: h0c530f3_0
arch: x86_64
subdir: linux-64
build_number: 0
license: LicenseRef-Public-Domain
noarch: generic
- size: 117580
- timestamp: 1680041306008
+ size: 119639
+ timestamp: 1703250910370
- platform: osx-64
name: tzdata
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023c-h71feb2d_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023d-h0c530f3_0.conda
hash:
- md5: 939e3e74d8be4dac89ce83b20de2492a
- sha256: 0449138224adfa125b220154408419ec37c06b0b49f63c5954724325903ecf55
- build: h71feb2d_0
+ md5: 8dee24b8be2d9ff81e7bd4d7d97ff1b0
+ sha256: 04f2ab3e36f2015841551415bf16bf62933bd94b7085d4be5493b388e95a9c3d
+ build: h0c530f3_0
arch: x86_64
subdir: osx-64
build_number: 0
license: LicenseRef-Public-Domain
noarch: generic
- size: 117580
- timestamp: 1680041306008
+ size: 119639
+ timestamp: 1703250910370
- platform: osx-arm64
name: tzdata
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023c-h71feb2d_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023d-h0c530f3_0.conda
hash:
- md5: 939e3e74d8be4dac89ce83b20de2492a
- sha256: 0449138224adfa125b220154408419ec37c06b0b49f63c5954724325903ecf55
- build: h71feb2d_0
+ md5: 8dee24b8be2d9ff81e7bd4d7d97ff1b0
+ sha256: 04f2ab3e36f2015841551415bf16bf62933bd94b7085d4be5493b388e95a9c3d
+ build: h0c530f3_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: LicenseRef-Public-Domain
noarch: generic
- size: 117580
- timestamp: 1680041306008
+ size: 119639
+ timestamp: 1703250910370
- platform: win-64
name: tzdata
- version: 2023c
+ version: 2023d
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023c-h71feb2d_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2023d-h0c530f3_0.conda
hash:
- md5: 939e3e74d8be4dac89ce83b20de2492a
- sha256: 0449138224adfa125b220154408419ec37c06b0b49f63c5954724325903ecf55
- build: h71feb2d_0
+ md5: 8dee24b8be2d9ff81e7bd4d7d97ff1b0
+ sha256: 04f2ab3e36f2015841551415bf16bf62933bd94b7085d4be5493b388e95a9c3d
+ build: h0c530f3_0
arch: x86_64
subdir: win-64
build_number: 0
license: LicenseRef-Public-Domain
noarch: generic
- size: 117580
- timestamp: 1680041306008
+ size: 119639
+ timestamp: 1703250910370
- platform: win-64
name: ucrt
version: 10.0.22621.0
@@ -33155,72 +33212,72 @@ package:
- pkg:pypi/urllib3
- platform: linux-64
name: utfcpp
- version: 4.0.4
+ version: 4.0.5
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/linux-64/utfcpp-4.0.4-ha770c72_0.conda
+ url: https://conda.anaconda.org/conda-forge/linux-64/utfcpp-4.0.5-ha770c72_0.conda
hash:
- md5: fe0aa3ea88da0546dd903bbed6629f0a
- sha256: 57d3aaae5dcc690fe24fe2bbadcef05344346e07621d650ae7fe7efb91921874
+ md5: 25965c1d1d5fc00ce2b663b73008e3b7
+ sha256: c4a286b5ee817ab58c091fbfeb790c931f919c13a3dd18e7770936e08b19b50b
build: ha770c72_0
arch: x86_64
subdir: linux-64
build_number: 0
license: BSL-1.0
- size: 13671
- timestamp: 1702283684836
+ size: 13698
+ timestamp: 1704191017780
- platform: osx-64
name: utfcpp
- version: 4.0.4
+ version: 4.0.5
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/osx-64/utfcpp-4.0.4-h694c41f_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-64/utfcpp-4.0.5-h694c41f_0.conda
hash:
- md5: 59d8987e777ca5b896d7656fe111c1d1
- sha256: 498da3de812ae5f6f2507eb2e2520ec2d431af69722ebd71b8a880ace750327e
+ md5: f59ae41dec5f4035713eb00b552c6eb9
+ sha256: a480e0a0e563d3915221d52bb11e0acf1a1bb58aa913349eefe8dca6ce02d4f4
build: h694c41f_0
arch: x86_64
subdir: osx-64
build_number: 0
license: BSL-1.0
- size: 13816
- timestamp: 1702284056019
+ size: 13826
+ timestamp: 1704191204619
- platform: osx-arm64
name: utfcpp
- version: 4.0.4
+ version: 4.0.5
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/osx-arm64/utfcpp-4.0.4-hce30654_0.conda
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/utfcpp-4.0.5-hce30654_0.conda
hash:
- md5: a2f7e7c0661eee5325dbe1052fc7feb2
- sha256: 859c27c8ba850706380c3599b65032ce222bc12b0423bdf417d3460e6ff487ee
+ md5: 17c57ab4937831545a31bb00d756e2db
+ sha256: d1075c4e4c70f487c497880cf373906054fe3b235d757f6dc17ab7b876608fce
build: hce30654_0
arch: aarch64
subdir: osx-arm64
build_number: 0
license: BSL-1.0
- size: 13810
- timestamp: 1702284057948
+ size: 13829
+ timestamp: 1704191173739
- platform: win-64
name: utfcpp
- version: 4.0.4
+ version: 4.0.5
category: main
manager: conda
dependencies: []
- url: https://conda.anaconda.org/conda-forge/win-64/utfcpp-4.0.4-h57928b3_0.conda
+ url: https://conda.anaconda.org/conda-forge/win-64/utfcpp-4.0.5-h57928b3_0.conda
hash:
- md5: 02dc235e0db6e51270dc04a649318e5a
- sha256: 8cf7cf549c97680515add44d998495988f5762c990dba26c1742bd05e7fef4b4
+ md5: 116f6c77011fd7869f71b163cdd837b5
+ sha256: 1380fd2eb7336ee08faaed098675a305ed6185b9491ebe8ad08a30fb657ddee3
build: h57928b3_0
arch: x86_64
subdir: win-64
build_number: 0
license: BSL-1.0
- size: 14070
- timestamp: 1702283926669
+ size: 14042
+ timestamp: 1704191209163
- platform: win-64
name: vc
version: '14.3'
diff --git a/pixi.toml b/pixi.toml
index 71a76c759..05753a406 100644
--- a/pixi.toml
+++ b/pixi.toml
@@ -59,7 +59,7 @@ netcdf4 = "*"
numba = ">=0.50"
numpy = "*"
pandamesh = "*"
-pandas = "<2.0" # TODO: unpin when newer xarray on conda-forge
+pandas = "*"
pooch = "*"
pydata-sphinx-theme = "*"
pymetis = "*"
@@ -86,7 +86,7 @@ toolz = "*"
tqdm = "*"
twine = "*"
vtk = { version = ">=9.0", build = "*qt*", channel = "conda-forge" }
-xarray = ">=0.15"
+xarray = ">=2023.04.0"
xugrid = ">=0.6.4"
zarr = "*"
| Support pandas 2.0
In GitLab by @JoerivanEngelen on Apr 26, 2023, 11:33
Things to pay attention to:
- Xarray should be pinned higher than this release: https://github.com/pydata/xarray/releases/tag/v2023.04.0
- I remember reading things changed with the time handling, we test investigate if our cftime things still work
| In GitLab by @JoerivanEngelen on Dec 4, 2023, 14:41
marked this issue as related to #676 | 2024-01-03T13:23:23 | 0.0 | [] | [] |
||
Deltares/imod-python | Deltares__imod-python-698 | 1fdc0ac7383c808f2b497d6834d23a27c43bea6b | diff --git a/docs/api/changelog.rst b/docs/api/changelog.rst
index 589fa23af..30048971f 100644
--- a/docs/api/changelog.rst
+++ b/docs/api/changelog.rst
@@ -20,6 +20,13 @@ Fixed
:function:`imod.select.points.point_values`
- Fixed bug where :class:`imod.mf6.Well` could not be assigned to the first cell
of an unstructured grid.
+- HorizontalFlowBarrier package now dropped if completely outside partition in a
+ split model.
+- HorizontalFlowBarrier package clipped with ``clip_by_grid`` based on active
+ cells, consistent with how other packages are treated by this function. This
+ affects the :meth:`imod.mf6.HorizontalFlowBarrier.regrid_like` and
+ :meth:`imod.mf6.Modflow6Simulation.split` methods.
+
Changed
~~~~~~~
@@ -88,12 +95,12 @@ Added
grids designating respectively the highest and lowest active cells in a grid.
- validation of ``transient`` argument in :class:`imod.mf6.StorageCoefficient`
and :class:`imod.mf6.SpecificStorage`.
-- :meth:`imod.mf6.Simulation.open_concentration`,
- :meth:`imod.mf6.Simulation.open_head`,
- :meth:`imod.mf6.Simulation.open_transport_budget`, and
- :meth:`imod.mf6.Simulation.open_flow_budget`, were added as convenience
+- :meth:`imod.mf6.Modflow6Simulation.open_concentration`,
+ :meth:`imod.mf6.Modflow6Simulation.open_head`,
+ :meth:`imod.mf6.Modflow6Simulation.open_transport_budget`, and
+ :meth:`imod.mf6.Modflow6Simulation.open_flow_budget`, were added as convenience
methods to open simulation output easier (without having to specify paths).
-- The :meth:`imod.mf6.Simulation.split` method has been added. This method makes
+- The :meth:`imod.mf6.Modflow6Simulation.split` method has been added. This method makes
it possible for a user to create a Multi-Model simulation. A user needs to
provide a submodel label array in which they specify to which submodel a cell
belongs. The method will then create the submodels and split the nested
@@ -105,10 +112,10 @@ Added
- Once a split simulation has been executed by MF6, we find head and balance
results in each of the partition models. These can now be merged into head and
balance datasets for the original domain using
- :meth:`imod.mf6.Simulation.open_concentration`,
- :meth:`imod.mf6.Simulation.open_head`,
- :meth:`imod.mf6.Simulation.open_transport_budget`,
- :meth:`imod.mf6.Simulation.open_flow_budget`.
+ :meth:`imod.mf6.Modflow6Simulation.open_concentration`,
+ :meth:`imod.mf6.Modflow6Simulation.open_head`,
+ :meth:`imod.mf6.Modflow6Simulation.open_transport_budget`,
+ :meth:`imod.mf6.Modflow6Simulation.open_flow_budget`.
In the case of balances, the exchanges through the partition boundary are not
yet added to this merged balance.
- Settings such as ``save_flows`` can be passed through
@@ -124,7 +131,7 @@ Removed
- Tox has been removed from the project.
- Dropped support for writing .qgs files directly for QGIS, as this was hard to
maintain and rarely used. To export your model to QGIS readable files, call
- the ``dump`` method :class:`imod.mf6.Simulation` with ``mdal_compliant=True``.
+ the ``dump`` method :class:`imod.mf6.Modflow6Simulation` with ``mdal_compliant=True``.
This writes UGRID NetCDFs which can read as meshes in QGIS.
- Removed ``declxml`` from repository.
@@ -159,7 +166,7 @@ Changed
Added
~~~~~
-- :meth:`imod.mf6.Simulation.regrid_like` to regrid a Modflow6 simulation to a
+- :meth:`imod.mf6.Modflow6Simulation.regrid_like` to regrid a Modflow6 simulation to a
new grid (structured or unstructured), using `xugrid's regridding
functionality.
<https://deltares.github.io/xugrid/examples/regridder_overview.html>`_
@@ -169,7 +176,7 @@ Added
grid) or to speed up a simulation (by coarsening the grid) to name a few
- :meth:`imod.mf6.Package.regrid_like` to regrid packages. The user can
specify their own custom regridder types and methods for variables.
-- :meth:`imod.mf6.Simulation.clip_box` got an extra argument
+- :meth:`imod.mf6.Modflow6Simulation.clip_box` got an extra argument
``states_for_boundary``, which takes a dictionary with modelname as key and
griddata as value. This data is specified as fixed state on the model
boundary. At present only `imod.mf6.GroundwaterFlowModel` is supported, grid
@@ -195,7 +202,7 @@ Fixed
This has been fixed so that both are not optional dependencies when
installing via pip (installing via conda or mamba will always pull all
dependencies and supports full functionality).
-- :meth:`imod.mf6.Simulation._validate` now print all validation errors for all
+- :meth:`imod.mf6.Modflow6Simulation._validate` now print all validation errors for all
models and packages in one message.
- The gen file reader can now handle feature id's that contain commas and spaces
- :class:`imod.mf6.EvapoTranspiration` now supports segments, by adding a
@@ -383,11 +390,11 @@ Changed
file name being used over and over.
- :meth:`imod.flow.ImodflowModel.time_discretization`,
:meth:`imod.wq.SeawatModel.time_discretization`,
- :meth:`imod.mf6.Simulation.time_discretization`,
+ :meth:`imod.mf6.Modflow6Simulation.time_discretization`,
are renamed to:
:meth:`imod.flow.ImodflowModel.create_time_discretization`,
:meth:`imod.wq.SeawatModel.create_time_discretization`,
- :meth:`imod.mf6.Simulation.create_time_discretization`,
+ :meth:`imod.mf6.Modflow6Simulation.create_time_discretization`,
- Moved tests inside `imod` directory, added an entry point for pytest fixtures.
Running the tests now requires an editable install, and also existing
installations have to be reinstalled to run the tests.
@@ -418,7 +425,7 @@ Added
to enable adding maps to existing axes.
- :meth:`imod.flow.ImodflowModel.create_time_discretization`,
:meth:`imod.wq.SeawatModel.create_time_discretization`,
- :meth:`imod.mf6.Simulation.create_time_discretization`, now have a
+ :meth:`imod.mf6.Modflow6Simulation.create_time_discretization`, now have a
documentation section.
- :class:`imod.mf6.GroundwaterTransportModel` has been added with associated
simple classes to allow creation of solute transport models. Advanced
@@ -495,7 +502,7 @@ Added
the Netherlands).
- Added :py:class:`imod.flow.ImodflowModel` to write to model iMODFLOW project
file.
-- :meth:`imod.mf6.Simulation.write` now has a ``binary`` keyword. When set
+- :meth:`imod.mf6.Modflow6Simulation.write` now has a ``binary`` keyword. When set
to ``False``, all MODFLOW6 input is written to text rather than binary files.
- Added :class:`imod.mf6.DiscretizationVertices` to write MODFLOW6 DISV model
input.
@@ -511,7 +518,7 @@ Added
:func:`imod.util.empty_3d`, and :func:`imod.util.empty_3d_transient`.
- :func:`imod.util.where` has been added for easier if-then-else operations,
especially for preserving NaN nodata values.
-- :meth:`imod.mf6.Simulation.run` has been added to more easily run a model,
+- :meth:`imod.mf6.Modflow6Simulation.run` has been added to more easily run a model,
especially in examples and tests.
- :func:`imod.mf6.open_cbc` and :func:`imod.mf6.open_hds` will automatically
return a ``xugrid.UgridDataArray`` for MODFLOW6 DISV model output.
diff --git a/imod/mf6/hfb.py b/imod/mf6/hfb.py
index ebd11cb4b..1abf4eb3c 100644
--- a/imod/mf6/hfb.py
+++ b/imod/mf6/hfb.py
@@ -18,6 +18,7 @@
from imod.mf6.package import Package
from imod.mf6.utilities.clip import clip_by_grid
from imod.mf6.utilities.grid import broadcast_to_full_domain
+from imod.schemata import EmptyIndexesSchema
from imod.typing import GridDataArray
@@ -268,7 +269,7 @@ class HorizontalFlowBarrierBase(BoundaryCondition, ILineDataPackage):
_period_data = ()
_init_schemata = {}
- _write_schemata = {}
+ _write_schemata = {"geometry": [EmptyIndexesSchema()]}
_regrid_method = {}
diff --git a/imod/mf6/multimodel/modelsplitter.py b/imod/mf6/multimodel/modelsplitter.py
index 6eb449f9a..230927d18 100644
--- a/imod/mf6/multimodel/modelsplitter.py
+++ b/imod/mf6/multimodel/modelsplitter.py
@@ -8,7 +8,7 @@
from imod.mf6.utilities.grid import get_active_domain_slice
from imod.mf6.utilities.schemata import filter_schemata_dict
from imod.mf6.wel import Well
-from imod.schemata import AllNoDataSchema
+from imod.schemata import AllNoDataSchema, EmptyIndexesSchema
from imod.typing import GridDataArray
from imod.typing.grid import is_unstructured, ones_like
@@ -92,11 +92,13 @@ def slice_model(partition_info: PartitionInfo, model: Modflow6Model) -> Modflow6
sliced_package = sliced_package.mask(new_idomain)
# The masking can result in packages with AllNoData.Therefore we'll have
# to drop these packages. Create schemata dict only containing the
- # variables with a AllNoDataSchema.
+ # variables with a AllNoDataSchema and EmptyIndexesSchema (in case of
+ # HFB) in the write schemata.
allnodata_schemata = filter_schemata_dict(
- package._write_schemata, (AllNoDataSchema)
+ package._write_schemata, (AllNoDataSchema, EmptyIndexesSchema)
)
- # Find if packages throws ValidationError for AllNoDataSchema.
+ # Find if packages throws ValidationError for AllNoDataSchema or
+ # EmptyIndexesSchema.
allnodata_errors = sliced_package._validate(allnodata_schemata)
# Drop if allnodata error thrown
if not allnodata_errors:
diff --git a/imod/mf6/utilities/clip.py b/imod/mf6/utilities/clip.py
index c11a307c6..a80a03ece 100644
--- a/imod/mf6/utilities/clip.py
+++ b/imod/mf6/utilities/clip.py
@@ -2,6 +2,7 @@
import geopandas as gpd
import numpy as np
+import shapely
import xarray as xr
import xugrid as xu
from fastcore.dispatch import typedispatch
@@ -115,6 +116,10 @@ def clip_by_grid(package: ILineDataPackage, active: GridDataArray) -> ILineDataP
# Clip line with polygon
bounding_gdf = bounding_polygon(active)
package_gdf_clipped = package_gdf.clip(bounding_gdf)
+ # Catch edge case: when line crosses only vertex of polygon, a point
+ # (type_id == 0) is returned. Drop these.
+ is_points = shapely.get_type_id(package_gdf_clipped.geometry) == 0
+ package_gdf_clipped = package_gdf_clipped[~is_points]
# Get settings
settings = _get_settings(package)
# Create new instance
diff --git a/imod/schemata.py b/imod/schemata.py
index 7e9d42662..1405d4ca8 100644
--- a/imod/schemata.py
+++ b/imod/schemata.py
@@ -194,16 +194,16 @@ def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> Non
raise ValidationError(f"dim mismatch: expected {expected}, got {actual}")
-class IndexesSchema(BaseSchema):
+class EmptyIndexesSchema(BaseSchema):
"""
- Verify indexes, check if no dims with zero size are included and that
- indexes are monotonic. Skips unstructured grid dimensions.
+ Verify indexes, check if no dims with zero size are included. Skips
+ unstructured grid dimensions.
"""
def __init__(self) -> None:
pass
- def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:
+ def get_dims_to_validate(self, obj: Union[xr.DataArray, xu.UgridDataArray]):
dims_to_validate = list(obj.dims)
# Remove face dim from list to validate, as it has no ``indexes``
@@ -213,11 +213,31 @@ def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> Non
dims_to_validate = [
dim for dim in dims_to_validate if dim not in ugrid_dims
]
+ return dims_to_validate
+
+ def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:
+ dims_to_validate = self.get_dims_to_validate(obj)
for dim in dims_to_validate:
if len(obj.indexes[dim]) == 0:
raise ValidationError(f"provided dimension {dim} with size 0")
+
+class IndexesSchema(EmptyIndexesSchema):
+ """
+ Verify indexes, check if no dims with zero size are included and that
+ indexes are monotonic. Skips unstructured grid dimensions.
+ """
+
+ def __init__(self) -> None:
+ pass
+
+ def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:
+ # Test if indexes all empty
+ super().validate(obj)
+
+ dims_to_validate = self.get_dims_to_validate(obj)
+
for dim in dims_to_validate:
if dim == "y":
if not obj.indexes[dim].is_monotonic_decreasing:
diff --git a/imod/typing/grid.py b/imod/typing/grid.py
index 6c0f79edc..8ff90ecd1 100644
--- a/imod/typing/grid.py
+++ b/imod/typing/grid.py
@@ -128,7 +128,11 @@ def bounding_polygon(active: xr.DataArray):
@typedispatch
def bounding_polygon(active: xu.UgridDataArray):
"""Return bounding polygon of active cells"""
- return active.ugrid.grid.bounding_polygon()
+ active_indices = np.where(active > 0)[0]
+ domain_slice = {f"{active.ugrid.grid.face_dimension}": active_indices}
+ active_clipped = active.isel(domain_slice, missing_dims="ignore")
+
+ return active_clipped.ugrid.grid.bounding_polygon()
@typedispatch
| Test and fix (if necessary) partitioning for HFB package
In GitLab by @JoerivanEngelen on Nov 22, 2023, 12:37
Partitioning: HFB package not dropped if outside partition
In this case ``xu.snap_to_edges`` drops the error:
```
max_n_new_edges = len(face_indices) * topology.n_max_node_per_face - 1
> edge_index = np.empty(max_n_new_edges, dtype=IntDType)
E ValueError: negative dimensions are not allowed
```
This is because ``len(face_indices) = 0``. The fact that xugrid throws this error is a bug in itself, but still we do not want this situation to occur, probably dropping the package after clipping is a better solution.
| Found the following issues: https://github.com/Deltares/imod-python/issues/696, https://github.com/Deltares/imod-python/issues/697
| 2023-12-13T16:12:12 | 0.0 | [] | [] |
||
tree-sitter/tree-sitter-javascript | tree-sitter__tree-sitter-javascript-328 | 12e45374422f6051648717be62f0ffc40a279ee2 | diff --git a/grammar.js b/grammar.js
index f36889c..d96d9e4 100644
--- a/grammar.js
+++ b/grammar.js
@@ -608,10 +608,11 @@ module.exports = grammar({
field('close_tag', $.jsx_closing_element),
),
- // Should not contain new lines and should not start or end with a space
jsx_text: _ => choice(
- /[^{}<>\n& ]([^{}<>\n&]*[^{}<>\n& ])?/,
- /\/\/[^\n]*/,
+ // if there is a newline, only capture if there's non-whitespace-text
+ token.immediate(/[^{}<>&]*[^{}<>\s\p{Zs}\uFEFF\u2028\u2029\u2060\u200B&][^{}<>&]*/),
+ // whitespace between jsx_tags should be captured if there's no newline
+ token.immediate(/[^{}<>\n&]+/),
),
// An entity can be named, numeric (decimal), or numeric (hexadecimal). The
diff --git a/src/grammar.json b/src/grammar.json
index 944e2a2..2a892d9 100644
--- a/src/grammar.json
+++ b/src/grammar.json
@@ -2468,12 +2468,18 @@
"type": "CHOICE",
"members": [
{
- "type": "PATTERN",
- "value": "[^{}<>\\n& ]([^{}<>\\n&]*[^{}<>\\n& ])?"
+ "type": "IMMEDIATE_TOKEN",
+ "content": {
+ "type": "PATTERN",
+ "value": "[^{}<>&]*[^{}<>\\s\\p{Zs}\\uFEFF\\u2028\\u2029\\u2060\\u200B&][^{}<>&]*"
+ }
},
{
- "type": "PATTERN",
- "value": "\\/\\/[^\\n]*"
+ "type": "IMMEDIATE_TOKEN",
+ "content": {
+ "type": "PATTERN",
+ "value": "[^{}<>\\n&]+"
+ }
}
]
},
diff --git a/src/parser.c b/src/parser.c
index a5f56a6..9fbefb5 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -4180,902 +4180,886 @@ static bool ts_lex(TSLexer *lexer, TSStateId state) {
eof = lexer->eof(lexer);
switch (state) {
case 0:
- if (eof) ADVANCE(149);
+ if (eof) ADVANCE(147);
ADVANCE_MAP(
- '!', 268,
- '"', 201,
- '#', 7,
- '$', 315,
- '%', 256,
- '&', 243,
- '\'', 202,
- '(', 157,
- ')', 158,
- '*', 152,
- '+', 250,
- ',', 155,
- '-', 252,
- '.', 197,
- '/', 295,
- '0', 300,
- ':', 160,
- ';', 159,
- '<', 187,
- '=', 163,
- '>', 193,
- '?', 34,
- '@', 319,
- '[', 165,
- '\\', 103,
- ']', 166,
- '^', 246,
- '`', 293,
- 's', 313,
- '{', 154,
- '|', 247,
- '}', 156,
- '~', 269,
+ '!', 266,
+ '"', 199,
+ '#', 5,
+ '$', 313,
+ '%', 254,
+ '&', 241,
+ '\'', 200,
+ '(', 155,
+ ')', 156,
+ '*', 150,
+ '+', 248,
+ ',', 153,
+ '-', 250,
+ '.', 195,
+ '/', 293,
+ '0', 298,
+ ':', 158,
+ ';', 157,
+ '<', 185,
+ '=', 161,
+ '>', 191,
+ '?', 32,
+ '@', 317,
+ '[', 163,
+ '\\', 101,
+ ']', 164,
+ '^', 244,
+ '`', 291,
+ 's', 311,
+ '{', 152,
+ '|', 245,
+ '}', 154,
+ '~', 267,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(146);
- if (lookahead > '@') ADVANCE(317);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(144);
+ if (lookahead > '@') ADVANCE(315);
END_STATE();
case 1:
- if (lookahead == '\n') ADVANCE(320);
+ if (lookahead == '\n') ADVANCE(318);
if (('\t' <= lookahead && lookahead <= '\r') ||
lookahead == ' ') ADVANCE(1);
END_STATE();
case 2:
- if (lookahead == '\n') SKIP(23);
- if (lookahead == ' ') ADVANCE(2);
- if (lookahead == '&') ADVANCE(18);
- if (lookahead == '/') ADVANCE(173);
- if (lookahead == '<') ADVANCE(186);
- if (lookahead == '{') ADVANCE(154);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(175);
+ if (lookahead == '\n') ADVANCE(21);
+ if (lookahead == '&') ADVANCE(16);
+ if (lookahead == '/') ADVANCE(171);
+ if (lookahead == '<') ADVANCE(184);
+ if (lookahead == '{') ADVANCE(152);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(181);
if (lookahead != 0 &&
lookahead != '>' &&
- lookahead != '}') ADVANCE(174);
+ lookahead != '}') ADVANCE(173);
END_STATE();
case 3:
if (lookahead == '\n') SKIP(3);
- if (lookahead == '/') ADVANCE(168);
- if (lookahead == '<') ADVANCE(170);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(169);
- if (lookahead != 0) ADVANCE(167);
+ if (lookahead == '/') ADVANCE(166);
+ if (lookahead == '<') ADVANCE(168);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(167);
+ if (lookahead != 0) ADVANCE(165);
END_STATE();
case 4:
- if (lookahead == '\n') SKIP(39);
- if (lookahead == '/') ADVANCE(27);
- if (lookahead == '[') ADVANCE(84);
- if (lookahead == '\\') ADVANCE(145);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(296);
- if (lookahead != 0) ADVANCE(297);
+ if (lookahead == '\n') SKIP(37);
+ if (lookahead == '/') ADVANCE(25);
+ if (lookahead == '[') ADVANCE(82);
+ if (lookahead == '\\') ADVANCE(143);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(294);
+ if (lookahead != 0) ADVANCE(295);
END_STATE();
case 5:
- if (lookahead == ' ') ADVANCE(5);
- if (lookahead != 0 &&
- lookahead != '\n' &&
- lookahead != '&' &&
- lookahead != '<' &&
- lookahead != '>' &&
- lookahead != '{' &&
- lookahead != '}') ADVANCE(174);
+ if (lookahead == '!') ADVANCE(148);
+ if (lookahead == '\\') ADVANCE(102);
+ if (set_contains(sym_identifier_character_set_1, 14, lookahead)) ADVANCE(316);
END_STATE();
case 6:
ADVANCE_MAP(
- ' ', 6,
- '*', 176,
- '\n', 30,
- '&', 30,
- '<', 30,
- '>', 30,
- '{', 30,
- '}', 30,
+ '!', 265,
+ '"', 199,
+ '#', 81,
+ '\'', 200,
+ '(', 155,
+ '*', 149,
+ '+', 247,
+ ',', 153,
+ '-', 249,
+ '.', 31,
+ '/', 251,
+ '0', 298,
+ ';', 157,
+ '<', 189,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ '`', 291,
+ 's', 311,
+ '{', 152,
+ '}', 154,
+ '~', 267,
);
- if (lookahead != 0) ADVANCE(177);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(6);
+ if (lookahead > '#' &&
+ (lookahead < '%' || '@' < lookahead) &&
+ (lookahead < '[' || '^' < lookahead) &&
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 7:
- if (lookahead == '!') ADVANCE(150);
- if (lookahead == '\\') ADVANCE(104);
- if (set_contains(sym_identifier_character_set_1, 14, lookahead)) ADVANCE(318);
+ ADVANCE_MAP(
+ '!', 265,
+ '"', 199,
+ '#', 81,
+ '\'', 200,
+ '(', 155,
+ '+', 247,
+ '-', 249,
+ '.', 196,
+ '/', 251,
+ '0', 298,
+ ':', 158,
+ '<', 189,
+ '>', 190,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ '`', 291,
+ '{', 152,
+ '~', 267,
+ );
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(7);
+ if (lookahead > '#' &&
+ (lookahead < '%' || '@' < lookahead) &&
+ (lookahead < '[' || '^' < lookahead) &&
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 8:
ADVANCE_MAP(
- '!', 267,
- '"', 201,
- '#', 83,
- '\'', 202,
- '(', 157,
- '*', 151,
- '+', 249,
- ',', 155,
- '-', 251,
- '.', 33,
- '/', 253,
- '0', 300,
- ';', 159,
- '<', 191,
- '@', 319,
- '[', 165,
- '\\', 105,
- '`', 293,
- 's', 313,
- '{', 154,
- '}', 156,
- '~', 269,
+ '!', 77,
+ '"', 199,
+ '#', 81,
+ '%', 254,
+ '&', 241,
+ '\'', 200,
+ '(', 155,
+ ')', 156,
+ '*', 150,
+ '+', 248,
+ ',', 153,
+ '-', 250,
+ '.', 196,
+ '/', 252,
+ '0', 298,
+ ':', 158,
+ ';', 157,
+ '<', 186,
+ '=', 161,
+ '>', 191,
+ '?', 32,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '^', 244,
+ '`', 291,
+ '{', 152,
+ '|', 245,
+ '}', 154,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(8);
if (lookahead > '#' &&
- (lookahead < '%' || '@' < lookahead) &&
- (lookahead < '[' || '^' < lookahead) &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 9:
ADVANCE_MAP(
- '!', 267,
- '"', 201,
- '#', 83,
- '\'', 202,
- '(', 157,
- '+', 249,
- '-', 251,
- '.', 198,
- '/', 253,
- '0', 300,
- ':', 160,
- '<', 191,
+ '!', 77,
+ '%', 253,
+ '&', 242,
+ '(', 155,
+ ')', 156,
+ '*', 151,
+ '+', 247,
+ ',', 153,
+ '-', 249,
+ '.', 194,
+ '/', 251,
+ ':', 158,
+ ';', 157,
+ '<', 187,
+ '=', 160,
'>', 192,
- '@', 319,
- '[', 165,
- '\\', 105,
- '`', 293,
- '{', 154,
- '~', 269,
+ '?', 33,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '^', 243,
+ '`', 291,
+ '{', 152,
+ '|', 246,
+ '}', 154,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(9);
if (lookahead > '#' &&
(lookahead < '%' || '@' < lookahead) &&
- (lookahead < '[' || '^' < lookahead) &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 10:
ADVANCE_MAP(
- '!', 79,
- '"', 201,
- '#', 83,
- '%', 256,
- '&', 243,
- '\'', 202,
- '(', 157,
- ')', 158,
- '*', 152,
- '+', 250,
- ',', 155,
- '-', 252,
- '.', 198,
- '/', 254,
- '0', 300,
- ':', 160,
- ';', 159,
- '<', 188,
- '=', 163,
- '>', 193,
- '?', 34,
- '@', 319,
- '[', 165,
- '\\', 105,
- ']', 166,
- '^', 246,
- '`', 293,
- '{', 154,
- '|', 247,
- '}', 156,
+ '!', 77,
+ '%', 253,
+ '&', 242,
+ '(', 155,
+ ')', 156,
+ '*', 151,
+ '+', 247,
+ ',', 153,
+ '-', 249,
+ '.', 194,
+ '/', 251,
+ ':', 158,
+ ';', 157,
+ '<', 187,
+ '=', 78,
+ '>', 192,
+ '?', 33,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '^', 243,
+ '`', 291,
+ '{', 152,
+ '|', 246,
+ '}', 154,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(10);
+ if (('a' <= lookahead && lookahead <= 'z')) ADVANCE(296);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(11);
if (lookahead > '#' &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
+ (lookahead < '%' || '@' < lookahead) &&
+ (lookahead < '`' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 11:
ADVANCE_MAP(
- '!', 79,
- '%', 255,
- '&', 244,
- '(', 157,
- ')', 158,
- '*', 153,
- '+', 249,
- ',', 155,
- '-', 251,
- '.', 196,
- '/', 253,
- ':', 160,
- ';', 159,
- '<', 189,
- '=', 162,
- '>', 194,
- '?', 35,
- '[', 165,
- '\\', 105,
- ']', 166,
- '^', 245,
- '`', 293,
- '{', 154,
- '|', 248,
- '}', 156,
+ '!', 77,
+ '%', 253,
+ '&', 242,
+ '(', 155,
+ ')', 156,
+ '*', 151,
+ '+', 247,
+ ',', 153,
+ '-', 249,
+ '.', 194,
+ '/', 251,
+ ':', 158,
+ ';', 157,
+ '<', 187,
+ '=', 78,
+ '>', 192,
+ '?', 33,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '^', 243,
+ '`', 291,
+ '{', 152,
+ '|', 246,
+ '}', 154,
);
if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(11);
if (lookahead > '#' &&
(lookahead < '%' || '@' < lookahead) &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 12:
ADVANCE_MAP(
- '!', 79,
- '%', 255,
- '&', 244,
- '(', 157,
- ')', 158,
- '*', 153,
- '+', 249,
- ',', 155,
- '-', 251,
+ '"', 199,
+ '#', 81,
+ '\'', 200,
+ '(', 155,
+ '*', 149,
'.', 196,
- '/', 253,
- ':', 160,
- ';', 159,
- '<', 189,
- '=', 80,
- '>', 194,
- '?', 35,
- '[', 165,
- '\\', 105,
- ']', 166,
- '^', 245,
- '`', 293,
- '{', 154,
- '|', 248,
- '}', 156,
+ '/', 25,
+ '0', 298,
+ '<', 183,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ 's', 311,
+ '{', 152,
);
- if (('a' <= lookahead && lookahead <= 'z')) ADVANCE(298);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(13);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(12);
if (lookahead > '#' &&
(lookahead < '%' || '@' < lookahead) &&
- (lookahead < '`' || '~' < lookahead)) ADVANCE(317);
+ (lookahead < '[' || '^' < lookahead) &&
+ lookahead != '`' &&
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
case 13:
- ADVANCE_MAP(
- '!', 79,
- '%', 255,
- '&', 244,
- '(', 157,
- ')', 158,
- '*', 153,
- '+', 249,
- ',', 155,
- '-', 251,
- '.', 196,
- '/', 253,
- ':', 160,
- ';', 159,
- '<', 189,
- '=', 80,
- '>', 194,
- '?', 35,
- '[', 165,
- '\\', 105,
- ']', 166,
- '^', 245,
- '`', 293,
- '{', 154,
- '|', 248,
- '}', 156,
- );
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(13);
- if (lookahead > '#' &&
- (lookahead < '%' || '@' < lookahead) &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
+ if (lookahead == '"') ADVANCE(199);
+ if (lookahead == '&') ADVANCE(17);
+ if (lookahead == '/') ADVANCE(202);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(201);
+ if (lookahead != 0) ADVANCE(203);
END_STATE();
case 14:
- ADVANCE_MAP(
- '"', 201,
- '#', 83,
- '\'', 202,
- '(', 157,
- '*', 151,
- '.', 198,
- '/', 27,
- '0', 300,
- '<', 185,
- '@', 319,
- '[', 165,
- '\\', 105,
- 's', 313,
- '{', 154,
- );
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
+ if (lookahead == '"') ADVANCE(199);
+ if (lookahead == '/') ADVANCE(25);
if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(14);
- if (lookahead > '#' &&
- (lookahead < '%' || '@' < lookahead) &&
- (lookahead < '[' || '^' < lookahead) &&
- lookahead != '`' &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
END_STATE();
case 15:
- if (lookahead == '"') ADVANCE(201);
- if (lookahead == '&') ADVANCE(19);
- if (lookahead == '/') ADVANCE(204);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(203);
- if (lookahead != 0) ADVANCE(205);
+ if (lookahead == '"') ADVANCE(199);
+ if (lookahead == '/') ADVANCE(270);
+ if (lookahead == '\\') ADVANCE(104);
+ if (lookahead == '\n' ||
+ lookahead == '\r') SKIP(14);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(273);
+ if (lookahead != 0) ADVANCE(275);
END_STATE();
case 16:
- if (lookahead == '"') ADVANCE(201);
- if (lookahead == '/') ADVANCE(27);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(16);
+ if (lookahead == '#') ADVANCE(115);
+ if (('A' <= lookahead && lookahead <= 'Z') ||
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(76);
END_STATE();
case 17:
- if (lookahead == '"') ADVANCE(201);
- if (lookahead == '/') ADVANCE(272);
- if (lookahead == '\\') ADVANCE(106);
- if (lookahead == '\n' ||
- lookahead == '\r') SKIP(16);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(275);
- if (lookahead != 0) ADVANCE(277);
+ if (lookahead == '#') ADVANCE(115);
+ if (('A' <= lookahead && lookahead <= 'Z') ||
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(76);
+ if (lookahead != 0) ADVANCE(203);
END_STATE();
case 18:
- if (lookahead == '#') ADVANCE(117);
+ if (lookahead == '#') ADVANCE(115);
if (('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(78);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(76);
+ if (lookahead != 0) ADVANCE(209);
END_STATE();
case 19:
- if (lookahead == '#') ADVANCE(117);
- if (('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(78);
- if (lookahead != 0) ADVANCE(205);
+ if (lookahead == '$') ADVANCE(105);
+ if (lookahead == '/') ADVANCE(25);
+ if (lookahead == '\\') ADVANCE(104);
+ if (lookahead == '`') ADVANCE(291);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(20);
END_STATE();
case 20:
- if (lookahead == '#') ADVANCE(117);
- if (('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(78);
- if (lookahead != 0) ADVANCE(211);
+ if (lookahead == '$') ADVANCE(105);
+ if (lookahead == '/') ADVANCE(25);
+ if (lookahead == '`') ADVANCE(291);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(20);
END_STATE();
case 21:
- if (lookahead == '$') ADVANCE(107);
- if (lookahead == '/') ADVANCE(27);
- if (lookahead == '\\') ADVANCE(106);
- if (lookahead == '`') ADVANCE(293);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(22);
+ if (lookahead == '&') ADVANCE(16);
+ if (lookahead == '/') ADVANCE(176);
+ if (lookahead == '<') ADVANCE(184);
+ if (lookahead == '{') ADVANCE(152);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(21);
+ if (lookahead != 0 &&
+ lookahead != '>' &&
+ lookahead != '}') ADVANCE(180);
END_STATE();
case 22:
- if (lookahead == '$') ADVANCE(107);
- if (lookahead == '/') ADVANCE(27);
- if (lookahead == '`') ADVANCE(293);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(22);
+ if (lookahead == '&') ADVANCE(18);
+ if (lookahead == '\'') ADVANCE(200);
+ if (lookahead == '/') ADVANCE(208);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(207);
+ if (lookahead != 0) ADVANCE(209);
END_STATE();
case 23:
- if (lookahead == '&') ADVANCE(18);
- if (lookahead == '/') ADVANCE(173);
- if (lookahead == '<') ADVANCE(186);
- if (lookahead == '{') ADVANCE(154);
- if (lookahead == '\n' ||
- lookahead == ' ') SKIP(23);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(175);
- if (lookahead != 0 &&
- lookahead != '>' &&
- lookahead != '}') ADVANCE(174);
+ if (lookahead == '\'') ADVANCE(200);
+ if (lookahead == '/') ADVANCE(25);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(23);
END_STATE();
case 24:
- if (lookahead == '&') ADVANCE(20);
- if (lookahead == '\'') ADVANCE(202);
- if (lookahead == '/') ADVANCE(210);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(209);
- if (lookahead != 0) ADVANCE(211);
+ if (lookahead == '\'') ADVANCE(200);
+ if (lookahead == '/') ADVANCE(276);
+ if (lookahead == '\\') ADVANCE(104);
+ if (lookahead == '\n' ||
+ lookahead == '\r') SKIP(23);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(279);
+ if (lookahead != 0) ADVANCE(281);
END_STATE();
case 25:
- if (lookahead == '\'') ADVANCE(202);
- if (lookahead == '/') ADVANCE(27);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(25);
+ if (lookahead == '*') ADVANCE(28);
+ if (lookahead == '/') ADVANCE(290);
END_STATE();
case 26:
- if (lookahead == '\'') ADVANCE(202);
- if (lookahead == '/') ADVANCE(278);
- if (lookahead == '\\') ADVANCE(106);
- if (lookahead == '\n' ||
- lookahead == '\r') SKIP(25);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(281);
- if (lookahead != 0) ADVANCE(283);
+ if (lookahead == '*') ADVANCE(28);
+ if (lookahead == '/') ADVANCE(290);
+ if (lookahead == '>') ADVANCE(198);
END_STATE();
case 27:
- if (lookahead == '*') ADVANCE(30);
- if (lookahead == '/') ADVANCE(292);
+ if (lookahead == '*') ADVANCE(27);
+ if (lookahead == '/') ADVANCE(287);
+ if (lookahead != 0) ADVANCE(28);
END_STATE();
case 28:
- if (lookahead == '*') ADVANCE(30);
- if (lookahead == '/') ADVANCE(292);
- if (lookahead == '>') ADVANCE(200);
+ if (lookahead == '*') ADVANCE(27);
+ if (lookahead != 0) ADVANCE(28);
END_STATE();
case 29:
- if (lookahead == '*') ADVANCE(29);
- if (lookahead == '/') ADVANCE(289);
- if (lookahead != 0) ADVANCE(30);
+ if (lookahead == '*') ADVANCE(204);
+ if (lookahead == '#' ||
+ ('A' <= lookahead && lookahead <= 'Z') ||
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(28);
+ if (lookahead != 0) ADVANCE(205);
END_STATE();
case 30:
- if (lookahead == '*') ADVANCE(29);
- if (lookahead != 0) ADVANCE(30);
- END_STATE();
- case 31:
- if (lookahead == '*') ADVANCE(206);
+ if (lookahead == '*') ADVANCE(210);
if (lookahead == '#' ||
('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(30);
- if (lookahead != 0) ADVANCE(207);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(28);
+ if (lookahead != 0) ADVANCE(211);
+ END_STATE();
+ case 31:
+ if (lookahead == '.') ADVANCE(34);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(304);
END_STATE();
case 32:
- if (lookahead == '*') ADVANCE(212);
- if (lookahead == '#' ||
- ('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(30);
- if (lookahead != 0) ADVANCE(213);
+ if (lookahead == '.') ADVANCE(214);
+ if (lookahead == '?') ADVANCE(264);
END_STATE();
case 33:
- if (lookahead == '.') ADVANCE(36);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(306);
+ if (lookahead == '.') ADVANCE(214);
+ if (lookahead == '?') ADVANCE(263);
END_STATE();
case 34:
- if (lookahead == '.') ADVANCE(216);
- if (lookahead == '?') ADVANCE(266);
+ if (lookahead == '.') ADVANCE(230);
END_STATE();
case 35:
- if (lookahead == '.') ADVANCE(216);
- if (lookahead == '?') ADVANCE(265);
+ if (lookahead == '.') ADVANCE(194);
+ if (lookahead == '/') ADVANCE(26);
+ if (lookahead == ':') ADVANCE(158);
+ if (lookahead == '=') ADVANCE(159);
+ if (lookahead == '>') ADVANCE(190);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == '{') ADVANCE(152);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(35);
+ if (lookahead == '$' ||
+ ('A' <= lookahead && lookahead <= 'Z') ||
+ lookahead == '_' ||
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(307);
+ if (lookahead > '~') ADVANCE(315);
END_STATE();
case 36:
- if (lookahead == '.') ADVANCE(232);
+ if (lookahead == '/') ADVANCE(293);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(37);
END_STATE();
case 37:
- if (lookahead == '.') ADVANCE(196);
- if (lookahead == '/') ADVANCE(28);
- if (lookahead == ':') ADVANCE(160);
- if (lookahead == '=') ADVANCE(161);
- if (lookahead == '>') ADVANCE(192);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == '{') ADVANCE(154);
+ if (lookahead == '/') ADVANCE(25);
if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(37);
- if (lookahead == '$' ||
- ('A' <= lookahead && lookahead <= 'Z') ||
- lookahead == '_' ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(309);
- if (lookahead > '~') ADVANCE(317);
END_STATE();
case 38:
- if (lookahead == '/') ADVANCE(295);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(39);
+ if (lookahead == ';') ADVANCE(182);
END_STATE();
case 39:
- if (lookahead == '/') ADVANCE(27);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(39);
+ if (lookahead == ';') ADVANCE(182);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(38);
END_STATE();
case 40:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(39);
END_STATE();
case 41:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('0' <= lookahead && lookahead <= '9')) ADVANCE(40);
END_STATE();
case 42:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('0' <= lookahead && lookahead <= '9')) ADVANCE(41);
END_STATE();
case 43:
- if (lookahead == ';') ADVANCE(184);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(42);
+ if (lookahead == ';') ADVANCE(182);
+ if (('0' <= lookahead && lookahead <= '9') ||
+ ('A' <= lookahead && lookahead <= 'F') ||
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(38);
END_STATE();
case 44:
- if (lookahead == ';') ADVANCE(184);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(43);
+ if (lookahead == ';') ADVANCE(182);
+ if (('0' <= lookahead && lookahead <= '9') ||
+ ('A' <= lookahead && lookahead <= 'F') ||
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(43);
END_STATE();
case 45:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(40);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(44);
END_STATE();
case 46:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
('a' <= lookahead && lookahead <= 'f')) ADVANCE(45);
END_STATE();
case 47:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
('a' <= lookahead && lookahead <= 'f')) ADVANCE(46);
END_STATE();
case 48:
- if (lookahead == ';') ADVANCE(184);
- if (('0' <= lookahead && lookahead <= '9') ||
- ('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(47);
+ if (lookahead == ';') ADVANCE(182);
+ if (('A' <= lookahead && lookahead <= 'Z') ||
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(38);
END_STATE();
case 49:
- if (lookahead == ';') ADVANCE(184);
- if (('0' <= lookahead && lookahead <= '9') ||
- ('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(48);
+ if (lookahead == ';') ADVANCE(182);
+ if (('A' <= lookahead && lookahead <= 'Z') ||
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(48);
END_STATE();
case 50:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(40);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(49);
END_STATE();
case 51:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(50);
END_STATE();
case 52:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(51);
END_STATE();
case 53:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(52);
END_STATE();
case 54:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(53);
END_STATE();
case 55:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(54);
END_STATE();
case 56:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(55);
END_STATE();
case 57:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(56);
END_STATE();
case 58:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(57);
END_STATE();
case 59:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(58);
END_STATE();
case 60:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(59);
END_STATE();
case 61:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(60);
END_STATE();
case 62:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(61);
END_STATE();
case 63:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(62);
END_STATE();
case 64:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(63);
END_STATE();
case 65:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(64);
END_STATE();
case 66:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(65);
END_STATE();
case 67:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(66);
END_STATE();
case 68:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(67);
END_STATE();
case 69:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(68);
END_STATE();
case 70:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(69);
END_STATE();
case 71:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(70);
END_STATE();
case 72:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(71);
END_STATE();
case 73:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(72);
END_STATE();
case 74:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(73);
END_STATE();
case 75:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(74);
END_STATE();
case 76:
- if (lookahead == ';') ADVANCE(184);
+ if (lookahead == ';') ADVANCE(182);
if (('A' <= lookahead && lookahead <= 'Z') ||
('a' <= lookahead && lookahead <= 'z')) ADVANCE(75);
END_STATE();
case 77:
- if (lookahead == ';') ADVANCE(184);
- if (('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(76);
+ if (lookahead == '=') ADVANCE(260);
END_STATE();
case 78:
- if (lookahead == ';') ADVANCE(184);
- if (('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(77);
+ if (lookahead == '=') ADVANCE(258);
END_STATE();
case 79:
- if (lookahead == '=') ADVANCE(262);
+ if (lookahead == '>') ADVANCE(169);
END_STATE();
case 80:
- if (lookahead == '=') ADVANCE(260);
+ if (lookahead == '>') ADVANCE(170);
END_STATE();
case 81:
- if (lookahead == '>') ADVANCE(171);
+ if (lookahead == '\\') ADVANCE(102);
+ if (set_contains(sym_identifier_character_set_1, 14, lookahead)) ADVANCE(316);
END_STATE();
case 82:
- if (lookahead == '>') ADVANCE(172);
+ if (lookahead == '\\') ADVANCE(142);
+ if (lookahead == ']') ADVANCE(295);
+ if (lookahead != 0 &&
+ lookahead != '\n') ADVANCE(82);
END_STATE();
case 83:
- if (lookahead == '\\') ADVANCE(104);
- if (set_contains(sym_identifier_character_set_1, 14, lookahead)) ADVANCE(318);
+ if (lookahead == 'a') ADVANCE(98);
END_STATE();
case 84:
- if (lookahead == '\\') ADVANCE(144);
- if (lookahead == ']') ADVANCE(297);
- if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(84);
+ if (lookahead == 'a') ADVANCE(99);
END_STATE();
case 85:
- if (lookahead == 'a') ADVANCE(100);
+ if (lookahead == 'e') ADVANCE(93);
END_STATE();
case 86:
- if (lookahead == 'a') ADVANCE(101);
+ if (lookahead == 'e') ADVANCE(79);
END_STATE();
case 87:
- if (lookahead == 'e') ADVANCE(95);
+ if (lookahead == 'e') ADVANCE(97);
END_STATE();
case 88:
- if (lookahead == 'e') ADVANCE(81);
+ if (lookahead == 'e') ADVANCE(80);
END_STATE();
case 89:
- if (lookahead == 'e') ADVANCE(99);
+ if (lookahead == 'e') ADVANCE(94);
END_STATE();
case 90:
- if (lookahead == 'e') ADVANCE(82);
+ if (lookahead == 'g') ADVANCE(87);
+ if (('\t' <= lookahead && lookahead <= '\r') ||
+ lookahead == ' ') ADVANCE(90);
END_STATE();
case 91:
- if (lookahead == 'e') ADVANCE(96);
+ if (lookahead == 'l') ADVANCE(83);
END_STATE();
case 92:
- if (lookahead == 'g') ADVANCE(89);
- if (('\t' <= lookahead && lookahead <= '\r') ||
- lookahead == ' ') ADVANCE(92);
+ if (lookahead == 'l') ADVANCE(84);
END_STATE();
case 93:
- if (lookahead == 'l') ADVANCE(85);
+ if (lookahead == 'm') ADVANCE(95);
END_STATE();
case 94:
- if (lookahead == 'l') ADVANCE(86);
+ if (lookahead == 'm') ADVANCE(96);
END_STATE();
case 95:
- if (lookahead == 'm') ADVANCE(97);
+ if (lookahead == 'p') ADVANCE(91);
END_STATE();
case 96:
- if (lookahead == 'm') ADVANCE(98);
+ if (lookahead == 'p') ADVANCE(92);
END_STATE();
case 97:
- if (lookahead == 'p') ADVANCE(93);
+ if (lookahead == 't') ADVANCE(1);
END_STATE();
case 98:
- if (lookahead == 'p') ADVANCE(94);
+ if (lookahead == 't') ADVANCE(86);
END_STATE();
case 99:
- if (lookahead == 't') ADVANCE(1);
+ if (lookahead == 't') ADVANCE(88);
END_STATE();
case 100:
- if (lookahead == 't') ADVANCE(88);
+ if (lookahead == 't') ADVANCE(89);
END_STATE();
case 101:
- if (lookahead == 't') ADVANCE(90);
+ if (lookahead == 'u') ADVANCE(106);
+ if (lookahead == 'x') ADVANCE(132);
+ if (lookahead == '\r' ||
+ lookahead == '?') ADVANCE(284);
+ if (('0' <= lookahead && lookahead <= '7')) ADVANCE(286);
+ if (lookahead != 0) ADVANCE(282);
END_STATE();
case 102:
- if (lookahead == 't') ADVANCE(91);
+ if (lookahead == 'u') ADVANCE(107);
END_STATE();
case 103:
if (lookahead == 'u') ADVANCE(108);
- if (lookahead == 'x') ADVANCE(134);
- if (lookahead == '\r' ||
- lookahead == '?') ADVANCE(286);
- if (('0' <= lookahead && lookahead <= '7')) ADVANCE(288);
- if (lookahead != 0) ADVANCE(284);
END_STATE();
case 104:
if (lookahead == 'u') ADVANCE(109);
+ if (lookahead == 'x') ADVANCE(132);
+ if (lookahead == '\r' ||
+ lookahead == '?') ADVANCE(284);
+ if (('0' <= lookahead && lookahead <= '7')) ADVANCE(286);
+ if (lookahead != 0) ADVANCE(282);
END_STATE();
case 105:
- if (lookahead == 'u') ADVANCE(110);
+ if (lookahead == '{') ADVANCE(292);
END_STATE();
case 106:
- if (lookahead == 'u') ADVANCE(111);
- if (lookahead == 'x') ADVANCE(134);
- if (lookahead == '\r' ||
- lookahead == '?') ADVANCE(286);
- if (('0' <= lookahead && lookahead <= '7')) ADVANCE(288);
- if (lookahead != 0) ADVANCE(284);
+ if (lookahead == '{') ADVANCE(126);
+ if (('0' <= lookahead && lookahead <= '9') ||
+ ('A' <= lookahead && lookahead <= 'F') ||
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(137);
END_STATE();
case 107:
- if (lookahead == '{') ADVANCE(294);
+ if (lookahead == '{') ADVANCE(130);
+ if (('0' <= lookahead && lookahead <= '9') ||
+ ('A' <= lookahead && lookahead <= 'F') ||
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(138);
END_STATE();
case 108:
- if (lookahead == '{') ADVANCE(128);
+ if (lookahead == '{') ADVANCE(131);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
('a' <= lookahead && lookahead <= 'f')) ADVANCE(139);
END_STATE();
case 109:
- if (lookahead == '{') ADVANCE(132);
+ if (lookahead == '{') ADVANCE(133);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(140);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(129);
END_STATE();
case 110:
- if (lookahead == '{') ADVANCE(133);
+ if (lookahead == '}') ADVANCE(315);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(141);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(110);
END_STATE();
case 111:
- if (lookahead == '{') ADVANCE(135);
+ if (lookahead == '}') ADVANCE(316);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(131);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(111);
END_STATE();
case 112:
- if (lookahead == '}') ADVANCE(317);
+ if (lookahead == '}') ADVANCE(282);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
('a' <= lookahead && lookahead <= 'f')) ADVANCE(112);
END_STATE();
case 113:
- if (lookahead == '}') ADVANCE(318);
+ if (lookahead == '}') ADVANCE(283);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
('a' <= lookahead && lookahead <= 'f')) ADVANCE(113);
END_STATE();
case 114:
- if (lookahead == '}') ADVANCE(284);
- if (('0' <= lookahead && lookahead <= '9') ||
- ('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(114);
+ if (lookahead == '+' ||
+ lookahead == '-') ADVANCE(121);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(305);
END_STATE();
case 115:
- if (lookahead == '}') ADVANCE(285);
- if (('0' <= lookahead && lookahead <= '9') ||
- ('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(115);
+ if (lookahead == 'X' ||
+ lookahead == 'x') ADVANCE(128);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(42);
END_STATE();
case 116:
- if (lookahead == '+' ||
- lookahead == '-') ADVANCE(123);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(307);
+ if (lookahead == '0' ||
+ lookahead == '1') ADVANCE(301);
END_STATE();
case 117:
- if (lookahead == 'X' ||
- lookahead == 'x') ADVANCE(130);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(44);
+ if (('0' <= lookahead && lookahead <= '7')) ADVANCE(302);
END_STATE();
case 118:
- if (lookahead == '0' ||
- lookahead == '1') ADVANCE(303);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(299);
END_STATE();
case 119:
- if (('0' <= lookahead && lookahead <= '7')) ADVANCE(304);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(304);
END_STATE();
case 120:
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(301);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(300);
END_STATE();
case 121:
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(306);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(305);
END_STATE();
case 122:
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(302);
+ if (('0' <= lookahead && lookahead <= '9') ||
+ ('A' <= lookahead && lookahead <= 'F') ||
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(315);
END_STATE();
case 123:
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(307);
+ if (('0' <= lookahead && lookahead <= '9') ||
+ ('A' <= lookahead && lookahead <= 'F') ||
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(316);
END_STATE();
case 124:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(317);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(282);
END_STATE();
case 125:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(318);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(303);
END_STATE();
case 126:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(284);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(113);
END_STATE();
case 127:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(305);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(283);
END_STATE();
case 128:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(115);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(47);
END_STATE();
case 129:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(285);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(132);
END_STATE();
case 130:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(49);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(111);
END_STATE();
case 131:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(134);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(110);
END_STATE();
case 132:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(113);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(124);
END_STATE();
case 133:
if (('0' <= lookahead && lookahead <= '9') ||
@@ -5085,27 +5069,27 @@ static bool ts_lex(TSLexer *lexer, TSStateId state) {
case 134:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(126);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(127);
END_STATE();
case 135:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(114);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(123);
END_STATE();
case 136:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(129);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(122);
END_STATE();
case 137:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(125);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(134);
END_STATE();
case 138:
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(124);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(135);
END_STATE();
case 139:
if (('0' <= lookahead && lookahead <= '9') ||
@@ -5113,1109 +5097,1106 @@ static bool ts_lex(TSLexer *lexer, TSStateId state) {
('a' <= lookahead && lookahead <= 'f')) ADVANCE(136);
END_STATE();
case 140:
- if (('0' <= lookahead && lookahead <= '9') ||
- ('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(137);
- END_STATE();
- case 141:
- if (('0' <= lookahead && lookahead <= '9') ||
- ('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(138);
- END_STATE();
- case 142:
if (lookahead != 0 &&
lookahead != '#' &&
(lookahead < 'A' || 'Z' < lookahead) &&
- (lookahead < 'a' || 'z' < lookahead)) ADVANCE(205);
+ (lookahead < 'a' || 'z' < lookahead)) ADVANCE(203);
END_STATE();
- case 143:
+ case 141:
if (lookahead != 0 &&
lookahead != '#' &&
(lookahead < 'A' || 'Z' < lookahead) &&
- (lookahead < 'a' || 'z' < lookahead)) ADVANCE(211);
+ (lookahead < 'a' || 'z' < lookahead)) ADVANCE(209);
END_STATE();
- case 144:
+ case 142:
if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(84);
+ lookahead != '\n') ADVANCE(82);
END_STATE();
- case 145:
+ case 143:
if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(297);
+ lookahead != '\n') ADVANCE(295);
END_STATE();
- case 146:
- if (eof) ADVANCE(149);
+ case 144:
+ if (eof) ADVANCE(147);
ADVANCE_MAP(
- '!', 268,
- '"', 201,
- '#', 7,
- '$', 315,
- '%', 256,
- '&', 243,
- '\'', 202,
- '(', 157,
- ')', 158,
- '*', 152,
- '+', 250,
- ',', 155,
- '-', 252,
- '.', 197,
- '/', 254,
- '0', 300,
- ':', 160,
- ';', 159,
- '<', 187,
- '=', 163,
- '>', 193,
- '?', 34,
- '@', 319,
- '[', 165,
- '\\', 105,
- ']', 166,
- '^', 246,
- '`', 293,
- 's', 313,
- '{', 154,
- '|', 247,
- '}', 156,
- '~', 269,
+ '!', 266,
+ '"', 199,
+ '#', 5,
+ '$', 313,
+ '%', 254,
+ '&', 241,
+ '\'', 200,
+ '(', 155,
+ ')', 156,
+ '*', 150,
+ '+', 248,
+ ',', 153,
+ '-', 250,
+ '.', 195,
+ '/', 252,
+ '0', 298,
+ ':', 158,
+ ';', 157,
+ '<', 185,
+ '=', 161,
+ '>', 191,
+ '?', 32,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '^', 244,
+ '`', 291,
+ 's', 311,
+ '{', 152,
+ '|', 245,
+ '}', 154,
+ '~', 267,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(146);
- if (lookahead > '@') ADVANCE(317);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(144);
+ if (lookahead > '@') ADVANCE(315);
END_STATE();
- case 147:
- if (eof) ADVANCE(149);
+ case 145:
+ if (eof) ADVANCE(147);
ADVANCE_MAP(
- '!', 268,
- '"', 201,
- '#', 83,
- '%', 255,
- '&', 244,
- '\'', 202,
- '(', 157,
- ')', 158,
- '*', 153,
- '+', 249,
- ',', 155,
- '-', 251,
- '.', 198,
- '/', 253,
- '0', 300,
- ':', 160,
- ';', 159,
- '<', 190,
- '=', 162,
- '>', 194,
- '?', 35,
- '@', 319,
- '[', 165,
- '\\', 105,
- ']', 166,
- '^', 245,
- '`', 293,
- '{', 154,
- '|', 248,
- '}', 156,
- '~', 269,
+ '!', 266,
+ '"', 199,
+ '#', 81,
+ '%', 253,
+ '&', 242,
+ '\'', 200,
+ '(', 155,
+ ')', 156,
+ '*', 151,
+ '+', 247,
+ ',', 153,
+ '-', 249,
+ '.', 196,
+ '/', 251,
+ '0', 298,
+ ':', 158,
+ ';', 157,
+ '<', 188,
+ '=', 160,
+ '>', 192,
+ '?', 33,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '^', 243,
+ '`', 291,
+ '{', 152,
+ '|', 246,
+ '}', 154,
+ '~', 267,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(147);
- if (lookahead > '#') ADVANCE(317);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(145);
+ if (lookahead > '#') ADVANCE(315);
END_STATE();
- case 148:
- if (eof) ADVANCE(149);
+ case 146:
+ if (eof) ADVANCE(147);
ADVANCE_MAP(
- '!', 267,
- '"', 201,
- '#', 7,
- '\'', 202,
- '(', 157,
- ')', 158,
- '*', 151,
- '+', 249,
- ',', 155,
- '-', 251,
- '.', 33,
- '/', 253,
- '0', 300,
- ':', 160,
- ';', 159,
- '<', 191,
- '=', 164,
- '>', 192,
- '@', 319,
- '[', 165,
- '\\', 105,
- ']', 166,
- '`', 293,
- '{', 154,
- '}', 156,
- '~', 269,
+ '!', 265,
+ '"', 199,
+ '#', 5,
+ '\'', 200,
+ '(', 155,
+ ')', 156,
+ '*', 149,
+ '+', 247,
+ ',', 153,
+ '-', 249,
+ '.', 31,
+ '/', 251,
+ '0', 298,
+ ':', 158,
+ ';', 157,
+ '<', 189,
+ '=', 162,
+ '>', 190,
+ '@', 317,
+ '[', 163,
+ '\\', 103,
+ ']', 164,
+ '`', 291,
+ '{', 152,
+ '}', 154,
+ '~', 267,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
- if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(148);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
+ if (set_contains(extras_character_set_1, 10, lookahead)) SKIP(146);
if (lookahead > '#' &&
(lookahead < '%' || '@' < lookahead) &&
(lookahead < '[' || '^' < lookahead) &&
- (lookahead < '{' || '~' < lookahead)) ADVANCE(317);
+ (lookahead < '{' || '~' < lookahead)) ADVANCE(315);
END_STATE();
- case 149:
+ case 147:
ACCEPT_TOKEN(ts_builtin_sym_end);
END_STATE();
- case 150:
+ case 148:
ACCEPT_TOKEN(sym_hash_bang_line);
if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(150);
+ lookahead != '\n') ADVANCE(148);
+ END_STATE();
+ case 149:
+ ACCEPT_TOKEN(anon_sym_STAR);
+ END_STATE();
+ case 150:
+ ACCEPT_TOKEN(anon_sym_STAR);
+ if (lookahead == '*') ADVANCE(256);
+ if (lookahead == '=') ADVANCE(217);
END_STATE();
case 151:
ACCEPT_TOKEN(anon_sym_STAR);
+ if (lookahead == '*') ADVANCE(255);
END_STATE();
case 152:
- ACCEPT_TOKEN(anon_sym_STAR);
- if (lookahead == '*') ADVANCE(258);
- if (lookahead == '=') ADVANCE(219);
+ ACCEPT_TOKEN(anon_sym_LBRACE);
END_STATE();
case 153:
- ACCEPT_TOKEN(anon_sym_STAR);
- if (lookahead == '*') ADVANCE(257);
+ ACCEPT_TOKEN(anon_sym_COMMA);
END_STATE();
case 154:
- ACCEPT_TOKEN(anon_sym_LBRACE);
+ ACCEPT_TOKEN(anon_sym_RBRACE);
END_STATE();
case 155:
- ACCEPT_TOKEN(anon_sym_COMMA);
+ ACCEPT_TOKEN(anon_sym_LPAREN);
END_STATE();
case 156:
- ACCEPT_TOKEN(anon_sym_RBRACE);
+ ACCEPT_TOKEN(anon_sym_RPAREN);
END_STATE();
case 157:
- ACCEPT_TOKEN(anon_sym_LPAREN);
+ ACCEPT_TOKEN(anon_sym_SEMI);
END_STATE();
case 158:
- ACCEPT_TOKEN(anon_sym_RPAREN);
+ ACCEPT_TOKEN(anon_sym_COLON);
END_STATE();
case 159:
- ACCEPT_TOKEN(anon_sym_SEMI);
+ ACCEPT_TOKEN(anon_sym_EQ);
END_STATE();
case 160:
- ACCEPT_TOKEN(anon_sym_COLON);
+ ACCEPT_TOKEN(anon_sym_EQ);
+ if (lookahead == '=') ADVANCE(258);
END_STATE();
case 161:
ACCEPT_TOKEN(anon_sym_EQ);
+ if (lookahead == '=') ADVANCE(258);
+ if (lookahead == '>') ADVANCE(213);
END_STATE();
case 162:
ACCEPT_TOKEN(anon_sym_EQ);
- if (lookahead == '=') ADVANCE(260);
+ if (lookahead == '>') ADVANCE(213);
END_STATE();
case 163:
- ACCEPT_TOKEN(anon_sym_EQ);
- if (lookahead == '=') ADVANCE(260);
- if (lookahead == '>') ADVANCE(215);
- END_STATE();
- case 164:
- ACCEPT_TOKEN(anon_sym_EQ);
- if (lookahead == '>') ADVANCE(215);
- END_STATE();
- case 165:
ACCEPT_TOKEN(anon_sym_LBRACK);
END_STATE();
- case 166:
+ case 164:
ACCEPT_TOKEN(anon_sym_RBRACK);
END_STATE();
- case 167:
+ case 165:
ACCEPT_TOKEN(sym__glimmer_template_content);
END_STATE();
- case 168:
+ case 166:
ACCEPT_TOKEN(sym__glimmer_template_content);
- if (lookahead == '*') ADVANCE(30);
- if (lookahead == '/') ADVANCE(292);
+ if (lookahead == '*') ADVANCE(28);
+ if (lookahead == '/') ADVANCE(290);
END_STATE();
- case 169:
+ case 167:
ACCEPT_TOKEN(sym__glimmer_template_content);
- if (lookahead == '/') ADVANCE(168);
- if (lookahead == '<') ADVANCE(170);
+ if (lookahead == '/') ADVANCE(166);
+ if (lookahead == '<') ADVANCE(168);
if ((set_contains(extras_character_set_1, 10, lookahead)) &&
- lookahead != '\n') ADVANCE(169);
+ lookahead != '\n') ADVANCE(167);
if (lookahead != 0 &&
- (lookahead < '\t' || '\r' < lookahead)) ADVANCE(167);
+ (lookahead < '\t' || '\r' < lookahead)) ADVANCE(165);
END_STATE();
- case 170:
+ case 168:
ACCEPT_TOKEN(sym__glimmer_template_content);
- if (lookahead == '/') ADVANCE(102);
+ if (lookahead == '/') ADVANCE(100);
END_STATE();
- case 171:
+ case 169:
ACCEPT_TOKEN(sym_glimmer_opening_tag);
END_STATE();
- case 172:
+ case 170:
ACCEPT_TOKEN(sym_glimmer_closing_tag);
END_STATE();
- case 173:
+ case 171:
ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- if (lookahead == ' ') ADVANCE(5);
- if (lookahead == '*') ADVANCE(177);
- if (lookahead == '/') ADVANCE(178);
+ if (lookahead == '\n') ADVANCE(180);
+ if (lookahead == '*') ADVANCE(175);
+ if (lookahead == '/') ADVANCE(172);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(173);
if (lookahead != 0 &&
- lookahead != '\n' &&
lookahead != '&' &&
lookahead != '<' &&
lookahead != '>' &&
lookahead != '{' &&
- lookahead != '}') ADVANCE(174);
+ lookahead != '}') ADVANCE(173);
END_STATE();
- case 174:
+ case 172:
ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- if (lookahead == ' ') ADVANCE(5);
+ ADVANCE_MAP(
+ '\n', 180,
+ '\r', 173,
+ 0x2028, 173,
+ 0x2029, 173,
+ '&', 290,
+ '<', 290,
+ '>', 290,
+ '{', 290,
+ '}', 290,
+ );
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(172);
+ if (lookahead != 0) ADVANCE(172);
+ END_STATE();
+ case 173:
+ ACCEPT_TOKEN(aux_sym_jsx_text_token1);
+ if (lookahead == '\n') ADVANCE(180);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(173);
if (lookahead != 0 &&
- lookahead != '\n' &&
lookahead != '&' &&
lookahead != '<' &&
lookahead != '>' &&
lookahead != '{' &&
- lookahead != '}') ADVANCE(174);
+ lookahead != '}') ADVANCE(173);
+ END_STATE();
+ case 174:
+ ACCEPT_TOKEN(aux_sym_jsx_text_token1);
+ ADVANCE_MAP(
+ '\n', 178,
+ '*', 174,
+ '/', 173,
+ '&', 28,
+ '<', 28,
+ '>', 28,
+ '{', 28,
+ '}', 28,
+ );
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(175);
+ if (lookahead != 0) ADVANCE(175);
END_STATE();
case 175:
ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- if (lookahead == ' ') ADVANCE(2);
- if (lookahead == '/') ADVANCE(173);
- if ((set_contains(extras_character_set_1, 10, lookahead)) &&
- lookahead != '\n') ADVANCE(175);
+ if (lookahead == '\n') ADVANCE(178);
+ if (lookahead == '*') ADVANCE(174);
+ if (lookahead == '&' ||
+ lookahead == '<' ||
+ lookahead == '>' ||
+ lookahead == '{' ||
+ lookahead == '}') ADVANCE(28);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(175);
+ if (lookahead != 0) ADVANCE(175);
+ END_STATE();
+ case 176:
+ ACCEPT_TOKEN(aux_sym_jsx_text_token1);
+ if (lookahead == '*') ADVANCE(178);
+ if (lookahead == '/') ADVANCE(179);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(180);
if (lookahead != 0 &&
- (lookahead < '\t' || '\r' < lookahead) &&
lookahead != '&' &&
lookahead != '<' &&
lookahead != '>' &&
lookahead != '{' &&
- lookahead != '}') ADVANCE(174);
- END_STATE();
- case 176:
- ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- ADVANCE_MAP(
- ' ', 6,
- '*', 176,
- '/', 174,
- '\n', 30,
- '&', 30,
- '<', 30,
- '>', 30,
- '{', 30,
- '}', 30,
- );
- if (lookahead != 0) ADVANCE(177);
+ lookahead != '}') ADVANCE(180);
END_STATE();
case 177:
ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- ADVANCE_MAP(
- ' ', 6,
- '*', 176,
- '\n', 30,
- '&', 30,
- '<', 30,
- '>', 30,
- '{', 30,
- '}', 30,
- );
- if (lookahead != 0) ADVANCE(177);
+ if (lookahead == '*') ADVANCE(177);
+ if (lookahead == '/') ADVANCE(180);
+ if (lookahead == '&' ||
+ lookahead == '<' ||
+ lookahead == '>' ||
+ lookahead == '{' ||
+ lookahead == '}') ADVANCE(28);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(178);
+ if (lookahead != 0) ADVANCE(178);
END_STATE();
case 178:
ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- ADVANCE_MAP(
- ' ', 180,
- '\r', 179,
- 0x2028, 179,
- 0x2029, 179,
- '&', 182,
- '<', 182,
- '>', 182,
- '{', 182,
- '}', 182,
- );
- if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(178);
- END_STATE();
- case 179:
- ACCEPT_TOKEN(aux_sym_jsx_text_token1);
- if (lookahead == ' ') ADVANCE(181);
+ if (lookahead == '*') ADVANCE(177);
if (lookahead == '&' ||
lookahead == '<' ||
lookahead == '>' ||
lookahead == '{' ||
- lookahead == '}') ADVANCE(183);
- if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(179);
+ lookahead == '}') ADVANCE(28);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(178);
+ if (lookahead != 0) ADVANCE(178);
END_STATE();
- case 180:
- ACCEPT_TOKEN(aux_sym_jsx_text_token2);
+ case 179:
+ ACCEPT_TOKEN(aux_sym_jsx_text_token1);
ADVANCE_MAP(
- ' ', 180,
- '\r', 179,
- 0x2028, 179,
- 0x2029, 179,
- '&', 182,
- '<', 182,
- '>', 182,
- '{', 182,
- '}', 182,
+ '\n', 180,
+ '\r', 180,
+ 0x2028, 180,
+ 0x2029, 180,
+ '&', 290,
+ '<', 290,
+ '>', 290,
+ '{', 290,
+ '}', 290,
);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(179);
+ if (lookahead != 0) ADVANCE(179);
+ END_STATE();
+ case 180:
+ ACCEPT_TOKEN(aux_sym_jsx_text_token1);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(180);
if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(178);
+ lookahead != '&' &&
+ lookahead != '<' &&
+ lookahead != '>' &&
+ lookahead != '{' &&
+ lookahead != '}') ADVANCE(180);
END_STATE();
case 181:
ACCEPT_TOKEN(aux_sym_jsx_text_token2);
- if (lookahead == ' ') ADVANCE(181);
- if (lookahead == '&' ||
- lookahead == '<' ||
- lookahead == '>' ||
- lookahead == '{' ||
- lookahead == '}') ADVANCE(183);
+ if (lookahead == '/') ADVANCE(171);
+ if ((set_contains(extras_character_set_1, 10, lookahead)) &&
+ lookahead != '\n') ADVANCE(181);
if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(179);
+ (lookahead < '\t' || '\r' < lookahead) &&
+ lookahead != '&' &&
+ lookahead != '<' &&
+ lookahead != '>' &&
+ lookahead != '{' &&
+ lookahead != '}') ADVANCE(173);
END_STATE();
case 182:
- ACCEPT_TOKEN(aux_sym_jsx_text_token2);
- if (lookahead == '\r' ||
- lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(183);
- if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(182);
+ ACCEPT_TOKEN(sym_html_character_reference);
END_STATE();
case 183:
- ACCEPT_TOKEN(aux_sym_jsx_text_token2);
- if (lookahead != 0 &&
- lookahead != '\n') ADVANCE(183);
+ ACCEPT_TOKEN(anon_sym_LT);
END_STATE();
case 184:
- ACCEPT_TOKEN(sym_html_character_reference);
+ ACCEPT_TOKEN(anon_sym_LT);
+ if (lookahead == '/') ADVANCE(197);
END_STATE();
case 185:
ACCEPT_TOKEN(anon_sym_LT);
+ if (lookahead == '/') ADVANCE(197);
+ if (lookahead == '<') ADVANCE(240);
+ if (lookahead == '=') ADVANCE(257);
+ if (lookahead == 't') ADVANCE(85);
END_STATE();
case 186:
ACCEPT_TOKEN(anon_sym_LT);
- if (lookahead == '/') ADVANCE(199);
+ if (lookahead == '<') ADVANCE(240);
+ if (lookahead == '=') ADVANCE(257);
END_STATE();
case 187:
ACCEPT_TOKEN(anon_sym_LT);
- if (lookahead == '/') ADVANCE(199);
- if (lookahead == '<') ADVANCE(242);
- if (lookahead == '=') ADVANCE(259);
- if (lookahead == 't') ADVANCE(87);
+ if (lookahead == '<') ADVANCE(239);
+ if (lookahead == '=') ADVANCE(257);
END_STATE();
case 188:
ACCEPT_TOKEN(anon_sym_LT);
- if (lookahead == '<') ADVANCE(242);
- if (lookahead == '=') ADVANCE(259);
+ if (lookahead == '<') ADVANCE(239);
+ if (lookahead == '=') ADVANCE(257);
+ if (lookahead == 't') ADVANCE(85);
END_STATE();
case 189:
ACCEPT_TOKEN(anon_sym_LT);
- if (lookahead == '<') ADVANCE(241);
- if (lookahead == '=') ADVANCE(259);
+ if (lookahead == 't') ADVANCE(85);
END_STATE();
case 190:
- ACCEPT_TOKEN(anon_sym_LT);
- if (lookahead == '<') ADVANCE(241);
- if (lookahead == '=') ADVANCE(259);
- if (lookahead == 't') ADVANCE(87);
+ ACCEPT_TOKEN(anon_sym_GT);
END_STATE();
case 191:
- ACCEPT_TOKEN(anon_sym_LT);
- if (lookahead == 't') ADVANCE(87);
+ ACCEPT_TOKEN(anon_sym_GT);
+ if (lookahead == '=') ADVANCE(262);
+ if (lookahead == '>') ADVANCE(235);
END_STATE();
case 192:
ACCEPT_TOKEN(anon_sym_GT);
+ if (lookahead == '=') ADVANCE(262);
+ if (lookahead == '>') ADVANCE(236);
END_STATE();
case 193:
- ACCEPT_TOKEN(anon_sym_GT);
- if (lookahead == '=') ADVANCE(264);
- if (lookahead == '>') ADVANCE(237);
- END_STATE();
- case 194:
- ACCEPT_TOKEN(anon_sym_GT);
- if (lookahead == '=') ADVANCE(264);
- if (lookahead == '>') ADVANCE(238);
- END_STATE();
- case 195:
ACCEPT_TOKEN(sym_jsx_identifier);
if (lookahead == '$' ||
lookahead == '-' ||
('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'Z') ||
lookahead == '_' ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(195);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(193);
+ END_STATE();
+ case 194:
+ ACCEPT_TOKEN(anon_sym_DOT);
+ END_STATE();
+ case 195:
+ ACCEPT_TOKEN(anon_sym_DOT);
+ if (lookahead == '.') ADVANCE(34);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(304);
END_STATE();
case 196:
ACCEPT_TOKEN(anon_sym_DOT);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(304);
END_STATE();
case 197:
- ACCEPT_TOKEN(anon_sym_DOT);
- if (lookahead == '.') ADVANCE(36);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(306);
+ ACCEPT_TOKEN(anon_sym_LT_SLASH);
END_STATE();
case 198:
- ACCEPT_TOKEN(anon_sym_DOT);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(306);
+ ACCEPT_TOKEN(anon_sym_SLASH_GT);
END_STATE();
case 199:
- ACCEPT_TOKEN(anon_sym_LT_SLASH);
+ ACCEPT_TOKEN(anon_sym_DQUOTE);
END_STATE();
case 200:
- ACCEPT_TOKEN(anon_sym_SLASH_GT);
+ ACCEPT_TOKEN(anon_sym_SQUOTE);
END_STATE();
case 201:
- ACCEPT_TOKEN(anon_sym_DQUOTE);
+ ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
+ if (lookahead == '&') ADVANCE(17);
+ if (lookahead == '/') ADVANCE(202);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(201);
+ if (lookahead != 0 &&
+ lookahead != '"') ADVANCE(203);
END_STATE();
case 202:
- ACCEPT_TOKEN(anon_sym_SQUOTE);
+ ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
+ if (lookahead == '&') ADVANCE(140);
+ if (lookahead == '*') ADVANCE(205);
+ if (lookahead == '/') ADVANCE(206);
+ if (lookahead != 0 &&
+ lookahead != '"') ADVANCE(203);
END_STATE();
case 203:
ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(19);
- if (lookahead == '/') ADVANCE(204);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(203);
+ if (lookahead == '&') ADVANCE(140);
if (lookahead != 0 &&
- lookahead != '"') ADVANCE(205);
+ lookahead != '"') ADVANCE(203);
END_STATE();
case 204:
ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(142);
- if (lookahead == '*') ADVANCE(207);
- if (lookahead == '/') ADVANCE(208);
+ if (lookahead == '&') ADVANCE(29);
+ if (lookahead == '*') ADVANCE(204);
+ if (lookahead == '/') ADVANCE(203);
if (lookahead != 0 &&
lookahead != '"') ADVANCE(205);
END_STATE();
case 205:
ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(142);
+ if (lookahead == '&') ADVANCE(29);
+ if (lookahead == '*') ADVANCE(204);
if (lookahead != 0 &&
lookahead != '"') ADVANCE(205);
END_STATE();
case 206:
ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(31);
- if (lookahead == '*') ADVANCE(206);
- if (lookahead == '/') ADVANCE(205);
+ if (lookahead == '&') ADVANCE(288);
+ if (lookahead == '\n' ||
+ lookahead == '\r' ||
+ lookahead == 0x2028 ||
+ lookahead == 0x2029) ADVANCE(203);
if (lookahead != 0 &&
- lookahead != '"') ADVANCE(207);
+ lookahead != '"') ADVANCE(206);
END_STATE();
case 207:
- ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(31);
- if (lookahead == '*') ADVANCE(206);
+ ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
+ if (lookahead == '&') ADVANCE(18);
+ if (lookahead == '/') ADVANCE(208);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(207);
if (lookahead != 0 &&
- lookahead != '"') ADVANCE(207);
+ lookahead != '&' &&
+ lookahead != '\'') ADVANCE(209);
END_STATE();
case 208:
- ACCEPT_TOKEN(sym_unescaped_double_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(290);
- if (lookahead == '\n' ||
- lookahead == '\r' ||
- lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(205);
+ ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
+ if (lookahead == '&') ADVANCE(141);
+ if (lookahead == '*') ADVANCE(211);
+ if (lookahead == '/') ADVANCE(212);
if (lookahead != 0 &&
- lookahead != '"') ADVANCE(208);
+ lookahead != '&' &&
+ lookahead != '\'') ADVANCE(209);
END_STATE();
case 209:
ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(20);
- if (lookahead == '/') ADVANCE(210);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(209);
+ if (lookahead == '&') ADVANCE(141);
if (lookahead != 0 &&
lookahead != '&' &&
- lookahead != '\'') ADVANCE(211);
+ lookahead != '\'') ADVANCE(209);
END_STATE();
case 210:
ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(143);
- if (lookahead == '*') ADVANCE(213);
- if (lookahead == '/') ADVANCE(214);
+ if (lookahead == '&') ADVANCE(30);
+ if (lookahead == '*') ADVANCE(210);
+ if (lookahead == '/') ADVANCE(209);
if (lookahead != 0 &&
lookahead != '&' &&
lookahead != '\'') ADVANCE(211);
END_STATE();
case 211:
ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(143);
+ if (lookahead == '&') ADVANCE(30);
+ if (lookahead == '*') ADVANCE(210);
if (lookahead != 0 &&
lookahead != '&' &&
lookahead != '\'') ADVANCE(211);
END_STATE();
case 212:
ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(32);
- if (lookahead == '*') ADVANCE(212);
- if (lookahead == '/') ADVANCE(211);
- if (lookahead != 0 &&
- lookahead != '&' &&
- lookahead != '\'') ADVANCE(213);
- END_STATE();
- case 213:
- ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(32);
- if (lookahead == '*') ADVANCE(212);
- if (lookahead != 0 &&
- lookahead != '&' &&
- lookahead != '\'') ADVANCE(213);
- END_STATE();
- case 214:
- ACCEPT_TOKEN(sym_unescaped_single_jsx_string_fragment);
- if (lookahead == '&') ADVANCE(291);
+ if (lookahead == '&') ADVANCE(289);
if (lookahead == '\n' ||
lookahead == '\r' ||
lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(211);
+ lookahead == 0x2029) ADVANCE(209);
if (lookahead != 0 &&
lookahead != '&' &&
- lookahead != '\'') ADVANCE(214);
+ lookahead != '\'') ADVANCE(212);
END_STATE();
- case 215:
+ case 213:
ACCEPT_TOKEN(anon_sym_EQ_GT);
END_STATE();
- case 216:
+ case 214:
ACCEPT_TOKEN(sym_optional_chain);
END_STATE();
- case 217:
+ case 215:
ACCEPT_TOKEN(anon_sym_PLUS_EQ);
END_STATE();
- case 218:
+ case 216:
ACCEPT_TOKEN(anon_sym_DASH_EQ);
END_STATE();
- case 219:
+ case 217:
ACCEPT_TOKEN(anon_sym_STAR_EQ);
END_STATE();
- case 220:
+ case 218:
ACCEPT_TOKEN(anon_sym_SLASH_EQ);
END_STATE();
- case 221:
+ case 219:
ACCEPT_TOKEN(anon_sym_PERCENT_EQ);
END_STATE();
- case 222:
+ case 220:
ACCEPT_TOKEN(anon_sym_CARET_EQ);
END_STATE();
- case 223:
+ case 221:
ACCEPT_TOKEN(anon_sym_AMP_EQ);
END_STATE();
- case 224:
+ case 222:
ACCEPT_TOKEN(anon_sym_PIPE_EQ);
END_STATE();
- case 225:
+ case 223:
ACCEPT_TOKEN(anon_sym_GT_GT_EQ);
END_STATE();
- case 226:
+ case 224:
ACCEPT_TOKEN(anon_sym_GT_GT_GT_EQ);
END_STATE();
- case 227:
+ case 225:
ACCEPT_TOKEN(anon_sym_LT_LT_EQ);
END_STATE();
- case 228:
+ case 226:
ACCEPT_TOKEN(anon_sym_STAR_STAR_EQ);
END_STATE();
- case 229:
+ case 227:
ACCEPT_TOKEN(anon_sym_AMP_AMP_EQ);
END_STATE();
- case 230:
+ case 228:
ACCEPT_TOKEN(anon_sym_PIPE_PIPE_EQ);
END_STATE();
- case 231:
+ case 229:
ACCEPT_TOKEN(anon_sym_QMARK_QMARK_EQ);
END_STATE();
- case 232:
+ case 230:
ACCEPT_TOKEN(anon_sym_DOT_DOT_DOT);
END_STATE();
- case 233:
+ case 231:
ACCEPT_TOKEN(anon_sym_AMP_AMP);
END_STATE();
- case 234:
+ case 232:
ACCEPT_TOKEN(anon_sym_AMP_AMP);
- if (lookahead == '=') ADVANCE(229);
+ if (lookahead == '=') ADVANCE(227);
END_STATE();
- case 235:
+ case 233:
ACCEPT_TOKEN(anon_sym_PIPE_PIPE);
END_STATE();
- case 236:
+ case 234:
ACCEPT_TOKEN(anon_sym_PIPE_PIPE);
- if (lookahead == '=') ADVANCE(230);
+ if (lookahead == '=') ADVANCE(228);
END_STATE();
- case 237:
+ case 235:
ACCEPT_TOKEN(anon_sym_GT_GT);
- if (lookahead == '=') ADVANCE(225);
- if (lookahead == '>') ADVANCE(240);
+ if (lookahead == '=') ADVANCE(223);
+ if (lookahead == '>') ADVANCE(238);
END_STATE();
- case 238:
+ case 236:
ACCEPT_TOKEN(anon_sym_GT_GT);
- if (lookahead == '>') ADVANCE(239);
+ if (lookahead == '>') ADVANCE(237);
END_STATE();
- case 239:
+ case 237:
ACCEPT_TOKEN(anon_sym_GT_GT_GT);
END_STATE();
- case 240:
+ case 238:
ACCEPT_TOKEN(anon_sym_GT_GT_GT);
- if (lookahead == '=') ADVANCE(226);
+ if (lookahead == '=') ADVANCE(224);
END_STATE();
- case 241:
+ case 239:
ACCEPT_TOKEN(anon_sym_LT_LT);
END_STATE();
- case 242:
+ case 240:
ACCEPT_TOKEN(anon_sym_LT_LT);
- if (lookahead == '=') ADVANCE(227);
+ if (lookahead == '=') ADVANCE(225);
END_STATE();
- case 243:
+ case 241:
ACCEPT_TOKEN(anon_sym_AMP);
- if (lookahead == '&') ADVANCE(234);
- if (lookahead == '=') ADVANCE(223);
+ if (lookahead == '&') ADVANCE(232);
+ if (lookahead == '=') ADVANCE(221);
END_STATE();
- case 244:
+ case 242:
ACCEPT_TOKEN(anon_sym_AMP);
- if (lookahead == '&') ADVANCE(233);
+ if (lookahead == '&') ADVANCE(231);
END_STATE();
- case 245:
+ case 243:
ACCEPT_TOKEN(anon_sym_CARET);
END_STATE();
- case 246:
+ case 244:
ACCEPT_TOKEN(anon_sym_CARET);
- if (lookahead == '=') ADVANCE(222);
+ if (lookahead == '=') ADVANCE(220);
END_STATE();
- case 247:
+ case 245:
ACCEPT_TOKEN(anon_sym_PIPE);
- if (lookahead == '=') ADVANCE(224);
- if (lookahead == '|') ADVANCE(236);
+ if (lookahead == '=') ADVANCE(222);
+ if (lookahead == '|') ADVANCE(234);
END_STATE();
- case 248:
+ case 246:
ACCEPT_TOKEN(anon_sym_PIPE);
- if (lookahead == '|') ADVANCE(235);
+ if (lookahead == '|') ADVANCE(233);
END_STATE();
- case 249:
+ case 247:
ACCEPT_TOKEN(anon_sym_PLUS);
- if (lookahead == '+') ADVANCE(270);
+ if (lookahead == '+') ADVANCE(268);
END_STATE();
- case 250:
+ case 248:
ACCEPT_TOKEN(anon_sym_PLUS);
- if (lookahead == '+') ADVANCE(270);
- if (lookahead == '=') ADVANCE(217);
+ if (lookahead == '+') ADVANCE(268);
+ if (lookahead == '=') ADVANCE(215);
END_STATE();
- case 251:
+ case 249:
ACCEPT_TOKEN(anon_sym_DASH);
- if (lookahead == '-') ADVANCE(271);
+ if (lookahead == '-') ADVANCE(269);
END_STATE();
- case 252:
+ case 250:
ACCEPT_TOKEN(anon_sym_DASH);
- if (lookahead == '-') ADVANCE(271);
- if (lookahead == '=') ADVANCE(218);
+ if (lookahead == '-') ADVANCE(269);
+ if (lookahead == '=') ADVANCE(216);
END_STATE();
- case 253:
+ case 251:
ACCEPT_TOKEN(anon_sym_SLASH);
- if (lookahead == '*') ADVANCE(30);
- if (lookahead == '/') ADVANCE(292);
+ if (lookahead == '*') ADVANCE(28);
+ if (lookahead == '/') ADVANCE(290);
END_STATE();
- case 254:
+ case 252:
ACCEPT_TOKEN(anon_sym_SLASH);
- if (lookahead == '*') ADVANCE(30);
- if (lookahead == '/') ADVANCE(292);
- if (lookahead == '=') ADVANCE(220);
+ if (lookahead == '*') ADVANCE(28);
+ if (lookahead == '/') ADVANCE(290);
+ if (lookahead == '=') ADVANCE(218);
END_STATE();
- case 255:
+ case 253:
ACCEPT_TOKEN(anon_sym_PERCENT);
END_STATE();
- case 256:
+ case 254:
ACCEPT_TOKEN(anon_sym_PERCENT);
- if (lookahead == '=') ADVANCE(221);
+ if (lookahead == '=') ADVANCE(219);
END_STATE();
- case 257:
+ case 255:
ACCEPT_TOKEN(anon_sym_STAR_STAR);
END_STATE();
- case 258:
+ case 256:
ACCEPT_TOKEN(anon_sym_STAR_STAR);
- if (lookahead == '=') ADVANCE(228);
+ if (lookahead == '=') ADVANCE(226);
END_STATE();
- case 259:
+ case 257:
ACCEPT_TOKEN(anon_sym_LT_EQ);
END_STATE();
- case 260:
+ case 258:
ACCEPT_TOKEN(anon_sym_EQ_EQ);
- if (lookahead == '=') ADVANCE(261);
+ if (lookahead == '=') ADVANCE(259);
END_STATE();
- case 261:
+ case 259:
ACCEPT_TOKEN(anon_sym_EQ_EQ_EQ);
END_STATE();
- case 262:
+ case 260:
ACCEPT_TOKEN(anon_sym_BANG_EQ);
- if (lookahead == '=') ADVANCE(263);
+ if (lookahead == '=') ADVANCE(261);
END_STATE();
- case 263:
+ case 261:
ACCEPT_TOKEN(anon_sym_BANG_EQ_EQ);
END_STATE();
- case 264:
+ case 262:
ACCEPT_TOKEN(anon_sym_GT_EQ);
END_STATE();
- case 265:
+ case 263:
ACCEPT_TOKEN(anon_sym_QMARK_QMARK);
END_STATE();
- case 266:
+ case 264:
ACCEPT_TOKEN(anon_sym_QMARK_QMARK);
- if (lookahead == '=') ADVANCE(231);
+ if (lookahead == '=') ADVANCE(229);
END_STATE();
- case 267:
+ case 265:
ACCEPT_TOKEN(anon_sym_BANG);
END_STATE();
- case 268:
+ case 266:
ACCEPT_TOKEN(anon_sym_BANG);
- if (lookahead == '=') ADVANCE(262);
+ if (lookahead == '=') ADVANCE(260);
END_STATE();
- case 269:
+ case 267:
ACCEPT_TOKEN(anon_sym_TILDE);
END_STATE();
- case 270:
+ case 268:
ACCEPT_TOKEN(anon_sym_PLUS_PLUS);
END_STATE();
- case 271:
+ case 269:
ACCEPT_TOKEN(anon_sym_DASH_DASH);
END_STATE();
- case 272:
+ case 270:
ACCEPT_TOKEN(sym_unescaped_double_string_fragment);
- if (lookahead == '*') ADVANCE(274);
- if (lookahead == '/') ADVANCE(276);
+ if (lookahead == '*') ADVANCE(272);
+ if (lookahead == '/') ADVANCE(274);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '"' &&
- lookahead != '\\') ADVANCE(277);
+ lookahead != '\\') ADVANCE(275);
END_STATE();
- case 273:
+ case 271:
ACCEPT_TOKEN(sym_unescaped_double_string_fragment);
- if (lookahead == '*') ADVANCE(273);
- if (lookahead == '/') ADVANCE(277);
+ if (lookahead == '*') ADVANCE(271);
+ if (lookahead == '/') ADVANCE(275);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '"' &&
- lookahead != '\\') ADVANCE(274);
+ lookahead != '\\') ADVANCE(272);
END_STATE();
- case 274:
+ case 272:
ACCEPT_TOKEN(sym_unescaped_double_string_fragment);
- if (lookahead == '*') ADVANCE(273);
+ if (lookahead == '*') ADVANCE(271);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '"' &&
- lookahead != '\\') ADVANCE(274);
+ lookahead != '\\') ADVANCE(272);
END_STATE();
- case 275:
+ case 273:
ACCEPT_TOKEN(sym_unescaped_double_string_fragment);
- if (lookahead == '/') ADVANCE(272);
+ if (lookahead == '/') ADVANCE(270);
if ((set_contains(extras_character_set_1, 10, lookahead)) &&
lookahead != '\n' &&
- lookahead != '\r') ADVANCE(275);
+ lookahead != '\r') ADVANCE(273);
if (lookahead != 0 &&
(lookahead < '\t' || '\r' < lookahead) &&
lookahead != '"' &&
- lookahead != '\\') ADVANCE(277);
+ lookahead != '\\') ADVANCE(275);
END_STATE();
- case 276:
+ case 274:
ACCEPT_TOKEN(sym_unescaped_double_string_fragment);
if (lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(277);
+ lookahead == 0x2029) ADVANCE(275);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '"' &&
- lookahead != '\\') ADVANCE(276);
+ lookahead != '\\') ADVANCE(274);
END_STATE();
- case 277:
+ case 275:
ACCEPT_TOKEN(sym_unescaped_double_string_fragment);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '"' &&
- lookahead != '\\') ADVANCE(277);
+ lookahead != '\\') ADVANCE(275);
END_STATE();
- case 278:
+ case 276:
ACCEPT_TOKEN(sym_unescaped_single_string_fragment);
- if (lookahead == '*') ADVANCE(280);
- if (lookahead == '/') ADVANCE(282);
+ if (lookahead == '*') ADVANCE(278);
+ if (lookahead == '/') ADVANCE(280);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '\'' &&
- lookahead != '\\') ADVANCE(283);
+ lookahead != '\\') ADVANCE(281);
END_STATE();
- case 279:
+ case 277:
ACCEPT_TOKEN(sym_unescaped_single_string_fragment);
- if (lookahead == '*') ADVANCE(279);
- if (lookahead == '/') ADVANCE(283);
+ if (lookahead == '*') ADVANCE(277);
+ if (lookahead == '/') ADVANCE(281);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '\'' &&
- lookahead != '\\') ADVANCE(280);
+ lookahead != '\\') ADVANCE(278);
END_STATE();
- case 280:
+ case 278:
ACCEPT_TOKEN(sym_unescaped_single_string_fragment);
- if (lookahead == '*') ADVANCE(279);
+ if (lookahead == '*') ADVANCE(277);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '\'' &&
- lookahead != '\\') ADVANCE(280);
+ lookahead != '\\') ADVANCE(278);
END_STATE();
- case 281:
+ case 279:
ACCEPT_TOKEN(sym_unescaped_single_string_fragment);
- if (lookahead == '/') ADVANCE(278);
+ if (lookahead == '/') ADVANCE(276);
if ((set_contains(extras_character_set_1, 10, lookahead)) &&
lookahead != '\n' &&
- lookahead != '\r') ADVANCE(281);
+ lookahead != '\r') ADVANCE(279);
if (lookahead != 0 &&
(lookahead < '\t' || '\r' < lookahead) &&
lookahead != '\'' &&
- lookahead != '\\') ADVANCE(283);
+ lookahead != '\\') ADVANCE(281);
END_STATE();
- case 282:
+ case 280:
ACCEPT_TOKEN(sym_unescaped_single_string_fragment);
if (lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(283);
+ lookahead == 0x2029) ADVANCE(281);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '\'' &&
- lookahead != '\\') ADVANCE(282);
+ lookahead != '\\') ADVANCE(280);
END_STATE();
- case 283:
+ case 281:
ACCEPT_TOKEN(sym_unescaped_single_string_fragment);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != '\'' &&
- lookahead != '\\') ADVANCE(283);
+ lookahead != '\\') ADVANCE(281);
END_STATE();
- case 284:
+ case 282:
ACCEPT_TOKEN(sym_escape_sequence);
END_STATE();
- case 285:
+ case 283:
ACCEPT_TOKEN(sym_escape_sequence);
- if (lookahead == '\\') ADVANCE(105);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
- case 286:
+ case 284:
ACCEPT_TOKEN(sym_escape_sequence);
if (lookahead == '\n' ||
lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(284);
+ lookahead == 0x2029) ADVANCE(282);
END_STATE();
- case 287:
+ case 285:
ACCEPT_TOKEN(sym_escape_sequence);
- if (('0' <= lookahead && lookahead <= '7')) ADVANCE(284);
+ if (('0' <= lookahead && lookahead <= '7')) ADVANCE(282);
END_STATE();
- case 288:
+ case 286:
ACCEPT_TOKEN(sym_escape_sequence);
- if (('0' <= lookahead && lookahead <= '7')) ADVANCE(287);
+ if (('0' <= lookahead && lookahead <= '7')) ADVANCE(285);
END_STATE();
- case 289:
+ case 287:
ACCEPT_TOKEN(sym_comment);
END_STATE();
- case 290:
+ case 288:
ACCEPT_TOKEN(sym_comment);
if (lookahead == '\n' ||
lookahead == '\r' ||
lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(205);
+ lookahead == 0x2029) ADVANCE(203);
if (lookahead == '#' ||
('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(292);
- if (lookahead != 0) ADVANCE(208);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(290);
+ if (lookahead != 0) ADVANCE(206);
END_STATE();
- case 291:
+ case 289:
ACCEPT_TOKEN(sym_comment);
if (lookahead == '\n' ||
lookahead == '\r' ||
lookahead == 0x2028 ||
- lookahead == 0x2029) ADVANCE(211);
+ lookahead == 0x2029) ADVANCE(209);
if (lookahead == '#' ||
('A' <= lookahead && lookahead <= 'Z') ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(292);
- if (lookahead != 0) ADVANCE(214);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(290);
+ if (lookahead != 0) ADVANCE(212);
END_STATE();
- case 292:
+ case 290:
ACCEPT_TOKEN(sym_comment);
if (lookahead != 0 &&
lookahead != '\n' &&
lookahead != '\r' &&
lookahead != 0x2028 &&
- lookahead != 0x2029) ADVANCE(292);
+ lookahead != 0x2029) ADVANCE(290);
END_STATE();
- case 293:
+ case 291:
ACCEPT_TOKEN(anon_sym_BQUOTE);
END_STATE();
- case 294:
+ case 292:
ACCEPT_TOKEN(anon_sym_DOLLAR_LBRACE);
END_STATE();
- case 295:
+ case 293:
ACCEPT_TOKEN(anon_sym_SLASH2);
END_STATE();
- case 296:
+ case 294:
ACCEPT_TOKEN(sym_regex_pattern);
- if (lookahead == '\n') SKIP(39);
- if (lookahead == '/') ADVANCE(27);
- if (lookahead == '[') ADVANCE(84);
- if (lookahead == '\\') ADVANCE(145);
- if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(296);
- if (lookahead != 0) ADVANCE(297);
+ if (lookahead == '\n') SKIP(37);
+ if (lookahead == '/') ADVANCE(25);
+ if (lookahead == '[') ADVANCE(82);
+ if (lookahead == '\\') ADVANCE(143);
+ if (set_contains(extras_character_set_1, 10, lookahead)) ADVANCE(294);
+ if (lookahead != 0) ADVANCE(295);
END_STATE();
- case 297:
+ case 295:
ACCEPT_TOKEN(sym_regex_pattern);
- if (lookahead == '[') ADVANCE(84);
- if (lookahead == '\\') ADVANCE(145);
+ if (lookahead == '[') ADVANCE(82);
+ if (lookahead == '\\') ADVANCE(143);
if (lookahead != 0 &&
lookahead != '\n' &&
- lookahead != '/') ADVANCE(297);
+ lookahead != '/') ADVANCE(295);
END_STATE();
- case 298:
+ case 296:
ACCEPT_TOKEN(sym_regex_flags);
- if (lookahead == '\\') ADVANCE(105);
- if (('a' <= lookahead && lookahead <= 'z')) ADVANCE(298);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (('a' <= lookahead && lookahead <= 'z')) ADVANCE(296);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
- case 299:
+ case 297:
ACCEPT_TOKEN(sym_number);
END_STATE();
- case 300:
+ case 298:
ACCEPT_TOKEN(sym_number);
ADVANCE_MAP(
- '.', 308,
- '0', 302,
- '_', 122,
- 'n', 299,
- 'B', 118,
- 'b', 118,
- 'E', 116,
- 'e', 116,
- 'O', 119,
- 'o', 119,
- 'X', 127,
- 'x', 127,
+ '.', 306,
+ '0', 300,
+ '_', 120,
+ 'n', 297,
+ 'B', 116,
+ 'b', 116,
+ 'E', 114,
+ 'e', 114,
+ 'O', 117,
+ 'o', 117,
+ 'X', 125,
+ 'x', 125,
);
- if (('1' <= lookahead && lookahead <= '9')) ADVANCE(301);
+ if (('1' <= lookahead && lookahead <= '9')) ADVANCE(299);
END_STATE();
- case 301:
+ case 299:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '.') ADVANCE(308);
- if (lookahead == '_') ADVANCE(120);
- if (lookahead == 'n') ADVANCE(299);
+ if (lookahead == '.') ADVANCE(306);
+ if (lookahead == '_') ADVANCE(118);
+ if (lookahead == 'n') ADVANCE(297);
if (lookahead == 'E' ||
- lookahead == 'e') ADVANCE(116);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(301);
+ lookahead == 'e') ADVANCE(114);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(299);
END_STATE();
- case 302:
+ case 300:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '_') ADVANCE(122);
- if (lookahead == 'n') ADVANCE(299);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(302);
+ if (lookahead == '_') ADVANCE(120);
+ if (lookahead == 'n') ADVANCE(297);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(300);
END_STATE();
- case 303:
+ case 301:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '_') ADVANCE(118);
- if (lookahead == 'n') ADVANCE(299);
+ if (lookahead == '_') ADVANCE(116);
+ if (lookahead == 'n') ADVANCE(297);
if (lookahead == '0' ||
- lookahead == '1') ADVANCE(303);
+ lookahead == '1') ADVANCE(301);
END_STATE();
- case 304:
+ case 302:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '_') ADVANCE(119);
- if (lookahead == 'n') ADVANCE(299);
- if (('0' <= lookahead && lookahead <= '7')) ADVANCE(304);
+ if (lookahead == '_') ADVANCE(117);
+ if (lookahead == 'n') ADVANCE(297);
+ if (('0' <= lookahead && lookahead <= '7')) ADVANCE(302);
END_STATE();
- case 305:
+ case 303:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '_') ADVANCE(127);
- if (lookahead == 'n') ADVANCE(299);
+ if (lookahead == '_') ADVANCE(125);
+ if (lookahead == 'n') ADVANCE(297);
if (('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'F') ||
- ('a' <= lookahead && lookahead <= 'f')) ADVANCE(305);
+ ('a' <= lookahead && lookahead <= 'f')) ADVANCE(303);
END_STATE();
- case 306:
+ case 304:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '_') ADVANCE(121);
+ if (lookahead == '_') ADVANCE(119);
if (lookahead == 'E' ||
- lookahead == 'e') ADVANCE(116);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(306);
+ lookahead == 'e') ADVANCE(114);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(304);
END_STATE();
- case 307:
+ case 305:
ACCEPT_TOKEN(sym_number);
- if (lookahead == '_') ADVANCE(123);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(307);
+ if (lookahead == '_') ADVANCE(121);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(305);
END_STATE();
- case 308:
+ case 306:
ACCEPT_TOKEN(sym_number);
if (lookahead == 'E' ||
- lookahead == 'e') ADVANCE(116);
- if (('0' <= lookahead && lookahead <= '9')) ADVANCE(306);
+ lookahead == 'e') ADVANCE(114);
+ if (('0' <= lookahead && lookahead <= '9')) ADVANCE(304);
END_STATE();
- case 309:
+ case 307:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '-') ADVANCE(195);
- if (lookahead == '\\') ADVANCE(105);
+ if (lookahead == '-') ADVANCE(193);
+ if (lookahead == '\\') ADVANCE(103);
if (lookahead == '$' ||
('0' <= lookahead && lookahead <= '9') ||
('A' <= lookahead && lookahead <= 'Z') ||
lookahead == '_' ||
- ('a' <= lookahead && lookahead <= 'z')) ADVANCE(309);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ ('a' <= lookahead && lookahead <= 'z')) ADVANCE(307);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
+ END_STATE();
+ case 308:
+ ACCEPT_TOKEN(sym_identifier);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == 'a') ADVANCE(312);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
+ END_STATE();
+ case 309:
+ ACCEPT_TOKEN(sym_identifier);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == 'c') ADVANCE(314);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 310:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == 'a') ADVANCE(314);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == 'i') ADVANCE(309);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 311:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == 'c') ADVANCE(316);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == 't') ADVANCE(308);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 312:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == 'i') ADVANCE(311);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == 't') ADVANCE(310);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 313:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == 't') ADVANCE(310);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (lookahead == '{') ADVANCE(292);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 314:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == 't') ADVANCE(312);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (('\t' <= lookahead && lookahead <= '\r') ||
+ lookahead == ' ') ADVANCE(90);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 315:
ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (lookahead == '{') ADVANCE(294);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
+ if (lookahead == '\\') ADVANCE(103);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(315);
END_STATE();
case 316:
- ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (('\t' <= lookahead && lookahead <= '\r') ||
- lookahead == ' ') ADVANCE(92);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
- END_STATE();
- case 317:
- ACCEPT_TOKEN(sym_identifier);
- if (lookahead == '\\') ADVANCE(105);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(317);
- END_STATE();
- case 318:
ACCEPT_TOKEN(sym_private_property_identifier);
- if (lookahead == '\\') ADVANCE(104);
- if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(318);
+ if (lookahead == '\\') ADVANCE(102);
+ if (set_contains(sym_identifier_character_set_2, 15, lookahead)) ADVANCE(316);
END_STATE();
- case 319:
+ case 317:
ACCEPT_TOKEN(anon_sym_AT);
END_STATE();
- case 320:
+ case 318:
ACCEPT_TOKEN(aux_sym_method_definition_token1);
- if (lookahead == '\n') ADVANCE(320);
+ if (lookahead == '\n') ADVANCE(318);
if (('\t' <= lookahead && lookahead <= '\r') ||
lookahead == ' ') ADVANCE(1);
END_STATE();
@@ -6860,1699 +6841,1699 @@ static bool ts_lex_keywords(TSLexer *lexer, TSStateId state) {
static const TSLexMode ts_lex_modes[STATE_COUNT] = {
[0] = {.lex_state = 0, .external_lex_state = 1},
- [1] = {.lex_state = 148, .external_lex_state = 2},
- [2] = {.lex_state = 8, .external_lex_state = 2},
- [3] = {.lex_state = 8, .external_lex_state = 2},
- [4] = {.lex_state = 8, .external_lex_state = 2},
- [5] = {.lex_state = 8, .external_lex_state = 2},
- [6] = {.lex_state = 8, .external_lex_state = 2},
- [7] = {.lex_state = 148, .external_lex_state = 2},
- [8] = {.lex_state = 148, .external_lex_state = 2},
- [9] = {.lex_state = 148, .external_lex_state = 2},
- [10] = {.lex_state = 148, .external_lex_state = 2},
- [11] = {.lex_state = 148, .external_lex_state = 2},
- [12] = {.lex_state = 148, .external_lex_state = 2},
- [13] = {.lex_state = 148, .external_lex_state = 2},
- [14] = {.lex_state = 148, .external_lex_state = 2},
- [15] = {.lex_state = 148, .external_lex_state = 2},
- [16] = {.lex_state = 148, .external_lex_state = 2},
- [17] = {.lex_state = 148, .external_lex_state = 2},
- [18] = {.lex_state = 148, .external_lex_state = 2},
- [19] = {.lex_state = 148, .external_lex_state = 2},
- [20] = {.lex_state = 148, .external_lex_state = 2},
- [21] = {.lex_state = 148, .external_lex_state = 2},
- [22] = {.lex_state = 148, .external_lex_state = 2},
- [23] = {.lex_state = 148, .external_lex_state = 2},
- [24] = {.lex_state = 148, .external_lex_state = 2},
- [25] = {.lex_state = 148, .external_lex_state = 2},
- [26] = {.lex_state = 148, .external_lex_state = 2},
- [27] = {.lex_state = 148, .external_lex_state = 2},
- [28] = {.lex_state = 148, .external_lex_state = 2},
- [29] = {.lex_state = 148, .external_lex_state = 2},
- [30] = {.lex_state = 148, .external_lex_state = 2},
- [31] = {.lex_state = 148, .external_lex_state = 2},
- [32] = {.lex_state = 148, .external_lex_state = 2},
- [33] = {.lex_state = 148, .external_lex_state = 2},
- [34] = {.lex_state = 148, .external_lex_state = 2},
- [35] = {.lex_state = 148, .external_lex_state = 2},
- [36] = {.lex_state = 148, .external_lex_state = 2},
- [37] = {.lex_state = 148, .external_lex_state = 2},
- [38] = {.lex_state = 148, .external_lex_state = 2},
- [39] = {.lex_state = 148, .external_lex_state = 2},
- [40] = {.lex_state = 148, .external_lex_state = 2},
- [41] = {.lex_state = 148, .external_lex_state = 2},
- [42] = {.lex_state = 148, .external_lex_state = 2},
- [43] = {.lex_state = 148, .external_lex_state = 2},
- [44] = {.lex_state = 148, .external_lex_state = 2},
- [45] = {.lex_state = 148, .external_lex_state = 2},
- [46] = {.lex_state = 147, .external_lex_state = 3},
- [47] = {.lex_state = 147, .external_lex_state = 4},
- [48] = {.lex_state = 147, .external_lex_state = 4},
- [49] = {.lex_state = 147, .external_lex_state = 3},
- [50] = {.lex_state = 147, .external_lex_state = 3},
- [51] = {.lex_state = 147, .external_lex_state = 4},
- [52] = {.lex_state = 147, .external_lex_state = 4},
- [53] = {.lex_state = 147, .external_lex_state = 4},
- [54] = {.lex_state = 147, .external_lex_state = 4},
- [55] = {.lex_state = 147, .external_lex_state = 4},
- [56] = {.lex_state = 147, .external_lex_state = 4},
- [57] = {.lex_state = 148, .external_lex_state = 2},
- [58] = {.lex_state = 147, .external_lex_state = 4},
- [59] = {.lex_state = 147, .external_lex_state = 4},
- [60] = {.lex_state = 147, .external_lex_state = 4},
- [61] = {.lex_state = 148, .external_lex_state = 2},
- [62] = {.lex_state = 148, .external_lex_state = 2},
- [63] = {.lex_state = 148, .external_lex_state = 2},
- [64] = {.lex_state = 147, .external_lex_state = 4},
- [65] = {.lex_state = 148, .external_lex_state = 2},
- [66] = {.lex_state = 148, .external_lex_state = 2},
- [67] = {.lex_state = 147, .external_lex_state = 4},
- [68] = {.lex_state = 147, .external_lex_state = 4},
- [69] = {.lex_state = 148, .external_lex_state = 2},
- [70] = {.lex_state = 147, .external_lex_state = 4},
- [71] = {.lex_state = 148, .external_lex_state = 2},
- [72] = {.lex_state = 147, .external_lex_state = 4},
- [73] = {.lex_state = 147, .external_lex_state = 4},
- [74] = {.lex_state = 148, .external_lex_state = 2},
- [75] = {.lex_state = 148, .external_lex_state = 2},
- [76] = {.lex_state = 148, .external_lex_state = 2},
- [77] = {.lex_state = 148, .external_lex_state = 2},
- [78] = {.lex_state = 148, .external_lex_state = 2},
- [79] = {.lex_state = 148, .external_lex_state = 2},
- [80] = {.lex_state = 148, .external_lex_state = 2},
- [81] = {.lex_state = 148, .external_lex_state = 2},
- [82] = {.lex_state = 148, .external_lex_state = 2},
- [83] = {.lex_state = 148, .external_lex_state = 2},
- [84] = {.lex_state = 148, .external_lex_state = 2},
- [85] = {.lex_state = 148, .external_lex_state = 2},
- [86] = {.lex_state = 148, .external_lex_state = 2},
- [87] = {.lex_state = 148, .external_lex_state = 2},
- [88] = {.lex_state = 148, .external_lex_state = 2},
- [89] = {.lex_state = 148, .external_lex_state = 2},
- [90] = {.lex_state = 148, .external_lex_state = 2},
- [91] = {.lex_state = 148, .external_lex_state = 2},
- [92] = {.lex_state = 148, .external_lex_state = 2},
- [93] = {.lex_state = 148, .external_lex_state = 2},
- [94] = {.lex_state = 148, .external_lex_state = 2},
- [95] = {.lex_state = 148, .external_lex_state = 2},
- [96] = {.lex_state = 148, .external_lex_state = 2},
- [97] = {.lex_state = 148, .external_lex_state = 2},
- [98] = {.lex_state = 148, .external_lex_state = 2},
- [99] = {.lex_state = 148, .external_lex_state = 2},
- [100] = {.lex_state = 148, .external_lex_state = 2},
- [101] = {.lex_state = 148, .external_lex_state = 5},
- [102] = {.lex_state = 148, .external_lex_state = 2},
- [103] = {.lex_state = 148, .external_lex_state = 2},
- [104] = {.lex_state = 148, .external_lex_state = 2},
- [105] = {.lex_state = 148, .external_lex_state = 2},
- [106] = {.lex_state = 148, .external_lex_state = 2},
- [107] = {.lex_state = 148, .external_lex_state = 2},
- [108] = {.lex_state = 148, .external_lex_state = 2},
- [109] = {.lex_state = 148, .external_lex_state = 2},
- [110] = {.lex_state = 148, .external_lex_state = 2},
- [111] = {.lex_state = 148, .external_lex_state = 2},
- [112] = {.lex_state = 148, .external_lex_state = 2},
- [113] = {.lex_state = 148, .external_lex_state = 2},
- [114] = {.lex_state = 148, .external_lex_state = 2},
- [115] = {.lex_state = 148, .external_lex_state = 2},
- [116] = {.lex_state = 148, .external_lex_state = 2},
- [117] = {.lex_state = 9, .external_lex_state = 2},
- [118] = {.lex_state = 148, .external_lex_state = 2},
- [119] = {.lex_state = 148, .external_lex_state = 2},
- [120] = {.lex_state = 148, .external_lex_state = 2},
- [121] = {.lex_state = 148, .external_lex_state = 2},
- [122] = {.lex_state = 148, .external_lex_state = 2},
- [123] = {.lex_state = 148, .external_lex_state = 2},
- [124] = {.lex_state = 148, .external_lex_state = 2},
- [125] = {.lex_state = 148, .external_lex_state = 2},
- [126] = {.lex_state = 148, .external_lex_state = 2},
- [127] = {.lex_state = 9, .external_lex_state = 2},
- [128] = {.lex_state = 148, .external_lex_state = 2},
- [129] = {.lex_state = 148, .external_lex_state = 2},
- [130] = {.lex_state = 148, .external_lex_state = 2},
- [131] = {.lex_state = 148, .external_lex_state = 2},
- [132] = {.lex_state = 148, .external_lex_state = 2},
- [133] = {.lex_state = 148, .external_lex_state = 2},
- [134] = {.lex_state = 148, .external_lex_state = 2},
- [135] = {.lex_state = 148, .external_lex_state = 2},
- [136] = {.lex_state = 148, .external_lex_state = 2},
- [137] = {.lex_state = 148, .external_lex_state = 2},
- [138] = {.lex_state = 148, .external_lex_state = 2},
- [139] = {.lex_state = 148, .external_lex_state = 2},
- [140] = {.lex_state = 148, .external_lex_state = 2},
- [141] = {.lex_state = 148, .external_lex_state = 2},
- [142] = {.lex_state = 148, .external_lex_state = 2},
- [143] = {.lex_state = 148, .external_lex_state = 2},
- [144] = {.lex_state = 9, .external_lex_state = 2},
- [145] = {.lex_state = 148, .external_lex_state = 2},
- [146] = {.lex_state = 148, .external_lex_state = 2},
- [147] = {.lex_state = 148, .external_lex_state = 2},
- [148] = {.lex_state = 148, .external_lex_state = 2},
- [149] = {.lex_state = 148, .external_lex_state = 2},
- [150] = {.lex_state = 148, .external_lex_state = 2},
- [151] = {.lex_state = 148, .external_lex_state = 2},
- [152] = {.lex_state = 9, .external_lex_state = 2},
- [153] = {.lex_state = 9, .external_lex_state = 2},
- [154] = {.lex_state = 148, .external_lex_state = 2},
- [155] = {.lex_state = 148, .external_lex_state = 2},
- [156] = {.lex_state = 148, .external_lex_state = 2},
- [157] = {.lex_state = 148, .external_lex_state = 2},
- [158] = {.lex_state = 148, .external_lex_state = 2},
- [159] = {.lex_state = 148, .external_lex_state = 2},
- [160] = {.lex_state = 148, .external_lex_state = 2},
- [161] = {.lex_state = 148, .external_lex_state = 2},
- [162] = {.lex_state = 148, .external_lex_state = 2},
- [163] = {.lex_state = 148, .external_lex_state = 2},
- [164] = {.lex_state = 148, .external_lex_state = 2},
- [165] = {.lex_state = 148, .external_lex_state = 2},
- [166] = {.lex_state = 148, .external_lex_state = 2},
- [167] = {.lex_state = 148, .external_lex_state = 2},
- [168] = {.lex_state = 148, .external_lex_state = 2},
- [169] = {.lex_state = 148, .external_lex_state = 2},
- [170] = {.lex_state = 148, .external_lex_state = 2},
- [171] = {.lex_state = 148, .external_lex_state = 2},
- [172] = {.lex_state = 148, .external_lex_state = 2},
- [173] = {.lex_state = 148, .external_lex_state = 2},
- [174] = {.lex_state = 148, .external_lex_state = 2},
- [175] = {.lex_state = 148, .external_lex_state = 2},
- [176] = {.lex_state = 148, .external_lex_state = 2},
- [177] = {.lex_state = 148, .external_lex_state = 2},
- [178] = {.lex_state = 148, .external_lex_state = 2},
- [179] = {.lex_state = 148, .external_lex_state = 2},
- [180] = {.lex_state = 148, .external_lex_state = 2},
- [181] = {.lex_state = 148, .external_lex_state = 2},
- [182] = {.lex_state = 148, .external_lex_state = 2},
- [183] = {.lex_state = 148, .external_lex_state = 2},
- [184] = {.lex_state = 148, .external_lex_state = 2},
- [185] = {.lex_state = 148, .external_lex_state = 2},
- [186] = {.lex_state = 148, .external_lex_state = 2},
- [187] = {.lex_state = 148, .external_lex_state = 2},
- [188] = {.lex_state = 148, .external_lex_state = 2},
- [189] = {.lex_state = 148, .external_lex_state = 2},
- [190] = {.lex_state = 148, .external_lex_state = 2},
- [191] = {.lex_state = 148, .external_lex_state = 2},
- [192] = {.lex_state = 148, .external_lex_state = 2},
- [193] = {.lex_state = 148, .external_lex_state = 2},
- [194] = {.lex_state = 148, .external_lex_state = 2},
- [195] = {.lex_state = 148, .external_lex_state = 2},
- [196] = {.lex_state = 148, .external_lex_state = 2},
- [197] = {.lex_state = 148, .external_lex_state = 2},
- [198] = {.lex_state = 148, .external_lex_state = 2},
- [199] = {.lex_state = 148, .external_lex_state = 2},
- [200] = {.lex_state = 148, .external_lex_state = 2},
- [201] = {.lex_state = 148, .external_lex_state = 2},
- [202] = {.lex_state = 148, .external_lex_state = 2},
- [203] = {.lex_state = 148, .external_lex_state = 2},
- [204] = {.lex_state = 148, .external_lex_state = 2},
- [205] = {.lex_state = 148, .external_lex_state = 2},
- [206] = {.lex_state = 148, .external_lex_state = 2},
- [207] = {.lex_state = 148, .external_lex_state = 2},
- [208] = {.lex_state = 148, .external_lex_state = 2},
- [209] = {.lex_state = 148, .external_lex_state = 2},
- [210] = {.lex_state = 148, .external_lex_state = 2},
- [211] = {.lex_state = 148, .external_lex_state = 2},
- [212] = {.lex_state = 148, .external_lex_state = 2},
- [213] = {.lex_state = 148, .external_lex_state = 2},
- [214] = {.lex_state = 148, .external_lex_state = 2},
- [215] = {.lex_state = 148, .external_lex_state = 2},
- [216] = {.lex_state = 148, .external_lex_state = 2},
- [217] = {.lex_state = 148, .external_lex_state = 2},
- [218] = {.lex_state = 148, .external_lex_state = 2},
- [219] = {.lex_state = 148, .external_lex_state = 2},
- [220] = {.lex_state = 148, .external_lex_state = 2},
- [221] = {.lex_state = 148, .external_lex_state = 2},
- [222] = {.lex_state = 148, .external_lex_state = 2},
- [223] = {.lex_state = 148, .external_lex_state = 2},
- [224] = {.lex_state = 148, .external_lex_state = 2},
- [225] = {.lex_state = 148, .external_lex_state = 2},
- [226] = {.lex_state = 148, .external_lex_state = 2},
- [227] = {.lex_state = 148, .external_lex_state = 2},
- [228] = {.lex_state = 148, .external_lex_state = 2},
- [229] = {.lex_state = 148, .external_lex_state = 2},
- [230] = {.lex_state = 148, .external_lex_state = 2},
- [231] = {.lex_state = 148, .external_lex_state = 2},
- [232] = {.lex_state = 148, .external_lex_state = 2},
- [233] = {.lex_state = 148, .external_lex_state = 2},
- [234] = {.lex_state = 148, .external_lex_state = 2},
- [235] = {.lex_state = 148, .external_lex_state = 2},
- [236] = {.lex_state = 148, .external_lex_state = 2},
- [237] = {.lex_state = 148, .external_lex_state = 2},
- [238] = {.lex_state = 148, .external_lex_state = 2},
- [239] = {.lex_state = 148, .external_lex_state = 2},
- [240] = {.lex_state = 148, .external_lex_state = 2},
- [241] = {.lex_state = 148, .external_lex_state = 2},
- [242] = {.lex_state = 148, .external_lex_state = 2},
- [243] = {.lex_state = 148, .external_lex_state = 2},
- [244] = {.lex_state = 148, .external_lex_state = 2},
- [245] = {.lex_state = 148, .external_lex_state = 2},
- [246] = {.lex_state = 148, .external_lex_state = 2},
- [247] = {.lex_state = 148, .external_lex_state = 2},
- [248] = {.lex_state = 148, .external_lex_state = 2},
- [249] = {.lex_state = 148, .external_lex_state = 2},
- [250] = {.lex_state = 148, .external_lex_state = 2},
- [251] = {.lex_state = 148, .external_lex_state = 2},
- [252] = {.lex_state = 148, .external_lex_state = 2},
- [253] = {.lex_state = 148, .external_lex_state = 2},
- [254] = {.lex_state = 148, .external_lex_state = 2},
- [255] = {.lex_state = 148, .external_lex_state = 2},
- [256] = {.lex_state = 148, .external_lex_state = 2},
- [257] = {.lex_state = 148, .external_lex_state = 2},
- [258] = {.lex_state = 148, .external_lex_state = 2},
- [259] = {.lex_state = 148, .external_lex_state = 2},
- [260] = {.lex_state = 148, .external_lex_state = 2},
- [261] = {.lex_state = 148, .external_lex_state = 2},
- [262] = {.lex_state = 148, .external_lex_state = 2},
- [263] = {.lex_state = 148, .external_lex_state = 2},
- [264] = {.lex_state = 148, .external_lex_state = 2},
- [265] = {.lex_state = 148, .external_lex_state = 2},
- [266] = {.lex_state = 148, .external_lex_state = 2},
- [267] = {.lex_state = 148, .external_lex_state = 2},
- [268] = {.lex_state = 148, .external_lex_state = 2},
- [269] = {.lex_state = 148, .external_lex_state = 2},
- [270] = {.lex_state = 148, .external_lex_state = 2},
- [271] = {.lex_state = 148, .external_lex_state = 2},
- [272] = {.lex_state = 148, .external_lex_state = 2},
- [273] = {.lex_state = 148, .external_lex_state = 2},
- [274] = {.lex_state = 148, .external_lex_state = 2},
- [275] = {.lex_state = 148, .external_lex_state = 2},
- [276] = {.lex_state = 148, .external_lex_state = 2},
- [277] = {.lex_state = 10, .external_lex_state = 4},
- [278] = {.lex_state = 10, .external_lex_state = 4},
- [279] = {.lex_state = 10, .external_lex_state = 4},
- [280] = {.lex_state = 10, .external_lex_state = 4},
- [281] = {.lex_state = 10, .external_lex_state = 4},
- [282] = {.lex_state = 10, .external_lex_state = 4},
- [283] = {.lex_state = 10, .external_lex_state = 4},
- [284] = {.lex_state = 10, .external_lex_state = 4},
- [285] = {.lex_state = 10, .external_lex_state = 4},
- [286] = {.lex_state = 10, .external_lex_state = 4},
- [287] = {.lex_state = 10, .external_lex_state = 4},
- [288] = {.lex_state = 10, .external_lex_state = 4},
- [289] = {.lex_state = 10, .external_lex_state = 4},
- [290] = {.lex_state = 10, .external_lex_state = 4},
- [291] = {.lex_state = 148, .external_lex_state = 2},
- [292] = {.lex_state = 10, .external_lex_state = 3},
- [293] = {.lex_state = 10, .external_lex_state = 3},
- [294] = {.lex_state = 148, .external_lex_state = 5},
- [295] = {.lex_state = 148, .external_lex_state = 5},
- [296] = {.lex_state = 10, .external_lex_state = 4},
- [297] = {.lex_state = 10, .external_lex_state = 4},
- [298] = {.lex_state = 10, .external_lex_state = 3},
- [299] = {.lex_state = 10, .external_lex_state = 4},
- [300] = {.lex_state = 10, .external_lex_state = 3},
- [301] = {.lex_state = 10, .external_lex_state = 4},
- [302] = {.lex_state = 10, .external_lex_state = 3},
- [303] = {.lex_state = 10, .external_lex_state = 4},
- [304] = {.lex_state = 10, .external_lex_state = 4},
- [305] = {.lex_state = 148, .external_lex_state = 2},
- [306] = {.lex_state = 148, .external_lex_state = 2},
- [307] = {.lex_state = 10, .external_lex_state = 4},
- [308] = {.lex_state = 10, .external_lex_state = 4},
- [309] = {.lex_state = 10, .external_lex_state = 4},
- [310] = {.lex_state = 10, .external_lex_state = 4},
- [311] = {.lex_state = 10, .external_lex_state = 3},
- [312] = {.lex_state = 148, .external_lex_state = 2},
- [313] = {.lex_state = 10, .external_lex_state = 3},
- [314] = {.lex_state = 148, .external_lex_state = 5},
- [315] = {.lex_state = 10, .external_lex_state = 3},
- [316] = {.lex_state = 148, .external_lex_state = 5},
- [317] = {.lex_state = 148, .external_lex_state = 5},
- [318] = {.lex_state = 148, .external_lex_state = 5},
- [319] = {.lex_state = 148, .external_lex_state = 5},
- [320] = {.lex_state = 148, .external_lex_state = 5},
- [321] = {.lex_state = 10, .external_lex_state = 4},
- [322] = {.lex_state = 148, .external_lex_state = 5},
- [323] = {.lex_state = 148, .external_lex_state = 5},
- [324] = {.lex_state = 148, .external_lex_state = 2},
- [325] = {.lex_state = 148, .external_lex_state = 2},
- [326] = {.lex_state = 148, .external_lex_state = 2},
- [327] = {.lex_state = 148, .external_lex_state = 2},
- [328] = {.lex_state = 148, .external_lex_state = 5},
- [329] = {.lex_state = 148, .external_lex_state = 2},
- [330] = {.lex_state = 148, .external_lex_state = 5},
- [331] = {.lex_state = 10, .external_lex_state = 4},
- [332] = {.lex_state = 148, .external_lex_state = 5},
- [333] = {.lex_state = 148, .external_lex_state = 5},
- [334] = {.lex_state = 148, .external_lex_state = 5},
- [335] = {.lex_state = 148, .external_lex_state = 5},
- [336] = {.lex_state = 148, .external_lex_state = 5},
- [337] = {.lex_state = 148, .external_lex_state = 5},
- [338] = {.lex_state = 148, .external_lex_state = 2},
- [339] = {.lex_state = 148, .external_lex_state = 2},
- [340] = {.lex_state = 148, .external_lex_state = 2},
- [341] = {.lex_state = 148, .external_lex_state = 2},
- [342] = {.lex_state = 148, .external_lex_state = 2},
- [343] = {.lex_state = 148, .external_lex_state = 2},
- [344] = {.lex_state = 148, .external_lex_state = 2},
- [345] = {.lex_state = 148, .external_lex_state = 2},
- [346] = {.lex_state = 148, .external_lex_state = 2},
- [347] = {.lex_state = 148, .external_lex_state = 2},
- [348] = {.lex_state = 148, .external_lex_state = 2},
- [349] = {.lex_state = 148, .external_lex_state = 2},
- [350] = {.lex_state = 148, .external_lex_state = 2},
- [351] = {.lex_state = 148, .external_lex_state = 2},
- [352] = {.lex_state = 148, .external_lex_state = 2},
- [353] = {.lex_state = 148, .external_lex_state = 2},
- [354] = {.lex_state = 148, .external_lex_state = 2},
- [355] = {.lex_state = 148, .external_lex_state = 2},
- [356] = {.lex_state = 148, .external_lex_state = 2},
- [357] = {.lex_state = 148, .external_lex_state = 2},
- [358] = {.lex_state = 148, .external_lex_state = 2},
- [359] = {.lex_state = 148, .external_lex_state = 2},
- [360] = {.lex_state = 148, .external_lex_state = 2},
- [361] = {.lex_state = 10, .external_lex_state = 4},
- [362] = {.lex_state = 148, .external_lex_state = 2},
- [363] = {.lex_state = 148, .external_lex_state = 2},
- [364] = {.lex_state = 148, .external_lex_state = 2},
- [365] = {.lex_state = 148, .external_lex_state = 2},
- [366] = {.lex_state = 148, .external_lex_state = 2},
- [367] = {.lex_state = 148, .external_lex_state = 2},
- [368] = {.lex_state = 10, .external_lex_state = 3},
- [369] = {.lex_state = 148, .external_lex_state = 2},
- [370] = {.lex_state = 148, .external_lex_state = 2},
- [371] = {.lex_state = 148, .external_lex_state = 2},
- [372] = {.lex_state = 148, .external_lex_state = 2},
- [373] = {.lex_state = 148, .external_lex_state = 2},
- [374] = {.lex_state = 148, .external_lex_state = 2},
- [375] = {.lex_state = 10, .external_lex_state = 4},
- [376] = {.lex_state = 148, .external_lex_state = 2},
- [377] = {.lex_state = 10, .external_lex_state = 4},
- [378] = {.lex_state = 148, .external_lex_state = 2},
- [379] = {.lex_state = 148, .external_lex_state = 2},
- [380] = {.lex_state = 148, .external_lex_state = 2},
- [381] = {.lex_state = 148, .external_lex_state = 2},
- [382] = {.lex_state = 148, .external_lex_state = 2},
- [383] = {.lex_state = 148, .external_lex_state = 2},
- [384] = {.lex_state = 148, .external_lex_state = 2},
- [385] = {.lex_state = 148, .external_lex_state = 2},
- [386] = {.lex_state = 148, .external_lex_state = 2},
- [387] = {.lex_state = 148, .external_lex_state = 2},
- [388] = {.lex_state = 148, .external_lex_state = 2},
- [389] = {.lex_state = 148, .external_lex_state = 2},
- [390] = {.lex_state = 148, .external_lex_state = 2},
- [391] = {.lex_state = 148, .external_lex_state = 2},
- [392] = {.lex_state = 148, .external_lex_state = 2},
- [393] = {.lex_state = 148, .external_lex_state = 2},
- [394] = {.lex_state = 148, .external_lex_state = 2},
- [395] = {.lex_state = 148, .external_lex_state = 2},
- [396] = {.lex_state = 148, .external_lex_state = 2},
- [397] = {.lex_state = 10, .external_lex_state = 3},
- [398] = {.lex_state = 148, .external_lex_state = 2},
- [399] = {.lex_state = 10, .external_lex_state = 3},
- [400] = {.lex_state = 148, .external_lex_state = 2},
- [401] = {.lex_state = 148, .external_lex_state = 2},
- [402] = {.lex_state = 10, .external_lex_state = 4},
- [403] = {.lex_state = 10, .external_lex_state = 4},
- [404] = {.lex_state = 10, .external_lex_state = 4},
- [405] = {.lex_state = 10, .external_lex_state = 4},
- [406] = {.lex_state = 10, .external_lex_state = 3},
- [407] = {.lex_state = 10, .external_lex_state = 3},
- [408] = {.lex_state = 10, .external_lex_state = 3},
- [409] = {.lex_state = 10, .external_lex_state = 3},
- [410] = {.lex_state = 10, .external_lex_state = 3},
- [411] = {.lex_state = 10, .external_lex_state = 3},
- [412] = {.lex_state = 10, .external_lex_state = 3},
- [413] = {.lex_state = 10, .external_lex_state = 3},
- [414] = {.lex_state = 10, .external_lex_state = 3},
- [415] = {.lex_state = 148, .external_lex_state = 2},
- [416] = {.lex_state = 10, .external_lex_state = 3},
- [417] = {.lex_state = 10, .external_lex_state = 3},
- [418] = {.lex_state = 10, .external_lex_state = 3},
- [419] = {.lex_state = 10, .external_lex_state = 3},
- [420] = {.lex_state = 148, .external_lex_state = 2},
- [421] = {.lex_state = 148, .external_lex_state = 2},
- [422] = {.lex_state = 148, .external_lex_state = 2},
- [423] = {.lex_state = 148, .external_lex_state = 2},
- [424] = {.lex_state = 148, .external_lex_state = 2},
- [425] = {.lex_state = 148, .external_lex_state = 2},
- [426] = {.lex_state = 148, .external_lex_state = 2},
- [427] = {.lex_state = 10, .external_lex_state = 4},
- [428] = {.lex_state = 10, .external_lex_state = 3},
- [429] = {.lex_state = 10, .external_lex_state = 4},
- [430] = {.lex_state = 10, .external_lex_state = 4},
- [431] = {.lex_state = 10, .external_lex_state = 4},
- [432] = {.lex_state = 10, .external_lex_state = 4},
- [433] = {.lex_state = 10, .external_lex_state = 4},
- [434] = {.lex_state = 10, .external_lex_state = 3},
- [435] = {.lex_state = 10, .external_lex_state = 4},
- [436] = {.lex_state = 10, .external_lex_state = 4},
- [437] = {.lex_state = 10, .external_lex_state = 4},
- [438] = {.lex_state = 10, .external_lex_state = 4},
- [439] = {.lex_state = 10, .external_lex_state = 4},
- [440] = {.lex_state = 10, .external_lex_state = 4},
- [441] = {.lex_state = 10, .external_lex_state = 4},
- [442] = {.lex_state = 10, .external_lex_state = 4},
- [443] = {.lex_state = 10, .external_lex_state = 3},
- [444] = {.lex_state = 10, .external_lex_state = 3},
- [445] = {.lex_state = 10, .external_lex_state = 3},
- [446] = {.lex_state = 10, .external_lex_state = 4},
- [447] = {.lex_state = 10, .external_lex_state = 4},
- [448] = {.lex_state = 10, .external_lex_state = 4},
- [449] = {.lex_state = 10, .external_lex_state = 4},
- [450] = {.lex_state = 10, .external_lex_state = 4},
- [451] = {.lex_state = 10, .external_lex_state = 3},
- [452] = {.lex_state = 10, .external_lex_state = 4},
- [453] = {.lex_state = 10, .external_lex_state = 4},
- [454] = {.lex_state = 10, .external_lex_state = 4},
- [455] = {.lex_state = 10, .external_lex_state = 3},
- [456] = {.lex_state = 10, .external_lex_state = 4},
- [457] = {.lex_state = 10, .external_lex_state = 3},
- [458] = {.lex_state = 10, .external_lex_state = 4},
- [459] = {.lex_state = 10, .external_lex_state = 3},
- [460] = {.lex_state = 10, .external_lex_state = 4},
- [461] = {.lex_state = 10, .external_lex_state = 3},
- [462] = {.lex_state = 10, .external_lex_state = 3},
- [463] = {.lex_state = 10, .external_lex_state = 3},
- [464] = {.lex_state = 10, .external_lex_state = 3},
- [465] = {.lex_state = 10, .external_lex_state = 3},
- [466] = {.lex_state = 10, .external_lex_state = 3},
- [467] = {.lex_state = 10, .external_lex_state = 3},
- [468] = {.lex_state = 10, .external_lex_state = 3},
- [469] = {.lex_state = 10, .external_lex_state = 3},
- [470] = {.lex_state = 10, .external_lex_state = 3},
- [471] = {.lex_state = 10, .external_lex_state = 3},
- [472] = {.lex_state = 10, .external_lex_state = 3},
- [473] = {.lex_state = 10, .external_lex_state = 3},
- [474] = {.lex_state = 10, .external_lex_state = 3},
- [475] = {.lex_state = 10, .external_lex_state = 3},
- [476] = {.lex_state = 10, .external_lex_state = 3},
- [477] = {.lex_state = 10, .external_lex_state = 3},
- [478] = {.lex_state = 11, .external_lex_state = 3},
- [479] = {.lex_state = 11, .external_lex_state = 4},
- [480] = {.lex_state = 11, .external_lex_state = 3},
- [481] = {.lex_state = 11, .external_lex_state = 3},
- [482] = {.lex_state = 11, .external_lex_state = 3},
- [483] = {.lex_state = 11, .external_lex_state = 3},
- [484] = {.lex_state = 11, .external_lex_state = 3},
- [485] = {.lex_state = 11, .external_lex_state = 3},
- [486] = {.lex_state = 11, .external_lex_state = 4},
- [487] = {.lex_state = 11, .external_lex_state = 4},
- [488] = {.lex_state = 11, .external_lex_state = 3},
- [489] = {.lex_state = 11, .external_lex_state = 3},
- [490] = {.lex_state = 11, .external_lex_state = 3},
- [491] = {.lex_state = 11, .external_lex_state = 3},
- [492] = {.lex_state = 11, .external_lex_state = 4},
- [493] = {.lex_state = 11, .external_lex_state = 4},
- [494] = {.lex_state = 11, .external_lex_state = 4},
- [495] = {.lex_state = 11, .external_lex_state = 3},
- [496] = {.lex_state = 11, .external_lex_state = 3},
- [497] = {.lex_state = 11, .external_lex_state = 3},
- [498] = {.lex_state = 11, .external_lex_state = 3},
- [499] = {.lex_state = 11, .external_lex_state = 3},
- [500] = {.lex_state = 11, .external_lex_state = 3},
- [501] = {.lex_state = 11, .external_lex_state = 3},
- [502] = {.lex_state = 11, .external_lex_state = 3},
- [503] = {.lex_state = 11, .external_lex_state = 3},
- [504] = {.lex_state = 11, .external_lex_state = 3},
- [505] = {.lex_state = 11, .external_lex_state = 3},
- [506] = {.lex_state = 11, .external_lex_state = 3},
- [507] = {.lex_state = 11, .external_lex_state = 3},
- [508] = {.lex_state = 11, .external_lex_state = 3},
- [509] = {.lex_state = 11, .external_lex_state = 3},
- [510] = {.lex_state = 11, .external_lex_state = 3},
- [511] = {.lex_state = 11, .external_lex_state = 3},
- [512] = {.lex_state = 11, .external_lex_state = 3},
- [513] = {.lex_state = 11, .external_lex_state = 3},
- [514] = {.lex_state = 11, .external_lex_state = 3},
- [515] = {.lex_state = 11, .external_lex_state = 4},
- [516] = {.lex_state = 11, .external_lex_state = 3},
- [517] = {.lex_state = 11, .external_lex_state = 4},
- [518] = {.lex_state = 11, .external_lex_state = 4},
- [519] = {.lex_state = 11, .external_lex_state = 3},
- [520] = {.lex_state = 11, .external_lex_state = 3},
- [521] = {.lex_state = 11, .external_lex_state = 3},
- [522] = {.lex_state = 11, .external_lex_state = 3},
- [523] = {.lex_state = 11, .external_lex_state = 3},
- [524] = {.lex_state = 11, .external_lex_state = 3},
- [525] = {.lex_state = 11, .external_lex_state = 3},
- [526] = {.lex_state = 11, .external_lex_state = 3},
- [527] = {.lex_state = 11, .external_lex_state = 3},
- [528] = {.lex_state = 12, .external_lex_state = 3},
- [529] = {.lex_state = 11, .external_lex_state = 3},
- [530] = {.lex_state = 11, .external_lex_state = 3},
- [531] = {.lex_state = 11, .external_lex_state = 3},
- [532] = {.lex_state = 11, .external_lex_state = 3},
- [533] = {.lex_state = 11, .external_lex_state = 3},
- [534] = {.lex_state = 11, .external_lex_state = 3},
- [535] = {.lex_state = 11, .external_lex_state = 3},
- [536] = {.lex_state = 11, .external_lex_state = 3},
- [537] = {.lex_state = 11, .external_lex_state = 3},
- [538] = {.lex_state = 11, .external_lex_state = 4},
- [539] = {.lex_state = 11, .external_lex_state = 3},
- [540] = {.lex_state = 11, .external_lex_state = 3},
- [541] = {.lex_state = 11, .external_lex_state = 3},
- [542] = {.lex_state = 11, .external_lex_state = 3},
- [543] = {.lex_state = 11, .external_lex_state = 3},
- [544] = {.lex_state = 11, .external_lex_state = 3},
- [545] = {.lex_state = 11, .external_lex_state = 3},
- [546] = {.lex_state = 11, .external_lex_state = 3},
- [547] = {.lex_state = 11, .external_lex_state = 3},
- [548] = {.lex_state = 11, .external_lex_state = 3},
- [549] = {.lex_state = 11, .external_lex_state = 3},
- [550] = {.lex_state = 11, .external_lex_state = 3},
- [551] = {.lex_state = 11, .external_lex_state = 3},
- [552] = {.lex_state = 11, .external_lex_state = 3},
- [553] = {.lex_state = 11, .external_lex_state = 3},
- [554] = {.lex_state = 11, .external_lex_state = 3},
- [555] = {.lex_state = 11, .external_lex_state = 3},
- [556] = {.lex_state = 11, .external_lex_state = 4},
- [557] = {.lex_state = 11, .external_lex_state = 3},
- [558] = {.lex_state = 11, .external_lex_state = 3},
- [559] = {.lex_state = 11, .external_lex_state = 3},
- [560] = {.lex_state = 11, .external_lex_state = 3},
- [561] = {.lex_state = 11, .external_lex_state = 3},
- [562] = {.lex_state = 11, .external_lex_state = 3},
- [563] = {.lex_state = 11, .external_lex_state = 3},
- [564] = {.lex_state = 11, .external_lex_state = 3},
- [565] = {.lex_state = 11, .external_lex_state = 3},
- [566] = {.lex_state = 11, .external_lex_state = 3},
- [567] = {.lex_state = 11, .external_lex_state = 3},
- [568] = {.lex_state = 11, .external_lex_state = 3},
- [569] = {.lex_state = 11, .external_lex_state = 3},
- [570] = {.lex_state = 11, .external_lex_state = 3},
- [571] = {.lex_state = 11, .external_lex_state = 3},
- [572] = {.lex_state = 11, .external_lex_state = 3},
- [573] = {.lex_state = 11, .external_lex_state = 3},
- [574] = {.lex_state = 11, .external_lex_state = 3},
- [575] = {.lex_state = 11, .external_lex_state = 3},
- [576] = {.lex_state = 11, .external_lex_state = 3},
- [577] = {.lex_state = 11, .external_lex_state = 3},
- [578] = {.lex_state = 11, .external_lex_state = 3},
- [579] = {.lex_state = 11, .external_lex_state = 3},
- [580] = {.lex_state = 11, .external_lex_state = 3},
- [581] = {.lex_state = 11, .external_lex_state = 4},
- [582] = {.lex_state = 11, .external_lex_state = 3},
- [583] = {.lex_state = 11, .external_lex_state = 4},
- [584] = {.lex_state = 11, .external_lex_state = 4},
- [585] = {.lex_state = 11, .external_lex_state = 4},
- [586] = {.lex_state = 11, .external_lex_state = 3},
- [587] = {.lex_state = 11, .external_lex_state = 4},
- [588] = {.lex_state = 11, .external_lex_state = 4},
- [589] = {.lex_state = 11, .external_lex_state = 4},
- [590] = {.lex_state = 11, .external_lex_state = 4},
- [591] = {.lex_state = 11, .external_lex_state = 4},
- [592] = {.lex_state = 11, .external_lex_state = 3},
- [593] = {.lex_state = 11, .external_lex_state = 4},
- [594] = {.lex_state = 11, .external_lex_state = 3},
- [595] = {.lex_state = 11, .external_lex_state = 4},
- [596] = {.lex_state = 11, .external_lex_state = 4},
- [597] = {.lex_state = 11, .external_lex_state = 4},
- [598] = {.lex_state = 11, .external_lex_state = 4},
- [599] = {.lex_state = 11, .external_lex_state = 4},
- [600] = {.lex_state = 11, .external_lex_state = 4},
- [601] = {.lex_state = 11, .external_lex_state = 4},
- [602] = {.lex_state = 11, .external_lex_state = 4},
- [603] = {.lex_state = 11, .external_lex_state = 3},
- [604] = {.lex_state = 11, .external_lex_state = 3},
- [605] = {.lex_state = 11, .external_lex_state = 3},
- [606] = {.lex_state = 11, .external_lex_state = 4},
- [607] = {.lex_state = 11, .external_lex_state = 4},
- [608] = {.lex_state = 11, .external_lex_state = 4},
- [609] = {.lex_state = 11, .external_lex_state = 4},
- [610] = {.lex_state = 11, .external_lex_state = 4},
- [611] = {.lex_state = 11, .external_lex_state = 4},
- [612] = {.lex_state = 11, .external_lex_state = 4},
- [613] = {.lex_state = 11, .external_lex_state = 4},
- [614] = {.lex_state = 11, .external_lex_state = 4},
- [615] = {.lex_state = 11, .external_lex_state = 4},
- [616] = {.lex_state = 11, .external_lex_state = 4},
- [617] = {.lex_state = 11, .external_lex_state = 4},
- [618] = {.lex_state = 11, .external_lex_state = 4},
- [619] = {.lex_state = 11, .external_lex_state = 4},
- [620] = {.lex_state = 11, .external_lex_state = 4},
- [621] = {.lex_state = 11, .external_lex_state = 4},
- [622] = {.lex_state = 11, .external_lex_state = 4},
- [623] = {.lex_state = 11, .external_lex_state = 4},
- [624] = {.lex_state = 11, .external_lex_state = 4},
- [625] = {.lex_state = 11, .external_lex_state = 4},
- [626] = {.lex_state = 11, .external_lex_state = 4},
- [627] = {.lex_state = 11, .external_lex_state = 4},
- [628] = {.lex_state = 11, .external_lex_state = 4},
- [629] = {.lex_state = 11, .external_lex_state = 4},
- [630] = {.lex_state = 11, .external_lex_state = 4},
- [631] = {.lex_state = 11, .external_lex_state = 4},
- [632] = {.lex_state = 11, .external_lex_state = 4},
- [633] = {.lex_state = 11, .external_lex_state = 4},
- [634] = {.lex_state = 11, .external_lex_state = 4},
- [635] = {.lex_state = 11, .external_lex_state = 4},
- [636] = {.lex_state = 11, .external_lex_state = 4},
- [637] = {.lex_state = 11, .external_lex_state = 4},
- [638] = {.lex_state = 11, .external_lex_state = 4},
- [639] = {.lex_state = 11, .external_lex_state = 4},
- [640] = {.lex_state = 11, .external_lex_state = 4},
- [641] = {.lex_state = 11, .external_lex_state = 4},
- [642] = {.lex_state = 11, .external_lex_state = 4},
- [643] = {.lex_state = 11, .external_lex_state = 4},
- [644] = {.lex_state = 11, .external_lex_state = 4},
- [645] = {.lex_state = 11, .external_lex_state = 4},
- [646] = {.lex_state = 11, .external_lex_state = 4},
- [647] = {.lex_state = 11, .external_lex_state = 4},
- [648] = {.lex_state = 11, .external_lex_state = 4},
- [649] = {.lex_state = 11, .external_lex_state = 4},
- [650] = {.lex_state = 11, .external_lex_state = 4},
- [651] = {.lex_state = 11, .external_lex_state = 4},
- [652] = {.lex_state = 11, .external_lex_state = 4},
- [653] = {.lex_state = 11, .external_lex_state = 4},
- [654] = {.lex_state = 11, .external_lex_state = 4},
- [655] = {.lex_state = 11, .external_lex_state = 4},
- [656] = {.lex_state = 11, .external_lex_state = 4},
- [657] = {.lex_state = 11, .external_lex_state = 4},
- [658] = {.lex_state = 11, .external_lex_state = 4},
- [659] = {.lex_state = 11, .external_lex_state = 4},
- [660] = {.lex_state = 11, .external_lex_state = 4},
- [661] = {.lex_state = 11, .external_lex_state = 3},
- [662] = {.lex_state = 11, .external_lex_state = 4},
- [663] = {.lex_state = 11, .external_lex_state = 4},
- [664] = {.lex_state = 11, .external_lex_state = 3},
- [665] = {.lex_state = 11, .external_lex_state = 3},
- [666] = {.lex_state = 11, .external_lex_state = 4},
- [667] = {.lex_state = 11, .external_lex_state = 4},
- [668] = {.lex_state = 11, .external_lex_state = 4},
- [669] = {.lex_state = 12, .external_lex_state = 4},
- [670] = {.lex_state = 11, .external_lex_state = 4},
- [671] = {.lex_state = 11, .external_lex_state = 3},
- [672] = {.lex_state = 11, .external_lex_state = 3},
- [673] = {.lex_state = 11, .external_lex_state = 3},
- [674] = {.lex_state = 11, .external_lex_state = 4},
- [675] = {.lex_state = 11, .external_lex_state = 4},
- [676] = {.lex_state = 11, .external_lex_state = 4},
- [677] = {.lex_state = 11, .external_lex_state = 4},
- [678] = {.lex_state = 11, .external_lex_state = 3},
- [679] = {.lex_state = 11, .external_lex_state = 3},
- [680] = {.lex_state = 11, .external_lex_state = 3},
- [681] = {.lex_state = 11, .external_lex_state = 3},
- [682] = {.lex_state = 11, .external_lex_state = 3},
- [683] = {.lex_state = 11, .external_lex_state = 3},
- [684] = {.lex_state = 11, .external_lex_state = 4},
- [685] = {.lex_state = 11, .external_lex_state = 3},
- [686] = {.lex_state = 11, .external_lex_state = 3},
- [687] = {.lex_state = 11, .external_lex_state = 4},
- [688] = {.lex_state = 11, .external_lex_state = 4},
- [689] = {.lex_state = 11, .external_lex_state = 3},
- [690] = {.lex_state = 11, .external_lex_state = 3},
- [691] = {.lex_state = 11, .external_lex_state = 4},
- [692] = {.lex_state = 11, .external_lex_state = 4},
- [693] = {.lex_state = 11, .external_lex_state = 3},
- [694] = {.lex_state = 11, .external_lex_state = 3},
- [695] = {.lex_state = 11, .external_lex_state = 4},
- [696] = {.lex_state = 11, .external_lex_state = 3},
- [697] = {.lex_state = 11, .external_lex_state = 4},
- [698] = {.lex_state = 11, .external_lex_state = 4},
- [699] = {.lex_state = 11, .external_lex_state = 4},
- [700] = {.lex_state = 11, .external_lex_state = 4},
- [701] = {.lex_state = 11, .external_lex_state = 4},
- [702] = {.lex_state = 11, .external_lex_state = 4},
- [703] = {.lex_state = 11, .external_lex_state = 4},
- [704] = {.lex_state = 12, .external_lex_state = 4},
- [705] = {.lex_state = 11, .external_lex_state = 4},
- [706] = {.lex_state = 11, .external_lex_state = 3},
- [707] = {.lex_state = 11, .external_lex_state = 4},
- [708] = {.lex_state = 11, .external_lex_state = 4},
- [709] = {.lex_state = 11, .external_lex_state = 4},
- [710] = {.lex_state = 11, .external_lex_state = 3},
- [711] = {.lex_state = 11, .external_lex_state = 3},
- [712] = {.lex_state = 11, .external_lex_state = 4},
- [713] = {.lex_state = 11, .external_lex_state = 4},
- [714] = {.lex_state = 11, .external_lex_state = 3},
- [715] = {.lex_state = 11, .external_lex_state = 4},
- [716] = {.lex_state = 11, .external_lex_state = 4},
- [717] = {.lex_state = 11, .external_lex_state = 4},
- [718] = {.lex_state = 11, .external_lex_state = 4},
- [719] = {.lex_state = 11, .external_lex_state = 3},
- [720] = {.lex_state = 11, .external_lex_state = 4},
- [721] = {.lex_state = 11, .external_lex_state = 4},
- [722] = {.lex_state = 11, .external_lex_state = 4},
- [723] = {.lex_state = 11, .external_lex_state = 4},
- [724] = {.lex_state = 11, .external_lex_state = 4},
- [725] = {.lex_state = 11, .external_lex_state = 4},
- [726] = {.lex_state = 11, .external_lex_state = 3},
- [727] = {.lex_state = 11, .external_lex_state = 4},
- [728] = {.lex_state = 11, .external_lex_state = 4},
- [729] = {.lex_state = 11, .external_lex_state = 3},
- [730] = {.lex_state = 11, .external_lex_state = 4},
- [731] = {.lex_state = 11, .external_lex_state = 4},
- [732] = {.lex_state = 11, .external_lex_state = 4},
- [733] = {.lex_state = 11, .external_lex_state = 4},
- [734] = {.lex_state = 11, .external_lex_state = 3},
- [735] = {.lex_state = 11, .external_lex_state = 3},
- [736] = {.lex_state = 11, .external_lex_state = 4},
- [737] = {.lex_state = 11, .external_lex_state = 4},
- [738] = {.lex_state = 11, .external_lex_state = 3},
- [739] = {.lex_state = 11, .external_lex_state = 4},
- [740] = {.lex_state = 11, .external_lex_state = 4},
- [741] = {.lex_state = 11, .external_lex_state = 4},
- [742] = {.lex_state = 11, .external_lex_state = 4},
- [743] = {.lex_state = 11, .external_lex_state = 4},
- [744] = {.lex_state = 11, .external_lex_state = 4},
- [745] = {.lex_state = 11, .external_lex_state = 4},
- [746] = {.lex_state = 11, .external_lex_state = 3},
- [747] = {.lex_state = 11, .external_lex_state = 4},
- [748] = {.lex_state = 11, .external_lex_state = 4},
- [749] = {.lex_state = 11, .external_lex_state = 3},
- [750] = {.lex_state = 11, .external_lex_state = 4},
- [751] = {.lex_state = 11, .external_lex_state = 4},
- [752] = {.lex_state = 11, .external_lex_state = 4},
- [753] = {.lex_state = 11, .external_lex_state = 3},
- [754] = {.lex_state = 11, .external_lex_state = 4},
- [755] = {.lex_state = 11, .external_lex_state = 4},
- [756] = {.lex_state = 11, .external_lex_state = 3},
- [757] = {.lex_state = 11, .external_lex_state = 3},
- [758] = {.lex_state = 11, .external_lex_state = 4},
- [759] = {.lex_state = 11, .external_lex_state = 3},
- [760] = {.lex_state = 11, .external_lex_state = 3},
- [761] = {.lex_state = 11, .external_lex_state = 3},
- [762] = {.lex_state = 11, .external_lex_state = 3},
- [763] = {.lex_state = 11, .external_lex_state = 3},
- [764] = {.lex_state = 11, .external_lex_state = 3},
- [765] = {.lex_state = 11, .external_lex_state = 3},
- [766] = {.lex_state = 11, .external_lex_state = 3},
- [767] = {.lex_state = 11, .external_lex_state = 3},
- [768] = {.lex_state = 11, .external_lex_state = 3},
- [769] = {.lex_state = 11, .external_lex_state = 3},
- [770] = {.lex_state = 11, .external_lex_state = 3},
- [771] = {.lex_state = 11, .external_lex_state = 3},
- [772] = {.lex_state = 11, .external_lex_state = 3},
- [773] = {.lex_state = 11, .external_lex_state = 3},
- [774] = {.lex_state = 11, .external_lex_state = 3},
- [775] = {.lex_state = 11, .external_lex_state = 3},
- [776] = {.lex_state = 11, .external_lex_state = 4},
- [777] = {.lex_state = 11, .external_lex_state = 3},
- [778] = {.lex_state = 11, .external_lex_state = 3},
- [779] = {.lex_state = 11, .external_lex_state = 4},
- [780] = {.lex_state = 11, .external_lex_state = 3},
- [781] = {.lex_state = 11, .external_lex_state = 3},
- [782] = {.lex_state = 11, .external_lex_state = 3},
- [783] = {.lex_state = 11, .external_lex_state = 3},
- [784] = {.lex_state = 11, .external_lex_state = 3},
- [785] = {.lex_state = 11, .external_lex_state = 4},
- [786] = {.lex_state = 11, .external_lex_state = 3},
- [787] = {.lex_state = 11, .external_lex_state = 3},
- [788] = {.lex_state = 11, .external_lex_state = 3},
- [789] = {.lex_state = 11, .external_lex_state = 3},
- [790] = {.lex_state = 11, .external_lex_state = 3},
- [791] = {.lex_state = 11, .external_lex_state = 3},
- [792] = {.lex_state = 11, .external_lex_state = 3},
- [793] = {.lex_state = 11, .external_lex_state = 3},
- [794] = {.lex_state = 11, .external_lex_state = 3},
- [795] = {.lex_state = 11, .external_lex_state = 3},
- [796] = {.lex_state = 11, .external_lex_state = 3},
- [797] = {.lex_state = 11, .external_lex_state = 3},
- [798] = {.lex_state = 11, .external_lex_state = 3},
- [799] = {.lex_state = 11, .external_lex_state = 3},
- [800] = {.lex_state = 11, .external_lex_state = 3},
- [801] = {.lex_state = 11, .external_lex_state = 3},
- [802] = {.lex_state = 11, .external_lex_state = 3},
- [803] = {.lex_state = 11, .external_lex_state = 3},
- [804] = {.lex_state = 11, .external_lex_state = 3},
- [805] = {.lex_state = 11, .external_lex_state = 3},
- [806] = {.lex_state = 11, .external_lex_state = 3},
- [807] = {.lex_state = 11, .external_lex_state = 3},
- [808] = {.lex_state = 11, .external_lex_state = 3},
- [809] = {.lex_state = 11, .external_lex_state = 3},
- [810] = {.lex_state = 11, .external_lex_state = 3},
- [811] = {.lex_state = 11, .external_lex_state = 3},
- [812] = {.lex_state = 11, .external_lex_state = 3},
- [813] = {.lex_state = 11, .external_lex_state = 3},
- [814] = {.lex_state = 11, .external_lex_state = 3},
- [815] = {.lex_state = 11, .external_lex_state = 3},
- [816] = {.lex_state = 11, .external_lex_state = 3},
- [817] = {.lex_state = 11, .external_lex_state = 3},
- [818] = {.lex_state = 11, .external_lex_state = 3},
- [819] = {.lex_state = 11, .external_lex_state = 3},
- [820] = {.lex_state = 11, .external_lex_state = 3},
- [821] = {.lex_state = 11, .external_lex_state = 3},
- [822] = {.lex_state = 11, .external_lex_state = 3},
- [823] = {.lex_state = 11, .external_lex_state = 3},
- [824] = {.lex_state = 11, .external_lex_state = 3},
- [825] = {.lex_state = 12, .external_lex_state = 3},
- [826] = {.lex_state = 11, .external_lex_state = 3},
- [827] = {.lex_state = 11, .external_lex_state = 3},
- [828] = {.lex_state = 8, .external_lex_state = 2},
- [829] = {.lex_state = 8, .external_lex_state = 2},
- [830] = {.lex_state = 11, .external_lex_state = 3},
- [831] = {.lex_state = 11, .external_lex_state = 3},
- [832] = {.lex_state = 11, .external_lex_state = 3},
- [833] = {.lex_state = 8, .external_lex_state = 2},
- [834] = {.lex_state = 8, .external_lex_state = 2},
- [835] = {.lex_state = 8, .external_lex_state = 2},
- [836] = {.lex_state = 11, .external_lex_state = 3},
- [837] = {.lex_state = 8, .external_lex_state = 2},
- [838] = {.lex_state = 8, .external_lex_state = 2},
- [839] = {.lex_state = 11, .external_lex_state = 3},
- [840] = {.lex_state = 8, .external_lex_state = 2},
- [841] = {.lex_state = 8, .external_lex_state = 2},
- [842] = {.lex_state = 8, .external_lex_state = 2},
- [843] = {.lex_state = 8, .external_lex_state = 2},
- [844] = {.lex_state = 8, .external_lex_state = 2},
- [845] = {.lex_state = 8, .external_lex_state = 2},
- [846] = {.lex_state = 8, .external_lex_state = 2},
- [847] = {.lex_state = 8, .external_lex_state = 2},
- [848] = {.lex_state = 8, .external_lex_state = 2},
- [849] = {.lex_state = 8, .external_lex_state = 2},
- [850] = {.lex_state = 8, .external_lex_state = 2},
- [851] = {.lex_state = 8, .external_lex_state = 2},
- [852] = {.lex_state = 148, .external_lex_state = 2},
- [853] = {.lex_state = 148, .external_lex_state = 2},
- [854] = {.lex_state = 148, .external_lex_state = 2},
- [855] = {.lex_state = 148, .external_lex_state = 2},
- [856] = {.lex_state = 148, .external_lex_state = 2},
- [857] = {.lex_state = 148, .external_lex_state = 2},
- [858] = {.lex_state = 148, .external_lex_state = 2},
- [859] = {.lex_state = 148, .external_lex_state = 2},
- [860] = {.lex_state = 148, .external_lex_state = 2},
- [861] = {.lex_state = 148, .external_lex_state = 2},
- [862] = {.lex_state = 148, .external_lex_state = 2},
- [863] = {.lex_state = 148, .external_lex_state = 2},
- [864] = {.lex_state = 148, .external_lex_state = 2},
- [865] = {.lex_state = 148, .external_lex_state = 2},
- [866] = {.lex_state = 148, .external_lex_state = 2},
- [867] = {.lex_state = 148, .external_lex_state = 2},
- [868] = {.lex_state = 148, .external_lex_state = 2},
- [869] = {.lex_state = 148, .external_lex_state = 2},
- [870] = {.lex_state = 148, .external_lex_state = 5},
- [871] = {.lex_state = 148, .external_lex_state = 2},
- [872] = {.lex_state = 148, .external_lex_state = 2},
- [873] = {.lex_state = 148, .external_lex_state = 2},
- [874] = {.lex_state = 148, .external_lex_state = 2},
- [875] = {.lex_state = 148, .external_lex_state = 2},
- [876] = {.lex_state = 148, .external_lex_state = 2},
- [877] = {.lex_state = 8, .external_lex_state = 2},
- [878] = {.lex_state = 148, .external_lex_state = 2},
- [879] = {.lex_state = 148, .external_lex_state = 2},
- [880] = {.lex_state = 148, .external_lex_state = 2},
- [881] = {.lex_state = 148, .external_lex_state = 2},
- [882] = {.lex_state = 148, .external_lex_state = 2},
- [883] = {.lex_state = 148, .external_lex_state = 5},
- [884] = {.lex_state = 148, .external_lex_state = 2},
- [885] = {.lex_state = 148, .external_lex_state = 5},
- [886] = {.lex_state = 8, .external_lex_state = 2},
- [887] = {.lex_state = 148, .external_lex_state = 2},
- [888] = {.lex_state = 8, .external_lex_state = 2},
- [889] = {.lex_state = 148, .external_lex_state = 2},
- [890] = {.lex_state = 148, .external_lex_state = 5},
- [891] = {.lex_state = 148, .external_lex_state = 5},
- [892] = {.lex_state = 148, .external_lex_state = 5},
- [893] = {.lex_state = 8, .external_lex_state = 2},
- [894] = {.lex_state = 8, .external_lex_state = 2},
- [895] = {.lex_state = 8, .external_lex_state = 2},
- [896] = {.lex_state = 8, .external_lex_state = 2},
- [897] = {.lex_state = 8, .external_lex_state = 2},
- [898] = {.lex_state = 8, .external_lex_state = 2},
- [899] = {.lex_state = 14, .external_lex_state = 2},
- [900] = {.lex_state = 8, .external_lex_state = 2},
- [901] = {.lex_state = 148, .external_lex_state = 5},
- [902] = {.lex_state = 8, .external_lex_state = 2},
- [903] = {.lex_state = 8, .external_lex_state = 2},
- [904] = {.lex_state = 148, .external_lex_state = 5},
- [905] = {.lex_state = 148, .external_lex_state = 5},
- [906] = {.lex_state = 8, .external_lex_state = 2},
- [907] = {.lex_state = 8, .external_lex_state = 2},
- [908] = {.lex_state = 8, .external_lex_state = 2},
- [909] = {.lex_state = 8, .external_lex_state = 2},
- [910] = {.lex_state = 8, .external_lex_state = 2},
- [911] = {.lex_state = 8, .external_lex_state = 2},
- [912] = {.lex_state = 8, .external_lex_state = 2},
- [913] = {.lex_state = 148, .external_lex_state = 2},
- [914] = {.lex_state = 8, .external_lex_state = 2},
- [915] = {.lex_state = 8, .external_lex_state = 2},
- [916] = {.lex_state = 8, .external_lex_state = 2},
- [917] = {.lex_state = 148, .external_lex_state = 5},
- [918] = {.lex_state = 8, .external_lex_state = 5},
- [919] = {.lex_state = 8, .external_lex_state = 2},
- [920] = {.lex_state = 8, .external_lex_state = 5},
- [921] = {.lex_state = 148, .external_lex_state = 5},
- [922] = {.lex_state = 8, .external_lex_state = 2},
- [923] = {.lex_state = 8, .external_lex_state = 2},
- [924] = {.lex_state = 8, .external_lex_state = 2},
- [925] = {.lex_state = 8, .external_lex_state = 2},
- [926] = {.lex_state = 148, .external_lex_state = 5},
- [927] = {.lex_state = 8, .external_lex_state = 2},
- [928] = {.lex_state = 8, .external_lex_state = 2},
- [929] = {.lex_state = 8, .external_lex_state = 2},
- [930] = {.lex_state = 8, .external_lex_state = 2},
- [931] = {.lex_state = 14, .external_lex_state = 2},
- [932] = {.lex_state = 8, .external_lex_state = 2},
- [933] = {.lex_state = 8, .external_lex_state = 2},
- [934] = {.lex_state = 8, .external_lex_state = 2},
- [935] = {.lex_state = 8, .external_lex_state = 2},
- [936] = {.lex_state = 8, .external_lex_state = 2},
- [937] = {.lex_state = 8, .external_lex_state = 2},
- [938] = {.lex_state = 8, .external_lex_state = 2},
- [939] = {.lex_state = 8, .external_lex_state = 2},
- [940] = {.lex_state = 8, .external_lex_state = 2},
- [941] = {.lex_state = 8, .external_lex_state = 2},
- [942] = {.lex_state = 8, .external_lex_state = 2},
- [943] = {.lex_state = 148, .external_lex_state = 2},
- [944] = {.lex_state = 148, .external_lex_state = 2},
- [945] = {.lex_state = 148, .external_lex_state = 2},
- [946] = {.lex_state = 148, .external_lex_state = 2},
- [947] = {.lex_state = 148, .external_lex_state = 2},
- [948] = {.lex_state = 148, .external_lex_state = 2},
- [949] = {.lex_state = 148, .external_lex_state = 2},
- [950] = {.lex_state = 148, .external_lex_state = 2},
- [951] = {.lex_state = 148, .external_lex_state = 2},
- [952] = {.lex_state = 148, .external_lex_state = 2},
- [953] = {.lex_state = 148, .external_lex_state = 2},
- [954] = {.lex_state = 8, .external_lex_state = 2},
- [955] = {.lex_state = 148, .external_lex_state = 2},
- [956] = {.lex_state = 148, .external_lex_state = 2},
- [957] = {.lex_state = 148, .external_lex_state = 2},
- [958] = {.lex_state = 148, .external_lex_state = 2},
- [959] = {.lex_state = 8, .external_lex_state = 2},
- [960] = {.lex_state = 148, .external_lex_state = 2},
- [961] = {.lex_state = 148, .external_lex_state = 2},
- [962] = {.lex_state = 148, .external_lex_state = 2},
- [963] = {.lex_state = 148, .external_lex_state = 2},
- [964] = {.lex_state = 148, .external_lex_state = 2},
- [965] = {.lex_state = 148, .external_lex_state = 2},
- [966] = {.lex_state = 148, .external_lex_state = 2},
- [967] = {.lex_state = 148, .external_lex_state = 2},
- [968] = {.lex_state = 8, .external_lex_state = 2},
- [969] = {.lex_state = 148, .external_lex_state = 2},
- [970] = {.lex_state = 148, .external_lex_state = 2},
- [971] = {.lex_state = 148, .external_lex_state = 2},
- [972] = {.lex_state = 148, .external_lex_state = 2},
- [973] = {.lex_state = 148, .external_lex_state = 2},
- [974] = {.lex_state = 148, .external_lex_state = 2},
- [975] = {.lex_state = 148, .external_lex_state = 2},
- [976] = {.lex_state = 148, .external_lex_state = 2},
- [977] = {.lex_state = 148, .external_lex_state = 2},
- [978] = {.lex_state = 148, .external_lex_state = 2},
- [979] = {.lex_state = 8, .external_lex_state = 2},
- [980] = {.lex_state = 148, .external_lex_state = 2},
- [981] = {.lex_state = 8, .external_lex_state = 2},
- [982] = {.lex_state = 8, .external_lex_state = 2},
- [983] = {.lex_state = 148, .external_lex_state = 2},
- [984] = {.lex_state = 148, .external_lex_state = 2},
- [985] = {.lex_state = 148, .external_lex_state = 2},
- [986] = {.lex_state = 148, .external_lex_state = 2},
- [987] = {.lex_state = 148, .external_lex_state = 2},
- [988] = {.lex_state = 148, .external_lex_state = 2},
- [989] = {.lex_state = 148, .external_lex_state = 2},
- [990] = {.lex_state = 148, .external_lex_state = 2},
- [991] = {.lex_state = 148, .external_lex_state = 2},
- [992] = {.lex_state = 148, .external_lex_state = 2},
- [993] = {.lex_state = 148, .external_lex_state = 2},
- [994] = {.lex_state = 148, .external_lex_state = 2},
- [995] = {.lex_state = 148, .external_lex_state = 2},
- [996] = {.lex_state = 148, .external_lex_state = 2},
- [997] = {.lex_state = 10, .external_lex_state = 2},
- [998] = {.lex_state = 10, .external_lex_state = 2},
- [999] = {.lex_state = 148, .external_lex_state = 2},
- [1000] = {.lex_state = 148, .external_lex_state = 2},
- [1001] = {.lex_state = 148, .external_lex_state = 2},
- [1002] = {.lex_state = 148, .external_lex_state = 2},
- [1003] = {.lex_state = 148, .external_lex_state = 2},
- [1004] = {.lex_state = 10, .external_lex_state = 2},
- [1005] = {.lex_state = 10, .external_lex_state = 2},
- [1006] = {.lex_state = 148, .external_lex_state = 2},
- [1007] = {.lex_state = 148, .external_lex_state = 2},
- [1008] = {.lex_state = 148, .external_lex_state = 2},
- [1009] = {.lex_state = 10, .external_lex_state = 2},
- [1010] = {.lex_state = 148, .external_lex_state = 2},
- [1011] = {.lex_state = 23, .external_lex_state = 2},
- [1012] = {.lex_state = 23, .external_lex_state = 2},
- [1013] = {.lex_state = 23, .external_lex_state = 2},
- [1014] = {.lex_state = 23, .external_lex_state = 2},
- [1015] = {.lex_state = 23, .external_lex_state = 2},
- [1016] = {.lex_state = 23, .external_lex_state = 2},
- [1017] = {.lex_state = 23, .external_lex_state = 2},
- [1018] = {.lex_state = 23, .external_lex_state = 2},
- [1019] = {.lex_state = 147, .external_lex_state = 2},
- [1020] = {.lex_state = 23, .external_lex_state = 2},
- [1021] = {.lex_state = 37, .external_lex_state = 2},
- [1022] = {.lex_state = 37, .external_lex_state = 2},
- [1023] = {.lex_state = 37, .external_lex_state = 2},
- [1024] = {.lex_state = 37, .external_lex_state = 2},
- [1025] = {.lex_state = 37, .external_lex_state = 2},
- [1026] = {.lex_state = 37, .external_lex_state = 2},
- [1027] = {.lex_state = 37, .external_lex_state = 2},
- [1028] = {.lex_state = 37, .external_lex_state = 2},
- [1029] = {.lex_state = 37, .external_lex_state = 2},
- [1030] = {.lex_state = 37, .external_lex_state = 2},
- [1031] = {.lex_state = 37, .external_lex_state = 2},
- [1032] = {.lex_state = 37, .external_lex_state = 2},
- [1033] = {.lex_state = 37, .external_lex_state = 2},
- [1034] = {.lex_state = 37, .external_lex_state = 2},
- [1035] = {.lex_state = 37, .external_lex_state = 2},
- [1036] = {.lex_state = 37, .external_lex_state = 2},
- [1037] = {.lex_state = 37, .external_lex_state = 2},
- [1038] = {.lex_state = 37, .external_lex_state = 2},
- [1039] = {.lex_state = 37, .external_lex_state = 2},
- [1040] = {.lex_state = 14, .external_lex_state = 2},
- [1041] = {.lex_state = 37, .external_lex_state = 2},
- [1042] = {.lex_state = 37, .external_lex_state = 2},
- [1043] = {.lex_state = 37, .external_lex_state = 2},
- [1044] = {.lex_state = 37, .external_lex_state = 2},
- [1045] = {.lex_state = 37, .external_lex_state = 2},
- [1046] = {.lex_state = 37, .external_lex_state = 2},
- [1047] = {.lex_state = 37, .external_lex_state = 2},
- [1048] = {.lex_state = 37, .external_lex_state = 2},
- [1049] = {.lex_state = 37, .external_lex_state = 2},
- [1050] = {.lex_state = 14, .external_lex_state = 2},
- [1051] = {.lex_state = 37, .external_lex_state = 2},
- [1052] = {.lex_state = 37, .external_lex_state = 2},
- [1053] = {.lex_state = 37, .external_lex_state = 2},
- [1054] = {.lex_state = 37, .external_lex_state = 2},
- [1055] = {.lex_state = 37, .external_lex_state = 2},
- [1056] = {.lex_state = 148, .external_lex_state = 2},
- [1057] = {.lex_state = 148, .external_lex_state = 2},
- [1058] = {.lex_state = 148, .external_lex_state = 2},
- [1059] = {.lex_state = 148, .external_lex_state = 2},
- [1060] = {.lex_state = 148, .external_lex_state = 2},
- [1061] = {.lex_state = 148, .external_lex_state = 2},
- [1062] = {.lex_state = 148, .external_lex_state = 2},
- [1063] = {.lex_state = 148, .external_lex_state = 2},
- [1064] = {.lex_state = 148, .external_lex_state = 2},
- [1065] = {.lex_state = 148, .external_lex_state = 2},
- [1066] = {.lex_state = 148, .external_lex_state = 2},
- [1067] = {.lex_state = 148, .external_lex_state = 2},
- [1068] = {.lex_state = 148, .external_lex_state = 2},
- [1069] = {.lex_state = 148, .external_lex_state = 2},
- [1070] = {.lex_state = 148, .external_lex_state = 2},
- [1071] = {.lex_state = 148, .external_lex_state = 2},
- [1072] = {.lex_state = 148, .external_lex_state = 2},
- [1073] = {.lex_state = 37, .external_lex_state = 2},
- [1074] = {.lex_state = 148, .external_lex_state = 2},
- [1075] = {.lex_state = 148, .external_lex_state = 2},
- [1076] = {.lex_state = 148, .external_lex_state = 2},
- [1077] = {.lex_state = 148, .external_lex_state = 2},
- [1078] = {.lex_state = 148, .external_lex_state = 5},
- [1079] = {.lex_state = 148, .external_lex_state = 5},
- [1080] = {.lex_state = 148, .external_lex_state = 2},
- [1081] = {.lex_state = 148, .external_lex_state = 2},
- [1082] = {.lex_state = 23, .external_lex_state = 2},
- [1083] = {.lex_state = 23, .external_lex_state = 2},
- [1084] = {.lex_state = 21, .external_lex_state = 6},
- [1085] = {.lex_state = 147, .external_lex_state = 2},
- [1086] = {.lex_state = 148, .external_lex_state = 5},
- [1087] = {.lex_state = 148, .external_lex_state = 5},
- [1088] = {.lex_state = 23, .external_lex_state = 2},
- [1089] = {.lex_state = 21, .external_lex_state = 6},
- [1090] = {.lex_state = 23, .external_lex_state = 2},
- [1091] = {.lex_state = 23, .external_lex_state = 2},
- [1092] = {.lex_state = 23, .external_lex_state = 2},
- [1093] = {.lex_state = 21, .external_lex_state = 6},
- [1094] = {.lex_state = 148, .external_lex_state = 2},
- [1095] = {.lex_state = 147, .external_lex_state = 2},
- [1096] = {.lex_state = 23, .external_lex_state = 2},
- [1097] = {.lex_state = 23, .external_lex_state = 2},
- [1098] = {.lex_state = 23, .external_lex_state = 2},
- [1099] = {.lex_state = 148, .external_lex_state = 5},
- [1100] = {.lex_state = 148, .external_lex_state = 5},
- [1101] = {.lex_state = 23, .external_lex_state = 2},
- [1102] = {.lex_state = 23, .external_lex_state = 2},
- [1103] = {.lex_state = 23, .external_lex_state = 2},
- [1104] = {.lex_state = 37, .external_lex_state = 2},
- [1105] = {.lex_state = 148, .external_lex_state = 2},
- [1106] = {.lex_state = 37, .external_lex_state = 2},
- [1107] = {.lex_state = 23, .external_lex_state = 2},
- [1108] = {.lex_state = 148, .external_lex_state = 5},
- [1109] = {.lex_state = 23, .external_lex_state = 2},
- [1110] = {.lex_state = 148, .external_lex_state = 2},
- [1111] = {.lex_state = 23, .external_lex_state = 2},
- [1112] = {.lex_state = 148, .external_lex_state = 2},
- [1113] = {.lex_state = 37, .external_lex_state = 2},
- [1114] = {.lex_state = 148, .external_lex_state = 5},
- [1115] = {.lex_state = 148, .external_lex_state = 5},
- [1116] = {.lex_state = 23, .external_lex_state = 2},
- [1117] = {.lex_state = 148, .external_lex_state = 5},
- [1118] = {.lex_state = 21, .external_lex_state = 6},
- [1119] = {.lex_state = 148, .external_lex_state = 2},
- [1120] = {.lex_state = 148, .external_lex_state = 2},
- [1121] = {.lex_state = 21, .external_lex_state = 6},
- [1122] = {.lex_state = 148, .external_lex_state = 2},
- [1123] = {.lex_state = 148, .external_lex_state = 2},
- [1124] = {.lex_state = 148, .external_lex_state = 2},
- [1125] = {.lex_state = 148, .external_lex_state = 5},
- [1126] = {.lex_state = 148, .external_lex_state = 5},
- [1127] = {.lex_state = 148, .external_lex_state = 5},
- [1128] = {.lex_state = 148, .external_lex_state = 5},
- [1129] = {.lex_state = 23, .external_lex_state = 2},
- [1130] = {.lex_state = 23, .external_lex_state = 2},
- [1131] = {.lex_state = 148, .external_lex_state = 2},
- [1132] = {.lex_state = 148, .external_lex_state = 2},
- [1133] = {.lex_state = 148, .external_lex_state = 2},
- [1134] = {.lex_state = 37, .external_lex_state = 2},
- [1135] = {.lex_state = 148, .external_lex_state = 2},
- [1136] = {.lex_state = 148, .external_lex_state = 2},
- [1137] = {.lex_state = 37, .external_lex_state = 2},
- [1138] = {.lex_state = 148, .external_lex_state = 2},
- [1139] = {.lex_state = 148, .external_lex_state = 2},
- [1140] = {.lex_state = 148, .external_lex_state = 2},
- [1141] = {.lex_state = 148, .external_lex_state = 2},
- [1142] = {.lex_state = 148, .external_lex_state = 2},
- [1143] = {.lex_state = 148, .external_lex_state = 5},
- [1144] = {.lex_state = 37, .external_lex_state = 2},
- [1145] = {.lex_state = 37, .external_lex_state = 2},
- [1146] = {.lex_state = 37, .external_lex_state = 2},
- [1147] = {.lex_state = 148, .external_lex_state = 2},
- [1148] = {.lex_state = 148, .external_lex_state = 2},
- [1149] = {.lex_state = 37, .external_lex_state = 2},
- [1150] = {.lex_state = 37, .external_lex_state = 2},
- [1151] = {.lex_state = 37, .external_lex_state = 2},
- [1152] = {.lex_state = 37, .external_lex_state = 2},
- [1153] = {.lex_state = 37, .external_lex_state = 2},
- [1154] = {.lex_state = 37, .external_lex_state = 2},
- [1155] = {.lex_state = 37, .external_lex_state = 2},
- [1156] = {.lex_state = 148, .external_lex_state = 2},
- [1157] = {.lex_state = 148, .external_lex_state = 2},
- [1158] = {.lex_state = 148, .external_lex_state = 2},
- [1159] = {.lex_state = 148, .external_lex_state = 2},
- [1160] = {.lex_state = 148, .external_lex_state = 2},
- [1161] = {.lex_state = 148, .external_lex_state = 5},
- [1162] = {.lex_state = 37, .external_lex_state = 2},
- [1163] = {.lex_state = 37, .external_lex_state = 2},
- [1164] = {.lex_state = 37, .external_lex_state = 2},
- [1165] = {.lex_state = 37, .external_lex_state = 2},
- [1166] = {.lex_state = 37, .external_lex_state = 2},
- [1167] = {.lex_state = 148, .external_lex_state = 2},
- [1168] = {.lex_state = 148, .external_lex_state = 2},
- [1169] = {.lex_state = 37, .external_lex_state = 2},
- [1170] = {.lex_state = 37, .external_lex_state = 2},
- [1171] = {.lex_state = 148, .external_lex_state = 2},
- [1172] = {.lex_state = 148, .external_lex_state = 2},
- [1173] = {.lex_state = 148, .external_lex_state = 2},
- [1174] = {.lex_state = 37, .external_lex_state = 2},
- [1175] = {.lex_state = 148, .external_lex_state = 5},
- [1176] = {.lex_state = 148, .external_lex_state = 2},
- [1177] = {.lex_state = 148, .external_lex_state = 5},
- [1178] = {.lex_state = 37, .external_lex_state = 2},
- [1179] = {.lex_state = 37, .external_lex_state = 2},
- [1180] = {.lex_state = 37, .external_lex_state = 2},
- [1181] = {.lex_state = 148, .external_lex_state = 2},
- [1182] = {.lex_state = 37, .external_lex_state = 2},
- [1183] = {.lex_state = 148, .external_lex_state = 2},
- [1184] = {.lex_state = 21, .external_lex_state = 6},
- [1185] = {.lex_state = 15, .external_lex_state = 2},
- [1186] = {.lex_state = 148, .external_lex_state = 2},
- [1187] = {.lex_state = 17, .external_lex_state = 7},
- [1188] = {.lex_state = 26, .external_lex_state = 7},
- [1189] = {.lex_state = 17, .external_lex_state = 7},
- [1190] = {.lex_state = 26, .external_lex_state = 7},
- [1191] = {.lex_state = 148, .external_lex_state = 2},
- [1192] = {.lex_state = 148, .external_lex_state = 2},
- [1193] = {.lex_state = 148, .external_lex_state = 2},
- [1194] = {.lex_state = 148, .external_lex_state = 2},
- [1195] = {.lex_state = 148, .external_lex_state = 2},
- [1196] = {.lex_state = 24, .external_lex_state = 2},
- [1197] = {.lex_state = 148, .external_lex_state = 5},
- [1198] = {.lex_state = 15, .external_lex_state = 2},
- [1199] = {.lex_state = 148, .external_lex_state = 2},
- [1200] = {.lex_state = 17, .external_lex_state = 7},
- [1201] = {.lex_state = 17, .external_lex_state = 7},
- [1202] = {.lex_state = 148, .external_lex_state = 2},
- [1203] = {.lex_state = 26, .external_lex_state = 7},
- [1204] = {.lex_state = 148, .external_lex_state = 2},
- [1205] = {.lex_state = 24, .external_lex_state = 2},
- [1206] = {.lex_state = 148, .external_lex_state = 2},
- [1207] = {.lex_state = 26, .external_lex_state = 7},
- [1208] = {.lex_state = 148, .external_lex_state = 2},
- [1209] = {.lex_state = 148, .external_lex_state = 5},
- [1210] = {.lex_state = 148, .external_lex_state = 2},
- [1211] = {.lex_state = 148, .external_lex_state = 2},
- [1212] = {.lex_state = 148, .external_lex_state = 5},
- [1213] = {.lex_state = 148, .external_lex_state = 2},
- [1214] = {.lex_state = 148, .external_lex_state = 2},
- [1215] = {.lex_state = 17, .external_lex_state = 7},
- [1216] = {.lex_state = 148, .external_lex_state = 2},
- [1217] = {.lex_state = 26, .external_lex_state = 7},
- [1218] = {.lex_state = 148, .external_lex_state = 2},
- [1219] = {.lex_state = 148, .external_lex_state = 2},
- [1220] = {.lex_state = 17, .external_lex_state = 7},
- [1221] = {.lex_state = 26, .external_lex_state = 7},
- [1222] = {.lex_state = 17, .external_lex_state = 7},
- [1223] = {.lex_state = 148, .external_lex_state = 2},
- [1224] = {.lex_state = 148, .external_lex_state = 5},
- [1225] = {.lex_state = 148, .external_lex_state = 2},
- [1226] = {.lex_state = 148, .external_lex_state = 2},
- [1227] = {.lex_state = 148, .external_lex_state = 2},
- [1228] = {.lex_state = 148, .external_lex_state = 2},
- [1229] = {.lex_state = 148, .external_lex_state = 5},
- [1230] = {.lex_state = 26, .external_lex_state = 7},
- [1231] = {.lex_state = 148, .external_lex_state = 2},
- [1232] = {.lex_state = 148, .external_lex_state = 2},
- [1233] = {.lex_state = 17, .external_lex_state = 7},
- [1234] = {.lex_state = 17, .external_lex_state = 7},
- [1235] = {.lex_state = 15, .external_lex_state = 2},
- [1236] = {.lex_state = 24, .external_lex_state = 2},
- [1237] = {.lex_state = 148, .external_lex_state = 2},
- [1238] = {.lex_state = 148, .external_lex_state = 5},
- [1239] = {.lex_state = 148, .external_lex_state = 2},
- [1240] = {.lex_state = 148, .external_lex_state = 5},
- [1241] = {.lex_state = 148, .external_lex_state = 5},
- [1242] = {.lex_state = 147, .external_lex_state = 2},
- [1243] = {.lex_state = 148, .external_lex_state = 2},
- [1244] = {.lex_state = 148, .external_lex_state = 5},
- [1245] = {.lex_state = 148, .external_lex_state = 2},
- [1246] = {.lex_state = 148, .external_lex_state = 5},
- [1247] = {.lex_state = 26, .external_lex_state = 7},
- [1248] = {.lex_state = 148, .external_lex_state = 2},
- [1249] = {.lex_state = 26, .external_lex_state = 7},
- [1250] = {.lex_state = 148, .external_lex_state = 5},
- [1251] = {.lex_state = 148, .external_lex_state = 2},
- [1252] = {.lex_state = 148, .external_lex_state = 2},
- [1253] = {.lex_state = 148, .external_lex_state = 2},
- [1254] = {.lex_state = 148, .external_lex_state = 2},
- [1255] = {.lex_state = 148, .external_lex_state = 2},
- [1256] = {.lex_state = 148, .external_lex_state = 5},
+ [1] = {.lex_state = 146, .external_lex_state = 2},
+ [2] = {.lex_state = 6, .external_lex_state = 2},
+ [3] = {.lex_state = 6, .external_lex_state = 2},
+ [4] = {.lex_state = 6, .external_lex_state = 2},
+ [5] = {.lex_state = 6, .external_lex_state = 2},
+ [6] = {.lex_state = 6, .external_lex_state = 2},
+ [7] = {.lex_state = 146, .external_lex_state = 2},
+ [8] = {.lex_state = 146, .external_lex_state = 2},
+ [9] = {.lex_state = 146, .external_lex_state = 2},
+ [10] = {.lex_state = 146, .external_lex_state = 2},
+ [11] = {.lex_state = 146, .external_lex_state = 2},
+ [12] = {.lex_state = 146, .external_lex_state = 2},
+ [13] = {.lex_state = 146, .external_lex_state = 2},
+ [14] = {.lex_state = 146, .external_lex_state = 2},
+ [15] = {.lex_state = 146, .external_lex_state = 2},
+ [16] = {.lex_state = 146, .external_lex_state = 2},
+ [17] = {.lex_state = 146, .external_lex_state = 2},
+ [18] = {.lex_state = 146, .external_lex_state = 2},
+ [19] = {.lex_state = 146, .external_lex_state = 2},
+ [20] = {.lex_state = 146, .external_lex_state = 2},
+ [21] = {.lex_state = 146, .external_lex_state = 2},
+ [22] = {.lex_state = 146, .external_lex_state = 2},
+ [23] = {.lex_state = 146, .external_lex_state = 2},
+ [24] = {.lex_state = 146, .external_lex_state = 2},
+ [25] = {.lex_state = 146, .external_lex_state = 2},
+ [26] = {.lex_state = 146, .external_lex_state = 2},
+ [27] = {.lex_state = 146, .external_lex_state = 2},
+ [28] = {.lex_state = 146, .external_lex_state = 2},
+ [29] = {.lex_state = 146, .external_lex_state = 2},
+ [30] = {.lex_state = 146, .external_lex_state = 2},
+ [31] = {.lex_state = 146, .external_lex_state = 2},
+ [32] = {.lex_state = 146, .external_lex_state = 2},
+ [33] = {.lex_state = 146, .external_lex_state = 2},
+ [34] = {.lex_state = 146, .external_lex_state = 2},
+ [35] = {.lex_state = 146, .external_lex_state = 2},
+ [36] = {.lex_state = 146, .external_lex_state = 2},
+ [37] = {.lex_state = 146, .external_lex_state = 2},
+ [38] = {.lex_state = 146, .external_lex_state = 2},
+ [39] = {.lex_state = 146, .external_lex_state = 2},
+ [40] = {.lex_state = 146, .external_lex_state = 2},
+ [41] = {.lex_state = 146, .external_lex_state = 2},
+ [42] = {.lex_state = 146, .external_lex_state = 2},
+ [43] = {.lex_state = 146, .external_lex_state = 2},
+ [44] = {.lex_state = 146, .external_lex_state = 2},
+ [45] = {.lex_state = 146, .external_lex_state = 2},
+ [46] = {.lex_state = 145, .external_lex_state = 3},
+ [47] = {.lex_state = 145, .external_lex_state = 4},
+ [48] = {.lex_state = 145, .external_lex_state = 4},
+ [49] = {.lex_state = 145, .external_lex_state = 3},
+ [50] = {.lex_state = 145, .external_lex_state = 3},
+ [51] = {.lex_state = 145, .external_lex_state = 4},
+ [52] = {.lex_state = 145, .external_lex_state = 4},
+ [53] = {.lex_state = 145, .external_lex_state = 4},
+ [54] = {.lex_state = 145, .external_lex_state = 4},
+ [55] = {.lex_state = 145, .external_lex_state = 4},
+ [56] = {.lex_state = 145, .external_lex_state = 4},
+ [57] = {.lex_state = 146, .external_lex_state = 2},
+ [58] = {.lex_state = 145, .external_lex_state = 4},
+ [59] = {.lex_state = 145, .external_lex_state = 4},
+ [60] = {.lex_state = 145, .external_lex_state = 4},
+ [61] = {.lex_state = 146, .external_lex_state = 2},
+ [62] = {.lex_state = 146, .external_lex_state = 2},
+ [63] = {.lex_state = 146, .external_lex_state = 2},
+ [64] = {.lex_state = 145, .external_lex_state = 4},
+ [65] = {.lex_state = 146, .external_lex_state = 2},
+ [66] = {.lex_state = 146, .external_lex_state = 2},
+ [67] = {.lex_state = 145, .external_lex_state = 4},
+ [68] = {.lex_state = 145, .external_lex_state = 4},
+ [69] = {.lex_state = 146, .external_lex_state = 2},
+ [70] = {.lex_state = 145, .external_lex_state = 4},
+ [71] = {.lex_state = 146, .external_lex_state = 2},
+ [72] = {.lex_state = 145, .external_lex_state = 4},
+ [73] = {.lex_state = 145, .external_lex_state = 4},
+ [74] = {.lex_state = 146, .external_lex_state = 2},
+ [75] = {.lex_state = 146, .external_lex_state = 2},
+ [76] = {.lex_state = 146, .external_lex_state = 2},
+ [77] = {.lex_state = 146, .external_lex_state = 2},
+ [78] = {.lex_state = 146, .external_lex_state = 2},
+ [79] = {.lex_state = 146, .external_lex_state = 2},
+ [80] = {.lex_state = 146, .external_lex_state = 2},
+ [81] = {.lex_state = 146, .external_lex_state = 2},
+ [82] = {.lex_state = 146, .external_lex_state = 2},
+ [83] = {.lex_state = 146, .external_lex_state = 2},
+ [84] = {.lex_state = 146, .external_lex_state = 2},
+ [85] = {.lex_state = 146, .external_lex_state = 2},
+ [86] = {.lex_state = 146, .external_lex_state = 2},
+ [87] = {.lex_state = 146, .external_lex_state = 2},
+ [88] = {.lex_state = 146, .external_lex_state = 2},
+ [89] = {.lex_state = 146, .external_lex_state = 2},
+ [90] = {.lex_state = 146, .external_lex_state = 2},
+ [91] = {.lex_state = 146, .external_lex_state = 2},
+ [92] = {.lex_state = 146, .external_lex_state = 2},
+ [93] = {.lex_state = 146, .external_lex_state = 2},
+ [94] = {.lex_state = 146, .external_lex_state = 2},
+ [95] = {.lex_state = 146, .external_lex_state = 2},
+ [96] = {.lex_state = 146, .external_lex_state = 2},
+ [97] = {.lex_state = 146, .external_lex_state = 2},
+ [98] = {.lex_state = 146, .external_lex_state = 2},
+ [99] = {.lex_state = 146, .external_lex_state = 2},
+ [100] = {.lex_state = 146, .external_lex_state = 2},
+ [101] = {.lex_state = 146, .external_lex_state = 5},
+ [102] = {.lex_state = 146, .external_lex_state = 2},
+ [103] = {.lex_state = 146, .external_lex_state = 2},
+ [104] = {.lex_state = 146, .external_lex_state = 2},
+ [105] = {.lex_state = 146, .external_lex_state = 2},
+ [106] = {.lex_state = 146, .external_lex_state = 2},
+ [107] = {.lex_state = 146, .external_lex_state = 2},
+ [108] = {.lex_state = 146, .external_lex_state = 2},
+ [109] = {.lex_state = 146, .external_lex_state = 2},
+ [110] = {.lex_state = 146, .external_lex_state = 2},
+ [111] = {.lex_state = 146, .external_lex_state = 2},
+ [112] = {.lex_state = 146, .external_lex_state = 2},
+ [113] = {.lex_state = 146, .external_lex_state = 2},
+ [114] = {.lex_state = 146, .external_lex_state = 2},
+ [115] = {.lex_state = 146, .external_lex_state = 2},
+ [116] = {.lex_state = 146, .external_lex_state = 2},
+ [117] = {.lex_state = 7, .external_lex_state = 2},
+ [118] = {.lex_state = 146, .external_lex_state = 2},
+ [119] = {.lex_state = 146, .external_lex_state = 2},
+ [120] = {.lex_state = 146, .external_lex_state = 2},
+ [121] = {.lex_state = 146, .external_lex_state = 2},
+ [122] = {.lex_state = 146, .external_lex_state = 2},
+ [123] = {.lex_state = 146, .external_lex_state = 2},
+ [124] = {.lex_state = 146, .external_lex_state = 2},
+ [125] = {.lex_state = 146, .external_lex_state = 2},
+ [126] = {.lex_state = 146, .external_lex_state = 2},
+ [127] = {.lex_state = 7, .external_lex_state = 2},
+ [128] = {.lex_state = 146, .external_lex_state = 2},
+ [129] = {.lex_state = 146, .external_lex_state = 2},
+ [130] = {.lex_state = 146, .external_lex_state = 2},
+ [131] = {.lex_state = 146, .external_lex_state = 2},
+ [132] = {.lex_state = 146, .external_lex_state = 2},
+ [133] = {.lex_state = 146, .external_lex_state = 2},
+ [134] = {.lex_state = 146, .external_lex_state = 2},
+ [135] = {.lex_state = 146, .external_lex_state = 2},
+ [136] = {.lex_state = 146, .external_lex_state = 2},
+ [137] = {.lex_state = 146, .external_lex_state = 2},
+ [138] = {.lex_state = 146, .external_lex_state = 2},
+ [139] = {.lex_state = 146, .external_lex_state = 2},
+ [140] = {.lex_state = 146, .external_lex_state = 2},
+ [141] = {.lex_state = 146, .external_lex_state = 2},
+ [142] = {.lex_state = 146, .external_lex_state = 2},
+ [143] = {.lex_state = 146, .external_lex_state = 2},
+ [144] = {.lex_state = 7, .external_lex_state = 2},
+ [145] = {.lex_state = 146, .external_lex_state = 2},
+ [146] = {.lex_state = 146, .external_lex_state = 2},
+ [147] = {.lex_state = 146, .external_lex_state = 2},
+ [148] = {.lex_state = 146, .external_lex_state = 2},
+ [149] = {.lex_state = 146, .external_lex_state = 2},
+ [150] = {.lex_state = 146, .external_lex_state = 2},
+ [151] = {.lex_state = 146, .external_lex_state = 2},
+ [152] = {.lex_state = 7, .external_lex_state = 2},
+ [153] = {.lex_state = 7, .external_lex_state = 2},
+ [154] = {.lex_state = 146, .external_lex_state = 2},
+ [155] = {.lex_state = 146, .external_lex_state = 2},
+ [156] = {.lex_state = 146, .external_lex_state = 2},
+ [157] = {.lex_state = 146, .external_lex_state = 2},
+ [158] = {.lex_state = 146, .external_lex_state = 2},
+ [159] = {.lex_state = 146, .external_lex_state = 2},
+ [160] = {.lex_state = 146, .external_lex_state = 2},
+ [161] = {.lex_state = 146, .external_lex_state = 2},
+ [162] = {.lex_state = 146, .external_lex_state = 2},
+ [163] = {.lex_state = 146, .external_lex_state = 2},
+ [164] = {.lex_state = 146, .external_lex_state = 2},
+ [165] = {.lex_state = 146, .external_lex_state = 2},
+ [166] = {.lex_state = 146, .external_lex_state = 2},
+ [167] = {.lex_state = 146, .external_lex_state = 2},
+ [168] = {.lex_state = 146, .external_lex_state = 2},
+ [169] = {.lex_state = 146, .external_lex_state = 2},
+ [170] = {.lex_state = 146, .external_lex_state = 2},
+ [171] = {.lex_state = 146, .external_lex_state = 2},
+ [172] = {.lex_state = 146, .external_lex_state = 2},
+ [173] = {.lex_state = 146, .external_lex_state = 2},
+ [174] = {.lex_state = 146, .external_lex_state = 2},
+ [175] = {.lex_state = 146, .external_lex_state = 2},
+ [176] = {.lex_state = 146, .external_lex_state = 2},
+ [177] = {.lex_state = 146, .external_lex_state = 2},
+ [178] = {.lex_state = 146, .external_lex_state = 2},
+ [179] = {.lex_state = 146, .external_lex_state = 2},
+ [180] = {.lex_state = 146, .external_lex_state = 2},
+ [181] = {.lex_state = 146, .external_lex_state = 2},
+ [182] = {.lex_state = 146, .external_lex_state = 2},
+ [183] = {.lex_state = 146, .external_lex_state = 2},
+ [184] = {.lex_state = 146, .external_lex_state = 2},
+ [185] = {.lex_state = 146, .external_lex_state = 2},
+ [186] = {.lex_state = 146, .external_lex_state = 2},
+ [187] = {.lex_state = 146, .external_lex_state = 2},
+ [188] = {.lex_state = 146, .external_lex_state = 2},
+ [189] = {.lex_state = 146, .external_lex_state = 2},
+ [190] = {.lex_state = 146, .external_lex_state = 2},
+ [191] = {.lex_state = 146, .external_lex_state = 2},
+ [192] = {.lex_state = 146, .external_lex_state = 2},
+ [193] = {.lex_state = 146, .external_lex_state = 2},
+ [194] = {.lex_state = 146, .external_lex_state = 2},
+ [195] = {.lex_state = 146, .external_lex_state = 2},
+ [196] = {.lex_state = 146, .external_lex_state = 2},
+ [197] = {.lex_state = 146, .external_lex_state = 2},
+ [198] = {.lex_state = 146, .external_lex_state = 2},
+ [199] = {.lex_state = 146, .external_lex_state = 2},
+ [200] = {.lex_state = 146, .external_lex_state = 2},
+ [201] = {.lex_state = 146, .external_lex_state = 2},
+ [202] = {.lex_state = 146, .external_lex_state = 2},
+ [203] = {.lex_state = 146, .external_lex_state = 2},
+ [204] = {.lex_state = 146, .external_lex_state = 2},
+ [205] = {.lex_state = 146, .external_lex_state = 2},
+ [206] = {.lex_state = 146, .external_lex_state = 2},
+ [207] = {.lex_state = 146, .external_lex_state = 2},
+ [208] = {.lex_state = 146, .external_lex_state = 2},
+ [209] = {.lex_state = 146, .external_lex_state = 2},
+ [210] = {.lex_state = 146, .external_lex_state = 2},
+ [211] = {.lex_state = 146, .external_lex_state = 2},
+ [212] = {.lex_state = 146, .external_lex_state = 2},
+ [213] = {.lex_state = 146, .external_lex_state = 2},
+ [214] = {.lex_state = 146, .external_lex_state = 2},
+ [215] = {.lex_state = 146, .external_lex_state = 2},
+ [216] = {.lex_state = 146, .external_lex_state = 2},
+ [217] = {.lex_state = 146, .external_lex_state = 2},
+ [218] = {.lex_state = 146, .external_lex_state = 2},
+ [219] = {.lex_state = 146, .external_lex_state = 2},
+ [220] = {.lex_state = 146, .external_lex_state = 2},
+ [221] = {.lex_state = 146, .external_lex_state = 2},
+ [222] = {.lex_state = 146, .external_lex_state = 2},
+ [223] = {.lex_state = 146, .external_lex_state = 2},
+ [224] = {.lex_state = 146, .external_lex_state = 2},
+ [225] = {.lex_state = 146, .external_lex_state = 2},
+ [226] = {.lex_state = 146, .external_lex_state = 2},
+ [227] = {.lex_state = 146, .external_lex_state = 2},
+ [228] = {.lex_state = 146, .external_lex_state = 2},
+ [229] = {.lex_state = 146, .external_lex_state = 2},
+ [230] = {.lex_state = 146, .external_lex_state = 2},
+ [231] = {.lex_state = 146, .external_lex_state = 2},
+ [232] = {.lex_state = 146, .external_lex_state = 2},
+ [233] = {.lex_state = 146, .external_lex_state = 2},
+ [234] = {.lex_state = 146, .external_lex_state = 2},
+ [235] = {.lex_state = 146, .external_lex_state = 2},
+ [236] = {.lex_state = 146, .external_lex_state = 2},
+ [237] = {.lex_state = 146, .external_lex_state = 2},
+ [238] = {.lex_state = 146, .external_lex_state = 2},
+ [239] = {.lex_state = 146, .external_lex_state = 2},
+ [240] = {.lex_state = 146, .external_lex_state = 2},
+ [241] = {.lex_state = 146, .external_lex_state = 2},
+ [242] = {.lex_state = 146, .external_lex_state = 2},
+ [243] = {.lex_state = 146, .external_lex_state = 2},
+ [244] = {.lex_state = 146, .external_lex_state = 2},
+ [245] = {.lex_state = 146, .external_lex_state = 2},
+ [246] = {.lex_state = 146, .external_lex_state = 2},
+ [247] = {.lex_state = 146, .external_lex_state = 2},
+ [248] = {.lex_state = 146, .external_lex_state = 2},
+ [249] = {.lex_state = 146, .external_lex_state = 2},
+ [250] = {.lex_state = 146, .external_lex_state = 2},
+ [251] = {.lex_state = 146, .external_lex_state = 2},
+ [252] = {.lex_state = 146, .external_lex_state = 2},
+ [253] = {.lex_state = 146, .external_lex_state = 2},
+ [254] = {.lex_state = 146, .external_lex_state = 2},
+ [255] = {.lex_state = 146, .external_lex_state = 2},
+ [256] = {.lex_state = 146, .external_lex_state = 2},
+ [257] = {.lex_state = 146, .external_lex_state = 2},
+ [258] = {.lex_state = 146, .external_lex_state = 2},
+ [259] = {.lex_state = 146, .external_lex_state = 2},
+ [260] = {.lex_state = 146, .external_lex_state = 2},
+ [261] = {.lex_state = 146, .external_lex_state = 2},
+ [262] = {.lex_state = 146, .external_lex_state = 2},
+ [263] = {.lex_state = 146, .external_lex_state = 2},
+ [264] = {.lex_state = 146, .external_lex_state = 2},
+ [265] = {.lex_state = 146, .external_lex_state = 2},
+ [266] = {.lex_state = 146, .external_lex_state = 2},
+ [267] = {.lex_state = 146, .external_lex_state = 2},
+ [268] = {.lex_state = 146, .external_lex_state = 2},
+ [269] = {.lex_state = 146, .external_lex_state = 2},
+ [270] = {.lex_state = 146, .external_lex_state = 2},
+ [271] = {.lex_state = 146, .external_lex_state = 2},
+ [272] = {.lex_state = 146, .external_lex_state = 2},
+ [273] = {.lex_state = 146, .external_lex_state = 2},
+ [274] = {.lex_state = 146, .external_lex_state = 2},
+ [275] = {.lex_state = 146, .external_lex_state = 2},
+ [276] = {.lex_state = 146, .external_lex_state = 2},
+ [277] = {.lex_state = 8, .external_lex_state = 4},
+ [278] = {.lex_state = 8, .external_lex_state = 4},
+ [279] = {.lex_state = 8, .external_lex_state = 4},
+ [280] = {.lex_state = 8, .external_lex_state = 4},
+ [281] = {.lex_state = 8, .external_lex_state = 4},
+ [282] = {.lex_state = 8, .external_lex_state = 4},
+ [283] = {.lex_state = 8, .external_lex_state = 4},
+ [284] = {.lex_state = 8, .external_lex_state = 4},
+ [285] = {.lex_state = 8, .external_lex_state = 4},
+ [286] = {.lex_state = 8, .external_lex_state = 4},
+ [287] = {.lex_state = 8, .external_lex_state = 4},
+ [288] = {.lex_state = 8, .external_lex_state = 4},
+ [289] = {.lex_state = 8, .external_lex_state = 4},
+ [290] = {.lex_state = 8, .external_lex_state = 4},
+ [291] = {.lex_state = 146, .external_lex_state = 2},
+ [292] = {.lex_state = 8, .external_lex_state = 3},
+ [293] = {.lex_state = 8, .external_lex_state = 3},
+ [294] = {.lex_state = 146, .external_lex_state = 5},
+ [295] = {.lex_state = 146, .external_lex_state = 5},
+ [296] = {.lex_state = 8, .external_lex_state = 4},
+ [297] = {.lex_state = 8, .external_lex_state = 4},
+ [298] = {.lex_state = 8, .external_lex_state = 3},
+ [299] = {.lex_state = 8, .external_lex_state = 4},
+ [300] = {.lex_state = 8, .external_lex_state = 3},
+ [301] = {.lex_state = 8, .external_lex_state = 4},
+ [302] = {.lex_state = 8, .external_lex_state = 3},
+ [303] = {.lex_state = 8, .external_lex_state = 4},
+ [304] = {.lex_state = 8, .external_lex_state = 4},
+ [305] = {.lex_state = 146, .external_lex_state = 2},
+ [306] = {.lex_state = 146, .external_lex_state = 2},
+ [307] = {.lex_state = 8, .external_lex_state = 4},
+ [308] = {.lex_state = 8, .external_lex_state = 4},
+ [309] = {.lex_state = 8, .external_lex_state = 4},
+ [310] = {.lex_state = 8, .external_lex_state = 4},
+ [311] = {.lex_state = 8, .external_lex_state = 3},
+ [312] = {.lex_state = 146, .external_lex_state = 2},
+ [313] = {.lex_state = 8, .external_lex_state = 3},
+ [314] = {.lex_state = 146, .external_lex_state = 5},
+ [315] = {.lex_state = 8, .external_lex_state = 3},
+ [316] = {.lex_state = 146, .external_lex_state = 5},
+ [317] = {.lex_state = 146, .external_lex_state = 5},
+ [318] = {.lex_state = 146, .external_lex_state = 5},
+ [319] = {.lex_state = 146, .external_lex_state = 5},
+ [320] = {.lex_state = 146, .external_lex_state = 5},
+ [321] = {.lex_state = 8, .external_lex_state = 4},
+ [322] = {.lex_state = 146, .external_lex_state = 5},
+ [323] = {.lex_state = 146, .external_lex_state = 5},
+ [324] = {.lex_state = 146, .external_lex_state = 2},
+ [325] = {.lex_state = 146, .external_lex_state = 2},
+ [326] = {.lex_state = 146, .external_lex_state = 2},
+ [327] = {.lex_state = 146, .external_lex_state = 2},
+ [328] = {.lex_state = 146, .external_lex_state = 5},
+ [329] = {.lex_state = 146, .external_lex_state = 2},
+ [330] = {.lex_state = 146, .external_lex_state = 5},
+ [331] = {.lex_state = 8, .external_lex_state = 4},
+ [332] = {.lex_state = 146, .external_lex_state = 5},
+ [333] = {.lex_state = 146, .external_lex_state = 5},
+ [334] = {.lex_state = 146, .external_lex_state = 5},
+ [335] = {.lex_state = 146, .external_lex_state = 5},
+ [336] = {.lex_state = 146, .external_lex_state = 5},
+ [337] = {.lex_state = 146, .external_lex_state = 5},
+ [338] = {.lex_state = 146, .external_lex_state = 2},
+ [339] = {.lex_state = 146, .external_lex_state = 2},
+ [340] = {.lex_state = 146, .external_lex_state = 2},
+ [341] = {.lex_state = 146, .external_lex_state = 2},
+ [342] = {.lex_state = 146, .external_lex_state = 2},
+ [343] = {.lex_state = 146, .external_lex_state = 2},
+ [344] = {.lex_state = 146, .external_lex_state = 2},
+ [345] = {.lex_state = 146, .external_lex_state = 2},
+ [346] = {.lex_state = 146, .external_lex_state = 2},
+ [347] = {.lex_state = 146, .external_lex_state = 2},
+ [348] = {.lex_state = 146, .external_lex_state = 2},
+ [349] = {.lex_state = 146, .external_lex_state = 2},
+ [350] = {.lex_state = 146, .external_lex_state = 2},
+ [351] = {.lex_state = 146, .external_lex_state = 2},
+ [352] = {.lex_state = 146, .external_lex_state = 2},
+ [353] = {.lex_state = 146, .external_lex_state = 2},
+ [354] = {.lex_state = 146, .external_lex_state = 2},
+ [355] = {.lex_state = 146, .external_lex_state = 2},
+ [356] = {.lex_state = 146, .external_lex_state = 2},
+ [357] = {.lex_state = 146, .external_lex_state = 2},
+ [358] = {.lex_state = 146, .external_lex_state = 2},
+ [359] = {.lex_state = 146, .external_lex_state = 2},
+ [360] = {.lex_state = 146, .external_lex_state = 2},
+ [361] = {.lex_state = 8, .external_lex_state = 4},
+ [362] = {.lex_state = 146, .external_lex_state = 2},
+ [363] = {.lex_state = 146, .external_lex_state = 2},
+ [364] = {.lex_state = 146, .external_lex_state = 2},
+ [365] = {.lex_state = 146, .external_lex_state = 2},
+ [366] = {.lex_state = 146, .external_lex_state = 2},
+ [367] = {.lex_state = 146, .external_lex_state = 2},
+ [368] = {.lex_state = 8, .external_lex_state = 3},
+ [369] = {.lex_state = 146, .external_lex_state = 2},
+ [370] = {.lex_state = 146, .external_lex_state = 2},
+ [371] = {.lex_state = 146, .external_lex_state = 2},
+ [372] = {.lex_state = 146, .external_lex_state = 2},
+ [373] = {.lex_state = 146, .external_lex_state = 2},
+ [374] = {.lex_state = 146, .external_lex_state = 2},
+ [375] = {.lex_state = 8, .external_lex_state = 4},
+ [376] = {.lex_state = 146, .external_lex_state = 2},
+ [377] = {.lex_state = 8, .external_lex_state = 4},
+ [378] = {.lex_state = 146, .external_lex_state = 2},
+ [379] = {.lex_state = 146, .external_lex_state = 2},
+ [380] = {.lex_state = 146, .external_lex_state = 2},
+ [381] = {.lex_state = 146, .external_lex_state = 2},
+ [382] = {.lex_state = 146, .external_lex_state = 2},
+ [383] = {.lex_state = 146, .external_lex_state = 2},
+ [384] = {.lex_state = 146, .external_lex_state = 2},
+ [385] = {.lex_state = 146, .external_lex_state = 2},
+ [386] = {.lex_state = 146, .external_lex_state = 2},
+ [387] = {.lex_state = 146, .external_lex_state = 2},
+ [388] = {.lex_state = 146, .external_lex_state = 2},
+ [389] = {.lex_state = 146, .external_lex_state = 2},
+ [390] = {.lex_state = 146, .external_lex_state = 2},
+ [391] = {.lex_state = 146, .external_lex_state = 2},
+ [392] = {.lex_state = 146, .external_lex_state = 2},
+ [393] = {.lex_state = 146, .external_lex_state = 2},
+ [394] = {.lex_state = 146, .external_lex_state = 2},
+ [395] = {.lex_state = 146, .external_lex_state = 2},
+ [396] = {.lex_state = 146, .external_lex_state = 2},
+ [397] = {.lex_state = 8, .external_lex_state = 3},
+ [398] = {.lex_state = 146, .external_lex_state = 2},
+ [399] = {.lex_state = 8, .external_lex_state = 3},
+ [400] = {.lex_state = 146, .external_lex_state = 2},
+ [401] = {.lex_state = 146, .external_lex_state = 2},
+ [402] = {.lex_state = 8, .external_lex_state = 4},
+ [403] = {.lex_state = 8, .external_lex_state = 4},
+ [404] = {.lex_state = 8, .external_lex_state = 4},
+ [405] = {.lex_state = 8, .external_lex_state = 4},
+ [406] = {.lex_state = 8, .external_lex_state = 3},
+ [407] = {.lex_state = 8, .external_lex_state = 3},
+ [408] = {.lex_state = 8, .external_lex_state = 3},
+ [409] = {.lex_state = 8, .external_lex_state = 3},
+ [410] = {.lex_state = 8, .external_lex_state = 3},
+ [411] = {.lex_state = 8, .external_lex_state = 3},
+ [412] = {.lex_state = 8, .external_lex_state = 3},
+ [413] = {.lex_state = 8, .external_lex_state = 3},
+ [414] = {.lex_state = 8, .external_lex_state = 3},
+ [415] = {.lex_state = 146, .external_lex_state = 2},
+ [416] = {.lex_state = 8, .external_lex_state = 3},
+ [417] = {.lex_state = 8, .external_lex_state = 3},
+ [418] = {.lex_state = 8, .external_lex_state = 3},
+ [419] = {.lex_state = 8, .external_lex_state = 3},
+ [420] = {.lex_state = 146, .external_lex_state = 2},
+ [421] = {.lex_state = 146, .external_lex_state = 2},
+ [422] = {.lex_state = 146, .external_lex_state = 2},
+ [423] = {.lex_state = 146, .external_lex_state = 2},
+ [424] = {.lex_state = 146, .external_lex_state = 2},
+ [425] = {.lex_state = 146, .external_lex_state = 2},
+ [426] = {.lex_state = 146, .external_lex_state = 2},
+ [427] = {.lex_state = 8, .external_lex_state = 4},
+ [428] = {.lex_state = 8, .external_lex_state = 3},
+ [429] = {.lex_state = 8, .external_lex_state = 4},
+ [430] = {.lex_state = 8, .external_lex_state = 4},
+ [431] = {.lex_state = 8, .external_lex_state = 4},
+ [432] = {.lex_state = 8, .external_lex_state = 4},
+ [433] = {.lex_state = 8, .external_lex_state = 4},
+ [434] = {.lex_state = 8, .external_lex_state = 3},
+ [435] = {.lex_state = 8, .external_lex_state = 4},
+ [436] = {.lex_state = 8, .external_lex_state = 4},
+ [437] = {.lex_state = 8, .external_lex_state = 4},
+ [438] = {.lex_state = 8, .external_lex_state = 4},
+ [439] = {.lex_state = 8, .external_lex_state = 4},
+ [440] = {.lex_state = 8, .external_lex_state = 4},
+ [441] = {.lex_state = 8, .external_lex_state = 4},
+ [442] = {.lex_state = 8, .external_lex_state = 4},
+ [443] = {.lex_state = 8, .external_lex_state = 3},
+ [444] = {.lex_state = 8, .external_lex_state = 3},
+ [445] = {.lex_state = 8, .external_lex_state = 3},
+ [446] = {.lex_state = 8, .external_lex_state = 4},
+ [447] = {.lex_state = 8, .external_lex_state = 4},
+ [448] = {.lex_state = 8, .external_lex_state = 4},
+ [449] = {.lex_state = 8, .external_lex_state = 4},
+ [450] = {.lex_state = 8, .external_lex_state = 4},
+ [451] = {.lex_state = 8, .external_lex_state = 3},
+ [452] = {.lex_state = 8, .external_lex_state = 4},
+ [453] = {.lex_state = 8, .external_lex_state = 4},
+ [454] = {.lex_state = 8, .external_lex_state = 4},
+ [455] = {.lex_state = 8, .external_lex_state = 3},
+ [456] = {.lex_state = 8, .external_lex_state = 4},
+ [457] = {.lex_state = 8, .external_lex_state = 3},
+ [458] = {.lex_state = 8, .external_lex_state = 4},
+ [459] = {.lex_state = 8, .external_lex_state = 3},
+ [460] = {.lex_state = 8, .external_lex_state = 4},
+ [461] = {.lex_state = 8, .external_lex_state = 3},
+ [462] = {.lex_state = 8, .external_lex_state = 3},
+ [463] = {.lex_state = 8, .external_lex_state = 3},
+ [464] = {.lex_state = 8, .external_lex_state = 3},
+ [465] = {.lex_state = 8, .external_lex_state = 3},
+ [466] = {.lex_state = 8, .external_lex_state = 3},
+ [467] = {.lex_state = 8, .external_lex_state = 3},
+ [468] = {.lex_state = 8, .external_lex_state = 3},
+ [469] = {.lex_state = 8, .external_lex_state = 3},
+ [470] = {.lex_state = 8, .external_lex_state = 3},
+ [471] = {.lex_state = 8, .external_lex_state = 3},
+ [472] = {.lex_state = 8, .external_lex_state = 3},
+ [473] = {.lex_state = 8, .external_lex_state = 3},
+ [474] = {.lex_state = 8, .external_lex_state = 3},
+ [475] = {.lex_state = 8, .external_lex_state = 3},
+ [476] = {.lex_state = 8, .external_lex_state = 3},
+ [477] = {.lex_state = 8, .external_lex_state = 3},
+ [478] = {.lex_state = 9, .external_lex_state = 3},
+ [479] = {.lex_state = 9, .external_lex_state = 4},
+ [480] = {.lex_state = 9, .external_lex_state = 3},
+ [481] = {.lex_state = 9, .external_lex_state = 3},
+ [482] = {.lex_state = 9, .external_lex_state = 3},
+ [483] = {.lex_state = 9, .external_lex_state = 3},
+ [484] = {.lex_state = 9, .external_lex_state = 3},
+ [485] = {.lex_state = 9, .external_lex_state = 3},
+ [486] = {.lex_state = 9, .external_lex_state = 4},
+ [487] = {.lex_state = 9, .external_lex_state = 4},
+ [488] = {.lex_state = 9, .external_lex_state = 3},
+ [489] = {.lex_state = 9, .external_lex_state = 3},
+ [490] = {.lex_state = 9, .external_lex_state = 3},
+ [491] = {.lex_state = 9, .external_lex_state = 3},
+ [492] = {.lex_state = 9, .external_lex_state = 4},
+ [493] = {.lex_state = 9, .external_lex_state = 4},
+ [494] = {.lex_state = 9, .external_lex_state = 4},
+ [495] = {.lex_state = 9, .external_lex_state = 3},
+ [496] = {.lex_state = 9, .external_lex_state = 3},
+ [497] = {.lex_state = 9, .external_lex_state = 3},
+ [498] = {.lex_state = 9, .external_lex_state = 3},
+ [499] = {.lex_state = 9, .external_lex_state = 3},
+ [500] = {.lex_state = 9, .external_lex_state = 3},
+ [501] = {.lex_state = 9, .external_lex_state = 3},
+ [502] = {.lex_state = 9, .external_lex_state = 3},
+ [503] = {.lex_state = 9, .external_lex_state = 3},
+ [504] = {.lex_state = 9, .external_lex_state = 3},
+ [505] = {.lex_state = 9, .external_lex_state = 3},
+ [506] = {.lex_state = 9, .external_lex_state = 3},
+ [507] = {.lex_state = 9, .external_lex_state = 3},
+ [508] = {.lex_state = 9, .external_lex_state = 3},
+ [509] = {.lex_state = 9, .external_lex_state = 3},
+ [510] = {.lex_state = 9, .external_lex_state = 3},
+ [511] = {.lex_state = 9, .external_lex_state = 3},
+ [512] = {.lex_state = 9, .external_lex_state = 3},
+ [513] = {.lex_state = 9, .external_lex_state = 3},
+ [514] = {.lex_state = 9, .external_lex_state = 3},
+ [515] = {.lex_state = 9, .external_lex_state = 4},
+ [516] = {.lex_state = 9, .external_lex_state = 3},
+ [517] = {.lex_state = 9, .external_lex_state = 4},
+ [518] = {.lex_state = 9, .external_lex_state = 4},
+ [519] = {.lex_state = 9, .external_lex_state = 3},
+ [520] = {.lex_state = 9, .external_lex_state = 3},
+ [521] = {.lex_state = 9, .external_lex_state = 3},
+ [522] = {.lex_state = 9, .external_lex_state = 3},
+ [523] = {.lex_state = 9, .external_lex_state = 3},
+ [524] = {.lex_state = 9, .external_lex_state = 3},
+ [525] = {.lex_state = 9, .external_lex_state = 3},
+ [526] = {.lex_state = 9, .external_lex_state = 3},
+ [527] = {.lex_state = 9, .external_lex_state = 3},
+ [528] = {.lex_state = 10, .external_lex_state = 3},
+ [529] = {.lex_state = 9, .external_lex_state = 3},
+ [530] = {.lex_state = 9, .external_lex_state = 3},
+ [531] = {.lex_state = 9, .external_lex_state = 3},
+ [532] = {.lex_state = 9, .external_lex_state = 3},
+ [533] = {.lex_state = 9, .external_lex_state = 3},
+ [534] = {.lex_state = 9, .external_lex_state = 3},
+ [535] = {.lex_state = 9, .external_lex_state = 3},
+ [536] = {.lex_state = 9, .external_lex_state = 3},
+ [537] = {.lex_state = 9, .external_lex_state = 3},
+ [538] = {.lex_state = 9, .external_lex_state = 4},
+ [539] = {.lex_state = 9, .external_lex_state = 3},
+ [540] = {.lex_state = 9, .external_lex_state = 3},
+ [541] = {.lex_state = 9, .external_lex_state = 3},
+ [542] = {.lex_state = 9, .external_lex_state = 3},
+ [543] = {.lex_state = 9, .external_lex_state = 3},
+ [544] = {.lex_state = 9, .external_lex_state = 3},
+ [545] = {.lex_state = 9, .external_lex_state = 3},
+ [546] = {.lex_state = 9, .external_lex_state = 3},
+ [547] = {.lex_state = 9, .external_lex_state = 3},
+ [548] = {.lex_state = 9, .external_lex_state = 3},
+ [549] = {.lex_state = 9, .external_lex_state = 3},
+ [550] = {.lex_state = 9, .external_lex_state = 3},
+ [551] = {.lex_state = 9, .external_lex_state = 3},
+ [552] = {.lex_state = 9, .external_lex_state = 3},
+ [553] = {.lex_state = 9, .external_lex_state = 3},
+ [554] = {.lex_state = 9, .external_lex_state = 3},
+ [555] = {.lex_state = 9, .external_lex_state = 3},
+ [556] = {.lex_state = 9, .external_lex_state = 4},
+ [557] = {.lex_state = 9, .external_lex_state = 3},
+ [558] = {.lex_state = 9, .external_lex_state = 3},
+ [559] = {.lex_state = 9, .external_lex_state = 3},
+ [560] = {.lex_state = 9, .external_lex_state = 3},
+ [561] = {.lex_state = 9, .external_lex_state = 3},
+ [562] = {.lex_state = 9, .external_lex_state = 3},
+ [563] = {.lex_state = 9, .external_lex_state = 3},
+ [564] = {.lex_state = 9, .external_lex_state = 3},
+ [565] = {.lex_state = 9, .external_lex_state = 3},
+ [566] = {.lex_state = 9, .external_lex_state = 3},
+ [567] = {.lex_state = 9, .external_lex_state = 3},
+ [568] = {.lex_state = 9, .external_lex_state = 3},
+ [569] = {.lex_state = 9, .external_lex_state = 3},
+ [570] = {.lex_state = 9, .external_lex_state = 3},
+ [571] = {.lex_state = 9, .external_lex_state = 3},
+ [572] = {.lex_state = 9, .external_lex_state = 3},
+ [573] = {.lex_state = 9, .external_lex_state = 3},
+ [574] = {.lex_state = 9, .external_lex_state = 3},
+ [575] = {.lex_state = 9, .external_lex_state = 3},
+ [576] = {.lex_state = 9, .external_lex_state = 3},
+ [577] = {.lex_state = 9, .external_lex_state = 3},
+ [578] = {.lex_state = 9, .external_lex_state = 3},
+ [579] = {.lex_state = 9, .external_lex_state = 3},
+ [580] = {.lex_state = 9, .external_lex_state = 3},
+ [581] = {.lex_state = 9, .external_lex_state = 4},
+ [582] = {.lex_state = 9, .external_lex_state = 3},
+ [583] = {.lex_state = 9, .external_lex_state = 4},
+ [584] = {.lex_state = 9, .external_lex_state = 4},
+ [585] = {.lex_state = 9, .external_lex_state = 4},
+ [586] = {.lex_state = 9, .external_lex_state = 3},
+ [587] = {.lex_state = 9, .external_lex_state = 4},
+ [588] = {.lex_state = 9, .external_lex_state = 4},
+ [589] = {.lex_state = 9, .external_lex_state = 4},
+ [590] = {.lex_state = 9, .external_lex_state = 4},
+ [591] = {.lex_state = 9, .external_lex_state = 4},
+ [592] = {.lex_state = 9, .external_lex_state = 3},
+ [593] = {.lex_state = 9, .external_lex_state = 4},
+ [594] = {.lex_state = 9, .external_lex_state = 3},
+ [595] = {.lex_state = 9, .external_lex_state = 4},
+ [596] = {.lex_state = 9, .external_lex_state = 4},
+ [597] = {.lex_state = 9, .external_lex_state = 4},
+ [598] = {.lex_state = 9, .external_lex_state = 4},
+ [599] = {.lex_state = 9, .external_lex_state = 4},
+ [600] = {.lex_state = 9, .external_lex_state = 4},
+ [601] = {.lex_state = 9, .external_lex_state = 4},
+ [602] = {.lex_state = 9, .external_lex_state = 4},
+ [603] = {.lex_state = 9, .external_lex_state = 3},
+ [604] = {.lex_state = 9, .external_lex_state = 3},
+ [605] = {.lex_state = 9, .external_lex_state = 3},
+ [606] = {.lex_state = 9, .external_lex_state = 4},
+ [607] = {.lex_state = 9, .external_lex_state = 4},
+ [608] = {.lex_state = 9, .external_lex_state = 4},
+ [609] = {.lex_state = 9, .external_lex_state = 4},
+ [610] = {.lex_state = 9, .external_lex_state = 4},
+ [611] = {.lex_state = 9, .external_lex_state = 4},
+ [612] = {.lex_state = 9, .external_lex_state = 4},
+ [613] = {.lex_state = 9, .external_lex_state = 4},
+ [614] = {.lex_state = 9, .external_lex_state = 4},
+ [615] = {.lex_state = 9, .external_lex_state = 4},
+ [616] = {.lex_state = 9, .external_lex_state = 4},
+ [617] = {.lex_state = 9, .external_lex_state = 4},
+ [618] = {.lex_state = 9, .external_lex_state = 4},
+ [619] = {.lex_state = 9, .external_lex_state = 4},
+ [620] = {.lex_state = 9, .external_lex_state = 4},
+ [621] = {.lex_state = 9, .external_lex_state = 4},
+ [622] = {.lex_state = 9, .external_lex_state = 4},
+ [623] = {.lex_state = 9, .external_lex_state = 4},
+ [624] = {.lex_state = 9, .external_lex_state = 4},
+ [625] = {.lex_state = 9, .external_lex_state = 4},
+ [626] = {.lex_state = 9, .external_lex_state = 4},
+ [627] = {.lex_state = 9, .external_lex_state = 4},
+ [628] = {.lex_state = 9, .external_lex_state = 4},
+ [629] = {.lex_state = 9, .external_lex_state = 4},
+ [630] = {.lex_state = 9, .external_lex_state = 4},
+ [631] = {.lex_state = 9, .external_lex_state = 4},
+ [632] = {.lex_state = 9, .external_lex_state = 4},
+ [633] = {.lex_state = 9, .external_lex_state = 4},
+ [634] = {.lex_state = 9, .external_lex_state = 4},
+ [635] = {.lex_state = 9, .external_lex_state = 4},
+ [636] = {.lex_state = 9, .external_lex_state = 4},
+ [637] = {.lex_state = 9, .external_lex_state = 4},
+ [638] = {.lex_state = 9, .external_lex_state = 4},
+ [639] = {.lex_state = 9, .external_lex_state = 4},
+ [640] = {.lex_state = 9, .external_lex_state = 4},
+ [641] = {.lex_state = 9, .external_lex_state = 4},
+ [642] = {.lex_state = 9, .external_lex_state = 4},
+ [643] = {.lex_state = 9, .external_lex_state = 4},
+ [644] = {.lex_state = 9, .external_lex_state = 4},
+ [645] = {.lex_state = 9, .external_lex_state = 4},
+ [646] = {.lex_state = 9, .external_lex_state = 4},
+ [647] = {.lex_state = 9, .external_lex_state = 4},
+ [648] = {.lex_state = 9, .external_lex_state = 4},
+ [649] = {.lex_state = 9, .external_lex_state = 4},
+ [650] = {.lex_state = 9, .external_lex_state = 4},
+ [651] = {.lex_state = 9, .external_lex_state = 4},
+ [652] = {.lex_state = 9, .external_lex_state = 4},
+ [653] = {.lex_state = 9, .external_lex_state = 4},
+ [654] = {.lex_state = 9, .external_lex_state = 4},
+ [655] = {.lex_state = 9, .external_lex_state = 4},
+ [656] = {.lex_state = 9, .external_lex_state = 4},
+ [657] = {.lex_state = 9, .external_lex_state = 4},
+ [658] = {.lex_state = 9, .external_lex_state = 4},
+ [659] = {.lex_state = 9, .external_lex_state = 4},
+ [660] = {.lex_state = 9, .external_lex_state = 4},
+ [661] = {.lex_state = 9, .external_lex_state = 3},
+ [662] = {.lex_state = 9, .external_lex_state = 4},
+ [663] = {.lex_state = 9, .external_lex_state = 4},
+ [664] = {.lex_state = 9, .external_lex_state = 3},
+ [665] = {.lex_state = 9, .external_lex_state = 3},
+ [666] = {.lex_state = 9, .external_lex_state = 4},
+ [667] = {.lex_state = 9, .external_lex_state = 4},
+ [668] = {.lex_state = 9, .external_lex_state = 4},
+ [669] = {.lex_state = 10, .external_lex_state = 4},
+ [670] = {.lex_state = 9, .external_lex_state = 4},
+ [671] = {.lex_state = 9, .external_lex_state = 3},
+ [672] = {.lex_state = 9, .external_lex_state = 3},
+ [673] = {.lex_state = 9, .external_lex_state = 3},
+ [674] = {.lex_state = 9, .external_lex_state = 4},
+ [675] = {.lex_state = 9, .external_lex_state = 4},
+ [676] = {.lex_state = 9, .external_lex_state = 4},
+ [677] = {.lex_state = 9, .external_lex_state = 4},
+ [678] = {.lex_state = 9, .external_lex_state = 3},
+ [679] = {.lex_state = 9, .external_lex_state = 3},
+ [680] = {.lex_state = 9, .external_lex_state = 3},
+ [681] = {.lex_state = 9, .external_lex_state = 3},
+ [682] = {.lex_state = 9, .external_lex_state = 3},
+ [683] = {.lex_state = 9, .external_lex_state = 3},
+ [684] = {.lex_state = 9, .external_lex_state = 4},
+ [685] = {.lex_state = 9, .external_lex_state = 3},
+ [686] = {.lex_state = 9, .external_lex_state = 3},
+ [687] = {.lex_state = 9, .external_lex_state = 4},
+ [688] = {.lex_state = 9, .external_lex_state = 4},
+ [689] = {.lex_state = 9, .external_lex_state = 3},
+ [690] = {.lex_state = 9, .external_lex_state = 3},
+ [691] = {.lex_state = 9, .external_lex_state = 4},
+ [692] = {.lex_state = 9, .external_lex_state = 4},
+ [693] = {.lex_state = 9, .external_lex_state = 3},
+ [694] = {.lex_state = 9, .external_lex_state = 3},
+ [695] = {.lex_state = 9, .external_lex_state = 4},
+ [696] = {.lex_state = 9, .external_lex_state = 3},
+ [697] = {.lex_state = 9, .external_lex_state = 4},
+ [698] = {.lex_state = 9, .external_lex_state = 4},
+ [699] = {.lex_state = 9, .external_lex_state = 4},
+ [700] = {.lex_state = 9, .external_lex_state = 4},
+ [701] = {.lex_state = 9, .external_lex_state = 4},
+ [702] = {.lex_state = 9, .external_lex_state = 4},
+ [703] = {.lex_state = 9, .external_lex_state = 4},
+ [704] = {.lex_state = 10, .external_lex_state = 4},
+ [705] = {.lex_state = 9, .external_lex_state = 4},
+ [706] = {.lex_state = 9, .external_lex_state = 3},
+ [707] = {.lex_state = 9, .external_lex_state = 4},
+ [708] = {.lex_state = 9, .external_lex_state = 4},
+ [709] = {.lex_state = 9, .external_lex_state = 4},
+ [710] = {.lex_state = 9, .external_lex_state = 3},
+ [711] = {.lex_state = 9, .external_lex_state = 3},
+ [712] = {.lex_state = 9, .external_lex_state = 4},
+ [713] = {.lex_state = 9, .external_lex_state = 4},
+ [714] = {.lex_state = 9, .external_lex_state = 3},
+ [715] = {.lex_state = 9, .external_lex_state = 4},
+ [716] = {.lex_state = 9, .external_lex_state = 4},
+ [717] = {.lex_state = 9, .external_lex_state = 4},
+ [718] = {.lex_state = 9, .external_lex_state = 4},
+ [719] = {.lex_state = 9, .external_lex_state = 3},
+ [720] = {.lex_state = 9, .external_lex_state = 4},
+ [721] = {.lex_state = 9, .external_lex_state = 4},
+ [722] = {.lex_state = 9, .external_lex_state = 4},
+ [723] = {.lex_state = 9, .external_lex_state = 4},
+ [724] = {.lex_state = 9, .external_lex_state = 4},
+ [725] = {.lex_state = 9, .external_lex_state = 4},
+ [726] = {.lex_state = 9, .external_lex_state = 3},
+ [727] = {.lex_state = 9, .external_lex_state = 4},
+ [728] = {.lex_state = 9, .external_lex_state = 4},
+ [729] = {.lex_state = 9, .external_lex_state = 3},
+ [730] = {.lex_state = 9, .external_lex_state = 4},
+ [731] = {.lex_state = 9, .external_lex_state = 4},
+ [732] = {.lex_state = 9, .external_lex_state = 4},
+ [733] = {.lex_state = 9, .external_lex_state = 4},
+ [734] = {.lex_state = 9, .external_lex_state = 3},
+ [735] = {.lex_state = 9, .external_lex_state = 3},
+ [736] = {.lex_state = 9, .external_lex_state = 4},
+ [737] = {.lex_state = 9, .external_lex_state = 4},
+ [738] = {.lex_state = 9, .external_lex_state = 3},
+ [739] = {.lex_state = 9, .external_lex_state = 4},
+ [740] = {.lex_state = 9, .external_lex_state = 4},
+ [741] = {.lex_state = 9, .external_lex_state = 4},
+ [742] = {.lex_state = 9, .external_lex_state = 4},
+ [743] = {.lex_state = 9, .external_lex_state = 4},
+ [744] = {.lex_state = 9, .external_lex_state = 4},
+ [745] = {.lex_state = 9, .external_lex_state = 4},
+ [746] = {.lex_state = 9, .external_lex_state = 3},
+ [747] = {.lex_state = 9, .external_lex_state = 4},
+ [748] = {.lex_state = 9, .external_lex_state = 4},
+ [749] = {.lex_state = 9, .external_lex_state = 3},
+ [750] = {.lex_state = 9, .external_lex_state = 4},
+ [751] = {.lex_state = 9, .external_lex_state = 4},
+ [752] = {.lex_state = 9, .external_lex_state = 4},
+ [753] = {.lex_state = 9, .external_lex_state = 3},
+ [754] = {.lex_state = 9, .external_lex_state = 4},
+ [755] = {.lex_state = 9, .external_lex_state = 4},
+ [756] = {.lex_state = 9, .external_lex_state = 3},
+ [757] = {.lex_state = 9, .external_lex_state = 3},
+ [758] = {.lex_state = 9, .external_lex_state = 4},
+ [759] = {.lex_state = 9, .external_lex_state = 3},
+ [760] = {.lex_state = 9, .external_lex_state = 3},
+ [761] = {.lex_state = 9, .external_lex_state = 3},
+ [762] = {.lex_state = 9, .external_lex_state = 3},
+ [763] = {.lex_state = 9, .external_lex_state = 3},
+ [764] = {.lex_state = 9, .external_lex_state = 3},
+ [765] = {.lex_state = 9, .external_lex_state = 3},
+ [766] = {.lex_state = 9, .external_lex_state = 3},
+ [767] = {.lex_state = 9, .external_lex_state = 3},
+ [768] = {.lex_state = 9, .external_lex_state = 3},
+ [769] = {.lex_state = 9, .external_lex_state = 3},
+ [770] = {.lex_state = 9, .external_lex_state = 3},
+ [771] = {.lex_state = 9, .external_lex_state = 3},
+ [772] = {.lex_state = 9, .external_lex_state = 3},
+ [773] = {.lex_state = 9, .external_lex_state = 3},
+ [774] = {.lex_state = 9, .external_lex_state = 3},
+ [775] = {.lex_state = 9, .external_lex_state = 3},
+ [776] = {.lex_state = 9, .external_lex_state = 4},
+ [777] = {.lex_state = 9, .external_lex_state = 3},
+ [778] = {.lex_state = 9, .external_lex_state = 3},
+ [779] = {.lex_state = 9, .external_lex_state = 4},
+ [780] = {.lex_state = 9, .external_lex_state = 3},
+ [781] = {.lex_state = 9, .external_lex_state = 3},
+ [782] = {.lex_state = 9, .external_lex_state = 3},
+ [783] = {.lex_state = 9, .external_lex_state = 3},
+ [784] = {.lex_state = 9, .external_lex_state = 3},
+ [785] = {.lex_state = 9, .external_lex_state = 4},
+ [786] = {.lex_state = 9, .external_lex_state = 3},
+ [787] = {.lex_state = 9, .external_lex_state = 3},
+ [788] = {.lex_state = 9, .external_lex_state = 3},
+ [789] = {.lex_state = 9, .external_lex_state = 3},
+ [790] = {.lex_state = 9, .external_lex_state = 3},
+ [791] = {.lex_state = 9, .external_lex_state = 3},
+ [792] = {.lex_state = 9, .external_lex_state = 3},
+ [793] = {.lex_state = 9, .external_lex_state = 3},
+ [794] = {.lex_state = 9, .external_lex_state = 3},
+ [795] = {.lex_state = 9, .external_lex_state = 3},
+ [796] = {.lex_state = 9, .external_lex_state = 3},
+ [797] = {.lex_state = 9, .external_lex_state = 3},
+ [798] = {.lex_state = 9, .external_lex_state = 3},
+ [799] = {.lex_state = 9, .external_lex_state = 3},
+ [800] = {.lex_state = 9, .external_lex_state = 3},
+ [801] = {.lex_state = 9, .external_lex_state = 3},
+ [802] = {.lex_state = 9, .external_lex_state = 3},
+ [803] = {.lex_state = 9, .external_lex_state = 3},
+ [804] = {.lex_state = 9, .external_lex_state = 3},
+ [805] = {.lex_state = 9, .external_lex_state = 3},
+ [806] = {.lex_state = 9, .external_lex_state = 3},
+ [807] = {.lex_state = 9, .external_lex_state = 3},
+ [808] = {.lex_state = 9, .external_lex_state = 3},
+ [809] = {.lex_state = 9, .external_lex_state = 3},
+ [810] = {.lex_state = 9, .external_lex_state = 3},
+ [811] = {.lex_state = 9, .external_lex_state = 3},
+ [812] = {.lex_state = 9, .external_lex_state = 3},
+ [813] = {.lex_state = 9, .external_lex_state = 3},
+ [814] = {.lex_state = 9, .external_lex_state = 3},
+ [815] = {.lex_state = 9, .external_lex_state = 3},
+ [816] = {.lex_state = 9, .external_lex_state = 3},
+ [817] = {.lex_state = 9, .external_lex_state = 3},
+ [818] = {.lex_state = 9, .external_lex_state = 3},
+ [819] = {.lex_state = 9, .external_lex_state = 3},
+ [820] = {.lex_state = 9, .external_lex_state = 3},
+ [821] = {.lex_state = 9, .external_lex_state = 3},
+ [822] = {.lex_state = 9, .external_lex_state = 3},
+ [823] = {.lex_state = 9, .external_lex_state = 3},
+ [824] = {.lex_state = 9, .external_lex_state = 3},
+ [825] = {.lex_state = 10, .external_lex_state = 3},
+ [826] = {.lex_state = 9, .external_lex_state = 3},
+ [827] = {.lex_state = 9, .external_lex_state = 3},
+ [828] = {.lex_state = 6, .external_lex_state = 2},
+ [829] = {.lex_state = 6, .external_lex_state = 2},
+ [830] = {.lex_state = 9, .external_lex_state = 3},
+ [831] = {.lex_state = 9, .external_lex_state = 3},
+ [832] = {.lex_state = 9, .external_lex_state = 3},
+ [833] = {.lex_state = 6, .external_lex_state = 2},
+ [834] = {.lex_state = 6, .external_lex_state = 2},
+ [835] = {.lex_state = 6, .external_lex_state = 2},
+ [836] = {.lex_state = 9, .external_lex_state = 3},
+ [837] = {.lex_state = 6, .external_lex_state = 2},
+ [838] = {.lex_state = 6, .external_lex_state = 2},
+ [839] = {.lex_state = 9, .external_lex_state = 3},
+ [840] = {.lex_state = 6, .external_lex_state = 2},
+ [841] = {.lex_state = 6, .external_lex_state = 2},
+ [842] = {.lex_state = 6, .external_lex_state = 2},
+ [843] = {.lex_state = 6, .external_lex_state = 2},
+ [844] = {.lex_state = 6, .external_lex_state = 2},
+ [845] = {.lex_state = 6, .external_lex_state = 2},
+ [846] = {.lex_state = 6, .external_lex_state = 2},
+ [847] = {.lex_state = 6, .external_lex_state = 2},
+ [848] = {.lex_state = 6, .external_lex_state = 2},
+ [849] = {.lex_state = 6, .external_lex_state = 2},
+ [850] = {.lex_state = 6, .external_lex_state = 2},
+ [851] = {.lex_state = 6, .external_lex_state = 2},
+ [852] = {.lex_state = 146, .external_lex_state = 2},
+ [853] = {.lex_state = 146, .external_lex_state = 2},
+ [854] = {.lex_state = 146, .external_lex_state = 2},
+ [855] = {.lex_state = 146, .external_lex_state = 2},
+ [856] = {.lex_state = 146, .external_lex_state = 2},
+ [857] = {.lex_state = 146, .external_lex_state = 2},
+ [858] = {.lex_state = 146, .external_lex_state = 2},
+ [859] = {.lex_state = 146, .external_lex_state = 2},
+ [860] = {.lex_state = 146, .external_lex_state = 2},
+ [861] = {.lex_state = 146, .external_lex_state = 2},
+ [862] = {.lex_state = 146, .external_lex_state = 2},
+ [863] = {.lex_state = 146, .external_lex_state = 2},
+ [864] = {.lex_state = 146, .external_lex_state = 2},
+ [865] = {.lex_state = 146, .external_lex_state = 2},
+ [866] = {.lex_state = 146, .external_lex_state = 2},
+ [867] = {.lex_state = 146, .external_lex_state = 2},
+ [868] = {.lex_state = 146, .external_lex_state = 2},
+ [869] = {.lex_state = 146, .external_lex_state = 2},
+ [870] = {.lex_state = 146, .external_lex_state = 5},
+ [871] = {.lex_state = 146, .external_lex_state = 2},
+ [872] = {.lex_state = 146, .external_lex_state = 2},
+ [873] = {.lex_state = 146, .external_lex_state = 2},
+ [874] = {.lex_state = 146, .external_lex_state = 2},
+ [875] = {.lex_state = 146, .external_lex_state = 2},
+ [876] = {.lex_state = 146, .external_lex_state = 2},
+ [877] = {.lex_state = 6, .external_lex_state = 2},
+ [878] = {.lex_state = 146, .external_lex_state = 2},
+ [879] = {.lex_state = 146, .external_lex_state = 2},
+ [880] = {.lex_state = 146, .external_lex_state = 2},
+ [881] = {.lex_state = 146, .external_lex_state = 2},
+ [882] = {.lex_state = 146, .external_lex_state = 2},
+ [883] = {.lex_state = 146, .external_lex_state = 5},
+ [884] = {.lex_state = 146, .external_lex_state = 2},
+ [885] = {.lex_state = 146, .external_lex_state = 5},
+ [886] = {.lex_state = 6, .external_lex_state = 2},
+ [887] = {.lex_state = 146, .external_lex_state = 2},
+ [888] = {.lex_state = 6, .external_lex_state = 2},
+ [889] = {.lex_state = 146, .external_lex_state = 2},
+ [890] = {.lex_state = 146, .external_lex_state = 5},
+ [891] = {.lex_state = 146, .external_lex_state = 5},
+ [892] = {.lex_state = 146, .external_lex_state = 5},
+ [893] = {.lex_state = 6, .external_lex_state = 2},
+ [894] = {.lex_state = 6, .external_lex_state = 2},
+ [895] = {.lex_state = 6, .external_lex_state = 2},
+ [896] = {.lex_state = 6, .external_lex_state = 2},
+ [897] = {.lex_state = 6, .external_lex_state = 2},
+ [898] = {.lex_state = 6, .external_lex_state = 2},
+ [899] = {.lex_state = 12, .external_lex_state = 2},
+ [900] = {.lex_state = 6, .external_lex_state = 2},
+ [901] = {.lex_state = 146, .external_lex_state = 5},
+ [902] = {.lex_state = 6, .external_lex_state = 2},
+ [903] = {.lex_state = 6, .external_lex_state = 2},
+ [904] = {.lex_state = 146, .external_lex_state = 5},
+ [905] = {.lex_state = 146, .external_lex_state = 5},
+ [906] = {.lex_state = 6, .external_lex_state = 2},
+ [907] = {.lex_state = 6, .external_lex_state = 2},
+ [908] = {.lex_state = 6, .external_lex_state = 2},
+ [909] = {.lex_state = 6, .external_lex_state = 2},
+ [910] = {.lex_state = 6, .external_lex_state = 2},
+ [911] = {.lex_state = 6, .external_lex_state = 2},
+ [912] = {.lex_state = 6, .external_lex_state = 2},
+ [913] = {.lex_state = 146, .external_lex_state = 2},
+ [914] = {.lex_state = 6, .external_lex_state = 2},
+ [915] = {.lex_state = 6, .external_lex_state = 2},
+ [916] = {.lex_state = 6, .external_lex_state = 2},
+ [917] = {.lex_state = 146, .external_lex_state = 5},
+ [918] = {.lex_state = 6, .external_lex_state = 5},
+ [919] = {.lex_state = 6, .external_lex_state = 2},
+ [920] = {.lex_state = 6, .external_lex_state = 5},
+ [921] = {.lex_state = 146, .external_lex_state = 5},
+ [922] = {.lex_state = 6, .external_lex_state = 2},
+ [923] = {.lex_state = 6, .external_lex_state = 2},
+ [924] = {.lex_state = 6, .external_lex_state = 2},
+ [925] = {.lex_state = 6, .external_lex_state = 2},
+ [926] = {.lex_state = 146, .external_lex_state = 5},
+ [927] = {.lex_state = 6, .external_lex_state = 2},
+ [928] = {.lex_state = 6, .external_lex_state = 2},
+ [929] = {.lex_state = 6, .external_lex_state = 2},
+ [930] = {.lex_state = 6, .external_lex_state = 2},
+ [931] = {.lex_state = 12, .external_lex_state = 2},
+ [932] = {.lex_state = 6, .external_lex_state = 2},
+ [933] = {.lex_state = 6, .external_lex_state = 2},
+ [934] = {.lex_state = 6, .external_lex_state = 2},
+ [935] = {.lex_state = 6, .external_lex_state = 2},
+ [936] = {.lex_state = 6, .external_lex_state = 2},
+ [937] = {.lex_state = 6, .external_lex_state = 2},
+ [938] = {.lex_state = 6, .external_lex_state = 2},
+ [939] = {.lex_state = 6, .external_lex_state = 2},
+ [940] = {.lex_state = 6, .external_lex_state = 2},
+ [941] = {.lex_state = 6, .external_lex_state = 2},
+ [942] = {.lex_state = 6, .external_lex_state = 2},
+ [943] = {.lex_state = 146, .external_lex_state = 2},
+ [944] = {.lex_state = 146, .external_lex_state = 2},
+ [945] = {.lex_state = 146, .external_lex_state = 2},
+ [946] = {.lex_state = 146, .external_lex_state = 2},
+ [947] = {.lex_state = 146, .external_lex_state = 2},
+ [948] = {.lex_state = 146, .external_lex_state = 2},
+ [949] = {.lex_state = 146, .external_lex_state = 2},
+ [950] = {.lex_state = 146, .external_lex_state = 2},
+ [951] = {.lex_state = 146, .external_lex_state = 2},
+ [952] = {.lex_state = 146, .external_lex_state = 2},
+ [953] = {.lex_state = 146, .external_lex_state = 2},
+ [954] = {.lex_state = 6, .external_lex_state = 2},
+ [955] = {.lex_state = 146, .external_lex_state = 2},
+ [956] = {.lex_state = 146, .external_lex_state = 2},
+ [957] = {.lex_state = 146, .external_lex_state = 2},
+ [958] = {.lex_state = 146, .external_lex_state = 2},
+ [959] = {.lex_state = 6, .external_lex_state = 2},
+ [960] = {.lex_state = 146, .external_lex_state = 2},
+ [961] = {.lex_state = 146, .external_lex_state = 2},
+ [962] = {.lex_state = 146, .external_lex_state = 2},
+ [963] = {.lex_state = 146, .external_lex_state = 2},
+ [964] = {.lex_state = 146, .external_lex_state = 2},
+ [965] = {.lex_state = 146, .external_lex_state = 2},
+ [966] = {.lex_state = 146, .external_lex_state = 2},
+ [967] = {.lex_state = 146, .external_lex_state = 2},
+ [968] = {.lex_state = 6, .external_lex_state = 2},
+ [969] = {.lex_state = 146, .external_lex_state = 2},
+ [970] = {.lex_state = 146, .external_lex_state = 2},
+ [971] = {.lex_state = 146, .external_lex_state = 2},
+ [972] = {.lex_state = 146, .external_lex_state = 2},
+ [973] = {.lex_state = 146, .external_lex_state = 2},
+ [974] = {.lex_state = 146, .external_lex_state = 2},
+ [975] = {.lex_state = 146, .external_lex_state = 2},
+ [976] = {.lex_state = 146, .external_lex_state = 2},
+ [977] = {.lex_state = 146, .external_lex_state = 2},
+ [978] = {.lex_state = 146, .external_lex_state = 2},
+ [979] = {.lex_state = 6, .external_lex_state = 2},
+ [980] = {.lex_state = 146, .external_lex_state = 2},
+ [981] = {.lex_state = 6, .external_lex_state = 2},
+ [982] = {.lex_state = 6, .external_lex_state = 2},
+ [983] = {.lex_state = 146, .external_lex_state = 2},
+ [984] = {.lex_state = 146, .external_lex_state = 2},
+ [985] = {.lex_state = 146, .external_lex_state = 2},
+ [986] = {.lex_state = 146, .external_lex_state = 2},
+ [987] = {.lex_state = 146, .external_lex_state = 2},
+ [988] = {.lex_state = 146, .external_lex_state = 2},
+ [989] = {.lex_state = 146, .external_lex_state = 2},
+ [990] = {.lex_state = 146, .external_lex_state = 2},
+ [991] = {.lex_state = 146, .external_lex_state = 2},
+ [992] = {.lex_state = 146, .external_lex_state = 2},
+ [993] = {.lex_state = 146, .external_lex_state = 2},
+ [994] = {.lex_state = 146, .external_lex_state = 2},
+ [995] = {.lex_state = 146, .external_lex_state = 2},
+ [996] = {.lex_state = 146, .external_lex_state = 2},
+ [997] = {.lex_state = 8, .external_lex_state = 2},
+ [998] = {.lex_state = 8, .external_lex_state = 2},
+ [999] = {.lex_state = 146, .external_lex_state = 2},
+ [1000] = {.lex_state = 146, .external_lex_state = 2},
+ [1001] = {.lex_state = 146, .external_lex_state = 2},
+ [1002] = {.lex_state = 146, .external_lex_state = 2},
+ [1003] = {.lex_state = 146, .external_lex_state = 2},
+ [1004] = {.lex_state = 8, .external_lex_state = 2},
+ [1005] = {.lex_state = 8, .external_lex_state = 2},
+ [1006] = {.lex_state = 146, .external_lex_state = 2},
+ [1007] = {.lex_state = 146, .external_lex_state = 2},
+ [1008] = {.lex_state = 146, .external_lex_state = 2},
+ [1009] = {.lex_state = 8, .external_lex_state = 2},
+ [1010] = {.lex_state = 146, .external_lex_state = 2},
+ [1011] = {.lex_state = 2, .external_lex_state = 2},
+ [1012] = {.lex_state = 2, .external_lex_state = 2},
+ [1013] = {.lex_state = 2, .external_lex_state = 2},
+ [1014] = {.lex_state = 2, .external_lex_state = 2},
+ [1015] = {.lex_state = 2, .external_lex_state = 2},
+ [1016] = {.lex_state = 2, .external_lex_state = 2},
+ [1017] = {.lex_state = 2, .external_lex_state = 2},
+ [1018] = {.lex_state = 2, .external_lex_state = 2},
+ [1019] = {.lex_state = 145, .external_lex_state = 2},
+ [1020] = {.lex_state = 2, .external_lex_state = 2},
+ [1021] = {.lex_state = 35, .external_lex_state = 2},
+ [1022] = {.lex_state = 35, .external_lex_state = 2},
+ [1023] = {.lex_state = 35, .external_lex_state = 2},
+ [1024] = {.lex_state = 35, .external_lex_state = 2},
+ [1025] = {.lex_state = 35, .external_lex_state = 2},
+ [1026] = {.lex_state = 35, .external_lex_state = 2},
+ [1027] = {.lex_state = 35, .external_lex_state = 2},
+ [1028] = {.lex_state = 35, .external_lex_state = 2},
+ [1029] = {.lex_state = 35, .external_lex_state = 2},
+ [1030] = {.lex_state = 35, .external_lex_state = 2},
+ [1031] = {.lex_state = 35, .external_lex_state = 2},
+ [1032] = {.lex_state = 35, .external_lex_state = 2},
+ [1033] = {.lex_state = 35, .external_lex_state = 2},
+ [1034] = {.lex_state = 35, .external_lex_state = 2},
+ [1035] = {.lex_state = 35, .external_lex_state = 2},
+ [1036] = {.lex_state = 35, .external_lex_state = 2},
+ [1037] = {.lex_state = 35, .external_lex_state = 2},
+ [1038] = {.lex_state = 35, .external_lex_state = 2},
+ [1039] = {.lex_state = 35, .external_lex_state = 2},
+ [1040] = {.lex_state = 12, .external_lex_state = 2},
+ [1041] = {.lex_state = 35, .external_lex_state = 2},
+ [1042] = {.lex_state = 35, .external_lex_state = 2},
+ [1043] = {.lex_state = 35, .external_lex_state = 2},
+ [1044] = {.lex_state = 35, .external_lex_state = 2},
+ [1045] = {.lex_state = 35, .external_lex_state = 2},
+ [1046] = {.lex_state = 35, .external_lex_state = 2},
+ [1047] = {.lex_state = 35, .external_lex_state = 2},
+ [1048] = {.lex_state = 35, .external_lex_state = 2},
+ [1049] = {.lex_state = 35, .external_lex_state = 2},
+ [1050] = {.lex_state = 12, .external_lex_state = 2},
+ [1051] = {.lex_state = 35, .external_lex_state = 2},
+ [1052] = {.lex_state = 35, .external_lex_state = 2},
+ [1053] = {.lex_state = 35, .external_lex_state = 2},
+ [1054] = {.lex_state = 35, .external_lex_state = 2},
+ [1055] = {.lex_state = 35, .external_lex_state = 2},
+ [1056] = {.lex_state = 146, .external_lex_state = 2},
+ [1057] = {.lex_state = 146, .external_lex_state = 2},
+ [1058] = {.lex_state = 146, .external_lex_state = 2},
+ [1059] = {.lex_state = 146, .external_lex_state = 2},
+ [1060] = {.lex_state = 146, .external_lex_state = 2},
+ [1061] = {.lex_state = 146, .external_lex_state = 2},
+ [1062] = {.lex_state = 146, .external_lex_state = 2},
+ [1063] = {.lex_state = 146, .external_lex_state = 2},
+ [1064] = {.lex_state = 146, .external_lex_state = 2},
+ [1065] = {.lex_state = 146, .external_lex_state = 2},
+ [1066] = {.lex_state = 146, .external_lex_state = 2},
+ [1067] = {.lex_state = 146, .external_lex_state = 2},
+ [1068] = {.lex_state = 146, .external_lex_state = 2},
+ [1069] = {.lex_state = 146, .external_lex_state = 2},
+ [1070] = {.lex_state = 146, .external_lex_state = 2},
+ [1071] = {.lex_state = 146, .external_lex_state = 2},
+ [1072] = {.lex_state = 146, .external_lex_state = 2},
+ [1073] = {.lex_state = 35, .external_lex_state = 2},
+ [1074] = {.lex_state = 146, .external_lex_state = 2},
+ [1075] = {.lex_state = 146, .external_lex_state = 2},
+ [1076] = {.lex_state = 146, .external_lex_state = 2},
+ [1077] = {.lex_state = 146, .external_lex_state = 2},
+ [1078] = {.lex_state = 146, .external_lex_state = 5},
+ [1079] = {.lex_state = 146, .external_lex_state = 5},
+ [1080] = {.lex_state = 146, .external_lex_state = 2},
+ [1081] = {.lex_state = 146, .external_lex_state = 2},
+ [1082] = {.lex_state = 2, .external_lex_state = 2},
+ [1083] = {.lex_state = 2, .external_lex_state = 2},
+ [1084] = {.lex_state = 19, .external_lex_state = 6},
+ [1085] = {.lex_state = 145, .external_lex_state = 2},
+ [1086] = {.lex_state = 146, .external_lex_state = 5},
+ [1087] = {.lex_state = 146, .external_lex_state = 5},
+ [1088] = {.lex_state = 2, .external_lex_state = 2},
+ [1089] = {.lex_state = 19, .external_lex_state = 6},
+ [1090] = {.lex_state = 2, .external_lex_state = 2},
+ [1091] = {.lex_state = 2, .external_lex_state = 2},
+ [1092] = {.lex_state = 2, .external_lex_state = 2},
+ [1093] = {.lex_state = 19, .external_lex_state = 6},
+ [1094] = {.lex_state = 146, .external_lex_state = 2},
+ [1095] = {.lex_state = 145, .external_lex_state = 2},
+ [1096] = {.lex_state = 2, .external_lex_state = 2},
+ [1097] = {.lex_state = 2, .external_lex_state = 2},
+ [1098] = {.lex_state = 2, .external_lex_state = 2},
+ [1099] = {.lex_state = 146, .external_lex_state = 5},
+ [1100] = {.lex_state = 146, .external_lex_state = 5},
+ [1101] = {.lex_state = 2, .external_lex_state = 2},
+ [1102] = {.lex_state = 2, .external_lex_state = 2},
+ [1103] = {.lex_state = 2, .external_lex_state = 2},
+ [1104] = {.lex_state = 35, .external_lex_state = 2},
+ [1105] = {.lex_state = 146, .external_lex_state = 2},
+ [1106] = {.lex_state = 35, .external_lex_state = 2},
+ [1107] = {.lex_state = 2, .external_lex_state = 2},
+ [1108] = {.lex_state = 146, .external_lex_state = 5},
+ [1109] = {.lex_state = 2, .external_lex_state = 2},
+ [1110] = {.lex_state = 146, .external_lex_state = 2},
+ [1111] = {.lex_state = 2, .external_lex_state = 2},
+ [1112] = {.lex_state = 146, .external_lex_state = 2},
+ [1113] = {.lex_state = 35, .external_lex_state = 2},
+ [1114] = {.lex_state = 146, .external_lex_state = 5},
+ [1115] = {.lex_state = 146, .external_lex_state = 5},
+ [1116] = {.lex_state = 2, .external_lex_state = 2},
+ [1117] = {.lex_state = 146, .external_lex_state = 5},
+ [1118] = {.lex_state = 19, .external_lex_state = 6},
+ [1119] = {.lex_state = 146, .external_lex_state = 2},
+ [1120] = {.lex_state = 146, .external_lex_state = 2},
+ [1121] = {.lex_state = 19, .external_lex_state = 6},
+ [1122] = {.lex_state = 146, .external_lex_state = 2},
+ [1123] = {.lex_state = 146, .external_lex_state = 2},
+ [1124] = {.lex_state = 146, .external_lex_state = 2},
+ [1125] = {.lex_state = 146, .external_lex_state = 5},
+ [1126] = {.lex_state = 146, .external_lex_state = 5},
+ [1127] = {.lex_state = 146, .external_lex_state = 5},
+ [1128] = {.lex_state = 146, .external_lex_state = 5},
+ [1129] = {.lex_state = 2, .external_lex_state = 2},
+ [1130] = {.lex_state = 2, .external_lex_state = 2},
+ [1131] = {.lex_state = 146, .external_lex_state = 2},
+ [1132] = {.lex_state = 146, .external_lex_state = 2},
+ [1133] = {.lex_state = 146, .external_lex_state = 2},
+ [1134] = {.lex_state = 35, .external_lex_state = 2},
+ [1135] = {.lex_state = 146, .external_lex_state = 2},
+ [1136] = {.lex_state = 146, .external_lex_state = 2},
+ [1137] = {.lex_state = 35, .external_lex_state = 2},
+ [1138] = {.lex_state = 146, .external_lex_state = 2},
+ [1139] = {.lex_state = 146, .external_lex_state = 2},
+ [1140] = {.lex_state = 146, .external_lex_state = 2},
+ [1141] = {.lex_state = 146, .external_lex_state = 2},
+ [1142] = {.lex_state = 146, .external_lex_state = 2},
+ [1143] = {.lex_state = 146, .external_lex_state = 5},
+ [1144] = {.lex_state = 35, .external_lex_state = 2},
+ [1145] = {.lex_state = 35, .external_lex_state = 2},
+ [1146] = {.lex_state = 35, .external_lex_state = 2},
+ [1147] = {.lex_state = 146, .external_lex_state = 2},
+ [1148] = {.lex_state = 146, .external_lex_state = 2},
+ [1149] = {.lex_state = 35, .external_lex_state = 2},
+ [1150] = {.lex_state = 35, .external_lex_state = 2},
+ [1151] = {.lex_state = 35, .external_lex_state = 2},
+ [1152] = {.lex_state = 35, .external_lex_state = 2},
+ [1153] = {.lex_state = 35, .external_lex_state = 2},
+ [1154] = {.lex_state = 35, .external_lex_state = 2},
+ [1155] = {.lex_state = 35, .external_lex_state = 2},
+ [1156] = {.lex_state = 146, .external_lex_state = 2},
+ [1157] = {.lex_state = 146, .external_lex_state = 2},
+ [1158] = {.lex_state = 146, .external_lex_state = 2},
+ [1159] = {.lex_state = 146, .external_lex_state = 2},
+ [1160] = {.lex_state = 146, .external_lex_state = 2},
+ [1161] = {.lex_state = 146, .external_lex_state = 5},
+ [1162] = {.lex_state = 35, .external_lex_state = 2},
+ [1163] = {.lex_state = 35, .external_lex_state = 2},
+ [1164] = {.lex_state = 35, .external_lex_state = 2},
+ [1165] = {.lex_state = 35, .external_lex_state = 2},
+ [1166] = {.lex_state = 35, .external_lex_state = 2},
+ [1167] = {.lex_state = 146, .external_lex_state = 2},
+ [1168] = {.lex_state = 146, .external_lex_state = 2},
+ [1169] = {.lex_state = 35, .external_lex_state = 2},
+ [1170] = {.lex_state = 35, .external_lex_state = 2},
+ [1171] = {.lex_state = 146, .external_lex_state = 2},
+ [1172] = {.lex_state = 146, .external_lex_state = 2},
+ [1173] = {.lex_state = 146, .external_lex_state = 2},
+ [1174] = {.lex_state = 35, .external_lex_state = 2},
+ [1175] = {.lex_state = 146, .external_lex_state = 5},
+ [1176] = {.lex_state = 146, .external_lex_state = 2},
+ [1177] = {.lex_state = 146, .external_lex_state = 5},
+ [1178] = {.lex_state = 35, .external_lex_state = 2},
+ [1179] = {.lex_state = 35, .external_lex_state = 2},
+ [1180] = {.lex_state = 35, .external_lex_state = 2},
+ [1181] = {.lex_state = 146, .external_lex_state = 2},
+ [1182] = {.lex_state = 35, .external_lex_state = 2},
+ [1183] = {.lex_state = 146, .external_lex_state = 2},
+ [1184] = {.lex_state = 19, .external_lex_state = 6},
+ [1185] = {.lex_state = 13, .external_lex_state = 2},
+ [1186] = {.lex_state = 146, .external_lex_state = 2},
+ [1187] = {.lex_state = 15, .external_lex_state = 7},
+ [1188] = {.lex_state = 24, .external_lex_state = 7},
+ [1189] = {.lex_state = 15, .external_lex_state = 7},
+ [1190] = {.lex_state = 24, .external_lex_state = 7},
+ [1191] = {.lex_state = 146, .external_lex_state = 2},
+ [1192] = {.lex_state = 146, .external_lex_state = 2},
+ [1193] = {.lex_state = 146, .external_lex_state = 2},
+ [1194] = {.lex_state = 146, .external_lex_state = 2},
+ [1195] = {.lex_state = 146, .external_lex_state = 2},
+ [1196] = {.lex_state = 22, .external_lex_state = 2},
+ [1197] = {.lex_state = 146, .external_lex_state = 5},
+ [1198] = {.lex_state = 13, .external_lex_state = 2},
+ [1199] = {.lex_state = 146, .external_lex_state = 2},
+ [1200] = {.lex_state = 15, .external_lex_state = 7},
+ [1201] = {.lex_state = 15, .external_lex_state = 7},
+ [1202] = {.lex_state = 146, .external_lex_state = 2},
+ [1203] = {.lex_state = 24, .external_lex_state = 7},
+ [1204] = {.lex_state = 146, .external_lex_state = 2},
+ [1205] = {.lex_state = 22, .external_lex_state = 2},
+ [1206] = {.lex_state = 146, .external_lex_state = 2},
+ [1207] = {.lex_state = 24, .external_lex_state = 7},
+ [1208] = {.lex_state = 146, .external_lex_state = 2},
+ [1209] = {.lex_state = 146, .external_lex_state = 5},
+ [1210] = {.lex_state = 146, .external_lex_state = 2},
+ [1211] = {.lex_state = 146, .external_lex_state = 2},
+ [1212] = {.lex_state = 146, .external_lex_state = 5},
+ [1213] = {.lex_state = 146, .external_lex_state = 2},
+ [1214] = {.lex_state = 146, .external_lex_state = 2},
+ [1215] = {.lex_state = 15, .external_lex_state = 7},
+ [1216] = {.lex_state = 146, .external_lex_state = 2},
+ [1217] = {.lex_state = 24, .external_lex_state = 7},
+ [1218] = {.lex_state = 146, .external_lex_state = 2},
+ [1219] = {.lex_state = 146, .external_lex_state = 2},
+ [1220] = {.lex_state = 15, .external_lex_state = 7},
+ [1221] = {.lex_state = 24, .external_lex_state = 7},
+ [1222] = {.lex_state = 15, .external_lex_state = 7},
+ [1223] = {.lex_state = 146, .external_lex_state = 2},
+ [1224] = {.lex_state = 146, .external_lex_state = 5},
+ [1225] = {.lex_state = 146, .external_lex_state = 2},
+ [1226] = {.lex_state = 146, .external_lex_state = 2},
+ [1227] = {.lex_state = 146, .external_lex_state = 2},
+ [1228] = {.lex_state = 146, .external_lex_state = 2},
+ [1229] = {.lex_state = 146, .external_lex_state = 5},
+ [1230] = {.lex_state = 24, .external_lex_state = 7},
+ [1231] = {.lex_state = 146, .external_lex_state = 2},
+ [1232] = {.lex_state = 146, .external_lex_state = 2},
+ [1233] = {.lex_state = 15, .external_lex_state = 7},
+ [1234] = {.lex_state = 15, .external_lex_state = 7},
+ [1235] = {.lex_state = 13, .external_lex_state = 2},
+ [1236] = {.lex_state = 22, .external_lex_state = 2},
+ [1237] = {.lex_state = 146, .external_lex_state = 2},
+ [1238] = {.lex_state = 146, .external_lex_state = 5},
+ [1239] = {.lex_state = 146, .external_lex_state = 2},
+ [1240] = {.lex_state = 146, .external_lex_state = 5},
+ [1241] = {.lex_state = 146, .external_lex_state = 5},
+ [1242] = {.lex_state = 145, .external_lex_state = 2},
+ [1243] = {.lex_state = 146, .external_lex_state = 2},
+ [1244] = {.lex_state = 146, .external_lex_state = 5},
+ [1245] = {.lex_state = 146, .external_lex_state = 2},
+ [1246] = {.lex_state = 146, .external_lex_state = 5},
+ [1247] = {.lex_state = 24, .external_lex_state = 7},
+ [1248] = {.lex_state = 146, .external_lex_state = 2},
+ [1249] = {.lex_state = 24, .external_lex_state = 7},
+ [1250] = {.lex_state = 146, .external_lex_state = 5},
+ [1251] = {.lex_state = 146, .external_lex_state = 2},
+ [1252] = {.lex_state = 146, .external_lex_state = 2},
+ [1253] = {.lex_state = 146, .external_lex_state = 2},
+ [1254] = {.lex_state = 146, .external_lex_state = 2},
+ [1255] = {.lex_state = 146, .external_lex_state = 2},
+ [1256] = {.lex_state = 146, .external_lex_state = 5},
[1257] = {.lex_state = 3, .external_lex_state = 2},
- [1258] = {.lex_state = 148, .external_lex_state = 2},
- [1259] = {.lex_state = 148, .external_lex_state = 2},
- [1260] = {.lex_state = 148, .external_lex_state = 2},
- [1261] = {.lex_state = 148, .external_lex_state = 2},
- [1262] = {.lex_state = 148, .external_lex_state = 2},
- [1263] = {.lex_state = 148, .external_lex_state = 2},
- [1264] = {.lex_state = 148, .external_lex_state = 2},
- [1265] = {.lex_state = 148, .external_lex_state = 2},
- [1266] = {.lex_state = 148, .external_lex_state = 2},
- [1267] = {.lex_state = 148, .external_lex_state = 5},
- [1268] = {.lex_state = 148, .external_lex_state = 2},
- [1269] = {.lex_state = 148, .external_lex_state = 2},
- [1270] = {.lex_state = 148, .external_lex_state = 2},
- [1271] = {.lex_state = 148, .external_lex_state = 2},
- [1272] = {.lex_state = 148, .external_lex_state = 2},
- [1273] = {.lex_state = 148, .external_lex_state = 2},
- [1274] = {.lex_state = 148, .external_lex_state = 5},
- [1275] = {.lex_state = 148, .external_lex_state = 2},
+ [1258] = {.lex_state = 146, .external_lex_state = 2},
+ [1259] = {.lex_state = 146, .external_lex_state = 2},
+ [1260] = {.lex_state = 146, .external_lex_state = 2},
+ [1261] = {.lex_state = 146, .external_lex_state = 2},
+ [1262] = {.lex_state = 146, .external_lex_state = 2},
+ [1263] = {.lex_state = 146, .external_lex_state = 2},
+ [1264] = {.lex_state = 146, .external_lex_state = 2},
+ [1265] = {.lex_state = 146, .external_lex_state = 2},
+ [1266] = {.lex_state = 146, .external_lex_state = 2},
+ [1267] = {.lex_state = 146, .external_lex_state = 5},
+ [1268] = {.lex_state = 146, .external_lex_state = 2},
+ [1269] = {.lex_state = 146, .external_lex_state = 2},
+ [1270] = {.lex_state = 146, .external_lex_state = 2},
+ [1271] = {.lex_state = 146, .external_lex_state = 2},
+ [1272] = {.lex_state = 146, .external_lex_state = 2},
+ [1273] = {.lex_state = 146, .external_lex_state = 2},
+ [1274] = {.lex_state = 146, .external_lex_state = 5},
+ [1275] = {.lex_state = 146, .external_lex_state = 2},
[1276] = {.lex_state = 3, .external_lex_state = 2},
- [1277] = {.lex_state = 148, .external_lex_state = 2},
- [1278] = {.lex_state = 148, .external_lex_state = 5},
- [1279] = {.lex_state = 148, .external_lex_state = 2},
- [1280] = {.lex_state = 148, .external_lex_state = 2},
- [1281] = {.lex_state = 148, .external_lex_state = 2},
- [1282] = {.lex_state = 148, .external_lex_state = 2},
- [1283] = {.lex_state = 148, .external_lex_state = 2},
- [1284] = {.lex_state = 148, .external_lex_state = 5},
- [1285] = {.lex_state = 148, .external_lex_state = 2},
- [1286] = {.lex_state = 148, .external_lex_state = 2},
- [1287] = {.lex_state = 148, .external_lex_state = 2},
+ [1277] = {.lex_state = 146, .external_lex_state = 2},
+ [1278] = {.lex_state = 146, .external_lex_state = 5},
+ [1279] = {.lex_state = 146, .external_lex_state = 2},
+ [1280] = {.lex_state = 146, .external_lex_state = 2},
+ [1281] = {.lex_state = 146, .external_lex_state = 2},
+ [1282] = {.lex_state = 146, .external_lex_state = 2},
+ [1283] = {.lex_state = 146, .external_lex_state = 2},
+ [1284] = {.lex_state = 146, .external_lex_state = 5},
+ [1285] = {.lex_state = 146, .external_lex_state = 2},
+ [1286] = {.lex_state = 146, .external_lex_state = 2},
+ [1287] = {.lex_state = 146, .external_lex_state = 2},
[1288] = {.lex_state = 3, .external_lex_state = 2},
- [1289] = {.lex_state = 148, .external_lex_state = 2},
- [1290] = {.lex_state = 148, .external_lex_state = 2},
- [1291] = {.lex_state = 148, .external_lex_state = 2},
- [1292] = {.lex_state = 148, .external_lex_state = 2},
- [1293] = {.lex_state = 148, .external_lex_state = 2},
- [1294] = {.lex_state = 148, .external_lex_state = 2},
- [1295] = {.lex_state = 148, .external_lex_state = 2},
- [1296] = {.lex_state = 148, .external_lex_state = 2},
- [1297] = {.lex_state = 148, .external_lex_state = 2},
- [1298] = {.lex_state = 148, .external_lex_state = 2},
- [1299] = {.lex_state = 148, .external_lex_state = 2},
- [1300] = {.lex_state = 9, .external_lex_state = 2},
- [1301] = {.lex_state = 148, .external_lex_state = 2},
- [1302] = {.lex_state = 148, .external_lex_state = 2},
+ [1289] = {.lex_state = 146, .external_lex_state = 2},
+ [1290] = {.lex_state = 146, .external_lex_state = 2},
+ [1291] = {.lex_state = 146, .external_lex_state = 2},
+ [1292] = {.lex_state = 146, .external_lex_state = 2},
+ [1293] = {.lex_state = 146, .external_lex_state = 2},
+ [1294] = {.lex_state = 146, .external_lex_state = 2},
+ [1295] = {.lex_state = 146, .external_lex_state = 2},
+ [1296] = {.lex_state = 146, .external_lex_state = 2},
+ [1297] = {.lex_state = 146, .external_lex_state = 2},
+ [1298] = {.lex_state = 146, .external_lex_state = 2},
+ [1299] = {.lex_state = 146, .external_lex_state = 2},
+ [1300] = {.lex_state = 7, .external_lex_state = 2},
+ [1301] = {.lex_state = 146, .external_lex_state = 2},
+ [1302] = {.lex_state = 146, .external_lex_state = 2},
[1303] = {.lex_state = 3, .external_lex_state = 2},
- [1304] = {.lex_state = 148, .external_lex_state = 2},
- [1305] = {.lex_state = 148, .external_lex_state = 2},
- [1306] = {.lex_state = 148, .external_lex_state = 2},
- [1307] = {.lex_state = 148, .external_lex_state = 2},
+ [1304] = {.lex_state = 146, .external_lex_state = 2},
+ [1305] = {.lex_state = 146, .external_lex_state = 2},
+ [1306] = {.lex_state = 146, .external_lex_state = 2},
+ [1307] = {.lex_state = 146, .external_lex_state = 2},
[1308] = {.lex_state = 3, .external_lex_state = 2},
- [1309] = {.lex_state = 148, .external_lex_state = 5},
- [1310] = {.lex_state = 148, .external_lex_state = 5},
- [1311] = {.lex_state = 148, .external_lex_state = 2},
- [1312] = {.lex_state = 148, .external_lex_state = 5},
- [1313] = {.lex_state = 9, .external_lex_state = 2},
- [1314] = {.lex_state = 148, .external_lex_state = 2},
- [1315] = {.lex_state = 148, .external_lex_state = 2},
- [1316] = {.lex_state = 148, .external_lex_state = 2},
- [1317] = {.lex_state = 148, .external_lex_state = 2},
- [1318] = {.lex_state = 148, .external_lex_state = 2},
- [1319] = {.lex_state = 148, .external_lex_state = 2},
- [1320] = {.lex_state = 148, .external_lex_state = 2},
- [1321] = {.lex_state = 148, .external_lex_state = 2},
- [1322] = {.lex_state = 148, .external_lex_state = 2},
- [1323] = {.lex_state = 148, .external_lex_state = 2},
- [1324] = {.lex_state = 148, .external_lex_state = 2},
- [1325] = {.lex_state = 148, .external_lex_state = 2},
- [1326] = {.lex_state = 148, .external_lex_state = 2},
+ [1309] = {.lex_state = 146, .external_lex_state = 5},
+ [1310] = {.lex_state = 146, .external_lex_state = 5},
+ [1311] = {.lex_state = 146, .external_lex_state = 2},
+ [1312] = {.lex_state = 146, .external_lex_state = 5},
+ [1313] = {.lex_state = 7, .external_lex_state = 2},
+ [1314] = {.lex_state = 146, .external_lex_state = 2},
+ [1315] = {.lex_state = 146, .external_lex_state = 2},
+ [1316] = {.lex_state = 146, .external_lex_state = 2},
+ [1317] = {.lex_state = 146, .external_lex_state = 2},
+ [1318] = {.lex_state = 146, .external_lex_state = 2},
+ [1319] = {.lex_state = 146, .external_lex_state = 2},
+ [1320] = {.lex_state = 146, .external_lex_state = 2},
+ [1321] = {.lex_state = 146, .external_lex_state = 2},
+ [1322] = {.lex_state = 146, .external_lex_state = 2},
+ [1323] = {.lex_state = 146, .external_lex_state = 2},
+ [1324] = {.lex_state = 146, .external_lex_state = 2},
+ [1325] = {.lex_state = 146, .external_lex_state = 2},
+ [1326] = {.lex_state = 146, .external_lex_state = 2},
[1327] = {.lex_state = 3, .external_lex_state = 2},
- [1328] = {.lex_state = 148, .external_lex_state = 2},
- [1329] = {.lex_state = 148, .external_lex_state = 2},
- [1330] = {.lex_state = 148, .external_lex_state = 2},
- [1331] = {.lex_state = 148, .external_lex_state = 2},
- [1332] = {.lex_state = 148, .external_lex_state = 2},
- [1333] = {.lex_state = 148, .external_lex_state = 2},
- [1334] = {.lex_state = 148, .external_lex_state = 2},
- [1335] = {.lex_state = 148, .external_lex_state = 2},
- [1336] = {.lex_state = 148, .external_lex_state = 2},
- [1337] = {.lex_state = 148, .external_lex_state = 2},
- [1338] = {.lex_state = 148, .external_lex_state = 2},
- [1339] = {.lex_state = 148, .external_lex_state = 2},
- [1340] = {.lex_state = 9, .external_lex_state = 2},
- [1341] = {.lex_state = 148, .external_lex_state = 2},
- [1342] = {.lex_state = 148, .external_lex_state = 2},
- [1343] = {.lex_state = 148, .external_lex_state = 2},
- [1344] = {.lex_state = 148, .external_lex_state = 2},
- [1345] = {.lex_state = 9, .external_lex_state = 2},
- [1346] = {.lex_state = 148, .external_lex_state = 2},
- [1347] = {.lex_state = 148, .external_lex_state = 2},
- [1348] = {.lex_state = 148, .external_lex_state = 2},
- [1349] = {.lex_state = 148, .external_lex_state = 2},
- [1350] = {.lex_state = 148, .external_lex_state = 2},
- [1351] = {.lex_state = 148, .external_lex_state = 5},
+ [1328] = {.lex_state = 146, .external_lex_state = 2},
+ [1329] = {.lex_state = 146, .external_lex_state = 2},
+ [1330] = {.lex_state = 146, .external_lex_state = 2},
+ [1331] = {.lex_state = 146, .external_lex_state = 2},
+ [1332] = {.lex_state = 146, .external_lex_state = 2},
+ [1333] = {.lex_state = 146, .external_lex_state = 2},
+ [1334] = {.lex_state = 146, .external_lex_state = 2},
+ [1335] = {.lex_state = 146, .external_lex_state = 2},
+ [1336] = {.lex_state = 146, .external_lex_state = 2},
+ [1337] = {.lex_state = 146, .external_lex_state = 2},
+ [1338] = {.lex_state = 146, .external_lex_state = 2},
+ [1339] = {.lex_state = 146, .external_lex_state = 2},
+ [1340] = {.lex_state = 7, .external_lex_state = 2},
+ [1341] = {.lex_state = 146, .external_lex_state = 2},
+ [1342] = {.lex_state = 146, .external_lex_state = 2},
+ [1343] = {.lex_state = 146, .external_lex_state = 2},
+ [1344] = {.lex_state = 146, .external_lex_state = 2},
+ [1345] = {.lex_state = 7, .external_lex_state = 2},
+ [1346] = {.lex_state = 146, .external_lex_state = 2},
+ [1347] = {.lex_state = 146, .external_lex_state = 2},
+ [1348] = {.lex_state = 146, .external_lex_state = 2},
+ [1349] = {.lex_state = 146, .external_lex_state = 2},
+ [1350] = {.lex_state = 146, .external_lex_state = 2},
+ [1351] = {.lex_state = 146, .external_lex_state = 5},
[1352] = {.lex_state = 3, .external_lex_state = 2},
- [1353] = {.lex_state = 148, .external_lex_state = 2},
- [1354] = {.lex_state = 148, .external_lex_state = 2},
- [1355] = {.lex_state = 148, .external_lex_state = 5},
- [1356] = {.lex_state = 148, .external_lex_state = 2},
- [1357] = {.lex_state = 148, .external_lex_state = 2},
- [1358] = {.lex_state = 148, .external_lex_state = 2},
- [1359] = {.lex_state = 148, .external_lex_state = 5},
- [1360] = {.lex_state = 148, .external_lex_state = 2},
- [1361] = {.lex_state = 148, .external_lex_state = 2},
- [1362] = {.lex_state = 148, .external_lex_state = 2},
- [1363] = {.lex_state = 148, .external_lex_state = 2},
- [1364] = {.lex_state = 148, .external_lex_state = 2},
- [1365] = {.lex_state = 148, .external_lex_state = 2},
- [1366] = {.lex_state = 148, .external_lex_state = 2},
- [1367] = {.lex_state = 148, .external_lex_state = 5},
- [1368] = {.lex_state = 148, .external_lex_state = 5},
- [1369] = {.lex_state = 148, .external_lex_state = 2},
- [1370] = {.lex_state = 148, .external_lex_state = 2},
- [1371] = {.lex_state = 148, .external_lex_state = 5},
- [1372] = {.lex_state = 148, .external_lex_state = 2},
- [1373] = {.lex_state = 148, .external_lex_state = 2},
- [1374] = {.lex_state = 148, .external_lex_state = 2},
- [1375] = {.lex_state = 148, .external_lex_state = 2},
- [1376] = {.lex_state = 148, .external_lex_state = 2},
- [1377] = {.lex_state = 148, .external_lex_state = 2},
- [1378] = {.lex_state = 9, .external_lex_state = 2},
- [1379] = {.lex_state = 148, .external_lex_state = 2},
- [1380] = {.lex_state = 148, .external_lex_state = 2},
- [1381] = {.lex_state = 148, .external_lex_state = 2},
- [1382] = {.lex_state = 148, .external_lex_state = 2},
- [1383] = {.lex_state = 148, .external_lex_state = 2},
- [1384] = {.lex_state = 148, .external_lex_state = 2},
- [1385] = {.lex_state = 148, .external_lex_state = 2},
- [1386] = {.lex_state = 148, .external_lex_state = 2},
- [1387] = {.lex_state = 148, .external_lex_state = 2},
- [1388] = {.lex_state = 37, .external_lex_state = 2},
- [1389] = {.lex_state = 148, .external_lex_state = 2},
- [1390] = {.lex_state = 148, .external_lex_state = 2},
- [1391] = {.lex_state = 148, .external_lex_state = 2},
- [1392] = {.lex_state = 148, .external_lex_state = 2},
- [1393] = {.lex_state = 148, .external_lex_state = 2},
- [1394] = {.lex_state = 148, .external_lex_state = 2},
- [1395] = {.lex_state = 148, .external_lex_state = 2},
- [1396] = {.lex_state = 148, .external_lex_state = 2},
- [1397] = {.lex_state = 148, .external_lex_state = 2},
- [1398] = {.lex_state = 148, .external_lex_state = 2},
- [1399] = {.lex_state = 148, .external_lex_state = 2},
- [1400] = {.lex_state = 148, .external_lex_state = 2},
- [1401] = {.lex_state = 9, .external_lex_state = 2},
- [1402] = {.lex_state = 148, .external_lex_state = 2},
- [1403] = {.lex_state = 148, .external_lex_state = 2},
- [1404] = {.lex_state = 148, .external_lex_state = 2},
- [1405] = {.lex_state = 148, .external_lex_state = 2},
- [1406] = {.lex_state = 148, .external_lex_state = 2},
- [1407] = {.lex_state = 148, .external_lex_state = 2},
- [1408] = {.lex_state = 148, .external_lex_state = 2},
- [1409] = {.lex_state = 148, .external_lex_state = 2},
- [1410] = {.lex_state = 148, .external_lex_state = 2},
- [1411] = {.lex_state = 148, .external_lex_state = 2},
- [1412] = {.lex_state = 148, .external_lex_state = 2},
- [1413] = {.lex_state = 148, .external_lex_state = 2},
- [1414] = {.lex_state = 148, .external_lex_state = 2},
- [1415] = {.lex_state = 148, .external_lex_state = 2},
- [1416] = {.lex_state = 148, .external_lex_state = 2},
- [1417] = {.lex_state = 148, .external_lex_state = 2},
- [1418] = {.lex_state = 148, .external_lex_state = 2},
- [1419] = {.lex_state = 148, .external_lex_state = 2},
- [1420] = {.lex_state = 148, .external_lex_state = 2},
- [1421] = {.lex_state = 148, .external_lex_state = 2},
- [1422] = {.lex_state = 148, .external_lex_state = 5},
- [1423] = {.lex_state = 148, .external_lex_state = 2},
- [1424] = {.lex_state = 148, .external_lex_state = 2},
- [1425] = {.lex_state = 148, .external_lex_state = 2},
- [1426] = {.lex_state = 148, .external_lex_state = 2},
- [1427] = {.lex_state = 148, .external_lex_state = 2},
- [1428] = {.lex_state = 148, .external_lex_state = 5},
- [1429] = {.lex_state = 148, .external_lex_state = 2},
- [1430] = {.lex_state = 148, .external_lex_state = 2},
- [1431] = {.lex_state = 148, .external_lex_state = 5},
- [1432] = {.lex_state = 148, .external_lex_state = 2},
- [1433] = {.lex_state = 148, .external_lex_state = 2},
- [1434] = {.lex_state = 148, .external_lex_state = 2},
- [1435] = {.lex_state = 148, .external_lex_state = 2},
- [1436] = {.lex_state = 148, .external_lex_state = 2},
- [1437] = {.lex_state = 148, .external_lex_state = 2},
- [1438] = {.lex_state = 148, .external_lex_state = 2},
- [1439] = {.lex_state = 148, .external_lex_state = 2},
- [1440] = {.lex_state = 148, .external_lex_state = 2},
- [1441] = {.lex_state = 148, .external_lex_state = 2},
- [1442] = {.lex_state = 148, .external_lex_state = 2},
- [1443] = {.lex_state = 148, .external_lex_state = 5},
- [1444] = {.lex_state = 148, .external_lex_state = 2},
- [1445] = {.lex_state = 148, .external_lex_state = 2},
- [1446] = {.lex_state = 148, .external_lex_state = 2},
- [1447] = {.lex_state = 148, .external_lex_state = 2},
- [1448] = {.lex_state = 148, .external_lex_state = 2},
- [1449] = {.lex_state = 148, .external_lex_state = 2},
- [1450] = {.lex_state = 148, .external_lex_state = 2},
- [1451] = {.lex_state = 148, .external_lex_state = 2},
- [1452] = {.lex_state = 148, .external_lex_state = 2},
- [1453] = {.lex_state = 148, .external_lex_state = 5},
- [1454] = {.lex_state = 148, .external_lex_state = 2},
- [1455] = {.lex_state = 148, .external_lex_state = 2},
- [1456] = {.lex_state = 148, .external_lex_state = 2},
- [1457] = {.lex_state = 148, .external_lex_state = 2},
- [1458] = {.lex_state = 148, .external_lex_state = 5},
- [1459] = {.lex_state = 148, .external_lex_state = 2},
- [1460] = {.lex_state = 148, .external_lex_state = 2},
- [1461] = {.lex_state = 148, .external_lex_state = 2},
- [1462] = {.lex_state = 148, .external_lex_state = 2},
- [1463] = {.lex_state = 148, .external_lex_state = 2},
- [1464] = {.lex_state = 148, .external_lex_state = 2},
- [1465] = {.lex_state = 148, .external_lex_state = 2},
- [1466] = {.lex_state = 148, .external_lex_state = 2},
- [1467] = {.lex_state = 148, .external_lex_state = 2},
- [1468] = {.lex_state = 148, .external_lex_state = 2},
- [1469] = {.lex_state = 148, .external_lex_state = 2},
- [1470] = {.lex_state = 148, .external_lex_state = 5},
- [1471] = {.lex_state = 148, .external_lex_state = 5},
- [1472] = {.lex_state = 148, .external_lex_state = 2},
- [1473] = {.lex_state = 148, .external_lex_state = 2},
- [1474] = {.lex_state = 148, .external_lex_state = 2},
- [1475] = {.lex_state = 148, .external_lex_state = 2},
- [1476] = {.lex_state = 148, .external_lex_state = 2},
- [1477] = {.lex_state = 148, .external_lex_state = 2},
- [1478] = {.lex_state = 148, .external_lex_state = 2},
- [1479] = {.lex_state = 148, .external_lex_state = 2},
- [1480] = {.lex_state = 148, .external_lex_state = 2},
- [1481] = {.lex_state = 148, .external_lex_state = 2},
- [1482] = {.lex_state = 148, .external_lex_state = 2},
- [1483] = {.lex_state = 148, .external_lex_state = 2},
- [1484] = {.lex_state = 148, .external_lex_state = 2},
- [1485] = {.lex_state = 148, .external_lex_state = 2},
- [1486] = {.lex_state = 148, .external_lex_state = 2},
- [1487] = {.lex_state = 148, .external_lex_state = 2},
- [1488] = {.lex_state = 148, .external_lex_state = 2},
- [1489] = {.lex_state = 148, .external_lex_state = 2},
- [1490] = {.lex_state = 148, .external_lex_state = 2},
- [1491] = {.lex_state = 148, .external_lex_state = 2},
- [1492] = {.lex_state = 148, .external_lex_state = 2},
- [1493] = {.lex_state = 148, .external_lex_state = 2},
- [1494] = {.lex_state = 148, .external_lex_state = 5},
- [1495] = {.lex_state = 148, .external_lex_state = 2},
- [1496] = {.lex_state = 148, .external_lex_state = 2},
- [1497] = {.lex_state = 148, .external_lex_state = 2},
- [1498] = {.lex_state = 148, .external_lex_state = 2},
- [1499] = {.lex_state = 148, .external_lex_state = 2},
- [1500] = {.lex_state = 148, .external_lex_state = 2},
- [1501] = {.lex_state = 148, .external_lex_state = 2},
- [1502] = {.lex_state = 148, .external_lex_state = 2},
- [1503] = {.lex_state = 148, .external_lex_state = 2},
- [1504] = {.lex_state = 148, .external_lex_state = 2},
- [1505] = {.lex_state = 148, .external_lex_state = 2},
- [1506] = {.lex_state = 148, .external_lex_state = 2},
- [1507] = {.lex_state = 148, .external_lex_state = 2},
- [1508] = {.lex_state = 148, .external_lex_state = 2},
- [1509] = {.lex_state = 148, .external_lex_state = 2},
- [1510] = {.lex_state = 148, .external_lex_state = 2},
- [1511] = {.lex_state = 148, .external_lex_state = 2},
- [1512] = {.lex_state = 148, .external_lex_state = 2},
- [1513] = {.lex_state = 148, .external_lex_state = 2},
- [1514] = {.lex_state = 148, .external_lex_state = 2},
- [1515] = {.lex_state = 148, .external_lex_state = 2},
- [1516] = {.lex_state = 148, .external_lex_state = 2},
- [1517] = {.lex_state = 148, .external_lex_state = 2},
- [1518] = {.lex_state = 148, .external_lex_state = 5},
- [1519] = {.lex_state = 148, .external_lex_state = 2},
- [1520] = {.lex_state = 148, .external_lex_state = 2},
- [1521] = {.lex_state = 148, .external_lex_state = 2},
- [1522] = {.lex_state = 148, .external_lex_state = 2},
- [1523] = {.lex_state = 148, .external_lex_state = 2},
- [1524] = {.lex_state = 148, .external_lex_state = 2},
- [1525] = {.lex_state = 148, .external_lex_state = 2},
- [1526] = {.lex_state = 148, .external_lex_state = 5},
- [1527] = {.lex_state = 148, .external_lex_state = 2},
- [1528] = {.lex_state = 148, .external_lex_state = 2},
- [1529] = {.lex_state = 148, .external_lex_state = 5},
- [1530] = {.lex_state = 148, .external_lex_state = 5},
- [1531] = {.lex_state = 148, .external_lex_state = 2},
- [1532] = {.lex_state = 148, .external_lex_state = 2},
- [1533] = {.lex_state = 148, .external_lex_state = 2},
- [1534] = {.lex_state = 148, .external_lex_state = 2},
- [1535] = {.lex_state = 148, .external_lex_state = 2},
- [1536] = {.lex_state = 148, .external_lex_state = 2},
- [1537] = {.lex_state = 148, .external_lex_state = 2},
- [1538] = {.lex_state = 9, .external_lex_state = 2},
- [1539] = {.lex_state = 148, .external_lex_state = 2},
- [1540] = {.lex_state = 148, .external_lex_state = 2},
- [1541] = {.lex_state = 148, .external_lex_state = 2},
- [1542] = {.lex_state = 148, .external_lex_state = 2},
- [1543] = {.lex_state = 148, .external_lex_state = 2},
- [1544] = {.lex_state = 148, .external_lex_state = 2},
- [1545] = {.lex_state = 148, .external_lex_state = 2},
- [1546] = {.lex_state = 148, .external_lex_state = 2},
- [1547] = {.lex_state = 148, .external_lex_state = 2},
- [1548] = {.lex_state = 148, .external_lex_state = 2},
- [1549] = {.lex_state = 148, .external_lex_state = 2},
- [1550] = {.lex_state = 148, .external_lex_state = 2},
- [1551] = {.lex_state = 148, .external_lex_state = 2},
- [1552] = {.lex_state = 148, .external_lex_state = 2},
- [1553] = {.lex_state = 148, .external_lex_state = 2},
- [1554] = {.lex_state = 148, .external_lex_state = 2},
- [1555] = {.lex_state = 148, .external_lex_state = 2},
- [1556] = {.lex_state = 148, .external_lex_state = 2},
- [1557] = {.lex_state = 148, .external_lex_state = 5},
- [1558] = {.lex_state = 148, .external_lex_state = 2},
- [1559] = {.lex_state = 148, .external_lex_state = 5},
- [1560] = {.lex_state = 148, .external_lex_state = 2},
- [1561] = {.lex_state = 148, .external_lex_state = 5},
- [1562] = {.lex_state = 148, .external_lex_state = 2},
- [1563] = {.lex_state = 148, .external_lex_state = 2},
- [1564] = {.lex_state = 148, .external_lex_state = 2},
- [1565] = {.lex_state = 148, .external_lex_state = 2},
- [1566] = {.lex_state = 9, .external_lex_state = 2},
- [1567] = {.lex_state = 148, .external_lex_state = 2},
- [1568] = {.lex_state = 148, .external_lex_state = 2},
- [1569] = {.lex_state = 148, .external_lex_state = 2},
- [1570] = {.lex_state = 148, .external_lex_state = 5},
- [1571] = {.lex_state = 148, .external_lex_state = 2},
- [1572] = {.lex_state = 148, .external_lex_state = 2},
- [1573] = {.lex_state = 148, .external_lex_state = 2},
- [1574] = {.lex_state = 148, .external_lex_state = 2},
- [1575] = {.lex_state = 148, .external_lex_state = 2},
- [1576] = {.lex_state = 148, .external_lex_state = 2},
- [1577] = {.lex_state = 148, .external_lex_state = 2},
- [1578] = {.lex_state = 148, .external_lex_state = 2},
- [1579] = {.lex_state = 148, .external_lex_state = 2},
- [1580] = {.lex_state = 148, .external_lex_state = 2},
- [1581] = {.lex_state = 148, .external_lex_state = 2},
- [1582] = {.lex_state = 148, .external_lex_state = 2},
- [1583] = {.lex_state = 148, .external_lex_state = 2},
- [1584] = {.lex_state = 148, .external_lex_state = 2},
- [1585] = {.lex_state = 148, .external_lex_state = 2},
- [1586] = {.lex_state = 148, .external_lex_state = 2},
- [1587] = {.lex_state = 148, .external_lex_state = 2},
- [1588] = {.lex_state = 148, .external_lex_state = 2},
- [1589] = {.lex_state = 148, .external_lex_state = 2},
- [1590] = {.lex_state = 148, .external_lex_state = 2},
- [1591] = {.lex_state = 148, .external_lex_state = 2},
- [1592] = {.lex_state = 148, .external_lex_state = 2},
- [1593] = {.lex_state = 148, .external_lex_state = 5},
- [1594] = {.lex_state = 148, .external_lex_state = 2},
- [1595] = {.lex_state = 148, .external_lex_state = 2},
- [1596] = {.lex_state = 148, .external_lex_state = 2},
- [1597] = {.lex_state = 148, .external_lex_state = 2},
- [1598] = {.lex_state = 148, .external_lex_state = 2},
- [1599] = {.lex_state = 148, .external_lex_state = 2},
- [1600] = {.lex_state = 148, .external_lex_state = 2},
- [1601] = {.lex_state = 148, .external_lex_state = 2},
- [1602] = {.lex_state = 148, .external_lex_state = 2},
- [1603] = {.lex_state = 148, .external_lex_state = 2},
- [1604] = {.lex_state = 148, .external_lex_state = 2},
- [1605] = {.lex_state = 148, .external_lex_state = 2},
- [1606] = {.lex_state = 148, .external_lex_state = 2},
- [1607] = {.lex_state = 148, .external_lex_state = 2},
- [1608] = {.lex_state = 148, .external_lex_state = 2},
- [1609] = {.lex_state = 148, .external_lex_state = 2},
- [1610] = {.lex_state = 148, .external_lex_state = 2},
+ [1353] = {.lex_state = 146, .external_lex_state = 2},
+ [1354] = {.lex_state = 146, .external_lex_state = 2},
+ [1355] = {.lex_state = 146, .external_lex_state = 5},
+ [1356] = {.lex_state = 146, .external_lex_state = 2},
+ [1357] = {.lex_state = 146, .external_lex_state = 2},
+ [1358] = {.lex_state = 146, .external_lex_state = 2},
+ [1359] = {.lex_state = 146, .external_lex_state = 5},
+ [1360] = {.lex_state = 146, .external_lex_state = 2},
+ [1361] = {.lex_state = 146, .external_lex_state = 2},
+ [1362] = {.lex_state = 146, .external_lex_state = 2},
+ [1363] = {.lex_state = 146, .external_lex_state = 2},
+ [1364] = {.lex_state = 146, .external_lex_state = 2},
+ [1365] = {.lex_state = 146, .external_lex_state = 2},
+ [1366] = {.lex_state = 146, .external_lex_state = 2},
+ [1367] = {.lex_state = 146, .external_lex_state = 5},
+ [1368] = {.lex_state = 146, .external_lex_state = 5},
+ [1369] = {.lex_state = 146, .external_lex_state = 2},
+ [1370] = {.lex_state = 146, .external_lex_state = 2},
+ [1371] = {.lex_state = 146, .external_lex_state = 5},
+ [1372] = {.lex_state = 146, .external_lex_state = 2},
+ [1373] = {.lex_state = 146, .external_lex_state = 2},
+ [1374] = {.lex_state = 146, .external_lex_state = 2},
+ [1375] = {.lex_state = 146, .external_lex_state = 2},
+ [1376] = {.lex_state = 146, .external_lex_state = 2},
+ [1377] = {.lex_state = 146, .external_lex_state = 2},
+ [1378] = {.lex_state = 7, .external_lex_state = 2},
+ [1379] = {.lex_state = 146, .external_lex_state = 2},
+ [1380] = {.lex_state = 146, .external_lex_state = 2},
+ [1381] = {.lex_state = 146, .external_lex_state = 2},
+ [1382] = {.lex_state = 146, .external_lex_state = 2},
+ [1383] = {.lex_state = 146, .external_lex_state = 2},
+ [1384] = {.lex_state = 146, .external_lex_state = 2},
+ [1385] = {.lex_state = 146, .external_lex_state = 2},
+ [1386] = {.lex_state = 146, .external_lex_state = 2},
+ [1387] = {.lex_state = 146, .external_lex_state = 2},
+ [1388] = {.lex_state = 35, .external_lex_state = 2},
+ [1389] = {.lex_state = 146, .external_lex_state = 2},
+ [1390] = {.lex_state = 146, .external_lex_state = 2},
+ [1391] = {.lex_state = 146, .external_lex_state = 2},
+ [1392] = {.lex_state = 146, .external_lex_state = 2},
+ [1393] = {.lex_state = 146, .external_lex_state = 2},
+ [1394] = {.lex_state = 146, .external_lex_state = 2},
+ [1395] = {.lex_state = 146, .external_lex_state = 2},
+ [1396] = {.lex_state = 146, .external_lex_state = 2},
+ [1397] = {.lex_state = 146, .external_lex_state = 2},
+ [1398] = {.lex_state = 146, .external_lex_state = 2},
+ [1399] = {.lex_state = 146, .external_lex_state = 2},
+ [1400] = {.lex_state = 146, .external_lex_state = 2},
+ [1401] = {.lex_state = 7, .external_lex_state = 2},
+ [1402] = {.lex_state = 146, .external_lex_state = 2},
+ [1403] = {.lex_state = 146, .external_lex_state = 2},
+ [1404] = {.lex_state = 146, .external_lex_state = 2},
+ [1405] = {.lex_state = 146, .external_lex_state = 2},
+ [1406] = {.lex_state = 146, .external_lex_state = 2},
+ [1407] = {.lex_state = 146, .external_lex_state = 2},
+ [1408] = {.lex_state = 146, .external_lex_state = 2},
+ [1409] = {.lex_state = 146, .external_lex_state = 2},
+ [1410] = {.lex_state = 146, .external_lex_state = 2},
+ [1411] = {.lex_state = 146, .external_lex_state = 2},
+ [1412] = {.lex_state = 146, .external_lex_state = 2},
+ [1413] = {.lex_state = 146, .external_lex_state = 2},
+ [1414] = {.lex_state = 146, .external_lex_state = 2},
+ [1415] = {.lex_state = 146, .external_lex_state = 2},
+ [1416] = {.lex_state = 146, .external_lex_state = 2},
+ [1417] = {.lex_state = 146, .external_lex_state = 2},
+ [1418] = {.lex_state = 146, .external_lex_state = 2},
+ [1419] = {.lex_state = 146, .external_lex_state = 2},
+ [1420] = {.lex_state = 146, .external_lex_state = 2},
+ [1421] = {.lex_state = 146, .external_lex_state = 2},
+ [1422] = {.lex_state = 146, .external_lex_state = 5},
+ [1423] = {.lex_state = 146, .external_lex_state = 2},
+ [1424] = {.lex_state = 146, .external_lex_state = 2},
+ [1425] = {.lex_state = 146, .external_lex_state = 2},
+ [1426] = {.lex_state = 146, .external_lex_state = 2},
+ [1427] = {.lex_state = 146, .external_lex_state = 2},
+ [1428] = {.lex_state = 146, .external_lex_state = 5},
+ [1429] = {.lex_state = 146, .external_lex_state = 2},
+ [1430] = {.lex_state = 146, .external_lex_state = 2},
+ [1431] = {.lex_state = 146, .external_lex_state = 5},
+ [1432] = {.lex_state = 146, .external_lex_state = 2},
+ [1433] = {.lex_state = 146, .external_lex_state = 2},
+ [1434] = {.lex_state = 146, .external_lex_state = 2},
+ [1435] = {.lex_state = 146, .external_lex_state = 2},
+ [1436] = {.lex_state = 146, .external_lex_state = 2},
+ [1437] = {.lex_state = 146, .external_lex_state = 2},
+ [1438] = {.lex_state = 146, .external_lex_state = 2},
+ [1439] = {.lex_state = 146, .external_lex_state = 2},
+ [1440] = {.lex_state = 146, .external_lex_state = 2},
+ [1441] = {.lex_state = 146, .external_lex_state = 2},
+ [1442] = {.lex_state = 146, .external_lex_state = 2},
+ [1443] = {.lex_state = 146, .external_lex_state = 5},
+ [1444] = {.lex_state = 146, .external_lex_state = 2},
+ [1445] = {.lex_state = 146, .external_lex_state = 2},
+ [1446] = {.lex_state = 146, .external_lex_state = 2},
+ [1447] = {.lex_state = 146, .external_lex_state = 2},
+ [1448] = {.lex_state = 146, .external_lex_state = 2},
+ [1449] = {.lex_state = 146, .external_lex_state = 2},
+ [1450] = {.lex_state = 146, .external_lex_state = 2},
+ [1451] = {.lex_state = 146, .external_lex_state = 2},
+ [1452] = {.lex_state = 146, .external_lex_state = 2},
+ [1453] = {.lex_state = 146, .external_lex_state = 5},
+ [1454] = {.lex_state = 146, .external_lex_state = 2},
+ [1455] = {.lex_state = 146, .external_lex_state = 2},
+ [1456] = {.lex_state = 146, .external_lex_state = 2},
+ [1457] = {.lex_state = 146, .external_lex_state = 2},
+ [1458] = {.lex_state = 146, .external_lex_state = 5},
+ [1459] = {.lex_state = 146, .external_lex_state = 2},
+ [1460] = {.lex_state = 146, .external_lex_state = 2},
+ [1461] = {.lex_state = 146, .external_lex_state = 2},
+ [1462] = {.lex_state = 146, .external_lex_state = 2},
+ [1463] = {.lex_state = 146, .external_lex_state = 2},
+ [1464] = {.lex_state = 146, .external_lex_state = 2},
+ [1465] = {.lex_state = 146, .external_lex_state = 2},
+ [1466] = {.lex_state = 146, .external_lex_state = 2},
+ [1467] = {.lex_state = 146, .external_lex_state = 2},
+ [1468] = {.lex_state = 146, .external_lex_state = 2},
+ [1469] = {.lex_state = 146, .external_lex_state = 2},
+ [1470] = {.lex_state = 146, .external_lex_state = 5},
+ [1471] = {.lex_state = 146, .external_lex_state = 5},
+ [1472] = {.lex_state = 146, .external_lex_state = 2},
+ [1473] = {.lex_state = 146, .external_lex_state = 2},
+ [1474] = {.lex_state = 146, .external_lex_state = 2},
+ [1475] = {.lex_state = 146, .external_lex_state = 2},
+ [1476] = {.lex_state = 146, .external_lex_state = 2},
+ [1477] = {.lex_state = 146, .external_lex_state = 2},
+ [1478] = {.lex_state = 146, .external_lex_state = 2},
+ [1479] = {.lex_state = 146, .external_lex_state = 2},
+ [1480] = {.lex_state = 146, .external_lex_state = 2},
+ [1481] = {.lex_state = 146, .external_lex_state = 2},
+ [1482] = {.lex_state = 146, .external_lex_state = 2},
+ [1483] = {.lex_state = 146, .external_lex_state = 2},
+ [1484] = {.lex_state = 146, .external_lex_state = 2},
+ [1485] = {.lex_state = 146, .external_lex_state = 2},
+ [1486] = {.lex_state = 146, .external_lex_state = 2},
+ [1487] = {.lex_state = 146, .external_lex_state = 2},
+ [1488] = {.lex_state = 146, .external_lex_state = 2},
+ [1489] = {.lex_state = 146, .external_lex_state = 2},
+ [1490] = {.lex_state = 146, .external_lex_state = 2},
+ [1491] = {.lex_state = 146, .external_lex_state = 2},
+ [1492] = {.lex_state = 146, .external_lex_state = 2},
+ [1493] = {.lex_state = 146, .external_lex_state = 2},
+ [1494] = {.lex_state = 146, .external_lex_state = 5},
+ [1495] = {.lex_state = 146, .external_lex_state = 2},
+ [1496] = {.lex_state = 146, .external_lex_state = 2},
+ [1497] = {.lex_state = 146, .external_lex_state = 2},
+ [1498] = {.lex_state = 146, .external_lex_state = 2},
+ [1499] = {.lex_state = 146, .external_lex_state = 2},
+ [1500] = {.lex_state = 146, .external_lex_state = 2},
+ [1501] = {.lex_state = 146, .external_lex_state = 2},
+ [1502] = {.lex_state = 146, .external_lex_state = 2},
+ [1503] = {.lex_state = 146, .external_lex_state = 2},
+ [1504] = {.lex_state = 146, .external_lex_state = 2},
+ [1505] = {.lex_state = 146, .external_lex_state = 2},
+ [1506] = {.lex_state = 146, .external_lex_state = 2},
+ [1507] = {.lex_state = 146, .external_lex_state = 2},
+ [1508] = {.lex_state = 146, .external_lex_state = 2},
+ [1509] = {.lex_state = 146, .external_lex_state = 2},
+ [1510] = {.lex_state = 146, .external_lex_state = 2},
+ [1511] = {.lex_state = 146, .external_lex_state = 2},
+ [1512] = {.lex_state = 146, .external_lex_state = 2},
+ [1513] = {.lex_state = 146, .external_lex_state = 2},
+ [1514] = {.lex_state = 146, .external_lex_state = 2},
+ [1515] = {.lex_state = 146, .external_lex_state = 2},
+ [1516] = {.lex_state = 146, .external_lex_state = 2},
+ [1517] = {.lex_state = 146, .external_lex_state = 2},
+ [1518] = {.lex_state = 146, .external_lex_state = 5},
+ [1519] = {.lex_state = 146, .external_lex_state = 2},
+ [1520] = {.lex_state = 146, .external_lex_state = 2},
+ [1521] = {.lex_state = 146, .external_lex_state = 2},
+ [1522] = {.lex_state = 146, .external_lex_state = 2},
+ [1523] = {.lex_state = 146, .external_lex_state = 2},
+ [1524] = {.lex_state = 146, .external_lex_state = 2},
+ [1525] = {.lex_state = 146, .external_lex_state = 2},
+ [1526] = {.lex_state = 146, .external_lex_state = 5},
+ [1527] = {.lex_state = 146, .external_lex_state = 2},
+ [1528] = {.lex_state = 146, .external_lex_state = 2},
+ [1529] = {.lex_state = 146, .external_lex_state = 5},
+ [1530] = {.lex_state = 146, .external_lex_state = 5},
+ [1531] = {.lex_state = 146, .external_lex_state = 2},
+ [1532] = {.lex_state = 146, .external_lex_state = 2},
+ [1533] = {.lex_state = 146, .external_lex_state = 2},
+ [1534] = {.lex_state = 146, .external_lex_state = 2},
+ [1535] = {.lex_state = 146, .external_lex_state = 2},
+ [1536] = {.lex_state = 146, .external_lex_state = 2},
+ [1537] = {.lex_state = 146, .external_lex_state = 2},
+ [1538] = {.lex_state = 7, .external_lex_state = 2},
+ [1539] = {.lex_state = 146, .external_lex_state = 2},
+ [1540] = {.lex_state = 146, .external_lex_state = 2},
+ [1541] = {.lex_state = 146, .external_lex_state = 2},
+ [1542] = {.lex_state = 146, .external_lex_state = 2},
+ [1543] = {.lex_state = 146, .external_lex_state = 2},
+ [1544] = {.lex_state = 146, .external_lex_state = 2},
+ [1545] = {.lex_state = 146, .external_lex_state = 2},
+ [1546] = {.lex_state = 146, .external_lex_state = 2},
+ [1547] = {.lex_state = 146, .external_lex_state = 2},
+ [1548] = {.lex_state = 146, .external_lex_state = 2},
+ [1549] = {.lex_state = 146, .external_lex_state = 2},
+ [1550] = {.lex_state = 146, .external_lex_state = 2},
+ [1551] = {.lex_state = 146, .external_lex_state = 2},
+ [1552] = {.lex_state = 146, .external_lex_state = 2},
+ [1553] = {.lex_state = 146, .external_lex_state = 2},
+ [1554] = {.lex_state = 146, .external_lex_state = 2},
+ [1555] = {.lex_state = 146, .external_lex_state = 2},
+ [1556] = {.lex_state = 146, .external_lex_state = 2},
+ [1557] = {.lex_state = 146, .external_lex_state = 5},
+ [1558] = {.lex_state = 146, .external_lex_state = 2},
+ [1559] = {.lex_state = 146, .external_lex_state = 5},
+ [1560] = {.lex_state = 146, .external_lex_state = 2},
+ [1561] = {.lex_state = 146, .external_lex_state = 5},
+ [1562] = {.lex_state = 146, .external_lex_state = 2},
+ [1563] = {.lex_state = 146, .external_lex_state = 2},
+ [1564] = {.lex_state = 146, .external_lex_state = 2},
+ [1565] = {.lex_state = 146, .external_lex_state = 2},
+ [1566] = {.lex_state = 7, .external_lex_state = 2},
+ [1567] = {.lex_state = 146, .external_lex_state = 2},
+ [1568] = {.lex_state = 146, .external_lex_state = 2},
+ [1569] = {.lex_state = 146, .external_lex_state = 2},
+ [1570] = {.lex_state = 146, .external_lex_state = 5},
+ [1571] = {.lex_state = 146, .external_lex_state = 2},
+ [1572] = {.lex_state = 146, .external_lex_state = 2},
+ [1573] = {.lex_state = 146, .external_lex_state = 2},
+ [1574] = {.lex_state = 146, .external_lex_state = 2},
+ [1575] = {.lex_state = 146, .external_lex_state = 2},
+ [1576] = {.lex_state = 146, .external_lex_state = 2},
+ [1577] = {.lex_state = 146, .external_lex_state = 2},
+ [1578] = {.lex_state = 146, .external_lex_state = 2},
+ [1579] = {.lex_state = 146, .external_lex_state = 2},
+ [1580] = {.lex_state = 146, .external_lex_state = 2},
+ [1581] = {.lex_state = 146, .external_lex_state = 2},
+ [1582] = {.lex_state = 146, .external_lex_state = 2},
+ [1583] = {.lex_state = 146, .external_lex_state = 2},
+ [1584] = {.lex_state = 146, .external_lex_state = 2},
+ [1585] = {.lex_state = 146, .external_lex_state = 2},
+ [1586] = {.lex_state = 146, .external_lex_state = 2},
+ [1587] = {.lex_state = 146, .external_lex_state = 2},
+ [1588] = {.lex_state = 146, .external_lex_state = 2},
+ [1589] = {.lex_state = 146, .external_lex_state = 2},
+ [1590] = {.lex_state = 146, .external_lex_state = 2},
+ [1591] = {.lex_state = 146, .external_lex_state = 2},
+ [1592] = {.lex_state = 146, .external_lex_state = 2},
+ [1593] = {.lex_state = 146, .external_lex_state = 5},
+ [1594] = {.lex_state = 146, .external_lex_state = 2},
+ [1595] = {.lex_state = 146, .external_lex_state = 2},
+ [1596] = {.lex_state = 146, .external_lex_state = 2},
+ [1597] = {.lex_state = 146, .external_lex_state = 2},
+ [1598] = {.lex_state = 146, .external_lex_state = 2},
+ [1599] = {.lex_state = 146, .external_lex_state = 2},
+ [1600] = {.lex_state = 146, .external_lex_state = 2},
+ [1601] = {.lex_state = 146, .external_lex_state = 2},
+ [1602] = {.lex_state = 146, .external_lex_state = 2},
+ [1603] = {.lex_state = 146, .external_lex_state = 2},
+ [1604] = {.lex_state = 146, .external_lex_state = 2},
+ [1605] = {.lex_state = 146, .external_lex_state = 2},
+ [1606] = {.lex_state = 146, .external_lex_state = 2},
+ [1607] = {.lex_state = 146, .external_lex_state = 2},
+ [1608] = {.lex_state = 146, .external_lex_state = 2},
+ [1609] = {.lex_state = 146, .external_lex_state = 2},
+ [1610] = {.lex_state = 146, .external_lex_state = 2},
[1611] = {.lex_state = 4, .external_lex_state = 8},
- [1612] = {.lex_state = 148, .external_lex_state = 2},
- [1613] = {.lex_state = 148, .external_lex_state = 2},
- [1614] = {.lex_state = 148, .external_lex_state = 2},
- [1615] = {.lex_state = 148, .external_lex_state = 2},
- [1616] = {.lex_state = 148, .external_lex_state = 2},
- [1617] = {.lex_state = 148, .external_lex_state = 2},
- [1618] = {.lex_state = 148, .external_lex_state = 2},
- [1619] = {.lex_state = 148, .external_lex_state = 2},
- [1620] = {.lex_state = 148, .external_lex_state = 2},
- [1621] = {.lex_state = 148, .external_lex_state = 2},
- [1622] = {.lex_state = 148, .external_lex_state = 2},
- [1623] = {.lex_state = 148, .external_lex_state = 2},
- [1624] = {.lex_state = 148, .external_lex_state = 2},
- [1625] = {.lex_state = 148, .external_lex_state = 2},
- [1626] = {.lex_state = 38, .external_lex_state = 2},
- [1627] = {.lex_state = 148, .external_lex_state = 2},
- [1628] = {.lex_state = 148, .external_lex_state = 2},
- [1629] = {.lex_state = 148, .external_lex_state = 2},
- [1630] = {.lex_state = 148, .external_lex_state = 2},
- [1631] = {.lex_state = 148, .external_lex_state = 2},
- [1632] = {.lex_state = 148, .external_lex_state = 2},
- [1633] = {.lex_state = 148, .external_lex_state = 2},
- [1634] = {.lex_state = 148, .external_lex_state = 2},
- [1635] = {.lex_state = 148, .external_lex_state = 2},
- [1636] = {.lex_state = 38, .external_lex_state = 2},
- [1637] = {.lex_state = 148, .external_lex_state = 2},
- [1638] = {.lex_state = 148, .external_lex_state = 2},
- [1639] = {.lex_state = 148, .external_lex_state = 2},
- [1640] = {.lex_state = 148, .external_lex_state = 2},
- [1641] = {.lex_state = 38, .external_lex_state = 2},
- [1642] = {.lex_state = 148, .external_lex_state = 2},
- [1643] = {.lex_state = 148, .external_lex_state = 2},
- [1644] = {.lex_state = 148, .external_lex_state = 2},
- [1645] = {.lex_state = 148, .external_lex_state = 2},
- [1646] = {.lex_state = 148, .external_lex_state = 2},
+ [1612] = {.lex_state = 146, .external_lex_state = 2},
+ [1613] = {.lex_state = 146, .external_lex_state = 2},
+ [1614] = {.lex_state = 146, .external_lex_state = 2},
+ [1615] = {.lex_state = 146, .external_lex_state = 2},
+ [1616] = {.lex_state = 146, .external_lex_state = 2},
+ [1617] = {.lex_state = 146, .external_lex_state = 2},
+ [1618] = {.lex_state = 146, .external_lex_state = 2},
+ [1619] = {.lex_state = 146, .external_lex_state = 2},
+ [1620] = {.lex_state = 146, .external_lex_state = 2},
+ [1621] = {.lex_state = 146, .external_lex_state = 2},
+ [1622] = {.lex_state = 146, .external_lex_state = 2},
+ [1623] = {.lex_state = 146, .external_lex_state = 2},
+ [1624] = {.lex_state = 146, .external_lex_state = 2},
+ [1625] = {.lex_state = 146, .external_lex_state = 2},
+ [1626] = {.lex_state = 36, .external_lex_state = 2},
+ [1627] = {.lex_state = 146, .external_lex_state = 2},
+ [1628] = {.lex_state = 146, .external_lex_state = 2},
+ [1629] = {.lex_state = 146, .external_lex_state = 2},
+ [1630] = {.lex_state = 146, .external_lex_state = 2},
+ [1631] = {.lex_state = 146, .external_lex_state = 2},
+ [1632] = {.lex_state = 146, .external_lex_state = 2},
+ [1633] = {.lex_state = 146, .external_lex_state = 2},
+ [1634] = {.lex_state = 146, .external_lex_state = 2},
+ [1635] = {.lex_state = 146, .external_lex_state = 2},
+ [1636] = {.lex_state = 36, .external_lex_state = 2},
+ [1637] = {.lex_state = 146, .external_lex_state = 2},
+ [1638] = {.lex_state = 146, .external_lex_state = 2},
+ [1639] = {.lex_state = 146, .external_lex_state = 2},
+ [1640] = {.lex_state = 146, .external_lex_state = 2},
+ [1641] = {.lex_state = 36, .external_lex_state = 2},
+ [1642] = {.lex_state = 146, .external_lex_state = 2},
+ [1643] = {.lex_state = 146, .external_lex_state = 2},
+ [1644] = {.lex_state = 146, .external_lex_state = 2},
+ [1645] = {.lex_state = 146, .external_lex_state = 2},
+ [1646] = {.lex_state = 146, .external_lex_state = 2},
[1647] = {.lex_state = 4, .external_lex_state = 8},
- [1648] = {.lex_state = 38, .external_lex_state = 2},
- [1649] = {.lex_state = 148, .external_lex_state = 2},
- [1650] = {.lex_state = 148, .external_lex_state = 2},
- [1651] = {.lex_state = 148, .external_lex_state = 2},
+ [1648] = {.lex_state = 36, .external_lex_state = 2},
+ [1649] = {.lex_state = 146, .external_lex_state = 2},
+ [1650] = {.lex_state = 146, .external_lex_state = 2},
+ [1651] = {.lex_state = 146, .external_lex_state = 2},
[1652] = {.lex_state = 4, .external_lex_state = 8},
- [1653] = {.lex_state = 148, .external_lex_state = 2},
- [1654] = {.lex_state = 148, .external_lex_state = 2},
- [1655] = {.lex_state = 148, .external_lex_state = 2},
- [1656] = {.lex_state = 148, .external_lex_state = 2},
- [1657] = {.lex_state = 148, .external_lex_state = 2},
- [1658] = {.lex_state = 148, .external_lex_state = 2},
- [1659] = {.lex_state = 148, .external_lex_state = 2},
- [1660] = {.lex_state = 148, .external_lex_state = 2},
- [1661] = {.lex_state = 148, .external_lex_state = 2},
- [1662] = {.lex_state = 148, .external_lex_state = 2},
- [1663] = {.lex_state = 148, .external_lex_state = 2},
- [1664] = {.lex_state = 148, .external_lex_state = 2},
- [1665] = {.lex_state = 148, .external_lex_state = 2},
- [1666] = {.lex_state = 148, .external_lex_state = 2},
- [1667] = {.lex_state = 148, .external_lex_state = 2},
- [1668] = {.lex_state = 148, .external_lex_state = 2},
- [1669] = {.lex_state = 148, .external_lex_state = 2},
- [1670] = {.lex_state = 148, .external_lex_state = 2},
+ [1653] = {.lex_state = 146, .external_lex_state = 2},
+ [1654] = {.lex_state = 146, .external_lex_state = 2},
+ [1655] = {.lex_state = 146, .external_lex_state = 2},
+ [1656] = {.lex_state = 146, .external_lex_state = 2},
+ [1657] = {.lex_state = 146, .external_lex_state = 2},
+ [1658] = {.lex_state = 146, .external_lex_state = 2},
+ [1659] = {.lex_state = 146, .external_lex_state = 2},
+ [1660] = {.lex_state = 146, .external_lex_state = 2},
+ [1661] = {.lex_state = 146, .external_lex_state = 2},
+ [1662] = {.lex_state = 146, .external_lex_state = 2},
+ [1663] = {.lex_state = 146, .external_lex_state = 2},
+ [1664] = {.lex_state = 146, .external_lex_state = 2},
+ [1665] = {.lex_state = 146, .external_lex_state = 2},
+ [1666] = {.lex_state = 146, .external_lex_state = 2},
+ [1667] = {.lex_state = 146, .external_lex_state = 2},
+ [1668] = {.lex_state = 146, .external_lex_state = 2},
+ [1669] = {.lex_state = 146, .external_lex_state = 2},
+ [1670] = {.lex_state = 146, .external_lex_state = 2},
[1671] = {.lex_state = 4, .external_lex_state = 8},
- [1672] = {.lex_state = 148, .external_lex_state = 2},
- [1673] = {.lex_state = 148, .external_lex_state = 2},
- [1674] = {.lex_state = 148, .external_lex_state = 2},
- [1675] = {.lex_state = 148, .external_lex_state = 2},
- [1676] = {.lex_state = 148, .external_lex_state = 2},
- [1677] = {.lex_state = 148, .external_lex_state = 2},
- [1678] = {.lex_state = 148, .external_lex_state = 2},
- [1679] = {.lex_state = 148, .external_lex_state = 2},
- [1680] = {.lex_state = 148, .external_lex_state = 2},
- [1681] = {.lex_state = 148, .external_lex_state = 2},
- [1682] = {.lex_state = 148, .external_lex_state = 2},
- [1683] = {.lex_state = 148, .external_lex_state = 2},
- [1684] = {.lex_state = 148, .external_lex_state = 2},
- [1685] = {.lex_state = 148, .external_lex_state = 2},
- [1686] = {.lex_state = 148, .external_lex_state = 2},
- [1687] = {.lex_state = 148, .external_lex_state = 2},
- [1688] = {.lex_state = 148, .external_lex_state = 2},
- [1689] = {.lex_state = 148, .external_lex_state = 2},
- [1690] = {.lex_state = 148, .external_lex_state = 2},
- [1691] = {.lex_state = 148, .external_lex_state = 2},
- [1692] = {.lex_state = 148, .external_lex_state = 2},
- [1693] = {.lex_state = 148, .external_lex_state = 2},
+ [1672] = {.lex_state = 146, .external_lex_state = 2},
+ [1673] = {.lex_state = 146, .external_lex_state = 2},
+ [1674] = {.lex_state = 146, .external_lex_state = 2},
+ [1675] = {.lex_state = 146, .external_lex_state = 2},
+ [1676] = {.lex_state = 146, .external_lex_state = 2},
+ [1677] = {.lex_state = 146, .external_lex_state = 2},
+ [1678] = {.lex_state = 146, .external_lex_state = 2},
+ [1679] = {.lex_state = 146, .external_lex_state = 2},
+ [1680] = {.lex_state = 146, .external_lex_state = 2},
+ [1681] = {.lex_state = 146, .external_lex_state = 2},
+ [1682] = {.lex_state = 146, .external_lex_state = 2},
+ [1683] = {.lex_state = 146, .external_lex_state = 2},
+ [1684] = {.lex_state = 146, .external_lex_state = 2},
+ [1685] = {.lex_state = 146, .external_lex_state = 2},
+ [1686] = {.lex_state = 146, .external_lex_state = 2},
+ [1687] = {.lex_state = 146, .external_lex_state = 2},
+ [1688] = {.lex_state = 146, .external_lex_state = 2},
+ [1689] = {.lex_state = 146, .external_lex_state = 2},
+ [1690] = {.lex_state = 146, .external_lex_state = 2},
+ [1691] = {.lex_state = 146, .external_lex_state = 2},
+ [1692] = {.lex_state = 146, .external_lex_state = 2},
+ [1693] = {.lex_state = 146, .external_lex_state = 2},
};
static const uint16_t ts_parse_table[LARGE_STATE_COUNT][SYMBOL_COUNT] = {
| JSXText node trims whitespace too often
The following piece of code is valid but it is parsed incorrectly:
```typescript
<div> <div/></div>
```
Here's a link to the TypeScript Playground showing that the snippet above is valid JavaScript or TypeScript:
<!-- Please check your code at https://www.typescriptlang.org/play
and paste the URL below. -->
https://www.typescriptlang.org/play/?#code/DwEwlgbgfABKkHorAeaQ
<!-- Please run `tree-sitter parse YOUR_FILE` and show us the output. -->
The output of `tree-sitter parse` is the following:
```
program [0, 0] - [1, 0]
expression_statement [0, 0] - [0, 18]
jsx_element [0, 0] - [0, 18]
open_tag: jsx_opening_element [0, 0] - [0, 5]
name: identifier [0, 1] - [0, 4]
jsx_self_closing_element [0, 6] - [0, 12]
name: identifier [0, 7] - [0, 10]
close_tag: jsx_closing_element [0, 12] - [0, 18]
name: identifier [0, 14] - [0, 17]
```
<!-- If there is no `ERROR` or `MISSING` node in the output above,
explain what you were expecting: -->
This is wrong because there is no jsx_text node
but as seen in the ts playground link `tsc` will output
```javascript
"use strict";
React.createElement("div", null,
" ",
React.createElement("div", null));
```
Note that " " will end up inside a dom node, so I think there should be a jsx_text node here.
This is not limited to an empty string ie
```typescript
<div> foo </div>
```
will result in a dom node with the text " foo " , so if I query for 'is there a jsx_text node with text === ' foo ' tree-sitter-javascript will tell me 'no' but i think the answer should be 'yes'
---
Advice / pointers
There is *a lot* of un-specified nuance here. Ie the whitespace rules are not part of the JSX spec (https://github.com/facebook/jsx/issues/143), but are implemented by React (https://github.com/facebook/jsx/issues/40)
Basically, i think the jsx_text node should grow to encapsulate the rules around whitespace rather than trimming always.
Relevant: #227
| 2024-07-02T03:18:53 | 0.0 | [] | [] |
|||
tree-sitter/tree-sitter-javascript | tree-sitter__tree-sitter-javascript-296 | 24b9885a211bed50415acabe9a30e2d0f6896e6d | diff --git a/grammar.js b/grammar.js
index 6b2c3cc2..e5a32474 100644
--- a/grammar.js
+++ b/grammar.js
@@ -19,9 +19,10 @@ module.exports = grammar({
$._ternary_qmark,
$.html_comment,
'||',
- // We use escape sequence to tell the scanner if we're currently inside a string or template string, in which case
+ // We use escape sequence and regex pattern to tell the scanner if we're currently inside a string or template string, in which case
// it should NOT parse html comments.
$.escape_sequence,
+ $.regex_pattern,
],
extras: $ => [
diff --git a/src/grammar.json b/src/grammar.json
index fb83f44f..62128c4e 100644
--- a/src/grammar.json
+++ b/src/grammar.json
@@ -6861,6 +6861,10 @@
{
"type": "SYMBOL",
"name": "escape_sequence"
+ },
+ {
+ "type": "SYMBOL",
+ "name": "regex_pattern"
}
],
"inline": [
@@ -6888,4 +6892,3 @@
"pattern"
]
}
-
diff --git a/src/parser.c b/src/parser.c
index f48fd50d..aa43320c 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -19,7 +19,7 @@
#define SYMBOL_COUNT 267
#define ALIAS_COUNT 4
#define TOKEN_COUNT 136
-#define EXTERNAL_TOKEN_COUNT 6
+#define EXTERNAL_TOKEN_COUNT 7
#define FIELD_COUNT 38
#define MAX_ALIAS_SEQUENCE_LENGTH 7
#define PRODUCTION_ID_COUNT 106
@@ -13499,12 +13499,12 @@ static const TSLexMode ts_lex_modes[STATE_COUNT] = {
[2729] = {.lex_state = 252, .external_lex_state = 2},
[2730] = {.lex_state = 252, .external_lex_state = 2},
[2731] = {.lex_state = 34, .external_lex_state = 2},
- [2732] = {.lex_state = 4, .external_lex_state = 2},
+ [2732] = {.lex_state = 4, .external_lex_state = 8},
[2733] = {.lex_state = 34, .external_lex_state = 2},
[2734] = {.lex_state = 252, .external_lex_state = 2},
[2735] = {.lex_state = 34, .external_lex_state = 2},
[2736] = {.lex_state = 34, .external_lex_state = 2},
- [2737] = {.lex_state = 4, .external_lex_state = 2},
+ [2737] = {.lex_state = 4, .external_lex_state = 8},
[2738] = {.lex_state = 39, .external_lex_state = 2},
[2739] = {.lex_state = 252, .external_lex_state = 2},
[2740] = {.lex_state = 252, .external_lex_state = 2},
@@ -13514,7 +13514,7 @@ static const TSLexMode ts_lex_modes[STATE_COUNT] = {
[2744] = {.lex_state = 252, .external_lex_state = 2},
[2745] = {.lex_state = 34, .external_lex_state = 2},
[2746] = {.lex_state = 34, .external_lex_state = 2},
- [2747] = {.lex_state = 4, .external_lex_state = 2},
+ [2747] = {.lex_state = 4, .external_lex_state = 8},
[2748] = {.lex_state = 252, .external_lex_state = 2},
[2749] = {.lex_state = 30, .external_lex_state = 2},
[2750] = {.lex_state = 252, .external_lex_state = 2},
@@ -13526,7 +13526,7 @@ static const TSLexMode ts_lex_modes[STATE_COUNT] = {
[2756] = {.lex_state = 252, .external_lex_state = 2},
[2757] = {.lex_state = 34, .external_lex_state = 2},
[2758] = {.lex_state = 252, .external_lex_state = 2},
- [2759] = {.lex_state = 4, .external_lex_state = 2},
+ [2759] = {.lex_state = 4, .external_lex_state = 8},
[2760] = {.lex_state = 252, .external_lex_state = 2},
[2761] = {.lex_state = 34, .external_lex_state = 2},
[2762] = {.lex_state = 34, .external_lex_state = 2},
@@ -149066,6 +149066,7 @@ enum ts_external_scanner_symbol_identifiers {
ts_external_token_html_comment = 3,
ts_external_token_PIPE_PIPE = 4,
ts_external_token_escape_sequence = 5,
+ ts_external_token_regex_pattern = 6,
};
static const TSSymbol ts_external_scanner_symbol_map[EXTERNAL_TOKEN_COUNT] = {
@@ -149075,9 +149076,10 @@ static const TSSymbol ts_external_scanner_symbol_map[EXTERNAL_TOKEN_COUNT] = {
[ts_external_token_html_comment] = sym_html_comment,
[ts_external_token_PIPE_PIPE] = anon_sym_PIPE_PIPE,
[ts_external_token_escape_sequence] = sym_escape_sequence,
+ [ts_external_token_regex_pattern] = sym_regex_pattern,
};
-static const bool ts_external_scanner_states[8][EXTERNAL_TOKEN_COUNT] = {
+static const bool ts_external_scanner_states[9][EXTERNAL_TOKEN_COUNT] = {
[1] = {
[ts_external_token__automatic_semicolon] = true,
[ts_external_token__template_chars] = true,
@@ -149113,6 +149115,10 @@ static const bool ts_external_scanner_states[8][EXTERNAL_TOKEN_COUNT] = {
[ts_external_token_html_comment] = true,
[ts_external_token_escape_sequence] = true,
},
+ [8] = {
+ [ts_external_token_html_comment] = true,
+ [ts_external_token_regex_pattern] = true,
+ },
};
#ifdef __cplusplus
diff --git a/src/scanner.c b/src/scanner.c
index a25af2fe..9bea9685 100644
--- a/src/scanner.c
+++ b/src/scanner.c
@@ -9,6 +9,7 @@ enum TokenType {
HTML_COMMENT,
LOGICAL_OR,
ESCAPE_SEQUENCE,
+ REGEX_PATTERN,
};
void *tree_sitter_javascript_external_scanner_create() { return NULL; }
@@ -283,7 +284,8 @@ bool tree_sitter_javascript_external_scanner_scan(void *payload, TSLexer *lexer,
return scan_ternary_qmark(lexer);
}
- if (valid_symbols[HTML_COMMENT] && !valid_symbols[LOGICAL_OR] && !valid_symbols[ESCAPE_SEQUENCE]) {
+ if (valid_symbols[HTML_COMMENT] && !valid_symbols[LOGICAL_OR] && !valid_symbols[ESCAPE_SEQUENCE] &&
+ !valid_symbols[REGEX_PATTERN]) {
return scan_html_comment(lexer);
}
| Fail to recognize regex /<!--/
The following piece of code is valid but it is parsed incorrectly:
```javascript
var a = /<!--/;
```
I suspect that this problem stared with this commit https://github.com/tree-sitter/tree-sitter-javascript/commit/4f279cc39fc7de694258b9e63e4f5c47a872189a , the playground can parse it correctly (probably is using an old version) but if we build form this repo right now we get an error.
Here is the code that I'm using to test:
```C
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
/*
* File: main.c
* Author: mingo
*
* Created on February 6, 2024, 10:47 AM
*/
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "tree_sitter/api.h"
#include "tree_sitter/parser.h"
// Declare the `tree_sitter_c` function, which is
// implemented by the `tree-sitter-javascript` library.
TSLanguage *tree_sitter_javascript();
typedef struct {
size_t size;
char *str;
} StrData;
static StrData readcontent(const char *filename)
{
StrData data;
data.str = NULL;
data.size = 0;
FILE *fp;
fp = fopen(filename, "r");
if(fp) {
fseek(fp, 0, SEEK_END);
data.size = ftell(fp);
rewind(fp);
data.str = (char*) malloc(sizeof(char) * (data.size+1));
if(data.str)
{
size_t sz = fread(data.str, 1, data.size, fp);
if(sz == data.size)
{
data.str[data.size] = '\0';
}
else
{
free(data.str);
data.str = NULL;
}
}
fclose(fp);
}
return data;
}
bool traverse_tree(const char *fname, TSNode node)
{
uint32_t child_count = ts_node_child_count(node);
for(uint32_t i=0; i < child_count; ++i)
{
TSNode node_child = ts_node_child(node, i);
bool is_error = ts_node_is_error(node_child);
if(is_error)
{
TSPoint lc = ts_node_start_point(node_child);
fprintf(stderr, "%s:%d:%d: Syntax error\n", fname, lc.row+1, lc.column+1);
}
traverse_tree(fname, node_child);
}
}
int main(int argc, char* argv[]) {
if(argc < 2)
{
printf("usage: %s input_file_name\n", argv[0]);
return 1;
}
const char *input_pathname = argv[1];
// Build a syntax tree based on source code stored in a string.
StrData data = readcontent(input_pathname);
if (!data.str)
{
fprintf(stderr, "Error: failed to open %s\n", input_pathname);
return 1;
}
// Create a parser.
TSParser *parser = ts_parser_new();
// Set the parser's language (Javascript in this case).
ts_parser_set_language(parser, tree_sitter_javascript());
TSTree *tree = ts_parser_parse_string(
parser,
NULL,
data.str,
data.size
);
if(!tree)
{
ts_parser_delete(parser);
fprintf(stderr, "Error: parsing %s\n", input_pathname);
return 1;
}
// Get the root node of the syntax tree.
TSNode root_node = ts_tree_root_node(tree);
traverse_tree(input_pathname, root_node);
// Print the syntax tree as an S-expression.
//char *string = ts_node_string(root_node);
//printf("Syntax tree: %s\n", string);
// Free all of the heap-allocated memory.
//free(string);
ts_tree_delete(tree);
ts_parser_delete(parser);
free(data.str);
return EXIT_SUCCESS;
}
```
| 2024-02-07T14:10:32 | 0.0 | [] | [] |
|||
crate-ci/committed | crate-ci__committed-405 | 6e6c0dbe3858b6fca8c18e9147b15b15cb96a695 | diff --git a/Cargo.lock b/Cargo.lock
index 2c8ecc4..959b6c7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -62,7 +62,7 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -72,7 +72,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
dependencies = [
"anstyle",
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -218,11 +218,21 @@ dependencies = [
"regex",
"serde",
"serde_json",
+ "snapbox",
"toml",
"unicase",
"unicode-segmentation",
]
+[[package]]
+name = "content_inspector"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7bda66e858c683005a53a9a60c69a4aca7eeaa45d124526e389f7aec8e62f38"
+dependencies = [
+ "memchr",
+]
+
[[package]]
name = "derive_more"
version = "1.0.0"
@@ -250,6 +260,12 @@ version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
+[[package]]
+name = "dunce"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
+
[[package]]
name = "either"
version = "1.8.1"
@@ -285,6 +301,34 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
+[[package]]
+name = "errno"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+
+[[package]]
+name = "filetime"
+version = "0.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "libredox",
+ "windows-sys 0.59.0",
+]
+
[[package]]
name = "form_urlencoded"
version = "1.1.0"
@@ -474,6 +518,17 @@ dependencies = [
"pkg-config",
]
+[[package]]
+name = "libredox"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
+dependencies = [
+ "bitflags",
+ "libc",
+ "redox_syscall",
+]
+
[[package]]
name = "libz-sys"
version = "1.1.8"
@@ -486,6 +541,12 @@ dependencies = [
"vcpkg",
]
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+
[[package]]
name = "log"
version = "0.4.22"
@@ -507,6 +568,12 @@ dependencies = [
"adler",
]
+[[package]]
+name = "normalize-line-endings"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
+
[[package]]
name = "object"
version = "0.30.3"
@@ -536,6 +603,16 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "os_pipe"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ffd2b0a5634335b135d5728d84c5e0fd726954b87111f7506a61c502280d982"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
[[package]]
name = "percent-encoding"
version = "2.2.0"
@@ -596,6 +673,15 @@ dependencies = [
"proc-macro2",
]
+[[package]]
+name = "redox_syscall"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
+dependencies = [
+ "bitflags",
+]
+
[[package]]
name = "regex"
version = "1.11.0"
@@ -641,12 +727,34 @@ version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+[[package]]
+name = "rustix"
+version = "0.38.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.52.0",
+]
+
[[package]]
name = "ryu"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
[[package]]
name = "serde"
version = "1.0.210"
@@ -688,12 +796,49 @@ dependencies = [
"serde",
]
+[[package]]
+name = "similar"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e"
+
[[package]]
name = "siphasher"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
+[[package]]
+name = "snapbox"
+version = "0.6.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "881f1849454828a68363dd288b7a0a071e55e2a4356d2c38b567db18a9be0d9f"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "content_inspector",
+ "dunce",
+ "filetime",
+ "libc",
+ "normalize-line-endings",
+ "os_pipe",
+ "similar",
+ "snapbox-macros",
+ "tempfile",
+ "wait-timeout",
+ "walkdir",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "snapbox-macros"
+version = "0.3.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16569f53ca23a41bb6f62e0a5084aa1661f4814a67fa33696a79073e03a664af"
+dependencies = [
+ "anstream",
+]
+
[[package]]
name = "strsim"
version = "0.11.0"
@@ -711,6 +856,19 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "tempfile"
+version = "3.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
[[package]]
name = "termcolor"
version = "1.4.1"
@@ -849,6 +1007,25 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+[[package]]
+name = "wait-timeout"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
@@ -877,7 +1054,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
- "windows-sys",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -892,7 +1069,25 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
- "windows-targets",
+ "windows-targets 0.48.0",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets 0.52.6",
]
[[package]]
@@ -901,13 +1096,29 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5"
dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
+ "windows_aarch64_gnullvm 0.48.0",
+ "windows_aarch64_msvc 0.48.0",
+ "windows_i686_gnu 0.48.0",
+ "windows_i686_msvc 0.48.0",
+ "windows_x86_64_gnu 0.48.0",
+ "windows_x86_64_gnullvm 0.48.0",
+ "windows_x86_64_msvc 0.48.0",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.6",
+ "windows_aarch64_msvc 0.52.6",
+ "windows_i686_gnu 0.52.6",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.6",
+ "windows_x86_64_gnu 0.52.6",
+ "windows_x86_64_gnullvm 0.52.6",
+ "windows_x86_64_msvc 0.52.6",
]
[[package]]
@@ -916,42 +1127,90 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
[[package]]
name = "windows_i686_gnu"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
[[package]]
name = "windows_i686_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+
[[package]]
name = "winnow"
version = "0.6.20"
diff --git a/crates/committed/Cargo.toml b/crates/committed/Cargo.toml
index d411018..011f3ca 100644
--- a/crates/committed/Cargo.toml
+++ b/crates/committed/Cargo.toml
@@ -50,5 +50,8 @@ human-panic = "2.0.0"
anstream = "0.6.13"
anstyle = "1.0.6"
+[dev-dependencies]
+snapbox = { version = "0.6.19", features = ["cmd", "path"] }
+
[lints]
workspace = true
diff --git a/crates/committed/src/report.rs b/crates/committed/src/report.rs
index c3a1c95..499ca12 100644
--- a/crates/committed/src/report.rs
+++ b/crates/committed/src/report.rs
@@ -31,7 +31,7 @@ pub(crate) enum Source<'s> {
#[serde(serialize_with = "serialize_oid")]
Oid(git2::Oid),
ShortId(&'s str),
- #[display("{}", "_0.display()")]
+ #[display("{}", _0.display())]
Path(&'s std::path::Path),
}
| Output format changed for stdin
I use committed as an Emacs [Flycheck](https://www.flycheck.org/en/latest/) checker, where the commit I am editing is piped into committed's stdin.
Prior to v1.1.0 the output of e.g. `echo foo | committed` was
```
-: error Subject should be capitalized but found `foo`
```
In v1.1.0 it is now
```
_0.display(): error Subject should be capitalized but found `foo`
```
This breaks my Flycheck config, which itself is something I'm fine with fixing, but the new output doesn't look intentional, so I assumed it was a bug.
| 2024-10-31T16:12:46 | 0.0 | [] | [] |
|||
pfnet/pytorch-pfn-extras | pfnet__pytorch-pfn-extras-789 | 8500fe2ab07f722bf551a623742b94392c1182b5 | diff --git a/pytorch_pfn_extras/training/triggers/__init__.py b/pytorch_pfn_extras/training/triggers/__init__.py
index 99602a71..4ad5450b 100644
--- a/pytorch_pfn_extras/training/triggers/__init__.py
+++ b/pytorch_pfn_extras/training/triggers/__init__.py
@@ -2,6 +2,9 @@
from pytorch_pfn_extras.training.triggers.early_stopping_trigger import ( # NOQA
EarlyStoppingTrigger,
)
+from pytorch_pfn_extras.training.triggers.function_trigger import ( # NOQA
+ FunctionTrigger,
+)
from pytorch_pfn_extras.training.triggers.interval_trigger import ( # NOQA
IntervalTrigger,
)
diff --git a/pytorch_pfn_extras/training/triggers/function_trigger.py b/pytorch_pfn_extras/training/triggers/function_trigger.py
new file mode 100644
index 00000000..ffe23bf8
--- /dev/null
+++ b/pytorch_pfn_extras/training/triggers/function_trigger.py
@@ -0,0 +1,54 @@
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ Mapping,
+ Optional,
+ Sequence,
+)
+
+from pytorch_pfn_extras.training import trigger as trigger_module
+from pytorch_pfn_extras.training._manager_protocol import (
+ ExtensionsManagerProtocol,
+)
+
+if TYPE_CHECKING:
+ from pytorch_pfn_extras.training._trigger_util import TriggerLike
+
+
+class FunctionTrigger(trigger_module.Trigger):
+ def __init__(
+ self,
+ fn: Callable[..., bool],
+ args: Optional[Sequence[Any]] = None,
+ kwargs: Optional[Mapping[str, Any]] = None,
+ trigger: "TriggerLike" = (1, "iteration"),
+ ) -> None:
+ self._fn = fn
+ self._args = args or []
+ self._kwargs = kwargs or {}
+ self._interval_trigger = trigger_module.get_trigger(trigger)
+
+ def __call__(self, manager: ExtensionsManagerProtocol) -> bool:
+ if not self._interval_trigger(manager):
+ return False
+
+ return self._fn(*self._args, **self._kwargs)
+
+ def state_dict(self) -> Dict[str, Any]:
+ state = {
+ "interval_trigger": self._interval_trigger.state_dict(),
+ }
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ self._interval_trigger.load_state_dict(to_load["interval_trigger"])
+
+ def may_fire(self, iteration: int, epoch_len: int) -> bool:
+ if self._interval_trigger.may_fire(
+ iteration=iteration, epoch_len=epoch_len
+ ):
+ return self._fn(*self._args, **self._kwargs)
+ else:
+ return False
| Custom function trigger
Implement a helper Trigger class to be triggered by a user-implemented function.
```
class CustomTrigger(trigger.Trigger):
class __init__(self, fn: Callable[..., bool], args: Sequential[Any], kwargs: Mapping[Any], check_trigger: TriggerLike) -> None
...
```
| 2023-11-17T10:34:47 | 0.0 | [] | [] |
|||
pfnet/pytorch-pfn-extras | pfnet__pytorch-pfn-extras-788 | 89a92c51d6d5308854835d083f5d9e3d1d1b147f | diff --git a/pytorch_pfn_extras/distributed/_distributed_validation_sampler.py b/pytorch_pfn_extras/distributed/_distributed_validation_sampler.py
index bbf1bd2ab..32744c587 100644
--- a/pytorch_pfn_extras/distributed/_distributed_validation_sampler.py
+++ b/pytorch_pfn_extras/distributed/_distributed_validation_sampler.py
@@ -26,13 +26,13 @@ def __init__(
seed: int = 0,
) -> None:
if num_replicas is None:
- if not dist.is_available(): # type: ignore[no-untyped-call]
+ if not dist.is_available() or not dist.is_initialized(): # type: ignore[no-untyped-call]
raise RuntimeError(
"Requires distributed package to be available"
)
num_replicas = dist.get_world_size() # type: ignore[no-untyped-call]
if rank is None:
- if not dist.is_available(): # type: ignore[no-untyped-call]
+ if not dist.is_available() or not dist.is_initialized(): # type: ignore[no-untyped-call]
raise RuntimeError(
"Requires distributed package to be available"
)
| Fix nightly CPU test failures
https://github.com/pfnet/pytorch-pfn-extras/actions/workflows/nightly-test-cpu.yml
| Latest summary https://github.com/pfnet/pytorch-pfn-extras/actions/runs/6764593441/job/18383088613#step:5:366
```
=========================== short test summary info ============================
FAILED tests/pytorch_pfn_extras_tests/distributed_tests/test_distributed_validation_sampler.py::test_no_distributed_available - ValueError: Default process group has not been initialized, please make sure to call init_process_group.
FAILED tests/pytorch_pfn_extras_tests/training_tests/test_manager.py::test_ignite_extensions_manager_state_dict - UserWarning: PairwiseParallel is deprecated and will be removed soon. Use ColwiseParallel and RowwiseParallel instead.
FAILED tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_evaluator.py::test_ignite_evaluator_reporting_metrics - UserWarning: PairwiseParallel is deprecated and will be removed soon. Use ColwiseParallel and RowwiseParallel instead.
FAILED tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_progress_bar_notebook.py::test_ignite_extensions_manager_with_progressbar_notebook - UserWarning: PairwiseParallel is deprecated and will be removed soon. Use ColwiseParallel and RowwiseParallel instead.
``` | 2023-11-15T07:42:20 | 0.0 | [] | [] |
||
pfnet/pytorch-pfn-extras | pfnet__pytorch-pfn-extras-761 | 12d0430f331289bc26ffb586daf41469baa255f8 | diff --git a/pytorch_pfn_extras/training/extensions/__init__.py b/pytorch_pfn_extras/training/extensions/__init__.py
index a768f9d8..56df7ee0 100644
--- a/pytorch_pfn_extras/training/extensions/__init__.py
+++ b/pytorch_pfn_extras/training/extensions/__init__.py
@@ -5,6 +5,13 @@
SnapshotMode,
snapshot_object,
)
+from pytorch_pfn_extras.training.extensions.accumulate import ( # NOQA
+ AverageAccumulate,
+ MaxAccumulate,
+ MinAccumulate,
+ StandardDeviationAccumulate,
+ UnbiasedStandardDeviationAccumulate,
+)
from pytorch_pfn_extras.training.extensions.best_value import BestValue # NOQA
from pytorch_pfn_extras.training.extensions.best_value import MaxValue # NOQA
from pytorch_pfn_extras.training.extensions.best_value import MinValue # NOQA
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/__init__.py b/pytorch_pfn_extras/training/extensions/accumulate/__init__.py
new file mode 100644
index 00000000..a43e69df
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/__init__.py
@@ -0,0 +1,9 @@
+from .average_accumulate import AverageAccumulate # NOQA: F401
+from .max_accumulate import MaxAccumulate # NOQA: F401
+from .min_accumulate import MinAccumulate # NOQA: F401
+from .standard_deviation_accumulate import ( # NOQA: F401
+ StandardDeviationAccumulate,
+)
+from .unbiased_standard_deviation_accumulate import ( # NOQA: F401
+ UnbiasedStandardDeviationAccumulate,
+)
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_accumulate_base.py b/pytorch_pfn_extras/training/extensions/accumulate/_accumulate_base.py
new file mode 100644
index 00000000..6432556c
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_accumulate_base.py
@@ -0,0 +1,69 @@
+from abc import ABC, abstractmethod
+from typing import Any, Dict, Tuple
+
+import torch.distributed
+from pytorch_pfn_extras import reporting
+from pytorch_pfn_extras.training import extension
+from pytorch_pfn_extras.training._manager_protocol import (
+ ExtensionsManagerProtocol,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary import (
+ SummaryBase,
+)
+from pytorch_pfn_extras.training.trigger import TriggerLike, get_trigger
+
+
+class AccumulateBase(ABC, extension.Extension):
+ priority = extension.PRIORITY_EDITOR
+
+ def __init__(
+ self,
+ conversion_key_pair: Tuple[str, str],
+ trigger: TriggerLike = (1, "epoch"),
+ distributed: bool = False,
+ ) -> None:
+ self._conversion_key_pair = conversion_key_pair
+ self._trigger = get_trigger(trigger=trigger)
+ self._distributed = distributed
+ if not torch.distributed.is_initialized() and self._distributed: # type: ignore[no-untyped-call]
+ raise RuntimeError("PyTorch distributed module is not initialized.")
+
+ self._init_summary()
+
+ def __call__(self, manager: ExtensionsManagerProtocol) -> None:
+ observation = manager.observation
+ src_key, dst_key = self._conversion_key_pair
+ self._summary.add(observation[src_key])
+
+ if self._trigger(manager=manager):
+ if self._distributed:
+ summary = self._all_reduce_summaries()
+ else:
+ summary = self._summary
+ reporting.report({dst_key: summary.compute_accumulate()})
+ self._init_summary()
+
+ def state_dict(self) -> Dict[str, Any]:
+ state: Dict[str, Any] = {}
+ if hasattr(self._trigger, "state_dict"):
+ state["_trigger"] = self._trigger.state_dict()
+ state["_summary"] = self._summary.state_dict()
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ if hasattr(self._trigger, "load_state_dict"):
+ self._trigger.load_state_dict(to_load["_trigger"])
+ self._summary.load_state_dict(to_load["_summary"])
+
+ @property
+ @abstractmethod
+ def _summary(self) -> SummaryBase:
+ ...
+
+ @abstractmethod
+ def _init_summary(self) -> None:
+ ...
+
+ @abstractmethod
+ def _all_reduce_summaries(self) -> SummaryBase:
+ ...
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_accumulate_utils.py b/pytorch_pfn_extras/training/extensions/accumulate/_accumulate_utils.py
new file mode 100644
index 00000000..e85325db
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_accumulate_utils.py
@@ -0,0 +1,12 @@
+from typing import List, Optional, TypeVar
+
+import torch.distributed
+
+T = TypeVar("T")
+
+
+def all_gather_object(obj: T) -> List[Optional[T]]:
+ world_size = torch.distributed.get_world_size() # type: ignore
+ object_list: List[Optional[T]] = [None for _ in range(world_size)]
+ torch.distributed.all_gather_object(object_list=object_list, obj=obj) # type: ignore
+ return object_list
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/__init__.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/__init__.py
new file mode 100644
index 00000000..97c76860
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/__init__.py
@@ -0,0 +1,8 @@
+from ._average_summary import AverageSummary # NOQA: F401
+from ._base_summary import SummaryBase # NOQA: F401
+from ._max_summary import MaxSummary # NOQA: F401
+from ._min_summary import MinSummary # NOQA: F401
+from ._standard_deviation_summary import StandardDeviationSummary # NOQA: F401
+from ._unbiased_standard_deviation_summary import ( # NOQA: F401
+ UnbiasedStandardDeviationSummary,
+)
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_average_summary.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_average_summary.py
new file mode 100644
index 00000000..7905b364
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_average_summary.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import warnings
+from typing import Any, Dict
+
+from pytorch_pfn_extras.reporting import Scalar, Value
+from pytorch_pfn_extras.training.extensions.accumulate._summary._base_summary import (
+ SummaryBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary._summary_utils import (
+ nograd,
+)
+
+
+class AverageSummary(SummaryBase):
+ def __init__(self) -> None:
+ self._x: Scalar = 0.0
+ self._n: Scalar = 0
+ super().__init__()
+
+ def add(self, value: Value, weight: Scalar = 1) -> None:
+ if callable(value):
+ self._deferred.append((value, weight))
+ return
+ m = self._n / (self._n + weight)
+ self._x = self._x * m + value / weight * (1 - m)
+ self._n += weight
+
+ def state_dict(self) -> Dict[str, Any]:
+ self._add_deferred_values()
+ state = {}
+ try:
+ # Save the stats as python scalars in order to avoid
+ # different device errors when loading them back
+ state = {
+ "_x": float(self._x),
+ "_n": int(self._n),
+ }
+ except KeyError:
+ warnings.warn("The previous statistics are not saved.")
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ self._add_deferred_values()
+ self._x = float(nograd(to_load["_x"]))
+ self._n = int(nograd(to_load["_n"]))
+
+ def compute_average(self) -> Scalar:
+ self._add_deferred_values()
+ return self._x
+
+ def compute_accumulate(self) -> Scalar:
+ return self.compute_average()
+
+ def __add__(self, other: AverageSummary) -> AverageSummary:
+ s = AverageSummary()
+ m = self._n / (self._n + other._n)
+ s._x = self._x * m + other._x * (1 - m)
+ s._n = self._n + other._n
+ s._deferred = self._deferred + other._deferred
+ return s
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_base_summary.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_base_summary.py
new file mode 100644
index 00000000..f976b014
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_base_summary.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import Any, Callable, Dict, List, Tuple
+
+from pytorch_pfn_extras.reporting import Scalar, Value
+
+
+class SummaryBase(ABC):
+ def __init__(self) -> None:
+ super().__init__()
+ self._deferred: List[Tuple[Callable[[], float], Scalar]] = []
+
+ @abstractmethod
+ def add(self, value: Value, weight: Scalar = 1) -> None:
+ ...
+
+ @abstractmethod
+ def compute_accumulate(self) -> Scalar:
+ ...
+
+ @abstractmethod
+ def state_dict(self) -> Dict[str, Any]:
+ ...
+
+ @abstractmethod
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ ...
+
+ def _add_deferred_values(self) -> None:
+ for fn, weight in self._deferred:
+ value = fn()
+ self.add(value=value, weight=weight)
+ self._deferred.clear()
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_max_summary.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_max_summary.py
new file mode 100644
index 00000000..f5026e27
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_max_summary.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import warnings
+from typing import Any, Dict
+
+from pytorch_pfn_extras.reporting import Scalar, Value
+from pytorch_pfn_extras.training.extensions.accumulate._summary._base_summary import (
+ SummaryBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary._summary_utils import (
+ nograd,
+)
+
+
+class MaxSummary(SummaryBase):
+ def __init__(self) -> None:
+ self._max_x: Scalar = -float("inf")
+ super().__init__()
+
+ def add(self, value: Value, weight: Scalar = 1) -> None:
+ if callable(value):
+ self._deferred.append((value, weight))
+ return
+ self._max_x = self._max_x if self._max_x > value else value
+
+ def state_dict(self) -> Dict[str, Any]:
+ self._add_deferred_values()
+ state = {}
+ try:
+ # Save the stats as python scalars in order to avoid
+ # different device errors when loading them back
+ state = {
+ "_max_x": float(self._max_x),
+ }
+ except KeyError:
+ warnings.warn("The previous statistics are not saved.")
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ self._add_deferred_values()
+ self._max_x = float(nograd(to_load["_max_x"]))
+
+ def compute_max(self) -> Scalar:
+ self._add_deferred_values()
+ return self._max_x
+
+ def compute_accumulate(self) -> Scalar:
+ return self.compute_max()
+
+ def __add__(self, other: MaxSummary) -> MaxSummary:
+ s = MaxSummary()
+ s._max_x = self._max_x if self._max_x > other._max_x else other._max_x
+ s._deferred = self._deferred + other._deferred
+ return s
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_min_summary.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_min_summary.py
new file mode 100644
index 00000000..fe3767f6
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_min_summary.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import warnings
+from typing import Any, Dict
+
+from pytorch_pfn_extras.reporting import Scalar, Value
+from pytorch_pfn_extras.training.extensions.accumulate._summary._base_summary import (
+ SummaryBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary._summary_utils import (
+ nograd,
+)
+
+
+class MinSummary(SummaryBase):
+ def __init__(self) -> None:
+ self._min_x: Scalar = float("inf")
+ super().__init__()
+
+ def add(self, value: Value, weight: Scalar = 1) -> None:
+ if callable(value):
+ self._deferred.append((value, weight))
+ return
+ self._min_x = self._min_x if self._min_x < value else value
+
+ def state_dict(self) -> Dict[str, Any]:
+ self._add_deferred_values()
+ state = {}
+ try:
+ # Save the stats as python scalars in order to avoid
+ # different device errors when loading them back
+ state = {
+ "_min_x": float(self._min_x),
+ }
+ except KeyError:
+ warnings.warn("The previous statistics are not saved.")
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ self._add_deferred_values()
+ self._min_x = float(nograd(to_load["_min_x"]))
+
+ def compute_min(self) -> Scalar:
+ self._add_deferred_values()
+ return self._min_x
+
+ def compute_accumulate(self) -> Scalar:
+ return self.compute_min()
+
+ def __add__(self, other: MinSummary) -> MinSummary:
+ s = MinSummary()
+ s._min_x = self._min_x if self._min_x < other._min_x else other._min_x
+ s._deferred = self._deferred + other._deferred
+ return s
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_standard_deviation_summary.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_standard_deviation_summary.py
new file mode 100644
index 00000000..0f01f619
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_standard_deviation_summary.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import warnings
+from typing import Any, Dict
+
+import numpy
+import torch
+from pytorch_pfn_extras.reporting import Scalar, Value
+from pytorch_pfn_extras.training.extensions.accumulate._summary._base_summary import (
+ SummaryBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary._summary_utils import (
+ nograd,
+)
+
+
+class StandardDeviationSummary(SummaryBase):
+ def __init__(self) -> None:
+ self._x: Scalar = 0.0
+ self._x2: Scalar = 0.0
+ self._n: Scalar = 0
+ super().__init__()
+
+ def add(self, value: Value, weight: Scalar = 1) -> None:
+ if callable(value):
+ self._deferred.append((value, weight))
+ return
+ self._x += weight * value
+ self._x2 += weight * value * value
+ self._n += weight
+
+ def state_dict(self) -> Dict[str, Any]:
+ self._add_deferred_values()
+ state = {}
+ try:
+ # Save the stats as python scalars in order to avoid
+ # different device errors when loading them back
+ state = {
+ "_x": float(self._x),
+ "_x2": float(self._x2),
+ "_n": float(self._n),
+ }
+ except KeyError:
+ warnings.warn("The previous statistics are not saved.")
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ self._add_deferred_values()
+ self._x = float(nograd(to_load["_x"]))
+ self._x2 = float(nograd(to_load["_x2"]))
+ self._n = float(nograd(to_load["_n"]))
+
+ def compute_mean(self) -> Scalar:
+ self._add_deferred_values()
+ x, n = self._x, self._n
+ return x / n
+
+ def compute_standard_deviation(self) -> Scalar:
+ self._add_deferred_values()
+ x, n = self._x, self._n
+ mean = x / n
+ var = self._x2 / n - mean * mean
+ if isinstance(var, torch.Tensor):
+ return torch.sqrt(var)
+ else:
+ return numpy.sqrt(var)
+
+ def compute_accumulate(self) -> Scalar:
+ return self.compute_standard_deviation()
+
+ def __add__(
+ self, other: StandardDeviationSummary
+ ) -> StandardDeviationSummary:
+ s = StandardDeviationSummary()
+ s._x = self._x + other._x
+ s._x2 = self._x2 + other._x2
+ s._n = self._n + other._n
+ s._deferred = self._deferred + other._deferred
+ return s
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_summary_utils.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_summary_utils.py
new file mode 100644
index 00000000..a89a764a
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_summary_utils.py
@@ -0,0 +1,8 @@
+import torch
+from pytorch_pfn_extras.reporting import Scalar
+
+
+def nograd(value: Scalar) -> Scalar:
+ if isinstance(value, torch.Tensor):
+ return value.detach()
+ return value
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/_summary/_unbiased_standard_deviation_summary.py b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_unbiased_standard_deviation_summary.py
new file mode 100644
index 00000000..b6f57b3f
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/_summary/_unbiased_standard_deviation_summary.py
@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+import warnings
+from typing import Any, Dict
+
+import numpy
+import torch
+from pytorch_pfn_extras.reporting import Scalar, Value
+from pytorch_pfn_extras.training.extensions.accumulate._summary._base_summary import (
+ SummaryBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary._summary_utils import (
+ nograd,
+)
+
+
+class UnbiasedStandardDeviationSummary(SummaryBase):
+ def __init__(self) -> None:
+ self._x: Scalar = 0.0
+ self._x2: Scalar = 0.0
+ self._n: Scalar = 0
+ super().__init__()
+
+ def add(self, value: Value, weight: Scalar = 1) -> None:
+ if callable(value):
+ self._deferred.append((value, weight))
+ return
+ self._x += weight * value
+ self._x2 += weight * value * value
+ self._n += weight
+
+ def state_dict(self) -> Dict[str, Any]:
+ self._add_deferred_values()
+ state = {}
+ try:
+ # Save the stats as python scalars in order to avoid
+ # different device errors when loading them back
+ state = {
+ "_x": float(self._x),
+ "_x2": float(self._x2),
+ "_n": float(self._n),
+ }
+ except KeyError:
+ warnings.warn("The previous statistics are not saved.")
+ return state
+
+ def load_state_dict(self, to_load: Dict[str, Any]) -> None:
+ self._add_deferred_values()
+ self._x = float(nograd(to_load["_x"]))
+ self._x2 = float(nograd(to_load["_x2"]))
+ self._n = float(nograd(to_load["_n"]))
+
+ def compute_mean(self) -> Scalar:
+ self._add_deferred_values()
+ x, n = self._x, self._n
+ return x / n
+
+ def compute_unbiased_standard_deviation(self) -> Scalar:
+ self._add_deferred_values()
+ x, n = self._x, self._n
+ if n <= 1:
+ return float("nan")
+ mean = x / n
+ var = self._x2 / n - mean * mean
+ unbiased_var = var * (n / (n - 1))
+ if isinstance(unbiased_var, torch.Tensor):
+ return torch.sqrt(unbiased_var)
+ else:
+ return numpy.sqrt(unbiased_var)
+
+ def compute_accumulate(self) -> Scalar:
+ return self.compute_unbiased_standard_deviation()
+
+ def __add__(
+ self, other: UnbiasedStandardDeviationSummary
+ ) -> UnbiasedStandardDeviationSummary:
+ s = UnbiasedStandardDeviationSummary()
+ s._x = self._x + other._x
+ s._x2 = self._x2 + other._x2
+ s._n = self._n + other._n
+ s._deferred = self._deferred + other._deferred
+ return s
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/average_accumulate.py b/pytorch_pfn_extras/training/extensions/accumulate/average_accumulate.py
new file mode 100644
index 00000000..a2815d7b
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/average_accumulate.py
@@ -0,0 +1,23 @@
+from pytorch_pfn_extras.training.extensions.accumulate._accumulate_base import (
+ AccumulateBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary import (
+ AverageSummary,
+ SummaryBase,
+)
+
+from ._accumulate_utils import all_gather_object
+
+
+class AverageAccumulate(AccumulateBase):
+ @property
+ def _summary(self) -> SummaryBase:
+ return self._average_summary
+
+ def _init_summary(self) -> None:
+ self._average_summary = AverageSummary()
+
+ def _all_reduce_summaries(self) -> SummaryBase:
+ summaries = all_gather_object(self._average_summary)
+ all_reduced_summary = sum(filter(None, summaries), AverageSummary())
+ return all_reduced_summary
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/max_accumulate.py b/pytorch_pfn_extras/training/extensions/accumulate/max_accumulate.py
new file mode 100644
index 00000000..19756182
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/max_accumulate.py
@@ -0,0 +1,23 @@
+from pytorch_pfn_extras.training.extensions.accumulate._accumulate_base import (
+ AccumulateBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary import (
+ MaxSummary,
+ SummaryBase,
+)
+
+from ._accumulate_utils import all_gather_object
+
+
+class MaxAccumulate(AccumulateBase):
+ @property
+ def _summary(self) -> SummaryBase:
+ return self._max_summary
+
+ def _init_summary(self) -> None:
+ self._max_summary = MaxSummary()
+
+ def _all_reduce_summaries(self) -> SummaryBase:
+ summaries = all_gather_object(self._max_summary)
+ all_reduced_summary = sum(filter(None, summaries), MaxSummary())
+ return all_reduced_summary
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/min_accumulate.py b/pytorch_pfn_extras/training/extensions/accumulate/min_accumulate.py
new file mode 100644
index 00000000..96aed2df
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/min_accumulate.py
@@ -0,0 +1,23 @@
+from pytorch_pfn_extras.training.extensions.accumulate._accumulate_base import (
+ AccumulateBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary import (
+ MinSummary,
+ SummaryBase,
+)
+
+from ._accumulate_utils import all_gather_object
+
+
+class MinAccumulate(AccumulateBase):
+ @property
+ def _summary(self) -> SummaryBase:
+ return self._min_summary
+
+ def _init_summary(self) -> None:
+ self._min_summary = MinSummary()
+
+ def _all_reduce_summaries(self) -> SummaryBase:
+ summaries = all_gather_object(self._min_summary)
+ all_reduced_summary = sum(filter(None, summaries), MinSummary())
+ return all_reduced_summary
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/standard_deviation_accumulate.py b/pytorch_pfn_extras/training/extensions/accumulate/standard_deviation_accumulate.py
new file mode 100644
index 00000000..489ac1fd
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/standard_deviation_accumulate.py
@@ -0,0 +1,25 @@
+from pytorch_pfn_extras.training.extensions.accumulate._accumulate_base import (
+ AccumulateBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary import (
+ StandardDeviationSummary,
+ SummaryBase,
+)
+
+from ._accumulate_utils import all_gather_object
+
+
+class StandardDeviationAccumulate(AccumulateBase):
+ @property
+ def _summary(self) -> SummaryBase:
+ return self._standard_deviation_summary
+
+ def _init_summary(self) -> None:
+ self._standard_deviation_summary = StandardDeviationSummary()
+
+ def _all_reduce_summaries(self) -> SummaryBase:
+ summaries = all_gather_object(self._standard_deviation_summary)
+ all_reduced_summary = sum(
+ filter(None, summaries), StandardDeviationSummary()
+ )
+ return all_reduced_summary
diff --git a/pytorch_pfn_extras/training/extensions/accumulate/unbiased_standard_deviation_accumulate.py b/pytorch_pfn_extras/training/extensions/accumulate/unbiased_standard_deviation_accumulate.py
new file mode 100644
index 00000000..ba5acef1
--- /dev/null
+++ b/pytorch_pfn_extras/training/extensions/accumulate/unbiased_standard_deviation_accumulate.py
@@ -0,0 +1,25 @@
+from pytorch_pfn_extras.training.extensions.accumulate._accumulate_base import (
+ AccumulateBase,
+)
+from pytorch_pfn_extras.training.extensions.accumulate._summary import (
+ SummaryBase,
+ UnbiasedStandardDeviationSummary,
+)
+
+from ._accumulate_utils import all_gather_object
+
+
+class UnbiasedStandardDeviationAccumulate(AccumulateBase):
+ @property
+ def _summary(self) -> SummaryBase:
+ return self._standard_deviation_summary
+
+ def _init_summary(self) -> None:
+ self._standard_deviation_summary = UnbiasedStandardDeviationSummary()
+
+ def _all_reduce_summaries(self) -> SummaryBase:
+ summaries = all_gather_object(self._standard_deviation_summary)
+ all_reduced_summary = sum(
+ filter(None, summaries), UnbiasedStandardDeviationSummary()
+ )
+ return all_reduced_summary
| Support Min/Max in report summaries
Currently we only support the mean of all reported metrics, we should provide ways to show more stats.
https://github.com/pfnet/pytorch-pfn-extras/blob/master/pytorch_pfn_extras/reporting.py#L333-L338
| 2023-10-16T09:09:16 | 0.0 | [] | [] |
|||
pfnet/pytorch-pfn-extras | pfnet__pytorch-pfn-extras-702 | d671e208ee5f0dedfdd537ab3955e312e2d00466 | diff --git a/pytorch_pfn_extras/engine.py b/pytorch_pfn_extras/engine.py
index 3db3eea18..0b6683da4 100644
--- a/pytorch_pfn_extras/engine.py
+++ b/pytorch_pfn_extras/engine.py
@@ -14,6 +14,7 @@
import pytorch_pfn_extras.handler as handler_module
import torch
from pytorch_pfn_extras.runtime import runtime_registry
+from pytorch_pfn_extras.training import StateObjectProtocol
from pytorch_pfn_extras.training._transform_model import default_transform_model
if TYPE_CHECKING:
@@ -106,10 +107,21 @@ def create_trainer(
"""
options = options.copy() if options else {}
+
+ state_objects: Dict[str, StateObjectProtocol] = {}
+ for key, value in options.items():
+ if isinstance(value, StateObjectProtocol):
+ state_objects[f"options_{key}"] = value
+
# TODO(kmaehashi): deprecate specifying 'runtime' key in options
runtime_options = dict(
runtime_options if runtime_options else options.pop("runtime", {})
)
+
+ for key, value in runtime_options.items():
+ if isinstance(value, StateObjectProtocol):
+ state_objects[f"runtime_options_{key}"] = value
+
logic = handler_module.Logic() if logic is None else logic
handler_class = handler_class if handler_class else handler_module.Handler
@@ -139,6 +151,7 @@ def create_trainer(
writer=writer,
transform_model=transform_model,
profile=profile,
+ state_objects=state_objects,
**kwargs,
)
diff --git a/pytorch_pfn_extras/training/__init__.py b/pytorch_pfn_extras/training/__init__.py
index 8fae0df54..4289657c9 100644
--- a/pytorch_pfn_extras/training/__init__.py
+++ b/pytorch_pfn_extras/training/__init__.py
@@ -3,6 +3,7 @@
from pytorch_pfn_extras.training._evaluator import Evaluator # NOQA
from pytorch_pfn_extras.training._manager_protocol import ( # NOQA
ExtensionsManagerProtocol,
+ StateObjectProtocol,
)
from pytorch_pfn_extras.training._trainer import Trainer # NOQA
from pytorch_pfn_extras.training.extension import PRIORITY_EDITOR # NOQA
diff --git a/pytorch_pfn_extras/training/_manager_protocol.py b/pytorch_pfn_extras/training/_manager_protocol.py
index 624acff1d..08f88d50b 100644
--- a/pytorch_pfn_extras/training/_manager_protocol.py
+++ b/pytorch_pfn_extras/training/_manager_protocol.py
@@ -1,4 +1,11 @@
-from typing import TYPE_CHECKING, Mapping, Optional
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Mapping,
+ Optional,
+ runtime_checkable,
+)
import torch
from typing_extensions import Protocol
@@ -72,3 +79,12 @@ def get_extension(self, name: str) -> "Extension":
@property
def observation(self) -> "reporting.Observation":
...
+
+
+@runtime_checkable
+class StateObjectProtocol(Protocol):
+ def state_dict(self) -> Dict[str, Any]:
+ ...
+
+ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
+ ...
diff --git a/pytorch_pfn_extras/training/manager.py b/pytorch_pfn_extras/training/manager.py
index 3ba206e86..487c28207 100644
--- a/pytorch_pfn_extras/training/manager.py
+++ b/pytorch_pfn_extras/training/manager.py
@@ -1,4 +1,4 @@
-import collections
+import collections.abc
import contextlib
import copy
import time
@@ -18,6 +18,7 @@
import torch
from pytorch_pfn_extras import reporting, writing
from pytorch_pfn_extras.profiler import record
+from pytorch_pfn_extras.training import StateObjectProtocol
from pytorch_pfn_extras.training import _util as util_module
from pytorch_pfn_extras.training import extension as extension_module
from pytorch_pfn_extras.training import trigger as trigger_module
@@ -99,6 +100,9 @@ def observation(self) -> reporting.Observation:
return self._manager.observation
+_default_state_objects: Dict[str, StateObjectProtocol] = {}
+
+
class _BaseExtensionsManager:
"""
Keeps track of the extensions and the current status
@@ -117,6 +121,7 @@ def __init__(
stop_trigger: "trigger_module.TriggerLike" = None,
transform_model: _TransformModel = default_transform_model,
enable_profile: bool = False,
+ state_objects: Dict[str, StateObjectProtocol] = _default_state_objects,
) -> None:
if extensions is None:
extensions = []
@@ -178,6 +183,7 @@ def __init__(
self.extend(ext)
self._enable_profile = enable_profile
+ self._state_objects = state_objects
# Initialize the writer
self.writer.initialize(self.out)
@@ -493,6 +499,10 @@ def state_dict(
name: self._optimizers[name].state_dict()
for name in self._optimizers
}
+ to_save["state_objects"] = {
+ name: self._state_objects[name].state_dict()
+ for name in self._state_objects
+ }
to_save["extensions"] = {
name: self._extensions[name].state_dict()
for name in self._extensions
@@ -538,6 +548,12 @@ def load_state_dict(
for name in self._optimizers:
self._optimizers[name].load_state_dict(to_load["optimizers"][name])
+ if "state_objects" in to_load:
+ for name in self._state_objects:
+ self._state_objects[name].load_state_dict( # type: ignore[no-untyped-call]
+ to_load["state_objects"][name]
+ )
+
for name in self._extensions:
self._extensions[name].load_state_dict(to_load["extensions"][name])
@@ -579,6 +595,7 @@ def __init__(
writer: Optional[writing.Writer] = None,
transform_model: _TransformModel = lambda n, x: x,
enable_profile: bool = False,
+ state_objects: Dict[str, StateObjectProtocol] = _default_state_objects,
) -> None:
super().__init__(
models,
@@ -590,6 +607,7 @@ def __init__(
stop_trigger,
transform_model,
enable_profile,
+ state_objects,
)
if iters_per_epoch < 1:
raise ValueError(
@@ -683,6 +701,7 @@ def __init__(
out_dir: str = "result",
writer: Optional[writing.Writer] = None,
enable_profile: bool = False,
+ state_objects: Dict[str, StateObjectProtocol] = _default_state_objects,
) -> None:
import ignite
@@ -703,6 +722,7 @@ def __init__(
out_dir,
writer,
enable_profile=enable_profile,
+ state_objects=state_objects,
)
self.engine = engine
self._start_epoch = 0 # Used to correctly restore snapshots
| GradScaler state_dict not saved in snapshot
GradScaler objects can be managed by ppe.training.manager._BaseExtensionManager so that state_dict can be saved
| 2023-05-31T06:48:53 | 0.0 | [] | [] |
|||
pfnet/pytorch-pfn-extras | pfnet__pytorch-pfn-extras-697 | fdfd3585cae6af7200f7f8b917564454b044a3b9 | diff --git a/pytorch_pfn_extras/handler/_logic.py b/pytorch_pfn_extras/handler/_logic.py
index b13e1fd5f..b9dacc663 100644
--- a/pytorch_pfn_extras/handler/_logic.py
+++ b/pytorch_pfn_extras/handler/_logic.py
@@ -268,6 +268,10 @@ def train_epoch_begin(
# Needed for `torch.utils.data.DistributedSampler`
loader.sampler.set_epoch(epoch) # type: ignore[attr-defined]
+ def train_epoch_end(self, models: Mapping[str, Any], epoch: int) -> None:
+ model = models[self.model_name]
+ model.eval()
+
def train_step(
self,
models: Mapping[str, torch.nn.Module],
@@ -339,6 +343,10 @@ def train_validation_begin(
model = models[self.model_name]
model.eval()
+ def train_validation_end(self, models: Mapping[str, Any]) -> None:
+ model = models[self.model_name]
+ model.train()
+
def eval_step(
self,
models: Mapping[str, torch.nn.Module],
@@ -404,6 +412,10 @@ def train_epoch_begin(
# Needed for `torch.utils.data.DistributedSampler`
loader.sampler.set_epoch(epoch) # type: ignore[attr-defined]
+ def train_epoch_end(self, models: Mapping[str, Any], epoch: int) -> None:
+ model = models[self.model_name]
+ model.eval()
+
def train_step(
self,
models: Mapping[str, torch.nn.Module],
@@ -447,6 +459,10 @@ def train_validation_begin(
model = models[self.model_name]
model.eval()
+ def train_validation_end(self, models: Mapping[str, Any]) -> None:
+ model = models[self.model_name]
+ model.train()
+
def eval_step(
self,
models: Mapping[str, torch.nn.Module],
| Recovering the mode of the model after running evaluator
When the evaluator is run, `ppe.handler.logic.train_validation_begin` changes the model to `eval` mode, but the `ppe.handler.logic.train_ validation_end`, which is executed after the evaluator is run, does not restore it.
Conventionally, it is specified in `train` mode via `ppe.handler.logic.train_epoch_begin`, but if the timing of the evaluator execution is at a non-integer epoch frequency, it is possible that a training that is not in `train` mode will be executed.
Therefore, the mode of the model is restored at `ppe.handler.logic.train_validation_end`.
https://github.com/pfnet/pytorch-pfn-extras/blob/bcb5f37eba7ffd48f052231c886df21ab759a0f8/pytorch_pfn_extras/handler/_logic.py#L330-L340
| 2023-05-24T06:31:27 | 0.0 | [] | [] |
|||
CatalaLang/catala | CatalaLang__catala-733 | e751bbf21073f5e6c4bff41f33a21633deed645b | diff --git a/compiler/catala_utils/string.ml b/compiler/catala_utils/string.ml
index daec56188..2563b1433 100644
--- a/compiler/catala_utils/string.ml
+++ b/compiler/catala_utils/string.ml
@@ -17,6 +17,13 @@
include Stdlib.String
let to_ascii : string -> string = Ubase.from_utf8
+
+let to_id s =
+ to_ascii s
+ |> map (function
+ | ('a' .. 'z' | 'A' .. 'Z' | '0' .. '9') as c -> c
+ | _ -> '_')
+
let is_uppercase_ascii = function 'A' .. 'Z' -> true | _ -> false
let begins_with_uppercase (s : string) : bool =
@@ -29,7 +36,7 @@ let begins_with_uppercase (s : string) : bool =
let to_snake_case (s : string) : string =
let out = Buffer.create (2 * length s) in
s
- |> to_ascii
+ |> to_id
|> iteri (fun i c ->
if is_uppercase_ascii c && 0 <> i && get s (i - 1) <> '_' then
Buffer.add_char out '_';
@@ -40,7 +47,7 @@ let to_camel_case (s : string) : string =
let last_was_underscore = ref true in
let out = Buffer.create (length s) in
s
- |> to_ascii
+ |> to_id
|> iter (function
| '_' -> last_was_underscore := true
| c ->
diff --git a/compiler/catala_utils/string.mli b/compiler/catala_utils/string.mli
index 338cf2d6c..9d568a2d6 100644
--- a/compiler/catala_utils/string.mli
+++ b/compiler/catala_utils/string.mli
@@ -29,6 +29,10 @@ val to_ascii : string -> string
(** Removes all non-ASCII diacritics from a string by converting them to their
base letter in the Latin alphabet. *)
+val to_id : string -> string
+(** Like [to_ascii], but in addition replaces any non-alphanumeric character by
+ [_] *)
+
val is_uppercase_ascii : char -> bool
(** [is_uppercase c] returns if [c] is in the set ['A'...'Z']. *)
diff --git a/compiler/plugins/api_web.ml b/compiler/plugins/api_web.ml
index e0524ae09..d29552170 100644
--- a/compiler/plugins/api_web.ml
+++ b/compiler/plugins/api_web.ml
@@ -37,7 +37,7 @@ module To_jsoo = struct
other modules: here everything is flattened in the current namespace *)
let format_struct_name ppf name =
StructName.to_string name
- |> String.to_ascii
+ |> String.to_id
|> String.uncapitalize_ascii
|> String.map (function '.' -> '_' | c -> c)
|> Format.pp_print_string ppf
diff --git a/compiler/plugins/explain.ml b/compiler/plugins/explain.ml
index 8077c03ae..83ea8e726 100644
--- a/compiler/plugins/explain.ml
+++ b/compiler/plugins/explain.ml
@@ -17,11 +17,65 @@
open Catala_utils
open Shared_ast
+module Style = struct
+ type color = Graph.Graphviz.color
+
+ type elt = {
+ fill : color;
+ border : color;
+ stroke : int; (* in px *)
+ text : color;
+ }
+
+ type theme = {
+ page_background : Graph.Graphviz.color;
+ arrows : Graph.Graphviz.color;
+ input : elt;
+ middle : elt;
+ constant : elt;
+ condition : elt;
+ output : elt;
+ }
+
+ let dark =
+ {
+ page_background = 0x0;
+ arrows = 0x606060;
+ input =
+ { fill = 0x252526; border = 0xBC3FBC; stroke = 2; text = 0xFFFFFF };
+ middle =
+ { fill = 0x252526; border = 0x0097FB; stroke = 2; text = 0xFFFFFF };
+ constant =
+ { fill = 0x252526; border = 0x40C8AE; stroke = 2; text = 0xFFFFFF };
+ condition =
+ { fill = 0x252526; border = 0xff7700; stroke = 2; text = 0xFFFFFF };
+ output =
+ { fill = 0x252526; border = 0xFFFFFF; stroke = 2; text = 0xFFFFFF };
+ }
+
+ let light =
+ {
+ page_background = 0xffffff;
+ arrows = 0x0;
+ input = { fill = 0xffaa55; border = 0x0; stroke = 1; text = 0x0 };
+ middle = { fill = 0xffee99; border = 0x0; stroke = 1; text = 0x0 };
+ constant = { fill = 0x99bbff; border = 0x0; stroke = 1; text = 0x0 };
+ condition = { fill = 0xffffff; border = 0xff7700; stroke = 2; text = 0x0 };
+ output = { fill = 0xffffff; border = 0x1; stroke = 2; text = 0x0 };
+ }
+
+ let width pixels =
+ let dpi = 96. in
+ let pt_per_inch = 72.28 in
+ float_of_int pixels /. dpi *. pt_per_inch
+end
+
type flags = {
with_conditions : bool;
with_cleanup : bool;
merge_level : int;
format : [ `Dot | `Convert of string ];
+ theme : Style.theme;
show : string option;
output : Global.raw_file option;
base_src_url : string;
@@ -574,7 +628,7 @@ end
module E = struct
type hand_side = Lhs of string | Rhs of string
- type t = { side : hand_side option; condition : bool }
+ type t = { side : hand_side option; condition : bool; invisible : bool }
let compare x y =
match Bool.compare x.condition y.condition with
@@ -588,7 +642,7 @@ module E = struct
x.side y.side
| n -> n
- let default = { side = None; condition = false }
+ let default = { side = None; condition = false; invisible = false }
end
module G = Graph.Persistent.Digraph.AbstractLabeled (V) (E)
@@ -739,7 +793,7 @@ let program_to_graph
value_level with
eval_struct = false;
eval_op = false;
- eval_match = false;
+ eval_match = true;
eval_vars = (fun v -> false);
}
in
@@ -760,7 +814,9 @@ let program_to_graph
aux (Some parent) (g, var_vertices, env) econd
in
( G.add_edge_e g
- (G.E.create parent { side = None; condition = true } vcond),
+ (G.E.create parent
+ { side = None; condition = true; invisible = false }
+ vcond),
var_vertices,
Env.join env0 env ))
(g, var_vertices, env0) conditions
@@ -842,11 +898,15 @@ let program_to_graph
in
let g =
G.add_edge_e g
- (G.E.create v { side = lhs_label; condition = false } lhs)
+ (G.E.create v
+ { side = lhs_label; condition = false; invisible = false }
+ lhs)
in
let g =
G.add_edge_e g
- (G.E.create v { side = rhs_label; condition = false } rhs)
+ (G.E.create v
+ { side = rhs_label; condition = false; invisible = false }
+ rhs)
in
(g, var_vertices, env), v
| EAppOp { op = _; args; _ }, _ ->
@@ -1167,7 +1227,7 @@ let expr_to_dot_label0 :
match o with
| Eq_boo_boo | Eq_int_int | Eq_rat_rat | Eq_mon_mon | Eq_dur_dur
| Eq_dat_dat | Eq ->
- "="
+ "="
| Minus_int | Minus_rat | Minus_mon | Minus_dur | Minus -> "-"
| ToRat_int | ToRat_mon | ToRat -> ""
| ToMoney_rat | ToMoney -> ""
@@ -1181,19 +1241,19 @@ let expr_to_dot_label0 :
"×"
| Div_int_int | Div_rat_rat | Div_mon_mon | Div_mon_rat | Div_dur_dur
| Div ->
- "/"
+ "÷"
| Lt_int_int | Lt_rat_rat | Lt_mon_mon | Lt_dur_dur | Lt_dat_dat | Lt
->
"<"
| Lte_int_int | Lte_rat_rat | Lte_mon_mon | Lte_dur_dur | Lte_dat_dat
| Lte ->
- "<="
+ "≤"
| Gt_int_int | Gt_rat_rat | Gt_mon_mon | Gt_dur_dur | Gt_dat_dat | Gt
->
">"
| Gte_int_int | Gte_rat_rat | Gte_mon_mon | Gte_dur_dur | Gte_dat_dat
| Gte ->
- ">="
+ "≥"
| Concat -> "++"
| Not -> xlang () ~en:"not" ~fr:"non"
| Length -> xlang () ~en:"length" ~fr:"nombre"
@@ -1224,11 +1284,26 @@ let expr_to_dot_label0 :
let bypass : type a t. Format.formatter -> (a, t) gexpr -> bool =
fun ppf e ->
+ let percent_printer ppf = function
+ | ELit (LRat r), m
+ when Runtime.(o_lt_rat_rat r (Runtime.decimal_of_float 1.)) ->
+ Format.fprintf ppf "%a%%" aux_value
+ ( ELit
+ (LRat
+ (Runtime.o_mult_rat_rat r (Runtime.decimal_of_float 100.))),
+ m )
+ | e -> aux_value ppf e
+ in
match Mark.remove e with
| ELit _ | EArray _ | ETuple _ | EStruct _ | EInj _ | EEmpty | EAbs _
| EExternal _ ->
aux_value ppf e;
true
+ | EAppOp
+ { op = (Op.Mult_rat_rat | Op.Mult_mon_rat), _; args = [x1; x2]; _ }
+ ->
+ Format.fprintf ppf "%a × %a" percent_printer x1 percent_printer x2;
+ true
| EMatch { e; cases; _ } ->
let cases =
List.map
@@ -1274,16 +1349,11 @@ let htmlencode =
| "@" -> "@"
| _ -> assert false)
-let scale_svg_width s =
- let open Re in
- let re = Pcre.re "<svg width=\"[^\"]*pt\"" in
- replace_string (compile re) ~by:"<svg width=\"100%\"" s
-
let expr_to_dot_label0 lang ctx env ppf e =
Format.fprintf ppf "%s"
(htmlencode (Format.asprintf "%a" (expr_to_dot_label0 lang ctx env) e))
-let rec expr_to_dot_label lang ctx env ppf e =
+let rec expr_to_dot_label (style : Style.theme) lang ctx env ppf e =
let print_expr ppf = function
| (EVar _, _) as e ->
let e, _ = lazy_eval ctx env value_level e in
@@ -1317,9 +1387,11 @@ let rec expr_to_dot_label lang ctx env ppf e =
| EStruct { name; fields }, _ ->
let pr ppf =
Format.fprintf ppf
- "<table border=\"0\" cellborder=\"1\" cellspacing=\"0\"><tr><td \
+ "<table border=\"%f\" cellborder=\"1\" cellspacing=\"0\" \
+ bgcolor=\"#%06x\" color=\"#%06x\"><tr><td \
colspan=\"2\">%a</td></tr><tr><td>%a</td><td>%a</td></tr></table>"
- StructName.format name
+ (float_of_int style.output.stroke)
+ style.output.fill style.output.border StructName.format name
(Format.pp_print_list
~pp_sep:(fun ppf () -> Format.pp_print_string ppf " | ")
(fun ppf fld ->
@@ -1350,7 +1422,7 @@ let rec expr_to_dot_label lang ctx env ppf e =
Format.pp_print_string ppf (Message.unformat pr)
| e -> Format.fprintf ppf "%a@," (expr_to_dot_label0 lang ctx env) e
-let to_dot lang ppf ctx env base_vars g ~base_src_url ~line_format =
+let to_dot lang ppf ctx env base_vars g ~base_src_url ~line_format ~theme =
let module GPr = Graph.Graphviz.Dot (struct
include G
@@ -1361,11 +1433,20 @@ let to_dot lang ppf ctx env base_vars g ~base_src_url ~line_format =
* out_funs with
* Format.out_newline = (fun () -> out_funs.out_string "<br/>" 0 2);
* }; *)
- expr_to_dot_label env ctx lang ppf e
+ expr_to_dot_label theme env ctx lang ppf e
(* ; * Format.pp_print_flush ppf (); * Format.pp_set_formatter_out_functions
ppf out_funs *)
- let graph_attributes _ = [ (* `Rankdir `LeftToRight *) ]
+ let graph_attributes _ =
+ [
+ `BgcolorWithTransparency (Int32.of_int 0x00);
+ (* `Ratio (`Float 0.8); *)
+ (* `Concentrate true; *)
+ `Ratio `Compress;
+ (* `Size (8.3, 11.7); (* A4 in inches..... *) *)
+ (* `Rankdir `LeftToRight *)
+ ]
+
let default_vertex_attributes _ = []
let vertex_label v =
@@ -1417,54 +1498,127 @@ let to_dot lang ppf ctx env base_vars g ~base_src_url ~line_format =
(match G.V.label v with
| EVar var, _ ->
if Var.Set.mem var base_vars then
- [`Style `Filled; `Fillcolor 0xffaa55; `Shape `Box]
+ [
+ `Style `Filled;
+ `Fillcolor theme.input.fill;
+ `Shape `Box;
+ `Penwidth (Style.width theme.input.stroke);
+ `Color theme.input.border;
+ `Fontcolor theme.input.text;
+ ]
else if
List.exists (fun e -> not (G.E.label e).condition) (G.succ_e g v)
then
(* non-constants *)
- [`Style `Filled; `Fillcolor 0xffee99; `Shape `Box]
- else (* Constants *)
- [`Style `Filled; `Fillcolor 0x99bbff; `Shape `Note]
+ [
+ `Style `Filled;
+ `Fillcolor theme.middle.fill;
+ `Shape `Box;
+ `Penwidth (Style.width theme.middle.stroke);
+ `Color theme.middle.border;
+ `Fontcolor theme.middle.text;
+ ]
+ else
+ (* Constants *)
+ [
+ `Style `Filled;
+ `Fillcolor theme.constant.fill;
+ `Shape `Box;
+ `Penwidth (Style.width theme.middle.stroke);
+ `Color theme.constant.border;
+ `Fontcolor theme.constant.text;
+ ]
| EAppOp { op = Op.Eq, _; args = [(EVar _, _); (EAppOp _, _)]; _ }, _ ->
- [`Style `Filled; `Fillcolor 0xffee99; `Shape `Box]
- | EStruct _, _ | EArray _, _ -> [`Shape `Plaintext]
+ [
+ `Style `Filled;
+ `Fillcolor theme.middle.fill;
+ `Shape `Box;
+ `Penwidth (Style.width theme.middle.stroke);
+ `Color theme.middle.border;
+ `Fontcolor theme.middle.text;
+ ]
+ | EStruct _, _ | EArray _, _ ->
+ [
+ `Style `Solid;
+ (* `Fillcolor theme.output.fill; *)
+ `Shape `Plaintext;
+ `Penwidth (Style.width theme.output.stroke);
+ `Color theme.output.border;
+ `Fontcolor theme.output.text;
+ ]
(* | EAppOp { op = op, _; _ }, _ -> (
* match op_kind op with
- * | `Sum | `Product | _ -> [`Shape `Box; `Fillcolor 0xff0000] (\* | _ -> [] *\)) *)
- | _ -> [`Shape `Box; `Penwidth 2.; `Style `Dashed; `Color 0xff7700])
+ * | `Sum | `Product | _ -> [`Shape `Box; `Fillcolor 0xff0000] (* | _ -> [] *)) *)
+ | _ ->
+ [
+ `Style `Dashed;
+ `Style `Filled;
+ `Fillcolor theme.condition.fill;
+ `Shape `Box;
+ `Penwidth (Style.width theme.condition.stroke);
+ `Color theme.condition.border;
+ `Fontcolor theme.condition.text;
+ ])
let get_subgraph v =
- match G.V.label v with
- | EVar var, _ -> (
- if Var.Set.mem var base_vars then
- Some
- {
- Graph.Graphviz.DotAttributes.sg_name = "cluster_inputs";
- sg_attributes = [];
- sg_parent = None;
- }
- else
- match List.map G.V.label (G.succ g v) with
- (* | [] | [ELit _, _] ->
- * Some
- * {
- * Graph.Graphviz.DotAttributes.sg_name = "constants";
- * sg_attributes = [`Shape `Box];
- * sg_parent = None;
- * } *)
- | _ -> None)
- | _ -> None
+ let is_input =
+ match G.V.label v with
+ | EVar var, _ -> Var.Set.mem var base_vars
+ | _ -> false
+ in
+ if is_input then
+ Some
+ {
+ Graph.Graphviz.DotAttributes.sg_name = "inputs";
+ sg_attributes =
+ [
+ `Style `Filled;
+ `FillcolorWithTransparency (Int32.of_int 0x0);
+ `ColorWithTransparency (Int32.of_int 0x0);
+ ];
+ sg_parent = None;
+ }
+ else None
let default_edge_attributes _ = []
let edge_attributes e =
match E.label e with
+ | { invisible = true; _ } -> [`Style `Invis; `Weight 6]
| { condition = true; _ } ->
- [`Style `Dashed; `Penwidth 2.; `Color 0xff7700; `Arrowhead `Odot]
+ [
+ `Style `Dashed;
+ `Penwidth 2.;
+ `Color 0xff7700;
+ `Arrowhead `Odot;
+ `Weight 8;
+ ]
| { side = Some (Lhs s | Rhs s); _ } ->
- [ (* `Label s; `Color 0xbb7700 *) ]
- | { side = None; _ } -> [ (* `Minlen 0; `Weight 10 *) ]
+ [`Color theme.arrows (* `Label s; `Color 0xbb7700 *); `Weight 10]
+ | { side = None; _ } ->
+ [`Color theme.arrows (* `Minlen 0; `Weight 10 *); `Weight 10]
end) in
+ let g =
+ (* Add fake edges from everything towards the inputs to force ordering *)
+ G.fold_vertex
+ (fun v g ->
+ match G.V.label v with
+ | EVar var, _ when Var.Set.mem var base_vars ->
+ G.fold_vertex
+ (fun v0 g ->
+ if G.out_degree g v0 > 0 then g
+ else
+ match G.V.label v0 with
+ | EVar var, _ when Var.Set.mem var base_vars -> g
+ | _ ->
+ G.add_edge_e g
+ (G.E.create v0
+ { invisible = true; condition = false; side = None }
+ v))
+ g g
+ | _ -> g)
+ g g
+ in
GPr.fprint_graph ppf (reverse_graph g)
(* -- Plugin registration -- *)
@@ -1523,6 +1677,12 @@ let options =
mkinfo "html";
])
in
+ let theme =
+ Arg.(
+ value
+ & opt (enum ["light", Style.light; "dark", Style.dark]) Style.light
+ & info ["theme"] ~doc:"Select the color theme for graphical outputs")
+ in
let show =
Arg.(
value
@@ -1562,6 +1722,7 @@ let options =
no_cleanup
merge_level
format
+ theme
show
output
base_src_url
@@ -1572,6 +1733,7 @@ let options =
with_cleanup = not no_cleanup;
merge_level;
format;
+ theme;
show;
output;
base_src_url;
@@ -1585,6 +1747,7 @@ let options =
$ no_cleanup
$ merge_level
$ format
+ $ theme
$ show
$ Cli.Flags.output
$ base_src_url
@@ -1704,7 +1867,7 @@ let run
let dot_content =
to_dot lang Format.str_formatter prg.decl_ctx env base_vars g
~base_src_url:explain_options.base_src_url
- ~line_format:explain_options.line_format;
+ ~line_format:explain_options.line_format ~theme:explain_options.theme;
Format.flush_str_formatter ()
|> Re.(replace_string (compile (seq [bow; str "comment="])) ~by:"tooltip=")
in
@@ -1732,10 +1895,17 @@ let run
if wrap_html then (
output_string oc "<!DOCTYPE html>\n<html>\n<head>\n <title>";
output_string oc (htmlencode ex_scope);
- output_string oc "</title>\n</head>\n<body>\n");
+ Printf.fprintf oc
+ " </title>\n\
+ \ <style>\n\
+ \ body { background-color: #%06x }\n\
+ \ svg { max-width: 80rem; height: fit-content; }\n\
+ \ </style>\n\
+ </head>\n\
+ <body>\n"
+ explain_options.theme.page_background);
let contents = File.process_out "dot" ["-T" ^ fmt; dotfile] in
- output_string oc
- (if wrap_html then scale_svg_width contents else contents);
+ output_string oc contents;
if wrap_html then output_string oc "</body>\n</html>\n")
| `Dot -> ());
match explain_options.show with
diff --git a/compiler/scalc/to_c.ml b/compiler/scalc/to_c.ml
index be4172dfb..3185642a7 100644
--- a/compiler/scalc/to_c.ml
+++ b/compiler/scalc/to_c.ml
@@ -81,8 +81,8 @@ let renaming =
let id = f id |> Re.replace_string module_sep_re ~by:"_" in
String.concat "__" (pfx @ [id])
in
- let cap s = String.to_ascii s |> String.capitalize_ascii in
- let uncap s = String.to_ascii s |> String.uncapitalize_ascii in
+ let cap s = String.to_id s |> String.capitalize_ascii in
+ let uncap s = String.to_id s |> String.uncapitalize_ascii in
let upper s = String.to_snake_case s |> String.uppercase_ascii in
Renaming.program ()
~reserved:c_keywords
diff --git a/compiler/scopelang/from_desugared.ml b/compiler/scopelang/from_desugared.ml
index bca2f8afa..fc929e3f9 100644
--- a/compiler/scopelang/from_desugared.ml
+++ b/compiler/scopelang/from_desugared.ml
@@ -863,7 +863,7 @@ let translate_program
match states with
| D.WholeVar -> WholeVar (ScopeVar.fresh (var_name, var_pos))
| States states ->
- let var_prefix = var_name ^ "_" in
+ let var_prefix = var_name ^ "#" in
let state_var state =
ScopeVar.fresh
(Mark.map (( ^ ) var_prefix) (StateName.get_info state))
diff --git a/compiler/shared_ast/renaming.ml b/compiler/shared_ast/renaming.ml
index fd7f76cb8..8ca53dddf 100644
--- a/compiler/shared_ast/renaming.ml
+++ b/compiler/shared_ast/renaming.ml
@@ -471,8 +471,8 @@ let process_type_ident
ctx_enums = EnumName.Map.add new_name ctx_constrs tctx.ctx_enums;
}
-let cap s = String.to_ascii s |> String.capitalize_ascii
-let uncap s = String.to_ascii s |> String.uncapitalize_ascii
+let cap s = String.to_id s |> String.capitalize_ascii
+let uncap s = String.to_id s |> String.uncapitalize_ascii
(* Todo? - handle separate namespaces ? (e.g. allow a field and var to have the
same name for backends that support it) - register module names as reserved
| Explain plugin broken on demo example
On this file : https://github.com/CatalaLang/catala-examples/blob/exemple_explication/exemple_explication/apl_locatif.catala_fr
The command `catala explain apl_locatif.catala_fr -s Exemple1` returns the error :
```text
┌─[ERROR]─
│
│ Operator +$ applied to the wrong arguments (should not happen if the term was well-typed)
│
│ Operator (value +$):
├─➤ apl_locatif.catala_fr:116.22-116.23:
│ │
│ 116 │ loyer_éligible + montant_forfaitaire_charges_d823_16
│ │ ‾
├─ Code de la construction est de l'habitation
│ └─ Article D823-16
│
│ Argument n°1, value $239.48
├─➤ apl_locatif.catala_fr:775.16-775.23:
│ │
│ 775 │ -- Zone1 : 239,48€
│ │ ‾‾‾‾‾‾‾
├─ Arrêté du 27 septembre 2019 relatif au calcul des aides personnelles au logement et de la prime de déménagement
│ └─ Article 16
│
│ Argument n°2, value PersonneSeule ()
├─➤ apl_locatif.catala_fr:847.59-847.72:
│ │
│ 847 │ définition calcul.situation_familiale_calcul_apl égal à PersonneSeule
│ │ ‾‾‾‾‾‾‾‾‾‾‾‾‾
└─ Tests
```
Looks like there is a bug in the lazy evaluation...
| Apparently it only happens on one of my machines?
I can reproduce on the new version of the example :/
Looking into it
ok it's in the new stuff I added to remove neutral elements from operations that stg is going wrong so worst case we just need to disable that | 2024-10-28T11:16:21 | 0.0 | [] | [] |
||
CatalaLang/catala | CatalaLang__catala-706 | 29ce1649bc591fcaf07bfcc4b94938c0856915e5 | diff --git a/catala.opam b/catala.opam
index e9e2eadf3..58ff41c9f 100644
--- a/catala.opam
+++ b/catala.opam
@@ -77,8 +77,8 @@ depexts: [
]
pin-depends: [
[
- # FIXME: this is temporary, while we wait for the branch to be merged (and package released)
+ # FIXME: this is temporary, while we wait for the package to be released
"dates_calc.0.0.6"
- "git+https://github.com/CatalaLang/dates-calc#add-c-implem"
+ "git+https://github.com/CatalaLang/dates-calc#9125d78"
]
]
diff --git a/compiler/catala_utils/cli.ml b/compiler/catala_utils/cli.ml
index 8ab607445..46067c659 100644
--- a/compiler/catala_utils/cli.ml
+++ b/compiler/catala_utils/cli.ml
@@ -286,6 +286,20 @@ module Flags = struct
& flag
& info ["check-invariants"] ~doc:"Check structural invariants on the AST."
+ let autotest =
+ value
+ & flag
+ & info ["autotest"]
+ ~env:(Cmd.Env.info "CATALA_AUTOTEST")
+ ~doc:
+ "Insert automatic test assertions in the compiled program. This \
+ detects all scopes that have no input or context variables, runs \
+ the interpreter to pre-compute their values, then adds runtime \
+ assertions to the program that ensure that the actual output of the \
+ scopes match their pre-computed values. If used on a testing \
+ program with a given backend, this guarantees consistency between \
+ the backend and the interpreter."
+
let no_typing =
value
& flag
diff --git a/compiler/catala_utils/cli.mli b/compiler/catala_utils/cli.mli
index 160dcde59..f286b6929 100644
--- a/compiler/catala_utils/cli.mli
+++ b/compiler/catala_utils/cli.mli
@@ -47,6 +47,7 @@ module Flags : sig
(** Parsers for all flags and options that commands can use *)
val check_invariants : bool Term.t
+ val autotest : bool Term.t
val no_typing : bool Term.t
val wrap_weaved_output : bool Term.t
val print_only_law : bool Term.t
diff --git a/compiler/catala_web_interpreter.ml b/compiler/catala_web_interpreter.ml
index 471af383d..fa0f9b738 100644
--- a/compiler/catala_web_interpreter.ml
+++ b/compiler/catala_web_interpreter.ml
@@ -25,7 +25,8 @@ let () =
in
let prg, _type_order =
Passes.dcalc options ~includes:[] ~optimize:false
- ~check_invariants:false ~typed:Shared_ast.Expr.typed
+ ~check_invariants:false ~autotest:false
+ ~typed:Shared_ast.Expr.typed
in
Shared_ast.Interpreter.interpret_program_dcalc prg
(Commands.get_scope_uid prg.decl_ctx scope)
diff --git a/compiler/driver.ml b/compiler/driver.ml
index b71fb0351..e07e551c8 100644
--- a/compiler/driver.ml
+++ b/compiler/driver.ml
@@ -174,9 +174,10 @@ module Passes = struct
includes:Global.raw_file list ->
optimize:bool ->
check_invariants:bool ->
+ autotest:bool ->
typed:ty mark ->
ty Dcalc.Ast.program * TypeIdent.t list =
- fun options ~includes ~optimize ~check_invariants ~typed ->
+ fun options ~includes ~optimize ~check_invariants ~autotest ~typed ->
let prg = scopelang options ~includes in
debug_pass_name "dcalc";
let type_ordering =
@@ -193,6 +194,15 @@ module Passes = struct
in
Message.debug "Translating to default calculus...";
let prg = Dcalc.From_scopelang.translate_program prg in
+ let prg =
+ if autotest then (
+ Interpreter.load_runtime_modules
+ ~hashf:
+ Hash.(finalise ~closure_conversion:false ~monomorphize_types:false)
+ prg;
+ Dcalc.Autotest.program prg)
+ else prg
+ in
let prg =
if optimize then begin
Message.debug "Optimizing default calculus...";
@@ -226,6 +236,7 @@ module Passes = struct
~includes
~optimize
~check_invariants
+ ~autotest
~(typed : ty mark)
~closure_conversion
~keep_special_ops
@@ -234,7 +245,7 @@ module Passes = struct
~renaming :
typed Lcalc.Ast.program * TypeIdent.t list * Renaming.context option =
let prg, type_ordering =
- dcalc options ~includes ~optimize ~check_invariants ~typed
+ dcalc options ~includes ~optimize ~check_invariants ~autotest ~typed
in
debug_pass_name "lcalc";
let prg =
@@ -302,6 +313,7 @@ module Passes = struct
~includes
~optimize
~check_invariants
+ ~autotest
~closure_conversion
~keep_special_ops
~dead_value_assignment
@@ -310,9 +322,9 @@ module Passes = struct
~expand_ops
~renaming : Scalc.Ast.program * TypeIdent.t list * Renaming.context =
let prg, type_ordering, renaming_context =
- lcalc options ~includes ~optimize ~check_invariants ~typed:Expr.typed
- ~closure_conversion ~keep_special_ops ~monomorphize_types ~expand_ops
- ~renaming
+ lcalc options ~includes ~optimize ~check_invariants ~autotest
+ ~typed:Expr.typed ~closure_conversion ~keep_special_ops
+ ~monomorphize_types ~expand_ops ~renaming
in
let renaming_context =
match renaming_context with
@@ -617,10 +629,18 @@ module Commands = struct
$ Cli.Flags.check_invariants
$ Cli.Flags.include_dirs)
- let dcalc typed options includes output optimize ex_scope_opt check_invariants
- =
+ let dcalc
+ typed
+ options
+ includes
+ output
+ optimize
+ ex_scope_opt
+ check_invariants
+ autotest =
let prg, _ =
- Passes.dcalc options ~includes ~optimize ~check_invariants ~typed
+ Passes.dcalc options ~includes ~optimize ~check_invariants ~autotest
+ ~typed
in
let _output_file, with_output = get_output_format options output in
with_output
@@ -663,7 +683,8 @@ module Commands = struct
$ Cli.Flags.output
$ Cli.Flags.optimize
$ Cli.Flags.ex_scope_opt
- $ Cli.Flags.check_invariants)
+ $ Cli.Flags.check_invariants
+ $ Cli.Flags.autotest)
let proof
options
@@ -673,7 +694,7 @@ module Commands = struct
check_invariants
disable_counterexamples =
let prg, _ =
- Passes.dcalc options ~includes ~optimize ~check_invariants
+ Passes.dcalc options ~includes ~optimize ~check_invariants ~autotest:false
~typed:Expr.typed
in
Verification.Globals.setup ~optimize ~disable_counterexamples;
@@ -727,7 +748,8 @@ module Commands = struct
check_invariants
ex_scope_opt =
let prg, _ =
- Passes.dcalc options ~includes ~optimize ~check_invariants ~typed
+ Passes.dcalc options ~includes ~optimize ~check_invariants ~autotest:false
+ ~typed
in
Interpreter.load_runtime_modules
~hashf:Hash.(finalise ~closure_conversion:false ~monomorphize_types:false)
@@ -742,13 +764,14 @@ module Commands = struct
output
optimize
check_invariants
+ autotest
closure_conversion
keep_special_ops
monomorphize_types
expand_ops
ex_scope_opt =
let prg, _, _ =
- Passes.lcalc options ~includes ~optimize ~check_invariants
+ Passes.lcalc options ~includes ~optimize ~check_invariants ~autotest
~closure_conversion ~keep_special_ops ~typed ~monomorphize_types
~expand_ops ~renaming:(Some Renaming.default)
in
@@ -783,6 +806,7 @@ module Commands = struct
$ Cli.Flags.output
$ Cli.Flags.optimize
$ Cli.Flags.check_invariants
+ $ Cli.Flags.autotest
$ Cli.Flags.closure_conversion
$ Cli.Flags.keep_special_ops
$ Cli.Flags.monomorphize_types
@@ -801,7 +825,7 @@ module Commands = struct
check_invariants
ex_scope_opt =
let prg, _, _ =
- Passes.lcalc options ~includes ~optimize ~check_invariants
+ Passes.lcalc options ~includes ~optimize ~check_invariants ~autotest:false
~closure_conversion ~keep_special_ops ~monomorphize_types ~typed
~expand_ops ~renaming:None
in
@@ -860,10 +884,11 @@ module Commands = struct
output
optimize
check_invariants
+ autotest
closure_conversion
ex_scope_opt =
let prg, type_ordering, _ =
- Passes.lcalc options ~includes ~optimize ~check_invariants
+ Passes.lcalc options ~includes ~optimize ~check_invariants ~autotest
~typed:Expr.typed ~closure_conversion ~keep_special_ops:true
~monomorphize_types:false ~expand_ops:true
~renaming:(Some Lcalc.To_ocaml.renaming)
@@ -891,6 +916,7 @@ module Commands = struct
$ Cli.Flags.output
$ Cli.Flags.optimize
$ Cli.Flags.check_invariants
+ $ Cli.Flags.autotest
$ Cli.Flags.closure_conversion
$ Cli.Flags.ex_scope_opt)
@@ -900,6 +926,7 @@ module Commands = struct
output
optimize
check_invariants
+ autotest
closure_conversion
keep_special_ops
dead_value_assignment
@@ -908,7 +935,7 @@ module Commands = struct
expand_ops
ex_scope_opt =
let prg, _, _ =
- Passes.scalc options ~includes ~optimize ~check_invariants
+ Passes.scalc options ~includes ~optimize ~check_invariants ~autotest
~closure_conversion ~keep_special_ops ~dead_value_assignment
~no_struct_literals ~monomorphize_types ~expand_ops
~renaming:(Some Renaming.default)
@@ -943,6 +970,7 @@ module Commands = struct
$ Cli.Flags.output
$ Cli.Flags.optimize
$ Cli.Flags.check_invariants
+ $ Cli.Flags.autotest
$ Cli.Flags.closure_conversion
$ Cli.Flags.keep_special_ops
$ Cli.Flags.dead_value_assignment
@@ -957,9 +985,10 @@ module Commands = struct
output
optimize
check_invariants
+ autotest
closure_conversion =
let prg, type_ordering, _ren_ctx =
- Passes.scalc options ~includes ~optimize ~check_invariants
+ Passes.scalc options ~includes ~optimize ~check_invariants ~autotest
~closure_conversion ~keep_special_ops:false ~dead_value_assignment:true
~no_struct_literals:false ~monomorphize_types:false ~expand_ops:false
~renaming:(Some Scalc.To_python.renaming)
@@ -985,11 +1014,12 @@ module Commands = struct
$ Cli.Flags.output
$ Cli.Flags.optimize
$ Cli.Flags.check_invariants
+ $ Cli.Flags.autotest
$ Cli.Flags.closure_conversion)
- let c options includes output optimize check_invariants =
+ let c options includes output optimize check_invariants autotest =
let prg, type_ordering, _ren_ctx =
- Passes.scalc options ~includes ~optimize ~check_invariants
+ Passes.scalc options ~includes ~optimize ~check_invariants ~autotest
~closure_conversion:true ~keep_special_ops:false
~dead_value_assignment:false ~no_struct_literals:true
~monomorphize_types:false ~expand_ops:true
@@ -1024,7 +1054,8 @@ module Commands = struct
$ Cli.Flags.include_dirs
$ Cli.Flags.output
$ Cli.Flags.optimize
- $ Cli.Flags.check_invariants)
+ $ Cli.Flags.check_invariants
+ $ Cli.Flags.autotest)
let depends options includes prefix extension extra_files =
let file = Global.input_src_file options.Global.input_src in
diff --git a/compiler/driver.mli b/compiler/driver.mli
index 8e0be6795..e0f8280d5 100644
--- a/compiler/driver.mli
+++ b/compiler/driver.mli
@@ -42,6 +42,7 @@ module Passes : sig
includes:Global.raw_file list ->
optimize:bool ->
check_invariants:bool ->
+ autotest:bool ->
typed:'m Shared_ast.mark ->
'm Dcalc.Ast.program * Shared_ast.TypeIdent.t list
@@ -50,6 +51,7 @@ module Passes : sig
includes:Global.raw_file list ->
optimize:bool ->
check_invariants:bool ->
+ autotest:bool ->
typed:'m Shared_ast.mark ->
closure_conversion:bool ->
keep_special_ops:bool ->
@@ -65,6 +67,7 @@ module Passes : sig
includes:Global.raw_file list ->
optimize:bool ->
check_invariants:bool ->
+ autotest:bool ->
closure_conversion:bool ->
keep_special_ops:bool ->
dead_value_assignment:bool ->
diff --git a/compiler/plugins/api_web.ml b/compiler/plugins/api_web.ml
index 60cd53524..842784fb7 100644
--- a/compiler/plugins/api_web.ml
+++ b/compiler/plugins/api_web.ml
@@ -478,7 +478,7 @@ let run
let options = Global.enforce_options ~trace:true () in
let prg, type_ordering, _ =
Driver.Passes.lcalc options ~includes ~optimize ~check_invariants
- ~closure_conversion ~keep_special_ops ~typed:Expr.typed
+ ~autotest:false ~closure_conversion ~keep_special_ops ~typed:Expr.typed
~monomorphize_types ~expand_ops:false
~renaming:(Some Lcalc.To_ocaml.renaming)
in
diff --git a/compiler/plugins/explain.ml b/compiler/plugins/explain.ml
index 3a905b9b9..11597c0ab 100644
--- a/compiler/plugins/explain.ml
+++ b/compiler/plugins/explain.ml
@@ -1388,7 +1388,7 @@ let options =
let run includes optimize ex_scope explain_options global_options =
let prg, _ =
Driver.Passes.dcalc global_options ~includes ~optimize
- ~check_invariants:false ~typed:Expr.typed
+ ~check_invariants:false ~autotest:false ~typed:Expr.typed
in
Interpreter.load_runtime_modules prg
~hashf:(Hash.finalise ~closure_conversion:false ~monomorphize_types:false);
diff --git a/compiler/plugins/json_schema.ml b/compiler/plugins/json_schema.ml
index 2b827ccd1..4fd3cdbdd 100644
--- a/compiler/plugins/json_schema.ml
+++ b/compiler/plugins/json_schema.ml
@@ -216,7 +216,7 @@ let run
options =
let prg, _, _ =
Driver.Passes.lcalc options ~includes ~optimize ~check_invariants
- ~closure_conversion ~keep_special_ops ~typed:Expr.typed
+ ~autotest:false ~closure_conversion ~keep_special_ops ~typed:Expr.typed
~monomorphize_types ~expand_ops:false
~renaming:(Some Lcalc.To_ocaml.renaming)
in
diff --git a/compiler/plugins/lazy_interp.ml b/compiler/plugins/lazy_interp.ml
index f87b6ad75..3075e27ac 100644
--- a/compiler/plugins/lazy_interp.ml
+++ b/compiler/plugins/lazy_interp.ml
@@ -269,7 +269,7 @@ let interpret_program (prg : ('dcalc, 'm) gexpr program) (scope : ScopeName.t) :
let run includes optimize check_invariants ex_scope options =
let prg, _ =
Driver.Passes.dcalc options ~includes ~optimize ~check_invariants
- ~typed:Expr.typed
+ ~autotest:false ~typed:Expr.typed
in
Interpreter.load_runtime_modules prg
~hashf:(Hash.finalise ~closure_conversion:false ~monomorphize_types:false);
diff --git a/compiler/plugins/python.ml b/compiler/plugins/python.ml
index db64483ea..176b0377d 100644
--- a/compiler/plugins/python.ml
+++ b/compiler/plugins/python.ml
@@ -26,8 +26,9 @@ let run includes output optimize check_invariants closure_conversion options =
let open Driver.Commands in
let prg, type_ordering, _ =
Driver.Passes.scalc options ~includes ~optimize ~check_invariants
- ~closure_conversion ~keep_special_ops:false ~dead_value_assignment:true
- ~no_struct_literals:false ~monomorphize_types:false ~expand_ops:false
+ ~autotest:false ~closure_conversion ~keep_special_ops:false
+ ~dead_value_assignment:true ~no_struct_literals:false
+ ~monomorphize_types:false ~expand_ops:false
~renaming:(Some Scalc.To_python.renaming)
in
diff --git a/compiler/shared_ast/boundList.ml b/compiler/shared_ast/boundList.ml
index cafaad736..9e5699bd4 100644
--- a/compiler/shared_ast/boundList.ml
+++ b/compiler/shared_ast/boundList.ml
@@ -27,6 +27,15 @@ let rec to_seq = function
let v, next = Bindlib.unbind next_bind in
Seq.Cons ((v, item), to_seq next)
+let rec of_list list ~last =
+ match list with
+ | [] -> Bindlib.box_apply (fun l -> Last l) last
+ | (var, item) :: list ->
+ Bindlib.box_apply2
+ (fun item next -> Cons (item, next))
+ item
+ (Bindlib.bind_var var (of_list list ~last))
+
let rec last = function
| Last e -> e
| Cons (_, bnd) ->
@@ -70,16 +79,19 @@ let rec fold_lr ~top ~down ~bottom ~up = function
let bottom = fold_lr ~down ~up ~top ~bottom next in
up var item bottom
-let rec map ~f ~last = function
- | Last l -> Bindlib.box_apply (fun l -> Last l) (last l)
+let rec map_last ~f ~last = function
+ | Last l -> last l
| Cons (item, next_bind) ->
let var, next = Bindlib.unbind next_bind in
let var, item = f var item in
- let next_bind = Bindlib.bind_var var (map ~f ~last next) in
+ let next_bind = Bindlib.bind_var var (map_last ~f ~last next) in
Bindlib.box_apply2
(fun item next_bind -> Cons (item, next_bind))
item next_bind
+let map ~f ~last =
+ map_last ~f ~last:(fun l -> Bindlib.box_apply (fun l -> Last l) (last l))
+
let rec fold_map ~f ~last ~init:ctx = function
| Last l ->
let ret, l = last ctx l in
diff --git a/compiler/shared_ast/boundList.mli b/compiler/shared_ast/boundList.mli
index bd89e119a..dee108ee0 100644
--- a/compiler/shared_ast/boundList.mli
+++ b/compiler/shared_ast/boundList.mli
@@ -33,6 +33,11 @@ type ('e, 'elt, 'last) t = ('e, 'elt, 'last) bound_list =
val to_seq : (((_, _) gexpr as 'e), 'elt, _) t -> ('e Var.t * 'elt) Seq.t
(** Note that the boundlist terminator is ignored in the resulting sequence *)
+val of_list :
+ ('e Var.t * 'elt Bindlib.box) list ->
+ last:'last Bindlib.box ->
+ ('e, 'elt, 'last) t Bindlib.box
+
val last : (_, _, 'a) t -> 'a
val iter : f:('e Var.t -> 'elt -> unit) -> ('e, 'elt, 'last) t -> 'last
val find : f:('elt -> 'a option) -> (_, 'elt, _) t -> 'a
@@ -73,6 +78,14 @@ val map :
('e1, 'elt1, 'last1) t ->
('e2, 'elt2, 'last2) t Bindlib.box
+val map_last :
+ f:('e1 Var.t -> 'elt1 -> 'e2 Var.t * 'elt2 Bindlib.box) ->
+ last:('last1 -> ('e2, 'elt2, 'last2) t Bindlib.box) ->
+ ('e1, 'elt1, 'last1) t ->
+ ('e2, 'elt2, 'last2) t Bindlib.box
+(** A more expressive version of [map] that allows extending the tail (e.g. to
+ append new elements) *)
+
val fold_map :
f:('ctx -> 'e1 Var.t -> 'elt1 -> 'ctx * 'e2 Var.t * 'elt2 Bindlib.box) ->
last:('ctx -> 'last1 -> 'ret * 'last2 Bindlib.box) ->
diff --git a/compiler/shared_ast/interpreter.ml b/compiler/shared_ast/interpreter.ml
index 674796fe7..781eeaca9 100644
--- a/compiler/shared_ast/interpreter.ml
+++ b/compiler/shared_ast/interpreter.ml
@@ -60,6 +60,12 @@ let rec format_runtime_value lang ppf = function
~pp_sep:(fun ppf () -> Format.fprintf ppf ";@ ")
(format_runtime_value lang))
(Array.to_list elts)
+ | Runtime.Tuple elts ->
+ Format.fprintf ppf "@[<hv 2>(@,@[<hov>%a@]@;<0 -2>)@]"
+ (Format.pp_print_list
+ ~pp_sep:(fun ppf () -> Format.fprintf ppf ",@ ")
+ (format_runtime_value lang))
+ (Array.to_list elts)
| Runtime.Unembeddable -> Format.pp_print_string ppf "<object>"
let print_log lang entry =
@@ -115,6 +121,10 @@ let rec value_to_runtime_embedded = function
Runtime.Array
(Array.of_list
(List.map (fun e -> value_to_runtime_embedded (Mark.remove e)) el))
+ | ETuple el ->
+ Runtime.Tuple
+ (Array.of_list
+ (List.map (fun e -> value_to_runtime_embedded (Mark.remove e)) el))
| _ -> Runtime.Unembeddable
(* Todo: this should be handled early when resolving overloads. Here we have
@@ -132,7 +142,7 @@ let handle_eq pos evaluate_operator m lang e1 e2 =
| ELit (LDuration x1), ELit (LDuration x2) ->
o_eq_dur_dur (Expr.pos_to_runtime (Expr.mark_pos m)) x1 x2
| ELit (LDate x1), ELit (LDate x2) -> o_eq_dat_dat x1 x2
- | EArray es1, EArray es2 -> (
+ | EArray es1, EArray es2 | ETuple es1, ETuple es2 -> (
try
List.for_all2
(fun e1 e2 ->
@@ -796,8 +806,9 @@ let rec evaluate_expr :
(Print.UserFacing.expr lang)
(partially_evaluate_expr_for_assertion_failure_message ctx lang
(Expr.skip_wrappers e'));
- Mark.add m (ELit LUnit)
- (* raise Runtime.(Error (AssertionFailed, [Expr.pos_to_runtime pos])) *)
+ if Global.options.stop_on_error then
+ raise Runtime.(Error (AssertionFailed, [Expr.pos_to_runtime pos]))
+ else Mark.add m (ELit LUnit)
| _ ->
Message.error ~pos:(Expr.pos e') "%a" Format.pp_print_text
"Expected a boolean literal for the result of this assertion (should \
@@ -881,6 +892,7 @@ let evaluate_expr_trace :
((d, yes) interpr_kind, 't) gexpr ->
((d, yes) interpr_kind, 't) gexpr =
fun ctx lang e ->
+ Runtime.reset_log ();
Fun.protect
(fun () -> evaluate_expr ctx lang e)
~finally:(fun () ->
@@ -1103,56 +1115,63 @@ let interpret_program_dcalc p s : (Uid.MarkedString.info * ('a, 'm) gexpr) list
external functions), straying away from the DCalc and LCalc ASTS. [addcustom]
and [delcustom] are needed to expand and shrink the type of the terms to
reflect that. *)
-let evaluate_expr ctx lang e = evaluate_expr ctx lang (addcustom e)
+let evaluate_expr ctx lang e =
+ Fun.protect ~finally:Runtime.reset_log
+ @@ fun () -> evaluate_expr ctx lang (addcustom e)
+
+let loaded_modules = Hashtbl.create 17
let load_runtime_modules ~hashf prg =
let load (mname, intf_id) =
let hash = hashf intf_id.hash in
- let expect_hash =
- if intf_id.is_external then Hash.external_placeholder
- else Hash.to_string hash
- in
- let obj_file =
- let src = Pos.get_file (Mark.get (ModuleName.get_info mname)) in
- Dynlink.adapt_filename
- File.((dirname src / ModuleName.to_string mname) ^ ".cmo")
- in
- (if not (Sys.file_exists obj_file) then
- Message.error
- ~pos_msg:(fun ppf -> Format.pp_print_string ppf "Module defined here")
- ~pos:(Mark.get (ModuleName.get_info mname))
- "Compiled OCaml object %a@ not@ found.@ Make sure it has been \
- suitably compiled."
- File.format obj_file
- else
- try Dynlink.loadfile obj_file
- with Dynlink.Error dl_err ->
+ if Hashtbl.mem loaded_modules mname then ()
+ else
+ let expect_hash =
+ if intf_id.is_external then Hash.external_placeholder
+ else Hash.to_string hash
+ in
+ let obj_file =
+ let src = Pos.get_file (Mark.get (ModuleName.get_info mname)) in
+ Dynlink.adapt_filename
+ File.((dirname src / ModuleName.to_string mname) ^ ".cmo")
+ in
+ (if not (Sys.file_exists obj_file) then
Message.error
- "While loading compiled module from %a:@;<1 2>@[<hov>%a@]"
- File.format obj_file Format.pp_print_text
- (Dynlink.error_message dl_err));
- match Runtime.check_module (ModuleName.to_string mname) expect_hash with
- | Ok () -> ()
- | Error bad_hash ->
- Message.debug
- "Module hash mismatch for %a:@ @[<v>Expected: %a@,Found: %a@]"
- ModuleName.format mname Hash.format hash
- (fun ppf h ->
- try Hash.format ppf (Hash.of_string h)
- with Failure _ ->
- if h = Hash.external_placeholder then
- Format.fprintf ppf "@{<cyan>%s@}" Hash.external_placeholder
- else Format.fprintf ppf "@{<red><invalid>@}")
- bad_hash;
- Message.error
- "Module %a@ needs@ recompiling:@ %a@ was@ likely@ compiled@ from@ an@ \
- older@ version@ or@ with@ incompatible@ flags."
- ModuleName.format mname File.format obj_file
- | exception Not_found ->
- Message.error
- "Module %a@ was loaded from file %a but did not register properly, \
- there is something wrong in its code."
- ModuleName.format mname File.format obj_file
+ ~pos_msg:(fun ppf ->
+ Format.pp_print_string ppf "Module defined here")
+ ~pos:(Mark.get (ModuleName.get_info mname))
+ "Compiled OCaml object %a@ not@ found.@ Make sure it has been \
+ suitably compiled."
+ File.format obj_file
+ else
+ try Dynlink.loadfile obj_file
+ with Dynlink.Error dl_err ->
+ Message.error
+ "While loading compiled module from %a:@;<1 2>@[<hov>%a@]"
+ File.format obj_file Format.pp_print_text
+ (Dynlink.error_message dl_err));
+ match Runtime.check_module (ModuleName.to_string mname) expect_hash with
+ | Ok () -> Hashtbl.add loaded_modules mname hash
+ | Error bad_hash ->
+ Message.debug
+ "Module hash mismatch for %a:@ @[<v>Expected: %a@,Found: %a@]"
+ ModuleName.format mname Hash.format hash
+ (fun ppf h ->
+ try Hash.format ppf (Hash.of_string h)
+ with Failure _ ->
+ if h = Hash.external_placeholder then
+ Format.fprintf ppf "@{<cyan>%s@}" Hash.external_placeholder
+ else Format.fprintf ppf "@{<red><invalid>@}")
+ bad_hash;
+ Message.error
+ "Module %a@ needs@ recompiling:@ %a@ was@ likely@ compiled@ from@ \
+ an@ older@ version@ or@ with@ incompatible@ flags."
+ ModuleName.format mname File.format obj_file
+ | exception Not_found ->
+ Message.error
+ "Module %a@ was loaded from file %a but did not register properly, \
+ there is something wrong in its code."
+ ModuleName.format mname File.format obj_file
in
let modules_list_topo = Program.modules_to_list prg.decl_ctx.ctx_modules in
if modules_list_topo <> [] then
diff --git a/compiler/shared_ast/program.ml b/compiler/shared_ast/program.ml
index 55d96727b..b85c2981c 100644
--- a/compiler/shared_ast/program.ml
+++ b/compiler/shared_ast/program.ml
@@ -15,6 +15,7 @@
License for the specific language governing permissions and limitations under
the License. *)
+open Catala_utils
open Definitions
let map_decl_ctx ~f ctx =
@@ -26,6 +27,62 @@ let map_decl_ctx ~f ctx =
TopdefName.Map.map (fun (ty, vis) -> f ty, vis) ctx.ctx_topdefs;
}
+let map_scopes ~f prg =
+ let code_items =
+ let f var = function
+ | ScopeDef (name, body) ->
+ var, Bindlib.box_apply (fun body -> ScopeDef (name, body)) (f name body)
+ | Topdef (name, ty, vis, expr) ->
+ ( var,
+ Bindlib.box_apply
+ (fun e -> Topdef (name, ty, vis, e))
+ (Expr.Box.lift (Expr.rebox expr)) )
+ in
+ BoundList.map ~f ~last:(Scope.map_last_item ~varf:Fun.id) prg.code_items
+ |> Bindlib.unbox
+ in
+ { prg with code_items }
+
+let map_scopes_env ~f prg =
+ let code_items =
+ let f env var = function
+ | ScopeDef (name, body) ->
+ let pos = Mark.get (ScopeName.get_info name) in
+ let body1 = f env name body in
+ let env child =
+ env
+ @@ Expr.make_let_in var
+ ( TArrow
+ ( [TStruct body.scope_body_input_struct, pos],
+ (TStruct body.scope_body_output_struct, pos) ),
+ pos )
+ (Scope.to_expr prg.decl_ctx body)
+ child pos
+ in
+ let def = Bindlib.box_apply (fun body -> ScopeDef (name, body)) body1 in
+ env, var, def
+ | Topdef (name, ty, vis, expr) ->
+ let pos = Mark.get (TopdefName.get_info name) in
+ let env child =
+ env @@ Expr.make_let_in var ty (Expr.rebox expr) child pos
+ in
+ let def =
+ Bindlib.box_apply
+ (fun e -> Topdef (name, ty, vis, e))
+ (Expr.Box.lift (Expr.rebox expr))
+ in
+ env, var, def
+ in
+ BoundList.fold_map
+ ~init:(fun e -> e)
+ ~f
+ ~last:(fun _env last -> (), Scope.map_last_item ~varf:Fun.id last)
+ prg.code_items
+ |> snd
+ |> Bindlib.unbox
+ in
+ { prg with code_items }
+
let map_exprs ?typ ~f ~varf { code_items; decl_ctx; lang; module_name } =
let boxed_prg =
Bindlib.box_apply
diff --git a/compiler/shared_ast/program.mli b/compiler/shared_ast/program.mli
index 071b78738..818d6f009 100644
--- a/compiler/shared_ast/program.mli
+++ b/compiler/shared_ast/program.mli
@@ -25,6 +25,23 @@ val empty_ctx : decl_ctx
val map_decl_ctx : f:(typ -> typ) -> decl_ctx -> decl_ctx
+val map_scopes :
+ f:(ScopeName.t -> 'e scope_body -> 'e scope_body Bindlib.box) ->
+ 'e program ->
+ 'e program
+
+val map_scopes_env :
+ f:
+ (('e boxed -> 'e boxed) ->
+ ScopeName.t ->
+ 'e scope_body ->
+ 'e scope_body Bindlib.box) ->
+ 'e program ->
+ 'e program
+(** Maps on the scopes in the program, passing along an "environment" in the
+ form of a function binding all toplevel and scope definitions to their
+ variables in the argument expression *)
+
val map_exprs :
?typ:(typ -> typ) ->
f:('expr1 -> 'expr2 boxed) ->
diff --git a/compiler/shared_ast/scope.mli b/compiler/shared_ast/scope.mli
index 30d14699e..ccbaa583d 100644
--- a/compiler/shared_ast/scope.mli
+++ b/compiler/shared_ast/scope.mli
@@ -51,7 +51,6 @@ val map_last_item :
varf:(('a, 'm) naked_gexpr Bindlib.var -> 'e2 Bindlib.var) ->
('a, 'm) naked_gexpr list ->
'e2 list Bindlib.box
-
(** Helper function to handle the [code_item_list] terminator when manually
mapping on [code_item_list] *)
@@ -64,8 +63,8 @@ val to_expr : decl_ctx -> ('a any, 'm) gexpr scope_body -> ('a, 'm) boxed_gexpr
(** Usage: [to_expr ctx body scope_position] where [scope_position] corresponds
to the line of the scope declaration for instance. *)
-val unfold :
- decl_ctx -> ((_, 'm) gexpr as 'e) code_item_list -> ScopeName.t -> 'e boxed
+val unfold_body_expr : decl_ctx -> 'e scope_body_expr -> 'e boxed
+val unfold : decl_ctx -> 'e code_item_list -> ScopeName.t -> 'e boxed
val typ : _ scope_body -> typ
(** builds the arrow type for the specified scope *)
diff --git a/compiler/shared_ast/type.ml b/compiler/shared_ast/type.ml
index 9cc553bf1..94382eb2d 100644
--- a/compiler/shared_ast/type.ml
+++ b/compiler/shared_ast/type.ml
@@ -124,6 +124,22 @@ let rec hash ~strip ty =
| TAny -> !`TAny
| TClosureEnv -> !`TClosureEnv
+let rec has_arrow decl_ctx ty =
+ match Mark.remove ty with
+ | TArrow _ -> true
+ | TLit _ -> false
+ | TAny | TClosureEnv -> invalid_arg "Type.has_arrow"
+ | TTuple tl -> List.exists (has_arrow decl_ctx) tl
+ | TStruct n ->
+ StructField.Map.exists
+ (fun _ -> has_arrow decl_ctx)
+ (StructName.Map.find n decl_ctx.ctx_structs)
+ | TEnum n ->
+ EnumConstructor.Map.exists
+ (fun _ -> has_arrow decl_ctx)
+ (EnumName.Map.find n decl_ctx.ctx_enums)
+ | TOption ty | TArray ty | TDefault ty -> has_arrow decl_ctx ty
+
let rec arrow_return = function TArrow (_, b), _ -> arrow_return b | t -> t
let format = Print.typ_debug
diff --git a/compiler/shared_ast/type.mli b/compiler/shared_ast/type.mli
index fa8c29c1a..7d473545f 100644
--- a/compiler/shared_ast/type.mli
+++ b/compiler/shared_ast/type.mli
@@ -40,3 +40,6 @@ val unifiable_list : t list -> t list -> bool
val arrow_return : t -> t
(** Returns the last member in nested [TArrow] types *)
+
+val has_arrow : Definitions.decl_ctx -> t -> bool
+(** Fails (with [Invalid_argument]) on TAny and TClosureEnv *)
diff --git a/runtimes/ocaml/runtime.ml b/runtimes/ocaml/runtime.ml
index bcb73260b..842268d1b 100644
--- a/runtimes/ocaml/runtime.ml
+++ b/runtimes/ocaml/runtime.ml
@@ -247,6 +247,7 @@ type runtime_value =
| Enum of string * (string * runtime_value)
| Struct of string * (string * runtime_value) list
| Array of runtime_value array
+ | Tuple of runtime_value array
| Unembeddable
let unembeddable _ = Unembeddable
@@ -333,7 +334,7 @@ module BufferedJson = struct
(list (fun buf (cstr, v) ->
Printf.bprintf buf {|"%s":%a|} cstr runtime_value v))
elts
- | Array elts ->
+ | Array elts | Tuple elts ->
Printf.bprintf buf "[%a]" (list runtime_value) (Array.to_list elts)
| Unembeddable -> Buffer.add_string buf {|"unembeddable"|}
@@ -465,6 +466,12 @@ let rec pp_events ?(is_first_call = true) ppf events =
~pp_sep:(fun ppf () -> Format.fprintf ppf ";@ ")
format_value)
(elts |> Array.to_list)
+ | Tuple elts ->
+ Format.fprintf ppf "@[<hv 2>(@ %a@;<1 -2>)@]"
+ (Format.pp_print_list
+ ~pp_sep:(fun ppf () -> Format.fprintf ppf ",@ ")
+ format_value)
+ (elts |> Array.to_list)
and format_event ppf = function
| VarComputation var_def_with_fun
when Option.is_some var_def_with_fun.fun_calls ->
diff --git a/runtimes/ocaml/runtime.mli b/runtimes/ocaml/runtime.mli
index 292b2c3a1..9be3d230d 100644
--- a/runtimes/ocaml/runtime.mli
+++ b/runtimes/ocaml/runtime.mli
@@ -103,6 +103,7 @@ type runtime_value =
| Enum of string * (string * runtime_value)
| Struct of string * (string * runtime_value) list
| Array of runtime_value Array.t
+ | Tuple of runtime_value Array.t
| Unembeddable
val unembeddable : 'a -> runtime_value
| Cross-backend testing
`clerk` should have a command like `clerk test --cross-backends` that compiles a module full of test scopes into all available backends, then launch the tests and checks whether they pass in the backends as well as the interpreter. For this to work, the tests should use assertions to check that computed outputs equal expected outputs.
| Decision : @AltGr added a pass in the compilation chain at the Dcalc level. If enabled, this pass adds for every test scope (a scope with no input) a set of asserts on its result variables that check whether the output is equal to the output as computed by the interpreter. Then, these asserts can be carried to every compilation backend. | 2024-10-01T13:08:42 | 0.0 | [] | [] |
||
CatalaLang/catala | CatalaLang__catala-630 | 01b03b69a06ad64e5ed970649ea68defe06032cb | diff --git a/compiler/catala_utils/message.ml b/compiler/catala_utils/message.ml
index 1b977d3fc..d16e69c33 100644
--- a/compiler/catala_utils/message.ml
+++ b/compiler/catala_utils/message.ml
@@ -327,6 +327,7 @@ open Content
(** {1 Error exception} *)
exception CompilerError of Content.t
+exception CompilerErrors of Content.t list
(** {1 Error printing} *)
@@ -404,3 +405,34 @@ let result = make ~level:Result ~cont:emit
let results r = emit (List.flatten (List.map of_result r)) Result
let warning = make ~level:Warning ~cont:emit
let error = make ~level:Error ~cont:(fun m _ -> raise (CompilerError m))
+
+(* Multiple errors handling *)
+let global_errors = ref None
+
+let delayed_error x =
+ make ~level:Error ~cont:(fun m _ ->
+ match !global_errors with
+ | None ->
+ error ~internal:true
+ "delayed error called outside scope: encapsulate using \
+ 'with_delayed_errors' first"
+ | Some l ->
+ global_errors := Some (m :: l);
+ x)
+
+let with_delayed_errors (f : unit -> 'a) : 'a =
+ (match !global_errors with
+ | None -> global_errors := Some []
+ | Some _ ->
+ error ~internal:true
+ "delayed error called outside scope: encapsulate using \
+ 'with_delayed_errors' first");
+ let r = f () in
+ match !global_errors with
+ | None -> error ~internal:true "intertwined delayed error scope"
+ | Some [] ->
+ global_errors := None;
+ r
+ | Some errs ->
+ global_errors := None;
+ raise (CompilerErrors (List.rev errs))
diff --git a/compiler/catala_utils/message.mli b/compiler/catala_utils/message.mli
index 26bb86763..d1204ec98 100644
--- a/compiler/catala_utils/message.mli
+++ b/compiler/catala_utils/message.mli
@@ -60,9 +60,10 @@ end
(** This functions emits the message according to the emission type defined by
[Cli.message_format_flag]. *)
-(** {1 Error exception} *)
+(** {1 Error exceptions} *)
exception CompilerError of Content.t
+exception CompilerErrors of Content.t list
(** {1 Some formatting helpers}*)
@@ -98,5 +99,15 @@ val log : ('a, unit) emitter
val debug : ('a, unit) emitter
val result : ('a, unit) emitter
val warning : ('a, unit) emitter
-val error : ('a, 'b) emitter
+val error : ('a, 'exn) emitter
val results : Content.message list -> unit
+
+(** Multiple errors *)
+
+val with_delayed_errors : (unit -> 'a) -> 'a
+(** [with_delayed_errors f] calls [f] and registers each error triggered using
+ [delayed_error].
+
+ @raise CompilerErrors when delayed errors were registered. *)
+
+val delayed_error : 'b -> ('a, 'b) emitter
diff --git a/compiler/driver.ml b/compiler/driver.ml
index 6e0cd14a3..c37d79df1 100644
--- a/compiler/driver.ml
+++ b/compiler/driver.ml
@@ -1204,6 +1204,11 @@ let main () =
Message.Content.emit content Error;
if Global.options.debug then Printexc.print_raw_backtrace stderr bt;
exit Cmd.Exit.some_error
+ | exception Message.CompilerErrors contents ->
+ let bt = Printexc.get_raw_backtrace () in
+ List.iter (fun c -> Message.Content.emit c Error) contents;
+ if Global.options.debug then Printexc.print_raw_backtrace stderr bt;
+ exit Cmd.Exit.some_error
| exception Failure msg ->
let bt = Printexc.get_raw_backtrace () in
Message.Content.emit (Message.Content.of_string msg) Error;
diff --git a/compiler/surface/parser_driver.ml b/compiler/surface/parser_driver.ml
index 672749d31..2481239d0 100644
--- a/compiler/surface/parser_driver.ml
+++ b/compiler/surface/parser_driver.ml
@@ -60,29 +60,6 @@ let rec law_struct_list_to_tree (f : Ast.law_structure list) :
let gobbled, rest_out = split_rest_tree rest_tree in
LawHeading (heading, gobbled) :: rest_out))
-(** Usage: [raise_parser_error error_loc last_good_loc token msg]
-
- Raises an error message featuring the [error_loc] position where the parser
- has failed, the [token] on which the parser has failed, and the error
- message [msg]. If available, displays [last_good_loc] the location of the
- last token correctly parsed. *)
-let raise_parser_error
- ?(suggestion : string list option)
- (error_loc : Pos.t)
- (last_good_loc : Pos.t option)
- (token : string)
- (msg : Format.formatter -> unit) : 'a =
- Message.error ?suggestion
- ~extra_pos:
- [
- (match last_good_loc with
- | None -> "Error token", error_loc
- | Some last_good_loc -> "Last good token", last_good_loc);
- ]
- "@[<hov>Syntax error at %a:@ %t@]"
- (fun ppf string -> Format.fprintf ppf "@{<yellow>\"%s\"@}" string)
- token msg
-
module ParserAux (LocalisedLexer : Lexer_common.LocalisedLexer) = struct
include Parser.Make (LocalisedLexer)
module I = MenhirInterpreter
@@ -93,40 +70,12 @@ module ParserAux (LocalisedLexer : Lexer_common.LocalisedLexer) = struct
| MenhirLib.General.Nil -> 0
| MenhirLib.General.Cons (Element (s, _, _, _), _) -> I.number s
- (** Usage: [fail lexbuf env token_list last_input_needed]
-
- Raises an error with meaningful hints about what the parsing error was.
- [lexbuf] is the lexing buffer state at the failure point, [env] is the
- Menhir environment and [last_input_needed] is the last checkpoint of a
- valid Menhir state before the parsing error. [token_list] is provided by
- things like {!val: Surface.Lexer_common.token_list_language_agnostic} and
- is used to provide suggestions of the tokens acceptable at the failure
- point *)
- let fail
+ let register_parsing_error
(lexbuf : lexbuf)
(env : 'semantic_value I.env)
- (token_list : (string * Tokens.token) list)
- (last_input_needed : 'semantic_value I.env option) : 'a =
- let wrong_token = Utf8.lexeme lexbuf in
- let acceptable_tokens, last_positions =
- match last_input_needed with
- | Some last_input_needed ->
- ( List.filter
- (fun (_, t) ->
- I.acceptable
- (I.input_needed last_input_needed)
- t
- (fst (lexing_positions lexbuf)))
- token_list,
- Some (I.positions last_input_needed) )
- | None -> token_list, None
- in
- let similar_acceptable_tokens =
- Suggestions.suggestion_minimum_levenshtein_distance_association
- (List.map (fun (s, _) -> s) acceptable_tokens)
- wrong_token
- in
- (* The parser has suspended itself because of a syntax error. Stop. *)
+ (acceptable_tokens : (string * Tokens.token) list)
+ (similar_candidate_tokens : string list) : 'a =
+ (* The parser has suspended itself because of a syntax error. *)
let custom_menhir_message ppf =
(match Parser_errors.message (state env) with
| exception Not_found -> Format.fprintf ppf "@{<yellow>unexpected token@}"
@@ -141,31 +90,162 @@ module ParserAux (LocalisedLexer : Lexer_common.LocalisedLexer) = struct
(fun ppf string -> Format.fprintf ppf "@{<yellow>\"%s\"@}" string))
(List.map (fun (s, _) -> s) acceptable_tokens)
in
- raise_parser_error ~suggestion:similar_acceptable_tokens
- (Pos.from_lpos (lexing_positions lexbuf))
- (Option.map Pos.from_lpos last_positions)
- (Utf8.lexeme lexbuf) custom_menhir_message
+ let suggestion =
+ if similar_candidate_tokens = [] then None
+ else Some similar_candidate_tokens
+ in
+ let error_loc = Pos.from_lpos (lexing_positions lexbuf) in
+ let wrong_token = Utf8.lexeme lexbuf in
+ let msg = custom_menhir_message in
+ Message.delayed_error () ?suggestion
+ ~extra_pos:["", error_loc]
+ "@[<hov>Syntax error at %a:@ %t@]"
+ (fun ppf string -> Format.fprintf ppf "@{<yellow>\"%s\"@}" string)
+ wrong_token msg
+
+ let sorted_candidate_tokens lexbuf token_list env =
+ let acceptable_tokens =
+ List.filter
+ (fun (_, t) ->
+ I.acceptable (I.input_needed env) t (fst (lexing_positions lexbuf)))
+ token_list
+ in
+ let similar_acceptable_tokens =
+ Suggestions.suggestion_minimum_levenshtein_distance_association
+ (List.map (fun (s, _) -> s) acceptable_tokens)
+ (Utf8.lexeme lexbuf)
+ in
+ let module S = Set.Make (String) in
+ let s_toks = S.of_list similar_acceptable_tokens in
+ let sorted_acceptable_tokens =
+ List.sort
+ (fun (s, _) _ -> if S.mem s s_toks then -1 else 1)
+ acceptable_tokens
+ in
+ similar_acceptable_tokens, sorted_acceptable_tokens
+
+ type 'a ring_buffer = {
+ curr_idx : int;
+ start : int ref;
+ stop : int ref;
+ max_size : int;
+ feed : unit -> 'a;
+ data : 'a array;
+ }
+
+ let next ({ curr_idx; start; stop; max_size; feed; data } as buff) =
+ let next_idx = succ curr_idx mod max_size in
+ if curr_idx = !stop then (
+ let new_elt = feed () in
+ data.(curr_idx) <- new_elt;
+ let size = ((!stop - !start + max_size) mod max_size) + 1 in
+ stop := succ !stop mod max_size;
+ let is_full = size = max_size in
+ if is_full then
+ (* buffer will get full: start is also moved *)
+ start := succ !start mod max_size;
+ { buff with curr_idx = next_idx }, new_elt)
+ else
+ let elt = data.(curr_idx) in
+ { buff with curr_idx = next_idx }, elt
+
+ let create ?(max_size = 20) feed v =
+ {
+ curr_idx = 0;
+ start = ref 0;
+ stop = ref 0;
+ feed;
+ data = Array.make max_size v;
+ max_size;
+ }
+
+ let progress ?(max_step = 10) lexer_buffer env checkpoint : int =
+ let rec loop nth_step lexer_buffer env checkpoint =
+ if nth_step >= max_step then nth_step
+ else
+ match checkpoint with
+ | I.InputNeeded env ->
+ let new_lexer_buffer, token = next lexer_buffer in
+ let checkpoint = I.offer checkpoint token in
+ loop (succ nth_step) new_lexer_buffer env checkpoint
+ | I.Shifting _ | I.AboutToReduce _ ->
+ let checkpoint = I.resume checkpoint in
+ loop nth_step lexer_buffer env checkpoint
+ | I.HandlingError (_ : _ I.env) | I.Accepted _ | I.Rejected -> nth_step
+ in
+ loop 0 lexer_buffer env checkpoint
+
+ let recover_parsing_error lexer_buffer env acceptable_tokens =
+ let candidates_checkpoints =
+ let without_token = I.input_needed env in
+ let make_with_token tok =
+ let l, r = I.positions env in
+ let checkpoint = I.input_needed env in
+ I.offer checkpoint (tok, l, r)
+ in
+ without_token :: List.map make_with_token acceptable_tokens
+ in
+ let threshold = min 10 lexer_buffer.max_size in
+ let rec iterate ((curr_max_progress, _) as acc) = function
+ | [] -> acc
+ | cp :: t ->
+ if curr_max_progress >= 10 then acc
+ else
+ let cp_progress = progress ~max_step:threshold lexer_buffer env cp in
+ if cp_progress > curr_max_progress then iterate (cp_progress, cp) t
+ else iterate acc t
+ in
+ let best_progress, best_cp =
+ let dummy_cp = I.input_needed env in
+ iterate (-1, dummy_cp) candidates_checkpoints
+ in
+ (* We do not consider paths were progress isn't significant *)
+ if best_progress < 2 then None else Some best_cp
(** Main parsing loop *)
- let rec loop
- (next_token : unit -> Tokens.token * Lexing.position * Lexing.position)
+ let loop
+ (lexer_buffer :
+ (Tokens.token * Lexing.position * Lexing.position) ring_buffer)
(token_list : (string * Tokens.token) list)
(lexbuf : lexbuf)
(last_input_needed : 'semantic_value I.env option)
(checkpoint : 'semantic_value I.checkpoint) : Ast.source_file =
- match checkpoint with
- | I.InputNeeded env ->
- let token = next_token () in
- let checkpoint = I.offer checkpoint token in
- loop next_token token_list lexbuf (Some env) checkpoint
- | I.Shifting _ | I.AboutToReduce _ ->
- let checkpoint = I.resume checkpoint in
- loop next_token token_list lexbuf last_input_needed checkpoint
- | I.HandlingError env -> fail lexbuf env token_list last_input_needed
- | I.Accepted v -> v
- | I.Rejected ->
- (* Cannot happen as we stop at syntax error immediatly *)
- assert false
+ let rec loop
+ (lexer_buffer :
+ (Tokens.token * Lexing.position * Lexing.position) ring_buffer)
+ (token_list : (string * Tokens.token) list)
+ (lexbuf : lexbuf)
+ (last_input_needed : 'semantic_value I.env option)
+ (checkpoint : 'semantic_value I.checkpoint) : Ast.source_file =
+ match checkpoint with
+ | I.InputNeeded env ->
+ let new_lexer_buffer, token = next lexer_buffer in
+ let checkpoint = I.offer checkpoint token in
+ loop new_lexer_buffer token_list lexbuf (Some env) checkpoint
+ | I.Shifting _ | I.AboutToReduce _ ->
+ let checkpoint = I.resume checkpoint in
+ loop lexer_buffer token_list lexbuf last_input_needed checkpoint
+ | I.HandlingError (env : 'semantic_value I.env) -> (
+ let similar_candidate_tokens, sorted_acceptable_tokens =
+ sorted_candidate_tokens lexbuf token_list env
+ in
+ register_parsing_error lexbuf env sorted_acceptable_tokens
+ similar_candidate_tokens;
+ let best_effort_checkpoint =
+ recover_parsing_error lexer_buffer env
+ (List.map snd sorted_acceptable_tokens)
+ in
+ match best_effort_checkpoint with
+ | None ->
+ (* No reasonable solution, aborting *)
+ []
+ | Some best_effort_checkpoint ->
+ loop lexer_buffer token_list lexbuf last_input_needed
+ best_effort_checkpoint)
+ | I.Accepted v -> v
+ | I.Rejected -> []
+ in
+ loop lexer_buffer token_list lexbuf last_input_needed checkpoint
(** Stub that wraps the parsing main loop and handles the Menhir/Sedlex type
difference for [lexbuf]. *)
@@ -174,12 +254,17 @@ module ParserAux (LocalisedLexer : Lexer_common.LocalisedLexer) = struct
(token_list : (string * Tokens.token) list)
(target_rule : Lexing.position -> 'semantic_value I.checkpoint)
(lexbuf : lexbuf) : Ast.source_file =
- let lexer : unit -> Tokens.token * Lexing.position * Lexing.position =
- with_tokenizer lexer' lexbuf
+ let lexer_buffer :
+ (Tokens.token * Lexing.position * Lexing.position) ring_buffer =
+ let feed = with_tokenizer lexer' lexbuf in
+ create feed Lexing.(Tokens.EOF, dummy_pos, dummy_pos)
in
try
- loop lexer token_list lexbuf None
- (target_rule (fst @@ Sedlexing.lexing_positions lexbuf))
+ let target_rule =
+ target_rule (fst @@ Sedlexing.lexing_positions lexbuf)
+ in
+ Message.with_delayed_errors
+ @@ fun () -> loop lexer_buffer token_list lexbuf None target_rule
with Sedlexing.MalFormed | Sedlexing.InvalidCodepoint _ ->
Lexer_common.raise_lexer_error
(Pos.from_lpos (lexing_positions lexbuf))
| Printf multiple errors before exiting
Currently, the Catala compiler exits at the first error it encounters. It would be nice to only exit when the errors it has found forbid it to continue processing, so that it displays multiple related error messages in the same run.
For that, every AST should be augmented with an `Error` case produced when an error message is raised. The compiler could then continue its operations until it actually needs to do something with the location of the AST where `Error` is, in which case it would exit.
| I think it would be quite useful to me as well to have an `Error` case in the AST, because it would allow me to continue other branches of evaluation after an error is found.
I have been considering using OCaml's `Result` module to make the concolic evaluator return a `((dcalc,'m) gexpr, type_for_errors) result`, but an AST case would probably be better. How much effort do you think it would need to at least add the `Error` AST case (even if it's underused at first)?
I guess one solid (if not two) days of refactoring following the OCaml compiler errors... | 2024-06-05T13:22:53 | 0.0 | [] | [] |
||
CatalaLang/catala | CatalaLang__catala-602 | ff683f3ea052e40b7f62c659a3ce34f2bd8dd3a2 | diff --git a/compiler/catala_utils/message.ml b/compiler/catala_utils/message.ml
index 47aa05f6d..47656321b 100644
--- a/compiler/catala_utils/message.ml
+++ b/compiler/catala_utils/message.ml
@@ -145,19 +145,13 @@ module Content = struct
content @ [Position { pos = position; pos_message = message }]
let of_string (s : string) : t =
- [
- MainMessage
- (fun ppf ->
- Format.pp_open_hovbox ppf 0;
- Format.pp_print_text ppf s;
- Format.pp_close_box ppf ());
- ]
+ [MainMessage (fun ppf -> Format.pp_print_text ppf s)]
let emit (content : t) (target : level) : unit =
match Global.options.message_format with
| Global.Human ->
let ppf = get_ppf target in
- Format.pp_open_hvbox ppf 0;
+ Format.pp_open_vbox ppf 0;
Format.pp_print_list
~pp_sep:(fun ppf () -> Format.fprintf ppf "@,@,")
(fun ppf -> function
@@ -167,14 +161,9 @@ module Content = struct
pos.pos_message;
Pos.format_loc_text ppf pos.pos
| MainMessage msg ->
- Format.fprintf ppf "%t%t%t" (pp_marker target)
- (fun ppf ->
- match target with
- | Result | Error -> Format.pp_print_space ppf ()
- | _ -> Format.pp_print_char ppf ' ')
- msg
+ Format.fprintf ppf "@[<hov 2>%t %t@]" (pp_marker target) msg
| Outcome msg ->
- Format.fprintf ppf "@[<hv>%t@ %t@]" (pp_marker target) msg
+ Format.fprintf ppf "@[<hov>%t@ %t@]" (pp_marker target) msg
| Suggestion suggestions_list ->
Suggestions.format ppf suggestions_list)
ppf content;
diff --git a/compiler/desugared/dependency.ml b/compiler/desugared/dependency.ml
index 1630f9387..51490cdf2 100644
--- a/compiler/desugared/dependency.ml
+++ b/compiler/desugared/dependency.ml
@@ -142,8 +142,8 @@ let check_for_cycle (scope : Ast.scope) (g : ScopeDependencies.t) : unit =
(List.tl cycle @ [List.hd cycle])
in
Message.error ~extra_pos
- "@[<hov 2>Cyclic dependency detected between the following variables of \
- scope %a:@ @[<hv>%a@]@]"
+ "Cyclic dependency detected between the following variables of scope \
+ %a:@ @[<hv>%a@]"
ScopeName.format scope.scope_uid
(Format.pp_print_list
~pp_sep:(fun ppf () -> Format.fprintf ppf " →@ ")
@@ -197,13 +197,13 @@ let build_scope_dependencies (scope : Ast.scope) : ScopeDependencies.t =
match def_key with
| _, Ast.ScopeDef.Var _ ->
Message.error ~pos:fv_def_pos
- "The variable %a is used in one of its definitions, but \
- recursion is forbidden in Catala"
+ "The variable@ %a@ is@ used@ in@ one@ of@ its@ \
+ definitions@ (Catala doesn't support recursion)"
Ast.ScopeDef.format def_key
| v, Ast.ScopeDef.SubScopeInput _ ->
Message.error ~pos:fv_def_pos
- "The subscope %a is used in the definition of its own \
- input %a, but recursion is forbidden in Catala"
+ "The subscope@ %a@ is@ used@ in@ the@ definition@ of@ its@ \
+ own@ input@ %a@ (Catala doesn't support recursion)"
ScopeVar.format (Mark.remove v) Ast.ScopeDef.format def_key
in
ScopeDependencies.add_edge_e g
@@ -499,6 +499,7 @@ let check_for_exception_cycle
in
let v, _ = RuleName.Map.choose (List.hd scc).rules in
Message.error ~extra_pos:spans
- "Exception cycle detected when defining %a: each of these %d exceptions \
- applies over the previous one, and the first applies over the last"
+ "Exception cycle detected when defining@ %a:@ each of these %d \
+ exceptions applies over the previous one,@ and@ the@ first@ applies@ \
+ over@ the@ last"
RuleName.format v (List.length scc)
diff --git a/compiler/desugared/from_surface.ml b/compiler/desugared/from_surface.ml
index 228a13905..a9b162f57 100644
--- a/compiler/desugared/from_surface.ml
+++ b/compiler/desugared/from_surface.ml
@@ -174,8 +174,8 @@ let rec disambiguate_constructor
| [] ->
if EnumName.Map.cardinal possible_c_uids > 1 then
Message.error ~pos:(Mark.get constructor)
- "This constructor name is ambiguous, it can belong to %a. Disambiguate \
- it by prefixing it with the enum name."
+ "This constructor name is ambiguous, it can belong to@ %a.@ \
+ Disambiguate it by prefixing it with the enum name."
(EnumName.Map.format_keys ~pp_sep:(fun fmt () ->
Format.pp_print_string fmt " or "))
possible_c_uids;
@@ -187,8 +187,8 @@ let rec disambiguate_constructor
let c_uid = EnumName.Map.find e_uid possible_c_uids in
e_uid, c_uid
with EnumName.Map.Not_found _ ->
- Message.error ~pos "Enum %s does not contain case %s" (Mark.remove enum)
- (Mark.remove constructor))
+ Message.error ~pos "Enum %s@ does@ not@ contain@ case@ %s"
+ (Mark.remove enum) (Mark.remove constructor))
| mod_id :: path ->
let constructor =
List.map (Mark.map (fun (_, c) -> path, c)) constructor0
@@ -212,6 +212,7 @@ let rec check_formula (op, pos_op) e =
true]) *)
Message.error
~extra_pos:["", pos_op; "", pos_op1]
+ "%a" Format.pp_print_text
"Please add parentheses to explicit which of these operators should be \
applied first";
check_formula (op1, pos_op1) e1;
@@ -401,18 +402,19 @@ let rec translate_expr
(* TODO *)
Message.error
~pos:(Mark.get (Option.get st))
+ "%a" Format.pp_print_text
"Referring to a previous state of the variable being defined \
is not supported at the moment.";
match sx' with
| None ->
- failwith
+ Message.error ~internal:true
"inconsistent state: inside a definition of a variable with no \
state but variable has states"
| Some inside_def_state ->
if StateName.compare inside_def_state (List.hd states) = 0 then
- Message.error ~pos
- "It is impossible to refer to the variable you are defining \
- when defining its first state."
+ Message.error ~pos "%a" Format.pp_print_text
+ "The definition of the initial state of this variable refers \
+ to itself."
else
(* Tricky: we have to retrieve in the list the previous state
with respect to the state that we are defining. *)
@@ -635,8 +637,8 @@ let rec translate_expr
EnumName.Map.cardinal possible_c_uids > 1
then
Message.error ~pos:pos_constructor
- "This constructor name is ambiguous, it can belong to %a. \
- Desambiguate it by prefixing it with the enum name."
+ "This constructor name is ambiguous, it can belong to@ %a.@ \
+ Disambiguate it by prefixing it with the enum name."
(EnumName.Map.format_keys ~pp_sep:(fun fmt () ->
Format.fprintf fmt " or "))
possible_c_uids
@@ -963,8 +965,9 @@ and disambiguate_match_and_build_expression
else
Message.error
~pos:(Mark.get case.S.match_case_pattern)
- "This case matches a constructor of enumeration %a but previous \
- case were matching constructors of enumeration %a"
+ "This case matches a constructor of enumeration@ %a@ but@ \
+ previous@ cases@ were@ matching@ constructors@ of@ enumeration@ \
+ %a"
EnumName.format e_uid EnumName.format e_uid'
in
(match EnumConstructor.Map.find_opt c_uid cases_d with
@@ -972,8 +975,8 @@ and disambiguate_match_and_build_expression
| Some e_case ->
Message.error
~extra_pos:["", Mark.get case.match_case_expr; "", Expr.pos e_case]
- "The constructor %a has been matched twice:" EnumConstructor.format
- c_uid);
+ "The constructor %a@ has@ been@ matched@ twice:"
+ EnumConstructor.format c_uid);
let local_vars, param_var =
create_var local_vars (Option.map Mark.remove binding)
in
@@ -1001,7 +1004,7 @@ and disambiguate_match_and_build_expression
match e_uid with
| None ->
if 1 = nb_cases then
- Message.error ~pos:case_pos
+ Message.error ~pos:case_pos "%a" Format.pp_print_text
"Couldn't infer the enumeration name from lonely wildcard \
(wildcard cannot be used as single match case)"
else raise_wildcard_not_last_case_err ()
@@ -1016,8 +1019,8 @@ and disambiguate_match_and_build_expression
in
if EnumConstructor.Map.is_empty missing_constructors then
Message.warning ~pos:case_pos
- "Unreachable match case, all constructors of the enumeration %a \
- are already specified"
+ "Unreachable match case, all constructors of the enumeration@ %a@ \
+ are@ already@ specified"
EnumName.format e_uid;
(* The current used strategy is to replace the wildcard branch:
match foo with
@@ -1093,8 +1096,8 @@ let rec arglist_eq_check pos_decl pos_def pdecl pdefs =
Message.error
~extra_pos:
["Argument declared here:", decl_apos; "Defined here:", def_apos]
- "Function argument name mismatch between declaration ('%a') and \
- definition ('%a')"
+ "Function argument name mismatch between declaration@ ('%a')@ and@ \
+ definition@ ('%a')"
Print.lit_style decl_arg Print.lit_style def_arg
let process_rule_parameters
@@ -1114,7 +1117,7 @@ let process_rule_parameters
"Declared here without arguments", decl_pos;
"Unexpected arguments appearing here", pos;
]
- "Extra arguments in this definition of %a" Ast.ScopeDef.format decl_name
+ "Extra arguments in this definition of@ %a" Ast.ScopeDef.format decl_name
| Some (_, pos), None ->
Message.error
~extra_pos:
@@ -1395,6 +1398,7 @@ let check_unlabeled_exception
Message.error ~pos:(Mark.get item)
~pos_msg:(fun ppf -> Format.pp_print_text ppf "Ambiguous exception")
~extra_pos:(List.map (fun p -> "Candidate definition", p) pos)
+ "%a" Format.pp_print_text
"This exception can refer to several definitions. Try using labels \
to disambiguate"
| Some (Unique _) -> ()))
diff --git a/compiler/desugared/linting.ml b/compiler/desugared/linting.ml
index cbb314a47..c4e24dc9b 100644
--- a/compiler/desugared/linting.ml
+++ b/compiler/desugared/linting.ml
@@ -39,8 +39,8 @@ let detect_empty_definitions (p : program) : unit =
then
Message.warning
~pos:(ScopeDef.get_position scope_def_key)
- "In scope \"%a\", the variable \"%a\" is declared but never \
- defined; did you forget something?"
+ "In scope \"%a\",@ the@ variable@ \"%a\"@ is@ declared@ but@ \
+ never@ defined;@ did you forget something?"
ScopeName.format scope_name Ast.ScopeDef.format scope_def_key)
scope.scope_defs)
p.program_root.module_scopes
@@ -95,8 +95,8 @@ let detect_identical_rules (p : program) : unit =
(fun _ pos ->
if List.length pos > 1 then
Message.warning ~extra_pos:pos
- "These %s have identical justifications and consequences; is \
- it a mistake?"
+ "These %s have identical justifications@ and@ consequences;@ \
+ is it a mistake?"
(if scope_def.scope_def_is_condition then "rules"
else "definitions"))
rules_seen)
@@ -155,7 +155,7 @@ let detect_unused_struct_fields (p : program) : unit =
then
Message.warning
~pos:(snd (StructName.get_info s_name))
- "The structure \"%a\" is never used; maybe it's unnecessary?"
+ "The structure@ \"%a\"@ is@ never@ used;@ maybe it's unnecessary?"
StructName.format s_name
else
StructField.Map.iter
@@ -166,8 +166,8 @@ let detect_unused_struct_fields (p : program) : unit =
then
Message.warning
~pos:(snd (StructField.get_info field))
- "The field \"%a\" of struct @{<yellow>\"%a\"@} is never \
- used; maybe it's unnecessary?"
+ "The field@ \"%a\"@ of@ struct@ @{<yellow>\"%a\"@}@ is@ \
+ never@ used;@ maybe it's unnecessary?"
StructField.format field StructName.format s_name)
fields)
p.program_ctx.ctx_structs
@@ -213,7 +213,7 @@ let detect_unused_enum_constructors (p : program) : unit =
then
Message.warning
~pos:(snd (EnumName.get_info e_name))
- "The enumeration \"%a\" is never used; maybe it's unnecessary?"
+ "The enumeration@ \"%a\"@ is@ never@ used;@ maybe it's unnecessary?"
EnumName.format e_name
else
EnumConstructor.Map.iter
@@ -223,8 +223,8 @@ let detect_unused_enum_constructors (p : program) : unit =
then
Message.warning
~pos:(snd (EnumConstructor.get_info constructor))
- "The constructor \"%a\" of enumeration \"%a\" is never used; \
- maybe it's unnecessary?"
+ "The constructor@ \"%a\"@ of@ enumeration@ \"%a\"@ is@ \
+ never@ used;@ maybe it's unnecessary?"
EnumConstructor.format constructor EnumName.format e_name)
constructors)
p.program_ctx.ctx_enums
@@ -268,8 +268,8 @@ let detect_dead_code (p : program) : unit =
let emit_unused_warning vx =
Message.warning
~pos:(Mark.get (Dependency.Vertex.info vx))
- "Unused varible: %a does not contribute to computing any of scope %a \
- outputs. Did you forget something?"
+ "Unused varible:@ %a@ does@ not@ contribute@ to@ computing@ any@ of@ \
+ scope@ %a@ outputs.@ Did you forget something?"
Dependency.Vertex.format vx ScopeName.format scope_name
in
Dependency.ScopeDependencies.iter_vertex
diff --git a/compiler/desugared/name_resolution.ml b/compiler/desugared/name_resolution.ml
index 57724dae8..4c758094d 100644
--- a/compiler/desugared/name_resolution.ml
+++ b/compiler/desugared/name_resolution.ml
@@ -396,6 +396,7 @@ let process_data_decl
(Ident.Map.find state_id_name states_idmap
|> StateName.get_info) );
]
+ "%a" Format.pp_print_text
"There are two states with the same name for the same variable: \
this is ambiguous. Please change the name of either states.";
let state_uid = StateName.fresh state_id in
@@ -431,8 +432,8 @@ let process_struct_decl (ctxt : context) (sdecl : Surface.Ast.struct_decl) :
if sdecl.struct_decl_fields = [] then
Message.error
~pos:(Mark.get sdecl.struct_decl_name)
- "The struct %s does not have any fields; give it some for Catala to be \
- able to accept it."
+ "The struct@ %s@ does@ not@ have@ any@ fields;@ give it some for Catala \
+ to be able to accept it."
(Mark.remove sdecl.struct_decl_name);
List.fold_left
(fun ctxt (fdecl, _) ->
@@ -476,8 +477,8 @@ let process_enum_decl (ctxt : context) (edecl : Surface.Ast.enum_decl) : context
if List.length edecl.enum_decl_cases = 0 then
Message.error
~pos:(Mark.get edecl.enum_decl_name)
- "The enum %s does not have any cases; give it some for Catala to be able \
- to accept it."
+ "The enum@ %s@ does@ not@ have@ any@ cases;@ give it some for Catala to \
+ be able to accept it."
(Mark.remove edecl.enum_decl_name);
List.fold_left
(fun ctxt (cdecl, cdecl_pos) ->
@@ -777,7 +778,7 @@ let get_def_key
"", Mark.get state;
"Variable declaration:", Mark.get (ScopeVar.get_info x_uid);
]
- "This identifier is not a state declared for variable %a."
+ "This identifier is not a state declared for variable@ %a."
ScopeVar.format x_uid)
| None ->
if not (Ident.Map.is_empty var_sig.var_sig_states_idmap) then
@@ -787,8 +788,8 @@ let get_def_key
"", Mark.get x;
"Variable declaration:", Mark.get (ScopeVar.get_info x_uid);
]
- "This definition does not indicate which state has to be \
- considered for variable %a."
+ "This definition does not indicate which state has to@ be@ \
+ considered@ for@ variable@ %a."
ScopeVar.format x_uid
else None) )
| [y; x] ->
@@ -796,17 +797,17 @@ let get_def_key
match Ident.Map.find_opt (Mark.remove y) scope_ctxt.var_idmap with
| Some (SubScope (v, u, _)) -> v, u
| Some _ ->
- Message.error ~pos "Invalid definition, %a is not a subscope"
+ Message.error ~pos "Invalid definition,@ %a@ is@ not@ a@ subscope"
Print.lit_style (Mark.remove y)
| None ->
- Message.error ~pos "No definition found for subscope %a" Print.lit_style
- (Mark.remove y)
+ Message.error ~pos "No definition found for subscope@ %a"
+ Print.lit_style (Mark.remove y)
in
let var_within_origin_scope = get_var_uid name ctxt x in
( (subscope_var, pos),
Ast.ScopeDef.SubScopeInput { name; var_within_origin_scope } )
| _ ->
- Message.error ~pos
+ Message.error ~pos "%a" Format.pp_print_text
"This line is defining a quantity that is neither a scope variable nor a \
subscope variable. In particular, it is not possible to define struct \
fields individually in Catala."
diff --git a/compiler/scopelang/dependency.ml b/compiler/scopelang/dependency.ml
index 34bb774db..de6644092 100644
--- a/compiler/scopelang/dependency.ml
+++ b/compiler/scopelang/dependency.ml
@@ -120,8 +120,9 @@ let build_program_dep_graph (prgm : 'm Ast.program) : SDependencies.t =
if VMap.mem (Topdef glo_name) used_defs then
Message.error
~pos:(Mark.get (TopdefName.get_info glo_name))
- "The Topdef %a has a definition that refers to itself, which is \
- forbidden since Catala does not provide recursion"
+ "The toplevel declaration@ %a@ has@ a@ definition@ that@ refers@ \
+ to@ itself,@ which@ is@ not@ supported@ (Catala does not provide \
+ recursion)"
TopdefName.format glo_name;
VMap.fold
(fun def pos g ->
@@ -138,8 +139,8 @@ let build_program_dep_graph (prgm : 'm Ast.program) : SDependencies.t =
if VMap.mem (Scope scope_name) used_defs then
Message.error
~pos:(Mark.get (ScopeName.get_info scope.Ast.scope_decl_name))
- "The scope %a is calling into itself as a subscope, which is \
- forbidden since Catala does not provide recursion"
+ "The scope@ %a@ is@ calling@ into@ itself@ as@ a@ subscope,@ \
+ which@ is@ not@ supported@ (Catala does not provide recursion)"
ScopeName.format scope.Ast.scope_decl_name;
VMap.fold
(fun used_def pos g ->
@@ -192,8 +193,7 @@ let check_for_cycle_in_defs (g : SDependencies.t) : unit =
(List.tl cycle @ [List.hd cycle])
in
Message.error ~extra_pos:spans
- "@[<hov 2>Cyclic dependency detected between the following scopes:@ \
- @[<hv>%a@]@]"
+ "Cyclic dependency detected between the following scopes:@ @[<hv>%a@]"
(Format.pp_print_list
~pp_sep:(fun ppf () -> Format.fprintf ppf " →@ ")
SVertex.format)
@@ -283,8 +283,8 @@ let build_type_graph (structs : struct_ctx) (enums : enum_ctx) : TDependencies.t
(fun used g ->
if TVertex.equal used def then
Message.error ~pos:(Mark.get typ)
- "The type %a is defined using itself, which is forbidden \
- since Catala does not provide recursive types"
+ "The type@ %a@ is@ defined@ using@ itself,@ which@ is@ \
+ not@ supported@ (Catala does not allow recursive types)"
TVertex.format used
else
let edge = TDependencies.E.create used (Mark.get typ) def in
@@ -305,8 +305,8 @@ let build_type_graph (structs : struct_ctx) (enums : enum_ctx) : TDependencies.t
(fun used g ->
if TVertex.equal used def then
Message.error ~pos:(Mark.get typ)
- "The type %a is defined using itself, which is forbidden \
- since Catala does not provide recursive types"
+ "The type@ %a@ is@ defined@ using@ itself,@ which@ is@ \
+ not@ supported@ (Catala does not allow recursive types)"
TVertex.format used
else
let edge = TDependencies.E.create used (Mark.get typ) def in
diff --git a/compiler/scopelang/from_desugared.ml b/compiler/scopelang/from_desugared.ml
index 79cb1bdd3..61138b492 100644
--- a/compiler/scopelang/from_desugared.ml
+++ b/compiler/scopelang/from_desugared.ml
@@ -225,6 +225,7 @@ let rule_to_exception_graph (scope : D.scope) = function
( "Incriminated subscope variable definition:",
Mark.get (RuleName.get_info rule) ))
(RuleName.Map.keys def))
+ "%a" Format.pp_print_text
"Invalid assignment to a subscope variable that is not tagged \
as input or context."
| OnlyInput when RuleName.Map.is_empty def && not is_cond ->
@@ -237,6 +238,7 @@ let rule_to_exception_graph (scope : D.scope) = function
Mark.get (ScopeVar.get_info (Mark.remove sscope)) );
"Incriminated variable:", Mark.get sscope;
]
+ "%a" Format.pp_print_text
"This subscope variable is a mandatory input but no definition \
was provided."
| _ -> ()
@@ -261,8 +263,8 @@ let rule_to_exception_graph (scope : D.scope) = function
( "Incriminated variable definition:",
Mark.get (RuleName.get_info rule) ))
(RuleName.Map.keys var_def))
- "It is impossible to give a definition to a scope variable tagged as \
- input."
+ "%a" Format.pp_print_text
+ "There cannot be a definition for a scope variable tagged as input."
| OnlyInput -> D.ScopeDef.Map.empty
(* we do not provide any definition for an input-only variable *)
| _ ->
diff --git a/compiler/shared_ast/interpreter.ml b/compiler/shared_ast/interpreter.ml
index 433aedb82..8d0aeab2f 100644
--- a/compiler/shared_ast/interpreter.ml
+++ b/compiler/shared_ast/interpreter.ml
@@ -138,7 +138,8 @@ let rec evaluate_operator
]
"division by zero at runtime"
| Runtime.UncomparableDurations ->
- Message.error ~extra_pos:(get_binop_args_pos args)
+ Message.error ~extra_pos:(get_binop_args_pos args) "%a"
+ Format.pp_print_text
"Cannot compare together durations that cannot be converted to a \
precise number of days"
in
@@ -158,11 +159,10 @@ let rec evaluate_operator
arg,
Expr.pos arg ))
args)
- "Operator %a applied to the wrong arguments\n\
- (should not happen if the term was well-typed)%a"
+ "Operator %a applied to the wrong@ arguments@ (should not happen if the \
+ term was well-typed)"
(Print.operator ~debug:true)
- op Expr.format
- (EAppOp { op; tys = []; args }, m)
+ op
in
let open Runtime.Oper in
Mark.add m
@@ -236,6 +236,7 @@ let rec evaluate_operator
| _ ->
Message.error
~pos:(Expr.pos (List.nth args 0))
+ "%a" Format.pp_print_text
"This predicate evaluated to something else than a boolean \
(should not happen if the term was well-typed)")
es)
@@ -392,8 +393,8 @@ let rec evaluate_operator
| ELit (LBool false) -> raise (CatalaException (EmptyError, pos))
| _ ->
Message.error ~pos
- "Default justification has not been reduced to a boolean at \
- evaluation (should not happen if the term was well-typed@\n\
+ "Default justification has not been reduced to a boolean at@ \
+ evaluation@ (should not happen if the term was well-typed@\n\
%a@."
Expr.format just)
| [e] -> Mark.remove e
@@ -603,7 +604,7 @@ and val_to_runtime :
| TDefault ty, _ -> val_to_runtime eval_expr ctx ty v
| _ ->
Message.error ~internal:true
- "Could not convert value of type %a to runtime: %a" (Print.typ ctx) ty
+ "Could not convert value of type %a@ to@ runtime:@ %a" (Print.typ ctx) ty
Expr.format v
let rec evaluate_expr :
@@ -617,7 +618,7 @@ let rec evaluate_expr :
let pos = Expr.mark_pos m in
match Mark.remove e with
| EVar _ ->
- Message.error ~pos
+ Message.error ~pos "%a" Format.pp_print_text
"free variable found at evaluation (should not happen if term was \
well-typed)"
| EExternal { name } ->
@@ -637,7 +638,7 @@ let rec evaluate_expr :
(TStruct scope_info.out_struct_name, pos) ),
pos )
with TopdefName.Map.Not_found _ | ScopeName.Map.Not_found _ ->
- Message.error ~pos "Reference to %a could not be resolved"
+ Message.error ~pos "Reference to %a@ could@ not@ be@ resolved"
Print.external_ref name
in
let runtime_path =
@@ -673,7 +674,7 @@ let rec evaluate_expr :
|> fun o ->
runtime_to_val (fun ctx -> evaluate_expr ctx lang) ctx m tret o
| _ ->
- Message.error ~pos
+ Message.error ~pos "%a" Format.pp_print_text
"function has not been reduced to a lambda at evaluation (should not \
happen if the term was well-typed")
| EAppOp { op; args; _ } ->
@@ -698,19 +699,20 @@ let rec evaluate_expr :
if not (StructName.equal s name) then
Message.error
~extra_pos:["", pos; "", Expr.pos e]
+ "%a" Format.pp_print_text
"Error during struct access: not the same structs (should not happen \
if the term was well-typed)";
match StructField.Map.find_opt field es with
| Some e' -> e'
| None ->
Message.error ~pos:(Expr.pos e)
- "Invalid field access %a in struct %a (should not happen if the term \
- was well-typed)"
+ "Invalid field access %a@ in@ struct@ %a@ (should not happen if the \
+ term was well-typed)"
StructField.format field StructName.format s)
| _ ->
Message.error ~pos:(Expr.pos e)
- "The expression %a should be a struct %a but is not (should not happen \
- if the term was well-typed)"
+ "The expression %a@ should@ be@ a@ struct@ %a@ but@ is@ not@ (should \
+ not happen if the term was well-typed)"
(Print.UserFacing.expr lang)
e StructName.format s)
| ETuple es -> Mark.add m (ETuple (List.map (evaluate_expr ctx lang) es))
@@ -719,8 +721,8 @@ let rec evaluate_expr :
| ETuple es, _ when List.length es = size -> List.nth es index
| e ->
Message.error ~pos:(Expr.pos e)
- "The expression %a was expected to be a tuple of size %d (should not \
- happen if the term was well-typed)"
+ "The expression %a@ was@ expected@ to@ be@ a@ tuple@ of@ size@ %d@ \
+ (should not happen if the term was well-typed)"
(Print.UserFacing.expr lang)
e size)
| EInj { e; name; cons } ->
@@ -733,13 +735,14 @@ let rec evaluate_expr :
if not (EnumName.equal name name') then
Message.error
~extra_pos:["", Expr.pos e; "", Expr.pos e1]
+ "%a" Format.pp_print_text
"Error during match: two different enums found (should not happen if \
the term was well-typed)";
let es_n =
match EnumConstructor.Map.find_opt cons cases with
| Some es_n -> es_n
| None ->
- Message.error ~pos:(Expr.pos e)
+ Message.error ~pos:(Expr.pos e) "%a" Format.pp_print_text
"sum type index error (should not happen if the term was \
well-typed)"
in
@@ -758,7 +761,7 @@ let rec evaluate_expr :
| ELit (LBool true) -> evaluate_expr ctx lang etrue
| ELit (LBool false) -> evaluate_expr ctx lang efalse
| _ ->
- Message.error ~pos:(Expr.pos cond)
+ Message.error ~pos:(Expr.pos cond) "%a" Format.pp_print_text
"Expected a boolean literal for the result of this condition (should \
not happen if the term was well-typed)")
| EArray es ->
@@ -774,13 +777,13 @@ let rec evaluate_expr :
(partially_evaluate_expr_for_assertion_failure_message ctx lang
(Expr.skip_wrappers e'))
| _ ->
- Message.error ~pos:(Expr.pos e')
+ Message.error ~pos:(Expr.pos e') "%a" Format.pp_print_text
"Expected a boolean literal for the result of this assertion (should \
not happen if the term was well-typed)")
| EErrorOnEmpty e' -> (
match evaluate_expr ctx lang e' with
| EEmptyError, _ ->
- Message.error ~pos:(Expr.pos e')
+ Message.error ~pos:(Expr.pos e') "%a" Format.pp_print_text
"This variable evaluated to an empty term (no rule that defined it \
applied in this situation)"
| e -> e)
@@ -794,7 +797,7 @@ let rec evaluate_expr :
| ELit (LBool true) -> evaluate_expr ctx lang cons
| ELit (LBool false) -> Mark.copy e EEmptyError
| _ ->
- Message.error ~pos:(Expr.pos e)
+ Message.error ~pos:(Expr.pos e) "%a" Format.pp_print_text
"Default justification has not been reduced to a boolean at \
evaluation (should not happen if the term was well-typed")
| 1 -> List.find (fun sub -> not (is_empty_error sub)) excepts
@@ -912,7 +915,7 @@ let delcustom e =
let interp_failure_message ~pos = function
| NoValueProvided ->
- Message.error ~pos
+ Message.error ~pos "%a" Format.pp_print_text
"This variable evaluated to an empty term (no rule that defined it \
applied in this situation)"
| ConflictError cpos ->
@@ -921,14 +924,15 @@ let interp_failure_message ~pos = function
(List.map
(fun pos -> "This consequence has a valid justification:", pos)
cpos)
+ "%a" Format.pp_print_text
"There is a conflict between multiple valid consequences for assigning \
the same variable."
| Crash ->
(* This constructor seems to be never used *)
- Message.error ~pos "Internal error, the interpreter crashed"
+ Message.error ~pos ~internal:true "The interpreter crashed"
| EmptyError ->
- Message.error ~pos
- "Internal error, a variable without valid definition escaped"
+ Message.error ~pos ~internal:true
+ "A variable without valid definition escaped"
let interpret_program_lcalc p s : (Uid.MarkedString.info * ('a, 'm) gexpr) list
=
@@ -981,12 +985,13 @@ let interpret_program_lcalc p s : (Uid.MarkedString.info * ('a, 'm) gexpr) list
mark_e
| _ ->
Message.error ~pos:(Mark.get ty)
- "This scope needs an input argument of type %a to be executed. \
- But the Catala built-in interpreter does not have a way to \
- retrieve input values from the command line, so it cannot \
- execute this scope. Please create another scope that provides \
- the input arguments to this one and execute it instead."
- Print.typ_debug ty)
+ "This scope needs an input argument of type@ %a@ %a"
+ Print.typ_debug ty Format.pp_print_text
+ "to be executed. But the Catala built-in interpreter does not \
+ have a way to retrieve input values from the command line, so \
+ it cannot execute this scope. Please create another scope that \
+ provides the input arguments to this one and execute it \
+ instead.")
taus
in
let to_interpret =
@@ -1006,12 +1011,12 @@ let interpret_program_lcalc p s : (Uid.MarkedString.info * ('a, 'm) gexpr) list
| exception CatalaException (except, pos) ->
interp_failure_message ~pos except
| _ ->
- Message.error ~pos:(Expr.pos e)
+ Message.error ~pos:(Expr.pos e) "%a" Format.pp_print_text
"The interpretation of a program should always yield a struct \
corresponding to the scope variables"
end
| _ ->
- Message.error ~pos:(Expr.pos e)
+ Message.error ~pos:(Expr.pos e) "%a" Format.pp_print_text
"The interpreter can only interpret terms starting with functions having \
thunked arguments"
@@ -1038,7 +1043,7 @@ let interpret_program_dcalc p s : (Uid.MarkedString.info * ('a, 'm) gexpr) list
(Bindlib.box EEmptyError, Expr.with_ty mark_e ty_out)
ty_in (Expr.mark_pos mark_e)
| _ ->
- Message.error ~pos:(Mark.get ty)
+ Message.error ~pos:(Mark.get ty) "%a" Format.pp_print_text
"This scope needs input arguments to be executed. But the Catala \
built-in interpreter does not have a way to retrieve input \
values from the command line, so it cannot execute this scope. \
@@ -1063,12 +1068,12 @@ let interpret_program_dcalc p s : (Uid.MarkedString.info * ('a, 'm) gexpr) list
| exception CatalaException (except, pos) ->
interp_failure_message ~pos except
| _ ->
- Message.error ~pos:(Expr.pos e)
+ Message.error ~pos:(Expr.pos e) "%a" Format.pp_print_text
"The interpretation of a program should always yield a struct \
corresponding to the scope variables"
end
| _ ->
- Message.error ~pos:(Expr.pos e)
+ Message.error ~pos:(Expr.pos e) "%a" Format.pp_print_text
"The interpreter can only interpret terms starting with functions having \
thunked arguments"
@@ -1091,7 +1096,7 @@ let load_runtime_modules prg =
Message.error
~pos_msg:(fun ppf -> Format.pp_print_string ppf "Module defined here")
~pos:(Mark.get (ModuleName.get_info m))
- "Compiled OCaml object %a not found. Make sure it has been suitably \
+ "Compiled OCaml object %a@ not@ found.@ Make sure it has been suitably \
compiled."
File.format obj_file
else
diff --git a/compiler/shared_ast/typing.ml b/compiler/shared_ast/typing.ml
index 492cde0c8..611d2416a 100644
--- a/compiler/shared_ast/typing.ml
+++ b/compiler/shared_ast/typing.ml
@@ -264,9 +264,9 @@ let handle_type_error ctx (A.AnyExpr e) t1 t2 =
]
in
Message.error ~fmt_pos
- "@[<v>Error during typechecking, incompatible types:@,\
+ "Error during typechecking, incompatible types:@\n\
@[<v>@{<bold;blue>@<3>%s@} @[<hov>%a@]@,\
- @{<bold;blue>@<3>%s@} @[<hov>%a@]@]@]" "┌─⯈" (format_typ ctx) t1 "└─⯈"
+ @{<bold;blue>@<3>%s@} @[<hov>%a@]@]" "┌─⯈" (format_typ ctx) t1 "└─⯈"
(format_typ ctx) t2
let lit_type (lit : A.lit) : naked_typ =
@@ -607,7 +607,7 @@ and typecheck_expr_top_down :
"", Expr.mark_pos context_mark;
"Structure definition", Mark.get (A.StructName.get_info name);
]
- "Field @{<yellow>\"%s\"@} does not belong to structure \
+ "Field@ @{<yellow>\"%s\"@}@ does@ not@ belong@ to@ structure@ \
@{<yellow>\"%a\"@}."
field A.StructName.format name
~suggestion:(A.Ident.Map.keys ctx.ctx_struct_fields))
@@ -616,8 +616,8 @@ and typecheck_expr_top_down :
with A.StructName.Map.Not_found _ ->
Message.error
~pos:(Expr.mark_pos context_mark)
- "@[<hov>Field @{<yellow>\"%s\"@}@ does not belong to@ structure \
- @{<yellow>\"%a\"@}@ (however, structure %a defines it)@]"
+ "Field@ @{<yellow>\"%s\"@}@ does@ not@ belong@ to@ structure@ \
+ @{<yellow>\"%a\"@}@ (however, structure@ %a@ defines@ it)@]"
field A.StructName.format name
(Format.pp_print_list
~pp_sep:(fun ppf () -> Format.fprintf ppf "@ or@ ")
diff --git a/compiler/surface/parser_driver.ml b/compiler/surface/parser_driver.ml
index f77a12d09..985bd8cdc 100644
--- a/compiler/surface/parser_driver.ml
+++ b/compiler/surface/parser_driver.ml
@@ -73,17 +73,13 @@ let raise_parser_error
(token : string)
(msg : Format.formatter -> unit) : 'a =
Message.error ?suggestion
- ~fmt_pos:
- (((fun ppf -> Format.pp_print_string ppf "Error token:"), error_loc)
- ::
- (match last_good_loc with
- | None -> []
- | Some last_good_loc ->
- [
- ( (fun ppf -> Format.pp_print_string ppf "Last good token:"),
- last_good_loc );
- ]))
- "@[<v>Syntax error at token %a@,%t@]"
+ ~extra_pos:
+ [
+ (match last_good_loc with
+ | None -> "Error token", error_loc
+ | Some last_good_loc -> "Last good token", last_good_loc);
+ ]
+ "Syntax error at %a@\n%t"
(fun ppf string -> Format.fprintf ppf "@{<yellow>\"%s\"@}" string)
token msg
@@ -133,18 +129,15 @@ module ParserAux (LocalisedLexer : Lexer_common.LocalisedLexer) = struct
(* The parser has suspended itself because of a syntax error. Stop. *)
let custom_menhir_message ppf =
(match Parser_errors.message (state env) with
- | exception Not_found ->
- Format.fprintf ppf "Message: @{<yellow>unexpected token@}@,%t"
+ | exception Not_found -> Format.fprintf ppf "@{<yellow>unexpected token@}"
| msg ->
- Format.fprintf ppf "Message: @{<yellow>%s@}@,%t"
- (String.trim (String.uncapitalize_ascii msg)))
- (fun (ppf : Format.formatter) ->
- Format.fprintf ppf "You could have written : ";
- Format.pp_print_list
- ~pp_sep:(fun ppf () -> Format.fprintf ppf ",@ or ")
- (fun ppf string -> Format.fprintf ppf "@{<yellow>\"%s\"@}" string)
- ppf
- (List.map (fun (s, _) -> s) acceptable_tokens))
+ Format.fprintf ppf "@{<yellow>@<1>»@} @[<hov>%a@]" Format.pp_print_text
+ (String.trim (String.uncapitalize_ascii msg)));
+ Format.fprintf ppf "@,@[<hov>Those are valid at this point:@ %a@]"
+ (Format.pp_print_list
+ ~pp_sep:(fun ppf () -> Format.fprintf ppf ",@ ")
+ (fun ppf string -> Format.fprintf ppf "@{<yellow>\"%s\"@}" string))
+ (List.map (fun (s, _) -> s) acceptable_tokens)
in
raise_parser_error ~suggestion:similar_acceptable_tokens
(Pos.from_lpos (lexing_positions lexbuf))
@@ -303,8 +296,8 @@ and expand_includes (source_file : string) (commands : Ast.law_structure list) :
"Module declaration", Mark.get id;
]
"A file that declares a module cannot be used through the raw \
- '@{<yellow>> Include@}' directive. You should use it as a \
- module with '@{<yellow>> Use @{<blue>%s@}@}' instead."
+ '@{<yellow>> Include@}'@ directive.@ You should use it as a \
+ module with@ '@{<yellow>> Use @{<blue>%s@}@}'@ instead."
(Mark.remove id)
in
{
@@ -406,9 +399,8 @@ let check_modname program source_file =
(Global.FileName file | Global.Contents (_, file) | Global.Stdin file) )
when not File.(equal mname Filename.(remove_extension (basename file))) ->
Message.error ~pos
- "@[<hov>Module declared as@ @{<blue>%s@},@ which@ does@ not@ match@ the@ \
- file@ name@ %a.@ Rename the module to@ @{<blue>%s@}@ or@ the@ file@ to@ \
- %a.@]"
+ "Module declared as@ @{<blue>%s@},@ which@ does@ not@ match@ the@ file@ \
+ name@ %a.@ Rename the module to@ @{<blue>%s@}@ or@ the@ file@ to@ %a."
mname File.format file
(String.capitalize_ascii Filename.(remove_extension (basename file)))
File.format
| Rework the Message module interface
using optional arguments for the various options rather than the multiple
function variants (`spanned`/`multispanned`/`full` etc.)
| 2024-04-10T15:38:47 | 0.0 | [] | [] |
|||
CatalaLang/catala | CatalaLang__catala-564 | 6972de06adf78da3c246ad8022f0eb74fd13dbdc | diff --git a/compiler/desugared/from_surface.ml b/compiler/desugared/from_surface.ml
index 01d618bd8..c7b897995 100644
--- a/compiler/desugared/from_surface.ml
+++ b/compiler/desugared/from_surface.ml
@@ -498,7 +498,9 @@ let rec translate_expr
let ctxt = Name_resolution.module_ctx ctxt path in
let s_uid =
match Ident.Map.find_opt (Mark.remove s_name) ctxt.local.typedefs with
- | Some (Name_resolution.TStruct s_uid) -> s_uid
+ | Some (Name_resolution.TStruct s_uid)
+ | Some (Name_resolution.TScope (_, { out_struct_name = s_uid; _ })) ->
+ s_uid
| _ ->
Message.raise_spanned_error (Mark.get s_name)
"This identifier should refer to a struct name"
| Be able to use scope output struct as a real struct (build object)
Suppose the following scope :
```catala
declaration scope Foo:
result a content boolean
result b content integer
```
Now suppose you're using the output struct of `Foo` later in another calling scope. Right now you cannot build a new `Foo` object to, let's say, map the contents of the output of `Foo` into a new `Foo` object with tweaked values. This restriction should be lifted.
| 2024-01-19T14:46:16 | 0.0 | [] | [] |
|||
CatalaLang/catala | CatalaLang__catala-560 | 401fcd54bea47be20d25cc17fc0152b0c815a792 | diff --git a/compiler/shared_ast/print.ml b/compiler/shared_ast/print.ml
index 047ef7f17..6d9fccd33 100644
--- a/compiler/shared_ast/print.ml
+++ b/compiler/shared_ast/print.ml
@@ -980,8 +980,10 @@ module UserFacing = struct
aux (Z.abs n)
let money (lang : Cli.backend_lang) ppf n =
+ let num = Z.abs n in
+ let units, cents = Z.div_rem num (Z.of_int 100) in
+ if Z.sign n < 0 then Format.pp_print_char ppf '-';
(match lang with En -> Format.pp_print_string ppf "$" | Fr | Pl -> ());
- let units, cents = Z.div_rem n (Z.of_int 100) in
integer lang ppf units;
Format.pp_print_string ppf (decsep lang);
Format.fprintf ppf "%02d" (Z.to_int (Z.abs cents));
| UserFacing printer forgets minus sign for money in (-1, 0)
I think the exact same bug as in #551 also occurs for money, even after the #552 patch. Sorry for not catching both at the same time.
| 2024-01-16T10:41:08 | 0.0 | [] | [] |
|||
CatalaLang/catala | CatalaLang__catala-552 | a0ec4f8aff4d04166e57a530bb1862b8521099d2 | diff --git a/compiler/shared_ast/print.ml b/compiler/shared_ast/print.ml
index 06be55d53..15594a564 100644
--- a/compiler/shared_ast/print.ml
+++ b/compiler/shared_ast/print.ml
@@ -980,9 +980,11 @@ module UserFacing = struct
let decimal (lang : Cli.backend_lang) ppf r =
let den = Q.den r in
- let int_part, rem = Z.div_rem (Q.num r) den in
+ let num = Z.abs (Q.num r) in
+ let int_part, rem = Z.div_rem num den in
let rem = Z.abs rem in
(* Printing the integer part *)
+ if Q.sign r < 0 then Format.pp_print_char ppf '-';
integer lang ppf int_part;
(* Printing the decimals *)
let bigsep, nsep = bigsep lang in
| UserFacing printer forgets minus sign for decimals in (-1, 0)
While trying to print `Runtime.decimal`s I think I've found a bug in the user-facing printer.
Here is a code example (in OCaml) demonstrating the behaviour :
```ocaml
open Catala_utils
open Shared_ast
let print_decimals () =
let decimals = [1.; 0.5; 0.; -0.; -0.5; -1.] in
let m = Untyped {pos = Pos.no_pos} in
Message.emit_result "float\tQ\texpr\texpr db\tuserfacing";
List.iter
(fun f ->
let q = Q.of_float f in
let e = Expr.unbox (Expr.elit (LRat q) m) in
Message.emit_result "%s\t%s\t%a\t%a\t%a"
(string_of_float f)
(Q.to_string q)
(Print.expr ~debug:false ()) e
(Print.expr ~debug:true ()) e
(Print.UserFacing.value En) e
)
decimals;;
print_decimals ()
```
And the printed result:
```
[RESULT] float Q expr expr db userfacing
[RESULT] 1. 1 1. 1. 1.0
[RESULT] 0.5 1/2 0.5 0.5 0.5
[RESULT] 0. 0 0. 0. 0.0
[RESULT] -0. 0 0. 0. 0.0
[RESULT] -0.5 -1/2 -0.5 -0.5 0.5
[RESULT] -1. -1 -1. -1. -1.0
```
As you can see, `-0.5` is missing its leading minus sign when using the user-facing printer. I only saw this happen for decimals between -1 and 0, so it may have to do with the fact that those numbers have an integer part of "-0" which is printed as "0".
| 2023-12-19T12:40:53 | 0.0 | [] | [] |
|||
CatalaLang/catala | CatalaLang__catala-546 | b50db59f5028608b7cec5985008d4ea1635c09f2 | diff --git a/compiler/catala_utils/cli.ml b/compiler/catala_utils/cli.ml
index 79e15b503..b821e100c 100644
--- a/compiler/catala_utils/cli.ml
+++ b/compiler/catala_utils/cli.ml
@@ -204,7 +204,7 @@ module Flags = struct
let message_format =
value
& opt (enum message_format_opt) Human
- & info ["message_format"]
+ & info ["message-format"]
~doc:
"Selects the format of error and warning messages emitted by the \
compiler. If set to $(i,human), the messages will be nicely \
@@ -241,14 +241,14 @@ module Flags = struct
let disable_warnings =
value
& flag
- & info ["disable_warnings"]
+ & info ["disable-warnings"]
~doc:"Disable all the warnings emitted by the compiler."
let max_prec_digits =
value
& opt int 20
& info
- ["p"; "max_digits_printed"]
+ ["p"; "max-digits-printed"]
~docv:"NUM"
~doc:
"Maximum number of significant digits printed for decimal results."
@@ -340,7 +340,7 @@ module Flags = struct
let check_invariants =
value
& flag
- & info ["check_invariants"] ~doc:"Check structural invariants on the AST."
+ & info ["check-invariants"] ~doc:"Check structural invariants on the AST."
let no_typing =
value
@@ -356,7 +356,7 @@ module Flags = struct
let print_only_law =
value
& flag
- & info ["print_only_law"]
+ & info ["print-only-law"]
~doc:
"In literate programming output, skip all code and metadata sections \
and print only the text of the law."
@@ -392,13 +392,13 @@ module Flags = struct
let avoid_exceptions =
value
& flag
- & info ["avoid_exceptions"]
+ & info ["avoid-exceptions"]
~doc:"Compiles the default calculus without exceptions."
let closure_conversion =
value
& flag
- & info ["closure_conversion"]
+ & info ["closure-conversion"]
~doc:
"Performs closure conversion on the lambda calculus. Implies \
$(b,--avoid-exceptions)."
@@ -407,7 +407,7 @@ module Flags = struct
value
& flag
& info
- ["disable_counterexamples"]
+ ["disable-counterexamples"]
~doc:
"Disables the search for counterexamples. Useful when you want a \
deterministic output from the Catala compiler, since provers can \
diff --git a/compiler/driver.ml b/compiler/driver.ml
index df0b97328..dc02d1adf 100644
--- a/compiler/driver.ml
+++ b/compiler/driver.ml
@@ -211,7 +211,7 @@ module Passes = struct
raise
(Message.raise_internal_error "Some Dcalc invariants are invalid")
| _ ->
- Message.raise_error "--check_invariants cannot be used with --no-typing");
+ Message.raise_error "--check-invariants cannot be used with --no-typing");
prg, type_ordering
let lcalc
@@ -229,12 +229,12 @@ module Passes = struct
in
debug_pass_name "lcalc";
let avoid_exceptions = avoid_exceptions || closure_conversion in
- (* --closure_conversion implies --avoid_exceptions *)
+ (* --closure-conversion implies --avoid-exceptions *)
let prg =
match avoid_exceptions, options.trace, typed with
| true, true, _ ->
Message.raise_error
- "Option --avoid_exceptions is not compatible with option --trace"
+ "Option --avoid-exceptions is not compatible with option --trace"
| true, _, Untyped _ ->
Program.untype
(Lcalc.Compile_without_exceptions.translate_program
diff --git a/compiler/shared_ast/print.ml b/compiler/shared_ast/print.ml
index 15594a564..422656eb3 100644
--- a/compiler/shared_ast/print.ml
+++ b/compiler/shared_ast/print.ml
@@ -162,8 +162,7 @@ let rec typ_gen
(typ ~colors:(List.tl colors))
t2
| TArray t1 ->
- Format.fprintf fmt "@[<hov 2>%a@ %a@]" base_type "collection" (typ ~colors)
- t1
+ Format.fprintf fmt "@[<hov 2>%a@ %a@]" base_type "list of" (typ ~colors) t1
| TDefault t1 ->
punctuation fmt "⟨";
typ ~colors fmt t1;
diff --git a/compiler/shared_ast/typing.ml b/compiler/shared_ast/typing.ml
index a26a52a91..edc82677c 100644
--- a/compiler/shared_ast/typing.ml
+++ b/compiler/shared_ast/typing.ml
@@ -157,9 +157,8 @@ let rec format_typ
")" (format_typ ~colors) t2
| TArray t1 -> (
match Mark.remove (UnionFind.get (UnionFind.find t1)) with
- | TAny _ when not Cli.globals.debug ->
- Format.pp_print_string fmt "collection"
- | _ -> Format.fprintf fmt "@[collection@ %a@]" (format_typ ~colors) t1)
+ | TAny _ when not Cli.globals.debug -> Format.pp_print_string fmt "list"
+ | _ -> Format.fprintf fmt "@[list of@ %a@]" (format_typ ~colors) t1)
| TDefault t1 ->
Format.pp_print_as fmt 1 "⟨";
format_typ ~colors fmt t1;
diff --git a/compiler/surface/lexer.cppo.ml b/compiler/surface/lexer.cppo.ml
index 114763077..fa2bbab05 100644
--- a/compiler/surface/lexer.cppo.ml
+++ b/compiler/surface/lexer.cppo.ml
@@ -56,8 +56,8 @@ module R = Re.Pcre
#ifndef MR_OF
#define MR_OF MS_OF
#endif
-#ifndef MR_COLLECTION
- #define MR_COLLECTION MS_COLLECTION
+#ifndef MR_LIST
+ #define MR_LIST MS_LIST
#endif
#ifndef MR_CONTAINS
#define MR_CONTAINS MS_CONTAINS
@@ -203,8 +203,8 @@ module R = Re.Pcre
#ifndef MR_IS
#define MR_IS MS_IS
#endif
-#ifndef MR_EMPTY
- #define MR_EMPTY MS_EMPTY
+#ifndef MR_LIST_EMPTY
+ #define MR_LIST_EMPTY MS_LIST_EMPTY
#endif
#ifndef MR_CARDINAL
#define MR_CARDINAL MS_CARDINAL
@@ -266,7 +266,7 @@ let token_list : (string * token) list =
(MS_DECREASING, DECREASING);
(MS_INCREASING, INCREASING);
(MS_OF, OF);
- (MS_COLLECTION, COLLECTION);
+ (MS_LIST, LIST);
(MS_CONTAINS, CONTAINS);
(MS_ENUM, ENUM);
(MS_INTEGER, INTEGER);
@@ -315,7 +315,7 @@ let token_list : (string * token) list =
(MS_MAXIMUM, MAXIMUM);
(MS_MINIMUM, MINIMUM);
(MS_IS, IS);
- (MS_EMPTY, EMPTY);
+ (MS_LIST_EMPTY, LIST_EMPTY);
(MS_CARDINAL, CARDINAL);
(MS_YEAR, YEAR);
(MS_MONTH, MONTH);
@@ -417,9 +417,9 @@ let rec lex_code (lexbuf : lexbuf) : token =
| MR_OF ->
L.update_acc lexbuf;
OF
- | MR_COLLECTION ->
+ | MR_LIST ->
L.update_acc lexbuf;
- COLLECTION
+ LIST
| MR_CONTAINS ->
L.update_acc lexbuf;
CONTAINS
@@ -567,9 +567,9 @@ let rec lex_code (lexbuf : lexbuf) : token =
| MR_IS ->
L.update_acc lexbuf;
IS
- | MR_EMPTY ->
+ | MR_LIST_EMPTY ->
L.update_acc lexbuf;
- EMPTY
+ LIST_EMPTY
| MR_CARDINAL ->
L.update_acc lexbuf;
CARDINAL
diff --git a/compiler/surface/lexer_en.cppo.ml b/compiler/surface/lexer_en.cppo.ml
index 5dd69669d..4d46c8303 100644
--- a/compiler/surface/lexer_en.cppo.ml
+++ b/compiler/surface/lexer_en.cppo.ml
@@ -28,7 +28,8 @@
#define MS_DECREASING "decreasing"
#define MS_INCREASING "increasing"
#define MS_OF "of"
-#define MS_COLLECTION "collection"
+#define MS_LIST "list of"
+#define MR_LIST "list", space_plus, "of"
#define MS_CONTAINS "contains"
#define MS_ENUM "enumeration"
#define MS_INTEGER "integer"
@@ -80,7 +81,8 @@
#define MS_MAXIMUM "maximum"
#define MS_MINIMUM "minimum"
#define MS_IS "is"
-#define MS_EMPTY "empty"
+#define MS_LIST_EMPTY "list empty"
+#define MR_LIST_EMPTY "list", space_plus, "empty"
#define MS_CARDINAL "number"
#define MS_YEAR "year"
#define MS_MONTH "month"
diff --git a/compiler/surface/lexer_fr.cppo.ml b/compiler/surface/lexer_fr.cppo.ml
index e2da919ce..1c64a66c0 100644
--- a/compiler/surface/lexer_fr.cppo.ml
+++ b/compiler/surface/lexer_fr.cppo.ml
@@ -36,7 +36,8 @@
#define MR_DECREASING "d", 0xE9, "croissant"
#define MS_INCREASING "croissant"
#define MS_OF "de"
-#define MS_COLLECTION "collection"
+#define MS_LIST "liste de"
+#define MR_LIST "liste", space_plus, "de"
#define MS_CONTAINS "contient"
#define MS_ENUM "énumération"
#define MR_ENUM 0xE9, "num", 0xE9, "ration"
@@ -100,7 +101,8 @@
#define MS_MAXIMUM "maximum"
#define MS_MINIMUM "minimum"
#define MS_IS "est"
-#define MS_EMPTY "vide"
+#define MS_LIST_EMPTY "liste vide"
+#define MR_LIST_EMPTY "liste", space_plus, "vide"
#define MS_CARDINAL "nombre"
#define MS_YEAR "an"
#define MS_MONTH "mois"
diff --git a/compiler/surface/lexer_pl.cppo.ml b/compiler/surface/lexer_pl.cppo.ml
index 5dd45da81..6d7fffd77 100644
--- a/compiler/surface/lexer_pl.cppo.ml
+++ b/compiler/surface/lexer_pl.cppo.ml
@@ -30,7 +30,7 @@
#define MS_INCREASING "rosnący"
#define MR_INCREASING "rosn", 0x0105, "cy"
#define MS_OF "z"
-#define MS_COLLECTION "kolekcja"
+#define MS_LIST "lista"
#define MS_CONTAINS "zawiera"
#define MS_ENUM "enumeracja"
#define MS_INTEGER "całkowita"
@@ -93,7 +93,8 @@
#define MS_MAXIMUM "maksimum"
#define MS_MINIMUM "minimum"
#define MS_IS "jest"
-#define MS_EMPTY "pusty"
+#define MS_LIST_EMPTY "lista pusta"
+#define MR_LIST_EMPTY "lista", space_plus, "pusta"
#define MS_CARDINAL "liczba"
#define MS_YEAR "rok"
#define MS_MONTH "miesiąc"
diff --git a/compiler/surface/parser.messages b/compiler/surface/parser.messages
index 06f5d3fcd..a3f4e5c28 100644
--- a/compiler/surface/parser.messages
+++ b/compiler/surface/parser.messages
@@ -1,6 +1,6 @@
source_file: BEGIN_CODE DECLARATION ENUM UIDENT COLON ALT UIDENT CONTENT TEXT YEAR
##
-## Ends in an error in state: 362.
+## Ends in an error in state: 358.
##
## list(addpos(enum_decl_line)) -> enum_decl_line . list(addpos(enum_decl_line)) [ SCOPE END_CODE DECLARATION ]
##
@@ -12,7 +12,7 @@ expected another enum case, or a new declaration or scope use
source_file: BEGIN_CODE DECLARATION ENUM UIDENT COLON ALT UIDENT CONTENT YEAR
##
-## Ends in an error in state: 358.
+## Ends in an error in state: 354.
##
## option(preceded(CONTENT,addpos(typ))) -> CONTENT . typ_data [ SCOPE END_CODE DECLARATION ALT ]
##
@@ -24,7 +24,7 @@ expected a content type
source_file: BEGIN_CODE DECLARATION ENUM UIDENT COLON ALT UIDENT YEAR
##
-## Ends in an error in state: 357.
+## Ends in an error in state: 353.
##
## enum_decl_line -> ALT UIDENT . option(preceded(CONTENT,addpos(typ))) [ SCOPE END_CODE DECLARATION ALT ]
##
@@ -36,7 +36,7 @@ expected a payload for your enum case, or another case or declaration
source_file: BEGIN_CODE DECLARATION ENUM UIDENT COLON ALT YEAR
##
-## Ends in an error in state: 356.
+## Ends in an error in state: 352.
##
## enum_decl_line -> ALT . UIDENT option(preceded(CONTENT,addpos(typ))) [ SCOPE END_CODE DECLARATION ALT ]
##
@@ -48,7 +48,7 @@ expected the name of an enum case
source_file: BEGIN_CODE DECLARATION ENUM UIDENT COLON YEAR
##
-## Ends in an error in state: 355.
+## Ends in an error in state: 351.
##
## code_item -> DECLARATION ENUM UIDENT COLON . list(addpos(enum_decl_line)) [ SCOPE END_CODE DECLARATION ]
##
@@ -60,7 +60,7 @@ expected an enum case
source_file: BEGIN_CODE DECLARATION ENUM UIDENT YEAR
##
-## Ends in an error in state: 354.
+## Ends in an error in state: 350.
##
## code_item -> DECLARATION ENUM UIDENT . COLON list(addpos(enum_decl_line)) [ SCOPE END_CODE DECLARATION ]
##
@@ -72,7 +72,7 @@ expected a colon
source_file: BEGIN_CODE DECLARATION ENUM YEAR
##
-## Ends in an error in state: 353.
+## Ends in an error in state: 349.
##
## code_item -> DECLARATION ENUM . UIDENT COLON list(addpos(enum_decl_line)) [ SCOPE END_CODE DECLARATION ]
##
@@ -84,7 +84,7 @@ expected the name of your enum
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON YEAR
##
-## Ends in an error in state: 316.
+## Ends in an error in state: 312.
##
## code_item -> DECLARATION SCOPE UIDENT COLON . nonempty_list(addpos(scope_decl_item)) [ SCOPE END_CODE DECLARATION ]
##
@@ -96,7 +96,7 @@ expected a context item introduced by "context"
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT YEAR
##
-## Ends in an error in state: 315.
+## Ends in an error in state: 311.
##
## code_item -> DECLARATION SCOPE UIDENT . COLON nonempty_list(addpos(scope_decl_item)) [ SCOPE END_CODE DECLARATION ]
##
@@ -108,7 +108,7 @@ expected a colon followed by the list of context items of this scope
source_file: BEGIN_CODE DECLARATION SCOPE YEAR
##
-## Ends in an error in state: 314.
+## Ends in an error in state: 310.
##
## code_item -> DECLARATION SCOPE . UIDENT COLON nonempty_list(addpos(scope_decl_item)) [ SCOPE END_CODE DECLARATION ]
##
@@ -122,7 +122,7 @@ expected the name of the scope you are declaring
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON CONDITION LIDENT DEPENDS YEAR
##
-## Ends in an error in state: 301.
+## Ends in an error in state: 297.
##
## struct_scope -> struct_scope_base DEPENDS . separated_nonempty_list(COMMA,var_content) [ SCOPE END_CODE DECLARATION DATA CONDITION ]
## struct_scope -> struct_scope_base DEPENDS . LPAREN separated_nonempty_list(COMMA,var_content) RPAREN [ SCOPE END_CODE DECLARATION DATA CONDITION ]
@@ -135,7 +135,7 @@ expected the type of the parameter of this struct data function
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON CONDITION LIDENT YEAR
##
-## Ends in an error in state: 300.
+## Ends in an error in state: 296.
##
## struct_scope -> struct_scope_base . DEPENDS separated_nonempty_list(COMMA,var_content) [ SCOPE END_CODE DECLARATION DATA CONDITION ]
## struct_scope -> struct_scope_base . DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN [ SCOPE END_CODE DECLARATION DATA CONDITION ]
@@ -149,7 +149,7 @@ expected a new struct data, or another declaration or scope use
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON CONDITION YEAR
##
-## Ends in an error in state: 298.
+## Ends in an error in state: 294.
##
## struct_scope_base -> CONDITION . lident [ SCOPE END_CODE DEPENDS DECLARATION DATA CONDITION ]
##
@@ -161,7 +161,7 @@ expected the name of this struct condition
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON DATA LIDENT CONTENT YEAR
##
-## Ends in an error in state: 293.
+## Ends in an error in state: 289.
##
## struct_scope_base -> DATA lident CONTENT . typ_data [ SCOPE END_CODE DEPENDS DECLARATION DATA CONDITION ]
##
@@ -173,7 +173,7 @@ expected the type of this struct data
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON DATA LIDENT YEAR
##
-## Ends in an error in state: 292.
+## Ends in an error in state: 288.
##
## struct_scope_base -> DATA lident . CONTENT typ_data [ SCOPE END_CODE DEPENDS DECLARATION DATA CONDITION ]
##
@@ -185,7 +185,7 @@ expected the type of this struct data, introduced by the content keyword
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON DATA YEAR
##
-## Ends in an error in state: 291.
+## Ends in an error in state: 287.
##
## struct_scope_base -> DATA . lident CONTENT typ_data [ SCOPE END_CODE DEPENDS DECLARATION DATA CONDITION ]
##
@@ -197,7 +197,7 @@ expected the name of this struct data
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT COLON YEAR
##
-## Ends in an error in state: 290.
+## Ends in an error in state: 286.
##
## code_item -> DECLARATION STRUCT UIDENT COLON . list(addpos(struct_scope)) [ SCOPE END_CODE DECLARATION ]
##
@@ -209,7 +209,7 @@ expected struct data or condition
source_file: BEGIN_CODE DECLARATION STRUCT UIDENT YEAR
##
-## Ends in an error in state: 289.
+## Ends in an error in state: 285.
##
## code_item -> DECLARATION STRUCT UIDENT . COLON list(addpos(struct_scope)) [ SCOPE END_CODE DECLARATION ]
##
@@ -221,7 +221,7 @@ expected a colon
source_file: BEGIN_CODE DECLARATION STRUCT YEAR
##
-## Ends in an error in state: 288.
+## Ends in an error in state: 284.
##
## code_item -> DECLARATION STRUCT . UIDENT COLON list(addpos(struct_scope)) [ SCOPE END_CODE DECLARATION ]
##
@@ -233,7 +233,7 @@ expected the struct name
source_file: BEGIN_CODE DECLARATION YEAR
##
-## Ends in an error in state: 287.
+## Ends in an error in state: 283.
##
## code_item -> DECLARATION . STRUCT UIDENT COLON list(addpos(struct_scope)) [ SCOPE END_CODE DECLARATION ]
## code_item -> DECLARATION . SCOPE UIDENT COLON nonempty_list(addpos(scope_decl_item)) [ SCOPE END_CODE DECLARATION ]
@@ -250,7 +250,7 @@ expected the kind of the declaration (struct, scope or enum)
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION CARDINAL THEN
##
-## Ends in an error in state: 245.
+## Ends in an error in state: 241.
##
## assertion -> option(condition_consequence) expression . [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
## expression -> expression . DOT qlident [ XOR WITH SCOPE RULE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE DOT DIV DEFINITION DECLARATION DATE CONTAINS ASSERTION AND ]
@@ -282,7 +282,7 @@ expected a new scope use item
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION FIXED LIDENT BY YEAR
##
-## Ends in an error in state: 242.
+## Ends in an error in state: 238.
##
## assertion -> FIXED separated_nonempty_list(DOT,addpos(LIDENT)) BY . lident [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -294,7 +294,7 @@ expected the legislative text by which the value of the variable is fixed
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION FIXED LIDENT WITH_V
##
-## Ends in an error in state: 241.
+## Ends in an error in state: 237.
##
## assertion -> FIXED separated_nonempty_list(DOT,addpos(LIDENT)) . BY lident [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -305,14 +305,14 @@ source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION FIXED LIDENT WITH_V
## This implies that, although the LR(1) items shown above provide an
## accurate view of the past (what has been recognized so far), they
## may provide an INCOMPLETE view of the future (what was expected next).
-## In state 229, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
+## In state 225, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
##
expected the legislative text by which the value of the variable is fixed
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION FIXED YEAR
##
-## Ends in an error in state: 240.
+## Ends in an error in state: 236.
##
## assertion -> FIXED . separated_nonempty_list(DOT,addpos(LIDENT)) BY lident [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -324,7 +324,7 @@ expected the name of the variable that should be fixed
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION UNDER_CONDITION TRUE THEN
##
-## Ends in an error in state: 238.
+## Ends in an error in state: 234.
##
## condition_consequence -> UNDER_CONDITION expression . CONSEQUENCE [ UIDENT TRUE SUM OUTPUT NOT MONEY_AMOUNT MONEY MINUS MINIMUM MAXIMUM MATCH LPAREN LIDENT LET LBRACKET INT_LITERAL IF FOR FILLED FALSE EXISTS DEFINED_AS DECIMAL_LITERAL DECIMAL DATE_LITERAL CARDINAL ]
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS CONSEQUENCE AND ]
@@ -356,7 +356,7 @@ expected a consequence for this definition under condition
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION UNDER_CONDITION YEAR
##
-## Ends in an error in state: 237.
+## Ends in an error in state: 233.
##
## condition_consequence -> UNDER_CONDITION . expression CONSEQUENCE [ UIDENT TRUE SUM OUTPUT NOT MONEY_AMOUNT MONEY MINUS MINIMUM MAXIMUM MATCH LPAREN LIDENT LET LBRACKET INT_LITERAL IF FOR FILLED FALSE EXISTS DEFINED_AS DECIMAL_LITERAL DECIMAL DATE_LITERAL CARDINAL ]
##
@@ -368,7 +368,7 @@ expected an expression for this condition
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION VARIES LIDENT UNDER_CONDITION
##
-## Ends in an error in state: 232.
+## Ends in an error in state: 228.
##
## assertion -> VARIES separated_nonempty_list(DOT,addpos(LIDENT)) . WITH_V expression option(addpos(variation_type)) [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -379,14 +379,14 @@ source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION VARIES LIDENT UNDER_CONDITI
## This implies that, although the LR(1) items shown above provide an
## accurate view of the past (what has been recognized so far), they
## may provide an INCOMPLETE view of the future (what was expected next).
-## In state 229, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
+## In state 225, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
##
expected an indication about what this variable varies with
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION VARIES LIDENT WITH_V YEAR
##
-## Ends in an error in state: 233.
+## Ends in an error in state: 229.
##
## assertion -> VARIES separated_nonempty_list(DOT,addpos(LIDENT)) WITH_V . expression option(addpos(variation_type)) [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -398,7 +398,7 @@ the variable varies with an expression that was expected here
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION VARIES YEAR
##
-## Ends in an error in state: 228.
+## Ends in an error in state: 224.
##
## assertion -> VARIES . separated_nonempty_list(DOT,addpos(LIDENT)) WITH_V expression option(addpos(variation_type)) [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -410,7 +410,7 @@ expecting the name of the varying variable
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION YEAR
##
-## Ends in an error in state: 227.
+## Ends in an error in state: 223.
##
## scope_item -> ASSERTION . assertion [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -422,7 +422,7 @@ expected an expression that shoud be asserted during execution
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT DEFINED_AS YEAR
##
-## Ends in an error in state: 268.
+## Ends in an error in state: 264.
##
## definition -> option(label) option(exception_to) DEFINITION separated_nonempty_list(DOT,addpos(LIDENT)) option(addpos(definition_parameters)) option(state) option(condition_consequence) DEFINED_AS . expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -434,7 +434,7 @@ expected an expression for the definition
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT OF LIDENT DECREASING
##
-## Ends in an error in state: 259.
+## Ends in an error in state: 255.
##
## separated_nonempty_list(COMMA,lident) -> lident . [ UNDER_CONDITION STATE NOT FILLED DEFINED_AS ]
## separated_nonempty_list(COMMA,lident) -> lident . COMMA separated_nonempty_list(COMMA,lident) [ UNDER_CONDITION STATE NOT FILLED DEFINED_AS ]
@@ -447,7 +447,7 @@ expected an expression for defining this function, introduced by the defined as
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT WITH_V
##
-## Ends in an error in state: 256.
+## Ends in an error in state: 252.
##
## definition -> option(label) option(exception_to) DEFINITION separated_nonempty_list(DOT,addpos(LIDENT)) . option(addpos(definition_parameters)) option(state) option(condition_consequence) DEFINED_AS expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -458,14 +458,14 @@ source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT WITH_V
## This implies that, although the LR(1) items shown above provide an
## accurate view of the past (what has been recognized so far), they
## may provide an INCOMPLETE view of the future (what was expected next).
-## In state 229, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
+## In state 225, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
##
expected the defined as keyword to introduce the definition of this variable
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION YEAR
##
-## Ends in an error in state: 255.
+## Ends in an error in state: 251.
##
## definition -> option(label) option(exception_to) DEFINITION . separated_nonempty_list(DOT,addpos(LIDENT)) option(addpos(definition_parameters)) option(state) option(condition_consequence) DEFINED_AS expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -477,7 +477,7 @@ expected the name of the variable you want to define
source_file: BEGIN_CODE SCOPE UIDENT COLON EXCEPTION LIDENT YEAR
##
-## Ends in an error in state: 282.
+## Ends in an error in state: 278.
##
## option(addpos(exception_to)) -> exception_to . [ RULE ]
## option(exception_to) -> exception_to . [ DEFINITION ]
@@ -490,7 +490,7 @@ expected a rule or a definition after the exception declaration
source_file: BEGIN_CODE SCOPE UIDENT COLON EXCEPTION YEAR
##
-## Ends in an error in state: 251.
+## Ends in an error in state: 247.
##
## exception_to -> EXCEPTION . option(lident) [ RULE DEFINITION ]
##
@@ -502,7 +502,7 @@ expected the label to which the exception is referring back
source_file: BEGIN_CODE SCOPE UIDENT COLON LABEL LIDENT DEFINED_AS
##
-## Ends in an error in state: 250.
+## Ends in an error in state: 246.
##
## definition -> option(label) . option(exception_to) DEFINITION separated_nonempty_list(DOT,addpos(LIDENT)) option(addpos(definition_parameters)) option(state) option(condition_consequence) DEFINED_AS expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
## rule -> option(label) . option(addpos(exception_to)) RULE rule_expr option(state) option(condition_consequence) rule_consequence [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
@@ -515,7 +515,7 @@ expected a rule or a definition after the label declaration
source_file: BEGIN_CODE SCOPE UIDENT COLON LABEL YEAR
##
-## Ends in an error in state: 220.
+## Ends in an error in state: 216.
##
## label -> LABEL . lident [ RULE EXCEPTION DEFINITION ]
##
@@ -527,7 +527,7 @@ expected the name of the label
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT DOT YEAR
##
-## Ends in an error in state: 230.
+## Ends in an error in state: 226.
##
## separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT DOT . separated_nonempty_list(DOT,addpos(LIDENT)) [ WITH_V UNDER_CONDITION STATE OF NOT FILLED DEFINED_AS BY ]
##
@@ -539,7 +539,7 @@ expected a struct field or a sub-scope context item after the dot
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT NOT FALSE
##
-## Ends in an error in state: 280.
+## Ends in an error in state: 276.
##
## rule_consequence -> option(NOT) . FILLED [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -551,7 +551,7 @@ expected the filled keyword the this rule
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT OF YEAR
##
-## Ends in an error in state: 257.
+## Ends in an error in state: 253.
##
## definition_parameters -> OF . separated_nonempty_list(COMMA,lident) [ UNDER_CONDITION STATE NOT FILLED DEFINED_AS ]
##
@@ -563,7 +563,7 @@ expected the name of the parameter for this dependent variable
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT WITH_V
##
-## Ends in an error in state: 273.
+## Ends in an error in state: 269.
##
## rule_expr -> separated_nonempty_list(DOT,addpos(LIDENT)) . option(addpos(definition_parameters)) [ UNDER_CONDITION STATE NOT FILLED ]
##
@@ -574,14 +574,14 @@ source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT WITH_V
## This implies that, although the LR(1) items shown above provide an
## accurate view of the past (what has been recognized so far), they
## may provide an INCOMPLETE view of the future (what was expected next).
-## In state 229, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
+## In state 225, spurious reduction of production separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT
##
expected a condition or a consequence for this rule
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT YEAR
##
-## Ends in an error in state: 229.
+## Ends in an error in state: 225.
##
## separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT . [ WITH_V UNDER_CONDITION STATE OF NOT FILLED DEFINED_AS BY ]
## separated_nonempty_list(DOT,addpos(LIDENT)) -> LIDENT . DOT separated_nonempty_list(DOT,addpos(LIDENT)) [ WITH_V UNDER_CONDITION STATE OF NOT FILLED DEFINED_AS BY ]
@@ -594,7 +594,7 @@ expected 'under condition' followed by a condition, 'equals' followed by the def
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE YEAR
##
-## Ends in an error in state: 272.
+## Ends in an error in state: 268.
##
## rule -> option(label) option(addpos(exception_to)) RULE . rule_expr option(state) option(condition_consequence) rule_consequence [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -606,7 +606,7 @@ expected the name of the variable subject to the rule
source_file: BEGIN_CODE SCOPE UIDENT COLON YEAR
##
-## Ends in an error in state: 219.
+## Ends in an error in state: 215.
##
## code_item -> SCOPE UIDENT option(preceded(UNDER_CONDITION,expression)) COLON . nonempty_list(scope_item) [ SCOPE END_CODE DECLARATION ]
##
@@ -692,7 +692,7 @@ expected the "all" keyword to mean the "for all" construction of the universal t
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION IF TRUE SEMICOLON
##
-## Ends in an error in state: 168.
+## Ends in an error in state: 166.
##
## expression -> expression . DOT qlident [ XOR WITH THEN PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH THEN PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -748,7 +748,7 @@ expected a unit for this literal, or a valid operator to complete the expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LPAREN TRUE THEN
##
-## Ends in an error in state: 182.
+## Ends in an error in state: 180.
##
## expression -> LPAREN expression . RPAREN [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . DOT qlident [ XOR WITH RPAREN PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -792,7 +792,7 @@ expected an expression inside the parenthesis
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LBRACKET TRUE THEN
##
-## Ends in an error in state: 176.
+## Ends in an error in state: 174.
##
## expression -> expression . DOT qlident [ XOR WITH SEMICOLON RBRACKET PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH SEMICOLON RBRACKET PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -821,7 +821,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LBRACKET TRUE THEN
## expression
##
-expected a semicolon or a right square bracket after the collection element
+expected a semicolon or a right square bracket after the list element
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LBRACKET YEAR
##
@@ -833,11 +833,11 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LBRACKET YEAR
## LBRACKET
##
-expected a collection element
+expected a list element
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH TRUE WITH ALT YEAR
##
-## Ends in an error in state: 186.
+## Ends in an error in state: 184.
##
## nonempty_list(addpos(preceded(ALT,match_arm))) -> ALT . match_arm [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## nonempty_list(addpos(preceded(ALT,match_arm))) -> ALT . match_arm nonempty_list(addpos(preceded(ALT,match_arm))) [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -850,7 +850,7 @@ expected the name of the constructor for the enum case in the pattern matching
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH TRUE WITH YEAR
##
-## Ends in an error in state: 185.
+## Ends in an error in state: 183.
##
## expression -> expression WITH . constructor_binding [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> MATCH expression WITH . nonempty_list(addpos(preceded(ALT,match_arm))) [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -875,7 +875,7 @@ expected an expression to match with
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION TRUE YEAR
##
-## Ends in an error in state: 217.
+## Ends in an error in state: 213.
##
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS COLON AND ]
## expression -> expression . OF funcall_args [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS COLON AND ]
@@ -944,7 +944,7 @@ expected the name of the scope being used
source_file: BEGIN_CODE YEAR
##
-## Ends in an error in state: 403.
+## Ends in an error in state: 399.
##
## source_file_item -> BEGIN_CODE . code END_CODE [ LAW_TEXT LAW_HEADING EOF BEGIN_METADATA BEGIN_DIRECTIVE BEGIN_CODE ]
##
@@ -1005,8 +1005,8 @@ source_file: BEGIN_METADATA LAW_TEXT LAW_HEADING
## accurate view of the past (what has been recognized so far), they
## may provide an INCOMPLETE view of the future (what was expected next).
## In state 1, spurious reduction of production nonempty_list(LAW_TEXT) -> LAW_TEXT
-## In state 382, spurious reduction of production law_text -> nonempty_list(LAW_TEXT)
-## In state 383, spurious reduction of production option(law_text) -> law_text
+## In state 378, spurious reduction of production law_text -> nonempty_list(LAW_TEXT)
+## In state 379, spurious reduction of production option(law_text) -> law_text
##
expected some law text or code block
@@ -1106,7 +1106,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION SUM BOOLEAN YEAR
## SUM primitive_typ
##
-expected 'of' then the collection on which to operate
+expected 'of' then the list on which to operate
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION SUM UIDENT OF YEAR
##
@@ -1118,7 +1118,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION SUM UIDENT OF YEAR
## SUM primitive_typ OF
##
-expected the collection on which to operate the sum
+expected the list on which to operate the sum
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION OUTPUT YEAR
##
@@ -1251,49 +1251,49 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM YEAR
##
## Ends in an error in state: 47.
##
-## expression -> MINIMUM . OF expression OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM . OF expression OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## MINIMUM
##
-expected 'of' then the collection to operate on
+expected 'of' then the list to operate on
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF YEAR
##
## Ends in an error in state: 48.
##
-## expression -> MINIMUM OF . expression OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF . expression OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## MINIMUM OF
##
-expected an expression defining the collection to operate on
+expected an expression defining the list to operate on
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM YEAR
##
## Ends in an error in state: 49.
##
-## expression -> MAXIMUM . OF expression OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM . OF expression OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## MAXIMUM
##
-expected 'of' then the collection to operate on
+expected 'of' then the list to operate on
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF YEAR
##
## Ends in an error in state: 50.
##
-## expression -> MAXIMUM OF . expression OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF . expression OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## MAXIMUM OF
##
-expected an expression defining the collection to operate on
+expected an expression defining the list to operate on
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT YEAR
##
@@ -1354,7 +1354,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION FOR ALL LIDENT AMONG YEAR
## FOR ALL lident AMONG
##
-expected an expression describing the collection to operate on
+expected an expression describing the list to operate on
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION EXISTS LIDENT AMONG YEAR
##
@@ -1366,7 +1366,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION EXISTS LIDENT AMONG YEAR
## EXISTS lident AMONG
##
-expected an expression describing the collection to operate on
+expected an expression describing the list to operate on
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION DECIMAL_LITERAL WITH_V
##
@@ -1422,14 +1422,14 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG YEAR
## Ends in an error in state: 88.
##
## expression -> lident AMONG . expression SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG . expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG . expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG . expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG . expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG
##
-expected an expression defining a collection
+expected an expression defining a list
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG FALSE YEAR
##
@@ -1456,8 +1456,8 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG FALSE YEAR
## expression -> expression . XOR expression [ XOR WITH SUCH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> lident AMONG expression . SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . FOR lident AMONG expression SUCH THAT expression [ XOR WITH SUCH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
-## expression -> lident AMONG expression . SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression . SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression . SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression . SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression
@@ -1564,7 +1564,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT PLUSPLUS YEAR
## expression PLUSPLUS
##
-expected a collection expression
+expected a list expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT PLUSPLUS FALSE YEAR
##
@@ -1680,7 +1680,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT FOR LIDENT AMONG YEA
## expression FOR lident AMONG
##
-expected an expression defining a collection
+expected an expression defining a list
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT FOR LIDENT AMONG FALSE YEAR
##
@@ -1738,7 +1738,7 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT FOR LIDENT AMONG UID
##
expected an expression defining the condition to apply to the elements of the
-collection
+list
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT FOR LIDENT AMONG UIDENT SUCH THAT FALSE YEAR
##
@@ -2385,8 +2385,8 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH YE
## Ends in an error in state: 141.
##
## expression -> lident AMONG expression SUCH . THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH . THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH . THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH . THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH . THAT expression IS MINIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH
@@ -2399,8 +2399,8 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH TH
## Ends in an error in state: 142.
##
## expression -> lident AMONG expression SUCH THAT . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT . expression IS MAXIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT . expression IS MINIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT . expression IS MAXIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT . expression IS MINIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT
@@ -2433,8 +2433,8 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH TH
## expression -> expression . XOR expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> lident AMONG expression SUCH THAT expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . FOR lident AMONG expression SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT expression . IS MAXIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT expression . IS MINIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression . IS MAXIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression . IS MINIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression
@@ -2446,8 +2446,8 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH TH
##
## Ends in an error in state: 144.
##
-## expression -> lident AMONG expression SUCH THAT expression IS . MAXIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT expression IS . MINIMUM OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS . MAXIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS . MINIMUM OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS
@@ -2459,78 +2459,66 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH TH
##
## Ends in an error in state: 145.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM . OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM . OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS MINIMUM
##
-expected 'or if collection empty then <expression>'
+expected 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR YEAR
##
## Ends in an error in state: 146.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR . IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR . IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS MINIMUM OR
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF YEAR
##
## Ends in an error in state: 147.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF . COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF . LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF COLLECTION YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF LIST_EMPTY YEAR
##
## Ends in an error in state: 148.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION . EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION
+## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF COLLECTION EMPTY YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF LIST_EMPTY THEN YEAR
##
## Ends in an error in state: 149.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY
+## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY THEN
##
-expected the form 'or if collection empty then <expression>'
-
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF COLLECTION EMPTY THEN YEAR
-##
-## Ends in an error in state: 150.
-##
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-##
-## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN
-##
-
-expected an expression, following the form 'or if collection empty then
+expected an expression, following the form 'or if list empty then
<expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF COLLECTION EMPTY THEN FALSE YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MINIMUM OR IF LIST_EMPTY THEN FALSE YEAR
##
-## Ends in an error in state: 151.
+## Ends in an error in state: 150.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -2552,89 +2540,77 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH TH
## expression -> expression . OR expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . XOR expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . FOR lident AMONG expression SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF COLLECTION EMPTY THEN expression
+## lident AMONG expression SUCH THAT expression IS MINIMUM OR IF LIST_EMPTY THEN expression
##
expected a binary operator continuing the expression, or a keyword ending the expression and starting the next item
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM YEAR
##
-## Ends in an error in state: 152.
+## Ends in an error in state: 151.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM . OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM . OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS MAXIMUM
##
-expected 'or if collection empty then <expression>'
+expected 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR YEAR
##
-## Ends in an error in state: 153.
+## Ends in an error in state: 152.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR . IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR . IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS MAXIMUM OR
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF YEAR
##
-## Ends in an error in state: 154.
+## Ends in an error in state: 153.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF . COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF . LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF
##
-expected the form 'or if collection empty then <expression>'
-
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF COLLECTION YEAR
-##
-## Ends in an error in state: 155.
-##
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION . EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-##
-## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION
-##
-
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF COLLECTION EMPTY YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF LIST_EMPTY YEAR
##
-## Ends in an error in state: 156.
+## Ends in an error in state: 154.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY
+## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF COLLECTION EMPTY THEN YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF LIST_EMPTY THEN YEAR
##
-## Ends in an error in state: 157.
+## Ends in an error in state: 155.
##
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN
+## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN
##
-expected an expression, following the form 'or if collection empty then <expression>'
+expected an expression, following the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF COLLECTION EMPTY THEN FALSE YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH THAT UIDENT IS MAXIMUM OR IF LIST_EMPTY THEN FALSE YEAR
##
-## Ends in an error in state: 158.
+## Ends in an error in state: 156.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -2656,17 +2632,17 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LIDENT AMONG UIDENT SUCH TH
## expression -> expression . OR expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . XOR expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . FOR lident AMONG expression SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF COLLECTION EMPTY THEN expression
+## lident AMONG expression SUCH THAT expression IS MAXIMUM OR IF LIST_EMPTY THEN expression
##
expected a binary operator continuing the expression, or a keyword ending the expression and starting the next item
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT CONTENT FALSE YEAR
##
-## Ends in an error in state: 159.
+## Ends in an error in state: 157.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -2698,7 +2674,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION EXISTS LIDENT AMONG FALSE YEAR
##
-## Ends in an error in state: 161.
+## Ends in an error in state: 159.
##
## expression -> expression . DOT qlident [ XOR WITH SUCH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH SUCH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -2730,7 +2706,7 @@ expected 'such that <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION EXISTS LIDENT AMONG UIDENT SUCH YEAR
##
-## Ends in an error in state: 162.
+## Ends in an error in state: 160.
##
## expression -> EXISTS lident AMONG expression SUCH . THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -2742,7 +2718,7 @@ expected the form 'such that <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION EXISTS LIDENT AMONG UIDENT SUCH THAT YEAR
##
-## Ends in an error in state: 163.
+## Ends in an error in state: 161.
##
## expression -> EXISTS lident AMONG expression SUCH THAT . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -2754,7 +2730,7 @@ expected an expression, following the form 'such that <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION EXISTS LIDENT AMONG UIDENT SUCH THAT FALSE YEAR
##
-## Ends in an error in state: 164.
+## Ends in an error in state: 162.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -2786,7 +2762,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION FOR ALL LIDENT AMONG FALSE YEAR
##
-## Ends in an error in state: 165.
+## Ends in an error in state: 163.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -2818,7 +2794,7 @@ expected 'we have <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION FOR ALL LIDENT AMONG UIDENT WE_HAVE YEAR
##
-## Ends in an error in state: 166.
+## Ends in an error in state: 164.
##
## expression -> FOR ALL lident AMONG expression WE_HAVE . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -2830,7 +2806,7 @@ expected the form 'we have <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION FOR ALL LIDENT AMONG UIDENT WE_HAVE FALSE YEAR
##
-## Ends in an error in state: 167.
+## Ends in an error in state: 165.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -2862,7 +2838,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION IF UIDENT THEN YEAR
##
-## Ends in an error in state: 169.
+## Ends in an error in state: 167.
##
## expression -> IF expression THEN . expression ELSE expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -2874,7 +2850,7 @@ expected an expression, followed by 'else <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION IF UIDENT THEN FALSE YEAR
##
-## Ends in an error in state: 170.
+## Ends in an error in state: 168.
##
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL ELSE DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL ELSE DOT DIV CONTAINS AND ]
@@ -2906,7 +2882,7 @@ expected 'else <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION IF UIDENT THEN UIDENT ELSE YEAR
##
-## Ends in an error in state: 171.
+## Ends in an error in state: 169.
##
## expression -> IF expression THEN expression ELSE . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -2918,7 +2894,7 @@ expected an expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION IF UIDENT THEN UIDENT ELSE FALSE YEAR
##
-## Ends in an error in state: 172.
+## Ends in an error in state: 170.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -2950,7 +2926,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LBRACKET UIDENT SEMICOLON YEAR
##
-## Ends in an error in state: 177.
+## Ends in an error in state: 175.
##
## separated_nonempty_list(SEMICOLON,expression) -> expression SEMICOLON . separated_nonempty_list(SEMICOLON,expression) [ RBRACKET ]
##
@@ -2962,7 +2938,7 @@ expected an expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LET LIDENT DEFINED_AS FALSE YEAR
##
-## Ends in an error in state: 179.
+## Ends in an error in state: 177.
##
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER IN GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER IN GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -2994,7 +2970,7 @@ expected the keyword 'in'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LET LIDENT DEFINED_AS UIDENT IN YEAR
##
-## Ends in an error in state: 180.
+## Ends in an error in state: 178.
##
## expression -> LET lident DEFINED_AS expression IN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -3006,7 +2982,7 @@ expected an expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION LET LIDENT DEFINED_AS UIDENT IN FALSE YEAR
##
-## Ends in an error in state: 181.
+## Ends in an error in state: 179.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3038,7 +3014,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH FALSE YEAR
##
-## Ends in an error in state: 184.
+## Ends in an error in state: 182.
##
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -3070,7 +3046,7 @@ expected 'with pattern -- <pattern> : <expression> ...'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH UIDENT WITH ALT WILDCARD YEAR
##
-## Ends in an error in state: 187.
+## Ends in an error in state: 185.
##
## match_arm -> WILDCARD . COLON expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -3082,7 +3058,7 @@ expected ':' followed by an expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH UIDENT WITH ALT WILDCARD COLON YEAR
##
-## Ends in an error in state: 188.
+## Ends in an error in state: 186.
##
## match_arm -> WILDCARD COLON . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -3094,7 +3070,7 @@ expected an expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH UIDENT WITH ALT WILDCARD COLON FALSE YEAR
##
-## Ends in an error in state: 189.
+## Ends in an error in state: 187.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3126,7 +3102,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH UIDENT WITH ALT UIDENT XOR
##
-## Ends in an error in state: 192.
+## Ends in an error in state: 190.
##
## match_arm -> constructor_binding . COLON expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -3145,7 +3121,7 @@ expected a colon followed by an expression, as in '-- Case : <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH UIDENT WITH ALT UIDENT COLON YEAR
##
-## Ends in an error in state: 193.
+## Ends in an error in state: 191.
##
## match_arm -> constructor_binding COLON . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
@@ -3157,7 +3133,7 @@ expected an expression
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MATCH UIDENT WITH ALT UIDENT COLON FALSE YEAR
##
-## Ends in an error in state: 194.
+## Ends in an error in state: 192.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3190,14 +3166,14 @@ expected a binary operator, or the next case in the form '-- NextCase : <express
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF FALSE YEAR
##
-## Ends in an error in state: 196.
+## Ends in an error in state: 194.
##
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . WITH constructor_binding [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . CONTAINS expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . FOR lident AMONG expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
-## expression -> MAXIMUM OF expression . OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF expression . OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . MULT expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . DIV expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . PLUS expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -3218,80 +3194,68 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF FALSE YEAR
## MAXIMUM OF expression
##
-expected 'or if collection empty then <expression>'
+expected 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR YEAR
##
-## Ends in an error in state: 197.
+## Ends in an error in state: 195.
##
-## expression -> MAXIMUM OF expression OR . IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF expression OR . IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression OR . expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
##
## The known suffix of the stack is as follows:
## MAXIMUM OF expression OR
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF YEAR
##
-## Ends in an error in state: 198.
+## Ends in an error in state: 196.
##
-## expression -> MAXIMUM OF expression OR IF . COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF expression OR IF . LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> IF . expression THEN expression ELSE expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
##
## The known suffix of the stack is as follows:
## MAXIMUM OF expression OR IF
##
-expected the form 'or if collection empty then <expression>'
-
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF COLLECTION YEAR
-##
-## Ends in an error in state: 199.
-##
-## expression -> MAXIMUM OF expression OR IF COLLECTION . EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-##
-## The known suffix of the stack is as follows:
-## MAXIMUM OF expression OR IF COLLECTION
-##
-
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF COLLECTION EMPTY YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF LIST_EMPTY YEAR
##
-## Ends in an error in state: 200.
+## Ends in an error in state: 197.
##
-## expression -> MAXIMUM OF expression OR IF COLLECTION EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF expression OR IF LIST_EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## MAXIMUM OF expression OR IF COLLECTION EMPTY
+## MAXIMUM OF expression OR IF LIST_EMPTY
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF COLLECTION EMPTY THEN YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF LIST_EMPTY THEN YEAR
##
-## Ends in an error in state: 201.
+## Ends in an error in state: 198.
##
-## expression -> MAXIMUM OF expression OR IF COLLECTION EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF expression OR IF LIST_EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## MAXIMUM OF expression OR IF COLLECTION EMPTY THEN
+## MAXIMUM OF expression OR IF LIST_EMPTY THEN
##
-expected an expression, following the form 'or if collection empty then <expression>'
+expected an expression, following the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF COLLECTION EMPTY THEN FALSE YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF LIST_EMPTY THEN FALSE YEAR
##
-## Ends in an error in state: 202.
+## Ends in an error in state: 199.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . WITH constructor_binding [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . CONTAINS expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . FOR lident AMONG expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> MAXIMUM OF expression OR IF COLLECTION EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MAXIMUM OF expression OR IF LIST_EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . MULT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . DIV expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . PLUS expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3309,21 +3273,21 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MAXIMUM OF UIDENT OR IF COL
## expression -> expression . FOR lident AMONG expression SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## MAXIMUM OF expression OR IF COLLECTION EMPTY THEN expression
+## MAXIMUM OF expression OR IF LIST_EMPTY THEN expression
##
expected a binary operator continuing the expression, or a keyword ending the expression and starting the next item
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF FALSE YEAR
##
-## Ends in an error in state: 203.
+## Ends in an error in state: 200.
##
## expression -> expression . DOT qlident [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . WITH constructor_binding [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . CONTAINS expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . FOR lident AMONG expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
-## expression -> MINIMUM OF expression . OR IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF expression . OR IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . MULT expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . DIV expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
## expression -> expression . PLUS expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
@@ -3344,81 +3308,69 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF FALSE YEAR
## MINIMUM OF expression
##
-expected 'or if collection empty then <expression>'
+expected 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR YEAR
##
-## Ends in an error in state: 204.
+## Ends in an error in state: 201.
##
-## expression -> MINIMUM OF expression OR . IF COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF expression OR . IF LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression OR . expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
##
## The known suffix of the stack is as follows:
## MINIMUM OF expression OR
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF YEAR
##
-## Ends in an error in state: 205.
+## Ends in an error in state: 202.
##
-## expression -> MINIMUM OF expression OR IF . COLLECTION EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF expression OR IF . LIST_EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> IF . expression THEN expression ELSE expression [ XOR WITH PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ]
##
## The known suffix of the stack is as follows:
## MINIMUM OF expression OR IF
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF COLLECTION YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF LIST_EMPTY YEAR
##
-## Ends in an error in state: 206.
-##
-## expression -> MINIMUM OF expression OR IF COLLECTION . EMPTY THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-##
-## The known suffix of the stack is as follows:
-## MINIMUM OF expression OR IF COLLECTION
-##
-
-expected the form 'or if collection empty then <expression>'
-
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF COLLECTION EMPTY YEAR
-##
-## Ends in an error in state: 207.
+## Ends in an error in state: 203.
##
-## expression -> MINIMUM OF expression OR IF COLLECTION EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF expression OR IF LIST_EMPTY . THEN expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## MINIMUM OF expression OR IF COLLECTION EMPTY
+## MINIMUM OF expression OR IF LIST_EMPTY
##
-expected the form 'or if collection empty then <expression>'
+expected the form 'or if list empty then <expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF COLLECTION EMPTY THEN YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF LIST_EMPTY THEN YEAR
##
-## Ends in an error in state: 208.
+## Ends in an error in state: 204.
##
-## expression -> MINIMUM OF expression OR IF COLLECTION EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF expression OR IF LIST_EMPTY THEN . expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## MINIMUM OF expression OR IF COLLECTION EMPTY THEN
+## MINIMUM OF expression OR IF LIST_EMPTY THEN
##
-expected an expression, following the form 'or if collection empty then
+expected an expression, following the form 'or if list empty then
<expression>'
-source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF COLLECTION EMPTY THEN FALSE YEAR
+source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF LIST_EMPTY THEN FALSE YEAR
##
-## Ends in an error in state: 209.
+## Ends in an error in state: 205.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . WITH constructor_binding [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . CONTAINS expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . FOR lident AMONG expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
-## expression -> MINIMUM OF expression OR IF COLLECTION EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
+## expression -> MINIMUM OF expression OR IF LIST_EMPTY THEN expression . [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . MULT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . DIV expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . PLUS expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3436,14 +3388,14 @@ source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINIMUM OF UIDENT OR IF COL
## expression -> expression . FOR lident AMONG expression SUCH THAT expression [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
##
## The known suffix of the stack is as follows:
-## MINIMUM OF expression OR IF COLLECTION EMPTY THEN expression
+## MINIMUM OF expression OR IF LIST_EMPTY THEN expression
##
expected a binary operator continuing the expression, or a keyword ending the expression and starting the next item
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION MINUS FALSE YEAR
##
-## Ends in an error in state: 210.
+## Ends in an error in state: 206.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3475,7 +3427,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION NOT FALSE YEAR
##
-## Ends in an error in state: 211.
+## Ends in an error in state: 207.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3507,7 +3459,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION UIDENT LBRACE ALT LIDENT COLON FALSE YEAR
##
-## Ends in an error in state: 212.
+## Ends in an error in state: 208.
##
## expression -> expression . DOT qlident [ XOR WITH RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL DOT DIV CONTAINS AND ALT ]
@@ -3539,7 +3491,7 @@ expected another field in the form '-- <var>: <expression>', or a closing '}' br
source_file: BEGIN_CODE SCOPE UIDENT UNDER_CONDITION SUM UIDENT OF FALSE YEAR
##
-## Ends in an error in state: 216.
+## Ends in an error in state: 212.
##
## expression -> expression . DOT qlident [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
## expression -> expression . OF funcall_args [ XOR WITH WE_HAVE THEN SUCH SEMICOLON SCOPE RULE RPAREN RBRACKET RBRACE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL IS INCREASING IN GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE ELSE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS CONSEQUENCE COMMA COLON ASSERTION AND ALT ]
@@ -3571,7 +3523,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION VARIES LIDENT WITH_V FALSE YEAR
##
-## Ends in an error in state: 234.
+## Ends in an error in state: 230.
##
## assertion -> VARIES separated_nonempty_list(DOT,addpos(LIDENT)) WITH_V expression . option(addpos(variation_type)) [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
## expression -> expression . DOT qlident [ XOR WITH SCOPE RULE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL INCREASING GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE DOT DIV DEFINITION DECREASING DECLARATION DATE CONTAINS ASSERTION AND ]
@@ -3603,7 +3555,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE SCOPE UIDENT COLON ASSERTION UNDER_CONDITION UIDENT CONSEQUENCE YEAR
##
-## Ends in an error in state: 244.
+## Ends in an error in state: 240.
##
## assertion -> option(condition_consequence) . expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -3615,7 +3567,7 @@ expected either 'fulfilled' or 'not fulfilled'
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT FILLED YEAR
##
-## Ends in an error in state: 248.
+## Ends in an error in state: 244.
##
## nonempty_list(scope_item) -> scope_item . [ SCOPE END_CODE DECLARATION ]
## nonempty_list(scope_item) -> scope_item . nonempty_list(scope_item) [ SCOPE END_CODE DECLARATION ]
@@ -3628,7 +3580,7 @@ expected the next item in the scope, or the start of a new top-level decleration
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT UNDER_CONDITION UIDENT CONSEQUENCE YEAR
##
-## Ends in an error in state: 277.
+## Ends in an error in state: 273.
##
## rule -> option(label) option(addpos(exception_to)) RULE rule_expr option(state) option(condition_consequence) . rule_consequence [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -3640,7 +3592,7 @@ expected either 'fulfilled' or 'not fulfilled'
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT STATE YEAR
##
-## Ends in an error in state: 263.
+## Ends in an error in state: 259.
##
## state -> STATE . lident [ UNDER_CONDITION STATE SCOPE OUTPUT NOT LIDENT INTERNAL INPUT FILLED END_CODE DEFINED_AS DECLARATION CONTEXT ]
##
@@ -3652,7 +3604,7 @@ expected an identifier defining the name of the state
source_file: BEGIN_CODE SCOPE UIDENT COLON RULE LIDENT STATE LIDENT YEAR
##
-## Ends in an error in state: 276.
+## Ends in an error in state: 272.
##
## rule -> option(label) option(addpos(exception_to)) RULE rule_expr option(state) . option(condition_consequence) rule_consequence [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -3664,7 +3616,7 @@ expected 'equals' then an expression defining the rule
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT STATE LIDENT YEAR
##
-## Ends in an error in state: 266.
+## Ends in an error in state: 262.
##
## definition -> option(label) option(exception_to) DEFINITION separated_nonempty_list(DOT,addpos(LIDENT)) option(addpos(definition_parameters)) option(state) . option(condition_consequence) DEFINED_AS expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -3676,7 +3628,7 @@ expected 'equals' then an expression defining the rule
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT UNDER_CONDITION UIDENT CONSEQUENCE YEAR
##
-## Ends in an error in state: 267.
+## Ends in an error in state: 263.
##
## definition -> option(label) option(exception_to) DEFINITION separated_nonempty_list(DOT,addpos(LIDENT)) option(addpos(definition_parameters)) option(state) option(condition_consequence) . DEFINED_AS expression [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
##
@@ -3688,7 +3640,7 @@ expected 'fulfilled' or 'not fulfilled'
source_file: BEGIN_CODE SCOPE UIDENT COLON DEFINITION LIDENT DEFINED_AS FALSE YEAR
##
-## Ends in an error in state: 269.
+## Ends in an error in state: 265.
##
## definition -> option(label) option(exception_to) DEFINITION separated_nonempty_list(DOT,addpos(LIDENT)) option(addpos(definition_parameters)) option(state) option(condition_consequence) DEFINED_AS expression . [ SCOPE RULE LABEL EXCEPTION END_CODE DEFINITION DECLARATION DATE ASSERTION ]
## expression -> expression . DOT qlident [ XOR WITH SCOPE RULE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER LABEL GREATER_EQUAL GREATER FOR EXCEPTION EQUAL END_CODE DOT DIV DEFINITION DECLARATION DATE CONTAINS ASSERTION AND ]
@@ -3720,7 +3672,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT YEAR
##
-## Ends in an error in state: 321.
+## Ends in an error in state: 317.
##
## scope_decl_item_attribute -> scope_decl_item_attribute_input . scope_decl_item_attribute_output [ LIDENT ]
##
@@ -3732,7 +3684,7 @@ expected a variable name, optionally preceded by 'output'
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON INTERNAL YEAR
##
-## Ends in an error in state: 324.
+## Ends in an error in state: 320.
##
## scope_decl_item -> scope_decl_item_attribute . lident CONTENT typ_data DEPENDS separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute . lident CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3749,7 +3701,7 @@ expected a variable name
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT YEAR
##
-## Ends in an error in state: 325.
+## Ends in an error in state: 321.
##
## scope_decl_item -> scope_decl_item_attribute lident . CONTENT typ_data DEPENDS separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute lident . CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3766,7 +3718,7 @@ expected either 'condition', or 'content' followed by the expected variable type
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT YEAR
##
-## Ends in an error in state: 326.
+## Ends in an error in state: 322.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT . typ_data DEPENDS separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute lident CONTENT . typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3780,7 +3732,7 @@ expected a type
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT BOOLEAN YEAR
##
-## Ends in an error in state: 327.
+## Ends in an error in state: 323.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data . DEPENDS separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data . DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3795,7 +3747,7 @@ for the scope
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UIDENT DEPENDS YEAR
##
-## Ends in an error in state: 328.
+## Ends in an error in state: 324.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data DEPENDS . separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data DEPENDS . LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3808,7 +3760,7 @@ expected a name and type for the dependency of this definition ('<ident> content
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UIDENT DEPENDS LPAREN YEAR
##
-## Ends in an error in state: 329.
+## Ends in an error in state: 325.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data DEPENDS LPAREN . separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3820,7 +3772,7 @@ expected a name and type for the dependency of this definition ('<ident> content
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UIDENT DEPENDS LPAREN LIDENT CONTENT UIDENT STATE
##
-## Ends in an error in state: 330.
+## Ends in an error in state: 326.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) . RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3833,15 +3785,15 @@ source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UI
## may provide an INCOMPLETE view of the future (what was expected next).
## In state 21, spurious reduction of production quident -> UIDENT
## In state 30, spurious reduction of production primitive_typ -> quident
-## In state 296, spurious reduction of production typ_data -> primitive_typ
-## In state 307, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
+## In state 292, spurious reduction of production typ_data -> primitive_typ
+## In state 303, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
##
expected a closing paren, or a comma followed by another argument specification
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UIDENT DEPENDS LPAREN LIDENT CONTENT UIDENT RPAREN YEAR
##
-## Ends in an error in state: 331.
+## Ends in an error in state: 327.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN . list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3853,7 +3805,7 @@ expected a 'state' declaration for the preceding declaration, or the next declar
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION STATE LIDENT YEAR
##
-## Ends in an error in state: 332.
+## Ends in an error in state: 328.
##
## list(state) -> state . list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3866,7 +3818,7 @@ declaration for the scope
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UIDENT DEPENDS LIDENT CONTENT UIDENT DEFINED_AS
##
-## Ends in an error in state: 335.
+## Ends in an error in state: 331.
##
## scope_decl_item -> scope_decl_item_attribute lident CONTENT typ_data DEPENDS separated_nonempty_list(COMMA,var_content) . list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3879,15 +3831,15 @@ source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONTENT UI
## may provide an INCOMPLETE view of the future (what was expected next).
## In state 21, spurious reduction of production quident -> UIDENT
## In state 30, spurious reduction of production primitive_typ -> quident
-## In state 296, spurious reduction of production typ_data -> primitive_typ
-## In state 307, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
+## In state 292, spurious reduction of production typ_data -> primitive_typ
+## In state 303, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
##
expected the next declaration for the scope
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION YEAR
##
-## Ends in an error in state: 338.
+## Ends in an error in state: 334.
##
## scope_decl_item -> scope_decl_item_attribute lident CONDITION . DEPENDS separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute lident CONDITION . DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3901,7 +3853,7 @@ expected the next declaration for the scope
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION DEPENDS YEAR
##
-## Ends in an error in state: 339.
+## Ends in an error in state: 335.
##
## scope_decl_item -> scope_decl_item_attribute lident CONDITION DEPENDS . separated_nonempty_list(COMMA,var_content) list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
## scope_decl_item -> scope_decl_item_attribute lident CONDITION DEPENDS . LPAREN separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
@@ -3914,7 +3866,7 @@ expected the form 'depends on <ident> content <type>'
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION DEPENDS LPAREN YEAR
##
-## Ends in an error in state: 340.
+## Ends in an error in state: 336.
##
## scope_decl_item -> scope_decl_item_attribute lident CONDITION DEPENDS LPAREN . separated_nonempty_list(COMMA,var_content) RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3926,7 +3878,7 @@ expected the form 'depends on (<ident> content <type> [, <ident> content <type>
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION DEPENDS LPAREN LIDENT CONTENT UIDENT STATE
##
-## Ends in an error in state: 341.
+## Ends in an error in state: 337.
##
## scope_decl_item -> scope_decl_item_attribute lident CONDITION DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) . RPAREN list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3939,15 +3891,15 @@ source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION
## may provide an INCOMPLETE view of the future (what was expected next).
## In state 21, spurious reduction of production quident -> UIDENT
## In state 30, spurious reduction of production primitive_typ -> quident
-## In state 296, spurious reduction of production typ_data -> primitive_typ
-## In state 307, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
+## In state 292, spurious reduction of production typ_data -> primitive_typ
+## In state 303, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
##
expected a closing paren, or a comma followed by another argument declaration (', <ident> content <type>')
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON CONTEXT LIDENT CONDITION DEPENDS LPAREN LIDENT CONTENT UIDENT RPAREN YEAR
##
-## Ends in an error in state: 342.
+## Ends in an error in state: 338.
##
## scope_decl_item -> scope_decl_item_attribute lident CONDITION DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN . list(state) [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3959,7 +3911,7 @@ expected the next definition in scope
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON LIDENT YEAR
##
-## Ends in an error in state: 349.
+## Ends in an error in state: 345.
##
## scope_decl_item -> lident . SCOPE quident [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3971,7 +3923,7 @@ expected the form '<ident> scope <Scope_name>', or a scope variable declaration
source_file: BEGIN_CODE DECLARATION SCOPE UIDENT COLON LIDENT SCOPE YEAR
##
-## Ends in an error in state: 350.
+## Ends in an error in state: 346.
##
## scope_decl_item -> lident SCOPE . quident [ SCOPE OUTPUT LIDENT INTERNAL INPUT END_CODE DECLARATION CONTEXT ]
##
@@ -3983,7 +3935,7 @@ expected a scope name
source_file: BEGIN_CODE DECLARATION LIDENT YEAR
##
-## Ends in an error in state: 364.
+## Ends in an error in state: 360.
##
## code_item -> DECLARATION lident . CONTENT typ_data DEPENDS separated_nonempty_list(COMMA,var_content) option(opt_def) [ SCOPE END_CODE DECLARATION ]
## code_item -> DECLARATION lident . CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN option(opt_def) [ SCOPE END_CODE DECLARATION ]
@@ -3997,7 +3949,7 @@ expected 'content <type>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT YEAR
##
-## Ends in an error in state: 365.
+## Ends in an error in state: 361.
##
## code_item -> DECLARATION lident CONTENT . typ_data DEPENDS separated_nonempty_list(COMMA,var_content) option(opt_def) [ SCOPE END_CODE DECLARATION ]
## code_item -> DECLARATION lident CONTENT . typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN option(opt_def) [ SCOPE END_CODE DECLARATION ]
@@ -4011,7 +3963,7 @@ expected a type
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT BOOLEAN YEAR
##
-## Ends in an error in state: 366.
+## Ends in an error in state: 362.
##
## code_item -> DECLARATION lident CONTENT typ_data . DEPENDS separated_nonempty_list(COMMA,var_content) option(opt_def) [ SCOPE END_CODE DECLARATION ]
## code_item -> DECLARATION lident CONTENT typ_data . DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN option(opt_def) [ SCOPE END_CODE DECLARATION ]
@@ -4026,7 +3978,7 @@ expected 'equals <expression>', optionally preceded by 'depends on <var> content
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS YEAR
##
-## Ends in an error in state: 367.
+## Ends in an error in state: 363.
##
## code_item -> DECLARATION lident CONTENT typ_data DEPENDS . separated_nonempty_list(COMMA,var_content) option(opt_def) [ SCOPE END_CODE DECLARATION ]
## code_item -> DECLARATION lident CONTENT typ_data DEPENDS . LPAREN separated_nonempty_list(COMMA,var_content) RPAREN option(opt_def) [ SCOPE END_CODE DECLARATION ]
@@ -4039,7 +3991,7 @@ expected a variable name, following the form 'depends on <var> content <type>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LPAREN YEAR
##
-## Ends in an error in state: 368.
+## Ends in an error in state: 364.
##
## code_item -> DECLARATION lident CONTENT typ_data DEPENDS LPAREN . separated_nonempty_list(COMMA,var_content) RPAREN option(opt_def) [ SCOPE END_CODE DECLARATION ]
##
@@ -4051,7 +4003,7 @@ expected a variable name, following the form 'depends on (<var> content <type>,
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LPAREN LIDENT CONTENT UIDENT DEFINED_AS
##
-## Ends in an error in state: 369.
+## Ends in an error in state: 365.
##
## code_item -> DECLARATION lident CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) . RPAREN option(opt_def) [ SCOPE END_CODE DECLARATION ]
##
@@ -4064,8 +4016,8 @@ source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LPAREN LIDENT
## may provide an INCOMPLETE view of the future (what was expected next).
## In state 21, spurious reduction of production quident -> UIDENT
## In state 30, spurious reduction of production primitive_typ -> quident
-## In state 296, spurious reduction of production typ_data -> primitive_typ
-## In state 307, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
+## In state 292, spurious reduction of production typ_data -> primitive_typ
+## In state 303, spurious reduction of production separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data
##
expected ')', or ',' followed by another argument declaration in the form '<var>
@@ -4073,7 +4025,7 @@ content <type>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LPAREN LIDENT CONTENT UIDENT RPAREN YEAR
##
-## Ends in an error in state: 370.
+## Ends in an error in state: 366.
##
## code_item -> DECLARATION lident CONTENT typ_data DEPENDS LPAREN separated_nonempty_list(COMMA,var_content) RPAREN . option(opt_def) [ SCOPE END_CODE DECLARATION ]
##
@@ -4085,7 +4037,7 @@ expected 'equals <expression>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LPAREN LIDENT CONTENT UIDENT RPAREN DEFINED_AS YEAR
##
-## Ends in an error in state: 371.
+## Ends in an error in state: 367.
##
## option(opt_def) -> DEFINED_AS . expression [ SCOPE END_CODE DECLARATION ]
##
@@ -4097,7 +4049,7 @@ expected an expression
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LIDENT YEAR
##
-## Ends in an error in state: 305.
+## Ends in an error in state: 301.
##
## separated_nonempty_list(COMMA,var_content) -> lident . CONTENT typ_data [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
## separated_nonempty_list(COMMA,var_content) -> lident . CONTENT typ_data COMMA separated_nonempty_list(COMMA,var_content) [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
@@ -4110,7 +4062,7 @@ expected 'content <type>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LIDENT CONTENT YEAR
##
-## Ends in an error in state: 306.
+## Ends in an error in state: 302.
##
## separated_nonempty_list(COMMA,var_content) -> lident CONTENT . typ_data [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
## separated_nonempty_list(COMMA,var_content) -> lident CONTENT . typ_data COMMA separated_nonempty_list(COMMA,var_content) [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
@@ -4123,7 +4075,7 @@ expected a type
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LIDENT CONTENT BOOLEAN YEAR
##
-## Ends in an error in state: 307.
+## Ends in an error in state: 303.
##
## separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data . [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
## separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data . COMMA separated_nonempty_list(COMMA,var_content) [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
@@ -4136,7 +4088,7 @@ expected 'equals <expression>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LIDENT CONTENT UIDENT COMMA YEAR
##
-## Ends in an error in state: 308.
+## Ends in an error in state: 304.
##
## separated_nonempty_list(COMMA,var_content) -> lident CONTENT typ_data COMMA . separated_nonempty_list(COMMA,var_content) [ STATE SCOPE RPAREN OUTPUT LIDENT INTERNAL INPUT END_CODE DEFINED_AS DECLARATION DATA CONTEXT CONDITION ]
##
@@ -4148,7 +4100,7 @@ expected the definition of another argument in the form '<var> content <type>'
source_file: BEGIN_CODE DECLARATION LIDENT CONTENT UIDENT DEPENDS LIDENT CONTENT UIDENT DEFINED_AS FALSE YEAR
##
-## Ends in an error in state: 372.
+## Ends in an error in state: 368.
##
## expression -> expression . DOT qlident [ XOR WITH SCOPE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL END_CODE DOT DIV DECLARATION CONTAINS AND ]
## expression -> expression . OF funcall_args [ XOR WITH SCOPE PLUSPLUS PLUS OR OF NOT_EQUAL MULT MINUS LESSER_EQUAL LESSER GREATER_EQUAL GREATER FOR EQUAL END_CODE DOT DIV DECLARATION CONTAINS AND ]
@@ -4180,7 +4132,7 @@ expected a binary operator continuing the expression, or a keyword ending the ex
source_file: BEGIN_DIRECTIVE YEAR
##
-## Ends in an error in state: 384.
+## Ends in an error in state: 380.
##
## source_file_item -> BEGIN_DIRECTIVE . directive END_DIRECTIVE [ LAW_TEXT LAW_HEADING EOF BEGIN_METADATA BEGIN_DIRECTIVE BEGIN_CODE ]
##
@@ -4192,7 +4144,7 @@ expected a directive, e.g. 'Include: <filename>'
source_file: BEGIN_DIRECTIVE LAW_INCLUDE YEAR
##
-## Ends in an error in state: 394.
+## Ends in an error in state: 390.
##
## directive -> LAW_INCLUDE . COLON nonempty_list(DIRECTIVE_ARG) option(AT_PAGE) [ END_DIRECTIVE ]
##
@@ -4204,7 +4156,7 @@ expected ':', then a file name or 'JORFTEXTNNNNNNNNNNNN'
source_file: BEGIN_DIRECTIVE LAW_INCLUDE COLON YEAR
##
-## Ends in an error in state: 395.
+## Ends in an error in state: 391.
##
## directive -> LAW_INCLUDE COLON . nonempty_list(DIRECTIVE_ARG) option(AT_PAGE) [ END_DIRECTIVE ]
##
@@ -4216,7 +4168,7 @@ expected a file name or 'JORFTEXTNNNNNNNNNNNN'
source_file: BEGIN_DIRECTIVE LAW_INCLUDE COLON DIRECTIVE_ARG YEAR
##
-## Ends in an error in state: 396.
+## Ends in an error in state: 392.
##
## nonempty_list(DIRECTIVE_ARG) -> DIRECTIVE_ARG . [ END_DIRECTIVE AT_PAGE ]
## nonempty_list(DIRECTIVE_ARG) -> DIRECTIVE_ARG . nonempty_list(DIRECTIVE_ARG) [ END_DIRECTIVE AT_PAGE ]
@@ -4229,7 +4181,7 @@ expected a page specification in the form '@p.<number>', or a newline
source_file: BEGIN_DIRECTIVE LAW_INCLUDE COLON DIRECTIVE_ARG AT_PAGE YEAR
##
-## Ends in an error in state: 401.
+## Ends in an error in state: 397.
##
## source_file_item -> BEGIN_DIRECTIVE directive . END_DIRECTIVE [ LAW_TEXT LAW_HEADING EOF BEGIN_METADATA BEGIN_DIRECTIVE BEGIN_CODE ]
##
@@ -4241,7 +4193,7 @@ expected a newline
source_file: LAW_HEADING YEAR
##
-## Ends in an error in state: 406.
+## Ends in an error in state: 402.
##
## source_file -> source_file_item . source_file [ # ]
##
diff --git a/compiler/surface/parser.mly b/compiler/surface/parser.mly
index 04fa6f01e..3ab879f9d 100644
--- a/compiler/surface/parser.mly
+++ b/compiler/surface/parser.mly
@@ -119,7 +119,7 @@ let primitive_typ :=
let typ_data :=
| t = primitive_typ ; <Primitive>
-| COLLECTION ; t = addpos(typ_data) ; <Collection>
+| LIST ; t = addpos(typ_data) ; <Collection>
let typ == t = typ_data ; <Data>
@@ -213,7 +213,7 @@ let naked_expression ==
} %prec apply
| max = minmax ;
OF ; coll = expression ;
- OR ; IF ; COLLECTION ; EMPTY ; THEN ;
+ OR ; IF ; LIST_EMPTY ; THEN ;
default = expression ; {
CollectionOp (AggregateExtremum { max; default }, coll)
} %prec apply
@@ -265,7 +265,7 @@ let naked_expression ==
AMONG ; coll = expression ;
SUCH ; THAT ; f = expression ;
IS ; max = minmax ;
- OR ; IF ; COLLECTION ; EMPTY ; THEN ; default = expression ; {
+ OR ; IF ; LIST_EMPTY ; THEN ; default = expression ; {
CollectionOp (AggregateArgExtremum { max; default; f = i, f }, coll)
} %prec top_expr
diff --git a/compiler/surface/tokens.mly b/compiler/surface/tokens.mly
index f0c8ffca0..681fe0c98 100644
--- a/compiler/surface/tokens.mly
+++ b/compiler/surface/tokens.mly
@@ -38,7 +38,7 @@
%token<string * string> MONEY_AMOUNT
%token BEGIN_CODE TEXT
%token COLON ALT DATA
-%token OF INTEGER COLLECTION CONTAINS AMONG
+%token OF INTEGER LIST CONTAINS AMONG
%token RULE CONDITION DEFINED_AS
%token<Ast.op_kind> LESSER GREATER LESSER_EQUAL GREATER_EQUAL
%token LET EXISTS IN SUCH THAT COMMA
@@ -55,6 +55,6 @@
%token BEGIN_METADATA MONEY DECIMAL
%token UNDER_CONDITION CONSEQUENCE LBRACE RBRACE
%token LABEL EXCEPTION LBRACKET RBRACKET SEMICOLON
-%token MAXIMUM MINIMUM IS EMPTY
+%token MAXIMUM MINIMUM IS LIST_EMPTY
%%
diff --git a/doc/syntax/syntax_en.tex b/doc/syntax/syntax_en.tex
index 8943d8d59..82ecb174a 100644
--- a/doc/syntax/syntax_en.tex
+++ b/doc/syntax/syntax_en.tex
@@ -273,7 +273,7 @@ \section{Literals and types}
\end{catala}
& \begin{catala}
```catala
- collection integer
+ list of integer
```
\end{catala}
@@ -632,7 +632,7 @@ \section{Scope definition}
\\
\end{tabular}
-\section{Collection operations}
+\section{List operations}
\begin{tabular}{@{}p{\cola}>{\slshape}p{\colb}@{}}
\begin{catala}
@@ -709,7 +709,7 @@ \section{Collection operations}
\begin{catala}
```catala
maximum of coll
- or if collection empty then -1
+ or if list empty then -1
```
\end{catala}
& Extremum
@@ -718,7 +718,7 @@ \section{Collection operations}
```catala
x among coll
such that (x * x) is minimum
- or if collection empty then -1
+ or if list empty then -1
```
\end{catala}
& Arg-extremum
diff --git a/doc/syntax/syntax_fr.tex b/doc/syntax/syntax_fr.tex
index cdb7d6fba..39fdb739c 100644
--- a/doc/syntax/syntax_fr.tex
+++ b/doc/syntax/syntax_fr.tex
@@ -273,7 +273,7 @@ \section{Littéraux et types}
\end{catala}
& \begin{catala}
```catala
- collection entier
+ liste de entier
```
\end{catala}
@@ -631,7 +631,7 @@ \section{Définition de champ d'application}
\\
\end{tabular}
-\section{Opérations sur les collections}
+\section{Opérations sur les listes}
\begin{tabular}{@{}p{\cola}>{\slshape}p{\colb}@{}}
\begin{catala}
@@ -693,10 +693,10 @@ \section{Opérations sur les collections}
\\
\begin{catala}
```catala
- somme entier coll
+ somme entier de coll
```
\end{catala}
- & Aggrégation
+ & Agrégation
\\
\begin{catala}
```catala
@@ -708,7 +708,7 @@ \section{Opérations sur les collections}
\begin{catala}
```catala
maximum de coll
- ou si collection vide alors -1
+ ou si liste vide alors -1
```
\end{catala}
& Extremums
@@ -717,7 +717,7 @@ \section{Opérations sur les collections}
```catala
x parmi coll
tel que (x * x) est minimum
- ou si collection vide alors -1
+ ou si liste vide alors -1
```
\end{catala}
& Élément selon extremum
diff --git a/examples/aides_logement/prologue.catala_fr b/examples/aides_logement/prologue.catala_fr
index 4d81c1c7a..14227bf97 100644
--- a/examples/aides_logement/prologue.catala_fr
+++ b/examples/aides_logement/prologue.catala_fr
@@ -216,9 +216,9 @@ déclaration énumération DateNaissanceTroisièmeOuDernierPlusEnfant:
-- PlusDeTroisEnfants contenu DateDeNaissanceOuMoisDeGrossesse
déclaration structure Ménage:
- donnée prestations_reçues contenu collection PrestationReçue
+ donnée prestations_reçues contenu liste de PrestationReçue
donnée logement contenu Logement
- donnée personnes_à_charge contenu collection PersonneÀCharge
+ donnée personnes_à_charge contenu liste de PersonneÀCharge
donnée nombre_autres_occupants_logement contenu entier
donnée situation_familiale contenu SituationFamiliale
donnée condition_rattaché_foyer_fiscal_parent_ifi contenu booléen
@@ -311,7 +311,7 @@ déclaration champ d'application ÉligibilitéAidesPersonnelleLogement:
interne prise_en_compte_personne_à_charge condition
dépend de personne_à_charge contenu PersonneÀCharge
interne personnes_à_charge_prises_en_compte
- contenu collection PersonneÀCharge
+ contenu liste de PersonneÀCharge
contexte date_entrée_vigueur_différée_cch contenu date
interne nombre_personnes_logement contenu entier
@@ -330,7 +330,7 @@ déclaration champ d'application ÉligibilitéAidesPersonnelleLogement:
résultat éligibilité condition
résultat nombre_personnes_à_charge_prises_en_compte contenu entier
résultat coefficents_enfants_garde_alternée_pris_en_compte contenu
- collection décimal
+ liste de décimal
résultat condition_2_r823_4 condition
dépend de personne_à_charge contenu PersonneÀCharge
@@ -388,7 +388,7 @@ déclaration champ d'application ÉligibilitéAidePersonnaliséeLogement:
résultat éligibilité condition
résultat nombre_personnes_à_charge_prises_en_compte contenu entier
résultat coefficents_enfants_garde_alternée_pris_en_compte contenu
- collection décimal
+ liste de décimal
champ d'application ÉligibilitéAidePersonnaliséeLogement:
@@ -437,7 +437,7 @@ déclaration champ d'application ÉligibilitéAllocationLogement:
état l841_2
résultat nombre_personnes_à_charge_prises_en_compte contenu entier
résultat coefficents_enfants_garde_alternée_pris_en_compte contenu
- collection décimal
+ liste de décimal
champ d'application ÉligibilitéAllocationLogement:
# Ici nous instancions le champ d'application d'éligibilité aux prestations
@@ -608,9 +608,9 @@ déclaration champ d'application CalculÉquivalenceLoyerMinimale:
entrée condition_2_du_832_25 contenu booléen
entrée n_nombre_parts_d832_25 contenu décimal
- interne tranches_revenus_d832_26 contenu collection TrancheRevenu
+ interne tranches_revenus_d832_26 contenu liste de TrancheRevenu
interne tranches_revenus_d832_26_multipliées contenu
- collection TrancheRevenuDécimal
+ liste de TrancheRevenuDécimal
interne montant_forfaitaire_d832_26 contenu argent
résultat montant contenu argent
@@ -1137,7 +1137,7 @@ déclaration champ d'application CalculetteAidesAuLogement:
résultat traitement_aide_finale contenu argent
dépend de aide_finale contenu argent
résultat coefficents_enfants_garde_alternée_pris_en_compte contenu
- collection décimal
+ liste de décimal
champ d'application CalculetteAidesAuLogement:
définition éligibilité_aide_personnalisée_logement.ménage égal à
@@ -1205,7 +1205,7 @@ déclaration champ d'application CalculetteAidesAuLogementGardeAlternée:
interne ménage_sans_enfants_garde_alternée contenu Ménage
interne coefficents_enfants_garde_alternée_pris_en_compte contenu
- collection décimal
+ liste de décimal
calculette champ d'application CalculetteAidesAuLogement
calculette_sans_garde_alternée champ d'application CalculetteAidesAuLogement
diff --git a/examples/allocations_familiales/epilogue.catala_fr b/examples/allocations_familiales/epilogue.catala_fr
index 3e021dc3f..698255829 100644
--- a/examples/allocations_familiales/epilogue.catala_fr
+++ b/examples/allocations_familiales/epilogue.catala_fr
@@ -12,7 +12,7 @@ champ d'application EnfantLePlusÂgé:
définition le_plus_âgé égal à
potentiel_plus_âgé parmi enfants tel que
potentiel_plus_âgé.date_de_naissance est minimum
- ou si collection vide alors Enfant {
+ ou si liste vide alors Enfant {
-- identifiant: -1
-- obligation_scolaire: Pendant
-- rémuneration_mensuelle: 0€
@@ -72,8 +72,8 @@ déclaration structure EnfantEntrée:
déclaration champ d'application InterfaceAllocationsFamiliales:
entrée i_date_courante contenu date
- entrée i_enfants contenu collection EnfantEntrée
- interne enfants_à_charge contenu collection Enfant
+ entrée i_enfants contenu liste de EnfantEntrée
+ interne enfants_à_charge contenu liste de Enfant
allocations_familiales champ d'application AllocationsFamiliales
entrée i_ressources_ménage contenu argent
entrée i_résidence contenu Collectivité
@@ -137,4 +137,4 @@ champ d'application InterfaceAllocationsFamiliales:
NOTA :
Conformément à l'article 63 de la loi n° 2019-791 du 26 juillet 2019, ces
-dispositions entrent en vigueur à la rentrée scolaire 2019.
\ No newline at end of file
+dispositions entrent en vigueur à la rentrée scolaire 2019.
diff --git a/examples/allocations_familiales/prologue.catala_fr b/examples/allocations_familiales/prologue.catala_fr
index 249dffefe..95dabbde1 100644
--- a/examples/allocations_familiales/prologue.catala_fr
+++ b/examples/allocations_familiales/prologue.catala_fr
@@ -65,7 +65,7 @@ déclaration champ d'application AllocationFamilialesAvril2008:
résultat âge_minimum_alinéa_1_l521_3 contenu durée
déclaration champ d'application EnfantLePlusÂgé:
- entrée enfants contenu collection Enfant
+ entrée enfants contenu liste de Enfant
résultat le_plus_âgé contenu Enfant
déclaration champ d'application AllocationsFamiliales:
@@ -80,9 +80,9 @@ déclaration champ d'application AllocationsFamiliales:
entrée date_courante contenu date
# Variables concernant les enfants du ménage
- entrée enfants_à_charge contenu collection Enfant
+ entrée enfants_à_charge contenu liste de Enfant
interne enfants_à_charge_droit_ouvert_prestation_familiale
- contenu collection Enfant
+ contenu liste de Enfant
interne prise_en_compte contenu PriseEnCompte dépend de enfant contenu Enfant
résultat versement contenu VersementAllocations
dépend de enfant contenu Enfant
diff --git a/examples/droit_successions/droit_successions.catala_fr b/examples/droit_successions/droit_successions.catala_fr
index 53d0eab85..92d4a1e98 100644
--- a/examples/droit_successions/droit_successions.catala_fr
+++ b/examples/droit_successions/droit_successions.catala_fr
@@ -31,14 +31,14 @@ déclaration structure BienUsufruitRenteViagère:
déclaration champ d'application RéserveHéréditaire:
résultat quotité_réserve_héréditaire contenu décimal
- entrée enfants contenu collection Enfant
+ entrée enfants contenu liste de Enfant
interne enfant_pris_en_compte_réserve_héréditaire contenu booléen
dépend de enfant contenu Enfant
- interne enfants_réserve_héréditaire contenu collection Enfant
+ interne enfants_réserve_héréditaire contenu liste de Enfant
entrée conjoint_survivant_non_divorcé condition
entrée patrimoine_total contenu argent
- entrée biens_usufruit_rente_viagère contenu collection BienUsufruitRenteViagère
+ entrée biens_usufruit_rente_viagère contenu liste de BienUsufruitRenteViagère
résultat patrimoine_assiette_réserve_héréditaire contenu argent
résultat montant_réserve_héréditaire contenu argent
```
diff --git a/examples/tutorial_en/tutorial_en.catala_en b/examples/tutorial_en/tutorial_en.catala_en
index c7d0c1e0c..41ab01544 100644
--- a/examples/tutorial_en/tutorial_en.catala_en
+++ b/examples/tutorial_en/tutorial_en.catala_en
@@ -819,20 +819,20 @@ scope DateValues:
(value1 - |1999-12-31|) + 45 day # 367 + 45 days (2000 is bissextile)
```
-### Collections
+### Listes
Often, Catala programs need to speak about a collection of data because the law
talks about the number of children, the maximum of a list, etc. Catala features
-first-class support for collections, which are basically fixed-size lists.
+first-class support for lists.
You can create a list, filter its elements but also aggregate over its contents
to compute all sorts of values.
```catala
-declaration scope CollectionValues:
- internal value1 content collection integer
+declaration scope ListValues:
+ internal value1 content list of integer
internal value2 content integer
-scope CollectionValues:
+scope ListValues:
definition value1 equals [45;-6;3;4;0;2155]
definition value2 equals sum integer of (i * i) for i among value1
# sum of squares
diff --git a/examples/tutoriel_fr/tutoriel_fr.catala_fr b/examples/tutoriel_fr/tutoriel_fr.catala_fr
index 565d7579d..085205f8f 100644
--- a/examples/tutoriel_fr/tutoriel_fr.catala_fr
+++ b/examples/tutoriel_fr/tutoriel_fr.catala_fr
@@ -863,18 +863,17 @@ champ d'application ValeursDate:
### Collections
Souvent, les programmes Catala ont besoin de parler de collection de données
-parce que la loi parle du nombre d’enfants, du maximum d’une liste, etc.
-Catala propose un support de première classe pour les collections, qui ne sont
-finalement que des listes de taille fixe. Vous pouvez créer une liste, filtrer
-ses éléments, mais aussi agréger son contenu pour calculer toutes sortes de
-valeurs.
+parce que la loi parle du nombre d’enfants, du maximum d’une liste, etc. Catala
+propose un support de première classe pour les listes. Vous pouvez créer une
+liste, filtrer ses éléments, mais aussi agréger son contenu pour calculer toutes
+sortes de valeurs.
```catala
-déclaration champ d'application ValeursDeCollection:
- interne valeur1 contenu collection entier
+déclaration champ d'application ValeursDeListe:
+ interne valeur1 contenu liste de entier
interne valeur2 contenu entier
-champ d'application ValeursDeCollection:
+champ d'application ValeursDeListe:
définition valeur1 égal à [45;-6;3;4;0;2155]
définition valeur2 égal à somme entier de (i * i) pour i parmi valeur1
# somme de carré
diff --git a/examples/us_tax_code/section_121.catala_en b/examples/us_tax_code/section_121.catala_en
index 80a2e068f..ee84b1f1e 100644
--- a/examples/us_tax_code/section_121.catala_en
+++ b/examples/us_tax_code/section_121.catala_en
@@ -6,9 +6,9 @@ declaration structure Period:
data end content date
declaration scope PeriodMerge:
- context periods1 content collection Period
- context periods2 content collection Period
- output output_periods content collection Period
+ context periods1 content list of Period
+ context periods2 content list of Period
+ output output_periods content list of Period
scope PeriodMerge:
# Placeholders, overwritten by caller
@@ -34,13 +34,13 @@ declaration scope Section121SinglePerson:
output requirements_ownership_met condition
output requirements_usage_met condition
input date_of_sale_or_exchange content date
- input property_ownage content collection Period
+ input property_ownage content list of Period
# Invariant: the periods in the collection are disjoint
input property_usage_as_principal_residence
- content collection Period
+ content list of Period
# Invariant: the periods in the collection are disjoint
internal aggregate_periods_from_last_five_years content duration
- depends on periods content collection Period
+ depends on periods content list of Period
context output gain_cap content money
input gain_from_sale_or_exchange_of_property content money
output income_excluded_from_gross_income_uncapped content money
@@ -49,9 +49,9 @@ declaration scope Section121SinglePerson:
input other_section_121a_sale content OtherSection121aSale
declaration structure PersonalData:
- data property_ownage content collection Period
+ data property_ownage content list of Period
data property_usage_as_principal_residence
- content collection Period
+ content list of Period
data other_section_121a_sale content OtherSection121aSale
declaration structure JointReturn:
diff --git a/syntax_highlighting/emacs/catala-mode.el b/syntax_highlighting/emacs/catala-mode.el
index fb109ee70..88850dd63 100644
--- a/syntax_highlighting/emacs/catala-mode.el
+++ b/syntax_highlighting/emacs/catala-mode.el
@@ -22,7 +22,7 @@
(define-generic-mode 'catala-mode-fr
'("#")
'("contexte" "entrée" "résultat" "interne"
- "champ d'application" "si et seulement si" "dépend de" "déclaration" "inclus" "collection" "contenu" "optionnel" "structure" "énumération" "contexte" "entrée" "résultat" "interne" "règle" "sous condition" "condition" "donnée" "conséquence" "rempli" "égal à" "assertion" "définition" "état" "étiquette" "exception" "soit")
+ "champ d'application" "si et seulement si" "dépend de" "déclaration" "inclus" "liste de" "contenu" "optionnel" "structure" "énumération" "contexte" "entrée" "résultat" "interne" "règle" "sous condition" "condition" "donnée" "conséquence" "rempli" "égal à" "assertion" "définition" "état" "étiquette" "exception" "soit" "liste vide")
'(("\\<\\(selon\\|sous\s+forme\\|fixé\\|par\\|décroissante\\|croissante\\|varie\\|avec\\|on\s+a\\|soit\\|dans\\|tel\s+que\\|existe\\|pour\\|tout\\|de\\|si\\|alors\\|sinon\\|initial\\)\\>" . font-lock-builtin-face)
("\\<\\(vrai\\|faux\\)\\>" . font-lock-constant-face)
("\\<\\([0-9][0-9 ]*\\(,[0-9]*\\|\\)\\)\\>" . font-lock-constant-face)
@@ -41,7 +41,7 @@
(define-generic-mode 'catala-mode-en
'("#")
'("context" "input" "output" "internal"
- "scope" "depends on" "declaration" "includes" "collection" "content" "optional" "structure" "enumeration" "context" "input" "output" "internal" "rule" "under condition" "condition" "data" "consequence" "fulfilled" "equals" "assertion" "definition" "state" "label" "exception" "let")
+ "scope" "depends on" "declaration" "includes" "list of" "content" "optional" "structure" "enumeration" "context" "input" "output" "internal" "rule" "under condition" "condition" "data" "consequence" "fulfilled" "equals" "assertion" "definition" "state" "label" "exception" "let" "list empty")
'(("\\<\\(match\\|with\s+pattern\\|fixed\\|by\\|decreasing\\|increasing\\|varies\\|with\\|we\s+have\\|let\\|in\\|such\s+that\\|exists\\|for\\|all\\|of\\|if\\|then\\|else\\|initial\\)\\>" . font-lock-builtin-face)
("|[0-9]\\+-[0-9]\\+-[0-9]\\+|" . font-lock-constant-face)
("\\<\\(true\\|false\\)\\>" . font-lock-constant-face)
diff --git a/syntax_highlighting/en/ace/mode-catala_en.js b/syntax_highlighting/en/ace/mode-catala_en.js
index 46236ba44..15742e003 100644
--- a/syntax_highlighting/en/ace/mode-catala_en.js
+++ b/syntax_highlighting/en/ace/mode-catala_en.js
@@ -75,7 +75,7 @@ ace.define(
{
token: "keyword.other",
regex:
- "\\b(scope|depends\\s+on|declaration|includes|collection|content|optional|structure|enumeration|context|input|output|internal|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception|anything)\\b",
+ "\\b(scope|depends\\s+on|declaration|includes|list\\s+of|content|optional|structure|enumeration|context|input|output|internal|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception|anything|list\\s+empty)\\b",
},
{
token: "constant.numeric",
diff --git a/syntax_highlighting/en/atom/grammars/catala_en.cson b/syntax_highlighting/en/atom/grammars/catala_en.cson
index 21c59d0d9..50e4730be 100644
--- a/syntax_highlighting/en/atom/grammars/catala_en.cson
+++ b/syntax_highlighting/en/atom/grammars/catala_en.cson
@@ -125,7 +125,7 @@
'name' : 'keyword.control.catala_en'
}
{
- 'match' : '\\b(scope|depends\\s+on|declaration|includes|collection|content|optional|structure|enumeration|context|input|output|internal|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception|anything)\\b'
+ 'match' : '\\b(scope|depends\\s+on|declaration|includes|list\\s+of|content|optional|structure|enumeration|context|input|output|internal|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception|anything|list\\+empty)\\b'
'name' : 'keyword.other.catala_en'
}
{
diff --git a/syntax_highlighting/en/catala_en.iro b/syntax_highlighting/en/catala_en.iro
index a428db6e5..d05761df0 100644
--- a/syntax_highlighting/en/catala_en.iro
+++ b/syntax_highlighting/en/catala_en.iro
@@ -221,7 +221,7 @@ code : context {
}
: pattern {
- regex \= \b(contains|number|sum|such\s+that|exists|for|all|of|if|then|else|is|empty|among|maximum|minimum|round)\b
+ regex \= \b(contains|number|sum|such\s+that|exists|for|all|of|if|then|else|is|empty|among|maximum|minimum|round|list\s+empty)\b
styles [] = .keyword_rule ;
}
@@ -253,7 +253,7 @@ code : context {
}
: pattern {
- regex \= \b(structure|enumeration|collection|integer|boolean|date|duration|money|text|decimal)\b
+ regex \= \b(structure|enumeration|list\s+of|integer|boolean|date|duration|money|text|decimal)\b
styles [] = .primitive;
}
diff --git a/syntax_highlighting/en/pygments/catala_en_lexer/lexer.py b/syntax_highlighting/en/pygments/catala_en_lexer/lexer.py
index af86f69a7..a02673676 100644
--- a/syntax_highlighting/en/pygments/catala_en_lexer/lexer.py
+++ b/syntax_highlighting/en/pygments/catala_en_lexer/lexer.py
@@ -27,13 +27,13 @@ class CustomLexer(RegexLexer):
(u'(\\s*\\#.*$)', bygroups(Comment.Single)),
(u'(context|input|output|internal)(\\s*)(|output)(\\s+)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)', bygroups(Keyword.Declaration, String, Keyword.Declaration, String, Name.Variable)),
(u'\\b(match|with\\s+pattern|fixed|by|decreasing|increasing|varies|with|we\\s+have|let|in|scope|depends\\s+on|declaration|includes|content|rule|under\\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception)\\b', bygroups(Keyword.Reserved)),
- (u'\\b(contains|number|sum|such\\s+that|exists|for|all|of|if|then|else|is|empty|among|maximum|minimum|round)\\b', bygroups(Keyword.Declaration)),
+ (u'\\b(contains|number|sum|such\\s+that|exists|for|all|of|if|then|else|is|list\\s+empty|among|maximum|minimum|round)\\b', bygroups(Keyword.Declaration)),
(u'(\\|[0-9]+\\-[0-9]+\\-[0-9]+\\|)', bygroups(Number.Integer)),
(u'\\b(true|false)\\b', bygroups(Keyword.Constant)),
(u'\\b([0-9]+(,[0-9]*|))\\b', bygroups(Number.Integer)),
(u'(\\-\\-|\\;|\\.|\\,|\\:|\\(|\\)|\\[|\\]|\\{|\\})', bygroups(Operator)),
(u'(\\-\\>|\\+\\.|\\+\\@|\\+\\^|\\+\\$|\\+|\\-\\.|\\-\\@|\\-\\^|\\-\\$|\\-|\\*\\.|\\*\\@|\\*\\^|\\*\\$|\\*|/\\.|/\\@|/\\$|/|\\!|>\\.|>=\\.|<=\\.|<\\.|>\\@|>=\\@|<=\\@|<\\@|>\\$|>=\\$|<=\\$|<\\$|>\\^|>=\\^|<=\\^|<\\^|>|>=|<=|<|=|not|or|xor|and|\\$|\u20ac|%|year|month|day)', bygroups(Operator)),
- (u'\\b(structure|enumeration|collection|integer|boolean|date|duration|money|text|decimal)\\b', bygroups(Keyword.Type)),
+ (u'\\b(structure|enumeration|list\\s+of|integer|boolean|date|duration|money|text|decimal)\\b', bygroups(Keyword.Type)),
(u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class, Operator, Name.Variable)),
(u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\'\\.]*)\\b', bygroups(Name.Variable, Operator, String)),
(u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Variable)),
diff --git a/syntax_highlighting/en/vim/catala_en.vim b/syntax_highlighting/en/vim/catala_en.vim
index 5650ff687..10323d918 100644
--- a/syntax_highlighting/en/vim/catala_en.vim
+++ b/syntax_highlighting/en/vim/catala_en.vim
@@ -22,7 +22,7 @@ syn match Include "^\s*>\s*Include:.*$"
syn match sc_id_def contained "\<\([a-zéèàâùîôêœç][a-zéèàâùîôêœçA-ZÉÈÀÂÙÎÔÊŒÇ0-9_\']*\)\>"
syn match cc_id contained "\<\([A-ZÉÈÀÂÙÎÔÊŒÇ][a-zéèàâùîôêœçA-ZÉÈÀÂÙÎÔÊŒÇ0-9_\']*\)\>"
-syn match Keyword contained "\<\(scope\|depends\s\+on\|declaration\|includes\|collection\|content\|optional\|structure\|enumeration\|context\|rule\|under\s\+condition\|condition\|data\|consequence\|fulfilled\|equals\|assertion\|definition\|state\|label\|exception\|anything\)\>"
+syn match Keyword contained "\<\(scope\|depends\s\+on\|declaration\|includes\|list\s\+of\|content\|optional\|structure\|enumeration\|context\|rule\|under\s\+condition\|condition\|data\|consequence\|fulfilled\|equals\|assertion\|definition\|state\|label\|exception\|anything\)\>"
syn match Statement contained "\<\(match\|with\s\+pattern\|fixed\|by\|decreasing\|increasing\|varies\|with\|we\s\+have\|let\|in\|such\s\+that\|exists\|for\|all\|of\|if\|then\|else\|initial\)\>"
syn keyword Conditional contained if then else
syn match Comment contained "#.*$"
diff --git a/syntax_highlighting/en/vscode/syntaxes/catalavs.xml b/syntax_highlighting/en/vscode/syntaxes/catalavs.xml
index 51235df9e..b6ff595ea 100644
--- a/syntax_highlighting/en/vscode/syntaxes/catalavs.xml
+++ b/syntax_highlighting/en/vscode/syntaxes/catalavs.xml
@@ -200,7 +200,7 @@
</dict>
<dict>
<key>match</key>
- <string>\b(scope|depends\s+on|declaration|includes|collection|content|optional|structure|enumeration|context|input|output|internal|rule|under\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception|anything)\b</string>
+ <string>\b(scope|depends\s+on|declaration|includes|list\s+of|content|optional|structure|enumeration|context|input|output|internal|rule|under\s+condition|condition|data|consequence|fulfilled|equals|assertion|definition|state|label|exception|anything)\b</string>
<key>name</key>
<string>keyword.other.catala_en</string>
</dict>
diff --git a/syntax_highlighting/fr/ace/mode-catala_fr.js b/syntax_highlighting/fr/ace/mode-catala_fr.js
index 346ceb6f0..a32a29a7e 100644
--- a/syntax_highlighting/fr/ace/mode-catala_fr.js
+++ b/syntax_highlighting/fr/ace/mode-catala_fr.js
@@ -75,7 +75,7 @@ ace.define(
{
token: "keyword.other",
regex:
- "\\b(champ\\s+d'application|si\\s+et\\s+seulement\\s+si|d\u00e9pend\\s+de|d\u00e9claration|inclus|collection|contenu|optionnel|structure|\u00e9num\u00e9ration|contexte|entr\u00e9e|r\u00e9sultat|interne|r\u00e8gle|sous\\s+condition|condition|donn\u00e9e|cons\u00e9quence|rempli|\u00e9gal\\s+\u00e0|assertion|d\u00e9finition|\u00e9tat|\u00e9tiquette|exception|n'importe\\s+quel)\\b",
+ "\\b(champ\\s+d'application|si\\s+et\\s+seulement\\s+si|d\u00e9pend\\s+de|d\u00e9claration|inclus|liste\\s+de|contenu|optionnel|structure|\u00e9num\u00e9ration|contexte|entr\u00e9e|r\u00e9sultat|interne|r\u00e8gle|sous\\s+condition|condition|donn\u00e9e|cons\u00e9quence|rempli|\u00e9gal\\s+\u00e0|assertion|d\u00e9finition|\u00e9tat|\u00e9tiquette|exception|n'importe\\s+quel)\\b",
},
{
token: "constant.numeric",
diff --git a/syntax_highlighting/fr/atom/grammars/catala_fr.cson b/syntax_highlighting/fr/atom/grammars/catala_fr.cson
index 5d6a572a0..a01854eba 100644
--- a/syntax_highlighting/fr/atom/grammars/catala_fr.cson
+++ b/syntax_highlighting/fr/atom/grammars/catala_fr.cson
@@ -125,7 +125,7 @@
'name' : 'keyword.control.catala_fr'
}
{
- 'match' : '\\b(champ\\s+d\'application|si\\s+et\\s+seulement\\s+si|dépend\\s+de|déclaration|inclus|collection|contenu|optionnel|structure|énumération|contexte|entrée|r\\x{00e9}sultat|interne|règle|sous\\s+condition|condition|donnée|conséquence|rempli|égal\\s+à|assertion|définition|état|étiquette|exception|n\'importe\\s+quel)\\b'
+ 'match' : '\\b(champ\\s+d\'application|si\\s+et\\s+seulement\\s+si|dépend\\s+de|déclaration|inclus|liste\\s+de|contenu|optionnel|structure|énumération|contexte|entrée|r\\x{00e9}sultat|interne|règle|sous\\s+condition|condition|donnée|conséquence|rempli|égal\\s+à|assertion|définition|état|étiquette|exception|n\'importe\\s+quel)\\b'
'name' : 'keyword.other.catala_fr'
}
{
diff --git a/syntax_highlighting/fr/catala_fr.iro b/syntax_highlighting/fr/catala_fr.iro
index 0b43db5ee..2aa92350d 100644
--- a/syntax_highlighting/fr/catala_fr.iro
+++ b/syntax_highlighting/fr/catala_fr.iro
@@ -221,7 +221,7 @@ code : context {
}
: pattern {
- regex \= \b(contient|nombre|somme|tel\s+que|existe|pour|tout|de|si|alors|sinon|est|vide|parmi|maximum|minimum|arrondi)\b
+ regex \= \b(contient|nombre|somme|tel\s+que|existe|pour|tout|de|si|alors|sinon|est|liste\s+vide|parmi|maximum|minimum|arrondi)\b
styles [] = .keyword_rule ;
}
@@ -253,7 +253,7 @@ code : context {
}
: pattern {
- regex \= \b(structure|énumération|collection|entier|booléen|date|durée|argent|texte|décimal|décret|loi|nombre|somme)\b
+ regex \= \b(structure|énumération|liste\s+de|entier|booléen|date|durée|argent|texte|décimal|décret|loi|nombre|somme)\b
styles [] = .primitive;
}
diff --git a/syntax_highlighting/fr/pygments/catala_fr_lexer/lexer.py b/syntax_highlighting/fr/pygments/catala_fr_lexer/lexer.py
index 279a0f81d..31c5ca98d 100644
--- a/syntax_highlighting/fr/pygments/catala_fr_lexer/lexer.py
+++ b/syntax_highlighting/fr/pygments/catala_fr_lexer/lexer.py
@@ -27,14 +27,14 @@ class CustomLexer(RegexLexer):
(u'(\\s*\\#.*$)', bygroups(Comment.Single)),
(u'(contexte|entr\xe9e|r\xe9sultat|interne)(\\s*)(|r\xe9sultat)(\\s+)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc70-9_\\\']*)', bygroups(Keyword.Declaration, String, Keyword.Declaration, String, Name.Variable)),
(u'\\b(selon|sous\\s+forme|fix\xe9|par|d\xe9croissante|croissante|varie|avec|on\\s+a|soit|dans|champ\\s+d\'application|d\xe9pend\\s+de|d\xe9claration|inclusion|contenu|r\xe8gle|sous\\s+condition|condition|donn\xe9e|cons\xe9quence|rempli|\xe9gal\\s+\xe0|assertion|d\xe9finition|\xe9tat|\xe9tiquette|exception)\\b', bygroups(Keyword.Reserved)),
- (u'\\b(contient|nombre|somme|tel\\s+que|existe|pour|tout|de|si|alors|sinon|est|vide|parmi|maximum|minimum|arrondi)\\b', bygroups(Keyword.Declaration)),
+ (u'\\b(contient|nombre|somme|tel\\s+que|existe|pour|tout|de|si|alors|sinon|est|liste\\s+vide|parmi|maximum|minimum|arrondi)\\b', bygroups(Keyword.Declaration)),
(u'(\\|[0-9]+\\-[0-9]+\\-[0-9]+\\|)', bygroups(Number.Integer)),
(u'\\b(vrai|faux)\\b', bygroups(Keyword.Constant)),
(u'\\b([0-9]+(,[0-9]*|))\\b', bygroups(Number.Integer)),
(u'(\\-\\-|\\;|\\.|\\,|\\:|\\(|\\)|\\[|\\]|\\{|\\})', bygroups(Operator)),
(u'(\\-\\>|\\+\\.|\\+\\@|\\+\\^|\\+\u20ac|\\+|\\-\\.|\\-\\@|\\-\\^|\\-\u20ac|\\-|\\*\\.|\\*\\@|\\*\\^|\\*\u20ac|\\*|/\\.|/\\@|/\u20ac|/|\\!|>\\.|>=\\.|<=\\.|<\\.|>\\@|>=\\@|<=\\@|<\\@|>\u20ac|>=\u20ac|<=\u20ac|<\u20ac|>\\^|>=\\^|<=\\^|<\\^|>|>=|<=|<|=|\u20ac|%)', bygroups(Operator)),
(u'\\b(non|ou\\s+bien|ou|et|an|mois|jour)\\b', bygroups(Operator)),
- (u'\\b(structure|\xe9num\xe9ration|collection|entier|bool\xe9en|date|dur\xe9e|argent|texte|d\xe9cimal|d\xe9cret|loi|nombre|somme)\\b', bygroups(Keyword.Type)),
+ (u'\\b(structure|\xe9num\xe9ration|liste\\s+de|entier|bool\xe9en|date|dur\xe9e|argent|texte|d\xe9cimal|d\xe9cret|loi|nombre|somme)\\b', bygroups(Keyword.Type)),
(u'\\b([A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Class, Operator, Name.Variable)),
(u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc70-9_\\\']*)(\\.)([a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc70-9_\\\'\\.]*)\\b', bygroups(Name.Variable, Operator, String)),
(u'\\b([a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7][a-z\xe9\xe8\xe0\xe2\xf9\xee\xf4\xea\u0153\xe7A-Z\xc9\xc8\xc0\xc2\xd9\xce\xd4\xca\u0152\xc70-9_\\\']*)\\b', bygroups(Name.Variable)),
diff --git a/syntax_highlighting/fr/vim/catala_fr.vim b/syntax_highlighting/fr/vim/catala_fr.vim
index 49e0c4e88..ea74f0856 100644
--- a/syntax_highlighting/fr/vim/catala_fr.vim
+++ b/syntax_highlighting/fr/vim/catala_fr.vim
@@ -23,7 +23,7 @@ syn match Include "^\s*>\s*Inclusion:.*$"
syn match sc_id_def contained "\<\([a-zéèàâùîôêœç][a-zéèàâùîôêœçA-ZÉÈÀÂÙÎÔÊŒÇ0-9_\']*\)\>"
syn match cc_id contained "\<\([A-ZÉÈÀÂÙÎÔÊŒÇ][a-zéèàâùîôêœçA-ZÉÈÀÂÙÎÔÊŒÇ0-9_\']*\)\>"
-syn match Keyword contained "\<\(contexte\|entrée\|résultat\|interne\|champ\s\+d'application\|collection\|structure\|donnée\|énumération\|définition\|déclaration\|si\s\+et\s\+seulement\s\+si\|dépend\s\+de\|inclus\|contenu\|optionnel\|règle\|sous\s\+condition\|condition\|conséquence\|rempli\|égal\s\+à\|assertion\|état\|étiquette\|exception\|n'importe\s\+quel\)\>"
+syn match Keyword contained "\<\(contexte\|entrée\|résultat\|interne\|champ\s\+d'application\|liste\s\+de\|structure\|donnée\|énumération\|définition\|déclaration\|si\s\+et\s\+seulement\s\+si\|dépend\s\+de\|inclus\|contenu\|optionnel\|règle\|sous\s\+condition\|condition\|conséquence\|rempli\|égal\s\+à\|assertion\|état\|étiquette\|exception\|n'importe\s\+quel\)\>"
syn match Statement contained "\<\(selon\|sous\s\+forme\|fixé\|par\|décroissante\|croissante\|varie\|avec\|on\s\+a\|soit\|dans\|tel\s\+que\|existe\|pour\|tout\|de\|initial\)\>"
syn keyword Conditional contained si alors sinon
syn match Comment contained "#.*$"
diff --git a/syntax_highlighting/fr/vscode/syntaxes/catalavs.xml b/syntax_highlighting/fr/vscode/syntaxes/catalavs.xml
index 9385d9a27..55f58738a 100644
--- a/syntax_highlighting/fr/vscode/syntaxes/catalavs.xml
+++ b/syntax_highlighting/fr/vscode/syntaxes/catalavs.xml
@@ -200,7 +200,7 @@
</dict>
<dict>
<key>match</key>
- <string>\b(champ\s+d'application|si\s+et\s+seulement\s+si|dépend\s+de|déclaration|inclus|collection|contenu|optionnel|structure|énumération|contexte|entrée|résultat|interne|règle|sous\s+condition|condition|donnée|conséquence|rempli|égal\s+à|assertion|définition|état|étiquette|exception|n'importe\s+quel)\b</string>
+ <string>\b(champ\s+d'application|si\s+et\s+seulement\s+si|dépend\s+de|déclaration|inclus|liste\s+de|contenu|optionnel|structure|énumération|contexte|entrée|résultat|interne|règle|sous\s+condition|condition|donnée|conséquence|rempli|égal\s+à|assertion|définition|état|étiquette|exception|n'importe\s+quel)\b</string>
<key>name</key>
<string>keyword.other.catala_fr</string>
</dict>
diff --git a/syntax_highlighting/gnu_gedit/catala.lang b/syntax_highlighting/gnu_gedit/catala.lang
index 8150108f1..716ef4ea7 100644
--- a/syntax_highlighting/gnu_gedit/catala.lang
+++ b/syntax_highlighting/gnu_gedit/catala.lang
@@ -67,7 +67,7 @@
<keyword>上下文</keyword>
<!--COLLECTION-->
- <keyword>collection</keyword>
+ <keyword>list\s+of</keyword>
<!--CONTAINS-->
<keyword>contains</keyword>
| Renaming "collection" as "list"
The type "collection" should be renamed to "list" ("liste" in French). We should also specify "list of integer" instead of "list integer" (if the parser allows). This renaming will avoid any confusion that the user could have about order-preservation properties of operations on collections.
| 2023-12-07T17:01:20 | 0.0 | [] | [] |
|||
J-Quants/jquants-api-client-python | J-Quants__jquants-api-client-python-24 | 99c181e1d7df9863ef01f5a611b0b2baef8af629 | diff --git a/Makefile b/Makefile
index 6408064..4787092 100644
--- a/Makefile
+++ b/Makefile
@@ -15,4 +15,4 @@ lint-fix:
.PHONY: test
test:
- poetry run pytest
+ poetry run pytest --cov=./jquantsapi tests/
diff --git a/README.md b/README.md
index 6196f55..af45391 100644
--- a/README.md
+++ b/README.md
@@ -7,17 +7,22 @@ J-QuantsやAPI仕様についての詳細を知りたい方は [公式ウェブ
現在、J-Quants APIはベータ版サービスとして提供されています。
## 使用方法
+
pip経由でインストールします。
```shell
pip install jquants-api-client
```
+### J-Quants API の利用
+
+To use J-Quants API, you need to "Applications for J-Quants API" from [J-Quants API Web site](https://jpx-jquants.com/?lang=en#jquants-api).
+
+J-Quants APIを利用するためには[J-Quants API の Web サイト](https://jpx-jquants.com/#jquants-api) から「J-Quants API申し込み」が必要になります。
-### J-Quants API のリフレッシュトークン取得
+jquants-api-client-python を使用するためには「J-Quants API ログインページで使用するメールアドレスおよびパスワード」または「J-Quants API メニューページから取得したリフレッシュトークン」が必要になります。必要に応じて下記のWebサイトより取得してください。
-J-Quants APIを利用するためには [J-Quants API の Web サイト](https://jpx-jquants.com/#jquants-api) から取得できる
-リフレッシュトークンが必要になります。
+[J-Quants API ログインページ](https://application.jpx-jquants.com/)
### サンプルコード
@@ -26,15 +31,18 @@ from datetime import datetime
from dateutil import tz
import jquantsapi
-my_refresh_token:str = "*****"
-cli = jquantsapi.Client(refresh_token=my_refresh_token)
+my_mail_address:str = "*****"
+my_password: str = "*****"
+cli = jquantsapi.Client(mail_address=my_mail_address, password=my_password)
df = cli.get_price_range(
start_dt=datetime(2022, 7, 25, tzinfo=tz.gettz("Asia/Tokyo")),
end_dt=datetime(2022, 7, 26, tzinfo=tz.gettz("Asia/Tokyo")),
)
print(df)
```
+
APIレスポンスがDataframeの形式で取得できます。
+
```shell
Code Date ... AdjustmentClose AdjustmentVolume
0 13010 2022-07-25 ... 3630.0 8100.0
@@ -57,7 +65,10 @@ APIレスポンスがDataframeの形式で取得できます。
## 対応API
### ラッパー群
+
J-Quants API の各APIエンドポイントに対応しています。
+
+ - get_refresh_token
- get_id_token
- get_listed_info
- get_listed_sections
@@ -65,17 +76,43 @@ J-Quants API の各APIエンドポイントに対応しています。
- get_prices_daily_quotes
- get_fins_statements
- get_fins_announcement
+
### ユーティリティ群
+
日付範囲を指定して一括でデータ取得して、取得したデータを結合して返すようなユーティリティが用意されています。
+
- get_list
- get_price_range
- get_statements_range
+## 設定
+
+認証用のメールアドレス/パスワードおよびリフレッシュトークンは設定ファイルおよび環境変数を使用して指定することも可能です。
+設定は下記の順に読み込まれ、設定項目が重複している場合は後に読み込まれた値で上書きされます。
+
+1. `/content/drive/MyDrive/drive_ws/secret/jquants-api.toml` (Google Colabのみ)
+2. `${HOME}/.jquants-api/jquants-api.toml`
+3. `jquants-api.toml`
+4. `os.environ["JQUANTS_API_CLIENT_CONFIG_FILE"]`
+5. `${JQUANTS_API_MAIL_ADDRESS}`, `${JQUANTS_API_PASSWORD}`, `${JQUANTS_API_REFRESH_TOKEN}`
+
+### 設定ファイル例
+
+`jquants-api.toml` は下記のように設定します。
+
+```toml
+[jquants-api-client]
+mail_address = "*****"
+password = "*****"
+refresh_token = "*****"
+```
## 動作確認
-Python 3.10で動作確認を行っています。
+
+Google Colab および Python 3.10 で動作確認を行っています。
J-Quants APIは現在β版のため、本ライブラリも今後仕様が変更となる可能性があります。
## 開発
+
J-Quants API Clientの開発に是非ご協力ください。
Github上でIssueやPull Requestをお待ちしております。
diff --git a/jquantsapi/client.py b/jquantsapi/client.py
index f5d26b6..4eb7488 100644
--- a/jquantsapi/client.py
+++ b/jquantsapi/client.py
@@ -1,15 +1,35 @@
import os
+import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
-from typing import Dict, List, Optional, Union
+from pathlib import Path
+from typing import Any, List, Mapping, Optional, Union
import pandas as pd # type: ignore
import requests
from dateutil import tz
from requests.adapters import HTTPAdapter
+from requests.exceptions import HTTPError
+from tenacity import (
+ retry,
+ retry_if_exception_type,
+ stop_after_attempt,
+ wait_exponential,
+)
from urllib3.util import Retry
+if sys.version_info >= (3, 11):
+ import tomllib
+else:
+ import tomli as tomllib
+
+
DatetimeLike = Union[datetime, pd.Timestamp, str]
+_Data = Union[str, Mapping[str, Any]]
+
+
+class TokenAuthRefreshBadRequestException(Exception):
+ pass
class Client:
@@ -21,28 +41,135 @@ class Client:
JQUANTS_API_BASE = "https://api.jpx-jquants.com/v1"
MAX_WORKERS = 5
- def __init__(self, refresh_token: str) -> None:
+ def __init__(
+ self,
+ refresh_token: Optional[str] = None,
+ *,
+ mail_address: Optional[str] = None,
+ password: Optional[str] = None,
+ ) -> None:
"""
Args:
- refresh_token: J-Quants API リフレッシュトークン
+ refresh_token: J-Quants API refresh token
+ refresh_token_expiredat: refresh token expired_at
+ mail_address: J-Quants API login email address
+ password: J-Quants API login password
"""
- self.refresh_token = refresh_token
+ config = self._load_config()
+
+ self._mail_address = config["mail_address"]
+ if mail_address is not None:
+ self._mail_address = mail_address
+
+ self._password = config["password"]
+ if password is not None:
+ self._password = password
+
+ self._refresh_token = config["refresh_token"]
+ if refresh_token is not None:
+ self._refresh_token = refresh_token
+
+ if self._refresh_token != "":
+ self._refresh_token_expire = pd.Timestamp.utcnow() + pd.Timedelta(
+ 6, unit="D"
+ )
+ else:
+ self._refresh_token_expire = pd.Timestamp.utcnow()
+
self._id_token = ""
self._id_token_expire = pd.Timestamp.utcnow()
self._session: Optional[requests.Session] = None
+ if ((self._mail_address == "") or (self._password == "")) and (
+ self._refresh_token == ""
+ ):
+ raise ValueError(
+ "Either mail_address/password or refresh_token is required."
+ )
+ if (self._mail_address != "") and ("@" not in self._mail_address):
+ raise ValueError("mail_address must contain '@' charactor.")
+
+ def _is_colab(self) -> bool:
+ """
+ Return True if running in colab
+ """
+ return "google.colab" in sys.modules
+
+ def _load_config(self) -> dict:
+ """
+ load config from files and environment variables
+
+ Args:
+ N/A
+ Returns:
+ dict: configurations
+ """
+ config: dict = {}
+
+ # colab config
+ if self._is_colab():
+ colab_config_path = (
+ "/content/drive/MyDrive/drive_ws/secret/jquants-api.toml"
+ )
+ config = {**config, **self._read_config(colab_config_path)}
+
+ # user default config
+ user_config_path = f"{Path.home()}/.jquants-api/jquants-api.toml"
+ config = {**config, **self._read_config(user_config_path)}
+
+ # current dir config
+ current_config_path = "jquants-api.toml"
+ config = {**config, **self._read_config(current_config_path)}
+
+ # env specified config
+ if "JQUANTS_API_CLIENT_CONFIG_FILE" in os.environ:
+ env_config_path = os.environ["JQUANTS_API_CLIENT_CONFIG_FILE"]
+ config = {**config, **self._read_config(env_config_path)}
+
+ # env vars
+ config["mail_address"] = os.environ.get(
+ "JQUANTS_API_MAIL_ADDRESS", config.get("mail_address", "")
+ )
+ config["password"] = os.environ.get(
+ "JQUANTS_API_PASSWORD", config.get("password", "")
+ )
+ config["refresh_token"] = os.environ.get(
+ "JQUANTS_API_REFRESH_TOKEN", config.get("refresh_token", "")
+ )
+
+ return config
+
+ def _read_config(self, config_path: str) -> dict:
+ """
+ read config from a toml file
+
+ Params:
+ config_path: a path to a toml file
+ """
+ if not os.path.isfile(config_path):
+ return {}
+
+ with open(config_path, mode="rb") as f:
+ ret = tomllib.load(f)
+
+ if "jquants-api-client" not in ret:
+ return {}
+
+ return ret["jquants-api-client"]
+
def _base_headers(self) -> dict:
"""
J-Quants API にアクセスする際にヘッダーにIDトークンを設定
"""
- headers = {"Authorization": f"Bearer {self.get_id_token()}"}
+ id_token = self.get_id_token()
+ headers = {"Authorization": f"Bearer {id_token}"}
return headers
def _request_session(
self,
status_forcelist: Optional[List[int]] = None,
method_whitelist: Optional[List[str]] = None,
- ):
+ ) -> requests.Session:
"""
requests の session 取得
@@ -71,7 +198,7 @@ def _request_session(
return self._session
- def _get(self, url: str, params: dict = None) -> requests.Response:
+ def _get(self, url: str, params: Optional[dict] = None) -> requests.Response:
"""
requests の get 用ラッパー
@@ -93,7 +220,11 @@ def _get(self, url: str, params: dict = None) -> requests.Response:
return ret
def _post(
- self, url: str, payload: dict = None, headers: dict = None
+ self,
+ url: str,
+ data: Optional[_Data] = None,
+ json: Optional[Any] = None,
+ headers: Optional[dict] = None,
) -> requests.Response:
"""
requests の get 用ラッパー
@@ -111,16 +242,92 @@ def _post(
"""
s = self._request_session()
- ret = s.post(url, data=payload, headers=headers, timeout=30)
+ ret = s.post(url, data=data, json=json, headers=headers, timeout=30)
ret.raise_for_status()
return ret
- def get_id_token(self) -> str:
+ def get_refresh_token(
+ self, mail_address: Optional[str] = None, password: Optional[str] = None
+ ) -> str:
+ """
+ get J-Quants API refresh token
+
+ Params:
+ mail_address: J-Quants API login email address
+ password: J-Quants API login password
+ Returns:
+ refresh_token: J-Quants API refresh token
+ """
+ if self._refresh_token_expire > pd.Timestamp.utcnow():
+ return self._refresh_token
+
+ if mail_address is None:
+ mail_address = self._mail_address
+ if password is None:
+ password = self._password
+
+ if mail_address == "" or password == "":
+ raise ValueError("mail_address/password are required")
+ if (mail_address is not None) and ("@" not in mail_address):
+            raise ValueError("mail_address must contain '@' character.")
+
+ url = f"{self.JQUANTS_API_BASE}/token/auth_user"
+ data = {
+ "mailaddress": mail_address,
+ "password": password,
+ }
+ ret = self._post(url, json=data)
+ refresh_token = ret.json()["refreshToken"]
+ self._refresh_token = refresh_token
+ self._refresh_token_expire = pd.Timestamp.utcnow() + pd.Timedelta(6, unit="D")
+ return self._refresh_token
+
+ @retry(
+ retry=retry_if_exception_type(TokenAuthRefreshBadRequestException),
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=5, max=300),
+ )
+ def get_id_token(self, refresh_token: Optional[str] = None) -> str:
+ """
+ get J-Quants API id_token
+
+ Params:
+ refresh_token: J-Quants API refresh token
+        Returns:
+ id_token: J-Quants API id token
+ """
if self._id_token_expire > pd.Timestamp.utcnow():
return self._id_token
- url = f"{self.JQUANTS_API_BASE}/token/auth_refresh?refreshtoken={self.refresh_token}"
- ret = self._post(url)
+ if refresh_token is not None:
+ _refresh_token = refresh_token
+ else:
+ _refresh_token = self.get_refresh_token()
+
+ url = (
+ f"{self.JQUANTS_API_BASE}/token/auth_refresh?refreshtoken={_refresh_token}"
+ )
+ try:
+ ret = self._post(url)
+ except HTTPError as e:
+ # retry if:
+ # - refresh_token is not provided as a parameter
+ # - error is 400 bad request
+ # - mail_address and password are provided
+ if (
+ refresh_token is None
+ and e.response.status_code == 400
+ and self._mail_address != ""
+ and self._password != ""
+ ):
+ # clear tokens for the next try
+ self._refresh_token = ""
+ self._refresh_token_expire = pd.Timestamp.utcnow()
+ self._id_token = ""
+ self._id_token_expire = pd.Timestamp.utcnow()
+ # raise for retrying
+ raise TokenAuthRefreshBadRequestException(e)
+ raise e
id_token = ret.json()["idToken"]
self._id_token = id_token
self._id_token_expire = pd.Timestamp.utcnow() + pd.Timedelta(23, unit="hour")
@@ -169,7 +376,7 @@ def get_listed_sections(self) -> pd.DataFrame:
pd.DataFrame: セクター一覧
"""
url = f"{self.JQUANTS_API_BASE}/listed/sections"
- params: Dict = {}
+ params: dict = {}
ret = self._get(url, params)
d = ret.json()
df = pd.DataFrame.from_dict(d["sections"])
@@ -300,6 +507,9 @@ def get_price_range(
Returns:
pd.DataFrame: 株価情報 (Code, Date列でソートされています)
"""
+ # pre-load id_token
+ self.get_id_token()
+
buff = []
dates = pd.date_range(start_dt, end_dt, freq="D")
with ThreadPoolExecutor(max_workers=self.MAX_WORKERS) as executor:
diff --git a/poetry.lock b/poetry.lock
index e167e03..d330d67 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,11 +1,3 @@
-[[package]]
-name = "atomicwrites"
-version = "1.4.1"
-description = "Atomic file writes."
-category = "dev"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-
[[package]]
name = "attrs"
version = "22.1.0"
@@ -22,7 +14,7 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>
[[package]]
name = "black"
-version = "22.6.0"
+version = "22.8.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
@@ -53,7 +45,7 @@ python-versions = ">=3.6"
[[package]]
name = "charset-normalizer"
-version = "2.1.0"
+version = "2.1.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
@@ -84,7 +76,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
name = "coverage"
-version = "6.4.2"
+version = "6.4.4"
description = "Code coverage measurement for Python"
category = "dev"
optional = false
@@ -98,7 +90,7 @@ toml = ["tomli"]
[[package]]
name = "dunamai"
-version = "1.12.0"
+version = "1.13.0"
description = "Dynamic version generation"
category = "dev"
optional = false
@@ -235,7 +227,7 @@ python-versions = ">=3.7,<3.11"
[[package]]
name = "numpy"
-version = "1.23.1"
+version = "1.23.2"
description = "NumPy is the fundamental package for array computing with Python."
category = "main"
optional = false
@@ -274,7 +266,7 @@ test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
[[package]]
name = "pandas"
-version = "1.4.3"
+version = "1.4.4"
description = "Powerful data structures for data analysis, time series, and statistics"
category = "main"
optional = false
@@ -295,11 +287,11 @@ test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"]
[[package]]
name = "pathspec"
-version = "0.9.0"
+version = "0.10.1"
description = "Utility library for gitignore style pattern matching of file paths."
category = "dev"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+python-versions = ">=3.7"
[[package]]
name = "platformdirs"
@@ -378,26 +370,25 @@ diagrams = ["railroad-diagrams", "jinja2"]
[[package]]
name = "pyproject-flake8"
-version = "0.0.1a4"
+version = "0.0.1a5"
description = "pyproject-flake8 (`pflake8`), a monkey patching wrapper to connect flake8 with pyproject.toml configuration"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
-flake8 = "*"
+flake8 = "<5.0.0"
tomli = {version = "*", markers = "python_version < \"3.11\""}
[[package]]
name = "pytest"
-version = "7.1.2"
+version = "7.1.3"
description = "pytest: simple powerful testing with Python"
category = "dev"
optional = false
python-versions = ">=3.7"
[package.dependencies]
-atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""}
attrs = ">=19.2.0"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
@@ -438,7 +429,7 @@ six = ">=1.5"
[[package]]
name = "pytz"
-version = "2022.1"
+version = "2022.2.1"
description = "World timezone definitions, modern and historical"
category = "main"
optional = false
@@ -470,17 +461,28 @@ category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+[[package]]
+name = "tenacity"
+version = "8.0.1"
+description = "Retry code until it succeeds"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+doc = ["reno", "sphinx", "tornado (>=4.5)"]
+
[[package]]
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
-category = "dev"
+category = "main"
optional = false
python-versions = ">=3.7"
[[package]]
name = "tomlkit"
-version = "0.11.1"
+version = "0.11.4"
description = "Style preserving TOML library"
category = "dev"
optional = false
@@ -504,7 +506,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.28.7"
+version = "2.28.9"
description = "Typing stubs for requests"
category = "main"
optional = false
@@ -515,7 +517,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-urllib3"
-version = "1.26.20"
+version = "1.26.24"
description = "Typing stubs for urllib3"
category = "main"
optional = false
@@ -531,7 +533,7 @@ python-versions = ">=3.7"
[[package]]
name = "urllib3"
-version = "1.26.11"
+version = "1.26.12"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
@@ -539,7 +541,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*,
[package.extras]
brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"]
-secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "urllib3-secure-extra", "ipaddress"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
@@ -557,425 +559,55 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-
[metadata]
lock-version = "1.1"
python-versions = ">=3.7.1,<4.0"
-content-hash = "b57f3808a728745c394f92f0ef66366da45b6be43cd4fb0a33c63acd86b4d2ea"
+content-hash = "329f74167d4a5eab54784fa97d11042990888cbd42006740ea78db0429e16be7"
[metadata.files]
-atomicwrites = [
- {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"},
-]
-attrs = [
- {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
- {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
-]
-black = [
- {file = "black-22.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f586c26118bc6e714ec58c09df0157fe2d9ee195c764f630eb0d8e7ccce72e69"},
- {file = "black-22.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b270a168d69edb8b7ed32c193ef10fd27844e5c60852039599f9184460ce0807"},
- {file = "black-22.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6797f58943fceb1c461fb572edbe828d811e719c24e03375fd25170ada53825e"},
- {file = "black-22.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c85928b9d5f83b23cee7d0efcb310172412fbf7cb9d9ce963bd67fd141781def"},
- {file = "black-22.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6fe02afde060bbeef044af7996f335fbe90b039ccf3f5eb8f16df8b20f77666"},
- {file = "black-22.6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cfaf3895a9634e882bf9d2363fed5af8888802d670f58b279b0bece00e9a872d"},
- {file = "black-22.6.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94783f636bca89f11eb5d50437e8e17fbc6a929a628d82304c80fa9cd945f256"},
- {file = "black-22.6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2ea29072e954a4d55a2ff58971b83365eba5d3d357352a07a7a4df0d95f51c78"},
- {file = "black-22.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e439798f819d49ba1c0bd9664427a05aab79bfba777a6db94fd4e56fae0cb849"},
- {file = "black-22.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:187d96c5e713f441a5829e77120c269b6514418f4513a390b0499b0987f2ff1c"},
- {file = "black-22.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:074458dc2f6e0d3dab7928d4417bb6957bb834434516f21514138437accdbe90"},
- {file = "black-22.6.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a218d7e5856f91d20f04e931b6f16d15356db1c846ee55f01bac297a705ca24f"},
- {file = "black-22.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:568ac3c465b1c8b34b61cd7a4e349e93f91abf0f9371eda1cf87194663ab684e"},
- {file = "black-22.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c1734ab264b8f7929cef8ae5f900b85d579e6cbfde09d7387da8f04771b51c6"},
- {file = "black-22.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9a3ac16efe9ec7d7381ddebcc022119794872abce99475345c5a61aa18c45ad"},
- {file = "black-22.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b9fd45787ba8aa3f5e0a0a98920c1012c884622c6c920dbe98dbd05bc7c70fbf"},
- {file = "black-22.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7ba9be198ecca5031cd78745780d65a3f75a34b2ff9be5837045dce55db83d1c"},
- {file = "black-22.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a3db5b6409b96d9bd543323b23ef32a1a2b06416d525d27e0f67e74f1446c8f2"},
- {file = "black-22.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:560558527e52ce8afba936fcce93a7411ab40c7d5fe8c2463e279e843c0328ee"},
- {file = "black-22.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b154e6bbde1e79ea3260c4b40c0b7b3109ffcdf7bc4ebf8859169a6af72cd70b"},
- {file = "black-22.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:4af5bc0e1f96be5ae9bd7aaec219c901a94d6caa2484c21983d043371c733fc4"},
- {file = "black-22.6.0-py3-none-any.whl", hash = "sha256:ac609cf8ef5e7115ddd07d85d988d074ed00e10fbc3445aee393e70164a2219c"},
- {file = "black-22.6.0.tar.gz", hash = "sha256:6c6d39e28aed379aec40da1c65434c77d75e65bb59a1e1c283de545fb4e7c6c9"},
-]
-certifi = [
- {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"},
- {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"},
-]
-charset-normalizer = [
- {file = "charset-normalizer-2.1.0.tar.gz", hash = "sha256:575e708016ff3a5e3681541cb9d79312c416835686d054a23accb873b254f413"},
- {file = "charset_normalizer-2.1.0-py3-none-any.whl", hash = "sha256:5189b6f22b01957427f35b6a08d9a0bc45b46d3788ef5a92e978433c7a35f8a5"},
-]
-click = [
- {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
- {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
-]
-colorama = [
- {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
- {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
-]
-coverage = [
- {file = "coverage-6.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a9032f9b7d38bdf882ac9f66ebde3afb8145f0d4c24b2e600bc4c6304aafb87e"},
- {file = "coverage-6.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e0524adb49c716ca763dbc1d27bedce36b14f33e6b8af6dba56886476b42957c"},
- {file = "coverage-6.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4548be38a1c810d79e097a38107b6bf2ff42151900e47d49635be69943763d8"},
- {file = "coverage-6.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f23876b018dfa5d3e98e96f5644b109090f16a4acb22064e0f06933663005d39"},
- {file = "coverage-6.4.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fe75dcfcb889b6800f072f2af5a331342d63d0c1b3d2bf0f7b4f6c353e8c9c0"},
- {file = "coverage-6.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2f8553878a24b00d5ab04b7a92a2af50409247ca5c4b7a2bf4eabe94ed20d3ee"},
- {file = "coverage-6.4.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d774d9e97007b018a651eadc1b3970ed20237395527e22cbeb743d8e73e0563d"},
- {file = "coverage-6.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d56f105592188ce7a797b2bd94b4a8cb2e36d5d9b0d8a1d2060ff2a71e6b9bbc"},
- {file = "coverage-6.4.2-cp310-cp310-win32.whl", hash = "sha256:d230d333b0be8042ac34808ad722eabba30036232e7a6fb3e317c49f61c93386"},
- {file = "coverage-6.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:5ef42e1db047ca42827a85e34abe973971c635f83aed49611b7f3ab49d0130f0"},
- {file = "coverage-6.4.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:25b7ec944f114f70803d6529394b64f8749e93cbfac0fe6c5ea1b7e6c14e8a46"},
- {file = "coverage-6.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bb00521ab4f99fdce2d5c05a91bddc0280f0afaee0e0a00425e28e209d4af07"},
- {file = "coverage-6.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2dff52b3e7f76ada36f82124703f4953186d9029d00d6287f17c68a75e2e6039"},
- {file = "coverage-6.4.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:147605e1702d996279bb3cc3b164f408698850011210d133a2cb96a73a2f7996"},
- {file = "coverage-6.4.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:422fa44070b42fef9fb8dabd5af03861708cdd6deb69463adc2130b7bf81332f"},
- {file = "coverage-6.4.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8af6c26ba8df6338e57bedbf916d76bdae6308e57fc8f14397f03b5da8622b4e"},
- {file = "coverage-6.4.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5336e0352c0b12c7e72727d50ff02557005f79a0b8dcad9219c7c4940a930083"},
- {file = "coverage-6.4.2-cp37-cp37m-win32.whl", hash = "sha256:0f211df2cba951ffcae210ee00e54921ab42e2b64e0bf2c0befc977377fb09b7"},
- {file = "coverage-6.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a13772c19619118903d65a91f1d5fea84be494d12fd406d06c849b00d31bf120"},
- {file = "coverage-6.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f7bd0ffbcd03dc39490a1f40b2669cc414fae0c4e16b77bb26806a4d0b7d1452"},
- {file = "coverage-6.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0895ea6e6f7f9939166cc835df8fa4599e2d9b759b02d1521b574e13b859ac32"},
- {file = "coverage-6.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e7ced84a11c10160c0697a6cc0b214a5d7ab21dfec1cd46e89fbf77cc66fae"},
- {file = "coverage-6.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80db4a47a199c4563d4a25919ff29c97c87569130375beca3483b41ad5f698e8"},
- {file = "coverage-6.4.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3def6791adf580d66f025223078dc84c64696a26f174131059ce8e91452584e1"},
- {file = "coverage-6.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4f89d8e03c8a3757aae65570d14033e8edf192ee9298303db15955cadcff0c63"},
- {file = "coverage-6.4.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6d0b48aff8e9720bdec315d67723f0babd936a7211dc5df453ddf76f89c59933"},
- {file = "coverage-6.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2b20286c2b726f94e766e86a3fddb7b7e37af5d0c635bdfa7e4399bc523563de"},
- {file = "coverage-6.4.2-cp38-cp38-win32.whl", hash = "sha256:d714af0bdba67739598849c9f18efdcc5a0412f4993914a0ec5ce0f1e864d783"},
- {file = "coverage-6.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:5f65e5d3ff2d895dab76b1faca4586b970a99b5d4b24e9aafffc0ce94a6022d6"},
- {file = "coverage-6.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a697977157adc052284a7160569b36a8bbec09db3c3220642e6323b47cec090f"},
- {file = "coverage-6.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c77943ef768276b61c96a3eb854eba55633c7a3fddf0a79f82805f232326d33f"},
- {file = "coverage-6.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54d8d0e073a7f238f0666d3c7c0d37469b2aa43311e4024c925ee14f5d5a1cbe"},
- {file = "coverage-6.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f22325010d8824594820d6ce84fa830838f581a7fd86a9235f0d2ed6deb61e29"},
- {file = "coverage-6.4.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24b04d305ea172ccb21bee5bacd559383cba2c6fcdef85b7701cf2de4188aa55"},
- {file = "coverage-6.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:866ebf42b4c5dbafd64455b0a1cd5aa7b4837a894809413b930026c91e18090b"},
- {file = "coverage-6.4.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e36750fbbc422c1c46c9d13b937ab437138b998fe74a635ec88989afb57a3978"},
- {file = "coverage-6.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:79419370d6a637cb18553ecb25228893966bd7935a9120fa454e7076f13b627c"},
- {file = "coverage-6.4.2-cp39-cp39-win32.whl", hash = "sha256:b5e28db9199dd3833cc8a07fa6cf429a01227b5d429facb56eccd765050c26cd"},
- {file = "coverage-6.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:edfdabe7aa4f97ed2b9dd5dde52d2bb29cb466993bb9d612ddd10d0085a683cf"},
- {file = "coverage-6.4.2-pp36.pp37.pp38-none-any.whl", hash = "sha256:e2618cb2cf5a7cc8d698306e42ebcacd02fb7ef8cfc18485c59394152c70be97"},
- {file = "coverage-6.4.2.tar.gz", hash = "sha256:6c3ccfe89c36f3e5b9837b9ee507472310164f352c9fe332120b764c9d60adbe"},
-]
-dunamai = [
- {file = "dunamai-1.12.0-py3-none-any.whl", hash = "sha256:00b9c1ef58d4950204f76c20f84afe7a28d095f77feaa8512dbb172035415e61"},
- {file = "dunamai-1.12.0.tar.gz", hash = "sha256:fac4f09e2b8a105bd01f8c50450fea5aa489a6c439c949950a65f0dd388b0d20"},
-]
-flake8 = [
- {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
- {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
-]
-idna = [
- {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
- {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
-]
-importlib-metadata = [
- {file = "importlib_metadata-4.2.0-py3-none-any.whl", hash = "sha256:057e92c15bc8d9e8109738a48db0ccb31b4d9d5cfbee5a8670879a30be66304b"},
- {file = "importlib_metadata-4.2.0.tar.gz", hash = "sha256:b7e52a1f8dec14a75ea73e0891f3060099ca1d8e6a462a4dff11c3e119ea1b31"},
-]
+attrs = []
+black = []
+certifi = []
+charset-normalizer = []
+click = []
+colorama = []
+coverage = []
+dunamai = []
+flake8 = []
+idna = []
+importlib-metadata = []
iniconfig = [
{file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
{file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"},
]
-isort = [
- {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"},
- {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"},
-]
-jinja2 = [
- {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
- {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
-]
-markupsafe = [
- {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"},
- {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"},
- {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"},
- {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"},
- {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"},
- {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"},
-]
-mccabe = [
- {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
- {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
-]
-mypy = [
- {file = "mypy-0.971-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f2899a3cbd394da157194f913a931edfd4be5f274a88041c9dc2d9cdcb1c315c"},
- {file = "mypy-0.971-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98e02d56ebe93981c41211c05adb630d1d26c14195d04d95e49cd97dbc046dc5"},
- {file = "mypy-0.971-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:19830b7dba7d5356d3e26e2427a2ec91c994cd92d983142cbd025ebe81d69cf3"},
- {file = "mypy-0.971-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:02ef476f6dcb86e6f502ae39a16b93285fef97e7f1ff22932b657d1ef1f28655"},
- {file = "mypy-0.971-cp310-cp310-win_amd64.whl", hash = "sha256:25c5750ba5609a0c7550b73a33deb314ecfb559c350bb050b655505e8aed4103"},
- {file = "mypy-0.971-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d3348e7eb2eea2472db611486846742d5d52d1290576de99d59edeb7cd4a42ca"},
- {file = "mypy-0.971-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3fa7a477b9900be9b7dd4bab30a12759e5abe9586574ceb944bc29cddf8f0417"},
- {file = "mypy-0.971-cp36-cp36m-win_amd64.whl", hash = "sha256:2ad53cf9c3adc43cf3bea0a7d01a2f2e86db9fe7596dfecb4496a5dda63cbb09"},
- {file = "mypy-0.971-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:855048b6feb6dfe09d3353466004490b1872887150c5bb5caad7838b57328cc8"},
- {file = "mypy-0.971-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:23488a14a83bca6e54402c2e6435467a4138785df93ec85aeff64c6170077fb0"},
- {file = "mypy-0.971-cp37-cp37m-win_amd64.whl", hash = "sha256:4b21e5b1a70dfb972490035128f305c39bc4bc253f34e96a4adf9127cf943eb2"},
- {file = "mypy-0.971-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9796a2ba7b4b538649caa5cecd398d873f4022ed2333ffde58eaf604c4d2cb27"},
- {file = "mypy-0.971-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a361d92635ad4ada1b1b2d3630fc2f53f2127d51cf2def9db83cba32e47c856"},
- {file = "mypy-0.971-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b793b899f7cf563b1e7044a5c97361196b938e92f0a4343a5d27966a53d2ec71"},
- {file = "mypy-0.971-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d1ea5d12c8e2d266b5fb8c7a5d2e9c0219fedfeb493b7ed60cd350322384ac27"},
- {file = "mypy-0.971-cp38-cp38-win_amd64.whl", hash = "sha256:23c7ff43fff4b0df93a186581885c8512bc50fc4d4910e0f838e35d6bb6b5e58"},
- {file = "mypy-0.971-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1f7656b69974a6933e987ee8ffb951d836272d6c0f81d727f1d0e2696074d9e6"},
- {file = "mypy-0.971-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2022bfadb7a5c2ef410d6a7c9763188afdb7f3533f22a0a32be10d571ee4bbe"},
- {file = "mypy-0.971-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef943c72a786b0f8d90fd76e9b39ce81fb7171172daf84bf43eaf937e9f220a9"},
- {file = "mypy-0.971-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d744f72eb39f69312bc6c2abf8ff6656973120e2eb3f3ec4f758ed47e414a4bf"},
- {file = "mypy-0.971-cp39-cp39-win_amd64.whl", hash = "sha256:77a514ea15d3007d33a9e2157b0ba9c267496acf12a7f2b9b9f8446337aac5b0"},
- {file = "mypy-0.971-py3-none-any.whl", hash = "sha256:0d054ef16b071149917085f51f89555a576e2618d5d9dd70bd6eea6410af3ac9"},
- {file = "mypy-0.971.tar.gz", hash = "sha256:40b0f21484238269ae6a57200c807d80debc6459d444c0489a102d7c6a75fa56"},
-]
-mypy-extensions = [
- {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
- {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
-]
-numpy = [
- {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8737609c3bbdd48e380d463134a35ffad3b22dc56295eff6f79fd85bd0eeeb25"},
- {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fdffbfb6832cd0b300995a2b08b8f6fa9f6e856d562800fea9182316d99c4e8e"},
- {file = "numpy-1.21.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3820724272f9913b597ccd13a467cc492a0da6b05df26ea09e78b171a0bb9da6"},
- {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f17e562de9edf691a42ddb1eb4a5541c20dd3f9e65b09ded2beb0799c0cf29bb"},
- {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f30427731561ce75d7048ac254dbe47a2ba576229250fb60f0fb74db96501a1"},
- {file = "numpy-1.21.6-cp310-cp310-win32.whl", hash = "sha256:d4bf4d43077db55589ffc9009c0ba0a94fa4908b9586d6ccce2e0b164c86303c"},
- {file = "numpy-1.21.6-cp310-cp310-win_amd64.whl", hash = "sha256:d136337ae3cc69aa5e447e78d8e1514be8c3ec9b54264e680cf0b4bd9011574f"},
- {file = "numpy-1.21.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6aaf96c7f8cebc220cdfc03f1d5a31952f027dda050e5a703a0d1c396075e3e7"},
- {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:67c261d6c0a9981820c3a149d255a76918278a6b03b6a036800359aba1256d46"},
- {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a6be4cb0ef3b8c9250c19cc122267263093eee7edd4e3fa75395dfda8c17a8e2"},
- {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c4068a8c44014b2d55f3c3f574c376b2494ca9cc73d2f1bd692382b6dffe3db"},
- {file = "numpy-1.21.6-cp37-cp37m-win32.whl", hash = "sha256:7c7e5fa88d9ff656e067876e4736379cc962d185d5cd808014a8a928d529ef4e"},
- {file = "numpy-1.21.6-cp37-cp37m-win_amd64.whl", hash = "sha256:bcb238c9c96c00d3085b264e5c1a1207672577b93fa666c3b14a45240b14123a"},
- {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:82691fda7c3f77c90e62da69ae60b5ac08e87e775b09813559f8901a88266552"},
- {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:643843bcc1c50526b3a71cd2ee561cf0d8773f062c8cbaf9ffac9fdf573f83ab"},
- {file = "numpy-1.21.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:357768c2e4451ac241465157a3e929b265dfac85d9214074985b1786244f2ef3"},
- {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f411b2c3f3d76bba0865b35a425157c5dcf54937f82bbeb3d3c180789dd66a6"},
- {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4aa48afdce4660b0076a00d80afa54e8a97cd49f457d68a4342d188a09451c1a"},
- {file = "numpy-1.21.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a96eef20f639e6a97d23e57dd0c1b1069a7b4fd7027482a4c5c451cd7732f4"},
- {file = "numpy-1.21.6-cp38-cp38-win32.whl", hash = "sha256:5c3c8def4230e1b959671eb959083661b4a0d2e9af93ee339c7dada6759a9470"},
- {file = "numpy-1.21.6-cp38-cp38-win_amd64.whl", hash = "sha256:bf2ec4b75d0e9356edea834d1de42b31fe11f726a81dfb2c2112bc1eaa508fcf"},
- {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4391bd07606be175aafd267ef9bea87cf1b8210c787666ce82073b05f202add1"},
- {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67f21981ba2f9d7ba9ade60c9e8cbaa8cf8e9ae51673934480e45cf55e953673"},
- {file = "numpy-1.21.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee5ec40fdd06d62fe5d4084bef4fd50fd4bb6bfd2bf519365f569dc470163ab0"},
- {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1dbe1c91269f880e364526649a52eff93ac30035507ae980d2fed33aaee633ac"},
- {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d9caa9d5e682102453d96a0ee10c7241b72859b01a941a397fd965f23b3e016b"},
- {file = "numpy-1.21.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58459d3bad03343ac4b1b42ed14d571b8743dc80ccbf27444f266729df1d6f5b"},
- {file = "numpy-1.21.6-cp39-cp39-win32.whl", hash = "sha256:7f5ae4f304257569ef3b948810816bc87c9146e8c446053539947eedeaa32786"},
- {file = "numpy-1.21.6-cp39-cp39-win_amd64.whl", hash = "sha256:e31f0bb5928b793169b87e3d1e070f2342b22d5245c755e2b81caa29756246c3"},
- {file = "numpy-1.21.6-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd1c8f6bd65d07d3810b90d02eba7997e32abbdf1277a481d698969e921a3be0"},
- {file = "numpy-1.21.6.zip", hash = "sha256:ecb55251139706669fdec2ff073c98ef8e9a84473e51e716211b41aa0f18e656"},
- {file = "numpy-1.23.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b15c3f1ed08df4980e02cc79ee058b788a3d0bef2fb3c9ca90bb8cbd5b8a3a04"},
- {file = "numpy-1.23.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ce242162015b7e88092dccd0e854548c0926b75c7924a3495e02c6067aba1f5"},
- {file = "numpy-1.23.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d7447679ae9a7124385ccf0ea990bb85bb869cef217e2ea6c844b6a6855073"},
- {file = "numpy-1.23.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3119daed207e9410eaf57dcf9591fdc68045f60483d94956bee0bfdcba790953"},
- {file = "numpy-1.23.1-cp310-cp310-win32.whl", hash = "sha256:3ab67966c8d45d55a2bdf40701536af6443763907086c0a6d1232688e27e5447"},
- {file = "numpy-1.23.1-cp310-cp310-win_amd64.whl", hash = "sha256:1865fdf51446839ca3fffaab172461f2b781163f6f395f1aed256b1ddc253622"},
- {file = "numpy-1.23.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeba539285dcf0a1ba755945865ec61240ede5432df41d6e29fab305f4384db2"},
- {file = "numpy-1.23.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7e8229f3687cdadba2c4faef39204feb51ef7c1a9b669247d49a24f3e2e1617c"},
- {file = "numpy-1.23.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68b69f52e6545af010b76516f5daaef6173e73353e3295c5cb9f96c35d755641"},
- {file = "numpy-1.23.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1408c3527a74a0209c781ac82bde2182b0f0bf54dea6e6a363fe0cc4488a7ce7"},
- {file = "numpy-1.23.1-cp38-cp38-win32.whl", hash = "sha256:47f10ab202fe4d8495ff484b5561c65dd59177949ca07975663f4494f7269e3e"},
- {file = "numpy-1.23.1-cp38-cp38-win_amd64.whl", hash = "sha256:37e5ebebb0eb54c5b4a9b04e6f3018e16b8ef257d26c8945925ba8105008e645"},
- {file = "numpy-1.23.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:173f28921b15d341afadf6c3898a34f20a0569e4ad5435297ba262ee8941e77b"},
- {file = "numpy-1.23.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:876f60de09734fbcb4e27a97c9a286b51284df1326b1ac5f1bf0ad3678236b22"},
- {file = "numpy-1.23.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35590b9c33c0f1c9732b3231bb6a72d1e4f77872390c47d50a615686ae7ed3fd"},
- {file = "numpy-1.23.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c4e64dfca659fe4d0f1421fc0f05b8ed1ca8c46fb73d9e5a7f175f85696bb"},
- {file = "numpy-1.23.1-cp39-cp39-win32.whl", hash = "sha256:c2f91f88230042a130ceb1b496932aa717dcbd665350beb821534c5c7e15881c"},
- {file = "numpy-1.23.1-cp39-cp39-win_amd64.whl", hash = "sha256:37ece2bd095e9781a7156852e43d18044fd0d742934833335599c583618181b9"},
- {file = "numpy-1.23.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8002574a6b46ac3b5739a003b5233376aeac5163e5dcd43dd7ad062f3e186129"},
- {file = "numpy-1.23.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d732d17b8a9061540a10fda5bfeabca5785700ab5469a5e9b93aca5e2d3a5fb"},
- {file = "numpy-1.23.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:55df0f7483b822855af67e38fb3a526e787adf189383b4934305565d71c4b148"},
- {file = "numpy-1.23.1.tar.gz", hash = "sha256:d748ef349bfef2e1194b59da37ed5a29c19ea8d7e6342019921ba2ba4fd8b624"},
-]
-packaging = [
- {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
- {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
-]
-pandas = [
- {file = "pandas-1.3.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:62d5b5ce965bae78f12c1c0df0d387899dd4211ec0bdc52822373f13a3a022b9"},
- {file = "pandas-1.3.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adfeb11be2d54f275142c8ba9bf67acee771b7186a5745249c7d5a06c670136b"},
- {file = "pandas-1.3.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a8c055d58873ad81cae290d974d13dd479b82cbb975c3e1fa2cf1920715296"},
- {file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd541ab09e1f80a2a1760032d665f6e032d8e44055d602d65eeea6e6e85498cb"},
- {file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2651d75b9a167cc8cc572cf787ab512d16e316ae00ba81874b560586fa1325e0"},
- {file = "pandas-1.3.5-cp310-cp310-win_amd64.whl", hash = "sha256:aaf183a615ad790801fa3cf2fa450e5b6d23a54684fe386f7e3208f8b9bfbef6"},
- {file = "pandas-1.3.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:344295811e67f8200de2390093aeb3c8309f5648951b684d8db7eee7d1c81fb7"},
- {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:552020bf83b7f9033b57cbae65589c01e7ef1544416122da0c79140c93288f56"},
- {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cce0c6bbeb266b0e39e35176ee615ce3585233092f685b6a82362523e59e5b4"},
- {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d28a3c65463fd0d0ba8bbb7696b23073efee0510783340a44b08f5e96ffce0c"},
- {file = "pandas-1.3.5-cp37-cp37m-win32.whl", hash = "sha256:a62949c626dd0ef7de11de34b44c6475db76995c2064e2d99c6498c3dba7fe58"},
- {file = "pandas-1.3.5-cp37-cp37m-win_amd64.whl", hash = "sha256:8025750767e138320b15ca16d70d5cdc1886e8f9cc56652d89735c016cd8aea6"},
- {file = "pandas-1.3.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fe95bae4e2d579812865db2212bb733144e34d0c6785c0685329e5b60fcb85dd"},
- {file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f261553a1e9c65b7a310302b9dbac31cf0049a51695c14ebe04e4bfd4a96f02"},
- {file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b6dbec5f3e6d5dc80dcfee250e0a2a652b3f28663492f7dab9a24416a48ac39"},
- {file = "pandas-1.3.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3bc49af96cd6285030a64779de5b3688633a07eb75c124b0747134a63f4c05f"},
- {file = "pandas-1.3.5-cp38-cp38-win32.whl", hash = "sha256:b6b87b2fb39e6383ca28e2829cddef1d9fc9e27e55ad91ca9c435572cdba51bf"},
- {file = "pandas-1.3.5-cp38-cp38-win_amd64.whl", hash = "sha256:a395692046fd8ce1edb4c6295c35184ae0c2bbe787ecbe384251da609e27edcb"},
- {file = "pandas-1.3.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd971a3f08b745a75a86c00b97f3007c2ea175951286cdda6abe543e687e5f2f"},
- {file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37f06b59e5bc05711a518aa10beaec10942188dccb48918bb5ae602ccbc9f1a0"},
- {file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c21778a688d3712d35710501f8001cdbf96eb70a7c587a3d5613573299fdca6"},
- {file = "pandas-1.3.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3345343206546545bc26a05b4602b6a24385b5ec7c75cb6059599e3d56831da2"},
- {file = "pandas-1.3.5-cp39-cp39-win32.whl", hash = "sha256:c69406a2808ba6cf580c2255bcf260b3f214d2664a3a4197d0e640f573b46fd3"},
- {file = "pandas-1.3.5-cp39-cp39-win_amd64.whl", hash = "sha256:32e1a26d5ade11b547721a72f9bfc4bd113396947606e00d5b4a5b79b3dcb006"},
- {file = "pandas-1.3.5.tar.gz", hash = "sha256:1e4285f5de1012de20ca46b188ccf33521bff61ba5c5ebd78b4fb28e5416a9f1"},
- {file = "pandas-1.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d51674ed8e2551ef7773820ef5dab9322be0828629f2cbf8d1fc31a0c4fed640"},
- {file = "pandas-1.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:16ad23db55efcc93fa878f7837267973b61ea85d244fc5ff0ccbcfa5638706c5"},
- {file = "pandas-1.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:958a0588149190c22cdebbc0797e01972950c927a11a900fe6c2296f207b1d6f"},
- {file = "pandas-1.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e48fbb64165cda451c06a0f9e4c7a16b534fcabd32546d531b3c240ce2844112"},
- {file = "pandas-1.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f803320c9da732cc79210d7e8cc5c8019aad512589c910c66529eb1b1818230"},
- {file = "pandas-1.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:2893e923472a5e090c2d5e8db83e8f907364ec048572084c7d10ef93546be6d1"},
- {file = "pandas-1.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:24ea75f47bbd5574675dae21d51779a4948715416413b30614c1e8b480909f81"},
- {file = "pandas-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ebc990bd34f4ac3c73a2724c2dcc9ee7bf1ce6cf08e87bb25c6ad33507e318"},
- {file = "pandas-1.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d6c0106415ff1a10c326c49bc5dd9ea8b9897a6ca0c8688eb9c30ddec49535ef"},
- {file = "pandas-1.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78b00429161ccb0da252229bcda8010b445c4bf924e721265bec5a6e96a92e92"},
- {file = "pandas-1.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dfbf16b1ea4f4d0ee11084d9c026340514d1d30270eaa82a9f1297b6c8ecbf0"},
- {file = "pandas-1.4.3-cp38-cp38-win32.whl", hash = "sha256:48350592665ea3cbcd07efc8c12ff12d89be09cd47231c7925e3b8afada9d50d"},
- {file = "pandas-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:605d572126eb4ab2eadf5c59d5d69f0608df2bf7bcad5c5880a47a20a0699e3e"},
- {file = "pandas-1.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a3924692160e3d847e18702bb048dc38e0e13411d2b503fecb1adf0fcf950ba4"},
- {file = "pandas-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:07238a58d7cbc8a004855ade7b75bbd22c0db4b0ffccc721556bab8a095515f6"},
- {file = "pandas-1.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:755679c49460bd0d2f837ab99f0a26948e68fa0718b7e42afbabd074d945bf84"},
- {file = "pandas-1.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41fc406e374590a3d492325b889a2686b31e7a7780bec83db2512988550dadbf"},
- {file = "pandas-1.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d9382f72a4f0e93909feece6fef5500e838ce1c355a581b3d8f259839f2ea76"},
- {file = "pandas-1.4.3-cp39-cp39-win32.whl", hash = "sha256:0daf876dba6c622154b2e6741f29e87161f844e64f84801554f879d27ba63c0d"},
- {file = "pandas-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:721a3dd2f06ef942f83a819c0f3f6a648b2830b191a72bbe9451bcd49c3bd42e"},
- {file = "pandas-1.4.3.tar.gz", hash = "sha256:2ff7788468e75917574f080cd4681b27e1a7bf36461fe968b49a87b5a54d007c"},
-]
-pathspec = [
- {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
- {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
-]
-platformdirs = [
- {file = "platformdirs-2.5.2-py3-none-any.whl", hash = "sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788"},
- {file = "platformdirs-2.5.2.tar.gz", hash = "sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19"},
-]
-pluggy = [
- {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
- {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
-]
-poetry-dynamic-versioning = [
- {file = "poetry-dynamic-versioning-0.17.1.tar.gz", hash = "sha256:304eec793e8b71e3646b365671464c935626781e92dc029e35b01e8fe8c7530c"},
- {file = "poetry_dynamic_versioning-0.17.1-py3-none-any.whl", hash = "sha256:647d41eb554496eb657c7745ab2321b77d1f9c7cf52f7deb6ff674736fbd8ea1"},
-]
-py = [
- {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"},
- {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"},
-]
-pycodestyle = [
- {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
- {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"},
-]
-pyflakes = [
- {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
- {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
-]
-pyparsing = [
- {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
- {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
-]
-pyproject-flake8 = [
- {file = "pyproject-flake8-0.0.1a4.tar.gz", hash = "sha256:8ed9453f1d984cfe94c998f9840275359e29e7f435b8ddd188ae084e2dc1270c"},
- {file = "pyproject_flake8-0.0.1a4-py2.py3-none-any.whl", hash = "sha256:1a8f94e18d08677ee780625049d9d00a9ee823661c6606caab8a383351037a75"},
-]
-pytest = [
- {file = "pytest-7.1.2-py3-none-any.whl", hash = "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c"},
- {file = "pytest-7.1.2.tar.gz", hash = "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"},
-]
-pytest-cov = [
- {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"},
- {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"},
-]
-python-dateutil = [
- {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
- {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
-]
-pytz = [
- {file = "pytz-2022.1-py2.py3-none-any.whl", hash = "sha256:e68985985296d9a66a881eb3193b0906246245294a881e7c8afe623866ac6a5c"},
- {file = "pytz-2022.1.tar.gz", hash = "sha256:1e760e2fe6a8163bc0b3d9a19c4f84342afa0a2affebfaa84b01b978a02ecaa7"},
-]
-requests = [
- {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"},
- {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"},
-]
-six = [
- {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
- {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
-]
-tomli = [
- {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
- {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
-]
-tomlkit = [
- {file = "tomlkit-0.11.1-py3-none-any.whl", hash = "sha256:1c5bebdf19d5051e2e1de6cf70adfc5948d47221f097fcff7a3ffc91e953eaf5"},
- {file = "tomlkit-0.11.1.tar.gz", hash = "sha256:61901f81ff4017951119cd0d1ed9b7af31c821d6845c8c477587bbdcd5e5854e"},
-]
-typed-ast = [
- {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"},
- {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"},
- {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"},
- {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"},
- {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"},
- {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"},
- {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"},
- {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"},
- {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"},
- {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"},
- {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"},
- {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"},
- {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"},
- {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"},
- {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"},
- {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"},
- {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"},
- {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"},
- {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"},
- {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"},
- {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"},
- {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"},
- {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"},
- {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"},
-]
-types-python-dateutil = [
- {file = "types-python-dateutil-2.8.19.tar.gz", hash = "sha256:bfd3eb39c7253aea4ba23b10f69b017d30b013662bb4be4ab48b20bbd763f309"},
- {file = "types_python_dateutil-2.8.19-py3-none-any.whl", hash = "sha256:6284df1e4783d8fc6e587f0317a81333856b872a6669a282f8a325342bce7fa8"},
-]
-types-requests = [
- {file = "types-requests-2.28.7.tar.gz", hash = "sha256:36385618d4bd2ee3211d4d2e78b44f067ceb5984865c0f253f3c9ecb964526cf"},
- {file = "types_requests-2.28.7-py3-none-any.whl", hash = "sha256:38015d310d13cf7d4d712d2507178349e13fd5dab85259dab7d9a9884c2c9c2a"},
-]
-types-urllib3 = [
- {file = "types-urllib3-1.26.20.tar.gz", hash = "sha256:1fb6e2af519a7216a19dd6be8cd2ee787b402a754ccb4a13ca1c0e5b202aea5a"},
- {file = "types_urllib3-1.26.20-py3-none-any.whl", hash = "sha256:6249b6223226cb2012db3b4ff6945c9cb0e12ece9b24f5e29787c4f05028a979"},
-]
-typing-extensions = [
- {file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"},
- {file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"},
-]
-urllib3 = [
- {file = "urllib3-1.26.11-py2.py3-none-any.whl", hash = "sha256:c33ccba33c819596124764c23a97d25f32b28433ba0dedeb77d873a38722c9bc"},
- {file = "urllib3-1.26.11.tar.gz", hash = "sha256:ea6e8fb210b19d950fab93b60c9009226c63a28808bc8386e05301e25883ac0a"},
-]
-zipp = [
- {file = "zipp-3.8.1-py3-none-any.whl", hash = "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"},
- {file = "zipp-3.8.1.tar.gz", hash = "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2"},
-]
+isort = []
+jinja2 = []
+markupsafe = []
+mccabe = []
+mypy = []
+mypy-extensions = []
+numpy = []
+packaging = []
+pandas = []
+pathspec = []
+platformdirs = []
+pluggy = []
+poetry-dynamic-versioning = []
+py = []
+pycodestyle = []
+pyflakes = []
+pyparsing = []
+pyproject-flake8 = []
+pytest = []
+pytest-cov = []
+python-dateutil = []
+pytz = []
+requests = []
+six = []
+tenacity = []
+tomli = []
+tomlkit = []
+typed-ast = []
+types-python-dateutil = []
+types-requests = []
+types-urllib3 = []
+typing-extensions = []
+urllib3 = []
+zipp = []
diff --git a/pyproject.toml b/pyproject.toml
index e164a0a..20d0f80 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,6 +39,8 @@ numpy = [
{ version = "^1.23.1", python = ">=3.8" }
]
urllib3 = "^1.24.3"
+tomli = { version = "^2.0.1", python = ">=3.7.1,<3.11" }
+tenacity = "^8.0.1"
[tool.poetry.dev-dependencies]
black = "^22.6.0"
@@ -60,3 +62,6 @@ max-complexity = 18
ignore = "E203,E266,W503,"
per-file-ignores = "__init__.py:F401"
exclude = ".venv"
+
+[tool.isort]
+profile = "black"
| feat: support auth_user API endpoint to fetch a refresh token
J-Quants API に追加される「リフレッシュトークン取得API」に対応したいです。
ref. https://jpx.gitbook.io/j-quants-api/api-reference/refreshtoken
加えて、各種サンプルの更新を実施したいです。
※ サンプル更新については実装の目処がついた段階で別のissueに切り分けますが、一旦はこのissueで管理します。
| APIの仕様としてリフレッシュトークンもIDトークンもいつ期限切れになるかをレスポンスに含めて欲しいなと思いました。
@tknhs フィードバックをありがとうございます!J-Quants API 運営に伝えました!
@tknhs
コメントありがとうございます!
もう少し詳しく要望を伺いたいのですが、
「いつ期限切れになるかをレスポンスに含めて」というのは、
リフレッシュトークン取得APIをコールした際に、今はresponseにrefreshTokenのみですが、
このresponseにexpiredIn等で期限が切れる日を返してほしいということでしょうか?
```
Returnイメージ
{
"refreshToken": "xxxxxx",
"expiredIn": xxxxxxx(何年何月等もしくはUnixTimeを返す)
}
```
@s-meitoma
> このresponseにexpiredIn等で期限が切れる日を返してほしいということでしょうか?
はい、 `Returnイメージ` に書いていただいた通りの認識で問題ないです。
個人的には `Unixtime` だといろいろ考慮しなくていいなと思ってます。
また、 `expired in` だと切れるまでの時間を表すものになるかなと思いますが、
個人的には切れる日時を表していただくほうが扱いやすいと感じますので `expired at` だと嬉しいです。
@tknhs
ご回答ありがとうございます。
ご要望の意図は承知いたしました。
対応検討し、可能であればAPI内で対応したいと思います。
少々お待ち下さい。
ユーザー認証情報とrefresh_tokenを下記の優先順位で取得することを考えています。
既存のサンプルで利用していた `jquantsapi-key.txt` にも対応する方向で考えていますが、落とすかもしれません。
## mail & password
```
Priority: High
1. mail_address & password are supplied as parameters
2. ENV JQUANTS_API_MAIL_ADDRESS & JQUANTS_API_PASSWORD exist
3. mail_address & password exist in ${PWD}/jquants-api.toml if 'google.colab' not in sys.modules else '/content'
4. mail_address & password exist in ${HOME}/.jquants-api/jquants-api.toml if 'google.colab' not in sys.modules else '/content/drive/MyDrive/drive_ws/secret/jquants-api.toml'
Priority: Low
```
## refresh_token
```
Priority: High
1. refresh_token is supplied as a parameter
2. ENV JQUANTS_API_REFRESH_TOKEN exists
3. refresh_token exists in ${PWD}/jquants-api.toml if 'google.colab' not in sys.modules else '/content'
4. refresh_token exists in ${HOME}/.jquants-api/jquants-api.toml if 'google.colab' not in sys.modules else '/content/drive/MyDrive/drive_ws/secret/jquants-api.toml'
5. refresh_token exists in '/content/drive/MyDrive/drive_ws/secret/jquantsapi-key.txt' if 'google.colab' in sys.modules
``` | 2022-09-08T14:07:01 | 0.0 | [] | [] |
||
J-Quants/jquants-api-client-python | J-Quants__jquants-api-client-python-15 | 7be06db4cdbc945abfcf375399a9f5752c920471 | diff --git a/jquantsapi/client.py b/jquantsapi/client.py
index d29f46c..f1cff1d 100644
--- a/jquantsapi/client.py
+++ b/jquantsapi/client.py
@@ -1,6 +1,6 @@
import os
from datetime import datetime
-from typing import Dict
+from typing import Dict, Union
import pandas as pd # type: ignore
import requests
@@ -8,6 +8,8 @@
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
+DatetimeLike = Union[datetime, pd.Timestamp, str]
+
class Client:
"""
@@ -280,8 +282,8 @@ def get_prices_daily_quotes(
def get_price_range(
self,
- start_dt: datetime = datetime(2017, 1, 1, tzinfo=tz.gettz("Asia/Tokyo")),
- end_dt: datetime = datetime.now(tz.gettz("Asia/Tokyo")),
+ start_dt: DatetimeLike = "20170101",
+ end_dt: DatetimeLike = datetime.now(),
) -> pd.DataFrame:
"""
全銘柄の株価情報を日付範囲指定して取得
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000..ebc2ba8
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+[mypy-pandas]
+ignore_missing_imports = True
| 年月日を引数にとるメソッドについて、その型を統一したい
(大変有用な製品の開発・運用をありがとうございます!陰ながら応援させていただきます。)
## 背景
後述の例の通り、始まりと終わりの年月日を引数として要求するメソッドが複数存在する。それらについて、あるものは `datetime` をとり、別のものは `str` をとる。
なお、 `str` の場合、`yyyymmdd` の形式で8桁の数値を `str` で要求する。
## 提案
### 提案の内容
下記の通り、年月日を要求するメソッドについて、その型を `yyyymmdd` 形式の `str` に統一する事を提案する。
一例として、下記の通り修正のイメージを示す。
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L281-L285
```diff
def get_price_range(
self,
- start_dt: datetime = datetime(2017, 1, 1, tzinfo=tz.gettz("Asia/Tokyo")),
- end_dt: datetime = datetime.now(tz.gettz("Asia/Tokyo")),
+ from_yyyymmdd: str = '20170101',
+ to_yyyymmdd_dt: str = None,
) -> pd.DataFrame:
```
### 提案の理由
提案の理由は2つある。
まず1つ目として、型を統一したい理由は、同じ目的で使用する値について、メソッドによって異なる型を要求するのは、混乱の元になるためである。
次に2つ目として、 `datatime` ではなく `str` を選択した理由は、 `datatime` で受け取った値は、 [`pd.date_range()`](https://pandas.pydata.org/docs/reference/api/pandas.date_range.html)に渡されるのみで、他に変換等されていないためである。
`pd.date_range()` は8桁の数値で `str` を渡しても、 `datetime` と同じように解釈してくれる。このため、引数として受け取った `str` をそのまま `pd.date_range()` に渡しても動作は変わらないと考えられる。よって、 `datetime` で引数として受け取らなくても、 `str` で必要な情報を得られると考えた。
### 実際の動作
ここで8桁の数値で `str` を渡したときの動作を確認する。以下は、 `pd.date_range()` に対して `yyyymmdd` 形式の8桁の数値で `str` を渡した際の挙動である。
```python
pd.date_range('20220101', '20220214')
# =>
DatetimeIndex(['2022-01-01', '2022-01-02', '2022-01-03', '2022-01-04',
'2022-01-05', '2022-01-06', '2022-01-07', '2022-01-08',
'2022-01-09', '2022-01-10', '2022-01-11', '2022-01-12',
'2022-01-13', '2022-01-14', '2022-01-15', '2022-01-16',
'2022-01-17', '2022-01-18', '2022-01-19', '2022-01-20',
'2022-01-21', '2022-01-22', '2022-01-23', '2022-01-24',
'2022-01-25', '2022-01-26', '2022-01-27', '2022-01-28',
'2022-01-29', '2022-01-30', '2022-01-31', '2022-02-01',
'2022-02-02', '2022-02-03', '2022-02-04', '2022-02-05',
'2022-02-06', '2022-02-07', '2022-02-08', '2022-02-09',
'2022-02-10', '2022-02-11', '2022-02-12', '2022-02-13',
'2022-02-14'],
dtype='datetime64[ns]', freq='D')
```
Google colaboratoryでの実行結果:
<img width="538" alt="image" src="https://user-images.githubusercontent.com/2360094/183254194-10a5f6e4-dc52-4889-9c37-fd93fefc16cc.png">
上図より、2022年1月1日から同年2月14日までの範囲を、意図通り取り出せていることがわかる。
### 検討が必要な点
現在 `datetime` として要求するメソッドには、デフォルトと引数としてタイムゾーンが与えられている `datetime` がセットされている。これに特別な理由がある場合は、 `str` を渡すと想定外の挙動を示す可能性がある。
## 年月日を引数として受け取る例
### `str` の箇所
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L229-L231
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L309
### `datetime` の箇所
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L283-L284
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L422-L423
年月日を引数にとるメソッドについて、その型を統一したい
(大変有用な製品の開発・運用をありがとうございます!陰ながら応援させていただきます。)
## 背景
後述の例の通り、始まりと終わりの年月日を引数として要求するメソッドが複数存在する。それらについて、あるものは `datetime` をとり、別のものは `str` をとる。
なお、 `str` の場合、`yyyymmdd` の形式で8桁の数値を `str` で要求する。
## 提案
### 提案の内容
下記の通り、年月日を要求するメソッドについて、その型を `yyyymmdd` 形式の `str` に統一する事を提案する。
一例として、下記の通り修正のイメージを示す。
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L281-L285
```diff
def get_price_range(
self,
- start_dt: datetime = datetime(2017, 1, 1, tzinfo=tz.gettz("Asia/Tokyo")),
- end_dt: datetime = datetime.now(tz.gettz("Asia/Tokyo")),
+ from_yyyymmdd: str = '20170101',
+ to_yyyymmdd_dt: str = None,
) -> pd.DataFrame:
```
### 提案の理由
提案の理由は2つある。
まず1つ目として、型を統一したい理由は、同じ目的で使用する値について、メソッドによって異なる型を要求するのは、混乱の元になるためである。
次に2つ目として、 `datetime` ではなく `str` を選択した理由は、 `datetime` で受け取った値は、 [`pd.date_range()`](https://pandas.pydata.org/docs/reference/api/pandas.date_range.html)に渡されるのみで、他に変換等されていないためである。
`pd.date_range()` は8桁の数値で `str` を渡しても、 `datetime` と同じように解釈してくれる。このため、引数として受け取った `str` をそのまま `pd.date_range()` に渡しても動作は変わらないと考えられる。よって、 `datetime` で引数として受け取らなくても、 `str` で必要な情報を得られると考えた。
### 実際の動作
ここで8桁の数値で `str` を渡したときの動作を確認する。以下は、 `pd.date_range()` に対して `yyyymmdd` 形式の8桁の数値で `str` を渡した際の挙動である。
```python
pd.date_range('20220101', '20220214')
# =>
DatetimeIndex(['2022-01-01', '2022-01-02', '2022-01-03', '2022-01-04',
'2022-01-05', '2022-01-06', '2022-01-07', '2022-01-08',
'2022-01-09', '2022-01-10', '2022-01-11', '2022-01-12',
'2022-01-13', '2022-01-14', '2022-01-15', '2022-01-16',
'2022-01-17', '2022-01-18', '2022-01-19', '2022-01-20',
'2022-01-21', '2022-01-22', '2022-01-23', '2022-01-24',
'2022-01-25', '2022-01-26', '2022-01-27', '2022-01-28',
'2022-01-29', '2022-01-30', '2022-01-31', '2022-02-01',
'2022-02-02', '2022-02-03', '2022-02-04', '2022-02-05',
'2022-02-06', '2022-02-07', '2022-02-08', '2022-02-09',
'2022-02-10', '2022-02-11', '2022-02-12', '2022-02-13',
'2022-02-14'],
dtype='datetime64[ns]', freq='D')
```
Google colaboratoryでの実行結果:
<img width="538" alt="image" src="https://user-images.githubusercontent.com/2360094/183254194-10a5f6e4-dc52-4889-9c37-fd93fefc16cc.png">
上図より、2022年1月1日から同年2月14日までの範囲を、意図通り取り出せていることがわかる。
### 検討が必要な点
現在 `datetime` として要求するメソッドには、デフォルト引数として、タイムゾーンが与えられている `datetime` がセットされている。これに特別な理由がある場合は、 `str` を渡すと想定外の挙動を示す可能性がある。
## 年月日を引数として受け取る例
### `str` の箇所
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L229-L231
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L309
### `datetime` の箇所
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L283-L284
https://github.com/J-Quants/jquants-api-client-python/blob/7be06db4cdbc945abfcf375399a9f5752c920471/jquantsapi/client.py#L422-L423
| 2022-08-09T14:30:35 | 0.0 | [] | [] |
|||
aristoteleo/spateo-release | aristoteleo__spateo-release-81 | 339e47effdb95e123e66cb0d107dfb8f8180607c | diff --git a/spateo/preprocessing/segmentation/label.py b/spateo/preprocessing/segmentation/label.py
index 53e5cd39..bec820a2 100755
--- a/spateo/preprocessing/segmentation/label.py
+++ b/spateo/preprocessing/segmentation/label.py
@@ -1,14 +1,17 @@
"""Functions for use when labeling individual nuclei/cells, after obtaining a
mask.
"""
+import math
from typing import Dict, Optional, Union
import cv2
import numpy as np
from anndata import AnnData
+from joblib import Parallel, delayed
from numba import njit
from scipy.sparse import issparse, spmatrix
from skimage import filters, measure, segmentation
+from tqdm import tqdm
from ...configuration import SKM
from ...errors import PreprocessingError
@@ -169,7 +172,13 @@ def watershed(
SKM.set_layer_data(adata, out_layer, labels)
-def _expand_labels(labels: np.ndarray, distance: int, max_area: int, mask: Optional[np.ndarray] = None) -> np.ndarray:
+def _expand_labels(
+ labels: np.ndarray,
+ distance: int,
+ max_area: int,
+ mask: Optional[np.ndarray] = None,
+ n_threads: int = 1,
+) -> np.ndarray:
"""Expand labels up to a certain distance, while ignoring labels that are
above a certain size.
@@ -179,49 +188,73 @@ def _expand_labels(labels: np.ndarray, distance: int, max_area: int, mask: Optio
of iterations of distance 1 dilations.
max_area: Maximum area of each label.
mask: Only expand within the provided mask.
+ n_threads: Number of threads to use.
Returns:
New label array with expanded labels.
"""
@njit
- def _expand(X, areas, kernel, max_area, n_iter, mask):
- pad = kernel.shape[0] // 2
- expanded = np.zeros((X.shape[0] + 2 * pad, X.shape[1] + 2 * pad), dtype=X.dtype)
- expanded[pad:-pad, pad:-pad] = X
- for _ in range(n_iter):
+ def _expand(X, areas, max_area, mask, start_i, end_i):
+ expanded = X[start_i:end_i].copy()
+ new_areas = np.zeros_like(areas)
+ n_neighbors = 0
+ neighbors = np.zeros(4, dtype=X.dtype)
+ for i in range(start_i, end_i):
+ for j in range(X.shape[1]):
+ if X[i, j] > 0 or not mask[i, j]:
+ continue
+
+ if i - 1 >= 0:
+ neighbors[n_neighbors] = X[i - 1, j]
+ n_neighbors += 1
+ if i + 1 < X.shape[0]:
+ neighbors[n_neighbors] = X[i + 1, j]
+ n_neighbors += 1
+ if j - 1 >= 0:
+ neighbors[n_neighbors] = X[i, j - 1]
+ n_neighbors += 1
+ if j + 1 < X.shape[1]:
+ neighbors[n_neighbors] = X[i, j + 1]
+ n_neighbors += 1
+ unique = np.unique(neighbors[:n_neighbors])
+ unique_labels = unique[unique > 0]
+ if len(unique_labels) == 1:
+ label = unique_labels[0]
+ if areas[label] < max_area:
+ expanded[i - start_i, j] = label
+ new_areas[label] += 1
+ n_neighbors = 0
+ return expanded, new_areas
+
+ areas = np.bincount(labels.flatten())
+ mask = np.ones(labels.shape, dtype=bool) if mask is None else mask
+ step = math.ceil(labels.shape[0] / n_threads)
+ expanded = labels.copy()
+ with Parallel(n_jobs=n_threads) as parallel:
+ for _ in tqdm(range(distance), desc="Expanding"):
new_areas = np.zeros_like(areas)
- _expanded = np.zeros_like(expanded)
- for _i in range(X.shape[0]):
- i = _i + pad
- for _j in range(X.shape[1]):
- j = _j + pad
- if expanded[i, j] > 0:
- _expanded[i, j] = expanded[i, j]
- continue
- if not mask[_i, _j]:
- continue
-
- neighbors = expanded[i - pad : i + pad + 1, j - pad : j + pad + 1]
- unique = np.unique(neighbors * kernel)
- unique_labels = unique[unique > 0]
- if len(unique_labels) == 1:
- label = unique_labels[0]
- if areas[label] < max_area:
- _expanded[i, j] = label
- new_areas[label] += 1
- expanded = _expanded
+ subis = range(0, labels.shape[0], step)
+ sublabels = []
+ submasks = []
+ for i in subis:
+ sl = slice(max(0, i - 1), min(labels.shape[0], i + step + 1))
+ sublabels.append(expanded[sl])
+ submasks.append(mask[sl])
+ for i, (_expanded, _new_areas) in zip(
+ subis,
+ parallel(
+ delayed(_expand)(
+ sl, areas, max_area, sm, int(i - 1 >= 0), sl.shape[0] - int(i + step + 1 < labels.shape[0])
+ )
+ for i, sl, sm in zip(subis, sublabels, submasks)
+ ),
+ ):
+ expanded[i : i + step] = _expanded
+ new_areas += _new_areas
areas += new_areas
- return expanded[pad:-pad, pad:-pad]
- return _expand(
- labels,
- np.bincount(labels.flatten()),
- utils.circle(3),
- max_area,
- distance,
- np.ones(labels.shape, dtype=bool) if mask is None else mask,
- )
+ return expanded
def expand_labels(
@@ -231,6 +264,7 @@ def expand_labels(
max_area: int = 400,
mask_layer: Optional[str] = None,
out_layer: Optional[str] = None,
+ n_threads: int = 1,
):
"""Expand labels up to a certain distance.
@@ -241,14 +275,17 @@ def expand_labels(
distance: Distance to expand. Internally, this is used as the number
of iterations of distance 1 dilations.
max_area: Maximum area of each label.
+ mask_layer: Layer containing mask to restrict expansion to within.
out_layer: Layer to save results. By default, uses `{layer}_labels_expanded`.
+ n_threads: Number of threads to use.
"""
label_layer = SKM.gen_new_layer_key(layer, SKM.LABELS_SUFFIX)
if label_layer not in adata.layers:
label_layer = layer
labels = SKM.select_layer_data(adata, label_layer)
+ mask = SKM.select_layer_data(adata, mask_layer) if mask_layer else None
lm.main_info("Expanding labels.")
- expanded = _expand_labels(labels, distance, max_area)
+ expanded = _expand_labels(labels, distance, max_area, mask=mask, n_threads=n_threads)
out_layer = out_layer or SKM.gen_new_layer_key(label_layer, SKM.EXPANDED_SUFFIX)
SKM.set_layer_data(adata, out_layer, expanded)
| speed up expand_labels
Speed up `st.pp.segmentation.expand_labels` by splitting the image into horizontal strips (with appropriate overlap), apply, and then combine.
| 2022-04-08T17:16:28 | 0.0 | [] | [] |
|||
aristoteleo/spateo-release | aristoteleo__spateo-release-21 | 1b0552ec8a88e5031b55bcc31c886593a932eebc | diff --git a/requirements.txt b/requirements.txt
index 8a77e9a7..7e274df5 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,6 +5,7 @@ dynamo-release>=0.9.2
fbgbp>=0.1.0
geopandas>=0.10.2
loompy>=3.0.5
+logging-release>=0.0.4
matplotlib>=3.1.0
numba>=0.46.0
numpy>=1.18.1
diff --git a/spateo/io/bgi.py b/spateo/io/bgi.py
old mode 100755
new mode 100644
index 14bf8c7b..63abec77
--- a/spateo/io/bgi.py
+++ b/spateo/io/bgi.py
@@ -15,6 +15,9 @@
import geopandas as gpd
import numpy as np
import pandas as pd
+
+from scipy.sparse import csr_matrix, spmatrix
+from typing import Optional, Tuple
import skimage.io
from anndata import AnnData
from scipy.sparse import csr_matrix, spmatrix
diff --git a/spateo/io/image_utils.py b/spateo/io/image_utils.py
old mode 100755
new mode 100644
diff --git a/spateo/preprocessing/segmentation/density.py b/spateo/preprocessing/segmentation/density.py
old mode 100755
new mode 100644
diff --git a/spateo/tools/__init__.py b/spateo/tools/__init__.py
index 08ba79d1..36ab85bc 100755
--- a/spateo/tools/__init__.py
+++ b/spateo/tools/__init__.py
@@ -4,6 +4,11 @@
find_all_cluster_degs,
)
+from .find_clusters import (
+ find_cluster_spagcn,
+ find_cluster_scc,
+)
+
# from .image import add_image_layer
# from .interpolation_utils import *
from .interpolation import interpolation_SparseVFC
diff --git a/spateo/tools/find_clusters.py b/spateo/tools/find_clusters.py
new file mode 100644
index 00000000..fa92d478
--- /dev/null
+++ b/spateo/tools/find_clusters.py
@@ -0,0 +1,216 @@
+import anndata
+import cv2
+import dynamo as dyn
+import numpy as np
+import pandas as pd
+import random
+import torch
+
+from typing import Optional, Tuple
+
+from .find_clusters_utils import *
+from ..preprocessing.filter import filter_cells, filter_genes
+
+
+def find_cluster_spagcn(
+ adata: anndata.AnnData,
+ n_clusters: int,
+ p: float = 0.5,
+ s: int = 1,
+ b: int = 49,
+ refine_shape: Optional[str] = None,
+ his_img_path: Optional[str] = None,
+ total_umi: Optional[str] = None,
+ x_pixel: str = None,
+ y_pixel: str = None,
+ x_array: str = None,
+ y_array: str = None,
+ seed: int = 100,
+ copy: bool = False,
+) -> Optional[anndata.AnnData]:
+ """Function to find clusters with spagcn.
+
+ Args:
+ adata: an Anndata object, after normalization.
+ n_clusters: Desired number of clusters.
+ p: parameter `p` in spagcn algorithm. See `SpaGCN` for details. Defaults to 0.5.
+ s: alpha to control the color scale in calculating adjacent matrix. Defaults to 1.
+ b: beta to control the range of neighbourhood when calculate grey value for one spot in calculating adjacent matrix. Defaults to 49.
+ refine_shape: Smooth the spatial domains with given spatial topology, "hexagon" for Visium data, "square" for ST data. Defaults to None.
+ his_img_path: The file path of histology image used to calculate adjacent matrix in spagcn algorithm. Defaults to None.
+ total_umi: By providing the key(colname) in `adata.obs` which contains total UMIs(counts) for each spot, the function use the total counts as
+ a grayscale image when histology image is not provided. Ignored if his_img_path is not `None`. Defaults to "total_umi".
+ x_pixel: The key(colname) in `adata.obs` which contains corresponding x-pixels in histology image. Defaults to None.
+ y_pixel: The key(colname) in `adata.obs` which contains corresponding y-pixels in histology image. Defaults to None.
+ x_array: The key(colname) in `adata.obs` which contains corresponding x-coordinates. Defaults to None.
+ y_array: The key(colname) in `adata.obs` which contains corresponding y-coordinates. Defaults to None.
+ seed: Global seed for `random`, `torch`, `numpy`. Defaults to 100.
+ copy: Whether to return a new deep copy of `adata` instead of updating `adata` object passed in arguments. Defaults to False.
+
+ Returns:
+ class:`~anndata.AnnData`: An `~anndata.AnnData` object with cluster info in "spagcn_pred", and in "spagcn_pred_refined" if `refine_shape` is set.
+ The adjacent matrix used in spagcn algorithm is saved in `adata.uns["adj_spagcn"]`.
+ """
+
+ if x_array is None:
+ x_array = [i[0] for i in adata.obsm["X_spatial"]]
+ else:
+ x_array = adata.obs[x_array].tolist()
+
+ if y_array is None:
+ y_array = [i[1] for i in adata.obsm["X_spatial"]]
+ else:
+ y_array = adata.obs[y_array].tolist()
+
+ if x_pixel is None:
+ x_pixel = [int(i) for i in x_array]
+ else:
+ x_pixel = adata.obs[x_pixel].tolist()
+
+ if y_pixel is None:
+ y_pixel = [int(i) for i in y_array]
+ else:
+ y_pixel = adata.obs[y_pixel].tolist()
+
+ s = 1
+ b = 49
+
+ if his_img_path is None:
+ if total_umi is None:
+ adj = calculate_adj_matrix(x=x_array, y=y_array, histology=False)
+ else:
+ total_umi = adata.obs[total_umi].tolist()
+ total_umi = [int(x / max(total_umi) * 254 + 1) for x in total_umi]
+ total_umi_mtx = pd.DataFrame({"x_pos": x_pixel, "y_pos": y_pixel, "n_umis": total_umi})
+ total_umi_mtx = total_umi_mtx.pivot(index="x_pos", columns="y_pos", values="n_umis").fillna(1).to_numpy()
+ umi_gs_img = np.dstack((total_umi_mtx, total_umi_mtx, total_umi_mtx)).astype(int)
+ adj = calculate_adj_matrix(
+ x=x_array,
+ y=y_array,
+ x_pixel=x_pixel,
+ y_pixel=y_pixel,
+ image=umi_gs_img,
+ beta=b,
+ alpha=s,
+ histology=True,
+ )
+ else:
+ img = cv2.imread(his_img_path)
+ adj = calculate_adj_matrix(
+ x=x_array,
+ y=y_array,
+ x_pixel=x_pixel,
+ y_pixel=y_pixel,
+ image=img,
+ beta=b,
+ alpha=s,
+ histology=True,
+ )
+
+ adata.uns["adj_spagcn"] = adj
+
+ l = search_l(p, adj, start=0.01, end=1000, tol=0.01, max_run=100)
+
+ # Set seed
+ r_seed = t_seed = n_seed = seed
+
+ # Seaech for suitable resolution
+ res = search_res(
+ adata,
+ adj,
+ l,
+ n_clusters,
+ start=0.7,
+ step=0.1,
+ tol=5e-3,
+ lr=0.05,
+ max_epochs=20,
+ r_seed=r_seed,
+ t_seed=t_seed,
+ n_seed=n_seed,
+ )
+
+ clf = SpaGCN()
+ clf.set_l(l)
+
+ # Set seed
+ random.seed(r_seed)
+ torch.manual_seed(t_seed)
+ np.random.seed(n_seed)
+
+ # Run
+ clf.train(
+ adata,
+ adj,
+ init_spa=True,
+ init="louvain",
+ res=res,
+ tol=5e-3,
+ lr=0.05,
+ max_epochs=200,
+ )
+ y_pred, prob = clf.predict()
+ adata.obs["spagcn_pred"] = y_pred
+ adata.obs["spagcn_pred"] = adata.obs["spagcn_pred"].astype("category")
+ adata.obs["spagcn_pred"] = [str(i) for i in adata.obs["spagcn_pred"]]
+
+ if refine_shape is not None:
+ # Do cluster refinement(optional)
+ adj_2d = calculate_adj_matrix(x=x_array, y=y_array, histology=False)
+ refined_pred = refine(
+ sample_id=adata.obs.index.tolist(),
+ pred=adata.obs["spagcn_pred"].tolist(),
+ dis=adj_2d,
+ shape=refine_shape,
+ )
+ adata.obs["spagcn_pred_refined"] = refined_pred
+ adata.obs["spagcn_pred_refined"] = adata.obs["spagcn_pred_refined"].astype("category")
+
+ if copy:
+ return adata
+ return None
+
+
+def find_cluster_scc(
+ adata: anndata.AnnData,
+ min_cells: int = 100,
+ spatial_key: str = "spatial",
+ e_neigh: int = 30,
+ s_neigh: int = 6,
+ resolution: Optional[float] = None,
+ copy: bool = False,
+) -> Optional[anndata.AnnData]:
+ """Spatially constrained clustering (scc) to identify continuous tissue domains.
+
+ Args:
+ adata: an Anndata object, after normalization.
+ min_cells: minimal number of cells the gene expressed.
+ spatial_key: the key in `.obsm` that corresponds to the spatial coordinate of each bucket.
+ e_neigh: the number of nearest neighbor in gene expression space.
+ s_neigh: the number of nearest neighbor in physical space.
+ resolution: the resolution parameter of the leiden clustering algorithm.
+ copy: Whether to return a new deep copy of `adata` instead of updating `adata` object passed in arguments.
+ Defaults to False.
+
+ Returns:
+ Depends on the argument `copy` return either an `~anndata.AnnData` object with cluster info in "scc_e_{a}_s{b}"
+ or None.
+ """
+
+ filter_genes(adata, min_cells=min_cells)
+ adata.uns["pp"] = {}
+ dyn.pp.normalize_cell_expr_by_size_factors(adata, layers="X")
+ dyn.pp.log1p(adata)
+ dyn.pp.pca_monocle(adata, n_pca_components=30, pca_key="X_pca")
+
+ dyn.tl.neighbors(adata, n_neighbors=e_neigh)
+ dyn.tl.neighbors(adata, n_neighbors=s_neigh, basis=spatial_key, result_prefix="spatial")
+ conn = adata.obsp["connectivities"].copy()
+ conn.data[conn.data > 0] = 1
+ adj = conn + adata.obsp["spatial_connectivities"]
+ adj.data[adj.data > 0] = 1
+ dyn.tl.leiden(adata, adj_matrix=adj, resolution=resolution, result_key="scc_e" + str(e_neigh) + "_s" + str(s_neigh))
+
+ if copy:
+ return adata
+ return None
diff --git a/spateo/tools/find_clusters_utils.py b/spateo/tools/find_clusters_utils.py
new file mode 100644
index 00000000..84f508cd
--- /dev/null
+++ b/spateo/tools/find_clusters_utils.py
@@ -0,0 +1,681 @@
+import math
+import random
+
+import anndata as ad
+import dynamo as dyn
+import lack
+import numpy as np
+import numba
+import pandas as pd
+import torch
+import torch.nn as nn
+
+from scipy.sparse import issparse
+from sklearn.cluster import KMeans
+from sklearn.decomposition import PCA
+
+slog = lack.LoggerManager(namespace="spateo")
+
+
+def calculate_adj_matrix(x, y, x_pixel=None, y_pixel=None, image=None, beta=49, alpha=1, histology=True):
+ """(Part of spagcn algorithm) Function to calculate adjacent matrix according to spatial coordinate and image pixels.
+
+ Args:
+ x (list): a list which contains corresponding x-coordinates for the spots, spatialy.
+ y (list): a list which contains corresponding y-coordinates for the spots, spatialy.
+ x_pixel (list, optional): a list which contains corresponding x-pixels for the spots, in histology image. Defaults to None.
+ y_pixel (list, optional): a list which contains corresponding y-pixels for the spots, in histology image. Defaults to None.
+ image (class: `numpy.ndarray`, optional): the image(typically histology image) in `numpy.ndarray` format(can be obtained by cv2.imread). Defaults to None.
+ beta (int, optional): to control the range of neighbourhood when calculate grey value for one spot. Defaults to 49.
+ alpha (int, optional): to control the color scale. Defaults to 1.
+ histology (bool, optional): if the image is histological. Defaults to True.
+
+ Returns:
+ class: `numpy.ndarray`: the calculated adjacent matrix.
+ """
+
+ if histology:
+ assert (x_pixel is not None) & (x_pixel is not None) & (image is not None)
+ assert (len(x) == len(x_pixel)) & (len(y) == len(y_pixel))
+ slog.main_info("Calculateing adj matrix using histology image...")
+ beta_half = round(beta / 2)
+ g = []
+ max_x = image.shape[0]
+ max_y = image.shape[1]
+ for i in range(len(x_pixel)):
+ nbs = image[
+ max(0, x_pixel[i] - beta_half) : min(max_x, x_pixel[i] + beta_half + 1),
+ max(0, y_pixel[i] - beta_half) : min(max_y, y_pixel[i] + beta_half + 1),
+ ]
+ g.append(np.mean(np.mean(nbs, axis=0), axis=0))
+ c0, c1, c2 = [], [], []
+ for i in g:
+ c0.append(i[0])
+ c1.append(i[1])
+ c2.append(i[2])
+ c0 = np.array(c0)
+ c1 = np.array(c1)
+ c2 = np.array(c2)
+ slog.main_info(f"Var of c0,c1,c2 = {np.var(c0)}, {np.var(c1)}, {np.var(c2)}")
+ c3 = (c0 * np.var(c0) + c1 * np.var(c1) + c2 * np.var(c2)) / (np.var(c0) + np.var(c1) + np.var(c2))
+ c4 = (c3 - np.mean(c3)) / np.std(c3)
+ z_scale = np.max([np.std(x), np.std(y)]) * alpha
+ z = c4 * z_scale
+ z = z.tolist()
+ slog.main_info(f"Var of x,y,z = {np.var(x)}, {np.var(y)}, {np.var(z)}")
+ X = np.array([x, y, z]).T.astype(np.float32)
+ else:
+ slog.main_info("Calculateing adj matrix using xy only...")
+ X = np.array([x, y]).T.astype(np.float32)
+ n = X.shape[0]
+ adj = np.empty((n, n), dtype=np.float32)
+ for i in numba.prange(n):
+ for j in numba.prange(n):
+ adj[i][j] = np.sqrt(np.sum((X[i] - X[j]) ** 2))
+ return adj
+
+
+def calculate_p(adj, l):
+ adj_exp = np.exp(-1 * (adj**2) / (2 * (l**2)))
+ return np.mean(np.sum(adj_exp, 1)) - 1
+
+
+def search_l(p, adj, start=0.01, end=1000, tol=0.01, max_run=100):
+ """Function to search proper `l` value for spagcn algorithm.
+
+ Args:
+ p (float, optional): parameter `p` in spagcn algorithm. See `SpaGCN` for details.
+ adj (class: `numpy.ndarray`): the calculated adjacent matrix in spagcn algorithm.
+ start (float, optional): lower boundary of search. Defaults to 0.01.
+ end (int, optional): upper boundary of search. Defaults to 1000.
+ tol (float, optional): step length for search. Defaults to 0.01.
+ max_run (int, optional): maximum number of searching iteration. Defaults to 100.
+
+ Returns:
+ float: the `l` value
+ """
+ run = 0
+ p_low = calculate_p(adj, start)
+ p_high = calculate_p(adj, end)
+ if p_low > p + tol:
+ slog.main_info("l not found, try smaller start point.")
+ return None
+ elif p_high < p - tol:
+ slog.main_info("l not found, try bigger end point.")
+ return None
+ elif np.abs(p_low - p) <= tol:
+ slog.main_info(f"recommended l = {str(start)}.")
+ return start
+ elif np.abs(p_high - p) <= tol:
+ slog.main_info(f"recommended l = {str(end)}.")
+ return end
+ while (p_low + tol) < p < (p_high - tol):
+ run += 1
+ slog.main_info(
+ "Run "
+ + str(run)
+ + ": l ["
+ + str(start)
+ + ", "
+ + str(end)
+ + "], p ["
+ + str(p_low)
+ + ", "
+ + str(p_high)
+ + "]"
+ )
+ if run > max_run:
+ slog.main_info(
+ "Exact l not found, closest values are:\n"
+ + "l="
+ + str(start)
+ + ": "
+ + "p="
+ + str(p_low)
+ + "\nl="
+ + str(end)
+ + ": "
+ + "p="
+ + str(p_high)
+ )
+ return None
+ mid = (start + end) / 2
+ p_mid = calculate_p(adj, mid)
+ if np.abs(p_mid - p) <= tol:
+ slog.main_info(f"recommended l = {str(mid)}")
+ return mid
+ if p_mid <= p:
+ start = mid
+ p_low = p_mid
+ else:
+ end = mid
+ p_high = p_mid
+
+
+def get_cluster_num(
+ adata,
+ adj,
+ res,
+ tol,
+ lr,
+ max_epochs,
+ l,
+ r_seed=100,
+ t_seed=100,
+ n_seed=100,
+):
+ """get the initial number of clusters corresponding to given louvain resolution.
+
+ Args:
+ adata, adj, res, tol, lr, max_epochs: further passed to SpaGCN.train(), see `SpaGCN.train`.
+ l (float): parameter `l` in spagcn algorithm, see `SpaGCN` for details.
+ r_seed, t_seed, n_seed (int, optional): Global seed for `random`, `torch`, `numpy`. Defaults to 100.
+
+ Returns:
+ int: number of clusters
+ """
+ random.seed(r_seed)
+ torch.manual_seed(t_seed)
+ np.random.seed(n_seed)
+ clf = SpaGCN()
+ clf.set_l(l)
+ clf.train(
+ adata,
+ adj,
+ init_spa=True,
+ init="louvain",
+ res=res,
+ tol=tol,
+ lr=lr,
+ max_epochs=max_epochs,
+ )
+ y_pred, _ = clf.predict()
+ return len(set(y_pred))
+
+
+def search_res(
+ adata,
+ adj,
+ l,
+ target_num,
+ start=0.4,
+ step=0.1,
+ tol=5e-3,
+ lr=0.05,
+ max_epochs=10,
+ r_seed=100,
+ t_seed=100,
+ n_seed=100,
+ max_run=10,
+):
+ """Function to search a proper initial louvain resolution to get desired number of clusters in spagcn algorithm.
+
+ Args:
+ adata (class:`~anndata.AnnData`): an Annadata object.
+ adj (class: `numpy.ndarray`): the calculated adjacent matrix in spagcn algorithm.
+ l (float): parameter `l` in spagcn algorithm, see `SpaGCN` for details.
+ target_num (int): desired number of clusters.
+ start (float, optional): the lower boundary of search for resolution. Defaults to 0.4.
+ step (float, optional): search step length. Defaults to 0.1.
+ tol, lr, max_epochs: further passed to SpaGCN.train(), see `SpaGCN.train`.
+ r_seed, t_seed, n_seed (int, optional): Global seed for `random`, `torch`, `numpy`. Defaults to 100.
+ max_run (int, optional): max number of iteration. Defaults to 10.
+
+ Returns:
+ float: calculated initial louvain resolution.
+ """
+ res = start
+ slog.main_info(f"Start at res = {res} step = {step}")
+ old_num = get_cluster_num(adata, adj, res, tol, lr, max_epochs, l, r_seed, t_seed, n_seed)
+ slog.main_info(f"Res = {res} Num of clusters = {old_num}")
+ run = 0
+ while old_num != target_num:
+ old_sign = -1 if (old_num < target_num) else 1
+ new_num = get_cluster_num(
+ adata,
+ adj,
+ res + step * old_sign,
+ tol,
+ lr,
+ max_epochs,
+ l,
+ r_seed,
+ t_seed,
+ n_seed,
+ )
+ slog.main_info(f"Res = {res + step * old_sign} Num of clusters = {new_num}")
+ if new_num == target_num:
+ res = res + step * old_sign
+ slog.main_info(f"recommended res = {res}")
+ return res
+ new_sign = -1 if (new_num < target_num) else 1
+ if new_sign == old_sign:
+ res = res + step * old_sign
+ slog.main_info(f"Res changed to res")
+ old_num = new_num
+ else:
+ step = step / 2
+ slog.main_info(f"Step changed to {step}")
+ if run > max_run:
+ slog.main_info("Exact resolution not found")
+ slog.main_info(f"Recommended res = {res}")
+ return res
+ run += 1
+ slog.main_info(f"recommended res = {res}")
+ return res
+
+
+def refine(sample_id, pred, dis, shape="square"):
+ """To refine(smooth) the boundary of spatial domains(clusters).
+
+ Args:
+ sample_id (list): list of sample(cell, spot or bin) names.
+ pred (list): list of spatial domains corresponding to the sample_id list.
+ dis (class: `numpy.ndarray`): the calculated adjacent matrix in spagcn algorithm.
+ shape (str, optional): Smooth the spatial domains with given spatial topology, "hexagon" for Visium data, "square" for ST data. Defaults to "square".
+
+ Returns:
+ [list]: list of refined spatial domains corresponding to the sample_id list.
+ """
+ refined_pred = []
+ pred = pd.DataFrame({"pred": pred}, index=sample_id)
+ dis_df = pd.DataFrame(dis, index=sample_id, columns=sample_id)
+ if shape == "hexagon":
+ num_nbs = 6
+ elif shape == "square":
+ num_nbs = 4
+ else:
+ slog.main_info("Shape not recongized, shape='hexagon' for Visium data, 'square' for ST data.")
+ for i in range(len(sample_id)):
+ index = sample_id[i]
+ dis_tmp = dis_df.loc[index, :].sort_values()
+ nbs = dis_tmp[0 : num_nbs + 1]
+ nbs_pred = pred.loc[nbs.index, "pred"]
+ self_pred = pred.loc[index, "pred"]
+ v_c = nbs_pred.value_counts()
+ if (v_c.loc[self_pred] < num_nbs / 2) and (np.max(v_c) > num_nbs / 2):
+ refined_pred.append(v_c.idxmax())
+ else:
+ refined_pred.append(self_pred)
+ return refined_pred
+
+
+class GraphConvolution(nn.Module):
+ """
+ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
+ """
+
+ def __init__(self, in_features, out_features, bias=True):
+ super(GraphConvolution, self).__init__()
+ self.in_features = in_features
+ self.out_features = out_features
+ self.weight = nn.parameter.Parameter(torch.FloatTensor(in_features, out_features))
+ if bias:
+ self.bias = nn.parameter.Parameter(torch.FloatTensor(out_features))
+ else:
+ self.register_parameter("bias", None)
+ self.reset_parameters()
+
+ def reset_parameters(self):
+ stdv = 1.0 / math.sqrt(self.weight.size(1))
+ self.weight.data.uniform_(-stdv, stdv)
+ if self.bias is not None:
+ self.bias.data.uniform_(-stdv, stdv)
+
+ def forward(self, input, adj):
+ support = torch.mm(input, self.weight)
+ output = torch.spmm(adj, support)
+ if self.bias is not None:
+ return output + self.bias
+ else:
+ return output
+
+ def __repr__(self):
+ return self.__class__.__name__ + " (" + str(self.in_features) + " -> " + str(self.out_features) + ")"
+
+
+class simple_GC_DEC(nn.Module):
+ """
+ Simple NN model constructed with a GraphConvolution layer followed by a DeepEmbeddingClustering layer.
+ For DEC, see https://arxiv.org/abs/1511.06335v2
+ """
+
+ def __init__(self, nfeat, nhid, alpha=0.2):
+ super(simple_GC_DEC, self).__init__()
+ self.gc = GraphConvolution(nfeat, nhid)
+ self.nhid = nhid
+ # self.mu determined by the init method
+ self.alpha = alpha
+
+ def forward(self, x, adj):
+ x = self.gc(x, adj)
+ q = 1.0 / ((1.0 + torch.sum((x.unsqueeze(1) - self.mu) ** 2, dim=2) / self.alpha) + 1e-8)
+ q = q ** (self.alpha + 1.0) / 2.0
+ q = q / torch.sum(q, dim=1, keepdim=True)
+ return x, q
+
+ def loss_function(self, p, q):
+ def kld(target, pred):
+ return torch.mean(torch.sum(target * torch.log(target / (pred + 1e-6)), dim=1))
+
+ loss = kld(p, q)
+ return loss
+
+ def target_distribution(self, q):
+ # weight = q ** 2 / q.sum(0)
+ # return torch.transpose((torch.transpose(weight,0,1) / weight.sum(1)),0,1)e
+ p = q**2 / torch.sum(q, dim=0)
+ p = p / torch.sum(p, dim=1, keepdim=True)
+ return p
+
+ def fit(
+ self,
+ X,
+ adj,
+ lr=0.001,
+ max_epochs=5000,
+ update_interval=3,
+ trajectory_interval=50,
+ weight_decay=5e-4,
+ opt="sgd",
+ init="louvain",
+ n_neighbors=10,
+ res=0.4,
+ n_clusters=10,
+ init_spa=True,
+ tol=1e-3,
+ ):
+ self.trajectory = []
+ if opt == "sgd":
+ optimizer = torch.optim.SGD(self.parameters(), lr=lr, momentum=0.9)
+ elif opt == "adam":
+ optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)
+
+ features = self.gc(torch.FloatTensor(X), torch.FloatTensor(adj))
+ # ----------------------------------------------------------------
+ if init == "kmeans":
+ slog.main_info("Initializing cluster centers with kmeans, n_clusters known")
+ self.n_clusters = n_clusters
+ kmeans = KMeans(self.n_clusters, n_init=20)
+ if init_spa:
+ # ------Kmeans use exp and spatial
+ y_pred = kmeans.fit_predict(features.detach().numpy())
+ else:
+ # ------Kmeans only use exp info, no spatial
+ y_pred = kmeans.fit_predict(X) # Here we use X as numpy
+ elif init == "louvain":
+ slog.main_info(f"Initializing cluster centers with louvain, resolution = {res}")
+ if init_spa:
+ adata = ad.AnnData(features.detach().numpy())
+ else:
+ adata = ad.AnnData(X)
+ dyn.tl.neighbors(adata, n_neighbors=n_neighbors, X_data=adata.X)
+ dyn.tl.louvain(adata, resolution=res)
+ y_pred = adata.obs["louvain"].astype(int).to_numpy()
+ self.n_clusters = len(np.unique(y_pred))
+ # ----------------------------------------------------------------
+ y_pred_last = y_pred
+ self.mu = nn.parameter.Parameter(torch.Tensor(self.n_clusters, self.nhid))
+ X = torch.FloatTensor(X)
+ adj = torch.FloatTensor(adj)
+ self.trajectory.append(y_pred)
+ features = pd.DataFrame(features.detach().numpy(), index=np.arange(0, features.shape[0]))
+ Group = pd.Series(y_pred, index=np.arange(0, features.shape[0]), name="Group")
+ Mergefeature = pd.concat([features, Group], axis=1)
+ cluster_centers = np.asarray(Mergefeature.groupby("Group").mean())
+
+ self.mu.data.copy_(torch.Tensor(cluster_centers))
+ self.train()
+ for epoch in range(max_epochs):
+ if epoch % update_interval == 0:
+ _, q = self.forward(X, adj)
+ p = self.target_distribution(q).data
+ if epoch % 10 == 0:
+ slog.main_info(f"Epoch {epoch}")
+ optimizer.zero_grad()
+ z, q = self(X, adj)
+ loss = self.loss_function(p, q)
+ loss.backward()
+ optimizer.step()
+ if epoch % trajectory_interval == 0:
+ self.trajectory.append(torch.argmax(q, dim=1).data.cpu().numpy())
+
+ # Check stop criterion
+ y_pred = torch.argmax(q, dim=1).data.cpu().numpy()
+ delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / X.shape[0]
+ y_pred_last = y_pred
+ if epoch > 0 and (epoch - 1) % update_interval == 0 and delta_label < tol:
+ slog.main_info(f"delta_label {delta_label} < tol {tol}")
+ slog.main_info("Reach tolerance threshold. Stopping training.")
+ slog.main_info(f"Total epoch: {epoch}")
+ break
+
+ def predict(self, X, adj):
+ z, q = self(torch.FloatTensor(X), torch.FloatTensor(adj))
+ return z, q
+
+
+class simple_GC_DEC_PyG(simple_GC_DEC):
+ """
+ NN model like simple_GC_DEC, but employed torch_geometric.GCNConv as the GCN layer.
+ """
+
+ def __init__(self, nfeat, nhid, alpha=0.2):
+ super(simple_GC_DEC_PyG, self).__init__()
+
+ # torch geometric
+ try:
+ from torch_geometric.nn import GCNConv
+ except ModuleNotFoundError:
+ # Installing torch geometric packages with specific CUDA+PyTorch version.
+ # See https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html for details
+ ImportError(
+ """
+ TORCH = torch.__version__.split('+')[0]
+ CUDA = 'cu' + torch.version.cuda.replace('.','')
+
+ !pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
+ !pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
+ !pip install torch-cluster -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
+ !pip install torch-spline-conv -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
+ !pip install torch-geometric"
+ """
+ )
+
+ from torch_geometric.nn import GCNConv
+
+ self.gc = GCNConv(nfeat, nhid)
+ self.nhid = nhid
+ # self.mu determined by the init method
+ self.alpha = alpha
+
+ def forward(self, x, edge_index, edge_attr):
+ x = self.gc(x, edge_index, edge_attr)
+ q = 1.0 / ((1.0 + torch.sum((x.unsqueeze(1) - self.mu) ** 2, dim=2) / self.alpha) + 1e-8)
+ q = q ** (self.alpha + 1.0) / 2.0
+ q = q / torch.sum(q, dim=1, keepdim=True)
+ return x, q
+
+ def fit(
+ self,
+ X,
+ adj,
+ lr=0.001,
+ max_epochs=5000,
+ update_interval=3,
+ trajectory_interval=50,
+ weight_decay=5e-4,
+ opt="sgd",
+ init="louvain",
+ n_neighbors=10,
+ res=0.4,
+ n_clusters=10,
+ init_spa=True,
+ tol=1e-3,
+ ):
+ self.trajectory = []
+ if opt == "sgd":
+ optimizer = torch.optim.SGD(self.parameters(), lr=lr, momentum=0.9)
+ elif opt == "adam":
+ optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)
+
+ adj_mlt = pd.DataFrame(adj).reset_index().melt("index")
+ edge_index = torch.tensor([adj_mlt.loc[:, "index"], adj_mlt.loc[:, "variable"]], dtype=torch.long)
+ edge_attr = torch.tensor(adj_mlt.loc[:, "value"], dtype=torch.float)
+
+ features = self.gc(torch.FloatTensor(X), edge_index, edge_attr)
+ # ----------------------------------------------------------------
+ if init == "kmeans":
+ slog.main_info("Initializing cluster centers with kmeans, n_clusters known")
+ self.n_clusters = n_clusters
+ kmeans = KMeans(self.n_clusters, n_init=20)
+ if init_spa:
+ # ------Kmeans use exp and spatial
+ y_pred = kmeans.fit_predict(features.detach().numpy())
+ else:
+ # ------Kmeans only use exp info, no spatial
+ y_pred = kmeans.fit_predict(X) # Here we use X as numpy
+ elif init == "louvain":
+ slog.main_info(f"Initializing cluster centers with louvain, resolution = {res}")
+ if init_spa:
+ adata = ad.AnnData(features.detach().numpy())
+ else:
+ adata = ad.AnnData(X)
+ dyn.tl.neighbors(adata, n_neighbors=n_neighbors)
+ dyn.tl.louvain(adata, resolution=res)
+ y_pred = adata.obs["louvain"].astype(int).to_numpy()
+ self.n_clusters = len(np.unique(y_pred))
+ # ----------------------------------------------------------------
+ y_pred_last = y_pred
+ self.mu = nn.parameter.Parameter(torch.Tensor(self.n_clusters, self.nhid))
+ X = torch.FloatTensor(X)
+ self.trajectory.append(y_pred)
+ features = pd.DataFrame(features.detach().numpy(), index=np.arange(0, features.shape[0]))
+ Group = pd.Series(y_pred, index=np.arange(0, features.shape[0]), name="Group")
+ Mergefeature = pd.concat([features, Group], axis=1)
+ cluster_centers = np.asarray(Mergefeature.groupby("Group").mean())
+
+ self.mu.data.copy_(torch.Tensor(cluster_centers))
+ self.train()
+ for epoch in range(max_epochs):
+ if epoch % update_interval == 0:
+ _, q = self.forward(X, edge_index, edge_attr)
+ p = self.target_distribution(q).data
+ if epoch % 10 == 0:
+ slog.main_info(f"Epoch {epoch}")
+ optimizer.zero_grad()
+ z, q = self(X, edge_index, edge_attr)
+ loss = self.loss_function(p, q)
+ loss.backward()
+ optimizer.step()
+ if epoch % trajectory_interval == 0:
+ self.trajectory.append(torch.argmax(q, dim=1).data.cpu().numpy())
+
+ # Check stop criterion
+ y_pred = torch.argmax(q, dim=1).data.cpu().numpy()
+ delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / X.shape[0]
+ y_pred_last = y_pred
+ if epoch > 0 and (epoch - 1) % update_interval == 0 and delta_label < tol:
+ slog.main_info(f"delta_label {delta_label} < tol {tol}")
+ slog.main_info("Reach tolerance threshold. Stopping training.")
+ slog.main_info(f"Total epoch: {epoch}")
+ break
+
+ def predict(self, X, adj):
+ adj_mlt = pd.DataFrame(adj).reset_index().melt("index")
+ edge_index = torch.tensor([adj_mlt.loc[:, "index"], adj_mlt.loc[:, "variable"]], dtype=torch.long)
+ edge_attr = torch.tensor(adj_mlt.loc[:, "value"], dtype=torch.float)
+ z, q = self(torch.FloatTensor(X), edge_index, edge_attr)
+ return z, q
+
+
+class SpaGCN(object):
+ """
+ Implementation for spagcn algorithm, see https://doi.org/10.1038/s41592-021-01255-8
+ """
+
+ def __init__(self):
+ super(SpaGCN, self).__init__()
+ self.l = None
+
+ def set_l(self, l):
+ self.l = l
+
+ def train(
+ self,
+ adata,
+ adj,
+ num_pcs=50,
+ lr=0.005,
+ max_epochs=2000,
+ weight_decay=0,
+ opt="adam",
+ init_spa=True,
+ init="louvain", # louvain or kmeans
+ n_neighbors=10, # for louvain
+ n_clusters=None, # for kmeans
+ res=0.4, # for louvain
+ tol=1e-3,
+ ):
+ """train model for spagcn
+
+ Args:
+ adata (class:`~anndata.AnnData`): an Annadata object.
+ adj (class: `numpy.ndarray`): the calculated adjacent matrix in spagcn algorithm.
+ num_pcs (int, optional): number of pcs(out dimension of PCA) to use. Defaults to 50.
+ lr (float, optional): learning rate in neural network. Defaults to 0.005.
+ max_epochs (int, optional): max epochs to train in neural network. Defaults to 2000.
+ weight_decay (int, optional): make learning rate decay while training. Defaults to 0.
+ opt (str, optional): the optimizer to use. Defaults to "adam".
+ init_spa (bool, optional): make initial clusters with louvain or kmeans. Defaults to True.
+ init (str, optional): algorithm to use in inital clustering. Supports "louvain", "kmeans". Defaults to "louvain".
+ """
+ self.num_pcs = num_pcs
+ self.res = res
+ self.lr = lr
+ self.max_epochs = max_epochs
+ self.weight_decay = weight_decay
+ self.opt = opt
+ self.init_spa = init_spa
+ self.init = init
+ self.n_neighbors = n_neighbors
+ self.n_clusters = n_clusters
+ self.res = res
+ self.tol = tol
+ assert adata.shape[0] == adj.shape[0] == adj.shape[1]
+ pca = PCA(n_components=self.num_pcs)
+ if issparse(adata.X):
+ pca.fit(adata.X.A)
+ embed = pca.transform(adata.X.A)
+ else:
+ pca.fit(adata.X)
+ embed = pca.transform(adata.X)
+ ###------------------------------------------###
+ if self.l is None:
+ raise ValueError("l should be set before fitting the model!")
+ adj_exp = np.exp(-1 * (adj**2) / (2 * (self.l**2)))
+ # ----------Train model----------
+ self.model = simple_GC_DEC(embed.shape[1], embed.shape[1])
+ self.model.fit(
+ embed,
+ adj_exp,
+ lr=self.lr,
+ max_epochs=self.max_epochs,
+ weight_decay=self.weight_decay,
+ opt=self.opt,
+ init_spa=self.init_spa,
+ init=self.init,
+ n_neighbors=self.n_neighbors,
+ n_clusters=self.n_clusters,
+ res=self.res,
+ tol=self.tol,
+ )
+ self.embed = embed
+ self.adj_exp = adj_exp
+
+ def predict(self):
+ z, q = self.model.predict(self.embed, self.adj_exp)
+ y_pred = torch.argmax(q, dim=1).data.cpu().numpy()
+ # Max probability plot
+ prob = q.detach().numpy()
+ return y_pred, prob
| Clustering with re-implementation of spagcn
This pull request implements:
- [x] reimplement the spagcn method with pytorch-geometry
- [x] use the total UMI counts per bucket (bin, cell), just like the stain image
- [x] consider replace vanilla spagcn method with more advanced attention based graphical neural network methods
Clustering with re-implementation of spagcn
This pull request implements:
- [x] reimplement the spagcn method with pytorch-geometry
- [x] use the total UMI counts per bucket (bin, cell), just like the stain image
- [x] consider replace vanilla spagcn method with more advanced attention based graphical neural network methods
| 2022-01-10T14:35:13 | 0.0 | [] | [] |
|||
Adam-Vandervorst/PyBHV | Adam-Vandervorst__PyBHV-9 | 2b289b6ae298552f5c3ef93256f857e6ef237ed3 | diff --git a/benchmarks/layerwise_random_execution.py b/benchmarks/layerwise_random_execution.py
index b94e3a1..eed1505 100644
--- a/benchmarks/layerwise_random_execution.py
+++ b/benchmarks/layerwise_random_execution.py
@@ -1,7 +1,7 @@
# N-input M-output stack of random computation (from different families)
# NOTE: can fail due to tight bounds on the probabilistic properties, run multiple times
# from bhv.np import NumPyPacked64BHV as BHV
-from bhv.native import CNativePackedBHV as BHV
+from bhv.native import NativePackedBHV as BHV
from time import monotonic
from random import shuffle, random, randrange, sample
@@ -11,10 +11,10 @@
repeat_pipeline = 3
-I = 50
-O = 50
-sizes = [2000]*20 + [O]
-vat_type: '"MAJ3" | "XOR NOT", "SELECT EQ"' = "MAJ3"
+I = 500
+O = 500
+sizes = [10000]*100 + [O]
+vat_type: '"MAJ3" | "XOR NOT" | "SELECT EQ" | "TERNARY"' = "MAJ3"
if vat_type == "MAJ3":
@@ -42,7 +42,12 @@ def execute(_):
else:
i, j = sample(values[-1], k=2)
return ~(i ^ j)
-
+elif vat_type == "TERNARY":
+ OPS = [15, 23, 27, 29, 30, 39, 43, 45, 46, 51, 53, 54, 57, 58, 60, 71, 75, 77, 78, 83, 85, 86, 89, 90, 92, 99, 101, 102, 105, 106, 108, 113, 114, 116, 120, 135, 139, 141, 142, 147, 149, 150, 153, 154, 156, 163, 165, 166, 169, 170, 172, 177, 178, 180, 184, 195, 197, 198, 201, 202, 204, 209, 210, 212, 216, 225, 226, 228, 232, 240]
+ def execute(_):
+ op, = sample(OPS, k=1)
+ i, j, k = sample(values[-1], k=3)
+ return BHV.ternary(i, j, k, op)
execution_times = []
diff --git a/benchmarks/majority.py b/benchmarks/majority.py
index 0db0a51..ec94665 100644
--- a/benchmarks/majority.py
+++ b/benchmarks/majority.py
@@ -2,7 +2,7 @@
from bhv import DIMENSION, AbstractBHV
# from bhv.np import NumPyPacked64BHV as BHV
# from bhv.np import NumPyBoolBHV as BHV
-from bhv.native import NativePackedBHV as BHV
+from bhv.native import CNativePackedBHV as BHV
from time import monotonic
from statistics import pstdev, fmean
@@ -29,7 +29,7 @@
distances[size].append([AbstractBHV.frac_to_std(r.hamming(maj)/DIMENSION, invert=True) for r in rs])
-with open("results/majority_2000_native_packed.csv", 'w') as f:
+with open("results/majority_2000_native_simd.csv", 'w') as f:
f.write("size,mean_distance,std_distance,time\n")
for size in sizes:
print(size)
diff --git a/bhv/abstract.py b/bhv/abstract.py
index d8b31c4..14050a2 100644
--- a/bhv/abstract.py
+++ b/bhv/abstract.py
@@ -160,10 +160,6 @@ def __or__(self, other: Self) -> Self:
def __invert__(self) -> Self:
return self ^ self.ONE
- # recall
- # information_entropy(p) = -p*log2(p) - (1 - p)*log2(1 - p)
- # so for some active fractions, it should be a lot cheaper than for others
- # hence a specialized method for 2^-n
@classmethod
def rand2(cls, power: int) -> Self:
assert power >= 0
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakP-1600-opt64-config.h b/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-AVX512-config.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakP-1600-opt64-config.h
rename to bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-AVX512-config.h
diff --git a/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-AVX512.cpp b/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-AVX512.cpp
new file mode 100644
index 0000000..a486191
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-AVX512.cpp
@@ -0,0 +1,625 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+The Keccak-p permutations, designed by Guido Bertoni, Joan Daemen, Michaël Peeters and Gilles Van Assche.
+
+Implementation by Ronny Van Keer, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+
+---
+
+This file implements Keccak-p[1600] in a SnP-compatible way.
+Please refer to SnP-documentation.h for more details.
+
+This implementation comes with KeccakP-1600-SnP.h in the same folder.
+Please refer to LowLevel.build for the exact list of other files it must be combined with.
+
+---
+
+We would like to thank Vladimir Sedach, we have used parts of his Keccak AVX-512 C++ code.
+ */
+#ifdef __AVX512BW__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <smmintrin.h>
+#include <wmmintrin.h>
+#include <immintrin.h>
+#include <emmintrin.h>
+#include "align.h"
+#include "brg_endian.h"
+#include "KeccakP-1600-AVX512-config.h"
+
+#if (PLATFORM_BYTE_ORDER != IS_LITTLE_ENDIAN)
+#error Expecting a little-endian platform
+#endif
+
+#ifdef KeccakP1600_fullUnrolling
+#define FullUnrolling
+#else
+#define Unrolling KeccakP1600_unrolling
+#endif
+
+/* Comment the define hereunder when compiling for a CPU with AVX-512 SIMD */
+/*
+ * Warning: This code has only been tested on Haswell (AVX2) with SIMULATE_AVX512 defined,
+ * errors will occur if we did a bad interpretation of the AVX-512 intrinsics'
+ * API or functionality.
+ */
+/* #define SIMULATE_AVX512 */
+
+#if defined(SIMULATE_AVX512)
+
+typedef struct
+{
+ uint64_t x[8];
+} __m512i;
+
+static __m512i _mm512_xor_si512( __m512i a, __m512i b)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; ++i )
+ r.x[i] = a.x[i] ^ b.x[i];
+ return(r);
+}
+
+static __m512i _mm512_ternarylogic_epi64(__m512i a, __m512i b, __m512i c, int imm)
+{
+
+ if (imm == 0x96)
+ return ( _mm512_xor_si512( _mm512_xor_si512( a, b ), c ) );
+ if (imm == 0xD2) {
+ __m512i t;
+ unsigned int i;
+
+ for ( i = 0; i < 8; ++i )
+ t.x[i] = ~b.x[i] & c.x[i];
+ return ( _mm512_xor_si512( a, t ) );
+ }
+ printf( "_mm512_ternarylogic_epi64( a, b, c, %02X) not implemented!\n", imm );
+ exit(1);
+
+}
+
+static __m512i _mm512_rol_epi64(__m512i a, int offset)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; ++i )
+ r.x[i] = (a.x[i] << offset) | (a.x[i] >> (64-offset));
+ return(r);
+}
+
+static __m512i _mm512_rolv_epi64(__m512i a, __m512i offset)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; ++i )
+ r.x[i] = (a.x[i] << offset.x[i]) | (a.x[i] >> (64-offset.x[i]));
+ return(r);
+}
+
+static __m512i _mm512_setr_epi64(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e, uint64_t f, uint64_t g, uint64_t h)
+{
+ __m512i r;
+
+ r.x[0] = a;
+ r.x[1] = b;
+ r.x[2] = c;
+ r.x[3] = d;
+ r.x[4] = e;
+ r.x[5] = f;
+ r.x[6] = g;
+ r.x[7] = h;
+ return(r);
+}
+
+static __m512i _mm512_permutexvar_epi64(__m512i idx, __m512i v)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; ++i )
+ r.x[i] = v.x[idx.x[i]];
+ return(r);
+}
+
+static __m512i _mm512_permutex2var_epi64(__m512i a, __m512i idx, __m512i b)
+{
+ __m512i r;
+ unsigned int i;
+ unsigned int index;
+
+ for ( i = 0; i < 8; ++i ) {
+ index = idx.x[i] & 7;
+ r.x[i] = (idx.x[i] & 8) ? b.x[index] : a.x[index];
+ }
+ return(r);
+}
+
+static __m512i _mm512_unpacklo_epi64(__m512i a, __m512i b)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; i += 2 ) {
+ r.x[i] = a.x[i];
+ r.x[i+1] = b.x[i];
+ }
+ return(r);
+}
+
+static __m512i _mm512_unpackhi_epi64(__m512i a, __m512i b)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; i += 2 ) {
+ r.x[i] = a.x[i+1];
+ r.x[i+1] = b.x[i+1];
+ }
+ return(r);
+}
+
+static __m512i _mm512_mask_blend_epi64(unsigned char mask, __m512i a, __m512i b)
+{
+ __m512i r;
+ unsigned int i;
+
+ for ( i = 0; i < 8; ++i, mask >>= 1 )
+ r.x[i] = (mask & 1) ? b.x[i] : a.x[i];
+ return(r);
+}
+
+static __m512i _mm512_maskz_loadu_epi64( unsigned char mask, const void * a)
+{
+ __m512i r;
+ unsigned int i;
+ const uint64_t *p = a;
+
+ for ( i = 0; i < 8; ++i, mask >>= 1 )
+ r.x[i] = (mask & 1) ? p[i] : 0;
+ return(r);
+}
+
+static void _mm512_mask_storeu_epi64( void * a, unsigned char mask, __m512i v)
+{
+ unsigned int i;
+ uint64_t *p = a;
+
+ for ( i = 0; i < 8; ++i, mask >>= 1 )
+ if ( mask & 1 )
+ p[i] = v.x[i];
+}
+
+
+#endif
+
+typedef __m512i V512;
+
+#define XOR(a,b) _mm512_xor_si512(a,b)
+#define XOR3(a,b,c) _mm512_ternarylogic_epi64(a,b,c,0x96)
+#define XOR5(a,b,c,d,e) XOR3(XOR3(a,b,c),d,e)
+#define ROL(a,offset) _mm512_rol_epi64(a,offset)
+#define Chi(a,b,c) _mm512_ternarylogic_epi64(a,b,c,0xD2)
+
+#define LOAD_Lanes(m,a) _mm512_maskz_loadu_epi64(m,a)
+#define LOAD_Lane(a) LOAD_Lanes(0x01,a)
+#define LOAD_Plane(a) LOAD_Lanes(0x1F,a)
+#define LOAD_8Lanes(a) LOAD_Lanes(0xFF,a)
+#define STORE_Lanes(a,m,v) _mm512_mask_storeu_epi64(a,m,v)
+#define STORE_Lane(a,v) STORE_Lanes(a,0x01,v)
+#define STORE_Plane(a,v) STORE_Lanes(a,0x1F,v)
+#define STORE_8Lanes(a,v) STORE_Lanes(a,0xFF,v)
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_Initialize(void *state)
+{
+ memset(state, 0, 1600/8);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_AddBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length)
+{
+ uint8_t *stateAsBytes;
+ uint64_t *stateAsLanes;
+
+ for( stateAsBytes = (uint8_t*)state; ((offset % 8) != 0) && (length != 0); ++offset, --length)
+ stateAsBytes[offset] ^= *(data++);
+ for (stateAsLanes = (uint64_t*)(stateAsBytes + offset); length >= 8*8; stateAsLanes += 8, data += 8*8, length -= 8*8)
+ STORE_8Lanes( stateAsLanes, XOR(LOAD_8Lanes(stateAsLanes), LOAD_8Lanes((const uint64_t*)data)));
+ for (/* empty */; length >= 8; ++stateAsLanes, data += 8, length -= 8)
+ STORE_Lane( stateAsLanes, XOR(LOAD_Lane(stateAsLanes), LOAD_Lane((const uint64_t*)data)));
+ for ( stateAsBytes = (uint8_t*)stateAsLanes; length != 0; --length)
+ *(stateAsBytes++) ^= *(data++);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_OverwriteBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length)
+{
+ memcpy((unsigned char*)state+offset, data, length);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_OverwriteWithZeroes(void *state, unsigned int byteCount)
+{
+ memset(state, 0, byteCount);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_ExtractBytes(const void *state, unsigned char *data, unsigned int offset, unsigned int length)
+{
+ memcpy(data, (unsigned char*)state+offset, length);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_ExtractAndAddBytes(const void *state, const unsigned char *input, unsigned char *output, unsigned int offset, unsigned int length)
+{
+ uint8_t *stateAsBytes;
+ uint64_t *stateAsLanes;
+
+ for( stateAsBytes = (uint8_t*)state; ((offset % 8) != 0) && (length != 0); ++offset, --length)
+ *(output++) = stateAsBytes[offset] ^ *(input++);
+ for (stateAsLanes = (uint64_t*)(stateAsBytes + offset); length >= 8*8; stateAsLanes += 8, input += 8*8, output += 8*8, length -= 8*8)
+ STORE_8Lanes( (uint64_t*)output, XOR(LOAD_8Lanes(stateAsLanes), LOAD_8Lanes((const uint64_t*)input)));
+ for (/* empty */; length >= 8; ++stateAsLanes, input += 8, output += 8, length -= 8)
+ STORE_Lane( (uint64_t*)output, XOR(LOAD_Lane(stateAsLanes), LOAD_Lane((const uint64_t*)input)));
+ for ( stateAsBytes = (uint8_t*)stateAsLanes; length != 0; --length)
+ *(output++) = *(stateAsBytes++) ^ *(input++);
+}
+
+const uint64_t KeccakP1600RoundConstants[24] = {
+ 0x0000000000000001ULL,
+ 0x0000000000008082ULL,
+ 0x800000000000808aULL,
+ 0x8000000080008000ULL,
+ 0x000000000000808bULL,
+ 0x0000000080000001ULL,
+ 0x8000000080008081ULL,
+ 0x8000000000008009ULL,
+ 0x000000000000008aULL,
+ 0x0000000000000088ULL,
+ 0x0000000080008009ULL,
+ 0x000000008000000aULL,
+ 0x000000008000808bULL,
+ 0x800000000000008bULL,
+ 0x8000000000008089ULL,
+ 0x8000000000008003ULL,
+ 0x8000000000008002ULL,
+ 0x8000000000000080ULL,
+ 0x000000000000800aULL,
+ 0x800000008000000aULL,
+ 0x8000000080008081ULL,
+ 0x8000000000008080ULL,
+ 0x0000000080000001ULL,
+ 0x8000000080008008ULL };
+
+#define KeccakP_DeclareVars \
+ V512 b0, b1, b2, b3, b4; \
+ V512 Baeiou, Gaeiou, Kaeiou, Maeiou, Saeiou; \
+ V512 moveThetaPrev = _mm512_setr_epi64(4, 0, 1, 2, 3, 5, 6, 7); \
+ V512 moveThetaNext = _mm512_setr_epi64(1, 2, 3, 4, 0, 5, 6, 7); \
+ V512 rhoB = _mm512_setr_epi64( 0, 1, 62, 28, 27, 0, 0, 0); \
+ V512 rhoG = _mm512_setr_epi64(36, 44, 6, 55, 20, 0, 0, 0); \
+ V512 rhoK = _mm512_setr_epi64( 3, 10, 43, 25, 39, 0, 0, 0); \
+ V512 rhoM = _mm512_setr_epi64(41, 45, 15, 21, 8, 0, 0, 0); \
+ V512 rhoS = _mm512_setr_epi64(18, 2, 61, 56, 14, 0, 0, 0); \
+ V512 pi1B = _mm512_setr_epi64(0, 3, 1, 4, 2, 5, 6, 7); \
+ V512 pi1G = _mm512_setr_epi64(1, 4, 2, 0, 3, 5, 6, 7); \
+ V512 pi1K = _mm512_setr_epi64(2, 0, 3, 1, 4, 5, 6, 7); \
+ V512 pi1M = _mm512_setr_epi64(3, 1, 4, 2, 0, 5, 6, 7); \
+ V512 pi1S = _mm512_setr_epi64(4, 2, 0, 3, 1, 5, 6, 7); \
+ V512 pi2S1 = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 0+8, 2+8); \
+ V512 pi2S2 = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 1+8, 3+8); \
+ V512 pi2BG = _mm512_setr_epi64(0, 1, 0+8, 1+8, 6, 5, 6, 7); \
+ V512 pi2KM = _mm512_setr_epi64(2, 3, 2+8, 3+8, 7, 5, 6, 7); \
+ V512 pi2S3 = _mm512_setr_epi64(4, 5, 4+8, 5+8, 4, 5, 6, 7);
+
+#define copyFromState(pState) \
+ Baeiou = LOAD_Plane(pState+ 0); \
+ Gaeiou = LOAD_Plane(pState+ 5); \
+ Kaeiou = LOAD_Plane(pState+10); \
+ Maeiou = LOAD_Plane(pState+15); \
+ Saeiou = LOAD_Plane(pState+20);
+
+#define copyToState(pState) \
+ STORE_Plane(pState+ 0, Baeiou); \
+ STORE_Plane(pState+ 5, Gaeiou); \
+ STORE_Plane(pState+10, Kaeiou); \
+ STORE_Plane(pState+15, Maeiou); \
+ STORE_Plane(pState+20, Saeiou);
+
+#define KeccakP_Round(i) \
+ /* Theta */ \
+ b0 = XOR5( Baeiou, Gaeiou, Kaeiou, Maeiou, Saeiou ); \
+ b1 = _mm512_permutexvar_epi64(moveThetaPrev, b0); \
+ b0 = _mm512_permutexvar_epi64(moveThetaNext, b0); \
+ b0 = _mm512_rol_epi64(b0, 1); \
+ Baeiou = XOR3( Baeiou, b0, b1 ); \
+ Gaeiou = XOR3( Gaeiou, b0, b1 ); \
+ Kaeiou = XOR3( Kaeiou, b0, b1 ); \
+ Maeiou = XOR3( Maeiou, b0, b1 ); \
+ Saeiou = XOR3( Saeiou, b0, b1 ); \
+ /* Rho */ \
+ Baeiou = _mm512_rolv_epi64(Baeiou, rhoB); \
+ Gaeiou = _mm512_rolv_epi64(Gaeiou, rhoG); \
+ Kaeiou = _mm512_rolv_epi64(Kaeiou, rhoK); \
+ Maeiou = _mm512_rolv_epi64(Maeiou, rhoM); \
+ Saeiou = _mm512_rolv_epi64(Saeiou, rhoS); \
+ /* Pi 1 */ \
+ b0 = _mm512_permutexvar_epi64(pi1B, Baeiou); \
+ b1 = _mm512_permutexvar_epi64(pi1G, Gaeiou); \
+ b2 = _mm512_permutexvar_epi64(pi1K, Kaeiou); \
+ b3 = _mm512_permutexvar_epi64(pi1M, Maeiou); \
+ b4 = _mm512_permutexvar_epi64(pi1S, Saeiou); \
+ /* Chi */ \
+ Baeiou = Chi(b0, b1, b2); \
+ Gaeiou = Chi(b1, b2, b3); \
+ Kaeiou = Chi(b2, b3, b4); \
+ Maeiou = Chi(b3, b4, b0); \
+ Saeiou = Chi(b4, b0, b1); \
+ /* Iota */ \
+ Baeiou = XOR(Baeiou, LOAD_Lane(KeccakP1600RoundConstants+i)); \
+ /* Pi 2 */ \
+ b0 = _mm512_unpacklo_epi64(Baeiou, Gaeiou); \
+ b1 = _mm512_unpacklo_epi64(Kaeiou, Maeiou); \
+ b0 = _mm512_permutex2var_epi64(b0, pi2S1, Saeiou); \
+ b2 = _mm512_unpackhi_epi64(Baeiou, Gaeiou); \
+ b3 = _mm512_unpackhi_epi64(Kaeiou, Maeiou); \
+ b2 = _mm512_permutex2var_epi64(b2, pi2S2, Saeiou); \
+ Baeiou = _mm512_permutex2var_epi64(b0, pi2BG, b1); \
+ Gaeiou = _mm512_permutex2var_epi64(b2, pi2BG, b3); \
+ Kaeiou = _mm512_permutex2var_epi64(b0, pi2KM, b1); \
+ Maeiou = _mm512_permutex2var_epi64(b2, pi2KM, b3); \
+ b0 = _mm512_permutex2var_epi64(b0, pi2S3, b1); \
+ Saeiou = _mm512_mask_blend_epi64(0x10, b0, Saeiou)
+
+#ifdef FullUnrolling
+
+#define rounds12 \
+ KeccakP_Round( 12 ); \
+ KeccakP_Round( 13 ); \
+ KeccakP_Round( 14 ); \
+ KeccakP_Round( 15 ); \
+ KeccakP_Round( 16 ); \
+ KeccakP_Round( 17 ); \
+ KeccakP_Round( 18 ); \
+ KeccakP_Round( 19 ); \
+ KeccakP_Round( 20 ); \
+ KeccakP_Round( 21 ); \
+ KeccakP_Round( 22 ); \
+ KeccakP_Round( 23 )
+
+#define rounds24 \
+ KeccakP_Round( 0 ); \
+ KeccakP_Round( 1 ); \
+ KeccakP_Round( 2 ); \
+ KeccakP_Round( 3 ); \
+ KeccakP_Round( 4 ); \
+ KeccakP_Round( 5 ); \
+ KeccakP_Round( 6 ); \
+ KeccakP_Round( 7 ); \
+ KeccakP_Round( 8 ); \
+ KeccakP_Round( 9 ); \
+ KeccakP_Round( 10 ); \
+ KeccakP_Round( 11 ); \
+ KeccakP_Round( 12 ); \
+ KeccakP_Round( 13 ); \
+ KeccakP_Round( 14 ); \
+ KeccakP_Round( 15 ); \
+ KeccakP_Round( 16 ); \
+ KeccakP_Round( 17 ); \
+ KeccakP_Round( 18 ); \
+ KeccakP_Round( 19 ); \
+ KeccakP_Round( 20 ); \
+ KeccakP_Round( 21 ); \
+ KeccakP_Round( 22 ); \
+ KeccakP_Round( 23 )
+
+#elif (Unrolling == 6)
+
+#define rounds12 \
+ i = 12; \
+ do { \
+ KeccakP_Round( i+ 0 ); \
+ KeccakP_Round( i+ 1 ); \
+ KeccakP_Round( i+ 2 ); \
+ KeccakP_Round( i+ 3 ); \
+ KeccakP_Round( i+ 4 ); \
+ KeccakP_Round( i+ 5 ); \
+ } while( (i += 6) < 24 )
+
+#define rounds24 \
+ i = 0; \
+ do { \
+ KeccakP_Round( i+ 0 ); \
+ KeccakP_Round( i+ 1 ); \
+ KeccakP_Round( i+ 2 ); \
+ KeccakP_Round( i+ 3 ); \
+ KeccakP_Round( i+ 4 ); \
+ KeccakP_Round( i+ 5 ); \
+ } while( (i += 6) < 24 )
+
+#elif (Unrolling == 12)
+
+#define rounds12 \
+ KeccakP_Round( 12 ); \
+ KeccakP_Round( 13 ); \
+ KeccakP_Round( 14 ); \
+ KeccakP_Round( 15 ); \
+ KeccakP_Round( 16 ); \
+ KeccakP_Round( 17 ); \
+ KeccakP_Round( 18 ); \
+ KeccakP_Round( 19 ); \
+ KeccakP_Round( 20 ); \
+ KeccakP_Round( 21 ); \
+ KeccakP_Round( 22 ); \
+ KeccakP_Round( 23 )
+
+#define rounds24 \
+ i = 0; \
+ do { \
+ KeccakP_Round( i+ 0 ); \
+ KeccakP_Round( i+ 1 ); \
+ KeccakP_Round( i+ 2 ); \
+ KeccakP_Round( i+ 3 ); \
+ KeccakP_Round( i+ 4 ); \
+ KeccakP_Round( i+ 5 ); \
+ KeccakP_Round( i+ 6 ); \
+ KeccakP_Round( i+ 7 ); \
+ KeccakP_Round( i+ 8 ); \
+ KeccakP_Round( i+ 9 ); \
+ KeccakP_Round( i+10 ); \
+ KeccakP_Round( i+11 ); \
+ } while( (i += 12) < 24 )
+
+#else
+#error "Unrolling is not correctly specified!"
+#endif
+
+void KeccakP1600_Permute_Nrounds(void *state, unsigned int nrounds)
+{
+ KeccakP_DeclareVars
+ unsigned int i;
+ uint64_t *stateAsLanes = (uint64_t*)state;
+
+ copyFromState(stateAsLanes);
+ if ((nrounds & 1) != 0) {
+ KeccakP_Round( 24-nrounds );
+ --nrounds;
+ }
+ if ((nrounds & 2) != 0) {
+ KeccakP_Round( 24+0-nrounds );
+ KeccakP_Round( 24+1-nrounds );
+ nrounds -= 2;
+ }
+ for (i = 24-nrounds; i < 24; i+= 4) {
+ KeccakP_Round( i );
+ KeccakP_Round( i+1 );
+ KeccakP_Round( i+2 );
+ KeccakP_Round( i+3 );
+ }
+ copyToState(stateAsLanes);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_Permute_12rounds(void *state)
+{
+ KeccakP_DeclareVars
+ #ifndef KeccakP1600_fullUnrolling
+ unsigned int i;
+ #endif
+ uint64_t *stateAsLanes = (uint64_t*)state;
+
+ copyFromState(stateAsLanes);
+ rounds12;
+ copyToState(stateAsLanes);
+}
+
+/* ---------------------------------------------------------------- */
+
+void KeccakP1600_Permute_24rounds(void *state)
+{
+ KeccakP_DeclareVars
+ #ifndef KeccakP1600_fullUnrolling
+ unsigned int i;
+ #endif
+ uint64_t *stateAsLanes = (uint64_t*)state;
+
+ copyFromState(stateAsLanes);
+ rounds24;
+ copyToState(stateAsLanes);
+}
+
+size_t KeccakF1600_FastLoop_Absorb(void *state, unsigned int laneCount, const unsigned char *data, size_t dataByteLen)
+{
+ size_t originalDataByteLen = dataByteLen;
+
+ if (laneCount == 21) {
+ KeccakP_DeclareVars;
+ #ifndef KeccakP1600_fullUnrolling
+ unsigned int i;
+ #endif
+ uint64_t *stateAsLanes = (uint64_t*)state;
+ uint64_t *inDataAsLanes = (uint64_t*)data;
+
+ copyFromState(stateAsLanes);
+ while(dataByteLen >= 21*8) {
+ Baeiou = XOR(Baeiou, LOAD_Plane(inDataAsLanes+ 0));
+ Gaeiou = XOR(Gaeiou, LOAD_Plane(inDataAsLanes+ 5));
+ Kaeiou = XOR(Kaeiou, LOAD_Plane(inDataAsLanes+10));
+ Maeiou = XOR(Maeiou, LOAD_Plane(inDataAsLanes+15));
+ Saeiou = XOR(Saeiou, LOAD_Lane(inDataAsLanes+20));
+ rounds24;
+ inDataAsLanes += 21;
+ dataByteLen -= 21*8;
+ }
+ copyToState(stateAsLanes);
+ }
+ else {
+ while(dataByteLen >= laneCount*8) {
+ KeccakP1600_AddBytes(state, data, 0, laneCount*8);
+ KeccakP1600_Permute_24rounds(state);
+ data += laneCount*8;
+ dataByteLen -= laneCount*8;
+ }
+ }
+ return originalDataByteLen - dataByteLen;
+}
+
+size_t KeccakP1600_12rounds_FastLoop_Absorb(void *state, unsigned int laneCount, const unsigned char *data, size_t dataByteLen)
+{
+ size_t originalDataByteLen = dataByteLen;
+
+ if (laneCount == 21) {
+ KeccakP_DeclareVars;
+ #if !defined(KeccakP1600_fullUnrolling) && (KeccakP1600_unrolling < 12)
+ unsigned int i;
+ #endif
+ uint64_t *stateAsLanes = (uint64_t*)state;
+ uint64_t *inDataAsLanes = (uint64_t*)data;
+
+ copyFromState(stateAsLanes);
+ while(dataByteLen >= 21*8) {
+ Baeiou = XOR(Baeiou, LOAD_Plane(inDataAsLanes+ 0));
+ Gaeiou = XOR(Gaeiou, LOAD_Plane(inDataAsLanes+ 5));
+ Kaeiou = XOR(Kaeiou, LOAD_Plane(inDataAsLanes+10));
+ Maeiou = XOR(Maeiou, LOAD_Plane(inDataAsLanes+15));
+ Saeiou = XOR(Saeiou, LOAD_Lane(inDataAsLanes+20));
+ rounds12;
+ inDataAsLanes += 21;
+ dataByteLen -= 21*8;
+ }
+ copyToState(stateAsLanes);
+ }
+ else {
+ while(dataByteLen >= laneCount*8) {
+ KeccakP1600_AddBytes(state, data, 0, laneCount*8);
+ KeccakP1600_Permute_12rounds(state);
+ data += laneCount*8;
+ dataByteLen -= laneCount*8;
+ }
+ }
+ return originalDataByteLen - dataByteLen;
+}
+#endif
\ No newline at end of file
diff --git a/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-SnP.h b/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-SnP.h
new file mode 100644
index 0000000..eaacb10
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-SnP.h
@@ -0,0 +1,47 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+The Keccak-p permutations, designed by Guido Bertoni, Joan Daemen, Michaël Peeters and Gilles Van Assche.
+
+Implementation by Ronny Van Keer, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+
+---
+
+Please refer to SnP-documentation.h for more details.
+*/
+
+#ifndef _KeccakP_1600_SnP_h_
+#define _KeccakP_1600_SnP_h_
+
+#include <stddef.h>
+#include "KeccakP-1600-AVX512-config.h"
+
+#define KeccakP1600_implementation "AVX-512 optimized implementation (" KeccakP1600_implementation_config ")"
+#define KeccakP1600_stateSizeInBytes 200
+#define KeccakP1600_stateAlignment 8
+#define KeccakF1600_FastLoop_supported
+#define KeccakP1600_12rounds_FastLoop_supported
+
+#define KeccakP1600_StaticInitialize()
+void KeccakP1600_Initialize(void *state);
+#define KeccakP1600_AddByte(state, byte, offset) ((unsigned char*)(state))[offset] ^= (byte)
+void KeccakP1600_AddBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length);
+void KeccakP1600_OverwriteBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length);
+void KeccakP1600_OverwriteWithZeroes(void *state, unsigned int byteCount);
+void KeccakP1600_Permute_Nrounds(void *state, unsigned int nrounds);
+void KeccakP1600_Permute_12rounds(void *state);
+void KeccakP1600_Permute_24rounds(void *state);
+void KeccakP1600_ExtractBytes(const void *state, unsigned char *data, unsigned int offset, unsigned int length);
+void KeccakP1600_ExtractAndAddBytes(const void *state, const unsigned char *input, unsigned char *output, unsigned int offset, unsigned int length);
+size_t KeccakF1600_FastLoop_Absorb(void *state, unsigned int laneCount, const unsigned char *data, size_t dataByteLen);
+size_t KeccakP1600_12rounds_FastLoop_Absorb(void *state, unsigned int laneCount, const unsigned char *data, size_t dataByteLen);
+
+#endif
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakSponge.cpp b/bhv/cnative/TurboSHAKE_AVX512/KeccakSponge.cpp
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakSponge.cpp
rename to bhv/cnative/TurboSHAKE_AVX512/KeccakSponge.cpp
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakSponge.h b/bhv/cnative/TurboSHAKE_AVX512/KeccakSponge.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakSponge.h
rename to bhv/cnative/TurboSHAKE_AVX512/KeccakSponge.h
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakSponge.inc b/bhv/cnative/TurboSHAKE_AVX512/KeccakSponge.inc
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakSponge.inc
rename to bhv/cnative/TurboSHAKE_AVX512/KeccakSponge.inc
diff --git a/bhv/cnative/TurboSHAKEopt/TurboSHAKE.cpp b/bhv/cnative/TurboSHAKE_AVX512/TurboSHAKE.cpp
similarity index 98%
rename from bhv/cnative/TurboSHAKEopt/TurboSHAKE.cpp
rename to bhv/cnative/TurboSHAKE_AVX512/TurboSHAKE.cpp
index 1a76e73..353e4a5 100644
--- a/bhv/cnative/TurboSHAKEopt/TurboSHAKE.cpp
+++ b/bhv/cnative/TurboSHAKE_AVX512/TurboSHAKE.cpp
@@ -13,7 +13,7 @@ To the extent possible under law, the implementer has waived all copyright
and related or neighboring rights to the source code in this file.
http://creativecommons.org/publicdomain/zero/1.0/
*/
-
+#ifdef __AVX512BW__
#include "TurboSHAKE.h"
#ifdef XKCP_has_KeccakP1600
@@ -69,3 +69,4 @@ int TurboSHAKE_Squeeze(TurboSHAKE_Instance *instance, unsigned char *data, size_
{
return TurboSHAKE_SpongeSqueeze(instance, data, dataByteLen);
}
+#endif
\ No newline at end of file
diff --git a/bhv/cnative/TurboSHAKEopt/TurboSHAKE.h b/bhv/cnative/TurboSHAKE_AVX512/TurboSHAKE.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/TurboSHAKE.h
rename to bhv/cnative/TurboSHAKE_AVX512/TurboSHAKE.h
diff --git a/bhv/cnative/TurboSHAKEopt/align.h b/bhv/cnative/TurboSHAKE_AVX512/align.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/align.h
rename to bhv/cnative/TurboSHAKE_AVX512/align.h
diff --git a/bhv/cnative/TurboSHAKEopt/brg_endian.h b/bhv/cnative/TurboSHAKE_AVX512/brg_endian.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/brg_endian.h
rename to bhv/cnative/TurboSHAKE_AVX512/brg_endian.h
diff --git a/bhv/cnative/TurboSHAKEopt/config.h b/bhv/cnative/TurboSHAKE_AVX512/config.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/config.h
rename to bhv/cnative/TurboSHAKE_AVX512/config.h
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakP-1600-64.macros b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-64.macros
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakP-1600-64.macros
rename to bhv/cnative/TurboSHAKE_opt/KeccakP-1600-64.macros
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakP-1600-SnP.h b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-SnP.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakP-1600-SnP.h
rename to bhv/cnative/TurboSHAKE_opt/KeccakP-1600-SnP.h
diff --git a/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64-config.h b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64-config.h
new file mode 100644
index 0000000..085b6c9
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64-config.h
@@ -0,0 +1,6 @@
+/*
+This file defines some parameters of the implementation in the parent directory.
+*/
+
+#define KeccakP1600_implementation_config "all rounds unrolled"
+#define KeccakP1600_fullUnrolling
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakP-1600-opt64.cpp b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64.cpp
similarity index 99%
rename from bhv/cnative/TurboSHAKEopt/KeccakP-1600-opt64.cpp
rename to bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64.cpp
index 617069e..5e10f73 100644
--- a/bhv/cnative/TurboSHAKEopt/KeccakP-1600-opt64.cpp
+++ b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64.cpp
@@ -21,7 +21,7 @@ Please refer to SnP-documentation.h for more details.
This implementation comes with KeccakP-1600-SnP.h in the same folder.
Please refer to LowLevel.build for the exact list of other files it must be combined with.
*/
-
+#ifndef __AVX512BW__
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
@@ -563,3 +563,4 @@ size_t KeccakP1600_12rounds_FastLoop_Absorb(void *state, unsigned int laneCount,
copyToState(stateAsLanes, A)
return originalDataByteLen - dataByteLen;
}
+#endif
diff --git a/bhv/cnative/TurboSHAKEopt/KeccakP-1600-unrolling.macros b/bhv/cnative/TurboSHAKE_opt/KeccakP-1600-unrolling.macros
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/KeccakP-1600-unrolling.macros
rename to bhv/cnative/TurboSHAKE_opt/KeccakP-1600-unrolling.macros
diff --git a/bhv/cnative/TurboSHAKE_opt/KeccakSponge.cpp b/bhv/cnative/TurboSHAKE_opt/KeccakSponge.cpp
new file mode 100644
index 0000000..f940ee3
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/KeccakSponge.cpp
@@ -0,0 +1,93 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+Keccak, designed by Guido Bertoni, Joan Daemen, Michaël Peeters and Gilles Van Assche.
+
+Implementation by the designers, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#include "KeccakSponge.h"
+
+#ifdef KeccakReference
+ #include "displayIntermediateValues.h"
+#endif
+
+#ifdef XKCP_has_KeccakP200
+ #include "KeccakP-200-SnP.h"
+
+ #define prefix KeccakWidth200
+ #define SnP KeccakP200
+ #define SnP_width 200
+ #define SnP_Permute KeccakP200_Permute_18rounds
+ #if defined(KeccakF200_FastLoop_supported)
+ #define SnP_FastLoop_Absorb KeccakF200_FastLoop_Absorb
+ #endif
+ #include "KeccakSponge.inc"
+ #undef prefix
+ #undef SnP
+ #undef SnP_width
+ #undef SnP_Permute
+ #undef SnP_FastLoop_Absorb
+#endif
+
+#ifdef XKCP_has_KeccakP400
+ #include "KeccakP-400-SnP.h"
+
+ #define prefix KeccakWidth400
+ #define SnP KeccakP400
+ #define SnP_width 400
+ #define SnP_Permute KeccakP400_Permute_20rounds
+ #if defined(KeccakF400_FastLoop_supported)
+ #define SnP_FastLoop_Absorb KeccakF400_FastLoop_Absorb
+ #endif
+ #include "KeccakSponge.inc"
+ #undef prefix
+ #undef SnP
+ #undef SnP_width
+ #undef SnP_Permute
+ #undef SnP_FastLoop_Absorb
+#endif
+
+#ifdef XKCP_has_KeccakP800
+ #include "KeccakP-800-SnP.h"
+
+ #define prefix KeccakWidth800
+ #define SnP KeccakP800
+ #define SnP_width 800
+ #define SnP_Permute KeccakP800_Permute_22rounds
+ #if defined(KeccakF800_FastLoop_supported)
+ #define SnP_FastLoop_Absorb KeccakF800_FastLoop_Absorb
+ #endif
+ #include "KeccakSponge.inc"
+ #undef prefix
+ #undef SnP
+ #undef SnP_width
+ #undef SnP_Permute
+ #undef SnP_FastLoop_Absorb
+#endif
+
+#ifdef XKCP_has_KeccakP1600
+ #include "KeccakP-1600-SnP.h"
+
+ #define prefix KeccakWidth1600
+ #define SnP KeccakP1600
+ #define SnP_width 1600
+ #define SnP_Permute KeccakP1600_Permute_24rounds
+ #if defined(KeccakF1600_FastLoop_supported)
+ #define SnP_FastLoop_Absorb KeccakF1600_FastLoop_Absorb
+ #endif
+ #include "KeccakSponge.inc"
+ #undef prefix
+ #undef SnP
+ #undef SnP_width
+ #undef SnP_Permute
+ #undef SnP_FastLoop_Absorb
+#endif
diff --git a/bhv/cnative/TurboSHAKE_opt/KeccakSponge.h b/bhv/cnative/TurboSHAKE_opt/KeccakSponge.h
new file mode 100644
index 0000000..c2e3563
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/KeccakSponge.h
@@ -0,0 +1,70 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+Keccak, designed by Guido Bertoni, Joan Daemen, Michaël Peeters and Gilles Van Assche.
+
+Implementation by the designers, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#ifndef _KeccakSponge_h_
+#define _KeccakSponge_h_
+
+/* For the documentation, please follow the link: */
+/* #include "KeccakSponge-documentation.h" */
+
+#include <string.h>
+#include "align.h"
+#include "config.h"
+
+#define XKCP_DeclareSpongeStructure(prefix, size, alignment) \
+ ALIGN(alignment) typedef struct prefix##_SpongeInstanceStruct { \
+ unsigned char state[size]; \
+ unsigned int rate; \
+ unsigned int byteIOIndex; \
+ int squeezing; \
+ } prefix##_SpongeInstance;
+
+#define XKCP_DeclareSpongeFunctions(prefix) \
+ int prefix##_Sponge(unsigned int rate, unsigned int capacity, const unsigned char *input, size_t inputByteLen, unsigned char suffix, unsigned char *output, size_t outputByteLen); \
+ int prefix##_SpongeInitialize(prefix##_SpongeInstance *spongeInstance, unsigned int rate, unsigned int capacity); \
+ int prefix##_SpongeAbsorb(prefix##_SpongeInstance *spongeInstance, const unsigned char *data, size_t dataByteLen); \
+ int prefix##_SpongeAbsorbLastFewBits(prefix##_SpongeInstance *spongeInstance, unsigned char delimitedData); \
+ int prefix##_SpongeSqueeze(prefix##_SpongeInstance *spongeInstance, unsigned char *data, size_t dataByteLen);
+
+#ifdef XKCP_has_KeccakP200
+ #include "KeccakP-200-SnP.h"
+ XKCP_DeclareSpongeStructure(KeccakWidth200, KeccakP200_stateSizeInBytes, KeccakP200_stateAlignment)
+ XKCP_DeclareSpongeFunctions(KeccakWidth200)
+ #define XKCP_has_Sponge_Keccak_width200
+#endif
+
+#ifdef XKCP_has_KeccakP400
+ #include "KeccakP-400-SnP.h"
+ XKCP_DeclareSpongeStructure(KeccakWidth400, KeccakP400_stateSizeInBytes, KeccakP400_stateAlignment)
+ XKCP_DeclareSpongeFunctions(KeccakWidth400)
+ #define XKCP_has_Sponge_Keccak_width400
+#endif
+
+#ifdef XKCP_has_KeccakP800
+ #include "KeccakP-800-SnP.h"
+ XKCP_DeclareSpongeStructure(KeccakWidth800, KeccakP800_stateSizeInBytes, KeccakP800_stateAlignment)
+ XKCP_DeclareSpongeFunctions(KeccakWidth800)
+ #define XKCP_has_Sponge_Keccak_width800
+#endif
+
+#ifdef XKCP_has_KeccakP1600
+ #include "KeccakP-1600-SnP.h"
+ XKCP_DeclareSpongeStructure(KeccakWidth1600, KeccakP1600_stateSizeInBytes, KeccakP1600_stateAlignment)
+ XKCP_DeclareSpongeFunctions(KeccakWidth1600)
+ #define XKCP_has_Sponge_Keccak_width1600
+#endif
+
+#endif
diff --git a/bhv/cnative/TurboSHAKE_opt/KeccakSponge.inc b/bhv/cnative/TurboSHAKE_opt/KeccakSponge.inc
new file mode 100644
index 0000000..923a80d
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/KeccakSponge.inc
@@ -0,0 +1,316 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+Keccak, designed by Guido Bertoni, Joan Daemen, Michaël Peeters and Gilles Van Assche.
+
+Implementation by the designers, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#define JOIN0(a, b) a ## b
+#define JOIN(a, b) JOIN0(a, b)
+
+#define Sponge JOIN(prefix, _Sponge)
+#define SpongeInstance JOIN(prefix, _SpongeInstance)
+#define SpongeInitialize JOIN(prefix, _SpongeInitialize)
+#define SpongeAbsorb JOIN(prefix, _SpongeAbsorb)
+#define SpongeAbsorbLastFewBits JOIN(prefix, _SpongeAbsorbLastFewBits)
+#define SpongeSqueeze JOIN(prefix, _SpongeSqueeze)
+
+#define SnP_stateSizeInBytes JOIN(SnP, _stateSizeInBytes)
+#define SnP_stateAlignment JOIN(SnP, _stateAlignment)
+#define SnP_StaticInitialize JOIN(SnP, _StaticInitialize)
+#define SnP_Initialize JOIN(SnP, _Initialize)
+#define SnP_AddByte JOIN(SnP, _AddByte)
+#define SnP_AddBytes JOIN(SnP, _AddBytes)
+#define SnP_ExtractBytes JOIN(SnP, _ExtractBytes)
+
+int Sponge(unsigned int rate, unsigned int capacity, const unsigned char *input, size_t inputByteLen, unsigned char suffix, unsigned char *output, size_t outputByteLen)
+{
+ ALIGN(SnP_stateAlignment) unsigned char state[SnP_stateSizeInBytes];
+ unsigned int partialBlock;
+ const unsigned char *curInput = input;
+ unsigned char *curOutput = output;
+ unsigned int rateInBytes = rate/8;
+
+ if (rate+capacity != SnP_width)
+ return 1;
+ if ((rate <= 0) || (rate > SnP_width) || ((rate % 8) != 0))
+ return 1;
+ if (suffix == 0)
+ return 1;
+
+ /* Initialize the state */
+ SnP_StaticInitialize();
+ SnP_Initialize(state);
+
+ /* First, absorb whole blocks */
+#ifdef SnP_FastLoop_Absorb
+ if (((rateInBytes % (SnP_width/200)) == 0) && (inputByteLen >= rateInBytes)) {
+ /* fast lane: whole lane rate */
+ size_t j;
+ j = SnP_FastLoop_Absorb(state, rateInBytes/(SnP_width/200), curInput, inputByteLen);
+ curInput += j;
+ inputByteLen -= j;
+ }
+#endif
+ while(inputByteLen >= (size_t)rateInBytes) {
+ #ifdef KeccakReference
+ displayBytes(1, "Block to be absorbed", curInput, rateInBytes);
+ #endif
+ SnP_AddBytes(state, curInput, 0, rateInBytes);
+ SnP_Permute(state);
+ curInput += rateInBytes;
+ inputByteLen -= rateInBytes;
+ }
+
+ /* Then, absorb what remains */
+ partialBlock = (unsigned int)inputByteLen;
+ #ifdef KeccakReference
+ displayBytes(1, "Block to be absorbed (part)", curInput, partialBlock);
+ #endif
+ SnP_AddBytes(state, curInput, 0, partialBlock);
+
+ /* Finally, absorb the suffix */
+ #ifdef KeccakReference
+ {
+ unsigned char delimitedData1[1];
+ delimitedData1[0] = suffix;
+ displayBytes(1, "Block to be absorbed (last few bits + first bit of padding)", delimitedData1, 1);
+ }
+ #endif
+ /* Last few bits, whose delimiter coincides with first bit of padding */
+ SnP_AddByte(state, suffix, partialBlock);
+ /* If the first bit of padding is at position rate-1, we need a whole new block for the second bit of padding */
+ if ((suffix >= 0x80) && (partialBlock == (rateInBytes-1)))
+ SnP_Permute(state);
+ /* Second bit of padding */
+ SnP_AddByte(state, 0x80, rateInBytes-1);
+ #ifdef KeccakReference
+ {
+ unsigned char block[SnP_width/8];
+ memset(block, 0, SnP_width/8);
+ block[rateInBytes-1] = 0x80;
+ displayBytes(1, "Second bit of padding", block, rateInBytes);
+ }
+ #endif
+ SnP_Permute(state);
+ #ifdef KeccakReference
+ displayText(1, "--- Switching to squeezing phase ---");
+ #endif
+
+ /* First, output whole blocks */
+ while(outputByteLen > (size_t)rateInBytes) {
+ SnP_ExtractBytes(state, curOutput, 0, rateInBytes);
+ SnP_Permute(state);
+ #ifdef KeccakReference
+ displayBytes(1, "Squeezed block", curOutput, rateInBytes);
+ #endif
+ curOutput += rateInBytes;
+ outputByteLen -= rateInBytes;
+ }
+
+ /* Finally, output what remains */
+ partialBlock = (unsigned int)outputByteLen;
+ SnP_ExtractBytes(state, curOutput, 0, partialBlock);
+ #ifdef KeccakReference
+ displayBytes(1, "Squeezed block (part)", curOutput, partialBlock);
+ #endif
+
+ return 0;
+}
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+
+int SpongeInitialize(SpongeInstance *instance, unsigned int rate, unsigned int capacity)
+{
+ if (rate+capacity != SnP_width)
+ return 1;
+ if ((rate <= 0) || (rate > SnP_width) || ((rate % 8) != 0))
+ return 1;
+ SnP_StaticInitialize();
+ SnP_Initialize(instance->state);
+ instance->rate = rate;
+ instance->byteIOIndex = 0;
+ instance->squeezing = 0;
+
+ return 0;
+}
+
+/* ---------------------------------------------------------------- */
+
+int SpongeAbsorb(SpongeInstance *instance, const unsigned char *data, size_t dataByteLen)
+{
+ size_t i, j;
+ unsigned int partialBlock;
+ const unsigned char *curData;
+ unsigned int rateInBytes = instance->rate/8;
+
+ if (instance->squeezing)
+ return 1; /* Too late for additional input */
+
+ i = 0;
+ curData = data;
+ while(i < dataByteLen) {
+ if ((instance->byteIOIndex == 0) && (dataByteLen-i >= rateInBytes)) {
+#ifdef SnP_FastLoop_Absorb
+ /* processing full blocks first */
+ if ((rateInBytes % (SnP_width/200)) == 0) {
+ /* fast lane: whole lane rate */
+ j = SnP_FastLoop_Absorb(instance->state, rateInBytes/(SnP_width/200), curData, dataByteLen - i);
+ i += j;
+ curData += j;
+ }
+ else {
+#endif
+ for(j=dataByteLen-i; j>=rateInBytes; j-=rateInBytes) {
+ #ifdef KeccakReference
+ displayBytes(1, "Block to be absorbed", curData, rateInBytes);
+ #endif
+ SnP_AddBytes(instance->state, curData, 0, rateInBytes);
+ SnP_Permute(instance->state);
+ curData+=rateInBytes;
+ }
+ i = dataByteLen - j;
+#ifdef SnP_FastLoop_Absorb
+ }
+#endif
+ }
+ else {
+ /* normal lane: using the message queue */
+ if (dataByteLen-i > rateInBytes-instance->byteIOIndex)
+ partialBlock = rateInBytes-instance->byteIOIndex;
+ else
+ partialBlock = (unsigned int)(dataByteLen - i);
+ #ifdef KeccakReference
+ displayBytes(1, "Block to be absorbed (part)", curData, partialBlock);
+ #endif
+ i += partialBlock;
+
+ SnP_AddBytes(instance->state, curData, instance->byteIOIndex, partialBlock);
+ curData += partialBlock;
+ instance->byteIOIndex += partialBlock;
+ if (instance->byteIOIndex == rateInBytes) {
+ SnP_Permute(instance->state);
+ instance->byteIOIndex = 0;
+ }
+ }
+ }
+ return 0;
+}
+
+/* ---------------------------------------------------------------- */
+
+int SpongeAbsorbLastFewBits(SpongeInstance *instance, unsigned char delimitedData)
+{
+ unsigned int rateInBytes = instance->rate/8;
+
+ if (delimitedData == 0)
+ return 1;
+ if (instance->squeezing)
+ return 1; /* Too late for additional input */
+
+ #ifdef KeccakReference
+ {
+ unsigned char delimitedData1[1];
+ delimitedData1[0] = delimitedData;
+ displayBytes(1, "Block to be absorbed (last few bits + first bit of padding)", delimitedData1, 1);
+ }
+ #endif
+ /* Last few bits, whose delimiter coincides with first bit of padding */
+ SnP_AddByte(instance->state, delimitedData, instance->byteIOIndex);
+ /* If the first bit of padding is at position rate-1, we need a whole new block for the second bit of padding */
+ if ((delimitedData >= 0x80) && (instance->byteIOIndex == (rateInBytes-1)))
+ SnP_Permute(instance->state);
+ /* Second bit of padding */
+ SnP_AddByte(instance->state, 0x80, rateInBytes-1);
+ #ifdef KeccakReference
+ {
+ unsigned char block[SnP_width/8];
+ memset(block, 0, SnP_width/8);
+ block[rateInBytes-1] = 0x80;
+ displayBytes(1, "Second bit of padding", block, rateInBytes);
+ }
+ #endif
+ SnP_Permute(instance->state);
+ instance->byteIOIndex = 0;
+ instance->squeezing = 1;
+ #ifdef KeccakReference
+ displayText(1, "--- Switching to squeezing phase ---");
+ #endif
+ return 0;
+}
+
+/* ---------------------------------------------------------------- */
+
+int SpongeSqueeze(SpongeInstance *instance, unsigned char *data, size_t dataByteLen)
+{
+ size_t i, j;
+ unsigned int partialBlock;
+ unsigned int rateInBytes = instance->rate/8;
+ unsigned char *curData;
+
+ if (!instance->squeezing)
+ SpongeAbsorbLastFewBits(instance, 0x01);
+
+ i = 0;
+ curData = data;
+ while(i < dataByteLen) {
+ if ((instance->byteIOIndex == rateInBytes) && (dataByteLen-i >= rateInBytes)) {
+ for(j=dataByteLen-i; j>=rateInBytes; j-=rateInBytes) {
+ SnP_Permute(instance->state);
+ SnP_ExtractBytes(instance->state, curData, 0, rateInBytes);
+ #ifdef KeccakReference
+ displayBytes(1, "Squeezed block", curData, rateInBytes);
+ #endif
+ curData+=rateInBytes;
+ }
+ i = dataByteLen - j;
+ }
+ else {
+ /* normal lane: using the message queue */
+ if (instance->byteIOIndex == rateInBytes) {
+ SnP_Permute(instance->state);
+ instance->byteIOIndex = 0;
+ }
+ if (dataByteLen-i > rateInBytes-instance->byteIOIndex)
+ partialBlock = rateInBytes-instance->byteIOIndex;
+ else
+ partialBlock = (unsigned int)(dataByteLen - i);
+ i += partialBlock;
+
+ SnP_ExtractBytes(instance->state, curData, instance->byteIOIndex, partialBlock);
+ #ifdef KeccakReference
+ displayBytes(1, "Squeezed block (part)", curData, partialBlock);
+ #endif
+ curData += partialBlock;
+ instance->byteIOIndex += partialBlock;
+ }
+ }
+ return 0;
+}
+
+/* ---------------------------------------------------------------- */
+
+#undef Sponge
+#undef SpongeInstance
+#undef SpongeInitialize
+#undef SpongeAbsorb
+#undef SpongeAbsorbLastFewBits
+#undef SpongeSqueeze
+#undef SnP_stateSizeInBytes
+#undef SnP_stateAlignment
+#undef SnP_StaticInitialize
+#undef SnP_Initialize
+#undef SnP_AddByte
+#undef SnP_AddBytes
+#undef SnP_ExtractBytes
diff --git a/bhv/cnative/TurboSHAKEopt/SnP-Relaned.h b/bhv/cnative/TurboSHAKE_opt/SnP-Relaned.h
similarity index 100%
rename from bhv/cnative/TurboSHAKEopt/SnP-Relaned.h
rename to bhv/cnative/TurboSHAKE_opt/SnP-Relaned.h
diff --git a/bhv/cnative/TurboSHAKE_opt/TurboSHAKE.cpp b/bhv/cnative/TurboSHAKE_opt/TurboSHAKE.cpp
new file mode 100644
index 0000000..00a24bf
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/TurboSHAKE.cpp
@@ -0,0 +1,72 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+TurboSHAKE, proposed by Guido Bertoni, Joan Daemen, Seth Hoffert, Michaël Peeters, Gilles Van Assche, Ronny Van Keer and Benoît Viguier.
+
+Implementation by Gilles Van Assche, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+#ifndef __AVX512BW__
+#include "TurboSHAKE.h"
+
+#ifdef XKCP_has_KeccakP1600
+ #include "KeccakP-1600-SnP.h"
+
+ #define prefix TurboSHAKE
+ #define SnP KeccakP1600
+ #define SnP_width 1600
+ #define SnP_Permute KeccakP1600_Permute_12rounds
+ #if defined(KeccakP1600_12rounds_FastLoop_supported)
+ #define SnP_FastLoop_Absorb KeccakP1600_12rounds_FastLoop_Absorb
+ #endif
+ #include "KeccakSponge.inc"
+ #undef prefix
+ #undef SnP
+ #undef SnP_width
+ #undef SnP_Permute
+ #undef SnP_FastLoop_Absorb
+#endif
+
+XKCP_DeclareSpongeFunctions(TurboSHAKE)
+
+int TurboSHAKE(unsigned int capacity, const unsigned char *input, size_t inputByteLen, unsigned char domain, unsigned char *output, size_t outputByteLen)
+{
+ TurboSHAKE_Instance instance;
+
+ if (TurboSHAKE_Initialize(&instance, capacity)) return 1;
+ if (TurboSHAKE_Absorb(&instance, input, inputByteLen)) return 1;
+ if (TurboSHAKE_AbsorbDomainSeparationByte(&instance, domain)) return 1;
+ if (TurboSHAKE_Squeeze(&instance, output, outputByteLen)) return 1;
+ return 0;
+}
+
+int TurboSHAKE_Initialize(TurboSHAKE_Instance *instance, unsigned int capacity)
+{
+ if ((capacity > 512) || ((capacity % 8) != 0))
+ return 1;
+ else
+ return TurboSHAKE_SpongeInitialize(instance, 1600-capacity, capacity);
+}
+
+int TurboSHAKE_Absorb(TurboSHAKE_Instance *instance, const unsigned char *data, size_t dataByteLen)
+{
+ return TurboSHAKE_SpongeAbsorb(instance, data, dataByteLen);
+}
+
+int TurboSHAKE_AbsorbDomainSeparationByte(TurboSHAKE_Instance *instance, unsigned char domain)
+{
+ return TurboSHAKE_SpongeAbsorbLastFewBits(instance, domain);
+}
+
+int TurboSHAKE_Squeeze(TurboSHAKE_Instance *instance, unsigned char *data, size_t dataByteLen)
+{
+ return TurboSHAKE_SpongeSqueeze(instance, data, dataByteLen);
+}
+#endif
diff --git a/bhv/cnative/TurboSHAKE_opt/TurboSHAKE.h b/bhv/cnative/TurboSHAKE_opt/TurboSHAKE.h
new file mode 100644
index 0000000..8bdd94e
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/TurboSHAKE.h
@@ -0,0 +1,49 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+TurboSHAKE, proposed by Guido Bertoni, Joan Daemen, Seth Hoffert, Michaël Peeters, Gilles Van Assche, Ronny Van Keer and Benoît Viguier.
+
+Implementation by Gilles Van Assche, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#ifndef _TurboSHAKE_h_
+#define _TurboSHAKE_h_
+
+#include <string.h>
+#include "align.h"
+#include "config.h"
+#include "KeccakSponge.h"
+
+#ifdef XKCP_has_KeccakP1600
+ #include "KeccakP-1600-SnP.h"
+ XKCP_DeclareSpongeStructure(TurboSHAKE, KeccakP1600_stateSizeInBytes, KeccakP1600_stateAlignment)
+
+typedef TurboSHAKE_SpongeInstance TurboSHAKE_Instance;
+
+int TurboSHAKE(unsigned int capacity, const unsigned char *input, size_t inputByteLen, unsigned char domain, unsigned char *output, size_t outputByteLen);
+
+int TurboSHAKE_Initialize(TurboSHAKE_Instance *instance, unsigned int capacity);
+
+#define TurboSHAKE128_Initialize(instance) \
+ TurboSHAKE_Initialize((instance), 256)
+
+#define TurboSHAKE256_Initialize(instance) \
+ TurboSHAKE_Initialize((instance), 512)
+
+int TurboSHAKE_Absorb(TurboSHAKE_Instance *instance, const unsigned char *data, size_t dataByteLen);
+
+int TurboSHAKE_AbsorbDomainSeparationByte(TurboSHAKE_Instance *instance, unsigned char domain);
+
+int TurboSHAKE_Squeeze(TurboSHAKE_Instance *instance, unsigned char *data, size_t dataByteLen);
+
+#endif
+
+#endif
diff --git a/bhv/cnative/TurboSHAKE_opt/align.h b/bhv/cnative/TurboSHAKE_opt/align.h
new file mode 100644
index 0000000..82ad2f9
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/align.h
@@ -0,0 +1,33 @@
+/*
+The eXtended Keccak Code Package (XKCP)
+https://github.com/XKCP/XKCP
+
+Implementation by Gilles Van Assche and Ronny Van Keer, hereby denoted as "the implementer".
+
+For more information, feedback or questions, please refer to the Keccak Team website:
+https://keccak.team/
+
+To the extent possible under law, the implementer has waived all copyright
+and related or neighboring rights to the source code in this file.
+http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+#ifndef _align_h_
+#define _align_h_
+
+/* on Mac OS-X and possibly others, ALIGN(x) is defined in param.h, and -Werror chokes on the redef. */
+#ifdef ALIGN
+#undef ALIGN
+#endif
+
+#if defined(__GNUC__)
+#define ALIGN(x) __attribute__ ((aligned(x)))
+#elif defined(_MSC_VER)
+#define ALIGN(x) __declspec(align(x))
+#elif defined(__ARMCC_VERSION)
+#define ALIGN(x) __align(x)
+#else
+#define ALIGN(x)
+#endif
+
+#endif
diff --git a/bhv/cnative/TurboSHAKE_opt/brg_endian.h b/bhv/cnative/TurboSHAKE_opt/brg_endian.h
new file mode 100644
index 0000000..7c640b9
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/brg_endian.h
@@ -0,0 +1,143 @@
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The redistribution and use of this software (with or without changes)
+ is allowed without the payment of fees or royalties provided that:
+
+ 1. source code distributions include the above copyright notice, this
+ list of conditions and the following disclaimer;
+
+ 2. binary distributions include the above copyright notice, this list
+ of conditions and the following disclaimer in their documentation;
+
+ 3. the name of the copyright holder is not used to endorse products
+ built using this software without specific written permission.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 20/12/2007
+ Changes for ARM 9/9/2010
+*/
+
+#ifndef _BRG_ENDIAN_H
+#define _BRG_ENDIAN_H
+
+#define IS_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */
+#define IS_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
+
+#if 0
+/* Include files where endian defines and byteswap functions may reside */
+#if defined( __sun )
+# include <sys/isa_defs.h>
+#elif defined( __FreeBSD__ ) || defined( __OpenBSD__ ) || defined( __NetBSD__ )
+# include <sys/endian.h>
+#elif defined( BSD ) && ( BSD >= 199103 ) || defined( __APPLE__ ) || \
+ defined( __CYGWIN32__ ) || defined( __DJGPP__ ) || defined( __osf__ )
+# include <machine/endian.h>
+#elif defined( __linux__ ) || defined( __GNUC__ ) || defined( __GNU_LIBRARY__ )
+# if !defined( __MINGW32__ ) && !defined( _AIX )
+# include <endian.h>
+# if !defined( __BEOS__ )
+# include <byteswap.h>
+# endif
+# endif
+#endif
+#endif
+
+/* Now attempt to set the define for platform byte order using any */
+/* of the four forms SYMBOL, _SYMBOL, __SYMBOL & __SYMBOL__, which */
+/* seem to encompass most endian symbol definitions */
+
+#if defined( BIG_ENDIAN ) && defined( LITTLE_ENDIAN )
+# if defined( BYTE_ORDER ) && BYTE_ORDER == BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( BYTE_ORDER ) && BYTE_ORDER == LITTLE_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( BIG_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( LITTLE_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+#if defined( _BIG_ENDIAN ) && defined( _LITTLE_ENDIAN )
+# if defined( _BYTE_ORDER ) && _BYTE_ORDER == _BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( _BYTE_ORDER ) && _BYTE_ORDER == _LITTLE_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( _BIG_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( _LITTLE_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+#if defined( __BIG_ENDIAN ) && defined( __LITTLE_ENDIAN )
+# if defined( __BYTE_ORDER ) && __BYTE_ORDER == __BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( __BYTE_ORDER ) && __BYTE_ORDER == __LITTLE_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( __BIG_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( __LITTLE_ENDIAN )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+#if defined( __BIG_ENDIAN__ ) && defined( __LITTLE_ENDIAN__ )
+# if defined( __BYTE_ORDER__ ) && __BYTE_ORDER__ == __BIG_ENDIAN__
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# elif defined( __BYTE_ORDER__ ) && __BYTE_ORDER__ == __LITTLE_ENDIAN__
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif defined( __BIG_ENDIAN__ )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#elif defined( __LITTLE_ENDIAN__ )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#endif
+
+/* if the platform byte order could not be determined, then try to */
+/* set this define using common machine defines */
+#if !defined(PLATFORM_BYTE_ORDER)
+
+#if defined( __alpha__ ) || defined( __alpha ) || defined( i386 ) || \
+ defined( __i386__ ) || defined( _M_I86 ) || defined( _M_IX86 ) || \
+ defined( __OS2__ ) || defined( sun386 ) || defined( __TURBOC__ ) || \
+ defined( vax ) || defined( vms ) || defined( VMS ) || \
+ defined( __VMS ) || defined( _M_X64 )
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+
+#elif defined( AMIGA ) || defined( applec ) || defined( __AS400__ ) || \
+ defined( _CRAY ) || defined( __hppa ) || defined( __hp9000 ) || \
+ defined( ibm370 ) || defined( mc68000 ) || defined( m68k ) || \
+ defined( __MRC__ ) || defined( __MVS__ ) || defined( __MWERKS__ ) || \
+ defined( sparc ) || defined( __sparc) || defined( SYMANTEC_C ) || \
+ defined( __VOS__ ) || defined( __TIGCC__ ) || defined( __TANDEM ) || \
+ defined( THINK_C ) || defined( __VMCMS__ ) || defined( _AIX ) || \
+ defined( __s390__ ) || defined( __s390x__ ) || defined( __zarch__ )
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+
+#elif defined(__arm__)
+# ifdef __BIG_ENDIAN
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+# else
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+# endif
+#elif 1 /* **** EDIT HERE IF NECESSARY **** */
+# define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
+#elif 0 /* **** EDIT HERE IF NECESSARY **** */
+# define PLATFORM_BYTE_ORDER IS_BIG_ENDIAN
+#else
+# error Please edit lines 132 or 134 in brg_endian.h to set the platform byte order
+#endif
+
+#endif
+
+#endif
diff --git a/bhv/cnative/TurboSHAKE_opt/config.h b/bhv/cnative/TurboSHAKE_opt/config.h
new file mode 100644
index 0000000..586c4c0
--- /dev/null
+++ b/bhv/cnative/TurboSHAKE_opt/config.h
@@ -0,0 +1,5 @@
+/* File generated by ToTargetConfigFile.xsl */
+
+#define XKCP_has_Sponge_Keccak
+#define XKCP_has_TurboSHAKE
+#define XKCP_has_KeccakP1600
diff --git a/bhv/cnative/benchmark.cpp b/bhv/cnative/benchmark.cpp
new file mode 100644
index 0000000..df1291b
--- /dev/null
+++ b/bhv/cnative/benchmark.cpp
@@ -0,0 +1,943 @@
+#include <iostream>
+#include <chrono>
+#include <functional>
+
+#include "core.h"
+
+using namespace std;
+
+#define DO_VALIDATION true
+
+#define MAJ_INPUT_HYPERVECTOR_COUNT 1000001
+#define INPUT_HYPERVECTOR_COUNT 1000
+
+#define THRESHOLD
+#define MAJ
+#define RAND
+#define RAND2
+#define RANDOM
+#define PERMUTE
+#define ACTIVE
+#define HAMMING
+#define INVERT
+#define SWAP_HALVES
+#define REHASH
+#define AND
+#define OR
+#define XOR
+#define SELECT
+#define MAJ3
+#define TERNARY
+
+
+float threshold_benchmark(size_t n, size_t threshold, float af, bool display, bool keep_in_cache) {
+ //For the simple cases, like 3 vectors, we want a lot of tests to get a reliable number
+ //but allocating 2,000 vectors * 10,000 tests starts to exceed resident memory and we end
+ //up paying disk swap penalties. Therefore we do fewer tests in the case with more hypervectors
+ const size_t test_count = MAJ_INPUT_HYPERVECTOR_COUNT / n;
+ const size_t input_output_count = (keep_in_cache ? 1 : test_count);
+
+ //Init n random vectors for each test
+ word_t ***inputs = (word_t***)malloc(sizeof(word_t**) * input_output_count);
+ for (size_t i = 0; i < input_output_count; i++) {
+ word_t **rs = (word_t **) malloc(sizeof(word_t **) * n);
+ for (size_t j = 0; j < n; ++j) {
+ rs[j] = bhv::random(af);
+ }
+ inputs[i] = rs;
+ }
+
+ //Allocate a buffer for TEST_COUNT results
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ // Gotta assign the result to a volatile, so the test operation doesn't get optimized away
+ volatile word_t something = 0;
+ volatile word_t something_else = 0;
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; i++) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+ word_t **rs = inputs[io_buf_idx];
+
+ bhv::threshold_into(rs, n, threshold, m);
+
+ // So the test operation doesn't get optimized away
+ something = something ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ const char* validation_status;
+ if (DO_VALIDATION) {
+ for (size_t i = 0; i < test_count; i++) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+ word_t **rs = inputs[io_buf_idx];
+
+ bhv::threshold_into_reference(rs, n, threshold, m);
+
+ something_else = something_else ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ validation_status = ((something == something_else) ? "equiv: √, " : "equiv: X, ");
+ } else {
+ validation_status = "";
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << n << " hypervectors, " << threshold << " threshold, " << validation_status << "in_cache: " << keep_in_cache
+ << ", total: " << mean_test_time / 1000.0 << "µs, normalized: " << mean_test_time / (float) n << "ns/vec" << endl;
+
+ //Clean up our mess
+ for (size_t i = 0; i < input_output_count; i++) {
+ word_t **rs = inputs[i];
+ for (size_t j = 0; j < n; ++j) {
+ free(rs[j]);
+ }
+ free(rs);
+ }
+ free(result_buffer);
+ free(inputs);
+
+ return mean_test_time;
+}
+
+float majority_benchmark(size_t n, bool display, bool keep_in_cache) {
+ //For the simple cases, like 3 vectors, we want a lot of tests to get a reliable number
+ //but allocating 2,000 vectors * 10,000 tests starts to exceed resident memory and we end
+ //up paying disk swap penalties. Therefore we do fewer tests in the case with more hypervectors
+ const size_t test_count = MAJ_INPUT_HYPERVECTOR_COUNT / n;
+ const size_t input_output_count = (keep_in_cache ? 1 : test_count);
+
+ //Init n random vectors for each test
+ word_t ***inputs = (word_t***)malloc(sizeof(word_t**) * input_output_count);
+ for (size_t i = 0; i < input_output_count; i++) {
+ word_t **rs = (word_t **) malloc(sizeof(word_t **) * n);
+ for (size_t j = 0; j < n; ++j) {
+ rs[j] = bhv::rand();
+ }
+ inputs[i] = rs;
+ }
+
+ //Allocate a buffer for TEST_COUNT results
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ // Gotta assign the result to a volatile, so the test operation doesn't get optimized away
+ volatile word_t something = 0;
+ volatile word_t something_else = 0;
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; i++) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+ word_t **rs = inputs[io_buf_idx];
+
+ bhv::true_majority_into(rs, n, m);
+
+ // So the test operation doesn't get optimized away
+ something = something ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ const char* validation_status;
+ if (DO_VALIDATION) {
+ for (size_t i = 0; i < test_count; i++) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+ word_t **rs = inputs[io_buf_idx];
+
+ bhv::threshold_into_reference(rs, n, n/2, m);
+
+ something_else = something_else ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ validation_status = ((something == something_else) ? "equiv: √, " : "equiv: X, ");
+ } else {
+ validation_status = "";
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << n << " hypervectors, " << validation_status << "in_cache: " << keep_in_cache << ", total: " << mean_test_time / 1000.0
+ << "µs, normalized: " << mean_test_time / (float) n << "ns/vec" << endl;
+
+ //Clean up our mess
+ for (size_t i = 0; i < input_output_count; i++) {
+ word_t **rs = inputs[i];
+ for (size_t j = 0; j < n; ++j) {
+ free(rs[j]);
+ }
+ free(rs);
+ }
+ free(result_buffer);
+ free(inputs);
+
+ return mean_test_time;
+}
+
+
+float rand_benchmark(bool display, bool keep_in_cache) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+ const int input_output_count = (keep_in_cache ? 1 : test_count);
+
+ //Allocate a buffer for TEST_COUNT results
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ // Gotta assign the result to a volatile, so the test operation doesn't get optimized away
+ volatile word_t something = 0;
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+
+ bhv::rand_into_reference(m);
+
+ something = something ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << "in_cache: " << keep_in_cache << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ //Clean up our mess
+ free(result_buffer);
+
+ return mean_test_time;
+}
+
+float rand2_benchmark(bool display, bool keep_in_cache, int pow) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+ const int input_output_count = (keep_in_cache ? 1 : test_count);
+ volatile int p = pow;
+
+ //Allocate a buffer for TEST_COUNT results
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ double observed_frac[test_count];
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+
+ bhv::rand2_into(m, p);
+
+ // once runtime of random_into drops under 500ns, consider removing this
+ observed_frac[i] = (double) bhv::active(m) / (double) BITS;
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ std::sort(observed_frac, observed_frac + test_count);
+ double mean_observed =
+ std::reduce(observed_frac, observed_frac + test_count, 0., std::plus<double>()) / (double) test_count;
+ mean_observed = mean_observed > .5 ? 1 + std::log2(1 - mean_observed) : -1 - std::log2(mean_observed);
+ if (display)
+ cout << pow << "pow, observed: " << mean_observed << ", in_cache: " << keep_in_cache << ", total: "
+ << mean_test_time / 1000.0 << "µs" << endl;
+
+ //Clean up our mess
+ free(result_buffer);
+
+ return mean_test_time;
+}
+
+
+float random_benchmark(bool display, bool keep_in_cache, float base_frac, bool randomize = false) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+ const int input_output_count = (keep_in_cache ? 1 : test_count);
+ volatile double n = randomize ? ((double) (rand() - RAND_MAX / 2) / (double) (RAND_MAX)) / 1000. : 0;
+ float p = base_frac + n;
+
+ //Allocate a buffer for TEST_COUNT results
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ double observed_frac[test_count];
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+
+// bhv::random_into_reference(m, p);
+ bhv::random_into_tree_sparse_avx2(m, p);
+// bhv::random_into_buffer_avx2(m, p);
+
+ // once runtime of random_into drops under 500ns, consider removing this
+ observed_frac[i] = (double) bhv::active(m) / (double) BITS;
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ std::sort(observed_frac, observed_frac + test_count);
+ double mean_observed_frac =
+ std::reduce(observed_frac, observed_frac + test_count, 0., std::plus<double>()) / (double) test_count - n;
+ if (display)
+ cout << base_frac << "frac, observed: " << abs(base_frac - mean_observed_frac) << ", in_cache: "
+ << keep_in_cache << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ //Clean up our mess
+ free(result_buffer);
+
+ return mean_test_time;
+}
+
+template<bool keep_in_cache, int different_permutations>
+float permute_benchmark(bool display) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT * different_permutations * 2;
+
+ bool correct = true;
+ double total_test_time = 0.;
+ int perms [different_permutations];
+ for (size_t i = 0; i < different_permutations; ++i)
+ perms[i] = std::abs(std::rand());
+
+ if constexpr (keep_in_cache) {
+ word_t *forward [INPUT_HYPERVECTOR_COUNT][different_permutations + 1];
+ word_t *backward [INPUT_HYPERVECTOR_COUNT][different_permutations + 1];
+ // different_permutations=3
+ // forward: R p0(R) p1(p0(R)) p2(p1(p0(R)))
+ // | hopefully equal set equal |
+ // backward: p-0(p-1(p-2(p2(p1(p0(R)))))) p-1(p-2(p2(p1(p0(R))))) p-2(p2(p1(p0(R)))) p2(p1(p0(R)))
+
+ auto t1 = chrono::high_resolution_clock::now();
+
+ for (size_t i = 0; i < INPUT_HYPERVECTOR_COUNT; ++i) {
+ forward[i][0] = bhv::rand(); // TODO this and the eq should be outside of the timing
+
+ for (size_t j = 0; j < different_permutations; ++j)
+ forward[i][j + 1] = bhv::permute(forward[i][j], perms[j]);
+
+ backward[i][different_permutations] = forward[i][different_permutations];
+
+ for (size_t j = different_permutations; j > 0; --j)
+ backward[i][j - 1] = bhv::permute(backward[i][j], -perms[j - 1]);
+
+ correct &= bhv::eq(forward[i][0], backward[i][0]);
+ }
+
+ auto t2 = chrono::high_resolution_clock::now();
+ total_test_time = (double) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count();
+
+ for (size_t i = 0; i < INPUT_HYPERVECTOR_COUNT; ++i) {
+ for (size_t j = 0; j < different_permutations; ++j) free(forward[i][j]);
+ for (size_t j = 0; j < different_permutations - 1; ++j) free(backward[i][j]); // -1, don't double free
+ }
+ } else {
+// word_t *running = bhv::rand();
+ assert(false); // TODO
+ }
+
+ double mean_test_time = total_test_time / (double) test_count;
+ if (display)
+ cout << "correctly inverted: " << (correct ? "v" : "x") << ", in_cache: "
+ << keep_in_cache << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ return mean_test_time;
+}
+
+float active_benchmark(bool display) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+
+ word_t *hvs [test_count];
+
+ for (size_t i = 0; i < test_count; ++i) {
+ hvs[i] = bhv::random((float)i/(float)test_count);
+ }
+
+ uint32_t observed_active[test_count];
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ word_t *m = hvs[i];
+ observed_active[i] = bhv::active(m);
+ // observed_active[i] = bhv::active_avx512(m);
+ // observed_active[i] = bhv::active_adder_avx2(m);
+ // observed_active[i] = bhv::active_reference(m);
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ bool correct = true;
+ for (size_t i = 0; i < test_count; ++i) {
+ correct &= observed_active[i] == bhv::active_reference(hvs[i]);
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << "correct " << (correct ? "v" : "x") << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ return mean_test_time;
+}
+
+float hamming_benchmark(bool display) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT/2;
+
+ word_t *as [test_count];
+ word_t *bs [test_count];
+
+ for (size_t i = 0; i < test_count; ++i) {
+ as[i] = bhv::random((float)i/(float)test_count);
+ bs[i] = bhv::random((float)(test_count - i)/(float)test_count);
+ }
+
+ uint32_t observed_distance[test_count];
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ observed_distance[i] = bhv::hamming(as[i], bs[i]);
+ // observed_distance[i] = bhv::hamming_reference(as[i], bs[i]);
+ // observed_distance[i] = bhv::hamming_adder_avx2(as[i], bs[i]);
+ // observed_distance[i] = bhv::hamming_avx512(as[i], bs[i]);
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ bool correct = true;
+ for (size_t i = 0; i < test_count; ++i) {
+ correct &= observed_distance[i] == bhv::hamming_reference(as[i], bs[i]);
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << "correct " << (correct ? "v" : "x") << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ return mean_test_time;
+}
+
+template <void F(word_t*, word_t*), void FC(word_t*, word_t*)>
+float unary_benchmark(bool display, bool keep_in_cache) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+ const int input_output_count = (keep_in_cache ? 1 : test_count);
+
+ word_t *hvs [test_count];
+
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ for (size_t i = 0; i < test_count; ++i)
+ hvs[i] = bhv::random((float)i/(float)test_count);
+
+ volatile word_t something = 0;
+ volatile word_t something_else = 0;
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+
+ F(hvs[i], m);
+
+ something = something ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ for (size_t i = 0; i < test_count; ++i) {
+ word_t *m = bhv::empty();
+
+ FC(hvs[i], m);
+
+ something_else = something_else ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << "equiv " << ((something == something_else) ? "v" : "x") << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ return mean_test_time;
+}
+
+template <void F(word_t*, word_t*, word_t*), void FC(word_t*, word_t*, word_t*)>
+float binary_benchmark(bool display, bool keep_in_cache) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+ const int input_output_count = (keep_in_cache ? 1 : test_count);
+
+ word_t *hvs0 [test_count];
+ word_t *hvs1 [test_count];
+
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ for (size_t i = 0; i < test_count; ++i)
+ hvs0[i] = bhv::random((float)i/(float)test_count);
+
+ memcpy(hvs1, hvs0, test_count * sizeof(word_t *));
+ std::shuffle(hvs1, hvs1 + test_count, bhv::rng);
+
+ volatile word_t something = 0;
+ volatile word_t something_else = 0;
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+
+ F(hvs0[i], hvs1[i], m);
+
+ something = something ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ for (size_t i = 0; i < test_count; ++i) {
+ word_t *m = bhv::empty();
+
+ FC(hvs0[i], hvs1[i], m);
+
+ something_else = something_else ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << "equiv " << ((something == something_else) ? "v" : "x") << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ return mean_test_time;
+}
+
+typedef void(*TernaryFunc)(word_t*, word_t*, word_t*, word_t*);
+
+template <TernaryFunc F, TernaryFunc FC>
+float ternary_benchmark(bool display, bool keep_in_cache) {
+ const int test_count = INPUT_HYPERVECTOR_COUNT;
+ const int input_output_count = (keep_in_cache ? 1 : test_count);
+
+ word_t *hvs0 [test_count];
+ word_t *hvs1 [test_count];
+ word_t *hvs2 [test_count];
+
+ word_t *result_buffer = (word_t *) malloc(input_output_count * BYTES);
+
+ for (size_t i = 0; i < test_count; ++i)
+ hvs0[i] = bhv::random((float)i/(float)test_count);
+
+ memcpy(hvs1, hvs0, test_count * sizeof(word_t *));
+ std::shuffle(hvs1, hvs1 + test_count, bhv::rng);
+ memcpy(hvs2, hvs0, test_count * sizeof(word_t *));
+ std::shuffle(hvs2, hvs2 + test_count, bhv::rng);
+
+ volatile word_t something = 0;
+ volatile word_t something_else = 0;
+
+ auto t1 = chrono::high_resolution_clock::now();
+ for (size_t i = 0; i < test_count; ++i) {
+ const size_t io_buf_idx = (keep_in_cache ? 0 : i);
+
+ word_t *m = result_buffer + (io_buf_idx * BYTES / sizeof(word_t));
+
+ F(hvs0[i], hvs1[i], hvs2[i], m);
+
+ something = something ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+ auto t2 = chrono::high_resolution_clock::now();
+
+ for (size_t i = 0; i < test_count; ++i) {
+ word_t *m = bhv::empty();
+
+ FC(hvs0[i], hvs1[i], hvs2[i], m);
+
+ something_else = something_else ^ m[0] + 3 * m[4] + 5 * m[WORDS / 2] + 7 * m[WORDS - 1];
+ }
+
+ float mean_test_time = (float) chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count() / (float) test_count;
+ if (display)
+ cout << "equiv " << ((something == something_else) ? "v" : "x") << ", total: " << mean_test_time / 1000.0 << "µs" << endl;
+
+ return mean_test_time;
+}
+
+inline void simulated_select(word_t *x, word_t *y, word_t *z, word_t *target) {
+ bhv::dynamic_ternary_into_reference(x, y, z, target, 0xca);
+};
+
+inline void simulated_maj3(word_t *x, word_t *y, word_t *z, word_t *target) {
+ bhv::dynamic_ternary_into_reference(x, y, z, target, 0xe8);
+};
+
+inline void simulated_any(word_t *x, word_t *y, word_t *z, word_t *target) {
+ bhv::dynamic_ternary_into_reference(x, y, z, target, 0b11111110);
+};
+
+void __attribute__ ((noinline)) any_via_threshold(word_t *x, word_t *y, word_t *z, word_t *target) {
+ word_t *xs [3] = {x, y, z};
+ bhv::threshold_into(xs, 3, 0, target);
+};
+
+
+int main() {
+ cout << "*-= WARMUP =-*" << endl;
+ // burn some cycles to get the OS's attention
+ volatile uint64_t x = 0x7834d688d8827099ULL;
+ for (size_t i = 0; i < 50000000; ++i)
+ x = x + (x % 7);
+
+ cout << "*-= STARTING (" << x << ") =-*" << endl;
+#ifdef TERNARY
+
+ ternary_benchmark<simulated_select, bhv::select_into_reference>(false, true);
+
+ cout << "*-= TERNARY =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ ternary_benchmark<simulated_select, bhv::select_into_reference>(true, true);
+ ternary_benchmark<simulated_maj3, bhv::majority3_into>(true, true);
+ ternary_benchmark<simulated_any, any_via_threshold>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ ternary_benchmark<simulated_select, bhv::select_into_reference>(true, false);
+ ternary_benchmark<simulated_maj3, bhv::majority3_into>(true, false);
+ ternary_benchmark<simulated_any, any_via_threshold>(true, false);
+#endif
+#ifdef MAJ3
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(false, true);
+
+ cout << "*-= MAJ3 =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(true, true);
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(true, true);
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(true, false);
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(true, false);
+ ternary_benchmark<bhv::majority3_into, bhv::majority3_into_reference>(true, false);
+#endif
+#ifdef SELECT
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(false, true);
+
+ cout << "*-= SELECT =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(true, true);
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(true, true);
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(true, false);
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(true, false);
+ ternary_benchmark<bhv::select_into, bhv::select_into_reference>(true, false);
+#endif
+#ifdef XOR
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(false, true);
+
+ cout << "*-= XOR =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, true);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, true);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, false);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, false);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, false);
+#endif
+#ifdef XOR
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(false, true);
+
+ cout << "*-= XOR =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, true);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, true);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, false);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, false);
+ binary_benchmark<bhv::xor_into, bhv::xor_into>(true, false);
+#endif
+#ifdef OR
+ binary_benchmark<bhv::or_into, bhv::or_into>(false, true);
+
+ cout << "*-= OR =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::or_into, bhv::or_into>(true, true);
+ binary_benchmark<bhv::or_into, bhv::or_into>(true, true);
+ binary_benchmark<bhv::or_into, bhv::or_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::or_into, bhv::or_into>(true, false);
+ binary_benchmark<bhv::or_into, bhv::or_into>(true, false);
+ binary_benchmark<bhv::or_into, bhv::or_into>(true, false);
+#endif
+#ifdef AND
+ binary_benchmark<bhv::and_into, bhv::and_into>(false, true);
+
+ cout << "*-= AND =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::and_into, bhv::and_into>(true, true);
+ binary_benchmark<bhv::and_into, bhv::and_into>(true, true);
+ binary_benchmark<bhv::and_into, bhv::and_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ binary_benchmark<bhv::and_into, bhv::and_into>(true, false);
+ binary_benchmark<bhv::and_into, bhv::and_into>(true, false);
+ binary_benchmark<bhv::and_into, bhv::and_into>(true, false);
+#endif
+#ifdef REHASH
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(false, true);
+
+ cout << "*-= REHASH =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(true, true);
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(true, true);
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(true, false);
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(true, false);
+ unary_benchmark<bhv::rehash_into, bhv::rehash_into>(true, false);
+#endif
+#ifdef SWAP_HALVES
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(false, true);
+
+ cout << "*-= SWAP_HALVES =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(true, true);
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(true, true);
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(true, false);
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(true, false);
+ unary_benchmark<bhv::swap_halves_into, bhv::swap_halves_into>(true, false);
+#endif
+#ifdef INVERT
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(false, true);
+
+ cout << "*-= INVERT =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(true, true);
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(true, true);
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(true, false);
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(true, false);
+ unary_benchmark<bhv::invert_into, bhv::invert_into>(true, false);
+#endif
+#ifdef HAMMING
+ hamming_benchmark(false);
+
+ cout << "*-= HAMMING =-*" << endl;
+ hamming_benchmark(true);
+ hamming_benchmark(true);
+ hamming_benchmark(true);
+#endif
+#ifdef ACTIVE
+ active_benchmark(false);
+
+ cout << "*-= ACTIVE =-*" << endl;
+ active_benchmark(true);
+ active_benchmark(true);
+ active_benchmark(true);
+#endif
+#ifdef PERMUTE
+ permute_benchmark<true, 100>(false);
+
+ cout << "*-= PERMUTE =-*" << endl;
+ permute_benchmark<true, 100>(true);
+ permute_benchmark<true, 100>(true);
+ permute_benchmark<true, 100>(true);
+#endif
+#ifdef RAND
+ rand_benchmark(false, false);
+
+ cout << "*-= RAND =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ rand_benchmark(true, true);
+ rand_benchmark(true, true);
+ rand_benchmark(true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ rand_benchmark(true, false);
+ rand_benchmark(true, false);
+ rand_benchmark(true, false);
+#endif
+#ifdef RAND2
+ int8_t pws[33];
+ for (int8_t i = -16; i < 17; ++i)
+ pws[i + 16] = i;
+
+ rand2_benchmark(false, false, 4);
+ cout << "*-= RAND2 =-*" << endl;
+
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ for (float p : pws)
+ rand2_benchmark(true, true, p);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ for (float p : pws)
+ rand2_benchmark(true, false, p);
+#endif
+#ifdef RANDOM
+ random_benchmark(false, false, .1);
+
+ cout << "*-= RANDOM =-*" << endl;
+ cout << "*-= COMMON =-*" << endl;
+ float common[13] = {.001, .01, .04, .2, .26, .48, .5, .52, .74,.8, .95, .99, .999};
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ double total = 0.;
+ for (float p: common)
+ total += random_benchmark(true, true, p);
+ cout << "total: " << total << endl;
+ total = 0;
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ for (float p: common)
+ total += random_benchmark(true, true, p);
+ cout << "total: " << total << endl;
+
+ cout << "*-= SMALL =-*" << endl;
+ float small[9] = {1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2};
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ total = 0.;
+ for (float p: small)
+ total += random_benchmark(true, true, p);
+ cout << "total: " << total << endl;
+ total = 0;
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ for (float p: small)
+ total += random_benchmark(true, false, p);
+ cout << "total: " << total << endl;
+
+ cout << "*-= PERCENTAGES =-*" << endl;
+ float perc[99];
+ for (size_t i = 1; i < 100; ++i)
+ perc[i - 1] = (float) i / 100.f;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ total = 0.;
+ for (float p: perc)
+ total += random_benchmark(true, false, p);
+ cout << "total: " << total << endl;
+ total = 0;
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ for (float p: perc)
+ total += random_benchmark(true, false, p);
+ cout << "total: " << total << endl;
+#endif
+#ifdef MAJ
+ //Run one throw-away test to make sure the OS is ready to give us full resource
+ majority_benchmark(3, false, false);
+
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ majority_benchmark(3, true, true);
+ majority_benchmark(5, true, true);
+ majority_benchmark(7, true, true);
+ majority_benchmark(9, true, true);
+ majority_benchmark(11, true, true);
+ majority_benchmark(15, true, true);
+ majority_benchmark(17, true, true);
+ majority_benchmark(19, true, true);
+ majority_benchmark(21, true, true);
+ majority_benchmark(23, true, true);
+ majority_benchmark(25, true, true);
+ majority_benchmark(27, true, true);
+ majority_benchmark(39, true, true);
+ majority_benchmark(47, true, true);
+ majority_benchmark(55, true, true);
+ majority_benchmark(63, true, true);
+ majority_benchmark(73, true, true);
+ majority_benchmark(77, true, true);
+ majority_benchmark(79, true, true);
+ majority_benchmark(81, true, true);
+ majority_benchmark(85, true, true);
+ majority_benchmark(89, true, true);
+ majority_benchmark(91, true, true);
+ majority_benchmark(109, true, true);
+ majority_benchmark(175, true, true);
+ majority_benchmark(201, true, true);
+ majority_benchmark(255, true, true);
+
+ majority_benchmark(257, true, true);
+ majority_benchmark(385, true, true);
+ majority_benchmark(511, true, true);
+ majority_benchmark(667, true, true);
+ majority_benchmark(881, true, true);
+ majority_benchmark(945, true, true);
+ majority_benchmark(1021, true, true);
+ majority_benchmark(2001, true, true);
+ majority_benchmark(5001, true, true);
+ majority_benchmark(9999, true, true);
+ majority_benchmark(10003, true, true);
+ majority_benchmark(20001, true, true);
+ majority_benchmark(200001, true, true);
+ majority_benchmark(1000001, true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ majority_benchmark(3, true, false);
+ majority_benchmark(5, true, false);
+ majority_benchmark(7, true, false);
+ majority_benchmark(9, true, false);
+ majority_benchmark(11, true, false);
+ majority_benchmark(27, true, false);
+ majority_benchmark(39, true, false);
+ majority_benchmark(47, true, false);
+ majority_benchmark(55, true, false);
+ majority_benchmark(63, true, false);
+ majority_benchmark(73, true, false);
+ majority_benchmark(77, true, false);
+ majority_benchmark(79, true, false);
+ majority_benchmark(81, true, false);
+ majority_benchmark(85, true, false);
+ majority_benchmark(89, true, false);
+ majority_benchmark(91, true, false);
+ majority_benchmark(109, true, false);
+ majority_benchmark(175, true, false);
+ majority_benchmark(201, true, false);
+ majority_benchmark(255, true, false);
+ majority_benchmark(257, true, false);
+ majority_benchmark(313, true, false);
+ majority_benchmark(385, true, false);
+ majority_benchmark(511, true, false);
+ majority_benchmark(667, true, false);
+ majority_benchmark(881, true, false);
+ majority_benchmark(945, true, false);
+ majority_benchmark(1021, true, false);
+ majority_benchmark(2001, true, false);
+ majority_benchmark(5001, true, false);
+ majority_benchmark(9999, true, false);
+ majority_benchmark(10003, true, false);
+ majority_benchmark(20001, true, false);
+ majority_benchmark(200001, true, false);
+ majority_benchmark(1000001, true, false);
+#endif
+#ifdef THRESHOLD
+ threshold_benchmark(3, 0, .5, false, false);
+
+ cout << "*-= THRESHOLD =-*" << endl;
+ cout << "*-= IN CACHE TESTS =-*" << endl;
+ threshold_benchmark(3, 0, .5, true, true);
+ threshold_benchmark(10, 2, .3, true, true);
+ threshold_benchmark(30, 20, .7, true, true);
+
+
+ threshold_benchmark(100, 48, .5, true, true);
+ threshold_benchmark(200, 50, .25, true, true);
+
+ threshold_benchmark(3000, 1502, .5, true, true);
+ threshold_benchmark(4000, 1000, .25, true, true);
+
+ threshold_benchmark(200001, 0, .5, true, true);
+ threshold_benchmark(200001, 200000, .5, true, true);
+
+ threshold_benchmark(1000001, 498384, .5, true, true);
+
+ cout << "*-= OUT OF CACHE TESTS =-*" << endl;
+ threshold_benchmark(3, 0, .5, true, false);
+ threshold_benchmark(10, 2, .3, true, false);
+ threshold_benchmark(30, 20, .7, true, false);
+
+
+ threshold_benchmark(100, 48, .5, true, false);
+ threshold_benchmark(200, 50, .25, true, false);
+
+ threshold_benchmark(3000, 1502, .5, true, false);
+ threshold_benchmark(4000, 1000, .25, true, false);
+
+ threshold_benchmark(200001, 0, .5, true, false);
+ threshold_benchmark(200001, 200000, .5, true, false);
+
+ threshold_benchmark(1000001, 498384, .5, true, false);
+#endif
+}
diff --git a/bhv/cnative/bindings.cpp b/bhv/cnative/bindings.cpp
index 21662fd..78b65ca 100644
--- a/bhv/cnative/bindings.cpp
+++ b/bhv/cnative/bindings.cpp
@@ -1,9 +1,11 @@
#include <cstdlib>
+
#define PY_SSIZE_T_CLEAN size_t
+
#include <Python.h>
#include "structmember.h"
-#include "packed.h"
+#include "core.h"
typedef struct {
PyObject_HEAD
@@ -27,6 +29,7 @@ static int BHV_init(BHV *v, PyObject *args, PyObject *kwds) {
}
static PyObject *BHV_rand(PyTypeObject *type, PyObject *Py_UNUSED(ignored));
+
static PyObject *BHV_random(PyTypeObject *type, PyObject *args);
static PyObject *BHV_majority(PyTypeObject *type, PyObject *args);
@@ -34,13 +37,25 @@ static PyObject *BHV_majority(PyTypeObject *type, PyObject *args);
static PyObject *BHV_representative(PyTypeObject *type, PyObject *args);
static PyObject *BHV_select(BHV *cond, PyObject *args);
+
+static PyObject *BHV_ternary(BHV *x, PyObject *args);
+
static PyObject *BHV_roll_words(BHV *x, PyObject *args);
+
static PyObject *BHV_roll_word_bits(BHV *x, PyObject *args);
+
+static PyObject *BHV_permute_byte_bits(BHV *x, PyObject *args);
+
+static PyObject *BHV_permute_words(BHV *x, PyObject *args);
+
static PyObject *BHV_permute(BHV *x, PyObject *args);
+
static PyObject *BHV_rehash(BHV *x, PyObject *args);
+
static PyObject *BHV_swap_halves(BHV *x, PyObject *args);
static PyObject *BHV_eq(BHV *v1, PyObject *args);
+
static PyObject *BHV_richcompare(PyObject *self, PyObject *other, int op) {
switch (op) {
case Py_EQ: if (bhv::eq(((BHV*)self)->data, ((BHV*)other)->data)) Py_RETURN_TRUE; else Py_RETURN_FALSE;
@@ -51,17 +66,20 @@ static PyObject *BHV_richcompare(PyObject *self, PyObject *other, int op) {
}
}
-static PyObject *BHV_xor(PyObject *v1, PyObject *v2);
-static PyObject *BHV_and(PyObject *v1, PyObject *v2);
-static PyObject *BHV_or(PyObject *v1, PyObject *v2);
-static PyObject *BHV_invert(PyObject *v);
+static PyObject *BHV_xor(PyObject * v1, PyObject * v2);
+static PyObject *BHV_and(PyObject * v1, PyObject * v2);
+static PyObject *BHV_or(PyObject * v1, PyObject * v2);
+static PyObject *BHV_invert(PyObject * v);
-static PyObject *BHV_hamming(BHV *v1, PyObject *args);
+static PyObject *BHV_hamming(BHV * v1, PyObject * args);
-static PyObject *BHV_to_bytes(BHV *x, PyObject *Py_UNUSED(ignored));
+static PyObject *BHV_to_bytes(BHV * x, PyObject * Py_UNUSED(ignored));
static PyObject *BHV_from_bytes(PyTypeObject *type, PyObject *args);
-static PyObject *BHV_active(BHV *v, PyObject *Py_UNUSED(ignored)) {
+static PyObject *__getstate__(BHV *x, PyObject *Py_UNUSED(ignored));
+static PyObject *__setstate__(BHV *x, PyObject *args);
+
+static PyObject *BHV_active(BHV * v, PyObject * Py_UNUSED(ignored)) {
return Py_BuildValue("i", bhv::active(v->data));
}
@@ -70,49 +88,59 @@ static PyMemberDef BHV_members[] = {
};
static PyMethodDef BHV_methods[] = {
- {"rand", (PyCFunction) BHV_rand, METH_CLASS | METH_NOARGS,
+ {"rand", (PyCFunction) BHV_rand, METH_CLASS | METH_NOARGS,
"Bernoulli 1/2 distributed bit vector"},
- {"random", (PyCFunction) BHV_random, METH_CLASS | METH_VARARGS,
+ {"random", (PyCFunction) BHV_random, METH_CLASS | METH_VARARGS,
"Bernoulli p distributed bit vector"},
- {"majority", (PyCFunction) BHV_majority, METH_CLASS | METH_VARARGS,
+ {"majority", (PyCFunction) BHV_majority, METH_CLASS | METH_VARARGS,
"The majority of a list of BHVs"},
- {"representative", (PyCFunction) BHV_representative, METH_CLASS | METH_VARARGS,
+ {"representative", (PyCFunction) BHV_representative, METH_CLASS | METH_VARARGS,
"Random representative of a list of BHVs"},
- {"select", (PyCFunction) BHV_select, METH_VARARGS,
+ {"select", (PyCFunction) BHV_select, METH_VARARGS,
"MUX or IF-THEN-ELSE"},
- {"roll_words", (PyCFunction) BHV_roll_words, METH_VARARGS,
+ {"ternary", (PyCFunction) BHV_ternary, METH_VARARGS,
+ "Ternary logic operation"},
+ {"roll_words", (PyCFunction) BHV_roll_words, METH_VARARGS,
"Word-level rotation"},
- {"roll_word_bits", (PyCFunction) BHV_roll_word_bits, METH_VARARGS,
+ {"roll_word_bits", (PyCFunction) BHV_roll_word_bits, METH_VARARGS,
"Word-level rotation of bits"},
- {"permute", (PyCFunction) BHV_permute, METH_VARARGS,
+ {"permute_byte_bits", (PyCFunction) BHV_permute_byte_bits, METH_VARARGS,
+ "Permutes the bits of every byte"},
+ {"permute_words", (PyCFunction) BHV_permute_words, METH_VARARGS,
"Word-level permutation"},
- {"rehash", (PyCFunction) BHV_rehash, METH_NOARGS,
+ {"permute", (PyCFunction) BHV_permute, METH_VARARGS,
+ "Default permutation"},
+ {"rehash", (PyCFunction) BHV_rehash, METH_NOARGS,
"Hash the vector into another vector"},
- {"swap_halves", (PyCFunction) BHV_swap_halves, METH_NOARGS,
+ {"swap_halves", (PyCFunction) BHV_swap_halves, METH_NOARGS,
"Swap the halves of the vector"},
- {"eq", (PyCFunction) BHV_eq, METH_VARARGS,
+ {"eq", (PyCFunction) BHV_eq, METH_VARARGS,
"Check equality"},
- {"hamming", (PyCFunction) BHV_hamming, METH_VARARGS,
+ {"hamming", (PyCFunction) BHV_hamming, METH_VARARGS,
"Hamming distance between two BHVs"},
- {"active", (PyCFunction) BHV_active, METH_NOARGS,
+ {"active", (PyCFunction) BHV_active, METH_NOARGS,
"Count the number of active bits"},
- {"to_bytes", (PyCFunction) BHV_to_bytes, METH_NOARGS,
+ {"to_bytes", (PyCFunction) BHV_to_bytes, METH_NOARGS,
"Bytes normalized form"},
- {"from_bytes", (PyCFunction) BHV_from_bytes, METH_CLASS | METH_VARARGS,
+ {"from_bytes", (PyCFunction) BHV_from_bytes, METH_CLASS | METH_VARARGS,
"Construct from bytes normalized form"},
+ {"__getstate__", (PyCFunction) __getstate__, METH_NOARGS,
+ "Pickle the vector"},
+ {"__setstate__", (PyCFunction) __setstate__, METH_VARARGS,
+ "Un-pickle the vector"},
{nullptr}
};
static PyNumberMethods BHV_nb_methods = {
- .nb_invert = (unaryfunc)BHV_invert,
- .nb_and = (binaryfunc)BHV_and,
- .nb_xor = (binaryfunc)BHV_xor,
- .nb_or = (binaryfunc)BHV_or,
+ .nb_invert = (unaryfunc) BHV_invert,
+ .nb_and = (binaryfunc) BHV_and,
+ .nb_xor = (binaryfunc) BHV_xor,
+ .nb_or = (binaryfunc) BHV_or,
};
static PyTypeObject BHVType = {
.ob_base = PyVarObject_HEAD_INIT(nullptr, 0)
- .tp_name = "bhv.CNativePackedBHV",
+ .tp_name = "bhv.cnative.CNativePackedBHV",
.tp_basicsize = sizeof(BHV),
.tp_itemsize = 0,
.tp_dealloc = (destructor) BHV_dealloc,
@@ -144,8 +172,8 @@ static PyObject *BHV_random(PyTypeObject *type, PyObject *args) {
return v;
}
-static PyObject *BHV_hamming(BHV *v1, PyObject *args) {
- BHV *v2;
+static PyObject *BHV_hamming(BHV * v1, PyObject * args) {
+ BHV * v2;
if (!PyArg_ParseTuple(args, "O!", &BHVType, &v2))
return nullptr;
@@ -170,13 +198,12 @@ static PyObject *BHV_majority(PyTypeObject *type, PyObject *args) {
if (even) vs[n_vectors] = bhv::rand();
- PyObject * ret = type->tp_alloc(type, 0);
- ((BHV *) ret)->data = bhv::true_majority(vs, n_vectors + even);
- return ret;
+ PyObject * v = BHV_new(type, nullptr, nullptr);
+ bhv::true_majority_into(vs, n_vectors + even, ((BHV *) v)->data);
+ return v;
}
static PyObject *BHV_representative(PyTypeObject *type, PyObject *args) {
- // TODO
PyObject * vector_list;
if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &vector_list))
@@ -191,60 +218,60 @@ static PyObject *BHV_representative(PyTypeObject *type, PyObject *args) {
vs[i] = ((BHV *) v_i_py)->data;
}
- PyObject * ret = type->tp_alloc(type, 0);
- ((BHV *) ret)->data = bhv::representative(vs, n_vectors);
+ PyObject * ret = BHV_new(type, nullptr, nullptr);
+ bhv::representative_into(vs, n_vectors, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_xor(PyObject *v1, PyObject *v2) {
- if (not PyObject_IsInstance(v1, (PyObject *)&BHVType) or
- not PyObject_IsInstance(v2, (PyObject *)&BHVType)) {
+static PyObject *BHV_xor(PyObject * v1, PyObject * v2) {
+ if (not PyObject_IsInstance(v1, (PyObject *) &BHVType) or
+ not PyObject_IsInstance(v2, (PyObject *) &BHVType)) {
PyErr_SetString(PyExc_TypeError, "Only BHV argument(s) supported");
return nullptr;
}
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
- bhv::xor_into(((BHV*)v1)->data, ((BHV*)v2)->data, ((BHV *) ret)->data);
+ bhv::xor_into(((BHV *) v1)->data, ((BHV *) v2)->data, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_and(PyObject *v1, PyObject *v2) {
- if (not PyObject_IsInstance(v1, (PyObject *)&BHVType) or
- not PyObject_IsInstance(v2, (PyObject *)&BHVType)) {
+static PyObject *BHV_and(PyObject * v1, PyObject * v2) {
+ if (not PyObject_IsInstance(v1, (PyObject *) &BHVType) or
+ not PyObject_IsInstance(v2, (PyObject *) &BHVType)) {
PyErr_SetString(PyExc_TypeError, "Only BHV argument(s) supported");
return nullptr;
}
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
- bhv::and_into(((BHV*)v1)->data, ((BHV*)v2)->data, ((BHV *) ret)->data);
+ bhv::and_into(((BHV *) v1)->data, ((BHV *) v2)->data, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_or(PyObject *v1, PyObject *v2) {
- if (not PyObject_IsInstance(v1, (PyObject *)&BHVType) or
- not PyObject_IsInstance(v2, (PyObject *)&BHVType)) {
+static PyObject *BHV_or(PyObject * v1, PyObject * v2) {
+ if (not PyObject_IsInstance(v1, (PyObject *) &BHVType) or
+ not PyObject_IsInstance(v2, (PyObject *) &BHVType)) {
PyErr_SetString(PyExc_TypeError, "Only BHV argument(s) supported");
return nullptr;
}
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
- bhv::or_into(((BHV*)v1)->data, ((BHV*)v2)->data, ((BHV *) ret)->data);
+ bhv::or_into(((BHV *) v1)->data, ((BHV *) v2)->data, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_invert(PyObject *v) {
- if (not PyObject_IsInstance(v, (PyObject *)&BHVType)) {
+static PyObject *BHV_invert(PyObject * v) {
+ if (not PyObject_IsInstance(v, (PyObject *) &BHVType)) {
PyErr_SetString(PyExc_TypeError, "Only BHV argument(s) supported");
return nullptr;
}
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
- bhv::invert_into(((BHV*)v)->data, ((BHV *) ret)->data);
+ bhv::invert_into(((BHV *) v)->data, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_eq(BHV *v1, PyObject *args) {
- BHV *v2;
+static PyObject *BHV_eq(BHV * v1, PyObject * args) {
+ BHV * v2;
if (!PyArg_ParseTuple(args, "O!", &BHVType, &v2))
return nullptr;
@@ -253,9 +280,9 @@ static PyObject *BHV_eq(BHV *v1, PyObject *args) {
else Py_RETURN_FALSE;
}
-static PyObject *BHV_select(BHV *cond, PyObject *args) {
- BHV *when1;
- BHV *when0;
+static PyObject *BHV_select(BHV * cond, PyObject * args) {
+ BHV * when1;
+ BHV * when0;
if (!PyArg_ParseTuple(args, "O!O!", &BHVType, &when1, &BHVType, &when0))
return nullptr;
@@ -265,7 +292,20 @@ static PyObject *BHV_select(BHV *cond, PyObject *args) {
return ret;
}
-static PyObject *BHV_roll_words(BHV *x, PyObject *args) {
+static PyObject *BHV_ternary(BHV * x, PyObject * args) {
+ BHV * y;
+ BHV * z;
+ uint8_t op;
+
+ if (!PyArg_ParseTuple(args, "O!O!b", &BHVType, &y, &BHVType, &z, &op))
+ return nullptr;
+
+ PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
+ bhv::dynamic_ternary_into(x->data, y->data, z->data, ((BHV *) ret)->data, op);
+ return ret;
+}
+
+static PyObject *BHV_roll_words(BHV * x, PyObject * args) {
int32_t p;
if (!PyArg_ParseTuple(args, "i", &p))
return nullptr;
@@ -275,7 +315,7 @@ static PyObject *BHV_roll_words(BHV *x, PyObject *args) {
return v;
}
-static PyObject *BHV_roll_word_bits(BHV *x, PyObject *args) {
+static PyObject *BHV_roll_word_bits(BHV * x, PyObject * args) {
int32_t p;
if (!PyArg_ParseTuple(args, "i", &p))
return nullptr;
@@ -285,7 +325,29 @@ static PyObject *BHV_roll_word_bits(BHV *x, PyObject *args) {
return v;
}
-static PyObject *BHV_permute(BHV *x, PyObject *args) {
+static PyObject *BHV_permute_words(BHV * x, PyObject * args) {
+ int32_t perm;
+
+ if (!PyArg_ParseTuple(args, "i", &perm))
+ return nullptr;
+
+ PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
+ bhv::permute_words_into(x->data, perm, ((BHV *) ret)->data);
+ return ret;
+}
+
+static PyObject *BHV_permute_byte_bits(BHV * x, PyObject * args) {
+ int32_t perm;
+
+ if (!PyArg_ParseTuple(args, "i", &perm))
+ return nullptr;
+
+ PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
+ bhv::permute_byte_bits_into(x->data, perm, ((BHV *) ret)->data);
+ return ret;
+}
+
+static PyObject *BHV_permute(BHV * x, PyObject * args) {
int32_t perm;
if (!PyArg_ParseTuple(args, "i", &perm))
@@ -296,26 +358,26 @@ static PyObject *BHV_permute(BHV *x, PyObject *args) {
return ret;
}
-static PyObject *BHV_rehash(BHV *x, PyObject *args) {
+static PyObject *BHV_rehash(BHV * x, PyObject * args) {
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
((BHV *) ret)->data = bhv::zero();
bhv::rehash_into(x->data, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_swap_halves(BHV *x, PyObject *args) {
+static PyObject *BHV_swap_halves(BHV * x, PyObject * args) {
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
bhv::swap_halves_into(x->data, ((BHV *) ret)->data);
return ret;
}
-static PyObject *BHV_to_bytes(BHV *x, PyObject *Py_UNUSED(ignored)) {
+static PyObject *BHV_to_bytes(BHV * x, PyObject * Py_UNUSED(ignored)) {
return PyBytes_FromStringAndSize((char*)x->data, BYTES);
}
static PyObject *BHV_from_bytes(PyTypeObject *type, PyObject *args) {
PyObject * ret = BHV_new(&BHVType, nullptr, nullptr);
- char* buf;
+ char *buf;
size_t size;
if (!PyArg_ParseTuple(args, "s#", &buf, &size))
@@ -326,10 +388,30 @@ static PyObject *BHV_from_bytes(PyTypeObject *type, PyObject *args) {
return nullptr;
}
- memcpy(((BHV*)ret)->data, buf, BYTES);
+ memcpy(((BHV *) ret)->data, buf, BYTES);
return ret;
}
+static PyObject *__getstate__(BHV *x, PyObject *Py_UNUSED(ignored)) {
+ return PyBytes_FromStringAndSize((char*)x->data, BYTES);
+}
+
+static PyObject *__setstate__(BHV *x, PyObject *args) {
+ char *buf;
+ size_t size;
+
+ if (!PyArg_ParseTuple(args, "s#", &buf, &size))
+ return nullptr;
+
+ if (size != BYTES) {
+ PyErr_SetString(PyExc_TypeError, "Bytes object didn't have the right size");
+ return nullptr;
+ }
+
+ memcpy(x->data, buf, BYTES);
+ Py_RETURN_NONE;
+}
+
static PyObject *dimension(PyObject * self, PyObject * args, PyObject * kwds) {
return PyLong_FromLong(BITS);
}
@@ -364,15 +446,15 @@ PyMODINIT_FUNC PyInit_cnative(void) {
if (m == nullptr)
return nullptr;
- BHV *z = (BHV*) BHVType.tp_alloc(&BHVType, 0);
+ BHV * z = (BHV *) BHVType.tp_alloc(&BHVType, 0);
z->data = bhv::zero();
- PyDict_SetItemString(BHVType.tp_dict, "ZERO", (PyObject *)z);
- BHV *o = (BHV*) BHVType.tp_alloc(&BHVType, 0);
+ PyDict_SetItemString(BHVType.tp_dict, "ZERO", (PyObject *) z);
+ BHV * o = (BHV *) BHVType.tp_alloc(&BHVType, 0);
o->data = bhv::one();
- PyDict_SetItemString(BHVType.tp_dict, "ONE", (PyObject *)o);
- BHV *h = (BHV*) BHVType.tp_alloc(&BHVType, 0);
+ PyDict_SetItemString(BHVType.tp_dict, "ONE", (PyObject *) o);
+ BHV * h = (BHV *) BHVType.tp_alloc(&BHVType, 0);
h->data = bhv::half();
- PyDict_SetItemString(BHVType.tp_dict, "HALF", (PyObject *)h);
+ PyDict_SetItemString(BHVType.tp_dict, "HALF", (PyObject *) h);
Py_INCREF(&BHVType);
PyModule_AddObject(m, "CNativePackedBHV", (PyObject *) &BHVType);
diff --git a/bhv/cnative/core.h b/bhv/cnative/core.h
new file mode 100644
index 0000000..0b9bd50
--- /dev/null
+++ b/bhv/cnative/core.h
@@ -0,0 +1,120 @@
+#ifndef BHV_CORE_H
+#define BHV_CORE_H
+
+#include <bit>
+#include <random>
+#include <cstring>
+#include <cassert>
+#include <algorithm>
+#include "shared.h"
+#include <immintrin.h>
+#ifdef __AVX2__
+#include "simdpcg.h"
+#endif
+#ifdef __AVX512__
+#include "TurboSHAKE_AVX512/TurboSHAKE.h"
+#else
+#include "TurboSHAKE_opt/TurboSHAKE.h"
+#endif
+
+
+namespace bhv {
+ constexpr word_t ONE_WORD = std::numeric_limits<word_t>::max();
+ constexpr bit_word_iter_t HALF_BITS_PER_WORD = BITS_PER_WORD / 2;
+ constexpr word_t HALF_WORD = ONE_WORD << HALF_BITS_PER_WORD;
+ constexpr word_t OTHER_HALF_WORD = ~HALF_WORD;
+
+ template<word_t W>
+ word_t *const_bhv() {
+ static word_t x[WORDS];
+ for (word_t &i: x) i = W;
+ return x;
+ }
+
+ word_t *ZERO = const_bhv<0>();
+ word_t *ONE = const_bhv<ONE_WORD>();
+ word_t *HALF = const_bhv<HALF_WORD>();
+
+ std::mt19937_64 rng;
+
+ inline word_t *empty() {
+ return (word_t *) aligned_alloc(64, BYTES);
+ }
+
+ word_t *zero() {
+ word_t * e = empty();
+ memset(e, 0, BYTES);
+ return e;
+ }
+
+ word_t *one() {
+ word_t *x = empty();
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ x[i] = ONE_WORD;
+ }
+ return x;
+ }
+
+ word_t *half() {
+ word_t *x = empty();
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ x[i] = HALF_WORD;
+ }
+ return x;
+ }
+
+ void swap_halves_into(word_t *x, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = ((x[i] & HALF_WORD) >> HALF_BITS_PER_WORD) | ((x[i] & OTHER_HALF_WORD) << HALF_BITS_PER_WORD);
+ }
+ }
+
+ bool eq(word_t *x, word_t *y) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ if (x[i] != y[i])
+ return false;
+ }
+ return true;
+ }
+
+ void xor_into(word_t *x, word_t *y, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = x[i] ^ y[i];
+ }
+ }
+
+ void and_into(word_t *x, word_t *y, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = x[i] & y[i];
+ }
+ }
+
+ void or_into(word_t *x, word_t *y, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = x[i] | y[i];
+ }
+ }
+
+ void invert_into(word_t *x, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = ~x[i];
+ }
+ }
+
+ #include "ternary.h"
+
+ #include "distance.h"
+
+ #include "random.h"
+
+ #include "threshold.h"
+
+ #include "majority.h"
+
+ #include "representative.h"
+
+ #include "permutation.h"
+
+ #include "hash.h"
+}
+#endif //BHV_CORE_H
diff --git a/bhv/cnative/distance.h b/bhv/cnative/distance.h
new file mode 100644
index 0000000..3511c8b
--- /dev/null
+++ b/bhv/cnative/distance.h
@@ -0,0 +1,221 @@
+/// @brief Count the number of set bits in the vector
+/// @note This implementation is within 30% of AVX-2 and AVX-512 on Ice-Lake
+bit_iter_t active_reference(word_t *x) {
+ bit_iter_t total = 0;
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ total += __builtin_popcountl(x[i]);
+ }
+ return total;
+}
+
+#ifdef __AVX2__
+
+void carry_save_adder(__m256i &h, __m256i &l, __m256i a, __m256i b, __m256i c) {
+ __m256i u = _mm256_xor_si256(a, b);
+ h = _mm256_or_si256(_mm256_and_si256(a, b), _mm256_and_si256(u, c));
+ l = _mm256_xor_si256(u, c);
+}
+
+__m256i count(__m256i v) {
+ __m256i lookup = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3,
+ 1, 2, 2, 3, 2, 3, 3, 4,
+ 0, 1, 1, 2, 1, 2, 2, 3,
+ 1, 2, 2, 3, 2, 3, 3, 4);
+ __m256i low_mask = _mm256_set1_epi8(0x0f);
+ __m256i lo = _mm256_and_si256(v, low_mask);
+ __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
+ __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
+ __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
+ __m256i total = _mm256_add_epi8(popcnt1, popcnt2);
+ return _mm256_sad_epu8(total, _mm256_setzero_si256());
+}
+
+/// @brief Count the number of set bits in the vector using an expanded AVX2 adder
+/// @note This follows https://github.com/JeWaVe/hamming_rs
+bit_iter_t active_adder_avx2(word_t *x) {
+ __m256i *vec_x = (__m256i *)x;
+ __m256i total = _mm256_setzero_si256();
+ __m256i ones = _mm256_setzero_si256();
+ __m256i twos = _mm256_setzero_si256();
+ __m256i fours = _mm256_setzero_si256();
+ __m256i eights = _mm256_setzero_si256();
+ __m256i sixteens = _mm256_setzero_si256();
+ __m256i twos_a = _mm256_setzero_si256();
+ __m256i fours_a = _mm256_setzero_si256();
+ __m256i eights_a = _mm256_setzero_si256();
+ __m256i twos_b = _mm256_setzero_si256();
+ __m256i fours_b = _mm256_setzero_si256();
+ __m256i eights_b = _mm256_setzero_si256();
+ for (word_iter_t i = 0; i < BITS/256; i += 16) {
+ carry_save_adder(twos_a, ones, ones, _mm256_loadu_si256(vec_x + i), _mm256_loadu_si256(vec_x + i + 1));
+ carry_save_adder(twos_b, ones, ones, _mm256_loadu_si256(vec_x + i + 2), _mm256_loadu_si256(vec_x + i + 3));
+ carry_save_adder(fours_a, twos, twos, twos_a, twos_b);
+ carry_save_adder(twos_a, ones, ones, _mm256_loadu_si256(vec_x + i + 4), _mm256_loadu_si256(vec_x + i + 5));
+ carry_save_adder(twos_b, ones, ones, _mm256_loadu_si256(vec_x + i + 6), _mm256_loadu_si256(vec_x + i + 7));
+ carry_save_adder(fours_b, twos, twos, twos_a, twos_b);
+ carry_save_adder(eights_a, fours, fours, fours_a, fours_b);
+ carry_save_adder(twos_a, ones, ones, _mm256_loadu_si256(vec_x + i + 8), _mm256_loadu_si256(vec_x + i + 9));
+ carry_save_adder(twos_b, ones, ones, _mm256_loadu_si256(vec_x + i + 10), _mm256_loadu_si256(vec_x + i + 11));
+ carry_save_adder(fours_a, twos, twos, twos_a, twos_b);
+ carry_save_adder(twos_a, ones, ones, _mm256_loadu_si256(vec_x + i + 12), _mm256_loadu_si256(vec_x + i + 13));
+ carry_save_adder(twos_b, ones, ones, _mm256_loadu_si256(vec_x + i + 14), _mm256_loadu_si256(vec_x + i + 15));
+ carry_save_adder(fours_b, twos, twos, twos_a, twos_b);
+ carry_save_adder(eights_b, fours, fours, fours_a, fours_b);
+ carry_save_adder(sixteens, eights, eights, eights_a, eights_b);
+ total = _mm256_add_epi64(total, count(sixteens));
+ }
+ // final reduce
+ total = _mm256_slli_epi64(total, 4);
+ total = _mm256_add_epi64(total, _mm256_slli_epi64(count(eights), 3));
+ total = _mm256_add_epi64(total, _mm256_slli_epi64(count(fours), 2));
+ total = _mm256_add_epi64(total, _mm256_slli_epi64(count(twos), 1));
+ total = _mm256_add_epi64(total, count(ones));
+ return (_mm256_extract_epi64(total, 0)
+ + _mm256_extract_epi64(total, 1)
+ + _mm256_extract_epi64(total, 2)
+ + _mm256_extract_epi64(total, 3));
+}
+#endif //__AVX2__
+
+#if __AVX512BW__
+/// @brief Count the number of set bits in the vector using vector popcnt (AVX-512 BITALG)
+bit_iter_t active_avx512(word_t *x) {
+ __m512i total = _mm512_set1_epi64(0);
+
+ for (word_iter_t i = 0; i < WORDS; i += 8) {
+ __m512i v = _mm512_loadu_si512((__m512i *)(x + i));
+ __m512i cnts = _mm512_popcnt_epi64(v);
+ total = _mm512_add_epi64(total, cnts);
+ }
+
+#if true // TODO figure out when reduce_add is available
+ return _mm512_reduce_add_epi64(total);
+#else
+ uint64_t a [8];
+ _mm512_storeu_si512((__m512i *)a, totals);
+ return a[0] + a[1] + a[2] + a[3] + a[4] + a[5] + a[6] + a[7];
+#endif
+}
+#endif
+
+#if __AVX512BW__
+#define active active_avx512
+#elif __AVX2__
+#define active active_adder_avx2
+#else
+#define active active_reference
+#endif
+
+/// @brief The hamming distance between two vectors, this is equivalent to active(xor(x, y)) but faster.
+bit_iter_t hamming_reference(word_t *x, word_t *y) {
+ bit_iter_t total = 0;
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ total += __builtin_popcountl(x[i] ^ y[i]);
+ }
+ return total;
+}
+
+#ifdef __AVX2__
+
+/// @brief The hamming distance between two vectors using an expanded AVX2 adder
+/// @note This follows https://github.com/JeWaVe/hamming_rs
+uint64_t hamming_adder_avx2(word_t *x, word_t *y) {
+ __m256i *vec_x = (__m256i *)x;
+ __m256i *vec_y = (__m256i *)y;
+ __m256i total = _mm256_setzero_si256();
+ __m256i ones = _mm256_setzero_si256();
+ __m256i twos = _mm256_setzero_si256();
+ __m256i fours = _mm256_setzero_si256();
+ __m256i eights = _mm256_setzero_si256();
+ __m256i sixteens = _mm256_setzero_si256();
+ __m256i twos_a = _mm256_setzero_si256();
+ __m256i fours_a = _mm256_setzero_si256();
+ __m256i eights_a = _mm256_setzero_si256();
+ __m256i twos_b = _mm256_setzero_si256();
+ __m256i fours_b = _mm256_setzero_si256();
+ __m256i eights_b = _mm256_setzero_si256();
+ for (word_iter_t i = 0; i < BITS/256; i += 16) {
+ carry_save_adder(twos_a, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i), _mm256_loadu_si256(vec_y + i)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 1), _mm256_loadu_si256(vec_y + i + 1))
+ );
+ carry_save_adder(twos_b, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 2), _mm256_loadu_si256(vec_y + i + 2)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 3), _mm256_loadu_si256(vec_y + i + 3))
+ );
+ carry_save_adder(fours_a, twos, twos, twos_a, twos_b);
+ carry_save_adder(twos_a, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 4), _mm256_loadu_si256(vec_y + i + 4)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 5), _mm256_loadu_si256(vec_y + i + 5))
+ );
+ carry_save_adder(twos_b, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 6), _mm256_loadu_si256(vec_y + i + 6)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 7), _mm256_loadu_si256(vec_y + i + 7))
+ );
+ carry_save_adder(fours_b, twos, twos, twos_a, twos_b);
+ carry_save_adder(eights_a, fours, fours, fours_a, fours_b);
+ carry_save_adder(twos_a, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 8), _mm256_loadu_si256(vec_y + i + 8)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 9), _mm256_loadu_si256(vec_y + i + 9))
+ );
+ carry_save_adder(twos_b, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 10), _mm256_loadu_si256(vec_y + i + 10)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 11), _mm256_loadu_si256(vec_y + i + 11))
+ );
+ carry_save_adder(fours_a, twos, twos, twos_a, twos_b);
+ carry_save_adder(twos_a, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 12), _mm256_loadu_si256(vec_y + i + 12)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 13), _mm256_loadu_si256(vec_y + i + 13))
+ );
+ carry_save_adder(twos_b, ones, ones,
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 14), _mm256_loadu_si256(vec_y + i + 14)),
+ _mm256_xor_si256(_mm256_loadu_si256(vec_x + i + 15), _mm256_loadu_si256(vec_y + i + 15))
+ );
+ carry_save_adder(fours_b, twos, twos, twos_a, twos_b);
+ carry_save_adder(eights_b, fours, fours, fours_a, fours_b);
+ carry_save_adder(sixteens, eights, eights, eights_a, eights_b);
+ total = _mm256_add_epi64(total, count(sixteens));
+ }
+ // final reduce
+ total = _mm256_slli_epi64(total, 4);
+ total = _mm256_add_epi64(total, _mm256_slli_epi64(count(eights), 3));
+ total = _mm256_add_epi64(total, _mm256_slli_epi64(count(fours), 2));
+ total = _mm256_add_epi64(total, _mm256_slli_epi64(count(twos), 1));
+ total = _mm256_add_epi64(total, count(ones));
+ return (_mm256_extract_epi64(total, 0)
+ + _mm256_extract_epi64(total, 1)
+ + _mm256_extract_epi64(total, 2)
+ + _mm256_extract_epi64(total, 3));
+}
+#endif //__AVX2__
+
+#if __AVX512BW__
+/// @brief The hamming distance between two vectors using vector popcnt (AVX-512 BITALG)
+bit_iter_t hamming_avx512(word_t *x, word_t *y) {
+ __m512i total = _mm512_set1_epi64(0);
+
+ for (word_iter_t i = 0; i < WORDS; i += 8) {
+ __m512i vec_x = _mm512_loadu_si512((__m512i *)(x + i));
+ __m512i vec_y = _mm512_loadu_si512((__m512i *)(y + i));
+ __m512i d = _mm512_xor_si512(vec_x, vec_y);
+ __m512i cnts = _mm512_popcnt_epi64(d);
+ total = _mm512_add_epi64(total, cnts);
+ }
+
+#if true // TODO figure out when reduce_add is available
+ return _mm512_reduce_add_epi64(total);
+#else
+ uint64_t a [8];
+ _mm512_storeu_si512((__m512i *)a, totals);
+ return a[0] + a[1] + a[2] + a[3] + a[4] + a[5] + a[6] + a[7];
+#endif
+}
+#endif
+
+#if __AVX512BW__
+#define hamming hamming_avx512
+#elif __AVX2__
+#define hamming hamming_adder_avx2
+#else
+#define hamming hamming_reference
+#endif
diff --git a/bhv/cnative/hash.h b/bhv/cnative/hash.h
new file mode 100644
index 0000000..711f57f
--- /dev/null
+++ b/bhv/cnative/hash.h
@@ -0,0 +1,3 @@
+void rehash_into(word_t *x, word_t *target) {
+ TurboSHAKE(512, (uint8_t *) x, BYTES, 0x1F, (uint8_t *) target, BYTES);
+}
diff --git a/bhv/cnative/majority.h b/bhv/cnative/majority.h
new file mode 100644
index 0000000..fbd6568
--- /dev/null
+++ b/bhv/cnative/majority.h
@@ -0,0 +1,181 @@
+/// @brief Straight C implementation of Maj-3
+void majority3_into_reference(word_t *x, word_t *y, word_t *z, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = ((x[i] & y[i]) | (x[i] & z[i]) | (y[i] & z[i]));
+ }
+}
+
+#if __AVX512BW__
+
+/// @brief AVX-512 implementation of Decision-Tree-Majority algorithm
+/// @note Optimal for use in the N=7 to N=89 regime. After that, thresholding is faster
+template <uint8_t size>
+void logic_majority_into_avx512(word_t ** xs, word_t* target) {
+ constexpr uint8_t half = size/2;
+ __m512i grid [size/2 + 1][size/2 + 1];
+
+ for (word_iter_t word_id = 0; word_id < WORDS; word_id += 8) {
+
+ word_t* x = xs[size - 1];
+ grid[half][half] = _mm512_loadu_si512((__m512i*)(x + word_id));
+
+ for (uint8_t i = 0; i < half; ++i) {
+ x = xs[size - i - 2];
+ __m512i chunk = _mm512_loadu_si512((__m512i*)(x + word_id));
+
+ grid[half - i - 1][half] = grid[half - i][half] & chunk;
+ grid[half][half - i - 1] = grid[half][half - i] | chunk;
+ }
+
+ //NOTE: loop terminates when variable wraps after 0
+ for (uint8_t i = half - 1; i < half; --i) for (uint8_t j = half - 1; j < half; --j) {
+ x = xs[i + j];
+ __m512i chunk = _mm512_loadu_si512((__m512i*)(x + word_id));
+
+ grid[i][j] = _mm512_ternarylogic_epi64(chunk, grid[i + 1][j], grid[i][j + 1], 0xca); // select
+ }
+
+ _mm512_storeu_si512((__m512i*)(target + word_id), grid[0][0]);
+ }
+}
+
+/// @brief AVX-512 AND-OR version of majority3_into
+void majority3_into_avx512(word_t * x, word_t * y, word_t * z, word_t * target) {
+ for (word_iter_t word_id = 0; word_id < WORDS; word_id += 8) {
+ __m512i xi = _mm512_loadu_si512((__m512i*)(x + word_id));
+ __m512i yi = _mm512_loadu_si512((__m512i*)(y + word_id));
+ __m512i zi = _mm512_loadu_si512((__m512i*)(z + word_id));
+
+ __m512i result = ((xi & yi) | (xi & zi) | (yi & zi));
+
+ _mm512_storeu_si512((__m512i*)(target + word_id), result);
+ }
+}
+
+/// @brief AVX-512 TERNARY version of majority3_into
+/// @note On GCC 13 majority3_into_avx512 gets compiled into two ternary instructions, this uses only one
+void majority3_into_ternary_avx512(word_t * x, word_t * y, word_t * z, word_t * target) {
+ __m512i *x_vec = (__m512i *)x;
+ __m512i *y_vec = (__m512i *)y;
+ __m512i *z_vec = (__m512i *)z;
+ __m512i *target_vec = (__m512i *)target;
+
+ for (word_iter_t i = 0; i < BITS/512; ++i) {
+ _mm512_storeu_si512(target_vec + i,
+ _mm512_ternarylogic_epi64(_mm512_loadu_si512(x_vec + i),
+ _mm512_loadu_si512(y_vec + i),
+ _mm512_loadu_si512(z_vec + i), 0xe8));
+ }
+}
+
+/// @brief Computes the majority value for each bit among all corresponding bits in the input vectors
+/// @param xs array of size input vectors
+/// @param size count of input vectors
+/// @param target output vec
+void true_majority_into_avx512(word_t **xs, size_t size, word_t *target) {
+ assert(size % 2 == 1 && "true majority must be given an odd number of input hypervectors");
+ switch (size) {
+ case 1: memcpy(target, xs[0], BYTES); return;
+ case 3: majority3_into_ternary_avx512(xs[0], xs[1], xs[2], target); return;
+ case 5: logic_majority_into_avx512<5>(xs, target); return;
+ case 7: logic_majority_into_avx512<7>(xs, target); return;
+ case 9: logic_majority_into_avx512<9>(xs, target); return;
+ case 11: logic_majority_into_avx512<11>(xs, target); return;
+ case 13: logic_majority_into_avx512<13>(xs, target); return;
+ case 15: logic_majority_into_avx512<15>(xs, target); return;
+ case 17: logic_majority_into_avx512<17>(xs, target); return;
+ case 19: logic_majority_into_avx512<19>(xs, target); return;
+ default: threshold_into_avx512(xs, size, size/2, target); return;
+ }
+}
+#endif //!__AVX512BW__
+
+#ifdef __AVX2__
+/// @brief AVX-256 implementation of Decision-Tree-Majority algorithm
+/// @note Optimal for use in the N=7 to N=79 regime. After that, thresholding is faster
+template<uint8_t size>
+void logic_majority_into_avx2(word_t **xs, word_t *target) {
+ constexpr uint8_t half = size / 2;
+ __m256i grid[size / 2 + 1][size / 2 + 1];
+
+ for (word_iter_t word_id = 0; word_id < WORDS; word_id += 4) {
+ word_t *x = xs[size - 1];
+ grid[half][half] = _mm256_loadu_si256((__m256i *) (x + word_id));
+
+ for (uint8_t i = 0; i < half; ++i) {
+ x = xs[size - i - 2];
+ __m256i chunk = _mm256_loadu_si256((__m256i *) (x + word_id));
+
+ grid[half - i - 1][half] = grid[half - i][half] & chunk;
+ grid[half][half - i - 1] = grid[half][half - i] | chunk;
+ }
+
+ //NOTE: loop terminates when variable wraps after 0
+ for (uint8_t i = half - 1; i < half; --i)
+ for (uint8_t j = half - 1; j < half; --j) {
+ x = xs[i + j];
+ __m256i chunk = _mm256_loadu_si256((__m256i *) (x + word_id));
+
+ grid[i][j] = grid[i][j + 1] ^ (chunk & (grid[i][j + 1] ^ grid[i + 1][j])); // select
+ }
+
+ _mm256_storeu_si256((__m256i *) (target + word_id), grid[0][0]);
+ }
+}
+
+/// @brief AVX-2 version of majority3_into
+void majority3_into_avx2(word_t *x, word_t *y, word_t *z, word_t *target) {
+ for (word_iter_t word_id = 0; word_id < WORDS; word_id += 4) {
+ __m256i xi = _mm256_loadu_si256((__m256i *) (x + word_id));
+ __m256i yi = _mm256_loadu_si256((__m256i *) (y + word_id));
+ __m256i zi = _mm256_loadu_si256((__m256i *) (z + word_id));
+
+ __m256i result = ((xi & yi) | (xi & zi) | (yi & zi));
+
+ _mm256_storeu_si256((__m256i *) (target + word_id), result);
+ }
+}
+
+/// @brief Computes the majority value for each bit among all corresponding bits in the input vectors
+/// @param xs array of size input vectors
+/// @param size count of input vectors
+/// @param target output vec
+void true_majority_into_avx2(word_t **xs, size_t size, word_t *target) {
+ assert(size % 2 == 1 && "true majority must be given an odd number of input hypervectors");
+ switch (size) {
+ case 1: memcpy(target, xs[0], BYTES); return;
+ case 3: majority3_into_avx2(xs[0], xs[1], xs[2], target); return;
+ case 5: logic_majority_into_avx2<5>(xs, target); return;
+ case 7: logic_majority_into_avx2<7>(xs, target); return;
+ case 9: logic_majority_into_avx2<9>(xs, target); return;
+ case 11: logic_majority_into_avx2<11>(xs, target); return;
+ case 13: logic_majority_into_avx2<13>(xs, target); return;
+ case 15: logic_majority_into_avx2<15>(xs, target); return;
+ default: threshold_into_avx2(xs, size, size/2, target); return;
+ }
+}
+#endif //__AVX2__
+
+#if __AVX512BW__
+#define logic_majority_into logic_majority_into_avx512
+#define majority3_into majority3_into_ternary_avx512
+#define true_majority_into true_majority_into_avx512
+#elif __AVX2__
+#define majority3_into majority3_into_avx2
+#define logic_majority_into logic_majority_into_avx2
+#define true_majority_into true_majority_into_avx2
+#else
+#define majority3_into majority3_into_reference
+#define logic_majority_into logic_majority_into_avx2 //GOAT, need a straight C impl
+#define true_majority_into true_majority_into_avx2 //GOAT, need a straight C impl
+#endif
+
+/// @brief Computes the majority value for each bit among all corresponding bits in the input vectors
+/// @param xs array of size input vectors
+/// @param size count of input vectors
+/// @return Return hypervector
+word_t *true_majority(word_t **xs, size_t size) {
+ word_t *new_vec = bhv::empty();
+ true_majority_into(xs, size, new_vec);
+ return new_vec;
+}
diff --git a/bhv/cnative/packed.h b/bhv/cnative/packed.h
deleted file mode 100644
index bcda9f0..0000000
--- a/bhv/cnative/packed.h
+++ /dev/null
@@ -1,292 +0,0 @@
-#ifndef BHV_PACKED_H
-#define BHV_PACKED_H
-
-#include <bit>
-#include <random>
-#include <cstring>
-#include <algorithm>
-#include "shared.h"
-//#include "TurboSHAKE.h"
-#include "TurboSHAKEopt/TurboSHAKE.h"
-
-
-namespace bhv {
- constexpr word_t ONE_WORD = std::numeric_limits<word_t>::max();
- constexpr bit_word_iter_t HALF_BITS_PER_WORD = BITS_PER_WORD/2;
- constexpr word_t HALF_WORD = ONE_WORD << HALF_BITS_PER_WORD;
- constexpr word_t OTHER_HALF_WORD = ~HALF_WORD;
-
- std::mt19937_64 rng;
-
- word_t * empty() {
- return (word_t *) malloc(BYTES);
- }
-
- word_t * zero() {
- return (word_t *) calloc(WORDS, sizeof(word_t));
- }
-
- word_t * one() {
- word_t * x = empty();
- for (word_iter_t i = 0; i < WORDS; ++i) {
- x[i] = ONE_WORD;
- }
- return x;
- }
-
- word_t * half() {
- word_t * x = empty();
- for (word_iter_t i = 0; i < WORDS; ++i) {
- x[i] = HALF_WORD;
- }
- return x;
- }
-
- void swap_halves_into(word_t * x, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = ((x[i] & HALF_WORD) >> HALF_BITS_PER_WORD) | ((x[i] & OTHER_HALF_WORD) << HALF_BITS_PER_WORD);
- }
- }
-
- void rand_into(word_t * x) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- x[i] = rng();
- }
- }
-
- void random_into(word_t * x, float_t p) {
- std::uniform_real_distribution<float> gen(0.0, 1.0);
-
- for (word_iter_t i = 0; i < WORDS; ++i) {
- word_t word = 0;
- for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
- if (gen(rng) < p)
- word |= 1UL << bit_id;
- }
- x[i] = word;
- }
- }
-
- word_t * rand() {
- word_t * x = empty();
- rand_into(x);
- return x;
- }
-
- word_t * random(float_t p) {
- word_t * x = empty();
- random_into(x, p);
- return x;
- }
-
- bit_iter_t active(word_t * x) {
- bit_iter_t total = 0;
- for (word_iter_t i = 0; i < WORDS; ++i) {
- total += __builtin_popcountl(x[i]);
- }
- return total;
- }
-
- bit_iter_t hamming(word_t * x, word_t * y) {
- bit_iter_t total = 0;
- for (word_iter_t i = 0; i < WORDS; ++i) {
- total += __builtin_popcountl(x[i] ^ y[i]);
- }
- return total;
- }
-
- bool eq(word_t * x, word_t * y) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- if (x[i] != y[i])
- return false;
- }
- return true;
- }
-
-
- void xor_into(word_t * x, word_t * y, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = x[i] ^ y[i];
- }
- }
-
- void and_into(word_t * x, word_t * y, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = x[i] & y[i];
- }
- }
-
- void or_into(word_t * x, word_t * y, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = x[i] | y[i];
- }
- }
-
- void invert_into(word_t * x, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = ~x[i];
- }
- }
-
- template <typename N>
- N* generic_counts(word_t ** xs, N size) {
- N* totals = (N *) calloc(BITS, sizeof(N));
-
- for (N i = 0; i < size; ++i) {
- word_t * x = xs[i];
-
- for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
- bit_iter_t offset = word_id * BITS_PER_WORD;
- word_t word = x[word_id];
- for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
- totals[offset + bit_id] += ((word >> bit_id) & 1);
- }
- }
- }
-
- return totals;
- }
-
- template <typename N>
- word_t* generic_gt(N * totals, N threshold) {
- word_t * x = empty();
-
- for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
- bit_iter_t offset = word_id * BITS_PER_WORD;
- word_t word = 0;
- for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
- if (threshold < totals[offset + bit_id])
- word |= 1UL << bit_id;
- }
- x[word_id] = word;
- }
- free(totals);
- return x;
- }
-
- word_t * representative_impl(word_t ** xs, size_t size) {
- word_t * x = zero();
-
- std::uniform_int_distribution<size_t> gen(0, size - 1);
- for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
- word_t word = 0;
- for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
- size_t x_id = gen(rng);
- if ((xs[x_id][word_id] >> bit_id) & 1)
- word |= 1UL << bit_id;
- }
- x[word_id] = word;
- }
-
- return x;
- }
-
- word_t * n_representatives_impl(word_t ** xs, size_t size) {
- word_t * x = zero();
-
- std::uniform_int_distribution<size_t> gen(0, size - 1);
- for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
- word_t word = 0;
- for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
- size_t x_id = gen(rng);
- word |= 1UL << (xs[x_id][word_id] >> bit_id) & 1;
- }
- x[word_id] = word;
- }
-
- return x;
- }
-
- word_t* threshold(word_t ** xs, size_t size, size_t threshold) {
- if (size < UINT8_MAX)
- return generic_gt<uint8_t>(generic_counts<uint8_t>(xs, size), threshold);
- else if (size < UINT16_MAX)
- return generic_gt<uint16_t>(generic_counts<uint16_t>(xs, size), threshold);
- else
- return generic_gt<uint32_t>(generic_counts<uint32_t>(xs, size), threshold);
- }
-
- void select_into(word_t * cond, word_t * when1, word_t * when0, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = when0[i] ^ (cond[i] & (when0[i] ^ when1[i]));
- }
- }
-
- void majority3_into(word_t * x, word_t * y, word_t * z, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = ((x[i] & y[i]) | (x[i] & z[i]) | (y[i] & z[i]));
- }
- }
-
- word_t* true_majority(word_t ** xs, size_t size) {
- if (size == 0) return rand();
- else if (size == 1) { word_t * r = empty(); memcpy(r, xs[0], BYTES); return r; }
- else if (size == 2) { word_t * r = rand(); select_into(r, xs[0], xs[1], r); return r; }
- else if (size == 3) { word_t * r = empty(); majority3_into(xs[0], xs[1], xs[2], r); return r; }
- else return threshold(xs, size, size/2);
- }
-
- word_t* representative(word_t ** xs, size_t size) {
- if (size == 0) return rand();
- else if (size == 1) { word_t * r = empty(); memcpy(r, xs[0], BYTES); return r; }
- else if (size == 2) { word_t * r = rand(); select_into(r, xs[0], xs[1], r); return r; }
- else return representative_impl(xs, size);
- }
-
- void roll_words_into(word_t * x, int32_t d, word_t * target) {
- int32_t offset = ((d % WORDS) + WORDS) % WORDS;
-
- memcpy(target, x + offset, (WORDS - offset) * sizeof(word_t));
- memcpy(target + WORDS - offset, x, offset * sizeof(word_t));
- }
-
- void roll_word_bits_into(word_t * x, int32_t d, word_t * target) {
- int32_t offset = d % BITS_PER_WORD;
-
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = std::rotl(x[i], offset);
- }
- }
-
- void permute_words_into(word_t * x, word_iter_t* word_permutation, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[i] = x[word_permutation[i]];
- }
- }
-
- void inverse_permute_words_into(word_t * x, word_iter_t* word_permutation, word_t * target) {
- for (word_iter_t i = 0; i < WORDS; ++i) {
- target[word_permutation[i]] = x[i];
- }
- }
-
- word_iter_t* rand_word_permutation(uint32_t seed) {
- std::minstd_rand0 perm_rng(seed);
-
- auto p = (word_iter_t *) malloc(sizeof(word_iter_t)*WORDS);
-
- for (word_iter_t i = 0; i < WORDS; ++i)
- p[i] = i;
-
- std::shuffle(p, p + WORDS, perm_rng);
-
- return p;
- }
-
- void permute_into(word_t * x, int32_t perm_id, word_t * target) {
- if (perm_id == 0) {
- memcpy(target, x, BYTES);
- return;
- }
-
- word_iter_t* perm = rand_word_permutation(abs(perm_id));
- if (perm_id > 0) permute_words_into(x, perm, target);
- else inverse_permute_words_into(x, perm, target);
- free(perm);
- }
-
- void rehash_into(word_t * x, word_t * target) {
- TurboSHAKE(512, (uint8_t *)x, BYTES, 0x1F, (uint8_t *)target, BYTES);
- }
-}
-#endif //BHV_PACKED_H
diff --git a/bhv/cnative/permutation.h b/bhv/cnative/permutation.h
new file mode 100644
index 0000000..f81c8a7
--- /dev/null
+++ b/bhv/cnative/permutation.h
@@ -0,0 +1,224 @@
+void roll_words_into(word_t *x, int32_t d, word_t *target) {
+ int32_t offset = ((d % WORDS) + WORDS) % WORDS;
+
+ memcpy(target, x + offset, (WORDS - offset) * sizeof(word_t));
+ memcpy(target + WORDS - offset, x, offset * sizeof(word_t));
+}
+
+void roll_word_bits_into(word_t *x, int32_t d, word_t *target) {
+ int32_t offset = d % BITS_PER_WORD;
+
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ target[i] = std::rotl(x[i], offset);
+ }
+}
+
+uint8_t permute_single_byte_bits(uint8_t x, uint64_t p) {
+ uint64_t w = _pdep_u64(x, 0x0101010101010101);
+ uint64_t res = (uint64_t) _mm_shuffle_pi8(_mm_cvtsi64_m64(w), _mm_cvtsi64_m64(p));
+ return _pext_u64(res, 0x0101010101010101);
+}
+
+uint64_t byte_bits_permutation_invert(uint64_t p) {
+ uint64_t r = 0;
+
+ for (uint64_t i = 0; i < 8; ++i)
+ r |= i << (((p >> (i * 8)) & 0x07) * 8);
+
+ return r;
+}
+
+uint64_t rand_byte_bits_permutation(uint32_t seed) {
+ std::minstd_rand0 perm_rng(seed);
+
+ uint8_t p[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ std::shuffle(p, p + 8, perm_rng);
+
+ return *((uint64_t *) p);
+}
+
+void permute_byte_bits_into_shuffle(word_t *x, int32_t perm_id, word_t *target) {
+ if (perm_id == 0) {
+ memcpy(target, x, BYTES);
+ return;
+ }
+
+ uint8_t *x_bytes = (uint8_t *) x;
+ uint8_t *target_bytes = (uint8_t *) target;
+
+ uint64_t byte_perm = rand_byte_bits_permutation(abs(perm_id));
+ if (perm_id < 0) byte_perm = byte_bits_permutation_invert(byte_perm);
+
+ for (byte_iter_t i = 0; i < BYTES; ++i)
+ target_bytes[i] = permute_single_byte_bits(x_bytes[i], byte_perm);
+}
+
+uint64_t byte_bits_permutation_matrix(uint64_t packed_indices) {
+ uint64_t r = 0;
+
+ for (uint8_t i = 0; i < 64; i += 8)
+ r |= 1ULL << ((56 - i) + ((packed_indices >> i) & 0x07));
+
+ return r;
+}
+
+#if __GFNI__
+void permute_byte_bits_into_gfni(word_t *x, int32_t perm_id, word_t *target) {
+ if (perm_id == 0) {
+ memcpy(target, x, BYTES);
+ return;
+ }
+
+ __m512i *x_vec = (__m512i *) x;
+ __m512i *target_vec = (__m512i *) target;
+
+ uint64_t byte_perm = rand_byte_bits_permutation(abs(perm_id));
+ if (perm_id < 0) byte_perm = byte_bits_permutation_invert(byte_perm);
+ uint64_t byte_perm_matrix = byte_bits_permutation_matrix(byte_perm);
+ __m512i byte_perm_matrices = _mm512_set1_epi64(byte_perm_matrix);
+
+ for (word_iter_t i = 0; i < BITS/512; ++i) {
+ __m512i vec = _mm512_loadu_si512(x_vec + i);
+ __m512i permuted_vec = _mm512_gf2p8affine_epi64_epi8(vec, byte_perm_matrices, 0);
+ _mm512_storeu_si512(target_vec + i, permuted_vec);
+ }
+}
+#endif
+
+#if __GFNI__
+#define permute_byte_bits_into permute_byte_bits_into_gfni
+#else
+#define permute_byte_bits_into permute_byte_bits_into_shuffle
+#endif
+
+#if __AVX512BW__
+uint64_t permute_single_word_bits(uint64_t x, __m512i p) {
+ __m512i x_simd = _mm512_set1_epi64(x);
+ __mmask64 permuted_bits = _mm512_bitshuffle_epi64_mask(x_simd, p);
+ return _cvtmask64_u64(permuted_bits);
+}
+
+__m512i word_bits_permutation_invert(__m512i p) {
+ uint8_t p_array [64];
+ uint8_t r_array [64];
+ _mm512_storeu_si512(p_array, p);
+
+ for (uint8_t i = 0; i < 64; ++i)
+ r_array[p_array[i] & 0x3f] = i;
+
+ return _mm512_loadu_si512(r_array);
+}
+
+__m512i rand_word_bits_permutation(uint32_t seed) {
+ std::minstd_rand0 perm_rng(seed);
+
+ uint8_t p [64];
+ for (uint8_t i = 0; i < 64; ++i)
+ p[i] = i;
+
+ std::shuffle(p, p + 64, perm_rng);
+
+ return _mm512_loadu_si512(p);
+}
+
+void permute_word_bits_into(word_t * x, int32_t perm_id, word_t * target) {
+ if (perm_id == 0) {memcpy(target, x, BYTES); return;}
+
+ __m512i word_perm = rand_word_bits_permutation(abs(perm_id));
+ if (perm_id < 0) word_perm = word_bits_permutation_invert(word_perm);
+
+ for (word_iter_t i = 0; i < WORDS; ++i)
+ target[i] = permute_single_word_bits(x[i], word_perm);
+}
+#endif
+
+template<bool inverse>
+void apply_word_permutation_into(word_t *x, word_iter_t *word_permutation, word_t *target) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ if constexpr (inverse)
+ target[word_permutation[i]] = x[i];
+ else
+ target[i] = x[word_permutation[i]];
+ }
+}
+
+void rand_word_permutation_into(uint32_t seed, word_iter_t *p) {
+ std::minstd_rand0 perm_rng(seed);
+
+ for (word_iter_t i = 0; i < WORDS; ++i)
+ p[i] = i;
+
+ std::shuffle(p, p + WORDS, perm_rng);
+}
+
+void permute_words_into(word_t *x, int32_t perm, word_t *target) {
+ if (perm == 0) {
+ memcpy(target, x, BYTES);
+ return;
+ }
+
+ word_iter_t p[WORDS];
+ rand_word_permutation_into(abs(perm), p);
+
+ if (perm > 0) apply_word_permutation_into<false>(x, p, target);
+ else apply_word_permutation_into<true>(x, p, target);
+}
+
+
+template<bool inverse>
+void apply_byte_permutation_into(word_t *x, byte_iter_t *byte_permutation, word_t *target) {
+ uint8_t *x_bytes = (uint8_t *) x;
+ uint8_t *target_bytes = (uint8_t *) target;
+
+ for (byte_iter_t i = 0; i < BYTES; ++i) {
+ if constexpr (inverse)
+ target_bytes[byte_permutation[i]] = x_bytes[i];
+ else
+ target_bytes[i] = x_bytes[byte_permutation[i]];
+ }
+}
+
+void rand_byte_permutation_into(uint32_t seed, byte_iter_t *p) {
+ std::minstd_rand0 perm_rng(seed);
+
+ for (byte_iter_t i = 0; i < BYTES; ++i)
+ p[i] = i;
+
+ std::shuffle(p, p + BYTES, perm_rng);
+}
+
+void permute_bytes_into(word_t *x, int32_t perm, word_t *target) {
+ if (perm == 0) {
+ memcpy(target, x, BYTES);
+ return;
+ }
+
+ byte_iter_t p[BYTES];
+ rand_byte_permutation_into(abs(perm), p);
+
+ if (perm > 0) apply_byte_permutation_into<false>(x, p, target);
+ else apply_byte_permutation_into<true>(x, p, target);
+}
+
+void permute_into(word_t *x, int32_t perm, word_t *target) {
+#if __AVX512BW__
+ permute_words_into(x, perm, target);
+ permute_word_bits_into(target, perm, target);
+#else
+ permute_bytes_into(x, perm, target);
+ permute_byte_bits_into(target, perm, target);
+#endif
+}
+
+word_t * permute(word_t *x, int32_t perm) {
+ word_t *r = empty();
+#if __AVX512BW__
+ permute_words_into(x, perm, r);
+ permute_word_bits_into(r, perm, r);
+#else
+ permute_bytes_into(x, perm, r);
+ permute_byte_bits_into(r, perm, r);
+#endif
+ return r;
+}
diff --git a/bhv/cnative/random.h b/bhv/cnative/random.h
new file mode 100644
index 0000000..996cb2c
--- /dev/null
+++ b/bhv/cnative/random.h
@@ -0,0 +1,270 @@
+void rand_into_reference(word_t *x) {
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ x[i] = rng();
+ }
+}
+
+#ifdef __AESNI__
+__m256i aes_state = _mm256_setzero_si256();
+__m256i increment = _mm256_set_epi8(0x2f, 0x2b, 0x29, 0x25, 0x1f, 0x1d, 0x17, 0x13,
+ 0x11, 0x0D, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01,
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c);
+
+void rand_into_aes(word_t *x) {
+ for (word_iter_t i = 0; i < WORDS; i += 8) {
+ aes_state += increment;
+
+ __m256i penultimate = _mm256_aesenc_epi128(aes_state, increment);
+
+ _mm256_storeu_si256((__m256i *) (x + i), _mm256_aesenc_epi128(penultimate, increment));
+ _mm256_storeu_si256((__m256i *) (x + i + 4), _mm256_aesdec_epi128(penultimate, increment));
+ }
+}
+#endif
+
+#ifdef __AVX2__
+avx2_pcg32_random_t avx2_key = {
+ .state = {_mm256_set_epi64x(0xb5f380a45f908741, 0x88b545898d45385d, 0xd81c7fe764f8966c, 0x44a9a3b6b119e7bc),
+ _mm256_set_epi64x(0x3cb6e04dc22f629, 0x727947debc931183, 0xfbfa8fdcff91891f, 0xb9384fd8f34c0f49)},
+ .inc = {_mm256_set_epi64x(0xbf2de0670ac3d03e, 0x98c40c0dc94e71e, 0xf3565f35a8c61d00, 0xd3c83e29b30df640),
+ _mm256_set_epi64x(0x14b7f6e4c89630fa, 0x37cc7b0347694551, 0x4a052322d95d485b, 0x10f3ade77a26e15e)},
+ .pcg32_mult_l = _mm256_set1_epi64x(UINT64_C(0x5851f42d4c957f2d) & 0xffffffffu),
+ .pcg32_mult_h = _mm256_set1_epi64x(UINT64_C(0x5851f42d4c957f2d) >> 32)};
+
+void rand_into_avx2(word_t *x) {
+ for (word_iter_t i = 0; i < WORDS; i += 4) {
+ _mm256_storeu_si256((__m256i *) (x + i), avx2_pcg32_random_r(&avx2_key));
+ }
+}
+#endif //__AVX2__
+
+#if __AVX512BW__
+avx512_pcg32_random_t avx512_narrow_key = {
+ .state = _mm512_set_epi64(0xb5f380a45f908741, 0x88b545898d45385d, 0xd81c7fe764f8966c, 0x44a9a3b6b119e7bc, 0x3cb6e04dc22f629, 0x727947debc931183, 0xfbfa8fdcff91891f, 0xb9384fd8f34c0f49),
+ .inc = _mm512_set_epi64(0xbf2de0670ac3d03e, 0x98c40c0dc94e71e, 0xf3565f35a8c61d00, 0xd3c83e29b30df640, 0x14b7f6e4c89630fa, 0x37cc7b0347694551, 0x4a052322d95d485b, 0x10f3ade77a26e15e),
+ .multiplier = _mm512_set1_epi64(0x5851f42d4c957f2d)};
+
+avx512bis_pcg32_random_t avx512_key = {
+ .state = {_mm512_set_epi64(0xb5f380a45f908741, 0x88b545898d45385d, 0xd81c7fe764f8966c, 0x44a9a3b6b119e7bc, 0x3cb6e04dc22f629, 0x727947debc931183, 0xfbfa8fdcff91891f, 0xb9384fd8f34c0f49),
+ _mm512_set_epi64(0xe4253e998046cdfb, 0x78a622340a6ad250, 0x5e414281f13fd909, 0x3015456ade10a4d0, 0x7294fe41ba737ee9, 0x36dc2d779e797897, 0x81228ea9c9bb25a2, 0xfbfca70842e57746)},
+ .inc = {_mm512_set_epi64(0xbf2de0670ac3d03e, 0x98c40c0dc94e71e, 0xf3565f35a8c61d00, 0xd3c83e29b30df640, 0x14b7f6e4c89630fa, 0x37cc7b0347694551, 0x4a052322d95d485b, 0x10f3ade77a26e15e),
+ _mm512_set_epi64(0x5e3cf9dbf6635b3c, 0x2a580d00dc0e34cd, 0xb2b1c52ab1c72ca6, 0x4a683d7ad57caba0, 0x76b85fc2d899c649, 0xf28e80cc844192ff, 0x40a357e9b7739d1e, 0xeb8aa949b57f75de)},
+ .multiplier = _mm512_set1_epi64(0x5851f42d4c957f2d)};
+
+void rand_into_avx512(word_t * x) {
+ for (word_iter_t i = 0; i < WORDS; i += 8) {
+ _mm512_storeu_si512((__m512i*)(x + i), avx512bis_pcg32_random_r(&avx512_key));
+ }
+}
+#endif
+
+#if __AVX512BW__
+#define rand_into rand_into_avx512
+#elif __AVX2__
+#define rand_into rand_into_avx2
+#else
+#define rand_into rand_into_reference
+#endif
+
+void rand2_into_reference(word_t *target, int8_t pow) {
+ if (pow == 0)
+ return rand_into_reference(target);
+
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ word_t w = rng();
+ for (int8_t p = 0; p < std::abs(pow); ++p) {
+ if (pow > 0)
+ w &= rng();
+ else
+ w |= rng();
+ }
+ target[i] = w;
+ }
+}
+
+#define rand2_into rand2_into_reference
+
+void random_into_reference(word_t *x, float_t p) {
+ std::bernoulli_distribution gen(p);
+
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ word_t word = 0;
+ for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
+ if (gen(rng))
+ word |= 1UL << bit_id;
+ }
+ x[i] = word;
+ }
+}
+
+// Note This could have an AVX-512 implementation with 512-bit float-level log and floor, and probably and equivalent to generate_canonical
+template<bool additive>
+void sparse_random_switch_into(word_t *x, float_t prob, word_t *target) {
+ double inv_log_not_prob = 1. / log(1 - prob);
+ size_t skip_count = floor(log(generate_canonical<float_t, 23>(rng)) * inv_log_not_prob);
+
+ for (word_iter_t i = 0; i < WORDS; ++i) {
+ word_t word = x[i];
+ while (skip_count < BITS_PER_WORD) {
+ if constexpr (additive)
+ word |= 1UL << skip_count;
+ else
+ word &= ~(1UL << skip_count);
+ skip_count += floor(log(generate_canonical<float_t, 23>(rng)) * inv_log_not_prob);
+ }
+ skip_count -= BITS_PER_WORD;
+ target[i] = word;
+ }
+}
+
+uint64_t instruction_upto(float target, uint8_t *to, float *remaining, float threshold = 1e-4) {
+ uint8_t depth = 0;
+ uint64_t res = 0;
+ float frac = target;
+ float delta;
+ float correction;
+
+ do {
+ delta = frac - (1.f / (float) (2 << depth));
+
+ if (delta > 0) {
+ res |= 1ULL << depth;
+ frac = delta;
+ }
+
+ depth += 1;
+ correction = (1. - target) / (1. - (target + delta)) - 1.;
+ } while (abs(correction) > threshold);
+
+ *to = depth - 1;
+ *remaining = correction;
+ return res;
+}
+
+#ifdef __AVX2__
+
+void random_into_tree_sparse_avx2(word_t *x, float p) {
+ constexpr float sparse_faster_threshold = .002;
+
+ if (p < sparse_faster_threshold)
+ return sparse_random_switch_into<true>(ZERO, p, x);
+ else if (p > (1.f - sparse_faster_threshold))
+ return sparse_random_switch_into<false>(ONE, 1.f - p, x);
+
+ uint8_t to;
+ float correction;
+ uint64_t instr = instruction_upto(p, &to, &correction, sparse_faster_threshold);
+
+ for (word_iter_t word_id = 0; word_id < WORDS; word_id += 4) {
+ __m256i chunk = avx2_pcg32_random_r(&avx2_key);
+
+ for (uint8_t i = to - 1; i < to; --i) {
+ if ((instr & (1ULL << i)) >> i)
+ chunk = _mm256_or_si256(chunk, avx2_pcg32_random_r(&avx2_key));
+ else
+ chunk = _mm256_and_si256(chunk, avx2_pcg32_random_r(&avx2_key));
+ }
+
+ _mm256_storeu_si256((__m256i *) (x + word_id), chunk);
+ }
+
+ if (correction == 0.)
+ return;
+ else if (correction > 0.)
+ return sparse_random_switch_into<true>(x, correction, x);
+ else if (correction < 0.)
+ return sparse_random_switch_into<false>(x, -correction, x);
+}
+#endif //__AVX2__
+
+int8_t ternary_instruction(float af, uint8_t* instr, uint8_t *to, float threshold=1e-6) {
+ if (af <= 0.) return -2;
+ if (af >= 1.) return -3;
+
+ float da = af - (1.f / (float) (2 << (2*(*to))));
+
+ if (abs(da) <= threshold) return -1;
+
+ if (da > 0) af = da;
+
+ float db = af - (1.f / (float) (2 << (2*(*to) + 1)));
+
+ if (db > 0) af = db;
+
+ if (abs(db) > threshold) {
+ if (da > 0) {
+ if (db > 0) instr[*to] = 0;
+ else instr[*to] = 1;
+ } else {
+ if (db > 0) instr[*to] = 2;
+ else instr[*to] = 3;
+ }
+ *to += 1;
+ return ternary_instruction(af, instr, to, threshold);
+ }
+
+ return da > 0;
+}
+
+#if __AVX512BW__
+void random_into_ternary_tree_avx512(word_t *x, float_t p) {
+ if (p < 1.f/128.f)
+ return sparse_random_switch_into<true>(ZERO, p, x);
+ else if (p > 127.f/128.f)
+ return sparse_random_switch_into<false>(ONE, 1.f - p, x);
+
+ uint8_t buffer [24];
+ uint8_t to = 0;
+ int8_t finalizer = ternary_instruction(p, buffer, &to, 5e-4);
+
+ for (word_iter_t word_id = 0; word_id < WORDS; word_id += 8) {
+ __m512i chunk = avx512bis_pcg32_random_r(&avx512_key);
+
+ switch (finalizer) {
+ case 1: chunk = _mm512_or_si512(avx512bis_pcg32_random_r(&avx512_key), chunk); break;
+ case 0: chunk = _mm512_and_si512(avx512bis_pcg32_random_r(&avx512_key), chunk); break;
+ case -1: break;
+ case -2: case -3: assert(false);
+ }
+
+ for (int i = (int)to - 1; i >= 0; --i) switch (buffer[i]) {
+ case 0: chunk = _mm512_ternarylogic_epi64(avx512bis_pcg32_random_r(&avx512_key),
+ avx512bis_pcg32_random_r(&avx512_key),
+ chunk, 0b11111110); break;
+ case 1: chunk = _mm512_ternarylogic_epi64(avx512bis_pcg32_random_r(&avx512_key),
+ avx512bis_pcg32_random_r(&avx512_key),
+ chunk, 0b11111000); break;
+ case 2: chunk = _mm512_ternarylogic_epi64(avx512bis_pcg32_random_r(&avx512_key),
+ avx512bis_pcg32_random_r(&avx512_key),
+ chunk, 0b11100000); break;
+ case 3: chunk = _mm512_ternarylogic_epi64(avx512bis_pcg32_random_r(&avx512_key),
+ avx512bis_pcg32_random_r(&avx512_key),
+ chunk, 0b10000000); break;
+ }
+
+ _mm512_storeu_si512((__m512i *) (x + word_id), chunk);
+ }
+}
+#endif
+
+#if __AVX512BW__
+#define random_into random_into_ternary_tree_avx512
+#elif __AVX2__
+#define random_into random_into_tree_sparse_avx2
+#else
+#define random_into random_into_reference
+#endif //#if __AVX512BW__
+
+
+word_t *rand() {
+ word_t *x = empty();
+ rand_into(x);
+ return x;
+}
+
+word_t *random(float_t p) {
+ word_t *x = empty();
+ random_into(x, p);
+ return x;
+}
diff --git a/bhv/cnative/representative.h b/bhv/cnative/representative.h
new file mode 100644
index 0000000..af78c1a
--- /dev/null
+++ b/bhv/cnative/representative.h
@@ -0,0 +1,37 @@
+/// @brief Plain C implementation of representative sampling
+void representative_into_reference(word_t **xs, size_t size, word_t *target) {
+ std::uniform_int_distribution<size_t> gen(0, size - 1);
+ for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
+ word_t word = 0;
+ for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
+ size_t x_id = gen(rng);
+ if ((xs[x_id][word_id] >> bit_id) & 1)
+ word |= 1UL << bit_id;
+ }
+ target[word_id] = word;
+ }
+}
+
+//word_t * n_representatives_impl(word_t ** xs, size_t size) {
+// word_t * x = zero();
+//
+// std::uniform_int_distribution<size_t> gen(0, size - 1);
+// for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
+// word_t word = 0;
+// for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
+// size_t x_id = gen(rng);
+// word |= 1UL << (xs[x_id][word_id] >> bit_id) & 1;
+// }
+// x[word_id] = word;
+// }
+//
+// return x;
+//}
+
+/// @brief For each dimension, samples a bit from xs into target
+void representative_into(word_t **xs, size_t size, word_t *target) {
+ if (size == 0) rand_into(target);
+ else if (size == 1) memcpy(target, xs[0], BYTES);
+ else if (size == 2) { word_t r[WORDS]; rand_into(r); select_into(r, xs[0], xs[1], target); }
+ else representative_into_reference(xs, size, target);
+}
\ No newline at end of file
diff --git a/bhv/cnative/run.sh b/bhv/cnative/run.sh
index a4b90c7..7a9663e 100755
--- a/bhv/cnative/run.sh
+++ b/bhv/cnative/run.sh
@@ -1,3 +1,5 @@
- g++ scatter_add_gt_gather.cpp -O3 -std=c++20 -march=native
- ./a.out
- rm a.out
+g++ benchmark.cpp TurboSHAKE_opt/TurboSHAKE.cpp TurboSHAKE_opt/KeccakP-1600-opt64.cpp \
+ TurboSHAKE_AVX512/TurboSHAKE.cpp TurboSHAKE_AVX512/KeccakP-1600-AVX512.cpp \
+ -O3 -std=c++20 -march=native -Wall -Wpedantic -Wextra -g -ffast-math
+./a.out
+rm a.out
diff --git a/bhv/cnative/scatter_add_gt_gather.cpp b/bhv/cnative/scatter_add_gt_gather.cpp
deleted file mode 100644
index 34d14cc..0000000
--- a/bhv/cnative/scatter_add_gt_gather.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-#include <iostream>
-#include <chrono>
-#include <bitset>
-#include <immintrin.h>
-
-using namespace std;
-
-void print_bits(int64_t w) {
- std::bitset<64> x(w);
- std::cout << x << std::endl;
-}
-
-int main() {
- // the data we care about
- uint64_t b1 = 0b01101111;
- uint64_t b2 = 0b00010011;
- uint64_t b3 = 0b00110110;
- uint64_t re = 0b00110111; // expected result of MAJ3
-
- // scatter so each bit of the byte lines up with a byte of a word
- uint64_t b1_byte_spread = _pdep_u64(b1, 0x0101010101010101);
- print_bits(b1_byte_spread);
- uint64_t b2_byte_spread = _pdep_u64(b2, 0x0101010101010101);
- print_bits(b2_byte_spread);
- uint64_t b3_byte_spread = _pdep_u64(b3, 0x0101010101010101);
- print_bits(b3_byte_spread);
-
- // embedding it into __m128 so we can do bytewise processing
- __m128i b1_simd = _mm_setr_epi64(_mm_cvtsi64_m64(0), _mm_cvtsi64_m64(b1_byte_spread));
- __m128i b2_simd = _mm_setr_epi64(_mm_cvtsi64_m64(0), _mm_cvtsi64_m64(b2_byte_spread));
- __m128i b3_simd = _mm_setr_epi64(_mm_cvtsi64_m64(0), _mm_cvtsi64_m64(b3_byte_spread));
-
- // bytewise addition
- __m128i counts = _mm_add_epi8(_mm_add_epi8(b1_simd, b2_simd), b3_simd);
- print_bits(_mm_extract_epi64(counts, 1));
-
- // out threshold, in this case hardcoded for 3 inputs
- __m128i threshold = _mm_setr_epi64(_mm_cvtsi64_m64(0), _mm_cvtsi64_m64(0x0101010101010101));
-
- // bytewise greater then threshold
- uint64_t maj_word = _mm_extract_epi64(_mm_cmpgt_epi8(counts, threshold), 1);
- print_bits(maj_word);
-
- // gather so each gt result is a single bit
- uint64_t maj = _pext_u64(maj_word, 0x0101010101010101);
-
- // print whether the result is as exptected
- std::cout << (re == maj) << std::endl;
- return 0;
-}
diff --git a/bhv/cnative/shared.h b/bhv/cnative/shared.h
index 8d5210d..19f730e 100644
--- a/bhv/cnative/shared.h
+++ b/bhv/cnative/shared.h
@@ -11,7 +11,7 @@ using bit_word_iter_t = uint8_t;
constexpr bit_word_iter_t BITS_PER_WORD = 64;
constexpr bit_iter_t BITS = 8192;
-constexpr byte_iter_t BYTES = BITS/8;
-constexpr word_iter_t WORDS = BITS/BITS_PER_WORD;
+constexpr byte_iter_t BYTES = BITS / 8;
+constexpr word_iter_t WORDS = BITS / BITS_PER_WORD;
#endif //BHV_CONSTANTS_H
diff --git a/bhv/cnative/simdpcg.h b/bhv/cnative/simdpcg.h
new file mode 100644
index 0000000..d57db7d
--- /dev/null
+++ b/bhv/cnative/simdpcg.h
@@ -0,0 +1,210 @@
+#include <stdint.h> // life is short, please use a C99-compliant compiler
+
+#if defined(_MSC_VER)
+/* Microsoft C/C++-compatible compiler */
+#include <intrin.h>
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+/* GCC-compatible compiler, targeting x86/x86-64 */
+#include <x86intrin.h>
+
+#elif defined(__GNUC__) && defined(__ARM_NEON__)
+/* GCC-compatible compiler, targeting ARM with NEON */
+#include <arm_neon.h>
+#elif defined(__GNUC__) && defined(__IWMMXT__)
+/* GCC-compatible compiler, targeting ARM with WMMX */
+#include <mmintrin.h>
+#elif (defined(__GNUC__) || defined(__xlC__)) && \
+ (defined(__VEC__) || defined(__ALTIVEC__))
+/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
+#include <altivec.h>
+#elif defined(__GNUC__) && defined(__SPE__)
+/* GCC-compatible compiler, targeting PowerPC with SPE */
+#include <spe.h>
+#endif
+
+#ifdef __AVX2__
+#define AVX2PCG
+typedef struct avx2_pcg_state_setseq_64 {
+ __m256i state[2]; // RNG state. All values are possible.
+ __m256i inc[2]; // Controls which RNG sequence (stream) is selected. Must
+ // *always* be odd.
+ __m256i
+ pcg32_mult_l; // set to _mm256_set1_epi64x(UINT64_C(0x5851f42d4c957f2dULL)
+ // & 0xffffffffu);
+ __m256i
+ pcg32_mult_h; // set to _mm256_set1_epi64x(UINT64_C(0x5851f42d4c957f2dULL)
+ // >> 32);
+
+} avx2_pcg32_random_t;
+
+// credit Wenzel Jakob
+// https://github.com/wjakob/pcg32/blob/master/pcg32_8.h
+static inline __m256i avx2_pcg32_random_r(avx2_pcg32_random_t *rng) {
+ const __m256i mask_l = _mm256_set1_epi64x(UINT64_C(0x00000000ffffffff));
+ const __m256i shift0 = _mm256_set_epi32(7, 7, 7, 7, 6, 4, 2, 0);
+ const __m256i shift1 = _mm256_set_epi32(6, 4, 2, 0, 7, 7, 7, 7);
+ const __m256i const32 = _mm256_set1_epi32(32);
+
+ __m256i s0 = rng->state[0], s1 = rng->state[1];
+
+ /* Extract low and high words for partial products below */
+ __m256i s0_l = _mm256_and_si256(s0, mask_l);
+ __m256i s0_h = _mm256_srli_epi64(s0, 32);
+ __m256i s1_l = _mm256_and_si256(s1, mask_l);
+ __m256i s1_h = _mm256_srli_epi64(s1, 32);
+
+ /* Improve high bits using xorshift step */
+ __m256i s0s = _mm256_srli_epi64(s0, 18);
+ __m256i s1s = _mm256_srli_epi64(s1, 18);
+
+ __m256i s0x = _mm256_xor_si256(s0s, s0);
+ __m256i s1x = _mm256_xor_si256(s1s, s1);
+
+ __m256i s0xs = _mm256_srli_epi64(s0x, 27);
+ __m256i s1xs = _mm256_srli_epi64(s1x, 27);
+
+ __m256i xors0 = _mm256_and_si256(mask_l, s0xs);
+ __m256i xors1 = _mm256_and_si256(mask_l, s1xs);
+
+ /* Use high bits to choose a bit-level rotation */
+ __m256i rot0 = _mm256_srli_epi64(s0, 59);
+ __m256i rot1 = _mm256_srli_epi64(s1, 59);
+
+ /* 64 bit multiplication using 32 bit partial products :( */
+ __m256i m0_hl = _mm256_mul_epu32(s0_h, rng->pcg32_mult_l);
+ __m256i m1_hl = _mm256_mul_epu32(s1_h, rng->pcg32_mult_l);
+ __m256i m0_lh = _mm256_mul_epu32(s0_l, rng->pcg32_mult_h);
+ __m256i m1_lh = _mm256_mul_epu32(s1_l, rng->pcg32_mult_h);
+
+ /* Assemble lower 32 bits, will be merged into one 256 bit vector below */
+ xors0 = _mm256_permutevar8x32_epi32(xors0, shift0);
+ rot0 = _mm256_permutevar8x32_epi32(rot0, shift0);
+ xors1 = _mm256_permutevar8x32_epi32(xors1, shift1);
+ rot1 = _mm256_permutevar8x32_epi32(rot1, shift1);
+
+ /* Continue with partial products */
+ __m256i m0_ll = _mm256_mul_epu32(s0_l, rng->pcg32_mult_l);
+ __m256i m1_ll = _mm256_mul_epu32(s1_l, rng->pcg32_mult_l);
+
+ __m256i m0h = _mm256_add_epi64(m0_hl, m0_lh);
+ __m256i m1h = _mm256_add_epi64(m1_hl, m1_lh);
+
+ __m256i m0hs = _mm256_slli_epi64(m0h, 32);
+ __m256i m1hs = _mm256_slli_epi64(m1h, 32);
+
+ __m256i s0n = _mm256_add_epi64(m0hs, m0_ll);
+ __m256i s1n = _mm256_add_epi64(m1hs, m1_ll);
+
+ __m256i xors = _mm256_or_si256(xors0, xors1);
+ __m256i rot = _mm256_or_si256(rot0, rot1);
+
+ rng->state[0] = _mm256_add_epi64(s0n, rng->inc[0]);
+ rng->state[1] = _mm256_add_epi64(s1n, rng->inc[1]);
+
+ /* Finally, rotate and return the result */
+ __m256i result =
+ _mm256_or_si256(_mm256_srlv_epi32(xors, rot),
+ _mm256_sllv_epi32(xors, _mm256_sub_epi32(const32, rot)));
+
+ return result;
+}
+
+typedef struct avx256_pcg_state_setseq_64 { // Internals are *Private*.
+ __m256i state; // (8x64bits) RNG state. All values are possible.
+ __m256i inc; // (8x64bits)Controls which RNG sequences (stream) is
+ // selected. Must *always* be odd. You probably want
+ // distinct sequences
+ __m256i
+ pcg32_mult_l; // set to _mm256_set1_epi64x(UINT64_C(0x5851f42d4c957f2d) &
+ // 0xffffffff)
+ __m256i
+ pcg32_mult_h; // set to _mm256_set1_epi64x(UINT64_C(0x5851f42d4c957f2d) >>
+ // 32)
+
+} avx256_pcg32_random_t;
+
+// untested
+static inline __m256i hacked_mm256_rorv_epi32(__m256i x, __m256i r) {
+ return _mm256_or_si256(
+ _mm256_sllv_epi32(x, _mm256_sub_epi32(_mm256_set1_epi32(32), r)),
+ _mm256_srlv_epi32(x, r));
+}
+
+// untested
+static inline __m256i hacked_mm256_mullo_epi64(__m256i x, __m256i ml,
+ __m256i mh) {
+ __m256i xl =
+ _mm256_and_si256(x, _mm256_set1_epi64x(UINT64_C(0x00000000ffffffff)));
+ __m256i xh = _mm256_srli_epi64(x, 32);
+ __m256i hl = _mm256_slli_epi64(_mm256_mul_epu32(xh, ml), 32);
+ __m256i lh = _mm256_slli_epi64(_mm256_mul_epu32(xl, mh), 32);
+ __m256i ll = _mm256_mul_epu32(xl, ml);
+ return _mm256_add_epi64(ll, _mm256_add_epi64(hl, lh));
+}
+
+static inline __m128i avx256_pcg32_random_r(avx256_pcg32_random_t *rng) {
+ __m256i oldstate = rng->state;
+ rng->state =
+ _mm256_add_epi64(hacked_mm256_mullo_epi64(rng->state, rng->pcg32_mult_l,
+ rng->pcg32_mult_h),
+ rng->inc);
+ __m256i xorshifted = _mm256_srli_epi64(
+ _mm256_xor_si256(_mm256_srli_epi64(oldstate, 18), oldstate), 27);
+ __m256i rot = _mm256_srli_epi64(oldstate, 59);
+ return _mm256_castsi256_si128(
+ _mm256_permutevar8x32_epi32(hacked_mm256_rorv_epi32(xorshifted, rot),
+ _mm256_set_epi32(7, 7, 7, 7, 6, 4, 2, 0)));
+}
+#endif
+
+#if defined(__AVX512F__) && defined(__AVX512DQ__)
+#define AVX512PCG
+typedef struct avx512_pcg_state_setseq_64 { // Internals are *Private*.
+ __m512i state; // (8x64bits) RNG state. All values are possible.
+ __m512i inc; // (8x64bits)Controls which RNG sequences (stream) is
+ // selected. Must *always* be odd. You probably want
+ // distinct sequences
+ __m512i multiplier; // set to _mm512_set1_epi64(0x5851f42d4c957f2d);
+} avx512_pcg32_random_t;
+
+static inline __m256i avx512_pcg32_random_r(avx512_pcg32_random_t *rng) {
+ __m512i oldstate = rng->state;
+ rng->state = _mm512_add_epi64(_mm512_mullo_epi64(rng->multiplier, rng->state),
+ rng->inc);
+ __m512i xorshifted = _mm512_srli_epi64(
+ _mm512_xor_epi64(_mm512_srli_epi64(oldstate, 18), oldstate), 27);
+ __m512i rot = _mm512_srli_epi64(oldstate, 59);
+ return _mm512_cvtepi64_epi32(_mm512_rorv_epi32(xorshifted, rot));
+}
+
+typedef struct avx512bis_pcg_state_setseq_64 { // Internals are *Private*.
+ __m512i state[2]; // (8x64bits) RNG state. All values are possible.
+ __m512i inc[2]; // (8x64bits)Controls which RNG sequences (stream) is
+ // selected. Must *always* be odd. You probably want
+ // distinct sequences
+ __m512i multiplier; // set to _mm512_set1_epi64(0x5851f42d4c957f2d);
+} avx512bis_pcg32_random_t;
+
+static inline __m512i
+avx512bis_pcg32_random_r(avx512bis_pcg32_random_t *rng) {
+ __m512i oldstate0 = rng->state[0];
+ __m512i oldstate1 = rng->state[1];
+
+ rng->state[0] = _mm512_add_epi64(
+ _mm512_mullo_epi64(rng->multiplier, rng->state[0]), rng->inc[0]);
+ rng->state[1] = _mm512_add_epi64(
+ _mm512_mullo_epi64(rng->multiplier, rng->state[1]), rng->inc[1]);
+
+ __m512i xorshifted0 = _mm512_srli_epi64(
+ _mm512_xor_epi64(_mm512_srli_epi64(oldstate0, 18), oldstate0), 27);
+ __m512i rot0 = _mm512_srli_epi64(oldstate0, 59);
+ __m512i xorshifted1 = _mm512_srli_epi64(
+ _mm512_xor_epi64(_mm512_srli_epi64(oldstate1, 18), oldstate1), 27);
+ __m512i rot1 = _mm512_srli_epi64(oldstate1, 59);
+ return _mm512_inserti32x8(
+ _mm512_castsi256_si512(
+ _mm512_cvtepi64_epi32(_mm512_rorv_epi32(xorshifted0, rot0))),
+ _mm512_cvtepi64_epi32(_mm512_rorv_epi32(xorshifted1, rot1)), 1);
+}
+
+#endif
\ No newline at end of file
diff --git a/bhv/cnative/ternary.h b/bhv/cnative/ternary.h
new file mode 100644
index 0000000..3257bcc
--- /dev/null
+++ b/bhv/cnative/ternary.h
@@ -0,0 +1,574 @@
+void select_into_reference(word_t *cond, word_t *when1, word_t *when0, word_t *target) { // bitwise mux: each target bit = cond ? when1 : when0
+    for (word_iter_t i = 0; i < WORDS; ++i) {
+        target[i] = when0[i] ^ (cond[i] & (when0[i] ^ when1[i])); // branch-free select: flip when0's bits toward when1 where cond is set
+    }
+}
+
+#if __AVX512F__
+/// @note Under GCC -O3 the reference implementation compiles into the same instruction
+void select_into_ternary_avx512(word_t *cond, word_t *when1, word_t *when0, word_t *target) { // same select as the reference, one vpternlogq per 512-bit chunk
+    __m512i *cond_vec = (__m512i *)cond;
+    __m512i *when1_vec = (__m512i *)when1;
+    __m512i *when0_vec = (__m512i *)when0;
+    __m512i *target_vec = (__m512i *)target;
+
+    for (word_iter_t i = 0; i < BITS/512; ++i) { // unaligned loads/stores, so no alignment requirement on the buffers
+        _mm512_storeu_si512(target_vec + i,
+                            _mm512_ternarylogic_epi64(_mm512_loadu_si512(cond_vec + i),
+                                                      _mm512_loadu_si512(when1_vec + i),
+                                                      _mm512_loadu_si512(when0_vec + i), 0xca)); // imm 0xCA is the truth table of (a & b) | (~a & c), i.e. select
+    }
+}
+#endif
+
+#if __AVX512F__
+#define select_into select_into_ternary_avx512
+#else
+#define select_into select_into_reference
+#endif
+
+#if __AVX512F__
+template <uint8_t op>
+void ternary_into_avx512(word_t *x, word_t *y, word_t *z, word_t *target) { // apply an arbitrary 3-input boolean function (8-bit truth table `op`) bitwise
+    __m512i *x_vec = (__m512i *)x;
+    __m512i *y_vec = (__m512i *)y;
+    __m512i *z_vec = (__m512i *)z;
+    __m512i *target_vec = (__m512i *)target;
+
+    for (word_iter_t i = 0; i < BITS/512; ++i) {
+        _mm512_storeu_si512(target_vec + i,
+                            _mm512_ternarylogic_epi64(_mm512_loadu_si512(x_vec + i),
+                                                      _mm512_loadu_si512(y_vec + i),
+                                                      _mm512_loadu_si512(z_vec + i), op)); // op must be a compile-time immediate for vpternlogq, hence the template parameter
+    }
+}
+
+// ["case " + str(i) + ": ternary_into_avx512<0x" + hex(i)[2:].rjust(2, "0") + ">(x, y, z, target); break;" for i in range(256)]
+void dynamic_ternary_into_avx512(word_t *x, word_t *y, word_t *z, word_t *target, uint8_t op) { // runtime -> compile-time dispatch: vpternlogq needs an immediate
+    switch (op) { // exhaustive over uint8_t (0..255), so no default case is needed; table generated programmatically per the comment above
+        case 0: ternary_into_avx512<0x00>(x, y, z, target); break;
+        case 1: ternary_into_avx512<0x01>(x, y, z, target); break;
+        case 2: ternary_into_avx512<0x02>(x, y, z, target); break;
+        case 3: ternary_into_avx512<0x03>(x, y, z, target); break;
+        case 4: ternary_into_avx512<0x04>(x, y, z, target); break;
+        case 5: ternary_into_avx512<0x05>(x, y, z, target); break;
+        case 6: ternary_into_avx512<0x06>(x, y, z, target); break;
+        case 7: ternary_into_avx512<0x07>(x, y, z, target); break;
+        case 8: ternary_into_avx512<0x08>(x, y, z, target); break;
+        case 9: ternary_into_avx512<0x09>(x, y, z, target); break;
+        case 10: ternary_into_avx512<0x0a>(x, y, z, target); break;
+        case 11: ternary_into_avx512<0x0b>(x, y, z, target); break;
+        case 12: ternary_into_avx512<0x0c>(x, y, z, target); break;
+        case 13: ternary_into_avx512<0x0d>(x, y, z, target); break;
+        case 14: ternary_into_avx512<0x0e>(x, y, z, target); break;
+        case 15: ternary_into_avx512<0x0f>(x, y, z, target); break;
+        case 16: ternary_into_avx512<0x10>(x, y, z, target); break;
+        case 17: ternary_into_avx512<0x11>(x, y, z, target); break;
+        case 18: ternary_into_avx512<0x12>(x, y, z, target); break;
+        case 19: ternary_into_avx512<0x13>(x, y, z, target); break;
+        case 20: ternary_into_avx512<0x14>(x, y, z, target); break;
+        case 21: ternary_into_avx512<0x15>(x, y, z, target); break;
+        case 22: ternary_into_avx512<0x16>(x, y, z, target); break;
+        case 23: ternary_into_avx512<0x17>(x, y, z, target); break;
+        case 24: ternary_into_avx512<0x18>(x, y, z, target); break;
+        case 25: ternary_into_avx512<0x19>(x, y, z, target); break;
+        case 26: ternary_into_avx512<0x1a>(x, y, z, target); break;
+        case 27: ternary_into_avx512<0x1b>(x, y, z, target); break;
+        case 28: ternary_into_avx512<0x1c>(x, y, z, target); break;
+        case 29: ternary_into_avx512<0x1d>(x, y, z, target); break;
+        case 30: ternary_into_avx512<0x1e>(x, y, z, target); break;
+        case 31: ternary_into_avx512<0x1f>(x, y, z, target); break;
+        case 32: ternary_into_avx512<0x20>(x, y, z, target); break;
+        case 33: ternary_into_avx512<0x21>(x, y, z, target); break;
+        case 34: ternary_into_avx512<0x22>(x, y, z, target); break;
+        case 35: ternary_into_avx512<0x23>(x, y, z, target); break;
+        case 36: ternary_into_avx512<0x24>(x, y, z, target); break;
+        case 37: ternary_into_avx512<0x25>(x, y, z, target); break;
+        case 38: ternary_into_avx512<0x26>(x, y, z, target); break;
+        case 39: ternary_into_avx512<0x27>(x, y, z, target); break;
+        case 40: ternary_into_avx512<0x28>(x, y, z, target); break;
+        case 41: ternary_into_avx512<0x29>(x, y, z, target); break;
+        case 42: ternary_into_avx512<0x2a>(x, y, z, target); break;
+        case 43: ternary_into_avx512<0x2b>(x, y, z, target); break;
+        case 44: ternary_into_avx512<0x2c>(x, y, z, target); break;
+        case 45: ternary_into_avx512<0x2d>(x, y, z, target); break;
+        case 46: ternary_into_avx512<0x2e>(x, y, z, target); break;
+        case 47: ternary_into_avx512<0x2f>(x, y, z, target); break;
+        case 48: ternary_into_avx512<0x30>(x, y, z, target); break;
+        case 49: ternary_into_avx512<0x31>(x, y, z, target); break;
+        case 50: ternary_into_avx512<0x32>(x, y, z, target); break;
+        case 51: ternary_into_avx512<0x33>(x, y, z, target); break;
+        case 52: ternary_into_avx512<0x34>(x, y, z, target); break;
+        case 53: ternary_into_avx512<0x35>(x, y, z, target); break;
+        case 54: ternary_into_avx512<0x36>(x, y, z, target); break;
+        case 55: ternary_into_avx512<0x37>(x, y, z, target); break;
+        case 56: ternary_into_avx512<0x38>(x, y, z, target); break;
+        case 57: ternary_into_avx512<0x39>(x, y, z, target); break;
+        case 58: ternary_into_avx512<0x3a>(x, y, z, target); break;
+        case 59: ternary_into_avx512<0x3b>(x, y, z, target); break;
+        case 60: ternary_into_avx512<0x3c>(x, y, z, target); break;
+        case 61: ternary_into_avx512<0x3d>(x, y, z, target); break;
+        case 62: ternary_into_avx512<0x3e>(x, y, z, target); break;
+        case 63: ternary_into_avx512<0x3f>(x, y, z, target); break;
+        case 64: ternary_into_avx512<0x40>(x, y, z, target); break;
+        case 65: ternary_into_avx512<0x41>(x, y, z, target); break;
+        case 66: ternary_into_avx512<0x42>(x, y, z, target); break;
+        case 67: ternary_into_avx512<0x43>(x, y, z, target); break;
+        case 68: ternary_into_avx512<0x44>(x, y, z, target); break;
+        case 69: ternary_into_avx512<0x45>(x, y, z, target); break;
+        case 70: ternary_into_avx512<0x46>(x, y, z, target); break;
+        case 71: ternary_into_avx512<0x47>(x, y, z, target); break;
+        case 72: ternary_into_avx512<0x48>(x, y, z, target); break;
+        case 73: ternary_into_avx512<0x49>(x, y, z, target); break;
+        case 74: ternary_into_avx512<0x4a>(x, y, z, target); break;
+        case 75: ternary_into_avx512<0x4b>(x, y, z, target); break;
+        case 76: ternary_into_avx512<0x4c>(x, y, z, target); break;
+        case 77: ternary_into_avx512<0x4d>(x, y, z, target); break;
+        case 78: ternary_into_avx512<0x4e>(x, y, z, target); break;
+        case 79: ternary_into_avx512<0x4f>(x, y, z, target); break;
+        case 80: ternary_into_avx512<0x50>(x, y, z, target); break;
+        case 81: ternary_into_avx512<0x51>(x, y, z, target); break;
+        case 82: ternary_into_avx512<0x52>(x, y, z, target); break;
+        case 83: ternary_into_avx512<0x53>(x, y, z, target); break;
+        case 84: ternary_into_avx512<0x54>(x, y, z, target); break;
+        case 85: ternary_into_avx512<0x55>(x, y, z, target); break;
+        case 86: ternary_into_avx512<0x56>(x, y, z, target); break;
+        case 87: ternary_into_avx512<0x57>(x, y, z, target); break;
+        case 88: ternary_into_avx512<0x58>(x, y, z, target); break;
+        case 89: ternary_into_avx512<0x59>(x, y, z, target); break;
+        case 90: ternary_into_avx512<0x5a>(x, y, z, target); break;
+        case 91: ternary_into_avx512<0x5b>(x, y, z, target); break;
+        case 92: ternary_into_avx512<0x5c>(x, y, z, target); break;
+        case 93: ternary_into_avx512<0x5d>(x, y, z, target); break;
+        case 94: ternary_into_avx512<0x5e>(x, y, z, target); break;
+        case 95: ternary_into_avx512<0x5f>(x, y, z, target); break;
+        case 96: ternary_into_avx512<0x60>(x, y, z, target); break;
+        case 97: ternary_into_avx512<0x61>(x, y, z, target); break;
+        case 98: ternary_into_avx512<0x62>(x, y, z, target); break;
+        case 99: ternary_into_avx512<0x63>(x, y, z, target); break;
+        case 100: ternary_into_avx512<0x64>(x, y, z, target); break;
+        case 101: ternary_into_avx512<0x65>(x, y, z, target); break;
+        case 102: ternary_into_avx512<0x66>(x, y, z, target); break;
+        case 103: ternary_into_avx512<0x67>(x, y, z, target); break;
+        case 104: ternary_into_avx512<0x68>(x, y, z, target); break;
+        case 105: ternary_into_avx512<0x69>(x, y, z, target); break;
+        case 106: ternary_into_avx512<0x6a>(x, y, z, target); break;
+        case 107: ternary_into_avx512<0x6b>(x, y, z, target); break;
+        case 108: ternary_into_avx512<0x6c>(x, y, z, target); break;
+        case 109: ternary_into_avx512<0x6d>(x, y, z, target); break;
+        case 110: ternary_into_avx512<0x6e>(x, y, z, target); break;
+        case 111: ternary_into_avx512<0x6f>(x, y, z, target); break;
+        case 112: ternary_into_avx512<0x70>(x, y, z, target); break;
+        case 113: ternary_into_avx512<0x71>(x, y, z, target); break;
+        case 114: ternary_into_avx512<0x72>(x, y, z, target); break;
+        case 115: ternary_into_avx512<0x73>(x, y, z, target); break;
+        case 116: ternary_into_avx512<0x74>(x, y, z, target); break;
+        case 117: ternary_into_avx512<0x75>(x, y, z, target); break;
+        case 118: ternary_into_avx512<0x76>(x, y, z, target); break;
+        case 119: ternary_into_avx512<0x77>(x, y, z, target); break;
+        case 120: ternary_into_avx512<0x78>(x, y, z, target); break;
+        case 121: ternary_into_avx512<0x79>(x, y, z, target); break;
+        case 122: ternary_into_avx512<0x7a>(x, y, z, target); break;
+        case 123: ternary_into_avx512<0x7b>(x, y, z, target); break;
+        case 124: ternary_into_avx512<0x7c>(x, y, z, target); break;
+        case 125: ternary_into_avx512<0x7d>(x, y, z, target); break;
+        case 126: ternary_into_avx512<0x7e>(x, y, z, target); break;
+        case 127: ternary_into_avx512<0x7f>(x, y, z, target); break;
+        case 128: ternary_into_avx512<0x80>(x, y, z, target); break;
+        case 129: ternary_into_avx512<0x81>(x, y, z, target); break;
+        case 130: ternary_into_avx512<0x82>(x, y, z, target); break;
+        case 131: ternary_into_avx512<0x83>(x, y, z, target); break;
+        case 132: ternary_into_avx512<0x84>(x, y, z, target); break;
+        case 133: ternary_into_avx512<0x85>(x, y, z, target); break;
+        case 134: ternary_into_avx512<0x86>(x, y, z, target); break;
+        case 135: ternary_into_avx512<0x87>(x, y, z, target); break;
+        case 136: ternary_into_avx512<0x88>(x, y, z, target); break;
+        case 137: ternary_into_avx512<0x89>(x, y, z, target); break;
+        case 138: ternary_into_avx512<0x8a>(x, y, z, target); break;
+        case 139: ternary_into_avx512<0x8b>(x, y, z, target); break;
+        case 140: ternary_into_avx512<0x8c>(x, y, z, target); break;
+        case 141: ternary_into_avx512<0x8d>(x, y, z, target); break;
+        case 142: ternary_into_avx512<0x8e>(x, y, z, target); break;
+        case 143: ternary_into_avx512<0x8f>(x, y, z, target); break;
+        case 144: ternary_into_avx512<0x90>(x, y, z, target); break;
+        case 145: ternary_into_avx512<0x91>(x, y, z, target); break;
+        case 146: ternary_into_avx512<0x92>(x, y, z, target); break;
+        case 147: ternary_into_avx512<0x93>(x, y, z, target); break;
+        case 148: ternary_into_avx512<0x94>(x, y, z, target); break;
+        case 149: ternary_into_avx512<0x95>(x, y, z, target); break;
+        case 150: ternary_into_avx512<0x96>(x, y, z, target); break;
+        case 151: ternary_into_avx512<0x97>(x, y, z, target); break;
+        case 152: ternary_into_avx512<0x98>(x, y, z, target); break;
+        case 153: ternary_into_avx512<0x99>(x, y, z, target); break;
+        case 154: ternary_into_avx512<0x9a>(x, y, z, target); break;
+        case 155: ternary_into_avx512<0x9b>(x, y, z, target); break;
+        case 156: ternary_into_avx512<0x9c>(x, y, z, target); break;
+        case 157: ternary_into_avx512<0x9d>(x, y, z, target); break;
+        case 158: ternary_into_avx512<0x9e>(x, y, z, target); break;
+        case 159: ternary_into_avx512<0x9f>(x, y, z, target); break;
+        case 160: ternary_into_avx512<0xa0>(x, y, z, target); break;
+        case 161: ternary_into_avx512<0xa1>(x, y, z, target); break;
+        case 162: ternary_into_avx512<0xa2>(x, y, z, target); break;
+        case 163: ternary_into_avx512<0xa3>(x, y, z, target); break;
+        case 164: ternary_into_avx512<0xa4>(x, y, z, target); break;
+        case 165: ternary_into_avx512<0xa5>(x, y, z, target); break;
+        case 166: ternary_into_avx512<0xa6>(x, y, z, target); break;
+        case 167: ternary_into_avx512<0xa7>(x, y, z, target); break;
+        case 168: ternary_into_avx512<0xa8>(x, y, z, target); break;
+        case 169: ternary_into_avx512<0xa9>(x, y, z, target); break;
+        case 170: ternary_into_avx512<0xaa>(x, y, z, target); break;
+        case 171: ternary_into_avx512<0xab>(x, y, z, target); break;
+        case 172: ternary_into_avx512<0xac>(x, y, z, target); break;
+        case 173: ternary_into_avx512<0xad>(x, y, z, target); break;
+        case 174: ternary_into_avx512<0xae>(x, y, z, target); break;
+        case 175: ternary_into_avx512<0xaf>(x, y, z, target); break;
+        case 176: ternary_into_avx512<0xb0>(x, y, z, target); break;
+        case 177: ternary_into_avx512<0xb1>(x, y, z, target); break;
+        case 178: ternary_into_avx512<0xb2>(x, y, z, target); break;
+        case 179: ternary_into_avx512<0xb3>(x, y, z, target); break;
+        case 180: ternary_into_avx512<0xb4>(x, y, z, target); break;
+        case 181: ternary_into_avx512<0xb5>(x, y, z, target); break;
+        case 182: ternary_into_avx512<0xb6>(x, y, z, target); break;
+        case 183: ternary_into_avx512<0xb7>(x, y, z, target); break;
+        case 184: ternary_into_avx512<0xb8>(x, y, z, target); break;
+        case 185: ternary_into_avx512<0xb9>(x, y, z, target); break;
+        case 186: ternary_into_avx512<0xba>(x, y, z, target); break;
+        case 187: ternary_into_avx512<0xbb>(x, y, z, target); break;
+        case 188: ternary_into_avx512<0xbc>(x, y, z, target); break;
+        case 189: ternary_into_avx512<0xbd>(x, y, z, target); break;
+        case 190: ternary_into_avx512<0xbe>(x, y, z, target); break;
+        case 191: ternary_into_avx512<0xbf>(x, y, z, target); break;
+        case 192: ternary_into_avx512<0xc0>(x, y, z, target); break;
+        case 193: ternary_into_avx512<0xc1>(x, y, z, target); break;
+        case 194: ternary_into_avx512<0xc2>(x, y, z, target); break;
+        case 195: ternary_into_avx512<0xc3>(x, y, z, target); break;
+        case 196: ternary_into_avx512<0xc4>(x, y, z, target); break;
+        case 197: ternary_into_avx512<0xc5>(x, y, z, target); break;
+        case 198: ternary_into_avx512<0xc6>(x, y, z, target); break;
+        case 199: ternary_into_avx512<0xc7>(x, y, z, target); break;
+        case 200: ternary_into_avx512<0xc8>(x, y, z, target); break;
+        case 201: ternary_into_avx512<0xc9>(x, y, z, target); break;
+        case 202: ternary_into_avx512<0xca>(x, y, z, target); break;
+        case 203: ternary_into_avx512<0xcb>(x, y, z, target); break;
+        case 204: ternary_into_avx512<0xcc>(x, y, z, target); break;
+        case 205: ternary_into_avx512<0xcd>(x, y, z, target); break;
+        case 206: ternary_into_avx512<0xce>(x, y, z, target); break;
+        case 207: ternary_into_avx512<0xcf>(x, y, z, target); break;
+        case 208: ternary_into_avx512<0xd0>(x, y, z, target); break;
+        case 209: ternary_into_avx512<0xd1>(x, y, z, target); break;
+        case 210: ternary_into_avx512<0xd2>(x, y, z, target); break;
+        case 211: ternary_into_avx512<0xd3>(x, y, z, target); break;
+        case 212: ternary_into_avx512<0xd4>(x, y, z, target); break;
+        case 213: ternary_into_avx512<0xd5>(x, y, z, target); break;
+        case 214: ternary_into_avx512<0xd6>(x, y, z, target); break;
+        case 215: ternary_into_avx512<0xd7>(x, y, z, target); break;
+        case 216: ternary_into_avx512<0xd8>(x, y, z, target); break;
+        case 217: ternary_into_avx512<0xd9>(x, y, z, target); break;
+        case 218: ternary_into_avx512<0xda>(x, y, z, target); break;
+        case 219: ternary_into_avx512<0xdb>(x, y, z, target); break;
+        case 220: ternary_into_avx512<0xdc>(x, y, z, target); break;
+        case 221: ternary_into_avx512<0xdd>(x, y, z, target); break;
+        case 222: ternary_into_avx512<0xde>(x, y, z, target); break;
+        case 223: ternary_into_avx512<0xdf>(x, y, z, target); break;
+        case 224: ternary_into_avx512<0xe0>(x, y, z, target); break;
+        case 225: ternary_into_avx512<0xe1>(x, y, z, target); break;
+        case 226: ternary_into_avx512<0xe2>(x, y, z, target); break;
+        case 227: ternary_into_avx512<0xe3>(x, y, z, target); break;
+        case 228: ternary_into_avx512<0xe4>(x, y, z, target); break;
+        case 229: ternary_into_avx512<0xe5>(x, y, z, target); break;
+        case 230: ternary_into_avx512<0xe6>(x, y, z, target); break;
+        case 231: ternary_into_avx512<0xe7>(x, y, z, target); break;
+        case 232: ternary_into_avx512<0xe8>(x, y, z, target); break;
+        case 233: ternary_into_avx512<0xe9>(x, y, z, target); break;
+        case 234: ternary_into_avx512<0xea>(x, y, z, target); break;
+        case 235: ternary_into_avx512<0xeb>(x, y, z, target); break;
+        case 236: ternary_into_avx512<0xec>(x, y, z, target); break;
+        case 237: ternary_into_avx512<0xed>(x, y, z, target); break;
+        case 238: ternary_into_avx512<0xee>(x, y, z, target); break;
+        case 239: ternary_into_avx512<0xef>(x, y, z, target); break;
+        case 240: ternary_into_avx512<0xf0>(x, y, z, target); break;
+        case 241: ternary_into_avx512<0xf1>(x, y, z, target); break;
+        case 242: ternary_into_avx512<0xf2>(x, y, z, target); break;
+        case 243: ternary_into_avx512<0xf3>(x, y, z, target); break;
+        case 244: ternary_into_avx512<0xf4>(x, y, z, target); break;
+        case 245: ternary_into_avx512<0xf5>(x, y, z, target); break;
+        case 246: ternary_into_avx512<0xf6>(x, y, z, target); break;
+        case 247: ternary_into_avx512<0xf7>(x, y, z, target); break;
+        case 248: ternary_into_avx512<0xf8>(x, y, z, target); break;
+        case 249: ternary_into_avx512<0xf9>(x, y, z, target); break;
+        case 250: ternary_into_avx512<0xfa>(x, y, z, target); break;
+        case 251: ternary_into_avx512<0xfb>(x, y, z, target); break;
+        case 252: ternary_into_avx512<0xfc>(x, y, z, target); break;
+        case 253: ternary_into_avx512<0xfd>(x, y, z, target); break;
+        case 254: ternary_into_avx512<0xfe>(x, y, z, target); break;
+    } // end switch over truth-table byte
+}
+#endif
+
+void dynamic_ternary_into_reference(word_t *x, word_t *y, word_t *z, word_t *target, uint8_t op) {
+ switch (op) {
+ case 0: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = 0; break;
+ case 1: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | y[i]) | z[i])); break;
+ case 2: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (~(x[i] | y[i]))); break;
+ case 3: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] | y[i])); break;
+ case 4: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (~(x[i] | z[i]))); break;
+ case 5: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] | z[i])); break;
+ case 6: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~x[i]) & (y[i] ^ z[i])); break;
+ case 7: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] | (y[i] & z[i]))); break;
+ case 8: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & z[i]) & (~x[i])); break;
+ case 9: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] | (y[i] ^ z[i]))); break;
+ case 10: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (~x[i])); break;
+ case 11: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~x[i]) & (z[i] | (~y[i]))); break;
+ case 12: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (~x[i])); break;
+ case 13: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~x[i]) & (y[i] | (~z[i]))); break;
+ case 14: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~x[i]) & (y[i] | z[i])); break;
+ case 15: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~x[i]); break;
+ case 16: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (~(y[i] | z[i]))); break;
+ case 17: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] | z[i])); break;
+ case 18: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~y[i]) & (x[i] ^ z[i])); break;
+ case 19: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] | (x[i] & z[i]))); break;
+ case 20: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~z[i]) & (x[i] ^ y[i])); break;
+ case 21: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(z[i] | (x[i] & y[i]))); break;
+ case 22: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (((((x[i] & y[i]) & z[i]) ^ x[i]) ^ y[i]) ^ z[i]); break;
+ case 23: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | y[i]) & (z[i] | (x[i] & y[i])))); break;
+ case 24: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] ^ y[i]) & (x[i] ^ z[i])); break;
+ case 25: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & y[i]) | (y[i] ^ z[i]))); break;
+ case 26: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] | (x[i] & y[i])) ^ x[i]); break;
+ case 27: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((z[i] & (x[i] ^ y[i])) ^ y[i])); break;
+ case 28: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | (x[i] & z[i])) ^ x[i]); break;
+ case 29: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] & (x[i] ^ z[i])) ^ z[i])); break;
+ case 30: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | z[i]) ^ x[i]); break;
+ case 31: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] & (y[i] | z[i]))); break;
+ case 32: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & z[i]) & (~y[i])); break;
+ case 33: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] | (x[i] ^ z[i]))); break;
+ case 34: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (~y[i])); break;
+ case 35: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~y[i]) & (z[i] | (~x[i]))); break;
+ case 36: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] ^ y[i]) & (y[i] ^ z[i])); break;
+ case 37: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & y[i]) | (x[i] ^ z[i]))); break;
+ case 38: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] | (x[i] & y[i])) ^ y[i]); break;
+ case 39: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((z[i] & (x[i] ^ y[i])) ^ x[i])); break;
+ case 40: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (x[i] ^ y[i])); break;
+ case 41: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & y[i]) | ((x[i] ^ y[i]) ^ z[i]))); break;
+ case 42: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (~(x[i] & y[i]))); break;
+ case 43: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(((x[i] ^ y[i]) & (y[i] ^ z[i])) ^ x[i])); break;
+ case 44: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | z[i]) & (x[i] ^ y[i])); break;
+ case 45: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | (~z[i])) ^ x[i]); break;
+ case 46: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | (x[i] ^ z[i])) ^ x[i]); break;
+ case 47: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] & (y[i] | (~z[i])))); break;
+ case 48: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (~y[i])); break;
+ case 49: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~y[i]) & (x[i] | (~z[i]))); break;
+ case 50: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~y[i]) & (x[i] | z[i])); break;
+ case 51: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~y[i]); break;
+ case 52: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | (y[i] & z[i])) ^ y[i]); break;
+ case 53: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & (y[i] ^ z[i])) ^ z[i])); break;
+ case 54: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | z[i]) ^ y[i]); break;
+ case 55: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] & (x[i] | z[i]))); break;
+ case 56: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | z[i]) & (x[i] ^ y[i])); break;
+ case 57: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | (~z[i])) ^ y[i]); break;
+ case 58: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | (y[i] ^ z[i])) ^ y[i]); break;
+ case 59: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] & (x[i] | (~z[i])))); break;
+ case 60: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] ^ y[i]); break;
+ case 61: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(x[i] | z[i])) | (x[i] ^ y[i])); break;
+ case 62: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] & (~x[i])) | (x[i] ^ y[i])); break;
+ case 63: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] & y[i])); break;
+ case 64: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) & (~z[i])); break;
+ case 65: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(z[i] | (x[i] ^ y[i]))); break;
+ case 66: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] ^ z[i]) & (y[i] ^ z[i])); break;
+ case 67: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & z[i]) | (x[i] ^ y[i]))); break;
+ case 68: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (~z[i])); break;
+ case 69: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~z[i]) & (y[i] | (~x[i]))); break;
+ case 70: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | (x[i] & z[i])) ^ z[i]); break;
+ case 71: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] & (x[i] ^ z[i])) ^ x[i])); break;
+ case 72: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (x[i] ^ z[i])); break;
+ case 73: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & z[i]) | ((x[i] ^ y[i]) ^ z[i]))); break;
+ case 74: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | z[i]) & (x[i] ^ z[i])); break;
+ case 75: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] | (~y[i])) ^ x[i]); break;
+ case 76: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (~(x[i] & z[i]))); break;
+ case 77: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(((x[i] ^ z[i]) & (y[i] ^ z[i])) ^ x[i])); break;
+ case 78: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] | (x[i] ^ y[i])) ^ x[i]); break;
+ case 79: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] & (z[i] | (~y[i])))); break;
+ case 80: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (~z[i])); break;
+ case 81: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~z[i]) & (x[i] | (~y[i]))); break;
+ case 82: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | (y[i] & z[i])) ^ z[i]); break;
+ case 83: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & (y[i] ^ z[i])) ^ y[i])); break;
+ case 84: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~z[i]) & (x[i] | y[i])); break;
+ case 85: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~z[i]); break;
+ case 86: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) ^ z[i]); break;
+ case 87: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(z[i] & (x[i] | y[i]))); break;
+ case 88: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) & (x[i] ^ z[i])); break;
+ case 89: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | (~y[i])) ^ z[i]); break;
+ case 90: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] ^ z[i]); break;
+ case 91: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(x[i] | y[i])) | (x[i] ^ z[i])); break;
+ case 92: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | (y[i] ^ z[i])) ^ z[i]); break;
+ case 93: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(z[i] & (x[i] | (~y[i])))); break;
+ case 94: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & (~x[i])) | (x[i] ^ z[i])); break;
+ case 95: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] & z[i])); break;
+ case 96: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (y[i] ^ z[i])); break;
+ case 97: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] & z[i]) | ((x[i] ^ y[i]) ^ z[i]))); break;
+ case 98: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | z[i]) & (y[i] ^ z[i])); break;
+ case 99: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] | (~x[i])) ^ y[i]); break;
+ case 100: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) & (y[i] ^ z[i])); break;
+ case 101: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | (~x[i])) ^ z[i]); break;
+ case 102: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] ^ z[i]); break;
+ case 103: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(x[i] | y[i])) | (y[i] ^ z[i])); break;
+ case 104: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (((((x[i] | y[i]) | z[i]) ^ x[i]) ^ y[i]) ^ z[i]); break;
+ case 105: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] ^ y[i]) ^ z[i])); break;
+ case 106: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) ^ z[i]); break;
+ case 107: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | y[i]) & ((x[i] ^ y[i]) ^ z[i]))); break;
+ case 108: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & z[i]) ^ y[i]); break;
+ case 109: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | z[i]) & ((x[i] ^ y[i]) ^ z[i]))); break;
+ case 110: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & (~x[i])) | (y[i] ^ z[i])); break;
+ case 111: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~x[i]) | (y[i] ^ z[i])); break;
+ case 112: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (~(y[i] & z[i]))); break;
+ case 113: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(((x[i] ^ y[i]) | (x[i] ^ z[i])) ^ x[i])); break;
+ case 114: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] | (x[i] ^ y[i])) ^ y[i]); break;
+ case 115: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] & (z[i] | (~x[i])))); break;
+ case 116: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | (x[i] ^ z[i])) ^ z[i]); break;
+ case 117: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(z[i] & (y[i] | (~x[i])))); break;
+ case 118: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (~y[i])) | (y[i] ^ z[i])); break;
+ case 119: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] & z[i])); break;
+ case 120: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & z[i]) ^ x[i]); break;
+ case 121: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | z[i]) & ((x[i] ^ y[i]) ^ z[i]))); break;
+ case 122: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (~y[i])) | (x[i] ^ z[i])); break;
+ case 123: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~y[i]) | (x[i] ^ z[i])); break;
+ case 124: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (~z[i])) | (x[i] ^ y[i])); break;
+ case 125: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~z[i]) | (x[i] ^ y[i])); break;
+ case 126: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] ^ y[i]) | (x[i] ^ z[i])); break;
+ case 127: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & y[i]) & z[i])); break;
+ case 128: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) & z[i]); break;
+ case 129: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] ^ y[i]) | (x[i] ^ z[i]))); break;
+ case 130: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (~(x[i] ^ y[i]))); break;
+ case 131: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(x[i] ^ y[i])) & (z[i] | (~x[i]))); break;
+ case 132: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (~(x[i] ^ z[i]))); break;
+ case 133: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(x[i] ^ z[i])) & (y[i] | (~x[i]))); break;
+ case 134: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | z[i]) & ((x[i] ^ y[i]) ^ z[i])); break;
+ case 135: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] & z[i]) ^ x[i])); break;
+ case 136: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & z[i]); break;
+ case 137: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((((y[i] | z[i]) | (~x[i])) ^ y[i]) ^ z[i]); break;
+ case 138: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (y[i] | (~x[i]))); break;
+ case 139: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | (x[i] ^ z[i])) ^ z[i])); break;
+ case 140: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (z[i] | (~x[i]))); break;
+ case 141: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((z[i] | (x[i] ^ y[i])) ^ y[i])); break;
+ case 142: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (((x[i] ^ y[i]) | (x[i] ^ z[i])) ^ x[i]); break;
+ case 143: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & z[i]) | (~x[i])); break;
+ case 144: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (~(y[i] ^ z[i]))); break;
+ case 145: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(y[i] ^ z[i])) & (x[i] | (~y[i]))); break;
+ case 146: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | z[i]) & ((x[i] ^ y[i]) ^ z[i])); break;
+ case 147: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & z[i]) ^ y[i])); break;
+ case 148: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) & ((x[i] ^ y[i]) ^ z[i])); break;
+ case 149: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & y[i]) ^ z[i])); break;
+ case 150: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] ^ y[i]) ^ z[i]); break;
+ case 151: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((~(x[i] | y[i])) | ((x[i] ^ y[i]) ^ z[i])); break;
+ case 152: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((((x[i] | y[i]) | z[i]) ^ y[i]) ^ z[i]); break;
+ case 153: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] ^ z[i])); break;
+ case 154: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (~y[i])) ^ z[i]); break;
+ case 155: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | y[i]) & (y[i] ^ z[i]))); break;
+ case 156: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (~z[i])) ^ y[i]); break;
+ case 157: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | z[i]) & (y[i] ^ z[i]))); break;
+ case 158: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & z[i]) | ((x[i] ^ y[i]) ^ z[i])); break;
+ case 159: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] & (y[i] ^ z[i]))); break;
+ case 160: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & z[i]); break;
+ case 161: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((((x[i] | z[i]) | (~y[i])) ^ x[i]) ^ z[i]); break;
+ case 162: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (x[i] | (~y[i]))); break;
+ case 163: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | (y[i] ^ z[i])) ^ z[i])); break;
+ case 164: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((((x[i] | y[i]) | z[i]) ^ x[i]) ^ z[i]); break;
+ case 165: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] ^ z[i])); break;
+ case 166: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & (~x[i])) ^ z[i]); break;
+ case 167: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | y[i]) & (x[i] ^ z[i]))); break;
+ case 168: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] & (x[i] | y[i])); break;
+ case 169: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | y[i]) ^ z[i])); break;
+ case 170: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = z[i]; break;
+ case 171: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (~(x[i] | y[i]))); break;
+ case 172: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (y[i] ^ z[i])) ^ y[i]); break;
+ case 173: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | (y[i] & z[i])) ^ z[i])); break;
+ case 174: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (y[i] & (~x[i]))); break;
+ case 175: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (~x[i])); break;
+ case 176: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (z[i] | (~y[i]))); break;
+ case 177: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((z[i] | (x[i] ^ y[i])) ^ x[i])); break;
+ case 178: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (((x[i] ^ z[i]) & (y[i] ^ z[i])) ^ x[i]); break;
+ case 179: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & z[i]) | (~y[i])); break;
+ case 180: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & (~z[i])) ^ x[i]); break;
+ case 181: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | z[i]) & (x[i] ^ z[i]))); break;
+ case 182: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & z[i]) | ((x[i] ^ y[i]) ^ z[i])); break;
+ case 183: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(y[i] & (x[i] ^ z[i]))); break;
+ case 184: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & (x[i] ^ z[i])) ^ x[i]); break;
+ case 185: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | (x[i] & z[i])) ^ z[i])); break;
+ case 186: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (x[i] & (~y[i]))); break;
+ case 187: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (~y[i])); break;
+ case 188: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & z[i]) | (x[i] ^ y[i])); break;
+ case 189: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] ^ z[i]) & (y[i] ^ z[i]))); break;
+ case 190: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (x[i] ^ y[i])); break;
+ case 191: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & y[i]) & (~z[i]))); break;
+ case 192: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & y[i]); break;
+ case 193: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((((x[i] | y[i]) | (~z[i])) ^ x[i]) ^ y[i]); break;
+ case 194: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((((x[i] | y[i]) | z[i]) ^ x[i]) ^ y[i]); break;
+ case 195: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(x[i] ^ y[i])); break;
+ case 196: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (x[i] | (~z[i]))); break;
+ case 197: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | (y[i] ^ z[i])) ^ y[i])); break;
+ case 198: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] & (~x[i])) ^ y[i]); break;
+ case 199: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | z[i]) & (x[i] ^ y[i]))); break;
+ case 200: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] & (x[i] | z[i])); break;
+ case 201: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | z[i]) ^ y[i])); break;
+ case 202: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & (y[i] ^ z[i])) ^ z[i]); break;
+ case 203: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] | (y[i] & z[i])) ^ y[i])); break;
+ case 204: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = y[i]; break;
+ case 205: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (~(x[i] | z[i]))); break;
+ case 206: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (z[i] & (~x[i]))); break;
+ case 207: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (~x[i])); break;
+ case 208: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (y[i] | (~z[i]))); break;
+ case 209: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | (x[i] ^ z[i])) ^ x[i])); break;
+ case 210: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] & (~y[i])) ^ x[i]); break;
+ case 211: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | z[i]) & (x[i] ^ y[i]))); break;
+ case 212: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (((x[i] ^ y[i]) & (y[i] ^ z[i])) ^ x[i]); break;
+ case 213: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) | (~z[i])); break;
+ case 214: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) | ((x[i] ^ y[i]) ^ z[i])); break;
+ case 215: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~(z[i] & (x[i] ^ y[i]))); break;
+ case 216: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] & (x[i] ^ y[i])) ^ x[i]); break;
+ case 217: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((z[i] | (x[i] & y[i])) ^ y[i])); break;
+ case 218: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) | (x[i] ^ z[i])); break;
+ case 219: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] ^ y[i]) & (y[i] ^ z[i]))); break;
+ case 220: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (x[i] & (~z[i]))); break;
+ case 221: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (~z[i])); break;
+ case 222: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (x[i] ^ z[i])); break;
+ case 223: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] & z[i]) & (~y[i]))); break;
+ case 224: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] & (y[i] | z[i])); break;
+ case 225: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | z[i]) ^ x[i])); break;
+ case 226: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] & (x[i] ^ z[i])) ^ z[i]); break;
+ case 227: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] | (x[i] & z[i])) ^ x[i])); break;
+ case 228: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((z[i] & (x[i] ^ y[i])) ^ y[i]); break;
+ case 229: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((z[i] | (x[i] & y[i])) ^ x[i])); break;
+ case 230: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) | (y[i] ^ z[i])); break;
+ case 231: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((x[i] ^ y[i]) & (x[i] ^ z[i]))); break;
+ case 232: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) & (z[i] | (x[i] & y[i]))); break;
+ case 233: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] & y[i]) | (((~z[i]) ^ x[i]) ^ y[i])); break;
+ case 234: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (x[i] & y[i])); break;
+ case 235: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (z[i] | (~(x[i] ^ y[i]))); break;
+ case 236: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (x[i] & z[i])); break;
+ case 237: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | (~(x[i] ^ z[i]))); break;
+ case 238: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (y[i] | z[i]); break;
+ case 239: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((y[i] | z[i]) | (~x[i])); break;
+ case 240: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = x[i]; break;
+ case 241: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (~(y[i] | z[i]))); break;
+ case 242: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (z[i] & (~y[i]))); break;
+ case 243: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (~y[i])); break;
+ case 244: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (y[i] & (~z[i]))); break;
+ case 245: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (~z[i])); break;
+ case 246: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (y[i] ^ z[i])); break;
+ case 247: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (~((y[i] & z[i]) & (~x[i]))); break;
+ case 248: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (y[i] & z[i])); break;
+ case 249: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | (~(y[i] ^ z[i]))); break;
+ case 250: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | z[i]); break;
+ case 251: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | z[i]) | (~y[i])); break;
+ case 252: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = (x[i] | y[i]); break;
+ case 253: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) | (~z[i])); break;
+ case 254: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ((x[i] | y[i]) | z[i]); break;
+ case 255: for (word_iter_t i = 0; i < WORDS; ++i) target[i] = ONE_WORD; break;
+ }
+}
+
+#if __AVX512F__
+#define dynamic_ternary_into dynamic_ternary_into_avx512
+#else
+#define dynamic_ternary_into dynamic_ternary_into_reference
+#endif
diff --git a/bhv/cnative/threshold.h b/bhv/cnative/threshold.h
new file mode 100644
index 0000000..974e81b
--- /dev/null
+++ b/bhv/cnative/threshold.h
@@ -0,0 +1,683 @@
+
+/// @brief A generic implementation for threshold_into, that can use any size counter
+template<typename N>
+void threshold_into_reference(word_t ** xs, size_t size, N threshold, word_t *dst) {
+
+ N totals[BITS];
+ memset(totals, 0, BITS*sizeof(N));
+
+ for (N i = 0; i < size; ++i) {
+ word_t * x = xs[i];
+
+ for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
+ bit_iter_t offset = word_id * BITS_PER_WORD;
+ word_t word = x[word_id];
+ for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
+ totals[offset + bit_id] += ((word >> bit_id) & 1);
+ }
+ }
+ }
+
+ for (word_iter_t word_id = 0; word_id < WORDS; ++word_id) {
+ bit_iter_t offset = word_id * BITS_PER_WORD;
+ word_t word = 0;
+ for (bit_word_iter_t bit_id = 0; bit_id < BITS_PER_WORD; ++bit_id) {
+ if (threshold < totals[offset + bit_id])
+ word |= 1UL << bit_id;
+ }
+ dst[word_id] = word;
+ }
+}
+
+#if __AVX512BW__
+/// @brief INTERNAL Counts an input cacheline worth of bits (64 Bytes = 512 bits) for 1 input hypervector
+/// @param xs pointer to pointer to the input hypervector data
+/// @param byte_offset offset (in bytes) into each hypervector. This must be aligned to 64 Bytes
+/// @param out_counts Each counter is 2 bits, and there are 256 counters in each 512 bit AVX-512 vector, and there are 2 AVX-512 vectors.
+/// Counters are interleaved between the output vectors. For example, input bit positions indicateded by letters: H G F E D C B A,
+/// lead to output bit positions: out_counts[0]: G E C A, and out_counts[1]: H F D B
+inline void add_counts_from_cacheline_for_1_input_hypervector_avx512(word_t ** xs, size_t byte_offset, __m512i* out_counts) {
+ const __m512i interleaved_bits = _mm512_set1_epi8(0x55);
+ uint8_t* xs_bytes = *((uint8_t**)xs);
+ __m512i input_bits = _mm512_loadu_si512(xs_bytes + byte_offset);
+
+ __m512i even_bits = _mm512_and_si512(input_bits, interleaved_bits);
+ __m512i odd_bits = _mm512_and_si512(_mm512_srli_epi64(input_bits, 1), interleaved_bits);
+
+ out_counts[0] = _mm512_add_epi8(out_counts[0], even_bits);
+ out_counts[1] = _mm512_add_epi8(out_counts[1], odd_bits);
+}
+
+/// @brief INTERNAL Counts an input cacheline worth of bits (64 Bytes = 512 bits) for up to 15 input hypervectors
+/// @param xs array of input hypervectors
+/// @param byte_offset offset (in bytes) into each hypervector. Ideally this would be aligned to 64 Bytes
+/// @param num_vectors the number of vectors in xs. Maximum value of 15
+/// @param out_counts Each counter is 4 bits, and there are 128 counters in each 512 bit AVX-512 vector, and there are 4 AVX-512 vectors
+/// Counters are interleaved between output vectors, in a way that extends the interleaving described in
+/// add_counts_from_cacheline_for_1_input_hypervector_avx512
+inline void count_cacheline_for_15_input_hypervectors_avx512(word_t ** xs, size_t byte_offset, uint_fast8_t num_vectors, __m512i* out_counts) {
+ const __m512i interleaved_pairs = _mm512_set1_epi8(0x33);
+
+ out_counts[0] = _mm512_set1_epi64(0);
+ out_counts[1] = _mm512_set1_epi64(0);
+ out_counts[2] = _mm512_set1_epi64(0);
+ out_counts[3] = _mm512_set1_epi64(0);
+
+ __m512i inner_counts[2];
+ for (uint_fast8_t i = 0; i < num_vectors-2; i+=3) {
+ inner_counts[0] = _mm512_set1_epi64(0);
+ inner_counts[1] = _mm512_set1_epi64(0);
+
+ add_counts_from_cacheline_for_1_input_hypervector_avx512(xs + i, byte_offset, inner_counts);
+ add_counts_from_cacheline_for_1_input_hypervector_avx512(xs + i + 1, byte_offset, inner_counts);
+ add_counts_from_cacheline_for_1_input_hypervector_avx512(xs + i + 2, byte_offset, inner_counts);
+
+ //Expand the 2-bit counters into 4-bits, and add them to the running counters
+ __m512i increment0 = _mm512_and_si512(inner_counts[0], interleaved_pairs);
+ __m512i increment1 = _mm512_and_si512(inner_counts[1], interleaved_pairs);
+ __m512i increment2 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[0], 2), interleaved_pairs);
+ __m512i increment3 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[1], 2), interleaved_pairs);
+ out_counts[0] = _mm512_add_epi8(out_counts[0], increment0);
+ out_counts[1] = _mm512_add_epi8(out_counts[1], increment1);
+ out_counts[2] = _mm512_add_epi8(out_counts[2], increment2);
+ out_counts[3] = _mm512_add_epi8(out_counts[3], increment3);
+ }
+ if (num_vectors % 3 == 0) return;
+
+ // Mop up the straggler bits
+ inner_counts[0] = _mm512_set1_epi64(0);
+ inner_counts[1] = _mm512_set1_epi64(0);
+ for (uint_fast8_t i = (num_vectors/3)*3; i < num_vectors; i++) {
+ add_counts_from_cacheline_for_1_input_hypervector_avx512(xs + i, byte_offset, inner_counts);
+ }
+ //Expand the 2-bit counters into 4-bits, and add them to the running counters
+ __m512i increment0 = _mm512_and_si512(inner_counts[0], interleaved_pairs);
+ __m512i increment1 = _mm512_and_si512(inner_counts[1], interleaved_pairs);
+ __m512i increment2 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[0], 2), interleaved_pairs);
+ __m512i increment3 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[1], 2), interleaved_pairs);
+ out_counts[0] = _mm512_add_epi8(out_counts[0], increment0);
+ out_counts[1] = _mm512_add_epi8(out_counts[1], increment1);
+ out_counts[2] = _mm512_add_epi8(out_counts[2], increment2);
+ out_counts[3] = _mm512_add_epi8(out_counts[3], increment3);
+}
+
+/// @brief INTERNAL Counts an input cacheline worth of bits (64 Bytes = 512 bits) for up to 255 input hypervectors
+/// @param xs array of input hypervectors
+/// @param byte_offset offset (in bytes) into each hypervector. Ideally this would be aligned to 64 Bytes
+/// @param num_vectors the number of vectors in xs. Maximum value of 255
+/// @param out_counts Each counter is 8 bits, and there are 64 counters in each 512 bit AVX-512 vector, and there
+/// are 8 AVX-512 vectors. Output counters are interleaved in a way that extends the interleaving described in
+/// add_counts_from_cacheline_for_1_input_hypervector_avx512. Counters can be un-scrambled with unscramble_byte_counters_avx512
+inline void count_cacheline_for_255_input_hypervectors_avx512(word_t ** xs, size_t byte_offset, uint_fast8_t num_vectors, __m512i* out_counts) {
+ const __m512i nibble_mask = _mm512_set1_epi8(0xF);
+
+ //Zero the counters
+ for (int i=0; i<8; i++) {
+ out_counts[i] = _mm512_set1_epi64(0);
+ }
+
+ for (uint_fast8_t i = 0; i < num_vectors; i+=15) {
+ __m512i inner_counts[4];
+ uint_fast16_t num_inputs = (i<num_vectors-14)? 15: num_vectors-i;
+
+ count_cacheline_for_15_input_hypervectors_avx512(xs + i, byte_offset, num_inputs, inner_counts);
+
+ //Expand the 4-bit counters into 8-bits, and add them to the running counters
+ __m512i increment0 = _mm512_and_si512(inner_counts[0], nibble_mask);
+ __m512i increment1 = _mm512_and_si512(inner_counts[1], nibble_mask);
+ __m512i increment2 = _mm512_and_si512(inner_counts[2], nibble_mask);
+ __m512i increment3 = _mm512_and_si512(inner_counts[3], nibble_mask);
+ __m512i increment4 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[0], 4), nibble_mask);
+ __m512i increment5 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[1], 4), nibble_mask);
+ __m512i increment6 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[2], 4), nibble_mask);
+ __m512i increment7 = _mm512_and_si512(_mm512_srli_epi64(inner_counts[3], 4), nibble_mask);
+
+ out_counts[0] = _mm512_add_epi8(out_counts[0], increment0);
+ out_counts[1] = _mm512_add_epi8(out_counts[1], increment1);
+ out_counts[2] = _mm512_add_epi8(out_counts[2], increment2);
+ out_counts[3] = _mm512_add_epi8(out_counts[3], increment3);
+ out_counts[4] = _mm512_add_epi8(out_counts[4], increment4);
+ out_counts[5] = _mm512_add_epi8(out_counts[5], increment5);
+ out_counts[6] = _mm512_add_epi8(out_counts[6], increment6);
+ out_counts[7] = _mm512_add_epi8(out_counts[7], increment7);
+ }
+}
+
+/// @brief INTERNAL Unscrambles the counters returned from count_cacheline_for_255_input_hypervectors_avx512
+/// @param scrambled_counts Each counter is 8 bits, and there are 64 counters in each 512 bit AVX-512 vector, and there are 8 AVX-512 vectors
+/// @param out_counts Each counter is 8 bits, and there are 64 counters in each 512 bit AVX-512 vector, and there are 8 AVX-512 vectors
+inline void unscramble_byte_counters_avx512(__m512i* scrambled_counts, __m512i* out_counts) {
+
+ //Untangle the bytes, so the counters end up in the same order as the input bits
+ __m512i unshuffle_l1[8];
+ unshuffle_l1[0] = _mm512_unpacklo_epi8(scrambled_counts[0], scrambled_counts[1]);
+ unshuffle_l1[1] = _mm512_unpackhi_epi8(scrambled_counts[0], scrambled_counts[1]);
+ unshuffle_l1[2] = _mm512_unpacklo_epi8(scrambled_counts[2], scrambled_counts[3]);
+ unshuffle_l1[3] = _mm512_unpackhi_epi8(scrambled_counts[2], scrambled_counts[3]);
+ unshuffle_l1[4] = _mm512_unpacklo_epi8(scrambled_counts[4], scrambled_counts[5]);
+ unshuffle_l1[5] = _mm512_unpackhi_epi8(scrambled_counts[4], scrambled_counts[5]);
+ unshuffle_l1[6] = _mm512_unpacklo_epi8(scrambled_counts[6], scrambled_counts[7]);
+ unshuffle_l1[7] = _mm512_unpackhi_epi8(scrambled_counts[6], scrambled_counts[7]);
+
+ __m512i unshuffle_l2[8];
+ unshuffle_l2[0] = _mm512_unpacklo_epi16(unshuffle_l1[0], unshuffle_l1[2]);
+ unshuffle_l2[1] = _mm512_unpackhi_epi16(unshuffle_l1[0], unshuffle_l1[2]);
+ unshuffle_l2[2] = _mm512_unpacklo_epi16(unshuffle_l1[4], unshuffle_l1[6]);
+ unshuffle_l2[3] = _mm512_unpackhi_epi16(unshuffle_l1[4], unshuffle_l1[6]);
+ unshuffle_l2[4] = _mm512_unpacklo_epi16(unshuffle_l1[1], unshuffle_l1[3]);
+ unshuffle_l2[5] = _mm512_unpackhi_epi16(unshuffle_l1[1], unshuffle_l1[3]);
+ unshuffle_l2[6] = _mm512_unpacklo_epi16(unshuffle_l1[5], unshuffle_l1[7]);
+ unshuffle_l2[7] = _mm512_unpackhi_epi16(unshuffle_l1[5], unshuffle_l1[7]);
+
+ __m512i unshuffle_l3[8];
+ unshuffle_l3[0] = _mm512_unpacklo_epi32(unshuffle_l2[0], unshuffle_l2[2]);
+ unshuffle_l3[1] = _mm512_unpackhi_epi32(unshuffle_l2[0], unshuffle_l2[2]);
+ unshuffle_l3[2] = _mm512_unpacklo_epi32(unshuffle_l2[1], unshuffle_l2[3]);
+ unshuffle_l3[3] = _mm512_unpackhi_epi32(unshuffle_l2[1], unshuffle_l2[3]);
+ unshuffle_l3[4] = _mm512_unpacklo_epi32(unshuffle_l2[4], unshuffle_l2[6]);
+ unshuffle_l3[5] = _mm512_unpackhi_epi32(unshuffle_l2[4], unshuffle_l2[6]);
+ unshuffle_l3[6] = _mm512_unpacklo_epi32(unshuffle_l2[5], unshuffle_l2[7]);
+ unshuffle_l3[7] = _mm512_unpackhi_epi32(unshuffle_l2[5], unshuffle_l2[7]);
+
+ ((__m128i*)out_counts)[0] = ((__m128i*)unshuffle_l3)[0];
+ ((__m128i*)out_counts)[1] = ((__m128i*)unshuffle_l3)[4];
+ ((__m128i*)out_counts)[2] = ((__m128i*)unshuffle_l3)[8];
+ ((__m128i*)out_counts)[3] = ((__m128i*)unshuffle_l3)[12];
+ ((__m128i*)out_counts)[4] = ((__m128i*)unshuffle_l3)[16];
+ ((__m128i*)out_counts)[5] = ((__m128i*)unshuffle_l3)[20];
+ ((__m128i*)out_counts)[6] = ((__m128i*)unshuffle_l3)[24];
+ ((__m128i*)out_counts)[7] = ((__m128i*)unshuffle_l3)[28];
+
+ ((__m128i*)out_counts)[8] = ((__m128i*)unshuffle_l3)[1];
+ ((__m128i*)out_counts)[9] = ((__m128i*)unshuffle_l3)[5];
+ ((__m128i*)out_counts)[10] = ((__m128i*)unshuffle_l3)[9];
+ ((__m128i*)out_counts)[11] = ((__m128i*)unshuffle_l3)[13];
+ ((__m128i*)out_counts)[12] = ((__m128i*)unshuffle_l3)[17];
+ ((__m128i*)out_counts)[13] = ((__m128i*)unshuffle_l3)[21];
+ ((__m128i*)out_counts)[14] = ((__m128i*)unshuffle_l3)[25];
+ ((__m128i*)out_counts)[15] = ((__m128i*)unshuffle_l3)[29];
+
+ ((__m128i*)out_counts)[16] = ((__m128i*)unshuffle_l3)[2];
+ ((__m128i*)out_counts)[17] = ((__m128i*)unshuffle_l3)[6];
+ ((__m128i*)out_counts)[18] = ((__m128i*)unshuffle_l3)[10];
+ ((__m128i*)out_counts)[19] = ((__m128i*)unshuffle_l3)[14];
+ ((__m128i*)out_counts)[20] = ((__m128i*)unshuffle_l3)[18];
+ ((__m128i*)out_counts)[21] = ((__m128i*)unshuffle_l3)[22];
+ ((__m128i*)out_counts)[22] = ((__m128i*)unshuffle_l3)[26];
+ ((__m128i*)out_counts)[23] = ((__m128i*)unshuffle_l3)[30];
+
+ ((__m128i*)out_counts)[24] = ((__m128i*)unshuffle_l3)[3];
+ ((__m128i*)out_counts)[25] = ((__m128i*)unshuffle_l3)[7];
+ ((__m128i*)out_counts)[26] = ((__m128i*)unshuffle_l3)[11];
+ ((__m128i*)out_counts)[27] = ((__m128i*)unshuffle_l3)[15];
+ ((__m128i*)out_counts)[28] = ((__m128i*)unshuffle_l3)[19];
+ ((__m128i*)out_counts)[29] = ((__m128i*)unshuffle_l3)[23];
+ ((__m128i*)out_counts)[30] = ((__m128i*)unshuffle_l3)[27];
+ ((__m128i*)out_counts)[31] = ((__m128i*)unshuffle_l3)[31];
+}
+
+/// @brief AVX-512 implementation of threshold_into using a 2-Byte counter
+void threshold_into_short_avx512(word_t ** xs, int_fast16_t size, uint16_t threshold, word_t* dst) {
+ __m512i threshold_simd = _mm512_set1_epi16(threshold);
+ uint8_t* dst_bytes = (uint8_t*)dst;
+ uint16_t counters[BITS];
+
+ //Clear out 16-bit counters
+ memset(counters, 0, BITS * sizeof(uint16_t));
+
+ //Loop over all input vectors, 255 at a time
+ for (int_fast16_t i = 0; i < size; i += 255) {
+ uint_fast16_t num_inputs = (i<size-254)? 255: size-i;
+
+ size_t cur_counters = 0;
+ for (size_t byte_offset = 0; byte_offset < BYTES; byte_offset += 64) {
+
+ //Call (inline) the function to load one cache line of input bits from each input hypervector
+ __m512i scrambled_counts[8];
+ count_cacheline_for_255_input_hypervectors_avx512(xs + i, byte_offset, num_inputs, scrambled_counts);
+
+ //Unscramble the counters
+ __m512i out_counts[8];
+ unscramble_byte_counters_avx512(scrambled_counts, out_counts);
+
+ //Expand the 8-bit counters into 16-bits, and add them to the running counters
+ for (int_fast8_t out_i = 0; out_i < 8; out_i++) {
+ __m512i increment0 = _mm512_cvtepu8_epi16(((__m256i*)&out_counts[out_i])[0]);
+ __m512i increment1 = _mm512_cvtepu8_epi16(((__m256i*)&out_counts[out_i])[1]);
+ _mm512_storeu_si512((__m512i*)(&counters[cur_counters + 0]), _mm512_add_epi16(*(__m512i*)(&counters[cur_counters + 0]), increment0));
+ _mm512_storeu_si512((__m512i*)(&counters[cur_counters + 32]), _mm512_add_epi16(*(__m512i*)(&counters[cur_counters + 32]), increment1));
+ cur_counters += 64;
+ }
+ }
+ }
+
+ //Now do thresholding and output
+ for (size_t i = 0; i < BITS/32; i++) {
+ __mmask32 maj_bits = _mm512_cmpgt_epu16_mask(*(__m512i*)(&counters[i * 32]), threshold_simd);
+ *((uint32_t*)(dst_bytes + (i * 4))) = maj_bits;
+ }
+}
+
+/// @brief AVX-512 implementation of threshold_into using a 1-Byte counter
+void threshold_into_byte_avx512(word_t ** xs, uint8_t size, uint8_t threshold, word_t* dst) {
+ __m512i threshold_simd = _mm512_set1_epi8(threshold);
+ uint8_t* dst_bytes = (uint8_t*)dst;
+
+ for (size_t byte_offset = 0; byte_offset < BYTES; byte_offset += 64) {
+
+ //Call (inline) the function to load one cache line of input bits from each input hypervector
+ __m512i scrambled_counts[8];
+ count_cacheline_for_255_input_hypervectors_avx512(xs, byte_offset, size, scrambled_counts);
+
+ //Unscramble the counters
+ //FUTURE OPTIMIZATION: Performance could be improved on average 15-20% by performing the
+ //threshold test first, and then unscrambling individual bits, rather than whole bytes.
+ __m512i out_counts[8];
+ unscramble_byte_counters_avx512(scrambled_counts, out_counts);
+
+ //Do the threshold test, and compose out output bits
+ __m512i out_bits;
+ for (int i=0; i<8; i++) {
+ __mmask64 maj_bits = _mm512_cmpgt_epu8_mask(out_counts[i], threshold_simd);
+ ((uint64_t*)&out_bits)[i] = maj_bits;
+ }
+
+ //Store the results
+ _mm512_storeu_si512((__m512i*)(dst_bytes + byte_offset), out_bits);
+ }
+}
+
+/// @brief AVX-512 implementation of threshold_into using a 4-Byte counter
+void threshold_into_32bit_avx512(word_t ** xs, uint32_t size, uint32_t threshold, word_t* dst) {
+ __m512i threshold_simd = _mm512_set1_epi32(threshold);
+ uint8_t* dst_bytes = (uint8_t*)dst;
+ uint32_t counters[BITS];
+
+ //Clear out the counters
+ memset(counters, 0, BITS * sizeof(uint32_t));
+
+ //Loop over all input vectors, 255 at a time
+ for (uint_fast32_t i = 0; i < size; i += 255) {
+ uint_fast32_t num_inputs = (i<size-254)? 255: size-i;
+
+ size_t cur_counters = 0;
+ for (size_t byte_offset = 0; byte_offset < BYTES; byte_offset += 64) {
+
+ //Call (inline) the function to load one cache line of input bits from each input hypervector
+ __m512i scrambled_counts[8];
+ count_cacheline_for_255_input_hypervectors_avx512(xs + i, byte_offset, num_inputs, scrambled_counts);
+
+ //Unscramble the counters
+ __m512i out_counts[8];
+ unscramble_byte_counters_avx512(scrambled_counts, out_counts);
+
+ //Expand the 8-bit counters into 32-bits, and add them to the running counters
+ for (int_fast8_t out_i = 0; out_i < 8; out_i++) {
+ __m512i increment0 = _mm512_cvtepu8_epi32(((__m128i*)&out_counts[out_i])[0]);
+ __m512i increment1 = _mm512_cvtepu8_epi32(((__m128i*)&out_counts[out_i])[1]);
+ __m512i increment2 = _mm512_cvtepu8_epi32(((__m128i*)&out_counts[out_i])[2]);
+ __m512i increment3 = _mm512_cvtepu8_epi32(((__m128i*)&out_counts[out_i])[3]);
+ _mm512_storeu_si512((__m512i*)(&counters[cur_counters + 0]), _mm512_add_epi32(*(__m512i*)(&counters[cur_counters + 0]), increment0));
+ _mm512_storeu_si512((__m512i*)(&counters[cur_counters + 16]), _mm512_add_epi32(*(__m512i*)(&counters[cur_counters + 16]), increment1));
+ _mm512_storeu_si512((__m512i*)(&counters[cur_counters + 32]), _mm512_add_epi32(*(__m512i*)(&counters[cur_counters + 32]), increment2));
+ _mm512_storeu_si512((__m512i*)(&counters[cur_counters + 48]), _mm512_add_epi32(*(__m512i*)(&counters[cur_counters + 48]), increment3));
+ cur_counters += 64;
+ }
+ }
+ }
+
+ //Now do thresholding and output
+ for (size_t i = 0; i < BITS/16; i++) {
+ __mmask16 maj_bits = _mm512_cmpgt_epu32_mask(*(__m512i*)(&counters[i * 16]), threshold_simd);
+ *((uint16_t*)(dst_bytes + (i * 2))) = maj_bits;
+ }
+}
+
+/// @brief Sets each result bit high if there are more than threshold 1 bits in the corresponding bit of the input vectors
+/// @param xs array of `size` input vectors
+/// @param size number of input vectors in xs
+/// @param threshold threshold to count against
+/// @param dst the hypervector to write the results into
+void threshold_into_avx512(word_t ** xs, size_t size, size_t threshold, word_t* dst) {
+ //FUTURE OPTIMIZATION: Should we have a path for smaller sizes? Currently the main user of
+ // threshold_into() is true_majority(), and it has dedicated code for cases where n <= 21
+ if (size < 256) { threshold_into_byte_avx512(xs, size, threshold, dst); return; }
+ if (size < 65536) { threshold_into_short_avx512(xs, size, threshold, dst); return; }
+ threshold_into_32bit_avx512(xs, size, threshold, dst);
+}
+#endif //__AVX512BW__
+
+#ifdef __AVX2__
+/// @brief INTERNAL Counts 256 input bits (32 Bytes) for 1 input hypervector
+/// @param xs pointer to pointer to input hypervector data
+/// @param byte_offset offset (in bytes) into each hypervector. Must be aligned to 32 Bytes
+/// @param out_counts Each counter is 2 bits, and there are 128 counters in each 256 bit AVX2 vector, so there are 2 AVX2 vectors.
+inline void add_counts_from_half_cacheline_for_1_input_hypervector_avx2(word_t ** xs, size_t byte_offset, __m256i* out_counts) {
+ const __m256i interleaved_bits = _mm256_set1_epi8(0x55);
+ uint8_t* xs_bytes = *((uint8_t**)xs);
+ __m256i input_bits = _mm256_loadu_si256((__m256i*)(xs_bytes + byte_offset));
+
+ __m256i even_bits = _mm256_and_si256(input_bits, interleaved_bits);
+ __m256i odd_bits = _mm256_and_si256(_mm256_srli_epi64(input_bits, 1), interleaved_bits);
+
+ out_counts[0] = _mm256_add_epi8(out_counts[0], even_bits);
+ out_counts[1] = _mm256_add_epi8(out_counts[1], odd_bits);
+}
+
+/// @brief INTERNAL Counts 256 input bits (32 Bytes) for up to 15 input hypervectors
+/// @param xs array of input hypervectors
+/// @param byte_offset offset (in bytes) into each hypervector. This must be aligned to 32 Bytes
+/// @param num_vectors the number of vectors in xs. Maximum value of 15
+/// @param out_counts Each counter is 4 bits, and there are 64 counters in each 256 bit AVX2 vector, and there are 4 AVX2 vectors
+inline void count_half_cacheline_for_15_input_hypervectors_avx2(word_t ** xs, size_t byte_offset, uint_fast8_t num_vectors, __m256i* out_counts) {
+ const __m256i interleaved_pairs = _mm256_set1_epi8(0x33);
+
+ out_counts[0] = _mm256_set1_epi64x(0);
+ out_counts[1] = _mm256_set1_epi64x(0);
+ out_counts[2] = _mm256_set1_epi64x(0);
+ out_counts[3] = _mm256_set1_epi64x(0);
+
+ __m256i inner_counts[2];
+ for (uint_fast8_t i = 0; i < num_vectors-2; i+=3) {
+ inner_counts[0] = _mm256_set1_epi64x(0);
+ inner_counts[1] = _mm256_set1_epi64x(0);
+
+ add_counts_from_half_cacheline_for_1_input_hypervector_avx2(xs + i, byte_offset, inner_counts);
+ add_counts_from_half_cacheline_for_1_input_hypervector_avx2(xs + i + 1, byte_offset, inner_counts);
+ add_counts_from_half_cacheline_for_1_input_hypervector_avx2(xs + i + 2, byte_offset, inner_counts);
+
+ //Expand the 2-bit counters into 4-bits, and add them to the running counters
+ __m256i increment0 = _mm256_and_si256(inner_counts[0], interleaved_pairs);
+ __m256i increment1 = _mm256_and_si256(inner_counts[1], interleaved_pairs);
+ __m256i increment2 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[0], 2), interleaved_pairs);
+ __m256i increment3 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[1], 2), interleaved_pairs);
+ out_counts[0] = _mm256_add_epi8(out_counts[0], increment0);
+ out_counts[1] = _mm256_add_epi8(out_counts[1], increment1);
+ out_counts[2] = _mm256_add_epi8(out_counts[2], increment2);
+ out_counts[3] = _mm256_add_epi8(out_counts[3], increment3);
+ }
+ if (num_vectors % 3 == 0) return;
+
+ // Mop up the straggler bits
+ inner_counts[0] = _mm256_set1_epi64x(0);
+ inner_counts[1] = _mm256_set1_epi64x(0);
+ for (uint_fast8_t i = (num_vectors/3)*3; i < num_vectors; i++) {
+ add_counts_from_half_cacheline_for_1_input_hypervector_avx2(xs + i, byte_offset, inner_counts);
+ }
+ //Expand the 2-bit counters into 4-bits, and add them to the running counters
+ __m256i increment0 = _mm256_and_si256(inner_counts[0], interleaved_pairs);
+ __m256i increment1 = _mm256_and_si256(inner_counts[1], interleaved_pairs);
+ __m256i increment2 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[0], 2), interleaved_pairs);
+ __m256i increment3 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[1], 2), interleaved_pairs);
+ out_counts[0] = _mm256_add_epi8(out_counts[0], increment0);
+ out_counts[1] = _mm256_add_epi8(out_counts[1], increment1);
+ out_counts[2] = _mm256_add_epi8(out_counts[2], increment2);
+ out_counts[3] = _mm256_add_epi8(out_counts[3], increment3);
+}
+
+/// @brief INTERNAL Counts 256 input bits (32 Bytes) for up to 255 input hypervectors
+/// @param xs array of input hypervectors
+/// @param byte_offset offset (in bytes) into each hypervector. This must be aligned to 32 Bytes
+/// @param num_vectors the number of vectors in xs. Maximum value of 255
+/// @param out_counts Each counter is 8 bits, and there are 32 counters in each 256 bit AVX2 vector, and there are 8 AVX2 vectors
+inline void count_half_cacheline_for_255_input_hypervectors_avx2(word_t ** xs, size_t byte_offset, uint_fast8_t num_vectors, __m256i* out_counts) {
+ const __m256i nibble_mask = _mm256_set1_epi8(0xF);
+
+ //Zero the counters
+ for (int i=0; i<8; i++) {
+ out_counts[i] = _mm256_set1_epi64x(0);
+ }
+
+ for (uint_fast8_t i = 0; i < num_vectors; i+=15) {
+ __m256i inner_counts[4];
+ uint_fast16_t num_inputs = (i<num_vectors-14)? 15: num_vectors-i;
+
+ count_half_cacheline_for_15_input_hypervectors_avx2(xs + i, byte_offset, num_inputs, inner_counts);
+
+ //Expand the 4-bit counters into 8-bits, and add them to the running counters
+ __m256i increment0 = _mm256_and_si256(inner_counts[0], nibble_mask);
+ __m256i increment1 = _mm256_and_si256(inner_counts[1], nibble_mask);
+ __m256i increment2 = _mm256_and_si256(inner_counts[2], nibble_mask);
+ __m256i increment3 = _mm256_and_si256(inner_counts[3], nibble_mask);
+ __m256i increment4 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[0], 4), nibble_mask);
+ __m256i increment5 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[1], 4), nibble_mask);
+ __m256i increment6 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[2], 4), nibble_mask);
+ __m256i increment7 = _mm256_and_si256(_mm256_srli_epi64(inner_counts[3], 4), nibble_mask);
+
+ out_counts[0] = _mm256_add_epi8(out_counts[0], increment0);
+ out_counts[1] = _mm256_add_epi8(out_counts[1], increment1);
+ out_counts[2] = _mm256_add_epi8(out_counts[2], increment2);
+ out_counts[3] = _mm256_add_epi8(out_counts[3], increment3);
+ out_counts[4] = _mm256_add_epi8(out_counts[4], increment4);
+ out_counts[5] = _mm256_add_epi8(out_counts[5], increment5);
+ out_counts[6] = _mm256_add_epi8(out_counts[6], increment6);
+ out_counts[7] = _mm256_add_epi8(out_counts[7], increment7);
+ }
+}
+
+/// @brief INTERNAL Unscrambles the counters returned from count_half_cacheline_for_255_input_hypervectors_avx2
+/// @param scrambled_counts Each counter is 8 bits, and there are 32 counters in each 256 bit AVX2 vector, and there are 8 AVX2 vectors
+/// @param out_counts Each counter is 8 bits, and there are 32 counters in each 256 bit AVX2 vector, and there are 8 AVX2 vectors
+inline void unscramble_byte_counters_avx2(__m256i* scrambled_counts, __m256i* out_counts) {
+
+ //Untangle the bytes, so the counters end up in the same order as the input bits
+ __m256i unshuffle_l1[8];
+ unshuffle_l1[0] = _mm256_unpacklo_epi8(scrambled_counts[0], scrambled_counts[1]);
+ unshuffle_l1[1] = _mm256_unpackhi_epi8(scrambled_counts[0], scrambled_counts[1]);
+ unshuffle_l1[2] = _mm256_unpacklo_epi8(scrambled_counts[2], scrambled_counts[3]);
+ unshuffle_l1[3] = _mm256_unpackhi_epi8(scrambled_counts[2], scrambled_counts[3]);
+ unshuffle_l1[4] = _mm256_unpacklo_epi8(scrambled_counts[4], scrambled_counts[5]);
+ unshuffle_l1[5] = _mm256_unpackhi_epi8(scrambled_counts[4], scrambled_counts[5]);
+ unshuffle_l1[6] = _mm256_unpacklo_epi8(scrambled_counts[6], scrambled_counts[7]);
+ unshuffle_l1[7] = _mm256_unpackhi_epi8(scrambled_counts[6], scrambled_counts[7]);
+
+ __m256i unshuffle_l2[8];
+ unshuffle_l2[0] = _mm256_unpacklo_epi16(unshuffle_l1[0], unshuffle_l1[2]);
+ unshuffle_l2[1] = _mm256_unpackhi_epi16(unshuffle_l1[0], unshuffle_l1[2]);
+ unshuffle_l2[2] = _mm256_unpacklo_epi16(unshuffle_l1[4], unshuffle_l1[6]);
+ unshuffle_l2[3] = _mm256_unpackhi_epi16(unshuffle_l1[4], unshuffle_l1[6]);
+ unshuffle_l2[4] = _mm256_unpacklo_epi16(unshuffle_l1[1], unshuffle_l1[3]);
+ unshuffle_l2[5] = _mm256_unpackhi_epi16(unshuffle_l1[1], unshuffle_l1[3]);
+ unshuffle_l2[6] = _mm256_unpacklo_epi16(unshuffle_l1[5], unshuffle_l1[7]);
+ unshuffle_l2[7] = _mm256_unpackhi_epi16(unshuffle_l1[5], unshuffle_l1[7]);
+
+ __m256i unshuffle_l3[8];
+ unshuffle_l3[0] = _mm256_unpacklo_epi32(unshuffle_l2[0], unshuffle_l2[2]);
+ unshuffle_l3[1] = _mm256_unpackhi_epi32(unshuffle_l2[0], unshuffle_l2[2]);
+ unshuffle_l3[2] = _mm256_unpacklo_epi32(unshuffle_l2[1], unshuffle_l2[3]);
+ unshuffle_l3[3] = _mm256_unpackhi_epi32(unshuffle_l2[1], unshuffle_l2[3]);
+ unshuffle_l3[4] = _mm256_unpacklo_epi32(unshuffle_l2[4], unshuffle_l2[6]);
+ unshuffle_l3[5] = _mm256_unpackhi_epi32(unshuffle_l2[4], unshuffle_l2[6]);
+ unshuffle_l3[6] = _mm256_unpacklo_epi32(unshuffle_l2[5], unshuffle_l2[7]);
+ unshuffle_l3[7] = _mm256_unpackhi_epi32(unshuffle_l2[5], unshuffle_l2[7]);
+
+ ((__m128i*)out_counts)[0] = ((__m128i*)unshuffle_l3)[0];
+ ((__m128i*)out_counts)[1] = ((__m128i*)unshuffle_l3)[2];
+ ((__m128i*)out_counts)[2] = ((__m128i*)unshuffle_l3)[4];
+ ((__m128i*)out_counts)[3] = ((__m128i*)unshuffle_l3)[6];
+ ((__m128i*)out_counts)[4] = ((__m128i*)unshuffle_l3)[8];
+ ((__m128i*)out_counts)[5] = ((__m128i*)unshuffle_l3)[10];
+ ((__m128i*)out_counts)[6] = ((__m128i*)unshuffle_l3)[12];
+ ((__m128i*)out_counts)[7] = ((__m128i*)unshuffle_l3)[14];
+
+ ((__m128i*)out_counts)[8] = ((__m128i*)unshuffle_l3)[1];
+ ((__m128i*)out_counts)[9] = ((__m128i*)unshuffle_l3)[3];
+ ((__m128i*)out_counts)[10] = ((__m128i*)unshuffle_l3)[5];
+ ((__m128i*)out_counts)[11] = ((__m128i*)unshuffle_l3)[7];
+ ((__m128i*)out_counts)[12] = ((__m128i*)unshuffle_l3)[9];
+ ((__m128i*)out_counts)[13] = ((__m128i*)unshuffle_l3)[11];
+ ((__m128i*)out_counts)[14] = ((__m128i*)unshuffle_l3)[13];
+ ((__m128i*)out_counts)[15] = ((__m128i*)unshuffle_l3)[15];
+}
+
+/// @brief AVX2 implementation of threshold_into using a 2-Byte counter
+void threshold_into_short_avx2(word_t ** xs, int_fast16_t size, uint16_t threshold, word_t* dst) {
+ const __m256i signed_compare_adjustment = _mm256_set1_epi16(0x8000);
+ __m256i threshold_simd = _mm256_set1_epi16((signed)threshold - 0x8000);
+ uint8_t* dst_bytes = (uint8_t*)dst;
+ uint16_t counters[BITS];
+
+ //Clear out 16-bit counters
+ memset(counters, 0, BITS * sizeof(uint16_t));
+
+ //Loop over all input vectors, 255 at a time
+ for (int_fast16_t i = 0; i < size; i += 255) {
+ uint_fast16_t num_inputs = (i<size-254)? 255: size-i;
+
+ size_t cur_counters = 0;
+ for (size_t byte_offset = 0; byte_offset < BYTES; byte_offset += 32) {
+
+ //Call (inline) the function to load half a cache line of input bits from each input hypervector
+ __m256i scrambled_counts[8];
+ count_half_cacheline_for_255_input_hypervectors_avx2(xs + i, byte_offset, num_inputs, scrambled_counts);
+
+ //Unscramble the counters
+ __m256i out_counts[8];
+ unscramble_byte_counters_avx2(scrambled_counts, out_counts);
+
+ //Expand the 8-bit counters into 16-bits, and add them to the running counters
+ for (int_fast8_t out_i = 0; out_i < 8; out_i++) {
+ __m256i increment0 = _mm256_cvtepu8_epi16(((__m128i*)&out_counts[out_i])[0]);
+ __m256i increment1 = _mm256_cvtepu8_epi16(((__m128i*)&out_counts[out_i])[1]);
+ _mm256_storeu_si256((__m256i*)(&counters[cur_counters + 0]), _mm256_add_epi16(*(__m256i*)(&counters[cur_counters + 0]), increment0));
+ _mm256_storeu_si256((__m256i*)(&counters[cur_counters + 16]), _mm256_add_epi16(*(__m256i*)(&counters[cur_counters + 16]), increment1));
+ cur_counters += 32;
+ }
+ }
+ }
+
+ //Now do thresholding, and output the final bits
+ for (size_t i = 0; i < BITS/16; i++) {
+ __m256i adjusted_counters = _mm256_sub_epi16(*(__m256i*)(&counters[i * 16]), signed_compare_adjustment);
+ uint64_t maj_words[4];
+ *(__m256i *) maj_words = _mm256_cmpgt_epi16(adjusted_counters, threshold_simd);
+ uint8_t maj_bytes[2];
+ maj_bytes[0] = (uint8_t)_pext_u64(maj_words[0], 0x0001000100010001) | (uint8_t)_pext_u64(maj_words[1], 0x0001000100010001) << 4;
+ maj_bytes[1] = (uint8_t)_pext_u64(maj_words[2], 0x0001000100010001) | (uint8_t)_pext_u64(maj_words[3], 0x0001000100010001) << 4;
+
+ *((uint16_t*)(dst_bytes + (i * 2))) = *((uint16_t*)maj_bytes);
+ }
+}
+
+/// @brief AVX2 implementation of threshold_into using a 1-Byte counter
+void threshold_into_byte_avx2(word_t ** xs, uint8_t size, uint8_t threshold, word_t* dst) {
+ const __m256i one_twenty_eight = _mm256_set1_epi8((char)128);
+ __m256i threshold_simd = _mm256_set1_epi8(((signed char)threshold) - 128);
+ uint8_t* dst_bytes = (uint8_t*)dst;
+
+ for (size_t byte_offset = 0; byte_offset < BYTES; byte_offset += 32) {
+
+ //Call (inline) the function to load one cache line of input bits from each input hypervector
+ __m256i scrambled_counts[8];
+ count_half_cacheline_for_255_input_hypervectors_avx2(xs, byte_offset, size, scrambled_counts);
+
+ //Unscramble the counters
+ __m256i out_counts[8];
+ unscramble_byte_counters_avx2(scrambled_counts, out_counts);
+
+ //Do the threshold test, and compose out output bits
+ __m256i out_bits;
+ for (int i=0; i<8; i++) {
+ __m256i adjusted_counts = _mm256_sub_epi8(out_counts[i], one_twenty_eight);
+ __m256i maj_bits_vec = _mm256_cmpgt_epi8(adjusted_counts, threshold_simd);
+ __mmask32 maj_bits = _mm256_movemask_epi8(maj_bits_vec);
+ ((uint32_t*)&out_bits)[i] = maj_bits;
+ }
+
+ //Store the results
+ _mm256_storeu_si256((__m256i*)(dst_bytes + byte_offset), out_bits);
+ }
+}
+
+/// @brief AVX2 implementation of threshold_into using a 4-Byte counter
+void threshold_into_32bit_avx2(word_t ** xs, uint32_t size, uint32_t threshold, word_t* dst) {
+ const __m256i signed_compare_adjustment = _mm256_set1_epi32(0x80000000);
+ __m256i threshold_simd = _mm256_set1_epi32((signed)threshold - 0x80000000);
+ uint8_t* dst_bytes = (uint8_t*)dst;
+ uint32_t counters[BITS];
+
+ //Clear out the counters
+ memset(counters, 0, BITS * sizeof(uint32_t));
+
+ //Loop over all input vectors, 255 at a time
+ for (size_t i = 0; i < size; i += 255) {
+ size_t num_inputs = (i<size-254)? 255: size-i;
+
+ size_t cur_counters = 0;
+ for (size_t byte_offset = 0; byte_offset < BYTES; byte_offset += 32) {
+
+ //Call (inline) the function to load half a cache line of input bits from each input hypervector
+ __m256i scrambled_counts[8];
+ count_half_cacheline_for_255_input_hypervectors_avx2(xs + i, byte_offset, num_inputs, scrambled_counts);
+
+ //Unscramble the counters
+ __m256i out_counts[8];
+ unscramble_byte_counters_avx2(scrambled_counts, out_counts);
+
+ //Expand the 8-bit counters into 32-bits, and add them to the running counters
+ for (int_fast8_t out_i = 0; out_i < 8; out_i++) {
+ __m128i converted0 = _mm_set1_epi64(((__m64*)&out_counts[out_i])[0]);
+ __m128i converted1 = _mm_set1_epi64(((__m64*)&out_counts[out_i])[1]);
+ __m128i converted2 = _mm_set1_epi64(((__m64*)&out_counts[out_i])[2]);
+ __m128i converted3 = _mm_set1_epi64(((__m64*)&out_counts[out_i])[3]);
+
+ __m256i increment0 = _mm256_cvtepu8_epi32((__m128i) converted0);
+ __m256i increment1 = _mm256_cvtepu8_epi32((__m128i) converted1);
+ __m256i increment2 = _mm256_cvtepu8_epi32((__m128i) converted2);
+ __m256i increment3 = _mm256_cvtepu8_epi32((__m128i) converted3);
+ _mm256_storeu_si256((__m256i*)(&counters[cur_counters + 0]), _mm256_add_epi32(*(__m256i*)(&counters[cur_counters + 0]), increment0));
+ _mm256_storeu_si256((__m256i*)(&counters[cur_counters + 8]), _mm256_add_epi32(*(__m256i*)(&counters[cur_counters + 8]), increment1));
+ _mm256_storeu_si256((__m256i*)(&counters[cur_counters + 16]), _mm256_add_epi32(*(__m256i*)(&counters[cur_counters + 16]), increment2));
+ _mm256_storeu_si256((__m256i*)(&counters[cur_counters + 24]), _mm256_add_epi32(*(__m256i*)(&counters[cur_counters + 24]), increment3));
+ cur_counters += 32;
+ }
+ }
+ }
+
+ //Now do thresholding, and output the final bits
+ for (size_t i = 0; i < BITS/8; i++) {
+ __m256i adjusted_counters = _mm256_sub_epi32(*(__m256i*)(&counters[i * 8]), signed_compare_adjustment);
+ uint64_t maj_words[4];
+ *(__m256i *) maj_words = _mm256_cmpgt_epi32(adjusted_counters, threshold_simd);
+ uint8_t maj_bytes;
+ maj_bytes = (uint8_t)_pext_u64(maj_words[0], 0x0000000100000001) |
+ (uint8_t)_pext_u64(maj_words[1], 0x0000000100000001) << 2 |
+ (uint8_t)_pext_u64(maj_words[2], 0x0000000100000001) << 4 |
+ (uint8_t)_pext_u64(maj_words[3], 0x0000000100000001) << 6;
+
+ *((uint16_t*)(dst_bytes + i)) = maj_bytes;
+ }
+}
+
+/// @brief Sets each result bit high if there are more than threshold 1 bits in the corresponding bit of the input vectors
+/// @param xs array of `size` input vectors
+/// @param size number of input vectors in xs
+/// @param threshold threshold to count against
+/// @param dst the hypervector to write the results into
+void threshold_into_avx2(word_t ** xs, size_t size, size_t threshold, word_t* dst) {
+ //FUTURE OPTIMIZATION: Should we have a path for smaller sizes? Currently the main user of
+ // threshold_into() is true_majority(), and it has dedicated code for cases where n <= 21
+ if (size < 256) { threshold_into_byte_avx2(xs, size, threshold, dst); return; }
+ if (size < 65536) { threshold_into_short_avx2(xs, size, threshold, dst); return; }
+ threshold_into_32bit_avx2(xs, size, threshold, dst);
+}
+#endif //__AVX2__
+
+#if __AVX512BW__
+#define threshold_into threshold_into_avx512
+#elif __AVX2__
+#define threshold_into threshold_into_avx2
+#else
+#define threshold_into threshold_into_reference<uint32_t>
+#endif //#if __AVX512BW__
+
+/// @brief Sets each result bit high if there are more than threshold 1 bits in the corresponding bit of the input vectors
+/// @param xs array of `size` input vectors
+/// @param size number of input vectors in xs
+/// @param threshold threshold to count against
+/// @return returns a hypervector where each bit is set to 1 if the number of corresponding bits in xs exceeded threshold
+word_t* threshold(word_t ** xs, size_t size, size_t threshold) {
+ word_t* new_vec = bhv::empty();
+ threshold_into(xs, size, threshold, new_vec);
+ return new_vec;
+}
diff --git a/bhv/native.py b/bhv/native.py
index 7f584ce..8b0f1e0 100644
--- a/bhv/native.py
+++ b/bhv/native.py
@@ -31,16 +31,29 @@ def __invert__(self):
def select(self, when1, when0):
return NativePackedBHV(self.ins.select(when1.ins, when0.ins))
+ def ternary(self, y, z, op):
+ return NativePackedBHV(self.ins.ternary(y.ins, z.ins, op))
+
@classmethod
def majority(cls, xs):
return NativePackedBHV(CNativePackedBHV.majority([x.ins for x in xs]))
+ @classmethod
+ def representative(cls, xs):
+ return NativePackedBHV(CNativePackedBHV.representative([x.ins for x in xs]))
+
def active(self):
return self.ins.active()
def hamming(self, other):
return self.ins.hamming(other.ins)
+ def permute_words(self, permutation_id: int):
+ return NativePackedBHV(self.ins.permute_words(permutation_id))
+
+ def permute_byte_bits(self, permutation_id: int):
+ return NativePackedBHV(self.ins.permute_byte_bits(permutation_id))
+
def roll_words(self, d: int):
return NativePackedBHV(self.ins.roll_words(d))
@@ -74,6 +87,12 @@ def from_bytes(cls, bs):
def to_bytes(self):
return self.ins.to_bytes()
+ def __getstate__(self):
+ return self.to_bytes()
+
+ def __setstate__(self, state):
+ self.ins = CNativePackedBHV.from_bytes(state)
+
NativePackedBHV.ZERO = NativePackedBHV(CNativePackedBHV.ZERO)
NativePackedBHV.ONE = NativePackedBHV(CNativePackedBHV.ONE)
NativePackedBHV.HALF = NativePackedBHV(CNativePackedBHV.HALF)
diff --git a/bhv/symbolic.py b/bhv/symbolic.py
index 234d9f5..50e770c 100644
--- a/bhv/symbolic.py
+++ b/bhv/symbolic.py
@@ -1,4 +1,5 @@
from dataclasses import dataclass, field, fields
+from string import ascii_uppercase
from .abstract import *
from .shared import stable_hashcode, bitconfigs, unique_by_id, format_multiple, format_list
from .slice import Slice
@@ -306,6 +307,52 @@ def synth(cls, vs, t):
else:
return cls.ONE if t[0] else cls.ZERO
+ @classmethod
+ def synth_af(cls, af: float, depth=1, v_gen=lambda x: Rand(x), threshold=1e-6):
+ assert 0. < af < 1.
+ d = af - (1 / 2) ** depth
+ v = v_gen(depth)
+ if abs(d) > threshold:
+ if d > 0:
+ return v | cls.synth_af(d, depth + 1, v_gen, threshold)
+ else:
+ return v & cls.synth_af(af, depth + 1, v_gen, threshold)
+ else:
+ return v
+
+ @classmethod
+ def synth_af_ternary(cls, af: float, depth=1, v_gen=lambda x: Rand(x), threshold=1e-6):
+ assert 0. < af < 1.
+ da = af - (1 / 2) ** depth
+ va = v_gen(depth)
+
+ if abs(da) < threshold:
+ return va
+
+ if da > 0:
+ af = da
+
+ depth += 1
+ db = af - (1 / 2) ** depth
+ vb = v_gen(depth)
+
+ if db > 0:
+ af = db
+
+ if abs(db) > threshold:
+ ternary_instr = {(True, True): [0,1,1,1,1,1,1,1],
+ (True, False): [0,0,0,1,1,1,1,1],
+ (False, True): [0,0,0,0,0,1,1,1],
+ (False, False): [0,0,0,0,0,0,0,1]}[(da > 0, db > 0)]
+ # TODO implement Ternary op
+ vr = cls.synth_af_ternary(af, depth + 1, v_gen, threshold)
+ return cls.synth([va, vb, vr], ternary_instr)
+
+ if da > 0:
+ return va | vb
+ else:
+ return va & vb
+
@classmethod
def rand(cls) -> Self:
return Rand()
@@ -380,6 +427,11 @@ def expected_active_fraction(self, **kwargs):
@dataclass
class Var(SymbolicBHV):
name: str
+ @classmethod
+ def shortname(cls, i: int, letters=ascii_uppercase):
+ n = len(letters)
+ return cls(letters[i % n] + str(i // n) * (i > n))
+
def nodename(self, **kwards):
return self.name
diff --git a/setup.py b/setup.py
index ed385c5..bfacf5f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,14 +1,15 @@
from setuptools import setup, find_packages, Extension
-VERSION = '0.6.10a'
+VERSION = '0.8.3'
DESCRIPTION = 'Boolean Hypervectors'
LONG_DESCRIPTION = 'Boolean Hypervectors with various operators for experiments in hyperdimensional computing (HDC).'
native = Extension("bhv.cnative",
sources=['bhv/cnative/bindings.cpp',
- 'bhv/cnative/TurboSHAKEopt/TurboSHAKE.cpp',
- 'bhv/cnative/TurboSHAKEopt/KeccakSponge.cpp',
- 'bhv/cnative/TurboSHAKEopt/KeccakP-1600-opt64.cpp',
+ 'bhv/cnative/TurboSHAKE_opt/TurboSHAKE.cpp',
+ 'bhv/cnative/TurboSHAKE_opt/KeccakP-1600-opt64.cpp',
+ 'bhv/cnative/TurboSHAKE_AVX512/TurboSHAKE.cpp',
+ 'bhv/cnative/TurboSHAKE_AVX512/KeccakP-1600-AVX512.cpp',
],
include_dirs=['bhv/cnative', 'bhv/cnative/TurboSHAKEopt'],
extra_compile_args=['-std=c++2a', '-O3', '-march=native', '-Wall'],
| ~15% speedup in threshold() from moving straggler logic to outer loop
Here's the speedup I mentioned yesterday.
| 2023-08-04T23:08:30 | 0.0 | [] | [] |
|||
marschall-lab/gaftools | marschall-lab__gaftools-30 | 75e2b2094817cb1fe7076a02474ac59c90ef2180 | diff --git a/docs/guide.rst b/docs/guide.rst
index eddee65..f79af20 100644
--- a/docs/guide.rst
+++ b/docs/guide.rst
@@ -42,24 +42,25 @@ This subcommand retrieves the base seqeunce of paths in the given GFA.
Usage
-----
-The :code:`find_path` subcommand takes 2 obligatory inputs, a GFA file and node path (like :code:`">s82312<s82313"` (with the quotes)).
-It returns the sequence of the path.
+The :code:`find_path` subcommand takes 2 obligatory inputs, a GFA file and node path (like :code:`">s82312<s82313"` (with the quotes)) or file path which has node paths.
+It returns the sequence of the path(s) by default but using the :code:`--fasta` flag, the sequences will be returned as a FASTA file.
.. code-block::
:caption: find_path arguments
- usage: gaftools find_path [-h] [-o OUTPUT] GFA path
+ usage: gaftools find_path [-h] [-o OUTPUT] [-f] GFA path
Find the genomic sequence of a given GFA path.
positional arguments:
GFA Input GFA file (can be bgzip-compressed)
- path GFA path to retrieve the sequence (e.g., ">s82312<s82313").
+ path GFA node path to retrieve the sequence (e.g., ">s82312<s82313") OR a filepath containing node paths in different lines
optional arguments:
-h, --help show this help message and exit
-o OUTPUT, --output OUTPUT
Output file. If omitted, use standard output.
+ -f, --fasta Flag to output the sequence as a FASTA file with the seqeunce named seq_<node path>
.. _gaftools-index:
@@ -128,6 +129,7 @@ Usage
-----
The :code:`order_gfa` subcommand takes an rGFA as an obligatory input to order. Optionally, the user can specify 1 or more chromosome to be sorted,
which are given after :code:`--chromosome_order`, and the chromosome name(s) should match the SN tags in the rGFA.
+With the :code:`--by-chrom` flag, all the chromosomal graphs are output separately.
Users can also specify an output directory.
The outputs of :code:`order_gfa` are separate rGFA graphs for each chromosome and a graph for all chromosomes both ordered by S lines first then L lines, and the S lines are ordered by
@@ -136,17 +138,18 @@ their BO tag then NO tag, also will output a CSV file with node colors similar t
.. code-block::
:caption: order_gfa arguments
- usage: gaftools order_gfa [-h] [--chromosome_order CHROMOSOME_ORDER] [--with-sequence] [--outdir OUTDIR] GRAPH
+ usage: gaftools order_gfa [-h] [--chromosome_order CHROMOSOME_ORDER] [--with-sequence] [--outdir OUTDIR] [--by-chrom] GRAPH
positional arguments:
GRAPH Input rGFA file
- options:
+ optional arguments:
-h, --help show this help message and exit
--chromosome_order CHROMOSOME_ORDER
Order in which to arrange chromosomes in terms of BO sorting. Expecting comma-separated list. Default: chr1,...,chr22,chrX,chrY,chrM
--with-sequence Retain sequences in output (default is to strip sequences)
--outdir OUTDIR Output Directory to store all the GFA and CSV files. Default location is a "out" folder from the directory of execution.
+ --by-chrom Outputs each chromosome as a separate GFA, otherwise, all chromosomes in one GFA file
.. _gaftools-phase:
diff --git a/gaftools/cli/find_path.py b/gaftools/cli/find_path.py
index b1e0cae..269118e 100644
--- a/gaftools/cli/find_path.py
+++ b/gaftools/cli/find_path.py
@@ -6,34 +6,71 @@
from gaftools.cli import log_memory_usage
from gaftools.timer import StageTimer
from gaftools.gfa import GFA
+import sys
logger = logging.getLogger(__name__)
-def run_find_path(gfa_path, input_path, output=None):
- timers = StageTimer()
-
- graph = GFA(gfa_path)
+def run(gfa_path, input_path, output=None, fasta=False):
+ timers = StageTimer()
- path_seq = graph.extract_path(input_path)
- print(path_seq)
-
- logger.info("\n== SUMMARY ==")
- total_time = timers.total()
- log_memory_usage()
- logger.info("Total time: %9.2f s", total_time)
+ graph = GFA(gfa_path)
+ if input_path[0] in [">", "<"]:
+ # detected node path
+ nodes = [input_path]
+ path_seqs = [graph.extract_path(input_path)]
+ else:
+ # detected file
+ reader = open(input_path, "r")
+ nodes = []
+ path_seqs = []
+ for line in reader:
+ nodes.append(line.strip())
+ path_seqs.append(graph.extract_path(nodes[-1]))
+ reader.close()
+
+ if output is None:
+ writer = sys.stdout
+ else:
+ writer = open(output, "w")
+ if fasta:
+ for node, path_seq in zip(nodes, path_seqs):
+ print(f">seq_{node}", file=writer)
+ print(path_seq, file=writer)
+ else:
+ for node, path_seq in zip(nodes, path_seqs):
+ print(path_seq, file=writer)
+
+ if output is not None:
+ writer.close()
+
+ logger.info("\n== SUMMARY ==")
+ total_time = timers.total()
+ log_memory_usage()
+ logger.info("Total time: %9.2f s", total_time)
def add_arguments(parser):
arg = parser.add_argument
# Positional arguments
arg("gfa_path", metavar="GFA", help="Input GFA file (can be bgzip-compressed)")
- arg("input_path", metavar="path", help="GFA path to retrieve the sequence (e.g., \">s82312<s82313\").")
+ arg(
+ "input_path",
+ metavar="path",
+ help='GFA node path to retrieve the sequence (e.g., ">s82312<s82313") OR a filepath containing node paths in different lines',
+ )
arg("-o", "--output", default=None, help="Output file. If omitted, use standard output.")
+ arg(
+ "-f",
+ "--fasta",
+ action="store_true",
+ help="Flag to output the sequence as a FASTA file with the seqeunce named seq_<node path>",
+ )
+
def validate(args, parser):
return True
def main(args):
- run_find_path(**vars(args))
+ run(**vars(args))
diff --git a/gaftools/cli/order_gfa.py b/gaftools/cli/order_gfa.py
index cd0786b..2ec89ae 100644
--- a/gaftools/cli/order_gfa.py
+++ b/gaftools/cli/order_gfa.py
@@ -46,6 +46,7 @@
def run_order_gfa(
gfa_filename,
outdir,
+ by_chrom,
chromosome_order=None,
with_sequence=False,
):
@@ -105,7 +106,8 @@ def run_order_gfa(
bo = 0
total_bubbles = 0
# todo output final GFA with all the chromosomes ordered
- out_files = []
+ out_gfa = []
+ out_csv = []
for chromosome in chromosome_order:
logger.info("Processing %s", chromosome)
component_nodes = components[chromosome]
@@ -126,11 +128,10 @@ def run_order_gfa(
+ chromosome
+ ".gfa"
)
- out_files.append(f_gfa)
- f_colors = open(
- outdir + os.sep + gfa_filename.split(os.sep)[-1][:-4] + "-" + chromosome + ".csv",
- "w",
- )
+ out_gfa.append(f_gfa)
+ csv_file = outdir + os.sep + gfa_filename.split(os.sep)[-1][:-4] + "-" + chromosome + ".csv"
+ out_csv.append(csv_file)
+ f_colors = open(csv_file, "w")
f_colors.write("Name,Color,SN,SO,BO,NO\n")
total_bubbles += bubble_count
for node_name in sorted(component_nodes):
@@ -170,22 +171,37 @@ def run_order_gfa(
else:
logger.warning(f"Chromosome {chromosome} was skipped")
- final_gfa = (
- outdir + os.sep + gfa_filename.split(os.sep)[-1].split(".")[0] + "-complete" + ".gfa"
- )
- with open(final_gfa, "w") as outfile:
- # outputting all the S lines first
- for f in out_files:
- with open(f, "r") as infile:
- for l in infile:
- if l.startswith("S"):
- outfile.write(l)
- # outputting all the S lines
- for f in out_files:
- with open(f, "r") as infile:
- for l in infile:
- if l.startswith("L"):
+ if not by_chrom:
+ final_gfa = (
+ outdir + os.sep + gfa_filename.split(os.sep)[-1].split(".")[0] + "-complete" + ".gfa"
+ )
+ final_csv = (
+ outdir + os.sep + gfa_filename.split(os.sep)[-1].split(".")[0] + "-complete" + ".csv"
+ )
+ with open(final_gfa, "w") as outfile:
+ # outputting all the S lines first
+ for f in out_gfa:
+ with open(f, "r") as infile:
+ for l in infile:
+ if l.startswith("S"):
+ outfile.write(l)
+ # outputting all the S lines
+ for f in out_gfa:
+ with open(f, "r") as infile:
+ for l in infile:
+ if l.startswith("L"):
+ outfile.write(l)
+ for f in out_gfa:
+ os.remove(f)
+
+ with open(final_csv, "w") as outfile:
+ for f in out_csv:
+ with open(f, 'r') as infile:
+ for l in infile:
outfile.write(l)
+ for f in out_csv:
+ os.remove(f)
+
logger.info("Total bubbles: %d", total_bubbles)
@@ -344,7 +360,12 @@ def add_arguments(parser):
default="./out",
help='Output Directory to store all the GFA and CSV files. Default location is a "out" folder from the directory of execution.',
)
-
+ arg(
+ "--by-chrom",
+ default=False,
+ action="store_true",
+ help="Outputs each chromosome as a separate GFA, otherwise, all chromosomes in one GFA file"
+ )
def main(args):
run_order_gfa(**vars(args))
diff --git a/gaftools/gfa.py b/gaftools/gfa.py
index 9484050..71d7ed8 100644
--- a/gaftools/gfa.py
+++ b/gaftools/gfa.py
@@ -1,7 +1,6 @@
import sys
import logging
from collections import defaultdict
-
import gzip
import re
import os
@@ -626,23 +625,40 @@ def graph_from_comp(self, component_nodes):
new_graph.nodes[n] = new_node
return new_graph
- def path_exists(self, path):
+ def path_exists(self, ordered_path):
"""
Just a sanity check that a path given exists in the graph
I am assuming that the list of node given as ordered_path taken from the GAF alignment is ordered
i.e. node 1 parent of node 2, node 2 parent of node 3 and so on
"""
- ordered_path = path.replace(">", ",").replace("<", ",").split(",")
- if ordered_path[0] == "":
- ordered_path = ordered_path[1:]
+ # ordered_path = path.replace(">", ",").replace("<", ",").split(",")
+ # if ordered_path[0] == "":
+ # ordered_path = ordered_path[1:]
+ cases = {
+ (">", ">"): ("end", 0),
+ ("<", "<"): ("start", 1),
+ (">", "<"): ("end", 1),
+ ("<", ">"): ("start", 0),
+ }
for i in range(1, len(ordered_path)):
- current_node = ordered_path[i]
- previous_node = ordered_path[i - 1]
- if current_node in self.nodes[previous_node].neighbors():
- continue
- else: # some node is not connected to another node in the path
+ n1 = ordered_path[i - 1]
+ n2 = ordered_path[i]
+ try:
+ case = cases[(n1[0], n2[0])]
+ except KeyError:
+ logging.error(
+ "Something went wrong when checking the path, make sure the path follows this example"
+ ">node<node>node<nod"
+ )
+ return False
+ ok = False
+ for edge in getattr(self.nodes[n1[1:]], case[0]):
+ if (n2[1:], case[1]) == (edge[0], edge[1]):
+ ok = True
+ if not ok:
return False
+
return True
def list_is_path(self, node_list):
@@ -732,11 +748,17 @@ def extract_path(self, path):
returns the sequences representing that path
"""
seq = []
+ # path has to start with > or <, otherwise it's invalid
+ if path[0] not in {"<", ">"}:
+ logging.error(f"The path {path} does not start with < or > ")
+ return ""
+
+ path = re.findall("[><][^><]+", path)
if not self.path_exists(path):
return ""
- for n in re.findall("[><][^><]+", path):
+ for n in path:
if n[1:] not in self:
logging.error(f"The node {n[1:]} in path {path} does not seem to exist in this GFA")
return ""
| find_path output issue
"-o" does not work in find_path command.
| 2024-10-09T09:12:06 | 0.0 | [] | [] |
Subsets and Splits