filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---|
the-stack_0_16405 | from cpf_cnpj import Documento
from TelefonesBr import TelefonesBr
from datasbr import DatasBr
from acesso_cep import BuscaEndereco
import requests
exemplo_cpf = "94561576010"
exemplo_cnpj = "35379838000112"
telefone = "11976453329"
cep = "01001000"
cpf_um = Documento.cria_documento(exemplo_cpf)
cnpj_um = Documento.cria_documento(exemplo_cnpj)
telefone_um = TelefonesBr(telefone)
hora_cadastro = DatasBr()
tempo_cadastro = hora_cadastro.tempo_cadastro()
objeto_cep = BuscaEndereco(cep)
logradouro, bairro, cidade, uf = objeto_cep.acessa_via_cep()
print(f'''
CPF: {cpf_um}
CNPJ: {cnpj_um}
Telefone: {telefone_um}
Hora do Cadastro: {hora_cadastro}
Tempo de Cadastro: {tempo_cadastro}
Dados Cadastrais:
Rua: {logradouro}
Bairro: {bairro}
Cidade: {cidade}
Uf: {uf}
''')
|
the-stack_0_16406 | # Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
1.1.0 - Add chap support and minor bug fixes
1.1.1 - Add wait logic for delete volumes
1.1.2 - Update ig to None before delete volume
1.2.0 - Add retype support
"""
VERSION = '1.2.0'
volume_stats = {}
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_update_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.cb_use_chap = self.configuration.use_chap_auth
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.parse.urlencode(api)
return url
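    # Illustrative example (assumed values): _get_url('listTsm', {'accountid': '42'}, 'SECRET')
    # yields "/client/api?command=listTsm&response=json&accountid=42&apiKey=SECRET"
    # (query parameter order may differ, since it follows dict iteration order).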
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = http_client.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
def _api_request_for_cloudbyte(self, cmd, params, version=None):
"""Make http calls to CloudByte."""
LOG.debug("Executing CloudByte API for command [%s].", cmd)
if version is None:
version = CloudByteISCSIDriver.VERSION
# Below is retrieved from /etc/cinder/cinder.conf
apikey = self.configuration.cb_apikey
if apikey is None:
msg = (_("API key is missing for CloudByte driver."))
raise exception.VolumeBackendAPIException(data=msg)
host = self.configuration.san_ip
# Construct the CloudByte URL with query params
url = self._get_url(cmd, params, apikey)
data = {}
error_details = None
http_status = None
try:
# Execute CloudByte API & frame the response
res_obj = self._execute_and_get_response_details(host, url)
data = res_obj['data']
error_details = res_obj['error']
http_status = res_obj['http_status']
except http_client.HTTPException as ex:
msg = (_("Error executing CloudByte API [%(cmd)s], "
"Error: %(err)s.") %
{'cmd': cmd, 'err': ex})
raise exception.VolumeBackendAPIException(data=msg)
# Check if it was an error response from CloudByte
if http_status != 200:
msg = (_("Failed to execute CloudByte API [%(cmd)s]."
" Http status: %(status)s,"
" Error: %(error)s.") %
{'cmd': cmd, 'status': http_status,
'error': error_details})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
cmd)
return data
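    # Minimal usage sketch (mirrors _request_tsm_details below):
    #   data = self._api_request_for_cloudbyte('listTsm', {'accountid': account_id})
    # The return value is the parsed JSON response as a Python dict.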
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _add_qos_group_request(self, volume, tsmid, volume_name,
qos_group_params):
# Prepare the user input params
params = {
"name": "QoS_" + volume_name,
"tsmid": tsmid
}
# Get qos related params from configuration
params.update(self.configuration.cb_add_qosgroup)
# Override the default configuration by qos specs
if qos_group_params:
params.update(qos_group_params)
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name, file_system_params):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params.update(self.configuration.cb_create_volume)
# Override the default configuration by qos specs
if file_system_params:
params.update(file_system_params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name, account_name):
# Filter required tsm's details
tsms = data['listTsmResponse'].get('listTsm')
if tsms is None:
msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
"for account [%(account)s].") %
{'tsm': tsm_name, 'account': account_name})
raise exception.VolumeBackendAPIException(data=msg)
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
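    # When the TSM is found, tsmdetails has the shape {'datasetid': ..., 'tsmid': ...};
    # an empty dict means no TSM with that name appeared in the listTsm response.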
def _retry_volume_operation(self, operation, retries,
max_retries, jobid,
cb_volume):
"""CloudByte async calls via the FixedIntervalLoopingCall."""
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
count = retries['count']
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if result_res is None:
msg = (_(
"Null response received while querying "
"for [%(operation)s] based job [%(job)s] "
"at CloudByte storage.") %
{'operation': operation, 'job': jobid})
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
if status == 1:
LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for "
"volume [%(cb_volume)s]."),
{'operation': operation, 'cb_volume': cb_volume})
raise loopingcall.LoopingCallDone()
elif status == 2:
job_result = result_res.get("jobresult")
err_msg = job_result.get("errortext")
err_code = job_result.get("errorcode")
msg = (_(
"Error in Operation [%(operation)s] "
"for volume [%(cb_volume)s] in CloudByte "
"storage: [%(cb_error)s], "
"error code: [%(error_code)s]."),
{'cb_error': err_msg,
'error_code': err_code,
'cb_volume': cb_volume,
'operation': operation})
raise exception.VolumeBackendAPIException(data=msg)
elif count == max_retries:
# All attempts exhausted
LOG.error(_LE("CloudByte operation [%(operation)s] failed"
" for volume [%(vol)s]. Exhausted all"
" [%(max)s] attempts."),
{'operation': operation,
'vol': cb_volume,
'max': max_retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
count += 1
retries['count'] = count
LOG.debug("CloudByte operation [%(operation)s] for"
" volume [%(vol)s]: retry [%(retry)s] of [%(max)s].",
{'operation': operation,
'vol': cb_volume,
'retry': count,
'max': max_retries})
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Create Volume',
retries,
max_retries,
jobid,
cb_volume_name)
timer.start(interval=retry_interval).wait()
def _wait_for_volume_deletion(self, volume_response, cb_volume_id):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('deleteFileSystemResponse')
if vol_res is None:
msg = _("Null response received while deleting volume [%s] "
"at CloudByte storage.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"delete volume [%s] response.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_delete_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_delete_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Delete Volume',
retries,
max_retries,
jobid,
cb_volume_id)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume, chap):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
# Will provide CHAP Authentication on forthcoming patches/release
model_update['provider_auth'] = None
if chap:
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
{'iqn': volume['iqnname'], 'proid': volume['id']})
return model_update
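    # The resulting provider_location has the form "<ipaddress>:3260 <iqnname> 0", and
    # provider_auth becomes "CHAP <username> <password>" when CHAP credentials are supplied.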
def _build_provider_details_from_response(self,
cb_volumes,
volume_name,
chap):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol,
chap)
break
return model_update
def _get_initiator_group_id_from_response(self, data, filter):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
if ig.get('initiatorgroup') == filter:
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
if ag_id:
params['authgroupid'] = ag_id
params['authmethod'] = "CHAP"
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot_name, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot_name:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
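    # Note: numericquota/availablequota are divided by units.Ki (1024), which assumes the
    # CloudByte API reports TSM quotas in MiB while the driver exposes capacities in GiB.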
def _get_auth_group_id_from_response(self, data):
"""Find iSCSI auth group id."""
chap_group = self.configuration.cb_auth_group
ag_list_res = data.get('listiSCSIAuthGroupResponse')
if ag_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi auth groups.")
raise exception.VolumeBackendAPIException(data=msg)
ag_list = ag_list_res.get('authgroup')
if ag_list is None:
msg = _('No iscsi auth groups were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ag_id = None
for ag in ag_list:
if ag.get('name') == chap_group:
ag_id = ag['id']
break
else:
msg = _("Auth group [%s] details not found in "
"CloudByte storage.") % chap_group
raise exception.VolumeBackendAPIException(data=msg)
return ag_id
def _get_auth_group_info(self, account_id, ag_id):
"""Fetch the auth group details."""
params = {"accountid": account_id, "authgroupid": ag_id}
auth_users = self._api_request_for_cloudbyte(
'listiSCSIAuthUser', params)
auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse')
if auth_user_details_res is None:
msg = _("No response was received from CloudByte storage "
"list iSCSI auth user API call.")
raise exception.VolumeBackendAPIException(data=msg)
auth_user_details = auth_user_details_res.get('authuser')
if auth_user_details is None:
msg = _("Auth user details not found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
chapuser = auth_user_details[0].get('chapusername')
chappassword = auth_user_details[0].get('chappassword')
if chapuser is None or chappassword is None:
msg = _("Invalid chap user details found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id}
return data
def _get_chap_info(self, account_id):
"""Fetch the chap details."""
params = {"accountid": account_id}
iscsi_auth_data = self._api_request_for_cloudbyte(
'listiSCSIAuthGroup', params)
ag_id = self._get_auth_group_id_from_response(
iscsi_auth_data)
return self._get_auth_group_info(account_id, ag_id)
def _export(self):
model_update = {'provider_auth': None}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
chap = self._get_chap_info(account_id)
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
return model_update
def _update_initiator_group(self, volume_id, ig_name):
# Get account id of this account
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
# Filter the list of initiator groups with the name
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, ig_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Update the iscsi service with above fetched iscsi_id
self._request_update_iscsi_service(iscsi_id, ig_id, None)
LOG.debug("CloudByte initiator group updated successfully for volume "
"[%(vol)s] with ig [%(ig)s].",
{'vol': volume_id,
'ig': ig_name})
def _get_qos_by_volume_type(self, ctxt, type_id):
"""Get the properties which can be QoS or file system related."""
update_qos_group_params = {}
update_file_system_params = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
extra_specs = volume_type.get('extra_specs')
if qos_specs_id is not None:
specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
# Override extra specs with specs
# Hence specs will prefer QoS than extra specs
extra_specs.update(specs)
for key, value in extra_specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.configuration.cb_update_qos_group:
update_qos_group_params[key] = value
elif key in self.configuration.cb_update_file_system:
update_file_system_params[key] = value
return update_qos_group_params, update_file_system_params
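    # Extra specs are expected as "<scope>:<key>" (e.g. an assumed spec "cb:iops"); the part after
    # the colon is matched against cb_update_qos_group or cb_update_file_system to decide whether
    # it belongs to the QoS group update or the file system update.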
def create_volume(self, volume):
qos_group_params = {}
file_system_params = {}
tsm_name = self.configuration.cb_tsm_name
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
# Set backend storage volume name using OpenStack volume id
cb_volume_name = volume['id'].replace("-", "")
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id is not None:
qos_group_params, file_system_params = (
self._get_qos_by_volume_type(ctxt, type_id))
LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
"at CloudByte storage w.r.t "
"OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_name,
'stack_vol': volume.get('id'),
'tsm': tsm_name})
tsm_data = self._request_tsm_details(account_id)
tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
# Send request to create a qos group before creating a volume
LOG.debug("Creating qos group for CloudByte volume [%s].",
cb_volume_name)
qos_data = self._add_qos_group_request(
volume, tsm_details.get('tsmid'), cb_volume_name, qos_group_params)
# Extract the qos group id from response
qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
LOG.debug("Successfully created qos group for CloudByte volume [%s].",
cb_volume_name)
# Send a create volume request to CloudByte API
vol_data = self._create_volume_request(
volume, tsm_details.get('datasetid'), qosgroupid,
tsm_details.get('tsmid'), cb_volume_name, file_system_params)
# Since create volume is an async call;
# need to confirm the creation before proceeding further
self._wait_for_volume_creation(vol_data, cb_volume_name)
# Fetch iscsi id
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params={})
volume_id = self._get_volume_id_from_response(cb_volumes,
cb_volume_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, 'ALL')
LOG.debug("Updating iscsi service for CloudByte volume [%s].",
cb_volume_name)
ag_id = None
chap_info = {}
if self.cb_use_chap is True:
chap_info = self._get_chap_info(account_id)
ag_id = chap_info['ag_id']
# Update the iscsi service with above fetched iscsi_id & ig_id
self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
LOG.debug("CloudByte volume [%(vol)s] updated with "
"iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
"authentication group [%(ag)s].",
{'vol': cb_volume_name, 'iscsi': iscsi_id,
'ig': ig_id, 'ag': ag_id})
# Provide the model after successful completion of above steps
provider = self._build_provider_details_from_response(
cb_volumes, cb_volume_name, chap_info)
LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
# Search cb_volume_id in CloudByte volumes
# incase it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
# Need to set the initiator group to None before deleting
self._update_initiator_group(cb_volume_id, 'None')
params = {"id": cb_volume_id}
del_res = self._api_request_for_cloudbyte('deleteFileSystem',
params)
self._wait_for_volume_deletion(del_res, cb_volume_id)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
# Set backend storage snapshot name using OpenStack snapshot id
snapshot_name = "snap_" + snapshot['id'].replace("-", "")
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
First it will create a snapshot of the source/parent volume,
then it creates a clone of this newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = src_volume.get('id')
# Generating id for snapshot
# as this is not user entered in this particular usecase
snapshot_id = six.text_type(uuid.uuid1())
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
chap_info = {}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
chap_info = self._get_chap_info(account_id)
model_update = self._build_provider_details_from_volume(cb_vol,
chap_info)
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
# If cb_snapshot_path is 'None'
# then no need to execute CloudByte API
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
def create_export(self, context, volume, connector):
"""Setup the iscsi export info."""
return self._export()
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
return self._export()
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
def retype(self, ctxt, volume, new_type, diff, host):
"""Retypes a volume, QoS and file system update is only done."""
cb_volume_id = volume.get('provider_id')
if cb_volume_id is None:
message = _("Provider information w.r.t CloudByte storage "
"was not found for OpenStack "
"volume [%s].") % volume['id']
raise exception.VolumeBackendAPIException(message)
update_qos_group_params, update_file_system_params = (
self._get_qos_by_volume_type(ctxt, new_type['id']))
if update_qos_group_params:
list_file_sys_params = {'id': cb_volume_id}
response = self._api_request_for_cloudbyte(
'listFileSystem', list_file_sys_params)
response = response['listFilesystemResponse']
cb_volume_list = response['filesystem']
cb_volume = cb_volume_list[0]
if not cb_volume:
msg = (_("Volume [%(cb_vol)s] was not found at "
"CloudByte storage corresponding to OpenStack "
"volume [%(ops_vol)s].") %
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
raise exception.VolumeBackendAPIException(data=msg)
update_qos_group_params['id'] = cb_volume.get('groupid')
self._api_request_for_cloudbyte(
'updateQosGroup', update_qos_group_params)
if update_file_system_params:
update_file_system_params['id'] = cb_volume_id
self._api_request_for_cloudbyte(
'updateFileSystem', update_file_system_params)
LOG.info(_LI("Successfully updated CloudByte volume [%(cb_vol)s] "
"corresponding to OpenStack volume [%(ops_vol)s]."),
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
return True
|
the-stack_0_16407 | from flask import Flask, render_template, request, redirect, url_for
from music_data_types import Artist, Song, Discography
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html')
@app.route('/', methods=['POST'])
def render_page():
name = request.form['name']
option = request.form['radios']
if name:
if option == "Song":
return redirect(url_for('render_song', name=name))
        else:
            return redirect(url_for('render_artist', name=name))
    # No name submitted: fall back to the index page so the view always returns a response.
    return redirect(url_for('index'))
@app.route('/song/<name>')
def render_song(name):
song = Song()
song.search_song(name)
song.get_mood()
song.get_keywords()
mood = song.mood
words = song.keywords
song_name = song.name
artist = song.artist
rating = song.rating
album = song.album
genre = song.genre
link = song.lyrics_link
return render_template("song.html", song_name=song_name, mood=mood, words=words, artist=artist, rating=rating,
album=album, genre=genre, link=link)
@app.route('/artist/<name>')
def render_artist(name):
artist = Artist(name)
disc = Discography(artist)
artist_name = artist.name
rating = artist.rating
genre = artist.genre
country = artist.country
words = disc.get_overall_keywords()
moods = disc.get_overall_mood()
songs_num = disc.songs_num
songs = disc.top_songs
link = artist.link
return render_template("artist.html", artist_name=artist_name, moods=moods, words=words, genre=genre, rating=rating,
country=country, link=link, songs_num=songs_num, songs=songs)
@app.errorhandler(500)
def internal_error(error):
return render_template('error.html')
@app.errorhandler(404)
def page_not_found_error(error):
return render_template('error.html')
if __name__ == '__main__':
app.run()
|
the-stack_0_16408 | from Bio.Seq import Seq
from Bio.SeqUtils import nt_search, GC, molecular_weight
from Bio import SeqIO
seqobj = Seq("ATCGATATATACGCGAT")
print(seqobj.translate(to_stop=True))
patron = Seq("ACG")
resultado = nt_search(str(seqobj), patron)
print(resultado)
print(GC(seqobj))
print(molecular_weight(seqobj))
# Exercise 1: ORFs
# 1. Define the sequence.
sequence = Seq("AGCCATGTAGCTAACTCAGGTTACATGGGGATGACCCCGCGACTTGGATTAGAGTCTCTTTTGGAATAAGCCTGAATGATCCGAGTAGCATCTCAG")
# 2. Search for the start codon.
inicio = Seq("ATG")
posicion = nt_search(str(sequence), inicio)
# 3. Walk the sequence looking for start codons and get the sequence from each one.
for i in range(1, len(posicion)):
    seq_prot = sequence[posicion[i]:]  # posicion[0] is the search pattern; actual positions start at index 1
protein = seq_prot.translate(to_stop=True)
# CLASS 2, VERSION 2
# 1. Store the IDs of low-quality records in a list.
mala_calidad = []
umbral = 32
new = open("../docs/records.txt", "w")
# 2. Access the Phred qualities and compute the average score for each record.
for record in SeqIO.parse("../docs/sample.fastq", "fastq"):
promedio = sum(record.letter_annotations["phred_quality"]) / len(record.letter_annotations["phred_quality"])
    # 2.1. Add the record ID if the average is below the quality threshold.
if promedio < umbral:
mala_calidad.append((promedio, record.id))
    # 2.2. Save the records that do exceed the threshold.
if promedio > umbral:
        new.write(record.id + "\n")  # one ID per line
# 3. Print the length of the mala_calidad list.
print(len(mala_calidad))
new.close()
# Gen Bank
'''
for gb_record in SeqIO.parse("../docs/aichi.gb", "genbank"):
print('ID', gb_record.id)
print('Secuencia', str(gb_record.seq)[0:30], '...')
print('Longitud', len(gb_record))
for annotation, value in gb_record.annotations.items():
print(annotation, value)
'''
# Exercise 4.
for gb_record in SeqIO.parse("../docs/virus.gb", "genbank"):
for annotation, value in gb_record.annotations.items():
print(annotation, value)
print(gb_record.annotations['organism'])
print(gb_record.annotations['sequence_version'])
print(gb_record.features[0].location)
# Exercise 5. Extract the country and isolation source of the isolate (feature 0 is the source feature).
print(gb_record.features[0].qualifiers['isolation_source'])
print(gb_record.features[0].qualifiers['country'])
# Store the start and end of the sequence.
start = gb_record.features[1].location.nofuzzy_start
end = gb_record.features[1].location.nofuzzy_end
# Store the sequence between the start and the end.
nueva_seq = gb_record.seq[start:end]
# Translate into protein.
protein = nueva_seq.translate()
print(protein)
# Print data for the L gene.
print(gb_record.features[9].qualifiers['gene'])
start_L = gb_record.features[9].location.nofuzzy_start
end_L = gb_record.features[9].location.nofuzzy_end
sequence_L = gb_record.seq[start_L:end_L]
print(sequence_L)
rna_L = sequence_L.transcribe()
print(rna_L[0:5])
protein_L = sequence_L.translate()
print(protein_L)
number = len(gb_record.features) - 1
while number > -1:
if gb_record.features[number].qualifiers['gene'] == ['L']:
print(gb_record.features[number].qualifiers['gene'])
break
number -= 1
|
the-stack_0_16409 | from __future__ import print_function
import json
import urllib
import boto3
import logging, logging.config
from botocore.client import Config
# Because Step Functions client uses long polling, read timeout has to be > 60 seconds
sfn_client_config = Config(connect_timeout=50, read_timeout=70)
sfn = boto3.client('stepfunctions', config=sfn_client_config)
sts = boto3.client('sts')
account_id = sts.get_caller_identity().get('Account')
region_name = boto3.session.Session().region_name
def load_log_config():
# Basic config. Replace with your own logging config if required
root = logging.getLogger()
root.setLevel(logging.INFO)
return root
def map_activity_arn(bucket, key):
# Map s3 key to activity ARN based on a convention
# Here, we simply map bucket name plus last element in the s3 object key (i.e. filename) to activity name
key_elements = [x.strip() for x in key.split('/')]
activity_name = '{}-{}'.format(bucket, key_elements[-1])
return 'arn:aws:states:{}:{}:activity:{}'.format(region_name, account_id, activity_name)
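# Illustrative mapping (assumed names): bucket "my-bucket" and key "incoming/report.csv"
# resolve to activity name "my-bucket-report.csv", which is then expanded to its activity ARN.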
# Load logging config and create logger
logger = load_log_config()
def handler(event, context):
logger.info("Received event: " + json.dumps(event, indent=2))
# Get the object from the event and show its content type
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
# Based on a naming convention that maps s3 keys to activity ARNs, deduce the activity arn
sfn_activity_arn = map_activity_arn(bucket, key)
sfn_worker_name = 'on_s3_object_created'
try:
try:
response = sfn.get_activity_task(
activityArn=sfn_activity_arn,
workerName=sfn_worker_name
)
except Exception as e:
logger.critical(e.message)
logger.critical(
'Unrecoverable error invoking get_activity_task for {}.'.format(sfn_activity_arn))
raise
# Get the Task Token
sfn_task_token = response.get('taskToken', '')
logger.info('Sending "Task Succeeded" signal to Step Functions..')
# Build an output dict and format it as JSON
task_output_dict = {
'S3BucketName': bucket,
'S3Key': key,
'SFNActivityArn': sfn_activity_arn
}
task_output_json = json.dumps(task_output_dict)
sfn_resp = sfn.send_task_success(
taskToken=sfn_task_token,
output=task_output_json
)
except Exception as e:
logger.critical(e)
raise e
|
the-stack_0_16410 | import math
def no_moving_vehicles(object_localizer_inference) -> bool:
no_movement_mdv_max_length = 3
for obstacle in object_localizer_inference:
if obstacle["label"] == "car" or obstacle["label"] == "bicycle":
mdv_length = math.sqrt(obstacle["mdv"][0]**2 + obstacle["mdv"][1]**2 + obstacle["mdv"][2]**2)
if mdv_length > no_movement_mdv_max_length:
return False
return True # if cars mdv is less than threshold, than no movement or little movement
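# Expected input sketch (field names taken from the lookups above): a list of dicts such as
# [{"label": "car", "mdv": [0.1, 0.0, 0.2]}, {"label": "person", "mdv": [4.0, 0.0, 0.0]}],
# where "mdv" is a 3-component vector whose Euclidean length is compared against the threshold.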
|
the-stack_0_16411 | from datetime import timedelta
from flask import Flask, redirect, render_template, request, url_for
import json
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
from webpixels import PixelSet, RgbPixel
from webpixels.controller import ColorKinetics
app = Flask(__name__)
ioloop = IOLoop.instance()
config_file = None
channels = {}
pixels = {}
fixtures = {}
presets = {}
last_preset = None
def load_config(config_file):
with open(config_file) as f:
config = json.loads(f.read())
for name, controllerConfig in config['controllers'].items():
controllerType = controllerConfig['type']
if controllerType == 'ColorKinetics':
controller = ColorKinetics(name, controllerConfig['host'])
for channel in controller.channels:
channels[channel.get_name()] = channel
for name, pixelConfig in config['pixels'].items():
chan_set = [channels[channel] for channel in pixelConfig['channels']]
pixel = RgbPixel(name, *chan_set)
pixels[pixel.get_name()] = pixel
for name, fixtureConfig in config['fixtures'].items():
pixel_set = [pixels[pixel] for pixel in fixtureConfig['pixels']]
fixture = PixelSet(name, pixel_set)
fixtures[fixture.get_name()] = fixture
global all_pixel
all_pixel = PixelSet('all', pixels.values())
if 'presets' in config:
presets.update(config['presets'])
def save_config(config_file):
controller_set = set()
saved_controllers = {}
saved_pixels = {}
saved_fixtures = {}
for pixel in pixels.values():
controller_set.update(pixel.get_controllers())
saved_pixels[pixel.get_name()] = {
'channels': [
pixel.red.get_name(),
pixel.green.get_name(),
pixel.blue.get_name()
]
}
for fixture in fixtures.values():
saved_fixtures[fixture.get_name()] = {
'pixels': [subpixel.get_name() for subpixel in fixture.get_pixels()]
}
for controller in controller_set:
if isinstance(controller, ColorKinetics):
controller_type = "ColorKinetics"
saved_controllers[controller.get_name()] = {
'host': controller.host,
'type': controller_type
}
save_data = json.dumps({
'controllers': saved_controllers,
'pixels': saved_pixels,
'fixtures': saved_fixtures,
'presets': presets
}, sort_keys=True, indent=2, separators=(',', ': '))
with open(config_file, 'w') as f:
f.write(save_data)
def redirect_url():
return redirect(request.args.get('next') or \
request.referrer or \
url_for('index'))
fade_in_progress = False
def fade_step():
global fade_in_progress
need_more = False
controller_set = set()
for pixel in pixels.values():
if pixel.step():
need_more = True
controller_set.update(pixel.get_controllers())
for controller in controller_set:
controller.sync()
if need_more:
ioloop.add_timeout(timedelta(milliseconds=25), fade_step)
else:
fade_in_progress = False
def start_fade():
global fade_in_progress
if fade_in_progress:
return
fade_in_progress = True
fade_step()
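# Design note: start_fade kicks off a single fade loop; fade_step re-schedules itself on the
# Tornado IOLoop every 25 ms until no pixel reports further steps, then clears fade_in_progress.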
@app.route('/', methods=['GET'])
def index():
fixture_list = []
for name, fixture in fixtures.items():
subpixels = [(pixel.get_name(), pixel.get_html_color())
for pixel in fixture.get_pixels()]
fixture_list.append((name, fixture.get_html_color(), subpixels))
fixture_list.sort(key=lambda fixture: fixture[0])
return render_template('index.html',
all=all_pixel.get_html_color(),
fixtures=fixture_list)
@app.route('/pixel/<name>', methods=['GET', 'POST'])
def pixel(name):
if name == 'all':
pixel = all_pixel
else:
pixel = fixtures.get(name)
if pixel is None:
pixel = pixels[name]
if request.method == 'POST':
return pixel_post(pixel)
else:
return pixel_get(pixel)
def pixel_post(pixel):
r = int(request.form['r'])
g = int(request.form['g'])
b = int(request.form['b'])
pixel.set_target(r, g, b)
start_fade()
return ""
def pixel_get(pixel):
r, g, b = pixel.get()
return render_template('pixel.html',
pixel=pixel.get_name(),
r=r, g=g, b=b)
@app.route('/presets', methods=['GET'])
def preset_list():
preset_list = list(presets.keys())
preset_list.sort()
return render_template('presets.html',
presets=preset_list,
last_preset=last_preset)
@app.route('/preset/save', methods=['POST'])
def preset_save():
preset = {}
for name, pixel in pixels.items():
preset[name] = pixel.get()
presets[request.form['name']] = preset
save_config(config_file)
global last_preset
last_preset = request.form['name']
return ""
@app.route('/preset/apply', methods=['POST'])
def preset_apply():
name = request.form['preset']
preset = presets[name]
for name, value in preset.items():
pixel = pixels[name]
pixel.set_target(*value)
start_fade()
global last_preset
last_preset = name
return ""
@app.route('/preset/delete', methods=['POST'])
def preset_delete():
del presets[request.form['name']]
save_config(config_file)
return ""
if __name__ == '__main__':
import sys
    if len(sys.argv) != 2:
        print("Usage: python server.py config.json")
        sys.exit(1)
config_file = sys.argv[1]
load_config(config_file)
app.debug = True
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(80)
ioloop.start()
|
the-stack_0_16415 | #!/usr/bin/env python3
import sys
import tempfile
import warnings
from lxml import etree as ET
def qname(ns, key, name):
if key in ns:
return "{{{}}}{}".format(ns[key], name)
return name
def create_naf(sofatext, sofaid, xminame):
naf = ET.Element("NAF")
naf.set('version', 'v1.naf')
naf.set('{http://www.w3.org/XML/1998/namespace}lang', 'fr')
nafHeader = ET.SubElement(naf, 'nafHeader')
linguisticProcessors = ET.SubElement(nafHeader, 'linguisticProcessors')
linguisticProcessors.set('layer', 'xmi')
lp = ET.SubElement(linguisticProcessors, 'lp')
lp.set('name', xminame)
lp.set('version', sofaid)
raw = ET.SubElement(naf, 'raw')
raw.text = ET.CDATA(sofatext)
return naf
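# Sketch of the NAF document produced above (values illustrative):
#   <NAF version="v1.naf" xml:lang="fr">
#     <nafHeader><linguisticProcessors layer="xmi"><lp name="/tmp/xmiXXXX" version="7"/></linguisticProcessors></nafHeader>
#     <raw><![CDATA[...sofa text...]]></raw>
#   </NAF>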
def search_text(xmi):
ns = xmi.nsmap.copy()
rawtext = ""
sofaid = "-1"
sofatag = qname(ns, 'cas', 'Sofa')
sofas = xmi.findall(sofatag)
if len(sofas) == 0:
return rawtext, sofaid
id = sofas[0].get(qname(ns, 'xmi', 'id'))
if id is not None:
sofaid = id
rawtext = sofas[0].get('sofaString')
for i in range(1, len(sofas)):
sofa = sofas[i]
if sofa.get('sofaID') != '_InitialView':
continue
id = sofa.get(qname(ns, 'xmi', 'id'))
if id is not None:
sofaid = id
rawtext = sofa.get('sofaString')
break
return rawtext, sofaid
def emptyNAF():
naf = ET.Element("NAF")
naf.set('version', 'v1.naf')
return naf
def main():
try:
tree = ET.parse(sys.stdin)
xmi = tree.getroot()
rawtext, sofaid = search_text(xmi)
xmiTemp = tempfile.NamedTemporaryFile(delete=False)
tree.write(xmiTemp)
naf = create_naf(rawtext, sofaid, xmiTemp.name)
except Exception as e:
msg = "Warning: an exception occured: {}".format(e)
warnings.warn(msg)
naf = emptyNAF()
#print(xmiTemp.name)
# write
a = ET.tostring(naf, encoding="utf-8")
print(a.decode("utf-8"))
if __name__ == "__main__":
    main()
|
the-stack_0_16416 | from multiprocessing import Process, Queue
from Queue import Empty
from ansible_server import ansible_server
# DON'T USE THIS UNLESS YOU KNOW WHAT YOU'RE DOING
# Low level message sending. For high level messaging, use send_msg.
def send(msg):
send_queue.put_nowait(msg)
# Use this one instead of send
def send_message(msg_type, content):
send({
'header': {'msg_type': msg_type},
'content': content
})
# Receives a message, or None if there is no current message.
def recv():
try:
return recv_queue.get_nowait()
except Empty:
return None
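# Messages placed on the queues follow the shape used by send_message:
#   {'header': {'msg_type': <type>}, 'content': <payload>}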
# Start up the Flask-SocketIO server
send_queue = Queue()
recv_queue = Queue()
ansible_p = Process(target=ansible_server, args=(send_queue, recv_queue))
ansible_p.start()
|
the-stack_0_16417 | from django import forms
from subscriptions.models import Subscription
from django.utils.translation import gettext as _
class SubscriptionForm(forms.ModelForm):
    # STATUS_CHOICES mirrors Subscription.STATUS_CHOICES but omits the Unknown ('(Unknown)') choice so users cannot select it.
STATUS_CHOICES = (
(Subscription.STATUS_CHOICE_SUBSCRIBED, _('Subscribed')),
(Subscription.STATUS_CHOICE_UNSUBSCRIBED, _('Unsubscribed')),
)
status = forms.ChoiceField(choices=STATUS_CHOICES)
class Meta:
model = Subscription
fields = [
'status',
]
def save(self, commit=True):
usersubscription = super().save(commit=False)
usersubscription.status = self.cleaned_data['status']
if commit:
usersubscription.save()
return usersubscription
|
the-stack_0_16418 | # pylint: skip-file
# flake8: noqa
# pylint: disable=wrong-import-position,too-many-branches,invalid-name
import json
from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
# NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
args = ['atomic', 'install', '--system', '--system-package=no',
'--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
else:
changed = "Extracting" in out
return rc, out, err, changed
def _uninstall(module, name):
''' uninstall an atomic container by its name. '''
args = ['atomic', 'uninstall', name]
rc, out, err = module.run_command(args, check_rc=False)
return rc, out, err, False
def do_install(module, container, image, values_list):
''' install a container and exit the module. '''
rc, out, err, changed = _install(module, container, image, values_list)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
module.exit_json(msg=out, changed=changed)
def do_uninstall(module, name):
''' uninstall a container and exit the module. '''
rc, out, err, changed = _uninstall(module, name)
if rc != 0:
module.fail_json(rc=rc, msg=err)
module.exit_json(msg=out, changed=changed)
def do_update(module, container, old_image, image, values_list):
''' update a container and exit the module. If the container uses a different
image than the current installed one, then first uninstall the old one '''
# the image we want is different than the installed one
if old_image != image:
rc, out, err, _ = _uninstall(module, container)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return do_install(module, container, image, values_list)
# if the image didn't change, use "atomic containers update"
args = ['atomic', 'containers', 'update'] + values_list + [container]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Extracting" in out
module.exit_json(msg=out, changed=changed)
def do_rollback(module, name):
''' move to the previous deployment of the container, if present, and exit the module. '''
args = ['atomic', 'containers', 'rollback', name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Rolling back" in out
module.exit_json(msg=out, changed=changed)
def core(module):
''' entrypoint for the module. '''
name = module.params['name']
image = module.params['image']
values = module.params['values']
state = module.params['state']
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
out = {}
err = {}
rc = 0
values_list = ["--set=%s" % x for x in values] if values else []
args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return
# NOTE: "or '[]' is a workaround until atomic containers list --json
# provides an empty list when no containers are present.
containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
if state == 'present' and present:
module.exit_json(msg=out, changed=False)
elif (state in ['latest', 'present']) and not present:
do_install(module, name, image, values_list)
elif state == 'latest':
do_update(module, name, old_image, image, values_list)
elif state == 'absent':
if not present:
module.exit_json(msg="", changed=False)
else:
do_uninstall(module, name)
elif state == 'rollback':
do_rollback(module, name)
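# Illustrative module arguments (names and values assumed, matching the argument_spec in main()):
#   {"name": "etcd", "image": "registry.example.com/etcd", "state": "latest", "values": ["FOO=bar"]}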
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, required=True),
image=dict(default=None, required=True),
state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
values=dict(type='list', default=[]),
),
)
# Verify that the platform supports atomic command
rc, _, err = module.run_command('atomic -v', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
try:
core(module)
except Exception as e: # pylint: disable=broad-except
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
the-stack_0_16419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'han'
import os
import re
import zipfile
import spacy
import json
import h5py
import logging
import numpy as np
from functools import reduce
from utils.functions import pad_sequences
from .doc_text import DocText, Space
logger = logging.getLogger(__name__)
class PreprocessData:
"""
preprocess dataset and glove embedding to hdf5 files
"""
padding = '__padding__' # id = 0
padding_idx = 0 # all the features padding idx, exclude answer_range
answer_padding_idx = -1
_compress_option = dict(compression="gzip", compression_opts=9, shuffle=False)
def __init__(self, global_config):
# data config
self._dev_path = ''
self._train_path = ''
self._export_squad_path = ''
self._glove_path = ''
self._embedding_size = 300
self._ignore_max_len = 10000
self._load_config(global_config)
# preprocess config
self._max_answer_len = 0
# temp data
self._word2id = {self.padding: 0}
self._char2id = {self.padding: 0, '`': 1} # because nltk word tokenize will replace '"' with '``'
self._pos2id = {self.padding: 0}
self._ent2id = {self.padding: 0}
self._word2vec = {self.padding: [0. for i in range(self._embedding_size)]}
self._oov_num = 0
# data need to store in hdf5 file
self._meta_data = {'id2vec': [[0. for i in range(self._embedding_size)]],
'id2word': [self.padding],
'id2char': [self.padding, '`'],
'id2pos': [self.padding],
'id2ent': [self.padding]}
self._data = {}
self._attr = {}
self._nlp = spacy.load('en')
self._nlp.remove_pipe('parser')
if not any([self._use_em_lemma, self._use_pos, self._use_ent]):
self._nlp.remove_pipe('tagger')
if not self._use_ent:
self._nlp.remove_pipe('ner')
def _load_config(self, global_config):
"""
load config from a dictionary, such as dataset path
:param global_config: dictionary
:return:
"""
data_config = global_config['data']
self._train_path = data_config['dataset']['train_path']
self._dev_path = data_config['dataset']['dev_path']
self._export_squad_path = data_config['dataset_h5']
self._glove_path = data_config['embedding_path']
self.preprocess_config = global_config['preprocess']
self._ignore_max_len = self.preprocess_config['ignore_max_len']
self._use_char = self.preprocess_config['use_char']
self._use_pos = self.preprocess_config['use_pos']
self._use_ent = self.preprocess_config['use_ent']
self._use_em = self.preprocess_config['use_em']
self._use_em_lemma = self.preprocess_config['use_em_lemma']
self._embedding_size = int(self.preprocess_config['word_embedding_size'])
def _read_json(self, path):
"""
read json format file from raw squad text
:param path: squad file path
:return:
"""
with open(path, 'r') as f:
data = json.load(f)
version = data['version']
data_list_tmp = [ele['paragraphs'] for ele in data['data']]
contexts_qas = reduce(lambda a, b: a + b, data_list_tmp)
self._attr['dataset_name'] = 'squad-' + version
return contexts_qas
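    # Each element of contexts_qas is one SQuAD paragraph dict of the form
    # {'context': <passage text>, 'qas': [{'id': ..., 'question': ..., 'answers': [...]}, ...]},
    # flattened across all articles by the reduce() above.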
def _build_data(self, contexts_qas, training):
"""
handle squad data to (context, question, answer_range) with word id representation
:param contexts_qas: a context with several question-answers
:return:
"""
contexts_doc = []
questions_doc = []
        answers_range_wid = []  # each answer uses the [start, end] representation; all answers are concatenated horizontally
samples_id = []
cnt = 0
# every context
for question_grp in contexts_qas:
cur_context = question_grp['context']
cur_qas = question_grp['qas']
cur_context_doc = DocText(self._nlp, cur_context, self.preprocess_config)
            if training and len(cur_context_doc) > self._ignore_max_len:  # skip contexts whose token length is too large
continue
if self._use_char:
self._update_to_char(cur_context)
cur_context_ids = self._doctext_to_id(cur_context_doc)
# every question-answer
for qa in cur_qas:
cur_question = qa['question']
if self._use_char:
self._update_to_char(cur_question)
cur_question_doc = DocText(self._nlp, cur_question, self.preprocess_config)
cur_question_ids = self._doctext_to_id(cur_question_doc)
# get em feature
if self._use_em or self._use_em_lemma:
cur_context_doc.update_em(cur_question_doc)
cur_question_doc.update_em(cur_context_doc)
cur_context_ids['em'] = cur_context_doc.em
cur_context_ids['em_lemma'] = cur_context_doc.em_lemma
cur_question_ids['em'] = cur_question_doc.em
cur_question_ids['em_lemma'] = cur_question_doc.em_lemma
contexts_doc.append(cur_context_ids)
questions_doc.append(cur_question_ids)
samples_id.append(qa['id'])
# find all the answer positions
cur_answers = qa['answers']
self._max_answer_len = max(self._max_answer_len, len(cur_answers) * 2)
cur_ans_range_ids = [0 for i in range(len(cur_answers) * 2)]
for idx, cur_ans in enumerate(cur_answers):
cur_ans_start = cur_ans['answer_start']
cur_ans_text = cur_ans['text']
pos_s, pos_e = self.find_ans_start_end(cur_context, cur_context_doc, cur_ans_text, cur_ans_start)
if pos_e < pos_s:
logger.error("Answer start position can't bigger than end position." +
"\nContext:" + cur_context +
"\nQuestion:" + cur_question +
"\nAnswer:" + cur_ans_text)
continue
gen_ans = ''.join(cur_context_doc.token[pos_s:(pos_e + 1)]).replace(' ', '')
true_ans = Space.remove_white_space(cur_ans['text'])
if true_ans not in gen_ans:
logger.error("Answer position wrong." +
"\nContext:" + cur_context +
"\nQuestion:" + cur_question +
"\nAnswer:" + cur_ans_text)
continue
cur_ans_range_ids[(idx * 2):(idx * 2 + 2)] = [pos_s, pos_e]
answers_range_wid.append(cur_ans_range_ids)
cnt += 1
if cnt % 100 == 0:
logger.info('No.%d sample handled.' % cnt)
return {'context': contexts_doc,
'question': questions_doc,
'answer_range': answers_range_wid,
'samples_id': samples_id}
def find_ans_start_end(self, context_text, context_doc, answer_text, answer_start):
# find answer start position
pre_ans_len = len(Space.remove_white_space(context_text[:answer_start]))
tmp_len = 0
pos_s = 0
for i in range(len(context_doc)):
tmp_len += len(context_doc.token[i])
if tmp_len > pre_ans_len:
pos_s = i
break
# find answer end position
pos_e = 0
tmp_str = ""
tmp_ans = Space.remove_white_space(answer_text)
        if tmp_ans[0] == '.':  # the squad dataset has some mistakes
tmp_ans = tmp_ans[1:]
for i in range(pos_s, len(context_doc)):
s = context_doc.token[i]
tmp_str += s
if tmp_ans in tmp_str:
pos_e = i
break
return pos_s, pos_e
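    # Illustrative trace of the offset mapping above (the strings are invented,
    # not taken from SQuAD): for context "New York City" with tokens
    # ['New', 'York', 'City'] and answer "York" at answer_start=4, the
    # whitespace-stripped prefix "New" has length 3; cumulative token lengths
    # (3, 7, 11) first exceed 3 at token index 1, so pos_s = 1, and "York" is
    # matched immediately, so pos_e = 1.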
def _doctext_to_id(self, doc_text):
"""
        transform a DocText sentence to word index id representation
        :param doc_text: DocText
:return: word ids
"""
sentence = {'token': [], 'pos': [], 'ent': [], 'right_space': doc_text.right_space}
for i in range(len(doc_text)):
# word
word = doc_text.token[i]
if word not in self._word2id:
self._word2id[word] = len(self._word2id)
self._meta_data['id2word'].append(word)
# whether OOV
if word in self._word2vec:
self._meta_data['id2vec'].append(self._word2vec[word])
else:
self._oov_num += 1
logger.debug('No.%d OOV word %s' % (self._oov_num, word))
self._meta_data['id2vec'].append([0. for i in range(self._embedding_size)])
sentence['token'].append(self._word2id[word])
# pos
if self._use_pos:
pos = doc_text.pos[i]
if pos not in self._pos2id:
self._pos2id[pos] = len(self._pos2id)
self._meta_data['id2pos'].append(pos)
sentence['pos'].append(self._pos2id[pos])
# ent
if self._use_ent:
ent = doc_text.ent[i]
if ent not in self._ent2id:
self._ent2id[ent] = len(self._ent2id)
self._meta_data['id2ent'].append(ent)
sentence['ent'].append(self._ent2id[ent])
return sentence
def _update_to_char(self, sentence):
"""
update char2id
:param sentence: raw sentence
"""
for ch in sentence:
if ch not in self._char2id:
self._char2id[ch] = len(self._char2id)
self._meta_data['id2char'].append(ch)
def _handle_glove(self):
"""
handle glove embeddings, restore embeddings with dictionary
:return:
"""
logger.info("read glove from text file %s" % self._glove_path)
with zipfile.ZipFile(self._glove_path, 'r') as zf:
if len(zf.namelist()) != 1:
raise ValueError('glove file "%s" not recognized' % self._glove_path)
glove_name = zf.namelist()[0]
word_num = 0
with zf.open(glove_name) as f:
for line in f:
line_split = line.decode('utf-8').split(' ')
self._word2vec[line_split[0]] = [float(x) for x in line_split[1:]]
word_num += 1
if word_num % 10000 == 0:
logger.info('handle word No.%d' % word_num)
def _export_squad_hdf5(self):
"""
export squad dataset to hdf5 file
:return:
"""
f = h5py.File(self._export_squad_path, 'w')
# str_dt = h5py.special_dtype(vlen=unicode)
str_dt = h5py.special_dtype(vlen=int)
# attributes
for attr_name in self._attr:
f.attrs[attr_name] = self._attr[attr_name]
# meta_data
f_meta_data = f.create_group('meta_data')
for key in ['id2word', 'id2char', 'id2pos', 'id2ent']:
value = np.array(self._meta_data[key], dtype=np.str)
meta_data = f_meta_data.create_dataset(key, value.shape, dtype=str_dt, **self._compress_option)
meta_data[...] = value
id2vec = np.array(self._meta_data['id2vec'], dtype=np.float32)
meta_data = f_meta_data.create_dataset('id2vec', id2vec.shape, dtype=id2vec.dtype, **self._compress_option)
meta_data[...] = id2vec
# data
f_data = f.create_group('data')
for key, value in self._data.items():
data_grp = f_data.create_group(key)
for sub_key, sub_value in value.items():
if isinstance(sub_value, dict):
sub_grp = data_grp.create_group(sub_key)
for subsub_key, subsub_value in sub_value.items():
if len(subsub_value) == 0:
continue
cur_dtype = str_dt if subsub_value.dtype.type is np.str_ else subsub_value.dtype
data = sub_grp.create_dataset(subsub_key, subsub_value.shape, dtype=cur_dtype,
**self._compress_option)
data[...] = subsub_value
else:
cur_dtype = str_dt if sub_value.dtype.type is np.str_ else sub_value.dtype
data = data_grp.create_dataset(sub_key, sub_value.shape, dtype=cur_dtype,
**self._compress_option)
data[...] = sub_value
f.flush()
f.close()
def run(self):
"""
main function to generate hdf5 file
:return:
"""
logger.info('handle glove file...')
self._handle_glove()
logger.info('read squad json...')
train_context_qas = self._read_json(self._train_path)
dev_context_qas = self._read_json(self._dev_path)
logger.info('transform word to id...')
train_cache_nopad = self._build_data(train_context_qas, training=True)
dev_cache_nopad = self._build_data(dev_context_qas, training=False)
self._attr['train_size'] = len(train_cache_nopad['answer_range'])
self._attr['dev_size'] = len(dev_cache_nopad['answer_range'])
self._attr['word_dict_size'] = len(self._word2id)
self._attr['char_dict_size'] = len(self._char2id)
self._attr['pos_dict_size'] = len(self._pos2id)
self._attr['ent_dict_size'] = len(self._ent2id)
self._attr['embedding_size'] = self._embedding_size
self._attr['oov_word_num'] = self._oov_num
logger.info('padding id vectors...')
self._data['train'] = {
'context': dict2array(train_cache_nopad['context']),
'question': dict2array(train_cache_nopad['question']),
'answer_range': np.array(train_cache_nopad['answer_range']),
'samples_id': np.array(train_cache_nopad['samples_id'])
}
self._data['dev'] = {
'context': dict2array(dev_cache_nopad['context']),
'question': dict2array(dev_cache_nopad['question']),
'answer_range': pad_sequences(dev_cache_nopad['answer_range'],
maxlen=self._max_answer_len,
padding='post',
value=self.answer_padding_idx),
'samples_id': np.array(dev_cache_nopad['samples_id'])
}
logger.info('export to hdf5 file...')
self._export_squad_hdf5()
logger.info('finished.')
def dict2array(data_doc):
"""
transform dict to numpy array
    :param data_doc: [{'token': [], 'pos': [], 'ent': [], 'em': [], 'em_lemma': [], 'right_space': []}]
:return:
"""
data = {'token': [], 'pos': [], 'ent': [], 'em': [], 'em_lemma': [], 'right_space': []}
max_len = 0
for ele in data_doc:
assert ele.keys() == data.keys()
if len(ele['token']) > max_len:
max_len = len(ele['token'])
for k in ele.keys():
if len(ele[k]) > 0:
data[k].append(ele[k])
for k in data.keys():
if len(data[k]) > 0:
data[k] = pad_sequences(data[k],
maxlen=max_len,
padding='post',
value=PreprocessData.padding_idx)
return data
|
the-stack_0_16420 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
`generalFunctions.py`
=================
Containing general purpose Python functions for small bits of manipulation.
Import it: import generalFunctions
Depends
=======
datetime
'''
import datetime
def empty(string):
if string in ['', ' ', None]:
return True
return False
def formatInteger(integerstring):
if empty(integerstring):
return None
return int(integerstring)
def formatString(string):
# NOTE: be careful stripping encoded strings, which may have empty values
# representing unknown values for particular values
if empty(string):
return None
return string
def formatDate(datestring):
'''Returns a datetime.date object when given a date as a string of the form
DD/MM/YYYY (e.g. 30/01/2014)'''
if empty(datestring):
return None
else:
try:
return datetime.datetime.strptime(datestring, "%d/%m/%Y").date()
except ValueError:
# Poorly formatted date in source data
return None
def ordinal(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
def formatNiceDate(datetime):
'''Takes a datetime.datetime (e.g. datetime(2014,1,1)) and returns a nice
string representation (e.g. "1st of January 2014"'''
if empty(datetime):
return None
return ordinal(datetime.day) + " %s %d" % (datetime.strftime("%B"), datetime.year)
def formatNiceTime(time):
'''Takes a datetime.time (e.g. time(12,0,0)) and returns a nice string representation
(e.g. 12:00). Seconds are ignored, and not even considered for rounding.'''
if empty(time):
return ''
t = str(time).split(":")
return "%s:%s" % (t[0],t[1])
def formatCrashTime(crashtime, dateobj):
'''Returns a datetime.time object when given a time as a string from the
`row`. These are purportedly recorded "in 24-hour time", but are lacking
leading zeros in the dataset, which is addressed here.'''
if empty(crashtime):
return None
return datetime.datetime.strptime(str(dateobj)+" "+'0'*(4-len(crashtime))+crashtime,'%Y-%m-%d %H%M').time()
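# Illustrative example (the values are hypothetical, not taken from the data):
# a crash time recorded as '130' on 30/01/2014 is zero-padded to '0130' and
# parsed as 24-hour time, so
#   formatCrashTime('130', formatDate('30/01/2014')) -> datetime.time(1, 30)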
def check_offroad(crash_road):
    '''Applies a check for 'Z': the flag for the off-road indicator, and corrects
strings representing these places so that they're a bit nicer to read.'''
if 'Z' in crash_road.split(' '):
# The crash was off-road
# Apply some special formatting to make this read nicely
# 1. Remove the now-superfluous 'Z'
crash_road = crash_road.split(' ')
crash_road.remove('Z')
# 2. Special exception for the use of 'Beach' at the beginning of some locations
if crash_road[0] == 'Beach' and len(crash_road) > 1:
crash_road = crash_road[1:] + [crash_road[0]]
        # 3. Expand the off-road abbreviations
patterns = {'CPK': 'Carpark',
'BCH': 'Beach',
'DWY': 'Driveway',
'DWAY': 'Driveway',
'FCT': 'Forecourt'}
for i, r in enumerate(crash_road):
if r.upper() in patterns.keys():
crash_road = crash_road[:i] + crash_road[i+1:] + [patterns[r.upper()], '(off-roadway)']
break
# Join it back up to a proper description
crash_road = ' '.join(crash_road)
return crash_road
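# Illustrative example (the input string is invented):
#   check_offroad('Z CPK Countdown') -> 'Countdown Carpark (off-roadway)'
# i.e. the 'Z' flag is dropped and the 'CPK' abbreviation is expanded.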
def streetExpander(road,streetdecoder):
'''Input: 'St John St' (for example)
Output: St John Street'''
# First, check St isn't the first element in a street name
check = road.replace('near ','').replace('at ','')
if check.split(' ')[0] == 'St' and 'St' not in check.split(' ')[1:]:
        # Then don't replace the St, as it means Saint and there is no 'Street' in the title
return road
# Otherwise, there are two instances of "St" and we want to only replace the second one
road = road.split(' ')
processed = []
road.reverse() # Flip order
for i, elem in enumerate(road):
        # Do it in reverse so only the last instance of a road-shortening trope
# gets expanded. This prevents "St John St" becoming "Street John
# Street" rather than "St John Street"
if (elem in streetdecoder.keys()) and (elem not in processed):
processed.append(elem)
road[i] = streetdecoder[elem]
road.reverse() # Back to original order
return ' '.join(road)
def formatNiceRoad(road):
'''Takes a location expressed as a road, or a street or a highway... and
makes some cosmetic changes. This includes taking State Highway linear
    references and returning something understandable to people.
Listed expressions from the documentation:
CPK = car park
BCH = beach
DWY = driveway
DWAY = driveway'''
def striplinearref(linref):
'''Fixes references to State Highways, by removing the linear referencing information'''
if '/' not in linref:
# Not a SH
return linref
elif '/' in linref:
try:
int(linref[0])
            except ValueError:
# Not a SH, just has a slash
return linref
# Remaining are State Highways
if len(linref.split(' ')) > 1 and ' at ' not in linref:
# There is other location information included
linref = linref.split(' ')[0] + ' (%s)' % ' '.join(linref.split(' ')[1:]).replace(' SH ',' State Highway ')
if ' at ' not in linref:
# SH without an intersection
SH = linref.split(' ')
SH = "State Highway %s " % SH[0].split('/')[0] + ' '.join(SH[1:])
else:
# SH with an intersection
linref = linref.split(' at ')
linref = [linref[0],'at',linref[1]]
for i, r in enumerate(linref):
if '/' in r:
linref[i] = "State Highway %s" % r.split('/')[0]
SH = ' '.join(linref)
return SH
def expander(road):
'''Takes `road' (street of crash as ordered list of strings) and runs
them past checks for acronyms and abbreviations known to exist in the data.
Acronyms are kept as acronyms, and abbreviations are expanded. Returns a
string (not the list), joined with spaces.'''
knownAcronyms = ['BP', 'VTNZ'] # Ensure acronyms stay acronyms
knownAbbreviations = {'Coun': 'Countdown',
'C/Down': 'Countdown',
'Reserv': 'Reserve',
'Stn': 'Station',
'Roa': 'Road',
'S': 'South',
'E': 'East',
'W': 'West',
'N': 'North',
'Riv': 'River',
'Br': 'Bridge',
'Wbd': 'Westbound',
'Ebd': 'Eastbound',
'Nbd': 'Northbound',
'Sbd': 'Southbound',
'Obr': 'Overbridge',
'Off': 'Off-ramp',
'On': 'On-ramp',
'Xing': 'Crossing',
'Mckays': 'McKays',
'Rly': 'Railway',
'Int': 'Interchange'}
for i, r in enumerate(road):
# Check for "knownAbbreviations" requires no brackets around term
rd, left, right = r, False, False
if '(' in rd:
left = True
rd = rd.replace('(','')
if ')' in rd:
right = True
rd = rd.replace(')','')
# Check acronyms
if rd.upper() in knownAcronyms:
rd = rd.upper()
# Check abbreviations
if rd.title() in knownAbbreviations.keys():
rd = knownAbbreviations[rd.title()]
            # Put brackets back, if necessary
if left:
rd = '(%s' % rd
if right:
rd = '%s)' % rd
# Update the element in road with the changes
road[i] = rd
# Join road to a single string and return
return ' '.join(road)
return expander(striplinearref(road).split(' '))
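# Illustrative example (the linear references are invented): an intersection of
# two State Highway linear references is rewritten for readability, e.g.
#   formatNiceRoad('1/100 at 2/50') -> 'State Highway 1 at State Highway 2'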
def formatStringList(listofstrings, delim=None):
'''Returns a list of strings given a string representation of a list data
structure, separated by `delim`.
Example:
input: '308A 371A 727B 929'
output: ['308A', '371A', '727B', '929']
If delim is None, each character of the string is assumed to be an
independent value'''
if listofstrings == None or listofstrings == []:
return None
if delim != None:
return [str(s) for s in listofstrings.split(delim) if not empty(s)]
elif delim == None:
return list(listofstrings)
def round_down(integer, base):
'''Rounds an `integer` down to the nearest `base`
E.g. round_down(19,10) >>> 10
round_down(19,5) >>> 15
round_down(10,10) >>> 10'''
return integer - (integer % base)
def grammar(singular, plural, integer):
'''Returns the string `singular` if integer == 1; else it returns `plural`
if integer > 1.
Example:
grammar('person', 'people', 1) >>> 'person'
grammar('person', 'people', 3) >>> 'people'
'''
if integer == 1:
return singular
elif integer > 1:
return plural
|
the-stack_0_16422 | # -*- coding: utf-8 -*-
"""Pyramid request argument parsing.
Example usage: ::
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from marshmallow import fields
from webargs.pyramidparser import use_args
hello_args = {
'name': fields.Str(missing='World')
}
@use_args(hello_args)
def hello_world(request, args):
return Response('Hello ' + args['name'])
if __name__ == '__main__':
config = Configurator()
config.add_route('hello', '/')
config.add_view(hello_world, route_name='hello')
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 6543, app)
server.serve_forever()
"""
import collections
import functools
from webob.multidict import MultiDict
from pyramid.httpexceptions import exception_response
from marshmallow.compat import text_type
from webargs import core
class PyramidParser(core.Parser):
"""Pyramid request argument parser."""
__location_map__ = dict(
matchdict='parse_matchdict',
**core.Parser.__location_map__)
def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.GET, name, field)
def parse_form(self, req, name, field):
"""Pull a form value from the request."""
return core.get_value(req.POST, name, field)
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
try:
json_data = req.json_body
except ValueError:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True)
def parse_cookies(self, req, name, field):
"""Pull the value from the cookiejar."""
return core.get_value(req.cookies, name, field)
def parse_headers(self, req, name, field):
"""Pull a value from the header data."""
return core.get_value(req.headers, name, field)
def parse_files(self, req, name, field):
"""Pull a file from the request."""
files = ((k, v) for k, v in req.POST.items() if hasattr(v, 'file'))
return core.get_value(MultiDict(files), name, field)
def parse_matchdict(self, req, name, field):
"""Pull a value from the request's `matchdict`."""
return core.get_value(req.matchdict, name, field)
def handle_error(self, error):
"""Handles errors during parsing. Aborts the current HTTP request and
        responds with a 422 error.
"""
status_code = getattr(error, 'status_code', 422)
raise exception_response(status_code, detail=text_type(error))
def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
as_kwargs=False, validate=None):
"""Decorator that injects parsed arguments into a view callable.
Supports the *Class-based View* pattern where `request` is saved as an instance
attribute on a view class.
:param dict argmap: Either a `marshmallow.Schema`, a `dict`
of argname -> `marshmallow.fields.Field` pairs, or a callable
which accepts a request and returns a `marshmallow.Schema`.
:param req: The request object to parse. Pulled off of the view by default.
:param tuple locations: Where on the request to search for values.
:param bool as_kwargs: Whether to insert arguments as keyword arguments.
:param callable validate: Validation function that receives the dictionary
of parsed arguments. If the function returns ``False``, the parser
will raise a :exc:`ValidationError`.
"""
locations = locations or self.locations
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
if isinstance(argmap, collections.Mapping):
argmap = core.argmap2schema(argmap)()
def decorator(func):
@functools.wraps(func)
def wrapper(obj, *args, **kwargs):
# The first argument is either `self` or `request`
try: # get self.request
request = req or obj.request
except AttributeError: # first arg is request
request = obj
# NOTE: At this point, argmap may be a Schema, callable, or dict
parsed_args = self.parse(argmap, req=request,
locations=locations, validate=validate,
force_all=as_kwargs)
if as_kwargs:
kwargs.update(parsed_args)
return func(obj, *args, **kwargs)
else:
return func(obj, parsed_args, *args, **kwargs)
return wrapper
return decorator
parser = PyramidParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
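# A minimal usage sketch for the extra 'matchdict' location (the route, view and
# field names below are illustrative, not part of this module):
#
#   from marshmallow import fields
#
#   @use_kwargs({'user_id': fields.Int(required=True)}, locations=('matchdict',))
#   def get_user(request, user_id):
#       return Response('user %d' % user_id)
#
# assuming a route registered as config.add_route('user', '/users/{user_id}').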
|
the-stack_0_16424 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 09:45:44 2016
@author: Arturo
"""
import signal
import sys
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
def interruptHandler(signal, frame):
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, interruptHandler)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
sensortemp = grove.GroveTemp(0)
    colorR = 255
    colorG = 0
    colorB = 0
    myLcd.setColor(colorR, colorG, colorB)
# Read the input and print, waiting 1/2 second between readings
while True:
        valorSensor = sensortemp.value()
        myLcd.setCursor(0, 0)
        myLcd.write('%6d' % valorSensor)
time.sleep(0.5)
del sensortemp |
the-stack_0_16426 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX.json')
def test_storage_encoding_KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
the-stack_0_16428 | import sys # Needed for sys.argv
from typing import List, Dict, Set
from statistics import mean
import collections
import csv
def get_climate(in_filename: str, out_filename: str) -> None:
"""Read historical weather from in_filename, write climate to out_filename.
Parameters
----------
in_filename : name of the input file
out_filename : name of the output file
"""
in_file = open(in_filename, 'r')
"""
What you should do:
1. Read each line of in_file
2. Skip the first (header) line
3. Split each line on commas
4. Get the year, month, and day
5. Update the statistics (total precip, total low temp, etc)
6. When done, open the output file.
7. for each day of the year:
8. Compute the climate for the day, write to output file.
"""
next(in_file) # Skips header row
total_precip = {}
total_tempmin = {}
total_tempmax = {}
record_tempmin = {}
record_tempmax = {}
total_tempmin_year = {}
total_tempmax_year = {}
century = 1900
previous_year = 0
for line in in_file.readlines():
line = line.rstrip('\r\n')
date, precip, tempmax, tempmin = line.split(",")
# Controls for bad data, such as no entry
if not date or not precip or not tempmax or not tempmin:
continue
# Converts ISO dates
if "-" in date:
year, month, day = date.split("-")
year = int(year)
# Converts US dates
if "/" in date:
month, day, year = date.split("/")
year = int(year)
if year < 100 and year < previous_year:
year += century
if year == 1999:
century = 2000
if len(month) == 1:
month = "0" + month
if len(day) == 1:
day = "0" + day
month_day = month + "/" + day
# Skips leap years
if month_day == "02/29":
continue
date_in_year = month + "/" + day + "/" + str(year)
# Used to keep track of when to increment century due to
# inconsistent date formatting.
previous_year = year
# Converts string data into floats.
# Needed for finding maximum, average, minimum.
precip = float(precip)
tempmax = float(tempmax)
tempmin = float(tempmin)
total_precip.setdefault(month_day, []).append(precip)
total_tempmin.setdefault(month_day, []).append(tempmin)
total_tempmax.setdefault(month_day, []).append(tempmax)
total_tempmin_year.setdefault(year, []).append(tempmin)
total_tempmax_year.setdefault(year, []).append(tempmax)
# Unsorted, but will be sorted as per assignment requirement.
avg_precip = {month_day: round(mean(precip), 1) for month_day, precip in total_precip.items()}
avg_tempmin = {month_day: round(mean(tempmin), 1) for month_day, tempmin in total_tempmin.items()}
avg_tempmax = {month_day: round(mean(tempmax), 1) for month_day, tempmax in total_tempmax.items()}
record_tempmin = {month_day: min(tempmin) for month_day, tempmin in total_tempmin.items()}
record_tempmax = {month_day: max(tempmax) for month_day, tempmax in total_tempmax.items()}
record_tempmin_year = {year: min(tempmin) for year, tempmin in total_tempmin_year.items()}
record_tempmax_year = {year: max(tempmax) for year, tempmax in total_tempmax_year.items()}
# Sorts dictionary keys, so that January 1st is first, and December 31st is last.
sorted_avg_precip = {k: avg_precip[k] for k in sorted(avg_precip)}
sorted_avg_tempmin = {k: avg_tempmin[k] for k in sorted(avg_tempmin)}
sorted_avg_tempmax = {k: avg_tempmax[k] for k in sorted(avg_tempmax)}
sorted_record_tempmin = {k: record_tempmin[k] for k in sorted(record_tempmin)}
sorted_record_tempmax = {k: record_tempmax[k] for k in sorted(record_tempmax)}
sorted_record_tempmin_year = {k: record_tempmin_year[k] for k in sorted(record_tempmin_year)}
sorted_record_tempmax_year = {k: record_tempmax_year[k] for k in sorted(record_tempmax_year)}
out_handle = open(out_filename, 'w')
out_handle.write("Day,Avg precip,Avg low,Avg high,Min low,Max high,Min low year,Max high year\n")
out_handle.write("{},{},{},{},{},{},{},{}\n".format(date_in_year, sorted_avg_precip, sorted_avg_tempmin, sorted_avg_tempmax,
sorted_record_tempmin, sorted_record_tempmax, sorted_record_tempmin_year, sorted_record_tempmax_year))
out_handle.close()
def usage():
"""Complain that the user ran the program incorrectly."""
sys.stderr.write('Usage:\n')
sys.stderr.write(' python climate.py <input-file.csv> <output-file.csv>\n')
sys.exit()
def main():
if len(sys.argv) != 3:
usage()
sys.exit()
in_filename: str = sys.argv[1]
out_filename: str = sys.argv[2]
get_climate(in_filename, out_filename)
if __name__ == '__main__':
main()
|
the-stack_0_16429 | # Define shout with the parameter, word
def shout(word):
"""Return a string with three exclamation marks"""
# Concatenate the strings: shout_word
    shout_word = word + '!!!'
# Replace print with return
return shout_word
# Pass 'congratulations' to shout: yell
yell = shout('congratulations')
# Print yell
print(yell) |
the-stack_0_16430 | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class AppInfo:
def __init__(self):
self.app_root = ''
self.app_version = '1.0.0'
self.app_versionCode = ''
self.fullscreen_flag = ''
self.icon = ''
self.name = 'AppTemplate'
self.orientation = 'unspecified'
self.original_name = ''
self.package = 'org.xwalk.app.template'
self.remote_debugging = ''
|
the-stack_0_16433 | # pylint: disable=too-many-lines
import copy
import json
import os
import re
import six
from typing import Any, Optional, Dict, List, Set, Union # noqa
from typing import cast
import yaml
from yaml.scanner import ScannerError
from yaml.nodes import Node, ScalarNode, SequenceNode
from chalice.deploy.swagger import (
CFNSwaggerGenerator, TerraformSwaggerGenerator)
from chalice.utils import (
OSUtils, UI, serialize_to_json, to_cfn_resource_name
)
from chalice.awsclient import TypedAWSClient # noqa
from chalice.config import Config # noqa
from chalice.deploy import models
from chalice.deploy.appgraph import ApplicationGraphBuilder, DependencyBuilder
from chalice.deploy.deployer import BuildStage # noqa
from chalice.deploy.deployer import create_build_stage
def create_app_packager(
config, options, package_format='cloudformation',
template_format='json', merge_template=None):
# type: (Config, PackageOptions, str, str, Optional[str]) -> AppPackager
osutils = OSUtils()
ui = UI()
application_builder = ApplicationGraphBuilder()
deps_builder = DependencyBuilder()
post_processors = [] # type: List[TemplatePostProcessor]
generator = None # type: Union[None, TemplateGenerator]
template_serializer = cast(TemplateSerializer, JSONTemplateSerializer())
if package_format == 'cloudformation':
build_stage = create_build_stage(
osutils, ui, CFNSwaggerGenerator(), config)
use_yaml_serializer = template_format == 'yaml'
if merge_template is not None and \
YAMLTemplateSerializer.is_yaml_template(merge_template):
# Automatically switch the serializer to yaml if they specify
# a yaml template to merge, regardless of what template format
# they specify.
use_yaml_serializer = True
if use_yaml_serializer:
template_serializer = YAMLTemplateSerializer()
post_processors.extend([
SAMCodeLocationPostProcessor(osutils=osutils),
TemplateMergePostProcessor(
osutils=osutils,
merger=TemplateDeepMerger(),
template_serializer=template_serializer,
merge_template=merge_template)])
generator = SAMTemplateGenerator(config, options)
else:
build_stage = create_build_stage(
osutils, ui, TerraformSwaggerGenerator(), config)
generator = TerraformGenerator(config, options)
post_processors.append(
TerraformCodeLocationPostProcessor(osutils=osutils))
resource_builder = ResourceBuilder(
application_builder, deps_builder, build_stage)
return AppPackager(
generator,
resource_builder,
CompositePostProcessor(post_processors),
template_serializer,
osutils)
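# Rough wiring sketch (the session/client objects and the package_app call are
# assumptions about the calling code, not defined here):
#
#   options = PackageOptions(TypedAWSClient(session))
#   packager = create_app_packager(config, options, package_format='terraform')
#   packager.package_app(config, outdir='packaged', chalice_stage_name='dev')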
class UnsupportedFeatureError(Exception):
pass
class DuplicateResourceNameError(Exception):
pass
class PackageOptions(object):
def __init__(self, client):
# type: (TypedAWSClient) -> None
self._client = client # type: TypedAWSClient
def service_principal(self, service):
# type: (str) -> str
dns_suffix = self._client.endpoint_dns_suffix(service,
self._client.region_name)
return self._client.service_principal(service,
self._client.region_name,
dns_suffix)
class ResourceBuilder(object):
def __init__(self,
application_builder, # type: ApplicationGraphBuilder
deps_builder, # type: DependencyBuilder
build_stage, # type: BuildStage
):
# type: (...) -> None
self._application_builder = application_builder
self._deps_builder = deps_builder
self._build_stage = build_stage
def construct_resources(self, config, chalice_stage_name):
# type: (Config, str) -> List[models.Model]
application = self._application_builder.build(
config, chalice_stage_name)
resources = self._deps_builder.build_dependencies(application)
self._build_stage.execute(config, resources)
# Rebuild dependencies in case the build stage modified
# the application graph.
resources = self._deps_builder.build_dependencies(application)
return resources
class TemplateGenerator(object):
template_file = None # type: str
def __init__(self, config, options):
# type: (Config, PackageOptions) -> None
self._config = config
self._options = options
def dispatch(self, resource, template):
# type: (models.Model, Dict[str, Any]) -> None
name = '_generate_%s' % resource.__class__.__name__.lower()
handler = getattr(self, name, self._default)
handler(resource, template)
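    # For example, a models.ScheduledEvent instance is routed to
    # self._generate_scheduledevent(resource, template); resources without a
    # matching _generate_* method fall through to self._default, which raises
    # UnsupportedFeatureError.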
def generate(self, resources):
# type: (List[models.Model]) -> Dict[str, Any]
raise NotImplementedError()
def _generate_filebasediampolicy(self, resource, template):
# type: (models.FileBasedIAMPolicy, Dict[str, Any]) -> None
pass
def _generate_autogeniampolicy(self, resource, template):
# type: (models.AutoGenIAMPolicy, Dict[str, Any]) -> None
pass
def _generate_deploymentpackage(self, resource, template):
# type: (models.DeploymentPackage, Dict[str, Any]) -> None
pass
def _generate_precreatediamrole(self, resource, template):
# type: (models.PreCreatedIAMRole, Dict[str, Any]) -> None
pass
def _default(self, resource, template):
# type: (models.Model, Dict[str, Any]) -> None
raise UnsupportedFeatureError(resource)
class SAMTemplateGenerator(TemplateGenerator):
_BASE_TEMPLATE = {
'AWSTemplateFormatVersion': '2010-09-09',
'Transform': 'AWS::Serverless-2016-10-31',
'Outputs': {},
'Resources': {},
}
template_file = "sam"
def __init__(self, config, options):
# type: (Config, PackageOptions) -> None
super(SAMTemplateGenerator, self).__init__(config, options)
self._seen_names = set([]) # type: Set[str]
self._chalice_layer = ""
def generate(self, resources):
# type: (List[models.Model]) -> Dict[str, Any]
template = copy.deepcopy(self._BASE_TEMPLATE)
self._seen_names.clear()
for resource in resources:
self.dispatch(resource, template)
return template
def _generate_lambdalayer(self, resource, template):
# type: (models.LambdaLayer, Dict[str, Any]) -> None
layer = to_cfn_resource_name(
resource.resource_name)
template['Resources'][layer] = {
"Type": "AWS::Serverless::LayerVersion",
"Properties": {
"CompatibleRuntimes": [resource.runtime],
"ContentUri": resource.deployment_package.filename,
"LayerName": resource.layer_name
}
}
self._chalice_layer = layer
def _generate_scheduledevent(self, resource, template):
# type: (models.ScheduledEvent, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
event_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
function_cfn['Properties']['Events'] = {
event_cfn_name: {
'Type': 'Schedule',
'Properties': {
'Schedule': resource.schedule_expression,
}
}
}
def _generate_cloudwatchevent(self, resource, template):
# type: (models.CloudWatchEvent, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
event_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
function_cfn['Properties']['Events'] = {
event_cfn_name: {
'Type': 'CloudWatchEvent',
'Properties': {
# For api calls we need serialized string form, for
# SAM Templates we need datastructures.
'Pattern': json.loads(resource.event_pattern)
}
}
}
def _generate_lambdafunction(self, resource, template):
# type: (models.LambdaFunction, Dict[str, Any]) -> None
resources = template['Resources']
cfn_name = self._register_cfn_resource_name(resource.resource_name)
lambdafunction_definition = {
'Type': 'AWS::Serverless::Function',
'Properties': {
'Runtime': resource.runtime,
'Handler': resource.handler,
'CodeUri': resource.deployment_package.filename,
'Tags': resource.tags,
'Tracing': resource.xray and 'Active' or 'PassThrough',
'Timeout': resource.timeout,
'MemorySize': resource.memory_size,
},
} # type: Dict[str, Any]
if resource.environment_variables:
environment_config = {
'Environment': {
'Variables': resource.environment_variables
}
} # type: Dict[str, Dict[str, Dict[str, str]]]
lambdafunction_definition['Properties'].update(environment_config)
if resource.security_group_ids and resource.subnet_ids:
vpc_config = {
'VpcConfig': {
'SecurityGroupIds': resource.security_group_ids,
'SubnetIds': resource.subnet_ids,
}
} # type: Dict[str, Dict[str, List[str]]]
lambdafunction_definition['Properties'].update(vpc_config)
if resource.reserved_concurrency is not None:
reserved_concurrency_config = {
'ReservedConcurrentExecutions': resource.reserved_concurrency
}
lambdafunction_definition['Properties'].update(
reserved_concurrency_config)
layers = list(resource.layers) or [] # type: List[Any]
if self._chalice_layer:
layers.insert(0, {'Ref': self._chalice_layer})
if layers:
layers_config = {
'Layers': layers
} # type: Dict[str, Any]
lambdafunction_definition['Properties'].update(layers_config)
resources[cfn_name] = lambdafunction_definition
self._add_iam_role(resource, resources[cfn_name])
def _add_iam_role(self, resource, cfn_resource):
# type: (models.LambdaFunction, Dict[str, Any]) -> None
role = resource.role
if isinstance(role, models.ManagedIAMRole):
cfn_resource['Properties']['Role'] = {
'Fn::GetAtt': [
to_cfn_resource_name(role.resource_name), 'Arn'
],
}
else:
# resource is a PreCreatedIAMRole. This is the only other
# subclass of IAMRole.
role = cast(models.PreCreatedIAMRole, role)
cfn_resource['Properties']['Role'] = role.role_arn
def _generate_restapi(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
resources = template['Resources']
resources['RestAPI'] = {
'Type': 'AWS::Serverless::Api',
'Properties': {
'EndpointConfiguration': resource.endpoint_type,
'StageName': resource.api_gateway_stage,
'DefinitionBody': resource.swagger_doc,
}
}
if resource.minimum_compression:
properties = resources['RestAPI']['Properties']
properties['MinimumCompressionSize'] = \
int(resource.minimum_compression)
handler_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
api_handler = template['Resources'].pop(handler_cfn_name)
template['Resources']['APIHandler'] = api_handler
resources['APIHandlerInvokePermission'] = {
'Type': 'AWS::Lambda::Permission',
'Properties': {
'FunctionName': {'Ref': 'APIHandler'},
'Action': 'lambda:InvokeFunction',
'Principal': self._options.service_principal('apigateway'),
'SourceArn': {
'Fn::Sub': [
('arn:${AWS::Partition}:execute-api:${AWS::Region}'
':${AWS::AccountId}:${RestAPIId}/*'),
{'RestAPIId': {'Ref': 'RestAPI'}},
]
},
}
}
for auth in resource.authorizers:
auth_cfn_name = to_cfn_resource_name(auth.resource_name)
resources[auth_cfn_name + 'InvokePermission'] = {
'Type': 'AWS::Lambda::Permission',
'Properties': {
'FunctionName': {'Fn::GetAtt': [auth_cfn_name, 'Arn']},
'Action': 'lambda:InvokeFunction',
'Principal': self._options.service_principal('apigateway'),
'SourceArn': {
'Fn::Sub': [
('arn:${AWS::Partition}:execute-api'
':${AWS::Region}:${AWS::AccountId}'
':${RestAPIId}/*'),
{'RestAPIId': {'Ref': 'RestAPI'}},
]
},
}
}
self._add_domain_name(resource, template)
self._inject_restapi_outputs(template)
def _inject_restapi_outputs(self, template):
# type: (Dict[str, Any]) -> None
# The 'Outputs' of the SAM template are considered
# part of the public API of chalice and therefore
# need to maintain backwards compatibility. This
# method uses the same output key names as the old
# deployer.
# For now, we aren't adding any of the new resources
# to the Outputs section until we can figure out
        # a consistent naming scheme. Ideally we don't use
# the autogen'd names that contain the md5 suffixes.
stage_name = template['Resources']['RestAPI'][
'Properties']['StageName']
outputs = template['Outputs']
outputs['RestAPIId'] = {
'Value': {'Ref': 'RestAPI'}
}
outputs['APIHandlerName'] = {
'Value': {'Ref': 'APIHandler'}
}
outputs['APIHandlerArn'] = {
'Value': {'Fn::GetAtt': ['APIHandler', 'Arn']}
}
outputs['EndpointURL'] = {
'Value': {
'Fn::Sub': (
'https://${RestAPI}.execute-api.${AWS::Region}'
# The api_gateway_stage is filled in when
# the template is built.
'.${AWS::URLSuffix}/%s/'
) % stage_name
}
}
def _add_websocket_lambda_integration(
self, api_ref, websocket_handler, resources):
# type: (Dict[str, Any], str, Dict[str, Any]) -> None
resources['%sAPIIntegration' % websocket_handler] = {
'Type': 'AWS::ApiGatewayV2::Integration',
'Properties': {
'ApiId': api_ref,
'ConnectionType': 'INTERNET',
'ContentHandlingStrategy': 'CONVERT_TO_TEXT',
'IntegrationType': 'AWS_PROXY',
'IntegrationUri': {
'Fn::Sub': [
(
'arn:${AWS::Partition}:apigateway:${AWS::Region}'
':lambda:path/2015-03-31/functions/arn'
':${AWS::Partition}:lambda:${AWS::Region}'
':${AWS::AccountId}:function'
':${WebsocketHandler}/invocations'
),
{'WebsocketHandler': {'Ref': websocket_handler}}
],
}
}
}
def _add_websocket_lambda_invoke_permission(
self, api_ref, websocket_handler, resources):
# type: (Dict[str, str], str, Dict[str, Any]) -> None
resources['%sInvokePermission' % websocket_handler] = {
'Type': 'AWS::Lambda::Permission',
'Properties': {
'FunctionName': {'Ref': websocket_handler},
'Action': 'lambda:InvokeFunction',
'Principal': self._options.service_principal('apigateway'),
'SourceArn': {
'Fn::Sub': [
('arn:${AWS::Partition}:execute-api'
':${AWS::Region}:${AWS::AccountId}'
':${WebsocketAPIId}/*'),
{'WebsocketAPIId': api_ref},
],
},
}
}
def _add_websocket_lambda_integrations(self, api_ref, resources):
# type: (Dict[str, str], Dict[str, Any]) -> None
websocket_handlers = [
'WebsocketConnect',
'WebsocketMessage',
'WebsocketDisconnect',
]
for handler in websocket_handlers:
if handler in resources:
self._add_websocket_lambda_integration(
api_ref, handler, resources)
self._add_websocket_lambda_invoke_permission(
api_ref, handler, resources)
def _create_route_for_key(self, route_key, api_ref):
# type: (str, Dict[str, str]) -> Dict[str, Any]
integration_ref = {
'$connect': 'WebsocketConnectAPIIntegration',
'$disconnect': 'WebsocketDisconnectAPIIntegration',
}.get(route_key, 'WebsocketMessageAPIIntegration')
return {
'Type': 'AWS::ApiGatewayV2::Route',
'Properties': {
'ApiId': api_ref,
'RouteKey': route_key,
'Target': {
'Fn::Join': [
'/',
[
'integrations',
{'Ref': integration_ref},
]
]
},
},
}
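    # For instance, _create_route_for_key('$connect', {'Ref': 'WebsocketAPI'})
    # produces an AWS::ApiGatewayV2::Route whose Target joins 'integrations'
    # with {'Ref': 'WebsocketConnectAPIIntegration'}.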
def _generate_websocketapi(self, resource, template):
# type: (models.WebsocketAPI, Dict[str, Any]) -> None
resources = template['Resources']
api_ref = {'Ref': 'WebsocketAPI'}
resources['WebsocketAPI'] = {
'Type': 'AWS::ApiGatewayV2::Api',
'Properties': {
'Name': resource.name,
'RouteSelectionExpression': '$request.body.action',
'ProtocolType': 'WEBSOCKET',
}
}
self._add_websocket_lambda_integrations(api_ref, resources)
route_key_names = []
for route in resource.routes:
key_name = 'Websocket%sRoute' % route.replace(
'$', '').replace('default', 'message').capitalize()
route_key_names.append(key_name)
resources[key_name] = self._create_route_for_key(route, api_ref)
resources['WebsocketAPIDeployment'] = {
'Type': 'AWS::ApiGatewayV2::Deployment',
'DependsOn': route_key_names,
'Properties': {
'ApiId': api_ref,
}
}
resources['WebsocketAPIStage'] = {
'Type': 'AWS::ApiGatewayV2::Stage',
'Properties': {
'ApiId': api_ref,
'DeploymentId': {'Ref': 'WebsocketAPIDeployment'},
'StageName': resource.api_gateway_stage,
}
}
self._add_websocket_domain_name(resource, template)
self._inject_websocketapi_outputs(template)
def _inject_websocketapi_outputs(self, template):
# type: (Dict[str, Any]) -> None
# The 'Outputs' of the SAM template are considered
# part of the public API of chalice and therefore
# need to maintain backwards compatibility. This
# method uses the same output key names as the old
# deployer.
# For now, we aren't adding any of the new resources
# to the Outputs section until we can figure out
        # a consistent naming scheme. Ideally we don't use
# the autogen'd names that contain the md5 suffixes.
stage_name = template['Resources']['WebsocketAPIStage'][
'Properties']['StageName']
outputs = template['Outputs']
resources = template['Resources']
outputs['WebsocketAPIId'] = {
'Value': {'Ref': 'WebsocketAPI'}
}
if 'WebsocketConnect' in resources:
outputs['WebsocketConnectHandlerArn'] = {
'Value': {'Fn::GetAtt': ['WebsocketConnect', 'Arn']}
}
outputs['WebsocketConnectHandlerName'] = {
'Value': {'Ref': 'WebsocketConnect'}
}
if 'WebsocketMessage' in resources:
outputs['WebsocketMessageHandlerArn'] = {
'Value': {'Fn::GetAtt': ['WebsocketMessage', 'Arn']}
}
outputs['WebsocketMessageHandlerName'] = {
'Value': {'Ref': 'WebsocketMessage'}
}
if 'WebsocketDisconnect' in resources:
outputs['WebsocketDisconnectHandlerArn'] = {
'Value': {'Fn::GetAtt': ['WebsocketDisconnect', 'Arn']}
            }
outputs['WebsocketDisconnectHandlerName'] = {
'Value': {'Ref': 'WebsocketDisconnect'}
}
outputs['WebsocketConnectEndpointURL'] = {
'Value': {
'Fn::Sub': (
'wss://${WebsocketAPI}.execute-api.${AWS::Region}'
# The api_gateway_stage is filled in when
# the template is built.
'.${AWS::URLSuffix}/%s/'
) % stage_name
}
}
# The various IAM roles/policies are handled in the
# Lambda function generation. We're creating these
# noop methods to indicate we've accounted for these
# resources.
def _generate_managediamrole(self, resource, template):
# type: (models.ManagedIAMRole, Dict[str, Any]) -> None
role_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
resource.trust_policy['Statement'][0]['Principal']['Service'] = \
self._options.service_principal('lambda')
template['Resources'][role_cfn_name] = {
'Type': 'AWS::IAM::Role',
'Properties': {
'AssumeRolePolicyDocument': resource.trust_policy,
'Policies': [
{'PolicyDocument': resource.policy.document,
'PolicyName': role_cfn_name + 'Policy'},
],
}
}
def _generate_s3bucketnotification(self, resource, template):
# type: (models.S3BucketNotification, Dict[str, Any]) -> None
message = (
"Unable to package chalice apps that @app.on_s3_event decorator. "
"CloudFormation does not support modifying the event "
"notifications of existing buckets. "
"You can deploy this app using `chalice deploy`."
)
raise NotImplementedError(message)
def _generate_snslambdasubscription(self, resource, template):
# type: (models.SNSLambdaSubscription, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
sns_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
if re.match(r"^arn:aws[a-z\-]*:sns:", resource.topic):
topic_arn = resource.topic # type: Union[str, Dict[str, str]]
else:
topic_arn = {
'Fn::Sub': (
'arn:${AWS::Partition}:sns'
':${AWS::Region}:${AWS::AccountId}:%s' %
resource.topic
)
}
function_cfn['Properties']['Events'] = {
sns_cfn_name: {
'Type': 'SNS',
'Properties': {
'Topic': topic_arn,
}
}
}
def _generate_sqseventsource(self, resource, template):
# type: (models.SQSEventSource, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
sqs_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
queue = '' # type: Union[str, Dict[str, Any]]
if isinstance(resource.queue, models.QueueARN):
queue = resource.queue.arn
else:
queue = {
'Fn::Sub': ('arn:${AWS::Partition}:sqs:${AWS::Region}'
':${AWS::AccountId}:%s' % resource.queue)
}
function_cfn['Properties']['Events'] = {
sqs_cfn_name: {
'Type': 'SQS',
'Properties': {
'Queue': queue,
'BatchSize': resource.batch_size,
'MaximumBatchingWindowInSeconds':
resource.maximum_batching_window_in_seconds,
}
}
}
def _generate_kinesiseventsource(self, resource, template):
# type: (models.KinesisEventSource, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
kinesis_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
properties = {
'Stream': {
'Fn::Sub': (
'arn:${AWS::Partition}:kinesis:${AWS::Region}'
':${AWS::AccountId}:stream/%s' %
resource.stream
)
},
'BatchSize': resource.batch_size,
'StartingPosition': resource.starting_position,
'MaximumBatchingWindowInSeconds':
resource.maximum_batching_window_in_seconds,
}
function_cfn['Properties']['Events'] = {
kinesis_cfn_name: {
'Type': 'Kinesis',
'Properties': properties
}
}
def _generate_dynamodbeventsource(self, resource, template):
# type: (models.DynamoDBEventSource, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
ddb_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
properties = {
'Stream': resource.stream_arn,
'BatchSize': resource.batch_size,
'StartingPosition': resource.starting_position,
'MaximumBatchingWindowInSeconds':
resource.maximum_batching_window_in_seconds,
}
function_cfn['Properties']['Events'] = {
ddb_cfn_name: {
'Type': 'DynamoDB',
'Properties': properties
}
}
def _generate_apimapping(self, resource, template):
# type: (models.APIMapping, Dict[str, Any]) -> None
pass
def _generate_domainname(self, resource, template):
# type: (models.DomainName, Dict[str, Any]) -> None
pass
def _add_domain_name(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
if resource.domain_name is None:
return
domain_name = resource.domain_name
endpoint_type = resource.endpoint_type
cfn_name = to_cfn_resource_name(domain_name.resource_name)
properties = {
'DomainName': domain_name.domain_name,
'EndpointConfiguration': {
'Types': [endpoint_type],
}
} # type: Dict[str, Any]
if endpoint_type == 'EDGE':
properties['CertificateArn'] = domain_name.certificate_arn
else:
properties['RegionalCertificateArn'] = domain_name.certificate_arn
if domain_name.tls_version is not None:
properties['SecurityPolicy'] = domain_name.tls_version.value
if domain_name.tags:
properties['Tags'] = [
{'Key': key, 'Value': value}
for key, value in sorted(domain_name.tags.items())
]
template['Resources'][cfn_name] = {
'Type': 'AWS::ApiGateway::DomainName',
'Properties': properties
}
template['Resources'][cfn_name + 'Mapping'] = {
'Type': 'AWS::ApiGateway::BasePathMapping',
'Properties': {
'DomainName': {'Ref': 'ApiGatewayCustomDomain'},
'RestApiId': {'Ref': 'RestAPI'},
'BasePath': domain_name.api_mapping.mount_path,
'Stage': resource.api_gateway_stage,
}
}
def _add_websocket_domain_name(self, resource, template):
# type: (models.WebsocketAPI, Dict[str, Any]) -> None
if resource.domain_name is None:
return
domain_name = resource.domain_name
cfn_name = to_cfn_resource_name(domain_name.resource_name)
properties = {
'DomainName': domain_name.domain_name,
'DomainNameConfigurations': [
{'CertificateArn': domain_name.certificate_arn,
'EndpointType': 'REGIONAL'},
]
}
if domain_name.tags:
properties['Tags'] = domain_name.tags
template['Resources'][cfn_name] = {
'Type': 'AWS::ApiGatewayV2::DomainName',
'Properties': properties,
}
template['Resources'][cfn_name + 'Mapping'] = {
'Type': 'AWS::ApiGatewayV2::ApiMapping',
'Properties': {
'DomainName': {'Ref': cfn_name},
'ApiId': {'Ref': 'WebsocketAPI'},
'ApiMappingKey': domain_name.api_mapping.mount_path,
'Stage': {'Ref': 'WebsocketAPIStage'},
}
}
def _register_cfn_resource_name(self, name):
# type: (str) -> str
cfn_name = to_cfn_resource_name(name)
if cfn_name in self._seen_names:
raise DuplicateResourceNameError(
'A duplicate resource name was generated for '
'the SAM template: %s' % cfn_name,
)
self._seen_names.add(cfn_name)
return cfn_name
class TerraformGenerator(TemplateGenerator):
template_file = "chalice.tf"
def __init__(self, config, options):
# type: (Config, PackageOptions) -> None
super(TerraformGenerator, self).__init__(config, options)
self._chalice_layer = ""
def generate(self, resources):
# type: (List[models.Model]) -> Dict[str, Any]
template = {
'resource': {},
'locals': {},
'terraform': {
'required_version': '>= 0.12.26, < 1.2.0',
'required_providers': {
'aws': {'version': '>= 2, < 4'},
'null': {'version': '>= 2, < 4'}
}
},
'data': {
'aws_caller_identity': {'chalice': {}},
'aws_partition': {'chalice': {}},
'aws_region': {'chalice': {}},
'null_data_source': {
'chalice': {
'inputs': {
'app': self._config.app_name,
'stage': self._config.chalice_stage
}
}
}
}
}
for resource in resources:
self.dispatch(resource, template)
return template
def _fref(self, lambda_function, attr='arn'):
# type: (models.ManagedModel, str) -> str
return '${aws_lambda_function.%s.%s}' % (
lambda_function.resource_name, attr)
def _arnref(self, arn_template, **kw):
# type: (str, str) -> str
d = dict(
partition='${data.aws_partition.chalice.partition}',
region='${data.aws_region.chalice.name}',
account_id='${data.aws_caller_identity.chalice.account_id}')
d.update(kw)
return arn_template % d
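    # For example (the queue name is illustrative):
    #   self._arnref('arn:%(partition)s:sqs:%(region)s:%(account_id)s:%(queue)s',
    #                queue='jobs')
    # returns 'arn:${data.aws_partition.chalice.partition}:sqs:'
    #         '${data.aws_region.chalice.name}:'
    #         '${data.aws_caller_identity.chalice.account_id}:jobs' as one string.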
def _generate_managediamrole(self, resource, template):
# type: (models.ManagedIAMRole, Dict[str, Any]) -> None
resource.trust_policy['Statement'][0]['Principal']['Service'] = \
self._options.service_principal('lambda')
template['resource'].setdefault('aws_iam_role', {})[
resource.resource_name] = {
'name': resource.role_name,
'assume_role_policy': json.dumps(resource.trust_policy)
}
template['resource'].setdefault('aws_iam_role_policy', {})[
resource.resource_name] = {
'name': resource.resource_name + 'Policy',
'policy': json.dumps(resource.policy.document),
'role': '${aws_iam_role.%s.id}' % resource.resource_name,
}
def _generate_websocketapi(self, resource, template):
# type: (models.WebsocketAPI, Dict[str, Any]) -> None
message = (
"Unable to package chalice apps that use experimental "
"Websocket decorators. Terraform AWS Provider "
"support for websocket is pending see "
"https://git.io/fj9X8 for details and progress. "
"You can deploy this app using `chalice deploy`."
)
raise NotImplementedError(message)
def _generate_s3bucketnotification(self, resource, template):
# type: (models.S3BucketNotification, Dict[str, Any]) -> None
bnotify = {
'events': resource.events,
'lambda_function_arn': self._fref(resource.lambda_function)
}
if resource.prefix:
bnotify['filter_prefix'] = resource.prefix
if resource.suffix:
bnotify['filter_suffix'] = resource.suffix
# we use the bucket name here because we need to aggregate
        # all the notification subscribers for a bucket.
# Due to cyclic references to buckets created in terraform
# we also try to detect and resolve.
if '{aws_s3_bucket.' in resource.bucket:
bucket_name = resource.bucket.split('.')[1]
else:
bucket_name = resource.bucket
template['resource'].setdefault(
'aws_s3_bucket_notification', {}).setdefault(
bucket_name + '_notify',
{'bucket': resource.bucket}).setdefault(
'lambda_function', []).append(bnotify)
template['resource'].setdefault('aws_lambda_permission', {})[
resource.resource_name] = {
'statement_id': resource.resource_name,
'action': 'lambda:InvokeFunction',
'function_name': self._fref(resource.lambda_function),
'principal': self._options.service_principal('s3'),
'source_account': '${data.aws_caller_identity.chalice.account_id}',
'source_arn': ('arn:${data.aws_partition.chalice.partition}:'
's3:::%s' % resource.bucket)
}
def _generate_sqseventsource(self, resource, template):
# type: (models.SQSEventSource, Dict[str, Any]) -> None
if isinstance(resource.queue, models.QueueARN):
event_source_arn = resource.queue.arn
else:
event_source_arn = self._arnref(
"arn:%(partition)s:sqs:%(region)s"
":%(account_id)s:%(queue)s",
queue=resource.queue
)
template['resource'].setdefault('aws_lambda_event_source_mapping', {})[
resource.resource_name] = {
'event_source_arn': event_source_arn,
'batch_size': resource.batch_size,
'maximum_batching_window_in_seconds':
resource.maximum_batching_window_in_seconds,
'function_name': self._fref(resource.lambda_function)
}
def _generate_kinesiseventsource(self, resource, template):
# type: (models.KinesisEventSource, Dict[str, Any]) -> None
template['resource'].setdefault('aws_lambda_event_source_mapping', {})[
resource.resource_name] = {
'event_source_arn': self._arnref(
"arn:%(partition)s:kinesis:%(region)s"
":%(account_id)s:stream/%(stream)s",
stream=resource.stream),
'batch_size': resource.batch_size,
'starting_position': resource.starting_position,
'maximum_batching_window_in_seconds':
resource.maximum_batching_window_in_seconds,
'function_name': self._fref(resource.lambda_function)
}
def _generate_dynamodbeventsource(self, resource, template):
# type: (models.DynamoDBEventSource, Dict[str, Any]) -> None
template['resource'].setdefault('aws_lambda_event_source_mapping', {})[
resource.resource_name] = {
'event_source_arn': resource.stream_arn,
'batch_size': resource.batch_size,
'starting_position': resource.starting_position,
'maximum_batching_window_in_seconds':
resource.maximum_batching_window_in_seconds,
'function_name': self._fref(resource.lambda_function),
}
def _generate_snslambdasubscription(self, resource, template):
# type: (models.SNSLambdaSubscription, Dict[str, Any]) -> None
if resource.topic.startswith('arn:aws'):
topic_arn = resource.topic
else:
topic_arn = self._arnref(
'arn:%(partition)s:sns:%(region)s:%(account_id)s:%(topic)s',
topic=resource.topic)
template['resource'].setdefault('aws_sns_topic_subscription', {})[
resource.resource_name] = {
'topic_arn': topic_arn,
'protocol': 'lambda',
'endpoint': self._fref(resource.lambda_function)
}
template['resource'].setdefault('aws_lambda_permission', {})[
resource.resource_name] = {
'function_name': self._fref(resource.lambda_function),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('sns'),
'source_arn': topic_arn
}
def _generate_cloudwatchevent(self, resource, template):
# type: (models.CloudWatchEvent, Dict[str, Any]) -> None
template['resource'].setdefault(
'aws_cloudwatch_event_rule', {})[
resource.resource_name] = {
'name': resource.resource_name,
'event_pattern': resource.event_pattern
}
self._cwe_helper(resource, template)
def _generate_scheduledevent(self, resource, template):
# type: (models.ScheduledEvent, Dict[str, Any]) -> None
template['resource'].setdefault(
'aws_cloudwatch_event_rule', {})[
resource.resource_name] = {
'name': resource.resource_name,
'schedule_expression': resource.schedule_expression,
'description': resource.rule_description,
}
self._cwe_helper(resource, template)
def _cwe_helper(self, resource, template):
# type: (models.CloudWatchEventBase, Dict[str, Any]) -> None
template['resource'].setdefault(
'aws_cloudwatch_event_target', {})[
resource.resource_name] = {
'rule': '${aws_cloudwatch_event_rule.%s.name}' % (
resource.resource_name),
'target_id': resource.resource_name,
'arn': self._fref(resource.lambda_function)
}
template['resource'].setdefault(
'aws_lambda_permission', {})[
resource.resource_name] = {
'function_name': self._fref(resource.lambda_function),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('events'),
'source_arn': "${aws_cloudwatch_event_rule.%s.arn}" % (
resource.resource_name)
}
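    # Illustrative only: for a ScheduledEvent whose resource_name is, say,
    # 'foo-event', _generate_scheduledevent plus _cwe_helper contribute roughly
    # this fragment to template['resource'] (the lambda reference and service
    # principal are resolved by _fref/_options and are shown schematically):
    #
    #   "aws_cloudwatch_event_rule": {"foo-event": {
    #       "name": "foo-event", "schedule_expression": "rate(5 minutes)", ...}},
    #   "aws_cloudwatch_event_target": {"foo-event": {
    #       "rule": "${aws_cloudwatch_event_rule.foo-event.name}",
    #       "target_id": "foo-event", "arn": "<lambda function reference>"}},
    #   "aws_lambda_permission": {"foo-event": {
    #       "function_name": "<lambda function reference>",
    #       "action": "lambda:InvokeFunction", "principal": "<events principal>",
    #       "source_arn": "${aws_cloudwatch_event_rule.foo-event.arn}"}}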
def _generate_lambdalayer(self, resource, template):
# type: (models.LambdaLayer, Dict[str, Any]) -> None
template['resource'].setdefault(
"aws_lambda_layer_version", {})[
resource.resource_name] = {
'layer_name': resource.layer_name,
'compatible_runtimes': [resource.runtime],
'filename': resource.deployment_package.filename,
}
self._chalice_layer = resource.resource_name
def _generate_lambdafunction(self, resource, template):
# type: (models.LambdaFunction, Dict[str, Any]) -> None
func_definition = {
'function_name': resource.function_name,
'runtime': resource.runtime,
'handler': resource.handler,
'memory_size': resource.memory_size,
'tags': resource.tags,
'timeout': resource.timeout,
'source_code_hash': '${filebase64sha256("%s")}' % (
resource.deployment_package.filename),
'filename': resource.deployment_package.filename
} # type: Dict[str, Any]
if resource.security_group_ids and resource.subnet_ids:
func_definition['vpc_config'] = {
'subnet_ids': resource.subnet_ids,
'security_group_ids': resource.security_group_ids
}
if resource.reserved_concurrency is not None:
func_definition['reserved_concurrent_executions'] = (
resource.reserved_concurrency
)
if resource.environment_variables:
func_definition['environment'] = {
'variables': resource.environment_variables
}
if resource.xray:
func_definition['tracing_config'] = {
'mode': 'Active'
}
if self._chalice_layer:
func_definition['layers'] = [
'${aws_lambda_layer_version.%s.arn}' % self._chalice_layer
]
if resource.layers:
func_definition.setdefault('layers', []).extend(
list(resource.layers))
if isinstance(resource.role, models.ManagedIAMRole):
func_definition['role'] = '${aws_iam_role.%s.arn}' % (
resource.role.resource_name)
else:
# resource is a PreCreatedIAMRole.
role = cast(models.PreCreatedIAMRole, resource.role)
func_definition['role'] = role.role_arn
template['resource'].setdefault('aws_lambda_function', {})[
resource.resource_name] = func_definition
def _generate_restapi(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
# typechecker happiness
swagger_doc = cast(Dict, resource.swagger_doc)
template['locals']['chalice_api_swagger'] = json.dumps(
swagger_doc)
template['resource'].setdefault('aws_api_gateway_rest_api', {})[
resource.resource_name] = {
'body': '${local.chalice_api_swagger}',
# Terraform will diff explicitly configured attributes
# to the current state of the resource. Attributes configured
# via swagger on the REST api need to be duplicated here, else
# terraform will set them back to empty.
'name': swagger_doc['info']['title'],
'binary_media_types': swagger_doc[
'x-amazon-apigateway-binary-media-types'],
'endpoint_configuration': {'types': [resource.endpoint_type]}
}
if 'x-amazon-apigateway-policy' in swagger_doc:
template['resource'][
'aws_api_gateway_rest_api'][
resource.resource_name]['policy'] = json.dumps(
swagger_doc['x-amazon-apigateway-policy'])
if resource.minimum_compression.isdigit():
template['resource'][
'aws_api_gateway_rest_api'][
resource.resource_name][
'minimum_compression_size'] = int(
resource.minimum_compression)
template['resource'].setdefault('aws_api_gateway_deployment', {})[
resource.resource_name] = {
'stage_name': resource.api_gateway_stage,
# Ensure that the deployment gets redeployed if we update
# the swagger description for the api by using its checksum
# in the stage description.
'stage_description': (
"${md5(local.chalice_api_swagger)}"),
'rest_api_id': '${aws_api_gateway_rest_api.%s.id}' % (
resource.resource_name),
'lifecycle': {'create_before_destroy': True}
}
template['resource'].setdefault('aws_lambda_permission', {})[
resource.resource_name + '_invoke'] = {
'function_name': self._fref(resource.lambda_function),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('apigateway'),
'source_arn':
"${aws_api_gateway_rest_api.%s.execution_arn}/*" % (
resource.resource_name)
}
template.setdefault('output', {})[
'EndpointURL'] = {
'value': '${aws_api_gateway_deployment.%s.invoke_url}' % (
resource.resource_name)
}
template.setdefault('output', {})[
'RestAPIId'] = {
'value': '${aws_api_gateway_rest_api.%s.id}' % (
resource.resource_name)
}
for auth in resource.authorizers:
template['resource']['aws_lambda_permission'][
auth.resource_name + '_invoke'] = {
'function_name': self._fref(auth),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('apigateway'),
'source_arn': (
"${aws_api_gateway_rest_api.%s.execution_arn}" % (
resource.resource_name) + "/*"
)
}
self._add_domain_name(resource, template)
def _add_domain_name(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
if resource.domain_name is None:
return
domain_name = resource.domain_name
endpoint_type = resource.endpoint_type
properties = {
'domain_name': domain_name.domain_name,
'endpoint_configuration': {'types': [endpoint_type]},
}
if endpoint_type == 'EDGE':
properties['certificate_arn'] = domain_name.certificate_arn
else:
properties[
'regional_certificate_arn'] = domain_name.certificate_arn
if domain_name.tls_version is not None:
properties['security_policy'] = domain_name.tls_version.value
if domain_name.tags:
properties['tags'] = domain_name.tags
template['resource']['aws_api_gateway_domain_name'] = {
domain_name.resource_name: properties
}
template['resource']['aws_api_gateway_base_path_mapping'] = {
domain_name.resource_name + '_mapping': {
'stage_name': resource.api_gateway_stage,
'domain_name': domain_name.domain_name,
'api_id': '${aws_api_gateway_rest_api.%s.id}' % (
resource.resource_name)
}
}
self._add_domain_name_outputs(domain_name.resource_name, endpoint_type,
template)
def _add_domain_name_outputs(self, domain_resource_name,
endpoint_type, template):
# type: (str, str, Dict[str, Any]) -> None
base = (
'aws_api_gateway_domain_name.%s' % domain_resource_name
)
if endpoint_type == 'EDGE':
alias_domain_name = '${%s.cloudfront_domain_name}' % base
hosted_zone_id = '${%s.cloudfront_zone_id}' % base
else:
alias_domain_name = '${%s.regional_domain_name}' % base
hosted_zone_id = '${%s.regional_zone_id}' % base
template.setdefault('output', {})['AliasDomainName'] = {
'value': alias_domain_name
}
template.setdefault('output', {})['HostedZoneId'] = {
'value': hosted_zone_id
}
def _generate_apimapping(self, resource, template):
# type: (models.APIMapping, Dict[str, Any]) -> None
pass
def _generate_domainname(self, resource, template):
# type: (models.DomainName, Dict[str, Any]) -> None
pass
class AppPackager(object):
def __init__(self,
templater, # type: TemplateGenerator
resource_builder, # type: ResourceBuilder
post_processor, # type: TemplatePostProcessor
template_serializer, # type: TemplateSerializer
osutils, # type: OSUtils
):
# type: (...) -> None
self._templater = templater
self._resource_builder = resource_builder
self._template_post_processor = post_processor
self._template_serializer = template_serializer
self._osutils = osutils
def _to_json(self, doc):
# type: (Any) -> str
return serialize_to_json(doc)
def _to_yaml(self, doc):
# type: (Any) -> str
return yaml.dump(doc, allow_unicode=True)
def package_app(self, config, outdir, chalice_stage_name):
# type: (Config, str, str) -> None
# Deployment package
resources = self._resource_builder.construct_resources(
config, chalice_stage_name)
template = self._templater.generate(resources)
if not self._osutils.directory_exists(outdir):
self._osutils.makedirs(outdir)
self._template_post_processor.process(
template, config, outdir, chalice_stage_name)
contents = self._template_serializer.serialize_template(template)
extension = self._template_serializer.file_extension
filename = os.path.join(
outdir, self._templater.template_file) + '.' + extension
self._osutils.set_file_contents(
filename=filename,
contents=contents,
binary=False
)
class TemplatePostProcessor(object):
def __init__(self, osutils):
# type: (OSUtils) -> None
self._osutils = osutils
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
raise NotImplementedError()
class SAMCodeLocationPostProcessor(TemplatePostProcessor):
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
self._fixup_deployment_package(template, outdir)
def _fixup_deployment_package(self, template, outdir):
# type: (Dict[str, Any], str) -> None
# NOTE: This isn't my ideal way to do this. I'd like
# to move this into the build step where something
# copies the DeploymentPackage.filename over to the
# outdir. That would require plumbing through user
# provided params such as "outdir" into the build stage
# somehow, which isn't currently possible.
copied = False
for resource in template['Resources'].values():
if resource['Type'] == 'AWS::Serverless::Function':
original_location = resource['Properties']['CodeUri']
new_location = os.path.join(outdir, 'deployment.zip')
if not copied:
self._osutils.copy(original_location, new_location)
copied = True
resource['Properties']['CodeUri'] = './deployment.zip'
elif resource['Type'] == 'AWS::Serverless::LayerVersion':
original_location = resource['Properties']['ContentUri']
new_location = os.path.join(outdir, 'layer-deployment.zip')
self._osutils.copy(original_location, new_location)
resource['Properties']['ContentUri'] = './layer-deployment.zip'
class TerraformCodeLocationPostProcessor(TemplatePostProcessor):
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
copied = False
resources = template['resource']
for r in resources.get('aws_lambda_function', {}).values():
if not copied:
asset_path = os.path.join(outdir, 'deployment.zip')
self._osutils.copy(r['filename'], asset_path)
copied = True
r['filename'] = "${path.module}/deployment.zip"
r['source_code_hash'] = \
'${filebase64sha256("${path.module}/deployment.zip")}'
copied = False
for r in resources.get('aws_lambda_layer_version', {}).values():
if not copied:
asset_path = os.path.join(outdir, 'layer-deployment.zip')
self._osutils.copy(r['filename'], asset_path)
copied = True
r['filename'] = "${path.module}/layer-deployment.zip"
r['source_code_hash'] = \
'${filebase64sha256("${path.module}/layer-deployment.zip")}'
class TemplateMergePostProcessor(TemplatePostProcessor):
def __init__(self,
osutils, # type: OSUtils
merger, # type: TemplateMerger
template_serializer, # type: TemplateSerializer
merge_template=None, # type: Optional[str]
):
# type: (...) -> None
super(TemplateMergePostProcessor, self).__init__(osutils)
self._merger = merger
self._template_serializer = template_serializer
self._merge_template = merge_template
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
if self._merge_template is None:
return
loaded_template = self._load_template_to_merge()
merged = self._merger.merge(loaded_template, template)
template.clear()
template.update(merged)
def _load_template_to_merge(self):
# type: () -> Dict[str, Any]
template_name = cast(str, self._merge_template)
filepath = os.path.abspath(template_name)
if not self._osutils.file_exists(filepath):
raise RuntimeError('Cannot find template file: %s' % filepath)
template_data = self._osutils.get_file_contents(filepath, binary=False)
loaded_template = self._template_serializer.load_template(
template_data, filepath)
return loaded_template
class CompositePostProcessor(TemplatePostProcessor):
def __init__(self, processors):
# type: (List[TemplatePostProcessor]) -> None
self._processors = processors
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
for processor in self._processors:
processor.process(template, config, outdir, chalice_stage_name)
class TemplateMerger(object):
def merge(self, file_template, chalice_template):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
raise NotImplementedError('merge')
class TemplateDeepMerger(TemplateMerger):
def merge(self, file_template, chalice_template):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
return self._merge(file_template, chalice_template)
def _merge(self, file_template, chalice_template):
# type: (Any, Any) -> Any
if isinstance(file_template, dict) and \
isinstance(chalice_template, dict):
return self._merge_dict(file_template, chalice_template)
return file_template
def _merge_dict(self, file_template, chalice_template):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
merged = chalice_template.copy()
for key, value in file_template.items():
merged[key] = self._merge(value, chalice_template.get(key))
return merged
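# Illustrative example of the merge semantics above (values from the
# user-provided file template win; nested dicts are merged key by key,
# anything else replaces the generated value wholesale):
#
#   chalice_template = {'resource': {'aws_lambda_function': {'api_handler':
#       {'memory_size': 128, 'timeout': 60}}}}
#   file_template = {'resource': {'aws_lambda_function': {'api_handler':
#       {'memory_size': 512}}}}
#   TemplateDeepMerger().merge(file_template, chalice_template)
#   # -> {'resource': {'aws_lambda_function': {'api_handler':
#   #        {'memory_size': 512, 'timeout': 60}}}}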
class TemplateSerializer(object):
file_extension = ''
def load_template(self, file_contents, filename=''):
# type: (str, str) -> Dict[str, Any]
pass
def serialize_template(self, contents):
# type: (Dict[str, Any]) -> str
pass
class JSONTemplateSerializer(TemplateSerializer):
file_extension = 'json'
def serialize_template(self, contents):
# type: (Dict[str, Any]) -> str
return serialize_to_json(contents)
def load_template(self, file_contents, filename=''):
# type: (str, str) -> Dict[str, Any]
try:
return json.loads(file_contents)
except ValueError:
raise RuntimeError(
'Expected %s to be valid JSON template.' % filename)
class YAMLTemplateSerializer(TemplateSerializer):
file_extension = 'yaml'
@classmethod
def is_yaml_template(cls, template_name):
# type: (str) -> bool
file_extension = os.path.splitext(template_name)[1].lower()
return file_extension in [".yaml", ".yml"]
def serialize_template(self, contents):
# type: (Dict[str, Any]) -> str
return yaml.safe_dump(contents, allow_unicode=True)
def load_template(self, file_contents, filename=''):
# type: (str, str) -> Dict[str, Any]
yaml.SafeLoader.add_multi_constructor(
tag_prefix='!', multi_constructor=self._custom_sam_instrinsics)
try:
return yaml.load(
file_contents,
Loader=yaml.SafeLoader,
)
except ScannerError:
raise RuntimeError(
'Expected %s to be valid YAML template.' % filename)
def _custom_sam_instrinsics(self, loader, tag_prefix, node):
# type: (yaml.SafeLoader, str, Node) -> Dict[str, Any]
tag = node.tag[1:]
if tag not in ['Ref', 'Condition']:
tag = 'Fn::%s' % tag
value = self._get_value(loader, node)
return {tag: value}
def _get_value(self, loader, node):
# type: (yaml.SafeLoader, Node) -> Any
if node.tag[1:] == 'GetAtt' and isinstance(node.value,
six.string_types):
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
value = loader.construct_sequence(node)
else:
value = loader.construct_mapping(node)
return value
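    # Sketch of how the custom constructor above maps SAM/CloudFormation
    # intrinsic tags when loading a YAML merge template (illustrative values):
    #
    #   !Ref MyBucket                -> {"Ref": "MyBucket"}
    #   !GetAtt MyFunc.Arn           -> {"Fn::GetAtt": ["MyFunc", "Arn"]}
    #   !Sub "${AWS::Region}-suffix" -> {"Fn::Sub": "${AWS::Region}-suffix"}
    #   !Condition IsProd            -> {"Condition": "IsProd"}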
the-stack_0_16436
import logging
import random
from typing import List, Optional
import itertools
import numpy as np
from config import save_path
from ..abstract_system import abstract_system
from .controlloop import controlloop
class system(abstract_system):
def __init__(self, cl: List[controlloop], trap_state=False):
        if not all([type(i) == controlloop for i in cl]):
            raise ValueError(
                'All specified controlloops should be of the enumerative type')
super().__init__(cl)
self.states = {}
self.actions = {}
self.transitions = {}
self.outputs = {}
self.output_map = {}
self._trap_state = trap_state or any([not c._label_split for c in cl])
self.scheduler = None
def post(self, x: dict, u: dict = None):
"""
Calculates the set of next states for given action(s) or all actions if actions is not given
:param x: set of state(s)
:param u: set of actions
:return: set of next states
"""
r = set()
if u is None:
u = self.actions
for i in x:
for j in u:
r.update(self.transitions[i][j])
return r
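    # Minimal illustration (concrete state/action tuples depend on the
    # composed control loops and are left abstract here):
    #
    #   successors = system.post({x})       # successors of x under any action
    #   successors = system.post({x}, {u})  # successors of x under action u only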
def compose(self):
"""
        Creates the sets and dictionaries describing the parallel composition
        of all the control-loop NFAs.
:return: None
"""
self.states = self._c_dict([o.states for o in self.control_loops])
self.outputs = self._c_dict([o._outputs for o in self.control_loops])
self.actions = self._c_dict([o.actions for o in self.control_loops])
self.output_map = self._c_dict([o.output_map for o in self.control_loops])
self.transitions = {x: {u: set() for u in self.actions} for x in self.states}
for xxx in self.states:
for uuu in self.actions:
if self._trap_state and uuu.count('t') >= 2:
self.transitions[xxx][uuu].update({'trap'})
else:
s = [o.transitions[x][u] for (o, x, u) in zip(self.control_loops, xxx, uuu)]
ls = set(itertools.product(*s))
self.transitions[xxx][uuu].update(ls)
if self._trap_state:
self.transitions['trap'] = {u: set() for u in self.actions}
self.states.update({'trap': -1})
def safe_set(self) -> Optional[dict]:
"""
Creates a dict describing the safe set, defined as (x1,...,xn) in W if at most one of the outputs
of xi is 'T'.
        :return: dict describing the safe set W
"""
if len(self.states) == 0:
print("Compose the system before generating the safe set.")
return dict()
def isSafe(out: tuple):
numT = 0
numX = 0
for i in out:
if type(i) != tuple:
numT += (i == 'T' or i == 'T1')
else:
numT += (i[0] == 'T' or i[0] == 'T1')
numX += (i[1] == 'X')
return (numX == 0 and numT <= 1)
if self._trap_state:
return {k: v for (k, v) in self.states.items() if k != 'trap'}
else:
W = {k: v for (k, v) in self.states.items() if isSafe(self.output_map[k])}
return W
def safety_game(self, W=None):
"""
Solve Safety Game for the NFA with safe set W using fixed-point iterations
:param W: The safe set. If it is not specified, it is first created.
:return: Solution to the Safety Game
"""
if self._trap_state:
F_old = dict()
F_new = self.states
it = 1
while F_old != F_new:
logging.info(f'Safety Game Iteration: {it}')
F_old = F_new
F_new = self.__safety_operator_trap(F_old)
it += 1
if F_old == {}:
return None
return F_old
else:
if W is None:
W = self.safe_set()
F_old = dict()
F_new = self.states
it = 1
while F_old != F_new:
logging.info(f'Safety Game Iteration: {it}')
F_old = F_new
F_new = self.__safety_operator(W, F_old)
it += 1
if F_old == {}:
return None
return F_old
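    # Purely illustrative sketch of the fixed-point iteration on a toy
    # single-loop example: starting from all states, each step keeps only the
    # states (inside the safe set) that have at least one action whose
    # successors all remain inside the current iterate, until nothing changes:
    #
    #   Z0 = {s0, s1, s2}
    #   Z1 = {s0, s1}        # no action keeps s2 inside Z0
    #   Z2 = {s0, s1} = Z1   # fixed point -> returned as the solution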
# TODO: Add possibility to return full scheduler transition system
def create_controller(self, Z: dict, StatesOnlyZ=True, convert_blocks=True):
"""
        Creates a controller from the safety-game solution.
        :param Z: Safety-game solution (states from which safety can be enforced)
        :param StatesOnlyZ: Specifies whether to only use the states in Z for the controller
        :param convert_blocks: Whether to map partition blocks back to the original states
        :return: Ux, Optional[Block->State Mapping]
"""
if StatesOnlyZ:
c_states = Z.copy()
else:
c_states = self.states.copy()
U_c = {x: set() for x in c_states}
for x in c_states:
for u in self.actions:
p = self.transitions[x][u]
if len(p) > 0 and set(Z.keys()).issuperset(p):
U_c[x].add(u)
if not any([s._is_part for s in self.control_loops]):
return U_c, None
elif convert_blocks and any([s._is_part for s in self.control_loops]):
U_c_n = {}
for (b, uuu) in U_c.items():
if b != 'trap':
U_c_n.update({x:uuu for x in itertools.product(*[xx.keys() for xx in self.states[b]])})
return U_c_n, None
else:
# Additionally supply look-up for the blocks
invBs = [{x:b for (b,xx) in cl.states.items() for x in xx} for cl in self.control_loops]
return U_c, invBs
def simulate(self, Ts:float = 0.01, Tmax:float = 1, x0=None, use_scheduler=True, random_inputs=False):
# Check correct/enough initial conditions
if x0 is None:
x0 = [np.random.uniform(low=-4, high=4, size=(cl.abstraction.plant.nx,)) for cl in self.control_loops]
else:
if len(x0) != len(self.control_loops):
print('Supply initial conditions for each control loop.')
return
for x0i, cl in zip(x0, self.control_loops):
if len(x0i) != cl.abstraction.plant.nx:
print(f'Initial condition dimension ({len(x0i)}) does not correspond to the expected ({cl.abstraction.plant.nx}).')
return
x0 = [np.array(x) for x in x0]
# Clip Ts such that it becomes a multiple of h
t = int(Ts/self.h)
Ts = t*self.h
# 3D Matrix storing the evolution of the continuous states over time.
x = [[np.array(x0i)] for x0i in x0]
xhat = [[np.array(x0i)] for x0i in x0]
u_hist = [[] for i in range(0, self.ns)] # continuous inputs
# Evolution of the traffic model regions over time
regions = [[cl.abstraction.region_of_state(x0i)] for (x0i, cl) in zip(x0, self.control_loops)]
for i in range(0, self.ns):
print(f'Controlloop {i} starts in region {regions[i][0]}')
        # 3D matrix storing the evolution of the transition system states over time.
if self.state2block is None:
s = [[f"T{'_'.join([str(l) for l in i[0]])}"] for i in regions]
else:
b = [self.state2block[j][f"T{'_'.join([str(l) for l in i[0]])}"] for (i,j) in zip(regions, range(0, self.ns))]
s = [[b[i]] for i in range(0, self.ns)]
v = [[[]] for i in range(0, self.ns)] # inputs (w/t/lw)
TriggerTimes = [[0] for i in range(0, self.ns)]
TriggerTimesEarly = [[] for i in range(0, self.ns)]
CollisionTimes = {}
N = int(Tmax/Ts) # Number of samples
        import scipy.integrate
        import scipy.linalg
I = [scipy.integrate.quad_vec(lambda s: scipy.linalg.expm(cl.abstraction.plant.A * s), 0, Ts)[0] for cl in self.control_loops]
for t in range(0, N):
# Step 1: Update the continuous states
utemp = [cl.abstraction.controller.K @ xn[-1] for (cl, xn) in zip(self.control_loops, xhat)]
xn = [scipy.linalg.expm(cl.abstraction.plant.A * Ts) @ xi[-1] + integral @ cl.abstraction.plant.B @ ui
for (cl, xi, ui, integral) in zip(self.control_loops, x, utemp, I)]
for i in range(0, self.ns):
x[i].append(xn[i])
for i in range(0, self.ns):
xhat[i].append(xhat[i][-1])
for i in range(0, self.ns):
u_hist[i].append(utemp[i])
## Step 2: Check triggering conditions
# If a scheduler is defined use that
if self.scheduler is not None and use_scheduler:
ss = tuple(q[-1] for q in s)
u_ts = self.scheduler[ss]
if random_inputs:
u_ts = random.choice(list(u_ts))
else:
all_w = tuple('w' for i in range(0, self.ns))
if all_w in u_ts:
u_ts = all_w
else:
u_ts = random.choice(list(u_ts))
for i in range(0, self.ns):
if u_ts[i] == 't':
reg = self.control_loops[i].abstraction.region_of_state(x[i][-1])
si = f"T{'_'.join([str(l) for l in reg])}"
if self.state2block is not None:
si = self.state2block[i][si]
s[i].append(si)
xhat[i][-1] = xn[i]
regions[i].append(reg)
if t * Ts - TriggerTimes[i][-1] < self.control_loops[i].kmax:
TriggerTimesEarly[i].append(t * Ts)
else:
TriggerTimes[i].append(t * Ts)
else:
# reg = self.control_loops[i].abstraction.region_of_state(x[i][-1])
regions[i].append(regions[i][-1])
sn = self.control_loops[i].post({s[i][-1]}, u_ts[i])
sn = random.choice(list(sn))
s[i].append(sn)
# for
else:
triggers = set()
for i in range(0, self.ns):
xx = np.block([x[i][-1].T, xhat[i][-1]])
if xx.T @ self.control_loops[i].abstraction.trigger.Qbar @ xx.T > 0 or (t*Ts - TriggerTimes[i][-1]) >= self.h*self.control_loops[i].kmax:
xhat[i][-1] = xn[i]
TriggerTimes[i].append(t*Ts)
triggers.add(i)
reg = self.control_loops[i].abstraction.region_of_state(x[i][-1])
regions[i].append(reg)
if len(triggers) > 1:
CollisionTimes[t * Ts] = triggers
for i in range(0, self.ns):
TriggerTimes[i].pop(-1)
import matplotlib.pyplot as plt
name = 'safety_scheduler_'
if not use_scheduler:
name = 'no_scheduler_'
dur = np.arange(0, Ts * N, Ts)
for i in range(0, self.ns):
plt.plot(dur, x[i][0:len(dur)], '--')
plt.gca().set_prop_cycle(None)
plt.plot(dur, xhat[i][0:len(dur)])
plt.title(f'Controlloop {i + 1}: $x(t)$ and $x_e(t)$.')
plt.savefig(os.path.join(save_path, f'{name}simulation_Controlloop_{i + 1}_states.pdf'))
plt.show()
plt.clf()
for i in range(0, self.ns):
plt.plot(dur, u_hist[i][0:len(dur)])
plt.title(f'Controlloop {i + 1}: $u(t)$.')
plt.savefig(os.path.join(save_path, f'{name}simulation_Controlloop_{i + 1}_inputs.pdf'))
plt.show()
plt.clf()
for i in range(0, self.ns):
plt.plot(TriggerTimes[i], i * np.ones(len(TriggerTimes[i])), 'x')
plt.plot(TriggerTimesEarly[i], i * np.ones(len(TriggerTimesEarly[i])), 'o')
for t, ii in CollisionTimes.items():
for i in ii:
plt.plot(t, i, 'dk')
plt.title('Trigger times')
plt.yticks(range(0, self.ns), [f'Controlloop {i}' for i in range(1, self.ns + 1)])
plt.savefig(os.path.join(save_path, f'{name}simulation_trigger_events.pdf'))
plt.show()
plt.clf()
for i in range(0, self.ns):
plt.plot(dur, regions[i][0:len(dur)])
plt.title('Traffic Model Regions')
plt.legend([f'Controlloop {i}' for i in range(1, self.ns + 1)], loc='upper left')
plt.savefig(os.path.join(save_path, f'{name}simulation_traffic_model_regions.pdf'))
plt.show()
plt.clf()
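    # Illustrative usage (hypothetical dimensions and values; the exact wiring
    # of scheduler and state2block may differ in the surrounding framework):
    #
    #   s = system([cl1, cl2], trap_state=True)
    #   s.compose()
    #   Z = s.safety_game()
    #   s.scheduler, s.state2block = s.create_controller(Z)
    #   s.simulate(Ts=0.01, Tmax=1.0, x0=[[0.5, -0.2], [1.0, 0.3]])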
""" Private Helper Methods """
def __safety_operator_trap(self, Z:dict):
F = dict()
for (x, v) in Z.items():
if x == 'trap':
continue
else:
for (uk, uv) in self.actions.items():
p = self.transitions[x][uk]
if len(p) == 0:
continue
elif not set(Z.keys()).issuperset(p):
continue
else:
F.update({x: v})
return F
def __safety_operator(self, W: dict, Z: dict):
"""
        Single application of the safety operator.
        :param W: The safe set
        :param Z: The current iterate of the fixed-point computation
        :return: States of Z inside W having some action whose successors all stay in Z
"""
F = dict()
for (x, v) in Z.items():
if x not in W:
continue
else:
for (uk, uv) in self.actions.items():
p = self.transitions[x][uk]
if len(p) == 0:
continue
elif not set(Z.keys()).issuperset(p):
continue
else:
F.update({x: v})
return F
@staticmethod
def _c_dict(l: list):
"""
        Cartesian combination of a list of dicts, pairing key tuples with the
        corresponding value tuples. E.g. l = [{a: 1, b: 2}, {c: 3, d: 4}]
        -> res = {(a, c): (1, 3), (a, d): (1, 4), (b, c): (2, 3), (b, d): (2, 4)}
        :param l: List of dicts
        :return: Dict mapping combined key tuples to combined value tuples
"""
a = [[key for key in d] for d in l]
b = [[val for val in d.values()] for d in l]
la = itertools.product(*a)
lb = itertools.product(*b)
        return {a: b for (a, b) in zip(la, lb)}
the-stack_0_16437
import prefect # base import is required for prefect context
from prefect import task, Flow, Parameter
from prefect.storage import Module
from simmate.calculators.vasp.tasks.relaxation.third_party.mit import MITRelaxationTask
from simmate.workflows.common_tasks.all import load_input
from simmate.configuration.django import setup_full # sets database connection
from simmate.database.local_calculations.relaxation import (
MITIonicStep,
MITRelaxation,
)
# --------------------------------------------------------------------------------------
# THIS SECTION SETS UP OUR TASKS
# we initialize the task here so we can use it in the Prefect flow below
relax_structure = MITRelaxationTask()
@task
def save_results(result_and_corrections):
# split our results and corrections (which are given as a tuple) into
# separate variables
vasprun, corrections = result_and_corrections
# initialize the MITRelaxation with the Prefect run info
calculation = MITRelaxation.from_prefect_context(prefect.context)
calculation.save()
# now update the calculation entry with our results
calculation.update_from_vasp_run(vasprun, corrections, MITIonicStep)
return calculation.id
# --------------------------------------------------------------------------------------
# THIS SECTION PUTS OUR TASKS TOGETHER TO MAKE A WORKFLOW
# now make the overall workflow
with Flow("MIT Relaxation") as workflow:
# These are the input parameters for the overall workflow
structure = Parameter("structure")
vasp_command = Parameter("vasp_command", default="vasp_std > vasp.out")
# load the structure to a pymatgen object
structure_pmg = load_input(structure)
# Run the calculation after we have saved the input
result_and_corrections = relax_structure(
structure=structure_pmg,
command=vasp_command,
)
# pass these results and corrections into our final task which saves
# everything to the database
calculation_id = save_results(result_and_corrections)
# For when this workflow is registered with Prefect Cloud, we indicate that
# it can be imported from a python module. Note __name__ provides the path
# to this module.
workflow.storage = Module(__name__)
# --------------------------------------------------------------------------------------
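# Illustrative local run (hypothetical inputs; the exact input format accepted
# by load_input depends on that task, shown here with a pymatgen Structure):
#
#   from pymatgen.core import Structure
#   structure = Structure.from_file("POSCAR")
#   state = workflow.run(structure=structure,
#                        vasp_command="mpirun -n 4 vasp_std > vasp.out")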
the-stack_0_16438
""" S3 Synchronization: Peer Repository Adapter for ADASHI
@copyright: 2011-2020 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
from gluon import *
from ..s3sync import S3SyncBaseAdapter
# =============================================================================
class S3SyncAdapter(S3SyncBaseAdapter):
"""
ADASHI Synchronization Adapter (passive)
http://www.adashisystems.com
"""
# -------------------------------------------------------------------------
def register(self):
"""
Register this site at the peer repository
@return: True to indicate success, otherwise False
"""
# No registration required (passive adapter)
return True
# -------------------------------------------------------------------------
def login(self):
"""
Login at the peer repository
@return: None if successful, otherwise the error
"""
# No login required (passive adapter)
return None
# -------------------------------------------------------------------------
def pull(self, task, onconflict=None):
"""
Outgoing pull
@param task: the task (sync_task Row)
"""
repository = self.repository
log = repository.log
# Import path
PATH = os.path.join(current.request.folder, "uploads", "adashi_feeds")
# Read names from path
try:
files_list = os.listdir(PATH)
except os.error:
message = "Upload path does not exist or can not be accessed"
log.write(repository_id = repository.id,
resource_name = "mixed",
transmission = log.IN,
mode = log.PUSH,
action = "read files from %s" % PATH,
remote = False,
result = log.FATAL,
message = message,
)
return message, None
# Add path to file names, filter for .xml files, sort by mtime
files = [os.path.join(PATH, f)
for f in files_list if f[-4:] == ".xml"]
files = sorted(filter(os.path.isfile, files), key=os.path.getmtime)
# Strategy and Policies
from ..s3import import S3ImportItem
default_update_policy = S3ImportItem.POLICY.NEWER
default_conflict_policy = S3ImportItem.POLICY.MASTER
strategy = task.strategy
update_policy = task.update_policy or default_update_policy
conflict_policy = task.conflict_policy or default_conflict_policy
if update_policy not in ("THIS", "OTHER"):
last_sync = task.last_pull
# Import files
for f in files:
current.log.debug("ADASHI Sync: importing %s" % f)
try:
with open(f, "r") as source:
result = self.receive([source],
None,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
onconflict=onconflict,
last_sync=last_sync,
mixed=True,
)
except IOError:
continue
# Log the operation
log.write(repository_id = repository.id,
resource_name = "mixed",
transmission = log.IN,
mode = log.PUSH,
action = "import %s" % f,
remote = result["remote"],
result = result["status"],
message = result["message"],
)
# Remove the file
try:
os.remove(f)
except os.error:
current.log.error("ADASHI Sync: can not delete %s" % f)
return None, current.request.utcnow
# -------------------------------------------------------------------------
def push(self, task):
"""
Outgoing push
@param task: the sync_task Row
"""
repository = self.repository
# Log the operation
message = "Push to ADASHI currently not supported"
log = repository.log
log.write(repository_id = repository.id,
resource_name = task.resource_name,
transmission = log.OUT,
mode = log.PUSH,
action = None,
remote = False,
result = log.FATAL,
message = message,
)
output = current.xml.json_message(False, 400, message)
return output, None
# -------------------------------------------------------------------------
def send(self,
resource,
start=None,
limit=None,
msince=None,
filters=None,
mixed=False,
pretty_print=False):
"""
Respond to an incoming pull from a peer repository
@param resource: the resource to be synchronized
@param start: index of the first record to send
@param limit: maximum number of records to send
@param msince: minimum modification date/time for records to send
@param filters: URL filters for record extraction
@param mixed: negotiate resource with peer (disregard resource)
@param pretty_print: make the output human-readable
"""
if not resource or mixed:
msg = "Mixed resource synchronization not supported"
return {"status": self.log.FATAL,
"message": msg,
"response": current.xml.json_message(False, 400, msg),
}
# Export the data as S3XML
stylesheet = os.path.join(current.request.folder,
"static", "formats", "georss", "export.xsl")
output = resource.export_xml(start=start,
limit=limit,
filters=filters,
msince=msince,
stylesheet=stylesheet,
pretty_print=pretty_print,
)
count = resource.results
msg = "Data sent to peer (%s records)" % count
# Set content type header
headers = current.response.headers
headers["Content-Type"] = "text/xml"
return {"status": self.log.SUCCESS,
"message": msg,
"response": output,
}
# -------------------------------------------------------------------------
def receive(self,
source,
resource,
strategy=None,
update_policy=None,
conflict_policy=None,
onconflict=None,
last_sync=None,
mixed=False):
"""
Respond to an incoming push from the peer repository
@param source: the input stream (list of file-like objects)
@param resource: the target resource
@param strategy: the import strategy
@param update_policy: the update policy
@param conflict_policy: the conflict resolution policy
@param onconflict: callback for conflict resolution
@param last_sync: the last synchronization date/time for the peer
@param mixed: negotiate resource with peer (disregard resource)
"""
s3db = current.s3db
xml = current.xml
log = self.log
remote = False
# Sync always has only one source per request
source = source[0]
# Parse the feed
tree = xml.parse(source)
if not tree:
# Parser error
msg = xml.error if xml.error else "Invalid source"
return {"status": log.FATAL,
"message": msg,
"remote": remote,
"response": xml.json_message(False, 400, msg),
}
# Identify feed category
category = tree.findall("//channel/category")
if not category:
msg = "Feed category missing"
return {"status": log.ERROR,
"message": msg,
"remote": remote,
"response": xml.json_message(False, 400, msg),
}
category = category[0].text
# Instantiate target resource after feed category
if category == "AVL":
resource = s3db.resource("pr_group")
elif category == "Incidents":
resource = s3db.resource("event_incident")
resource.configure(oncommit_import_item = self.update_assignments)
else:
msg = "Unknown feed category"
return {"status": log.WARNING,
"message": msg,
"remote": remote,
"response": xml.json_message(False, 400, msg),
}
# Store source data?
repository = self.repository
if repository.keep_source:
self.keep_source(tree, category)
# Import transformation stylesheet
stylesheet = os.path.join(current.request.folder,
"static",
"formats",
"georss",
"import.xsl",
)
# Import parameters
if onconflict:
onconflict_callback = lambda item: onconflict(item,
repository,
resource,
)
else:
onconflict_callback = None
ignore_errors = True
# Import
# Flag to let audit know the repository
s3 = current.response.s3
s3.repository_id = self.repository.id
output = resource.import_xml(tree,
format = "xml",
stylesheet = stylesheet,
ignore_errors = ignore_errors,
strategy = strategy,
update_policy = update_policy,
conflict_policy = conflict_policy,
last_sync = last_sync,
onconflict = onconflict_callback,
source_type = "adashi",
)
s3.repository_id = None
# Process validation errors, if any
if resource.error_tree is not None:
result = log.WARNING if ignore_errors else log.FATAL
message = "%s" % resource.error
for element in resource.error_tree.findall("resource"):
error_msg = element.get("error", "unknown error")
error_fields = element.findall("data[@error]")
if error_fields:
for field in error_fields:
error_msg = field.get("error", "unknown error")
if error_msg:
msg = "(UID: %s) %s.%s=%s: %s" % \
(element.get("uuid", None),
element.get("name", None),
field.get("field", None),
field.get("value", field.text),
error_msg)
message = "%s, %s" % (message, msg)
else:
msg = "(UID: %s) %s: %s" % \
(element.get("uuid", None),
element.get("name", None),
error_msg)
message = "%s, %s" % (message, msg)
else:
result = log.SUCCESS
message = "Data received from peer"
return {"status": result,
"remote": remote,
"message": message,
"response": output,
}
# -------------------------------------------------------------------------
@staticmethod
def update_assignments(item):
"""
Deactivate all previous unit assignments (event_team) for
an incident which are not in this feed update.
@param item: the import item
@note: this assumes that the list of incident resources in
the feed update is complete (confirmed for ADASHI)
@note: must not deactivate assignments which are newer
than the feed update (Sync policy NEWER)
"""
if item.tablename == "event_incident" and \
item.id and \
item.method == item.METHOD.UPDATE:
job = item.job
mtime = item.data.get("modified_on")
if not job or not mtime:
return
get_item = job.items.get
# Get the unit names of all current assignments in the feed
team_names = set()
add_name = team_names.add
for citem in item.components:
if citem.tablename == "event_team":
for ref in citem.references:
entry = ref.entry
team_item_id = entry.item_id
if entry.tablename == "pr_group" and team_item_id:
team_item = get_item(team_item_id)
team_name = team_item.data.get("name")
if team_name:
add_name(team_name)
break
s3db = current.s3db
ltable = s3db.event_team
gtable = s3db.pr_group
# Get all active assignments in the database which are older
# than the feed update and which are not in the feed update,
# and deactivate them
left = gtable.on(ltable.group_id == gtable.id)
query = (ltable.incident_id == item.id) & \
(ltable.modified_on <= mtime) & \
(ltable.status == 3) & \
(~(gtable.name.belongs(team_names)))
rows = current.db(query).select(ltable.id, left=left)
inactive = set(row.id for row in rows)
current.db(ltable.id.belongs(inactive)).update(status=4)
# -------------------------------------------------------------------------
def keep_source(self, tree, category):
"""
Helper method to store source data in file system
@param tree: the XML element tree of the source
@param category: the feed category
"""
repository = self.repository
# Log the operation
log = repository.log
log.write(repository_id = repository.id,
resource_name = None,
transmission = log.IN,
mode = log.PUSH,
action = "receive",
remote = False,
result = log.WARNING,
message = "'Keep Source Data' active for this repository!",
)
request = current.request
folder = os.path.join(request.folder, "uploads", "adashi")
dt = request.utcnow.replace(microsecond=0).isoformat()
dt = dt.replace(":", "").replace("-", "")
filename = os.path.join(folder,
"%s_%s.xml" % (category, dt),
)
if not os.path.exists(folder):
try:
os.mkdir(folder)
except OSError:
return
if filename:
try:
with open(filename, "w") as f:
tree.write(f, pretty_print=True)
except IOError:
return
# End =========================================================================
the-stack_0_16440
Luna API.
API is written via FastAPI.
"""
from fastapi import FastAPI, HTTPException, Response
from pydantic import BaseModel
from typing import List, Optional
from natsort import natsorted, ns
from luna.db.db_util import DbConnection
from luna.db import bucket
from luna.db import vignette
from luna.db import cellular_annotation as ann
from luna.db import scatter_plot as sca
from luna.db.base import DB_DELIM
from starlette.middleware.cors import CORSMiddleware
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class Bucket(BaseModel):
"""Bucket Object."""
slug: str
name: str
description: Optional[str] = None
url: Optional[str] = None
class Vignettes(BaseModel):
"""Vignettes Object."""
content: str
class Annotation(BaseModel):
"""Annotation Object."""
slug: str
label: str
class AnnotationBundle(Annotation):
"""Annotation Bundle Object."""
values_distinct: List[str]
values_ordered: List[str]
class ExpressionBundle(BaseModel):
"""Expression Bundle Object."""
gene: str
max_expression: float
values_ordered: List[float]
class Coordinate(BaseModel):
"""Coordinate Object."""
x: float
y: float
@app.get("/buckets", response_model=List[Bucket])
def get_buckets():
"""Get list of all data buckets."""
session = _init_db_connection()
try:
sql_bucket_list = session.query(bucket.Bucket).all()
api_bucket_list = []
for sql_bucket in sql_bucket_list:
api_bucket = Bucket(
name=sql_bucket.name,
description=sql_bucket.description,
url=sql_bucket.url,
slug=sql_bucket.slug,
)
api_bucket_list.append(api_bucket)
return api_bucket_list
finally:
session.close()
@app.get("/annotation_list/{bucket_slug}", response_model=List[Annotation])
def get_annotation_list(bucket_slug: str):
"""Get the list of annotations for the specified bucket."""
session = _init_db_connection()
target_type = ann.CellularAnnotationType.OTHER
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record_list = (
session.query(ann.CellularAnnotation)
.filter_by(bucket_id=bucket_id, type=target_type)
.order_by(ann.CellularAnnotation.slug)
.all()
)
if len(record_list) == 0:
raise HTTPException(status_code=404, detail="No annotations.")
annotation_list = []
for r in record_list:
current_annotation = Annotation(label=r.label, slug=r.slug)
annotation_list.append(current_annotation)
return annotation_list
finally:
session.close()
@app.get(
"/annotation/{bucket_slug}/{annotation_slug}",
response_model=AnnotationBundle,
)
def get_annotation_values(bucket_slug: str, annotation_slug: str):
"""Get the list of all values for the specified annotation."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = session.query(ann.CellularAnnotation)
record = record.filter_by(
bucket_id=bucket_id, slug=annotation_slug
).first()
if record is None:
raise HTTPException(status_code=404, detail="ID not found.")
value_list = record.value_list.split(DB_DELIM)
distinct_list = list({value.strip() for value in value_list})
distinct_list = natsorted(distinct_list, alg=ns.IGNORECASE)
current_annotation = AnnotationBundle(
label=record.label,
slug=record.slug,
values_distinct=distinct_list,
values_ordered=value_list,
)
return current_annotation
finally:
session.close()
@app.get("/expression/{bucket_slug}/{gene}", response_model=ExpressionBundle)
def get_expression_values(bucket_slug: str, gene: str):
"""Get the expression data for the specified gene."""
gene = gene.lower()
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(ann.CellularAnnotation.value_list)
.filter_by(bucket_id=bucket_id, slug=gene)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
value_list = record.value_list.split(DB_DELIM)
expression_bundle = ExpressionBundle(
gene=gene,
            max_expression=max(value_list, key=float),
values_ordered=value_list,
)
return expression_bundle
finally:
session.close()
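# Illustrative request/response for the endpoint above (bucket slug and gene
# are hypothetical):
#
#   GET /expression/gbm-dataset/cd19
#   -> {"gene": "cd19", "max_expression": 3.2, "values_ordered": [0.0, 1.4, ...]}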
@app.get("/umap/{bucket_slug}", response_model=List[Coordinate])
def get_umap_coordinates(bucket_slug: str):
"""Get the UMAP coordinates for the specified bucket."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(sca.ScatterPlot.coordinate_list)
.filter_by(bucket_id=bucket_id, type=sca.ScatterPlotType.UMAP)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
return _extract_coordinates(record)
finally:
session.close()
@app.get("/tsne/{bucket_slug}", response_model=List[Coordinate])
def get_tsne_coordinates(bucket_slug: str):
"""Get the TSNE coordinates for the specified bucket."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(sca.ScatterPlot.coordinate_list)
.filter_by(bucket_id=bucket_id, type=sca.ScatterPlotType.TSNE)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
return _extract_coordinates(record)
finally:
session.close()
@app.get("/vignettes/{bucket_slug}")
def get_vignettes(bucket_slug: str):
"""Get all Vignettes for the specified bucket."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(vignette.Vignette)
.filter_by(bucket_id=bucket_id)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
return Response(content=record.json, media_type="application/json")
finally:
session.close()
def _get_bucket_id(session, bucket_slug):
record = session.query(bucket.Bucket).filter_by(slug=bucket_slug).first()
if record:
return record.id
else:
raise HTTPException(status_code=404, detail="Bucket not found")
def _extract_coordinates(record):
response_list = []
value_list = record.coordinate_list.split(DB_DELIM)
for pair_str in value_list:
if len(pair_str) > 0:
parts = pair_str.split(",")
current_value = Coordinate(x=float(parts[0]), y=float(parts[1]))
response_list.append(current_value)
return response_list
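# Example of the parsing above (assuming DB_DELIM is the delimiter between
# "x,y" pairs in the stored coordinate_list):
#
#   "1.0,2.0" + DB_DELIM + "3.5,-0.5"
#   -> [Coordinate(x=1.0, y=2.0), Coordinate(x=3.5, y=-0.5)]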
def _init_db_connection():
db_connection = DbConnection()
return db_connection.session
the-stack_0_16446
class WriterRegistry:
def __init__(self, listener):
self.storage = {}
self.id = 0
self.next_id = 0
self.listener = listener
def get_id(self, value):
try:
value_id=self.storage[value]
return value_id
except:
self.register(value)
return self.storage[value]
def register(self, value):
if value not in self.storage:
idee=self.next_id+1
self.next_id+=1
self.storage.update({value: idee})
print(self.storage)
self.listener.on_new_registry_entry(value, idee)
|
the-stack_0_16451 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import signal
import sys
import traceback
import threading
from typing import Optional, TYPE_CHECKING
try:
import PyQt5
except Exception:
sys.exit("Error: Could not import PyQt5 on Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
import PyQt5.QtCore as QtCore
from electrum_ltc.i18n import _, set_language
from electrum_ltc.plugin import run_hook
from electrum_ltc.base_wizard import GoBack
from electrum_ltc.util import (UserCancelled, profiler,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum_ltc.wallet import Wallet, Abstract_Wallet
from electrum_ltc.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
from .lightning_dialog import LightningDialog
from .watchtower_dialog import WatchtowerDialog
if TYPE_CHECKING:
from electrum_ltc.daemon import Daemon
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.plugin import Plugins
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
@profiler
def __init__(self, config: 'SimpleConfig', daemon: 'Daemon', plugins: 'Plugins'):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum-ltc.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum-ltc.png"))
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.network_dialog = None
self.lightning_dialog = None
self.watchtower_dialog = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum-LTC')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.new_window_signal.connect(self.start_new_window)
self.set_dark_theme_if_needed()
run_hook('init_qt', self)
def set_dark_theme_if_needed(self):
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
except BaseException as e:
use_dark_theme = False
self.logger.warning(f'Error setting dark theme: {repr(e)}')
# Apply any necessary stylesheet patches
patch_qt_stylesheet(use_dark_theme=use_dark_theme)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
network = self.daemon.network
m.addAction(_("Network"), self.show_network_dialog)
if network.lngossip:
m.addAction(_("Lightning Network"), self.show_lightning_dialog)
if network.local_watchtower:
m.addAction(_("Local Watchtower"), self.show_watchtower_dialog)
for window in self.windows:
name = window.wallet.basename()
submenu = m.addMenu(name)
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum-LTC"), self.close)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
if self.network_dialog:
self.network_dialog.close()
if self.lightning_dialog:
self.lightning_dialog.close()
if self.watchtower_dialog:
self.watchtower_dialog.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_lightning_dialog(self):
if not self.lightning_dialog:
self.lightning_dialog = LightningDialog(self)
self.lightning_dialog.bring_to_top()
def show_watchtower_dialog(self):
if not self.watchtower_dialog:
self.watchtower_dialog = WatchtowerDialog(self)
self.watchtower_dialog.bring_to_top()
def show_network_dialog(self):
if self.network_dialog:
self.network_dialog.on_update()
self.network_dialog.show()
self.network_dialog.raise_()
return
self.network_dialog = NetworkDialog(self.daemon.network, self.config,
self.network_updated_signal_obj)
self.network_dialog.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + repr(e))
# if app is starting, still let wizard to appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + repr(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + repr(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage = wizard.create_storage(path)
else:
wizard.run_upgrades(storage)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or storage.get_action():
return
wallet = Wallet(storage, config=self.config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
self.logger.exception('')
return
self.timer.start()
path = self.config.get_wallet_path(use_gui_last_wallet=True)
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
def quit_after_last_window():
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
if self.config.get('persist_daemon'):
return
self.app.quit()
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(quit_after_last_window)
def clean_up():
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
|
the-stack_0_16453 | # Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
from __future__ import division
import warnings
import itertools
import numpy as np
import numpy.linalg as la
from scipy import sparse, stats
from scipy.sparse import random as sparse_random
import pytest
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.preprocessing.data import PowerTransformer
from sklearn.preprocessing.data import power_transform
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn.utils import shuffle
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
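# Illustrative sketch, not part of the original test suite: for the default
# interaction_only=False case, the expected number of PolynomialFeatures
# columns follows from the binomial coefficient C(n_features + degree, degree).
# The helper name below is an assumption introduced only for this example.
def _expected_poly_feature_count(n_features, degree, include_bias=True):
    from scipy.special import comb
    n_output = comb(n_features + degree, degree, exact=True)
    return n_output if include_bias else n_output - 1
# e.g. _expected_poly_feature_count(2, 2) == 6, matching the six columns of P2
# built above, and _expected_poly_feature_count(1, 3) == 4, matching P1.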
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
[u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_polynomial_feature_array_order():
X = np.arange(10).reshape(5, 2)
def is_c_contiguous(a):
return np.isfortran(a.T)
assert is_c_contiguous(PolynomialFeatures().fit_transform(X))
assert is_c_contiguous(PolynomialFeatures(order='C').fit_transform(X))
assert np.isfortran(PolynomialFeatures(order='F').fit_transform(X))
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(1, True, False, int),
(2, True, False, int),
(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64),
(4, False, False, np.float64),
(4, False, True, np.float64)])
def test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):
rng = np.random.RandomState(0)
X = rng.randint(0, 2, (100, 2))
X_csc = sparse.csc_matrix(X)
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csc = est.fit_transform(X_csc.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csc, sparse.csc_matrix)
assert Xt_csc.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csc.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(1, True, False, int),
(2, True, False, int),
(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64)])
def test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):
rng = np.random.RandomState(0)
X = rng.randint(0, 2, (100, 2))
X_csr = sparse.csr_matrix(X)
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64)])
def test_polynomial_features_csr_X_floats(deg, include_bias,
interaction_only, dtype):
X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['zero_row_index', 'deg', 'interaction_only'],
[(0, 2, True), (1, 2, True), (2, 2, True),
(0, 3, True), (1, 3, True), (2, 3, True),
(0, 2, False), (1, 2, False), (2, 2, False),
(0, 3, False), (1, 3, False), (2, 3, False)])
def test_polynomial_features_csr_X_zero_row(zero_row_index, deg,
interaction_only):
X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()
X_csr[zero_row_index, :] = 0.0
X = X_csr.toarray()
est = PolynomialFeatures(deg, include_bias=False,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
# This degree should always be one more than the highest degree supported by
# _csr_expansion.
@pytest.mark.parametrize(['include_bias', 'interaction_only'],
[(True, True), (True, False),
(False, True), (False, False)])
def test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):
X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(4, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'dim', 'interaction_only'],
[(2, 1, True),
(2, 2, True),
(3, 1, True),
(3, 2, True),
(3, 3, True),
(2, 1, False),
(2, 2, False),
(3, 1, False),
(3, 2, False),
(3, 3, False)])
def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):
X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(deg, interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_standard_scaler_dtype():
# Ensure scaling does not affect dtype
rng = np.random.RandomState(0)
n_samples = 10
n_features = 3
for dtype in [np.float16, np.float32, np.float64]:
X = rng.randn(n_samples, n_features).astype(dtype)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X)
assert X.dtype == X_scaled.dtype
assert scaler.mean_.dtype == np.float64
assert scaler.scale_.dtype == np.float64
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.full(8, np.log(1e-5), dtype=np.float64)
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.full(10, np.log(1e-5), dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.full(10, 1e-100, dtype=np.float64)
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.full(10, 1e100, dtype=np.float64)
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert X_scaled is not X
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert X_scaled is not X
X_scaled = scaler.fit(X).transform(X, copy=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is X
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
assert X_scaled is not X
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert not s1[0] == s2[0]
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of the partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
        # Test std until the end of the partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of the absolute values, the incremental and batch results
    # must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
    # NOTE: be aware that for much larger offsets the std is very unstable
    # (last assert) while the mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of the magnitude, the results must not differ by
        # more than 6 significant digits
tol = 10 ** (-6)
assert scaler.mean_ is not None
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
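# Illustrative sketch, an assumption rather than StandardScaler's actual code
# path: running statistics of the kind exercised above can be maintained with
# Chan et al.'s pairwise update, which combines a previous (mean, var, count)
# with the statistics of a new batch without revisiting old samples.
def _incremental_mean_var(mean, var, n, X_batch):
    m = X_batch.shape[0]
    batch_mean = X_batch.mean(axis=0)
    batch_var = X_batch.var(axis=0)
    delta = batch_mean - mean
    total = n + m
    new_mean = mean + delta * m / total
    # combine the two sums of squared deviations, then renormalise
    new_var = (var * n + batch_var * m + delta ** 2 * n * m / total) / total
    return new_mean, new_var, total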
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.finfo(float).eps
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # i.e. var_ >= 0
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the Scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
@pytest.mark.parametrize("array_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
def test_scaler_n_samples_seen_with_nan(with_mean, with_std,
array_constructor):
X = np.array([[0, 1, 3],
[np.nan, 6, 10],
[5, 4, np.nan],
[8, 0, np.nan]],
dtype=np.float64)
X = array_constructor(X)
if sparse.issparse(X) and with_mean:
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
transformer.fit(X)
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
def _check_identity_scalers_attributes(scaler_1, scaler_2):
assert scaler_1.mean_ is scaler_2.mean_ is None
assert scaler_1.var_ is scaler_2.var_ is None
assert scaler_1.scale_ is scaler_2.scale_ is None
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
def test_scaler_return_identity():
    # test that the scaler returns the identity when with_mean and with_std
    # are False
X_dense = np.array([[0, 1, 3],
[5, 6, 0],
[8, 0, 10]],
dtype=np.float64)
X_csr = sparse.csr_matrix(X_dense)
X_csc = X_csr.tocsc()
transformer_dense = StandardScaler(with_mean=False, with_std=False)
X_trans_dense = transformer_dense.fit_transform(X_dense)
transformer_csr = clone(transformer_dense)
X_trans_csr = transformer_csr.fit_transform(X_csr)
transformer_csc = clone(transformer_dense)
X_trans_csc = transformer_csc.fit_transform(X_csc)
assert_allclose_dense_sparse(X_trans_csr, X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_csc)
assert_allclose(X_trans_dense, X_dense)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.partial_fit(X_dense)
transformer_csr.partial_fit(X_csr)
transformer_csc.partial_fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.fit(X_dense)
transformer_csr.fit(X_csr)
transformer_csc.fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
def test_scaler_int():
    # test that the scaler converts integer input to floating point
    # for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains infinity or a value too large",
scale, X)
def test_robust_scaler_error_sparse():
X_sparse = sparse.rand(1000, 10)
scaler = RobustScaler(with_centering=True)
err_msg = "Cannot center sparse matrices"
with pytest.raises(ValueError, match=err_msg):
scaler.fit(X_sparse)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("X", [np.random.randn(10, 3),
sparse.rand(10, 3, density=0.5)])
def test_robust_scaler_attributes(X, with_centering, with_scaling):
# check consistent type of attributes
if with_centering and sparse.issparse(X):
pytest.skip("RobustScaler cannot center sparse matrix")
scaler = RobustScaler(with_centering=with_centering,
with_scaling=with_scaling)
scaler.fit(X)
if with_centering:
assert isinstance(scaler.center_, np.ndarray)
else:
assert scaler.center_ is None
if with_scaling:
assert isinstance(scaler.scale_, np.ndarray)
else:
assert scaler.scale_ is None
def test_robust_scaler_col_zero_sparse():
    # check that the scaler works when no data is materialized in a
    # column of a sparse matrix
X = np.random.randn(10, 5)
X[:, 0] = 0
X = sparse.csr_matrix(X)
scaler = RobustScaler(with_centering=False)
scaler.fit(X)
assert scaler.scale_[0] == pytest.approx(1)
X_trans = scaler.transform(X)
assert_allclose(X[:, 0].toarray(), X_trans[:, 0].toarray())
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
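# Illustrative sketch, an assumption about the intended arithmetic rather than
# RobustScaler's implementation: with default settings each column is centred
# on its median and divided by its interquartile range, which is why the
# medians checked above come out as zero.
def _robust_scale_reference(X):
    X = np.asarray(X, dtype=np.float64)
    center = np.median(X, axis=0)
    q25, q75 = np.percentile(X, [25, 75], axis=0)
    scale_ = _handle_zeros_in_scale(q75 - q25, copy=False)
    return (X - center) / scale_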
@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1])
@pytest.mark.parametrize("strictly_signed",
['positive', 'negative', 'zeros', None])
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
# Check the equivalence of the fitting with dense and sparse matrices
X_sparse = sparse.rand(1000, 5, density=density).tocsc()
if strictly_signed == 'positive':
X_sparse.data = np.abs(X_sparse.data)
elif strictly_signed == 'negative':
X_sparse.data = - np.abs(X_sparse.data)
elif strictly_signed == 'zeros':
X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
X_dense = X_sparse.toarray()
scaler_sparse = RobustScaler(with_centering=False)
scaler_dense = RobustScaler(with_centering=False)
scaler_sparse.fit(X_sparse)
scaler_dense.fit(X_dense)
assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
    # make sure it is possible to take the inverse of a sparse matrix
    # which contains negative values; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
# check that an error is raised if input is scalar
assert_raise_message(ValueError,
'Expected 2D array, got scalar array instead',
transformer.transform, 10)
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
    # dense case -> warning raised
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # using a uniform output, each entry of X should be mapped between 0 and 1
    # and be equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
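# Illustrative sketch, an assumption rather than QuantileTransformer's actual
# implementation: a uniform output for a single column can be approximated by
# interpolating each value against the column's empirical quantiles, i.e. an
# empirical CDF lookup.
def _uniform_quantile_reference(column, n_quantiles=5):
    column = np.asarray(column, dtype=np.float64)
    references = np.linspace(0, 1, n_quantiles)
    quantiles = np.percentile(column, references * 100)
    return np.interp(column, quantiles, references)
# Applied to the first column of X above this maps [0, 25, 50, 75, 100] onto
# [0., 0.25, 0.5, 0.75, 1.], the equally spaced values asserted in the test.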
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
    # that the computed quantiles are almost mapped to a [0, 1] vector where
    # values are equally spaced. The infinity norm is checked to be smaller
    # than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-2
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation of the expected
    # linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
# sparse support
X = sparse.rand(n_samples, 1, density=.99, format='csc', random_state=0)
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-1
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation of the expected
    # linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that, for a
    # constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert_equal(transformer.transform([[-10]]),
transformer.transform([[np.min(X)]]))
assert_equal(transformer.transform([[10]]),
transformer.transform([[np.max(X)]]))
assert_equal(transformer.inverse_transform([[-10]]),
transformer.inverse_transform(
[[np.min(transformer.references_)]]))
assert_equal(transformer.inverse_transform([[10]]),
transformer.inverse_transform(
[[np.max(transformer.references_)]]))
def test_quantile_transform_and_inverse():
# iris dataset
X = iris.data
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_nan():
X = np.array([[np.nan, 0, 0, 1],
[np.nan, np.nan, 0, 0.5],
[np.nan, 1, 1, 0]])
transformer = QuantileTransformer(n_quantiles=10, random_state=42)
transformer.fit_transform(X)
    # check that the quantiles of the first column are all NaN
assert np.isnan(transformer.quantiles_[:, 0]).all()
    # all other columns should not contain NaN
assert not np.isnan(transformer.quantiles_[:, 1:]).any()
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, r'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert not np.any(np.isnan(X_scaled))
X_csr_scaled = scale(X_csr, with_mean=False)
assert not np.any(np.isnan(X_csr_scaled.data))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been modified (copy)
assert X_scaled is not X
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_1d_array():
X = iris.data[:, 1]
X_trans = robust_scale(X)
assert_array_almost_equal(np.median(X_trans), 0)
q = np.percentile(X_trans, q=(25, 75))
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Test max_abs at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert X_norm is X
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert X_bin is not X
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert X_bin is not X
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert X_bin is X
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert X_bin is X_float
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
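    # Equivalently (the standard kernel-centering identity): K_centered =
    # K - 1_n K - K 1_n + 1_n K 1_n, where 1_n is the n x n matrix with every
    # entry equal to 1/n.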
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr",
SVR(gamma='scale'))])
# did the pipeline set the _pairwise attribute?
assert pipeline._pairwise
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_coo(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_csc(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_csr(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
@pytest.mark.parametrize("method", ['box-cox', 'yeo-johnson'])
def test_power_transformer_notfitted(method):
pt = PowerTransformer(method=method)
X = np.abs(X_1col)
assert_raises(NotFittedError, pt.transform, X)
assert_raises(NotFittedError, pt.inverse_transform, X)
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
@pytest.mark.parametrize('X', [X_1col, X_2d])
def test_power_transformer_inverse(method, standardize, X):
# Make sure we get the original input when applying transform and then
# inverse transform
X = np.abs(X) if method == 'box-cox' else X
pt = PowerTransformer(method=method, standardize=standardize)
X_trans = pt.fit_transform(X)
assert_almost_equal(X, pt.inverse_transform(X_trans))
def test_power_transformer_1d():
X = np.abs(X_1col)
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
X_trans = pt.fit_transform(X)
X_trans_func = power_transform(
X, method='box-cox',
standardize=standardize
)
X_expected, lambda_expected = stats.boxcox(X.flatten())
if standardize:
X_expected = scale(X_expected)
assert_almost_equal(X_expected.reshape(-1, 1), X_trans)
assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func)
assert_almost_equal(X, pt.inverse_transform(X_trans))
assert_almost_equal(lambda_expected, pt.lambdas_[0])
assert len(pt.lambdas_) == X.shape[1]
assert isinstance(pt.lambdas_, np.ndarray)
def test_power_transformer_2d():
X = np.abs(X_2d)
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
X_trans_class = pt.fit_transform(X)
X_trans_func = power_transform(
X, method='box-cox',
standardize=standardize
)
for X_trans in [X_trans_class, X_trans_func]:
for j in range(X_trans.shape[1]):
X_expected, lmbda = stats.boxcox(X[:, j].flatten())
if standardize:
X_expected = scale(X_expected)
assert_almost_equal(X_trans[:, j], X_expected)
assert_almost_equal(lmbda, pt.lambdas_[j])
# Test inverse transformation
X_inv = pt.inverse_transform(X_trans)
assert_array_almost_equal(X_inv, X)
assert len(pt.lambdas_) == X.shape[1]
assert isinstance(pt.lambdas_, np.ndarray)
def test_power_transformer_boxcox_strictly_positive_exception():
# Exceptions should be raised for negative arrays and zero arrays when
# method is boxcox
pt = PowerTransformer(method='box-cox')
pt.fit(np.abs(X_2d))
X_with_negatives = X_2d
not_positive_message = 'strictly positive'
assert_raise_message(ValueError, not_positive_message,
pt.transform, X_with_negatives)
assert_raise_message(ValueError, not_positive_message,
pt.fit, X_with_negatives)
assert_raise_message(ValueError, not_positive_message,
power_transform, X_with_negatives, 'box-cox')
assert_raise_message(ValueError, not_positive_message,
pt.transform, np.zeros(X_2d.shape))
assert_raise_message(ValueError, not_positive_message,
pt.fit, np.zeros(X_2d.shape))
assert_raise_message(ValueError, not_positive_message,
power_transform, np.zeros(X_2d.shape), 'box-cox')
@pytest.mark.parametrize('X', [X_2d, np.abs(X_2d), -np.abs(X_2d),
np.zeros(X_2d.shape)])
def test_power_transformer_yeojohnson_any_input(X):
# Yeo-Johnson method should support any kind of input
power_transform(X, method='yeo-johnson')
@pytest.mark.parametrize("method", ['box-cox', 'yeo-johnson'])
def test_power_transformer_shape_exception(method):
pt = PowerTransformer(method=method)
X = np.abs(X_2d)
pt.fit(X)
# Exceptions should be raised for arrays with different num_columns
# than during fitting
wrong_shape_message = 'Input data has a different number of features'
assert_raise_message(ValueError, wrong_shape_message,
pt.transform, X[:, 0:1])
assert_raise_message(ValueError, wrong_shape_message,
pt.inverse_transform, X[:, 0:1])
def test_power_transformer_method_exception():
pt = PowerTransformer(method='monty-python')
X = np.abs(X_2d)
# An exception should be raised if PowerTransformer.method isn't valid
bad_method_message = "'method' must be one of"
assert_raise_message(ValueError, bad_method_message,
pt.fit, X)
def test_power_transformer_lambda_zero():
pt = PowerTransformer(method='box-cox', standardize=False)
X = np.abs(X_2d)[:, 0:1]
# Test the lambda = 0 case
pt.lambdas_ = np.array([0])
X_trans = pt.transform(X)
assert_array_almost_equal(pt.inverse_transform(X_trans), X)
def test_power_transformer_lambda_one():
# Make sure lambda = 1 corresponds to the identity for yeo-johnson
pt = PowerTransformer(method='yeo-johnson', standardize=False)
X = np.abs(X_2d)[:, 0:1]
pt.lambdas_ = np.array([1])
X_trans = pt.transform(X)
assert_array_almost_equal(X_trans, X)
@pytest.mark.parametrize("method, lmbda", [('box-cox', .1),
('box-cox', .5),
('yeo-johnson', .1),
('yeo-johnson', .5),
('yeo-johnson', 1.),
])
def test_optimization_power_transformer(method, lmbda):
# Test the optimization procedure:
# - set a predefined value for lambda
# - apply inverse_transform to a normal dist (we get X_inv)
# - apply fit_transform to X_inv (we get X_inv_trans)
# - check that X_inv_trans is roughly equal to X
rng = np.random.RandomState(0)
n_samples = 20000
X = rng.normal(loc=0, scale=1, size=(n_samples, 1))
pt = PowerTransformer(method=method, standardize=False)
pt.lambdas_ = [lmbda]
X_inv = pt.inverse_transform(X)
pt = PowerTransformer(method=method, standardize=False)
X_inv_trans = pt.fit_transform(X_inv)
assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples,
decimal=2)
assert_almost_equal(0, X_inv_trans.mean(), decimal=1)
assert_almost_equal(1, X_inv_trans.std(), decimal=1)
def test_yeo_johnson_darwin_example():
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
X = np.array(X).reshape(-1, 1)
lmbda = PowerTransformer(method='yeo-johnson').fit(X).lambdas_
assert np.allclose(lmbda, 1.305, atol=1e-3)
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
def test_power_transformer_nans(method):
# Make sure lambda estimation is not influenced by NaN values
# and that transform() supports NaN silently
X = np.abs(X_1col)
pt = PowerTransformer(method=method)
pt.fit(X)
lmbda_no_nans = pt.lambdas_[0]
# concat nans at the end and check lambda stays the same
X = np.concatenate([X, np.full_like(X, np.nan)])
X = shuffle(X, random_state=0)
pt.fit(X)
lmbda_nans = pt.lambdas_[0]
assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5)
X_trans = pt.transform(X)
assert_array_equal(np.isnan(X_trans), np.isnan(X))
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_fit_transform(method, standardize):
# check that fit_transform() and fit().transform() return the same values
X = X_1col
if method == 'box-cox':
X = np.abs(X)
pt = PowerTransformer(method, standardize)
assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X))
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_copy_True(method, standardize):
# Check that neither fit, transform, fit_transform nor inverse_transform
# modify X inplace when copy=True
X = X_1col
if method == 'box-cox':
X = np.abs(X)
X_original = X.copy()
assert X is not X_original # sanity checks
assert_array_almost_equal(X, X_original)
pt = PowerTransformer(method, standardize, copy=True)
pt.fit(X)
assert_array_almost_equal(X, X_original)
X_trans = pt.transform(X)
assert X_trans is not X
X_trans = pt.fit_transform(X)
assert_array_almost_equal(X, X_original)
assert X_trans is not X
X_inv_trans = pt.inverse_transform(X_trans)
assert X_trans is not X_inv_trans
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_copy_False(method, standardize):
# check that when copy=False fit doesn't change X inplace but transform,
# fit_transform and inverse_transform do.
X = X_1col
if method == 'box-cox':
X = np.abs(X)
X_original = X.copy()
assert X is not X_original # sanity checks
assert_array_almost_equal(X, X_original)
pt = PowerTransformer(method, standardize, copy=False)
pt.fit(X)
assert_array_almost_equal(X, X_original) # fit didn't change X
X_trans = pt.transform(X)
assert X_trans is X
if method == 'box-cox':
X = np.abs(X)
X_trans = pt.fit_transform(X)
assert X_trans is X
X_inv_trans = pt.inverse_transform(X_trans)
assert X_trans is X_inv_trans
def test_power_transform_default_method():
X = np.abs(X_2d)
future_warning_message = (
"The default value of 'method' "
"will change from 'box-cox'"
)
assert_warns_message(FutureWarning, future_warning_message,
power_transform, X)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
X_trans_default = power_transform(X)
X_trans_boxcox = power_transform(X, method='box-cox')
assert_array_equal(X_trans_boxcox, X_trans_default)
|
the-stack_0_16454 | from django.urls import path
from .views import account_session, get_csrf, register, account_login, account_logout
urlpatterns = [
path('get_session/', account_session),
path('get_csrf/', get_csrf),
path('register/', register, name='account_register'),
path('login/', account_login, name="account_login"),
path('logout/', account_logout),
]
|
the-stack_0_16455 | import math
from typing import Tuple
import torch
import torch.nn as nn
from cached_property import cached_property
from torch.nn.modules.transformer import (
TransformerDecoder,
TransformerDecoderLayer,
TransformerEncoder,
TransformerEncoderLayer,
)
from kobe.data.dataset import Batched, EncodedBatch
from kobe.data.vocab import BOS_ID, EOS_ID, PAD_ID
from kobe.utils import helpers
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
"""
initialization of required variables and functions
:param dropout: dropout probability
:param dim: hidden size
:param max_len: maximum length
"""
super(PositionalEncoding, self).__init__()
# positional encoding initialization
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
        # divisor term that sets the sinusoid frequencies
div_term = torch.exp(
(torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))
)
# sinusoidal positional encoding
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(1)
self.register_buffer("pe", pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb):
"""
        add positional information to the input embedding
        :param emb: word embedding, shaped [len, batch, size]
        :return: embedding with positional encoding added
"""
emb *= math.sqrt(self.dim)
emb = emb + self.pe[: emb.size(0)] # [len, batch, size]
emb = self.dropout(emb)
return emb
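# Illustrative usage sketch (added for clarity; not part of the original module, and
# the shapes below are assumptions):
#   pos_enc = PositionalEncoding(dropout=0.1, dim=512)
#   emb = torch.zeros(10, 2, 512)   # [seq_len, batch, d_model]
#   out = pos_enc(emb)              # same shape, with sinusoidal positions added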
class Encoder(nn.Module):
@staticmethod
def from_args(args) -> "Encoder":
return Encoder(
args.text_vocab_size + args.cond_vocab_size,
args.max_seq_len,
args.d_model,
args.nhead,
args.num_encoder_layers,
args.dropout,
args.mode,
)
def __init__(
self,
vocab_size: int,
max_seq_len: int,
d_model: int,
nhead: int,
num_layers: int,
dropout: float,
mode: str,
):
super().__init__()
self.d_model = d_model
self.max_seq_len = max_seq_len
self.input_embedding = nn.Embedding(vocab_size, d_model)
self.pos_encoder = PositionalEncoding(dropout, d_model)
encoder_layer = TransformerEncoderLayer(
d_model, nhead, d_model * 4, dropout, norm_first=True
)
self.encoder = TransformerEncoder(
encoder_layer, num_layers, nn.LayerNorm(d_model)
)
self.mode = mode
@cached_property
def device(self):
return list(self.parameters())[0].device
def forward(self, batched: Batched) -> EncodedBatch:
src, src_key_padding_mask = Encoder._get_input(batched, self.mode)
src = self.input_embedding(src)
src = self.pos_encoder(src)
token_encodings = self.encoder.forward(
src=src, src_key_padding_mask=src_key_padding_mask
)
return EncodedBatch(
context_encodings=token_encodings,
context_encodings_mask=src_key_padding_mask,
)
@staticmethod
def _get_input(batched: Batched, mode: str) -> Tuple[torch.Tensor, torch.Tensor]:
return {
helpers.BASELINE: (batched.title_token_ids, batched.title_token_ids_mask),
helpers.KOBE_ATTRIBUTE: (
batched.cond_title_token_ids,
batched.cond_title_token_ids_mask,
),
helpers.KOBE_KNOWLEDGE: (
batched.title_fact_token_ids,
batched.title_fact_token_ids_mask,
),
helpers.KOBE_FULL: (
batched.cond_title_fact_token_ids,
batched.cond_title_fact_token_ids_mask,
),
}[mode]
class Decoder(nn.Module):
@staticmethod
def from_args(args) -> "Decoder":
return Decoder(
args.text_vocab_size,
args.max_seq_len,
args.d_model,
args.nhead,
args.num_encoder_layers,
args.dropout,
)
def __init__(
self,
vocab_size: int,
max_seq_len: int,
d_model: int,
nhead: int,
num_layers: int,
dropout: float,
):
super(Decoder, self).__init__()
self.max_seq_len = max_seq_len
self.embedding = nn.Embedding(vocab_size, d_model)
self.pos_encoder = PositionalEncoding(dropout, d_model)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, 4 * d_model, dropout, norm_first=True
)
self.decoder = TransformerDecoder(
decoder_layer, num_layers, nn.LayerNorm(d_model)
)
self.output = nn.Linear(d_model, vocab_size)
def forward(self, batch: Batched, encoded_batch: EncodedBatch) -> torch.Tensor:
tgt = self.embedding(batch.description_token_ids[:-1])
tgt = self.pos_encoder(tgt)
tgt_mask = Decoder.generate_square_subsequent_mask(tgt.shape[0], tgt.device)
outputs = self.decoder(
tgt=tgt,
tgt_mask=tgt_mask,
tgt_key_padding_mask=batch.description_token_ids_mask[:, :-1],
memory=encoded_batch.context_encodings,
memory_key_padding_mask=encoded_batch.context_encodings_mask,
)
return self.output(outputs)
def predict(self, encoded_batch: EncodedBatch, decoding_strategy: str):
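        """Autoregressively decode from BOS, one token per step, using either greedy
        argmax or nucleus (top-p) sampling; sequences that have emitted EOS are padded
        with PAD_ID, and decoding stops early once every sequence in the batch is done.
        """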
batch_size = encoded_batch.context_encodings.shape[1]
tgt = torch.tensor(
[BOS_ID] * batch_size, device=encoded_batch.context_encodings.device
).unsqueeze(dim=0)
tgt_mask = Decoder.generate_square_subsequent_mask(self.max_seq_len, tgt.device)
pred_all = []
for idx in range(self.max_seq_len):
tgt_emb = self.pos_encoder(self.embedding(tgt))
outputs = self.decoder(
tgt_emb,
tgt_mask=tgt_mask[: idx + 1, : idx + 1],
memory=encoded_batch.context_encodings,
memory_key_padding_mask=encoded_batch.context_encodings_mask,
)
logits = self.output(outputs[-1])
if decoding_strategy == "greedy":
pred_step = logits.argmax(dim=1).tolist()
elif decoding_strategy == "nucleus":
pred_step = [
helpers.top_k_top_p_sampling(logits[i], top_p=0.95)
for i in range(batch_size)
]
else:
raise NotImplementedError
for b in range(batch_size):
if pred_all and pred_all[-1][b].item() in [EOS_ID, PAD_ID]:
pred_step[b] = PAD_ID
if all([pred == PAD_ID for pred in pred_step]):
break
pred_step = torch.tensor(pred_step, device=tgt.device)
pred_all.append(pred_step)
if idx < self.max_seq_len - 1:
tgt_step = pred_step.unsqueeze(dim=0)
tgt = torch.cat([tgt, tgt_step], dim=0)
preds = torch.stack(pred_all)
return preds
@staticmethod
def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:
r"""
Generate a square mask for the sequence. The masked positions are filled with
float('-inf').
Unmasked positions are filled with float(0.0).
"""
return torch.triu(
torch.full((sz, sz), float("-inf"), device=device), diagonal=1
)
|
the-stack_0_16457 | # -*- coding: future_fstrings -*-
class GroupUser:
group_user_access_right_key = 'groupUserAccessRight'
email_address_key = 'emailAddress'
display_name_key = 'displayName'
identifier_key = 'identifier'
principal_type_key = 'principalType'
def __init__(
self,
group_user_access_right,
email_address="",
display_name="",
identifier="",
principal_type=None
):
"""Constructs a GroupUser object
:param group_user_access_right: Enum GroupUserAccessRight - The access right to assign to the GroupUser
:param email_address: str - E-mail address of the user if principal type is user
:param display_name: str - Display name of the principal
:param identifier: str - Identifier of the principal
:param principal_type: Enum PrincipalType - The principal type
"""
self.group_user_access_right = group_user_access_right
self.email_address = email_address
self.display_name = display_name
self.identifier = identifier
self.principal_type = principal_type
def as_set_values_dict(self):
"""Convert GroupUser object to dict with only values that are actually set. This dict can be used for
groups.add_group_user requests.
:return: Dict with object attributes in camelCase as keys, and attribute values as values.
"""
group_user_dict = dict()
if self.group_user_access_right:
group_user_dict[self.group_user_access_right_key] = self.group_user_access_right.value
if self.email_address:
group_user_dict[self.email_address_key] = self.email_address
if self.display_name:
group_user_dict[self.display_name_key] = self.display_name
if self.identifier:
group_user_dict[self.identifier_key] = self.identifier
if self.principal_type:
group_user_dict[self.principal_type_key] = self.principal_type.value
return group_user_dict
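# Hedged usage sketch (for illustration only; the GroupUserAccessRight enum value used
# below is an assumption, the real enum is defined elsewhere in this package):
#   user = GroupUser(GroupUserAccessRight.ADMIN, email_address="user@example.com")
#   user.as_set_values_dict()
#   # -> {'groupUserAccessRight': <enum value>, 'emailAddress': 'user@example.com'}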
|
the-stack_0_16458 | # -*- coding: ascii -*-
"""
app.utils
~~~~~~~~~
Utils. for the application.
"""
import re
import unicodedata
from functools import partial
from Levenshtein import distance
__all__ = [
'parse_db_uri',
'parse_citations',
'parse_doi',
'normalize',
'doi_normalize',
'matching'
]
# Find citations from text
find_citations = [
# APA style
re.compile(
(
r'((?#authors)[\w-]{2,}(?: *,(?: *[A-Z]\.)+|(?: +[\w-]+)+)?'
r'(?: *,(?: *(?:&|\.{3}))? *[\w-]{2,}(?: *,(?: *[A-Z]\.)+|(?: +[\w-]+)+)?)*(?:(?<=\.)|(?<!\.) *\.)'
r'(?#date) *\( *\d{4}(?: *, *\w+(?: +\d+(?: *- *\d+)?)?)? *\) *\.'
r'(?#title)[^\n]+(?:(?<=\.)|(?<!\.)\.)'
r'(?#journal|location)(?<=\.)(?:[^\n]+?(?=, *\d+ *\([\w-]+\)|, *\w+(?:-\w+)? *\.|\.)'
r'(?#journal:volume)(?:, *\d+ *\([\w-]+\))?(?#journal:pages)(?:, *\w+(?:-\w+)?)? *\.)?'
r'(?#doi)(?: *(?:doi: *|http://dx\.doi\.org/)[^\s]+)?)'
),
flags=re.IGNORECASE + re.DOTALL
).findall,
# AMA style
re.compile(
(
r'(?:\n|^)'
r'((?#authors)(?:[\w-]{2,}(?: +[A-Z]+)?(?: *, *[\w-]{2,}(?: +[A-Z]+)?)* *\.)?'
r'(?#title) *\w{2}[^\n;.]+\.(?#title:journal|conference) *\w{2}[^\n;.]+'
r'(?:(?#journal)\.(?#date) *(?:[a-z]{3}(?: +\d{1,2})? *, *)?\d{4}'
r'(?#volume)(?: *;(?: *\d+)?(?: *\( *[\w-]+ *\))?)?'
r'(?#page)(?: *: *\w+(?: *- *\w+)?)?|(?#conference)'
r'(?#date); *(?:[a-z]{3}(?: +\d+(?: *- *(?:\d+|[a-z]{3} +\d+))?)? *, *)?\d{4}'
r'(?#location)(?: *; *\w{2}[^\n;.]+)?) *\.'
r'(?#doi)(?: *(?:doi: *|http://dx\.doi\.org/)[^\s]+)?)'
),
flags=re.IGNORECASE + re.DOTALL
).findall
]
# Parse DOI in citation
parse_doi = re.compile(
r'(?:doi: *|http://dx\.doi\.org/)([^\s]+)',
flags=re.IGNORECASE
).findall
def parse_citations(text):
"""Parse text into list of citations"""
ret = []
for finder in find_citations:
ret.extend(finder(text))
return ret
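# Illustrative examples (the citation text is made up):
#   parse_doi("Some J. 2015;12(3):45-67. doi:10.1000/182")  ->  ['10.1000/182']
#   parse_citations(text) runs every style-specific finder (APA, AMA) over the text
#   and returns the concatenated list of matched citation strings.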
def parse_db_uri(conf):
"""
Parse input database config into database URI format
:param conf: input database config
:type conf: dict
:return: string of database config in URI format
:rtype: str
"""
# Input config must be a dict
assert isinstance(conf, dict)
# Key 'dbname' is required in config
if 'dbname' not in conf:
raise ValueError('No database specified')
# Read and parse config
dbname = str(conf['dbname'])
host = str(conf.get('host', '127.0.0.1') or '127.0.0.1')
port = str(conf.get('port', ''))
user = str(conf.get('user', ''))
passwd = str(conf.get('passwd', ''))
driver = str(conf.get('driver', 'postgresql')).lower() or 'postgresql'
if user and passwd:
user = '%s:%s@' % (user, passwd)
elif user:
user = '%s@' % user
elif passwd:
        raise ValueError('Database password supplied without a user')
if port:
if not port.isdigit():
raise ValueError('Database port must be a number')
host = '%s:%s' % (host, port)
# Return parsed config in URI format
return '{}://{}{}/{}'.format(driver, user, host, dbname)
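# Example (values are assumptions):
#   parse_db_uri({'dbname': 'app', 'user': 'bob', 'passwd': 's3cret', 'port': 5432})
#   -> 'postgresql://bob:s3cret@127.0.0.1:5432/app'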
def normalize(text, case=True, spaces=True, unicode=True):
"""
Normalize text
:param text: input text
:type text: str
:param case: normalize to lower case, default is True
:type case: bool
:param spaces: normalize spaces, default is True
:type spaces: bool
:param unicode: convert unicode characters to ascii, default is True
:type unicode: bool
:return: normalized text
:rtype: str
"""
# Normalize unicode
if unicode:
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()
# Normalize case
if case:
text = text.lower()
# Normalize spaces
if spaces:
text = ' '.join(text.split())
# Return normalized text
return text
# Normalize DOI
doi_normalize = partial(normalize, case=True, spaces=False, unicode=False)
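# Illustrative examples (kept ASCII-only because of the coding declaration above):
#   normalize('  Foo   BAR ')     ->  'foo bar'
#   doi_normalize('10.1000/ABC')  ->  '10.1000/abc'   (case-folded only; spaces kept)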
def mark_exact(citation):
"""Highlight exact matches"""
return '<mark class="exact-match">%s</mark>' % citation
def mark_approx(citation):
"""Highlight approximate matches"""
return '<mark class="approx-match">%s</mark>' % citation
def doi_matched(citation, dois):
"""
Parse DOI value from the input citation, check if the DOI value exists in the list of DOIs
:param citation: input citation
:type citation: str
:param dois: input list of DOIs
:type dois: set or list or tuple
:return: True if it exists, else False
:rtype: bool
"""
# Parse DOI in citation
doi = parse_doi(citation)
# DOI found
if doi:
return doi_normalize(doi[0]) in dois
# DOI not found
return False
def ld_matched(citation, citations, max_distance):
"""
Is there a match that is less than max_distance?
Minimum Levenshtein distance between the citation and
a list of available citations or None.
:param citation: input citation
:type citation: str
:param citations: list of available citations being matched against
:type citations: list or tuple
:param max_distance: maximum edit distance
:type max_distance: int
:return: minimum edit distance number if match found, else None
:rtype: int or None
"""
# Create a generator of edit distance numbers
distances = (distance(normalize(citation), normalize(c.value)) for c in citations)
# Filter distance numbers based on input max_distance
candidates = filter(lambda x: x <= max_distance, distances)
# Return min number of filtered distance numbers, or None
return min(candidates, default=None)
def matching(citation, dois, citations, max_distance):
"""
Main function for matching citation. Returns markup based
on result from matching.
:param citation: citation for doing matching
:type citation: str
:param dois: list of DOIs
:type dois: set or list or tuple
:param citations: list of available citations
:type citations: list or tuple
:param max_distance: maximum edit distance
:type max_distance: int
:return: markup text for input citation
:rtype: str
"""
# Match using DOI
if doi_matched(citation, dois):
return mark_exact(citation)
# Match using Levenshtein Edit Distance
else:
min_distance = ld_matched(citation, citations, max_distance)
if min_distance is None:
return citation # no match found
elif min_distance == 0:
return mark_exact(citation) # exact match
else:
return mark_approx(citation) # approx. match
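# Hedged sketch (assumes, as elsewhere in the app, that each item in `citations`
# exposes a `.value` string attribute):
#   matching(cite, dois={'10.1000/182'}, citations=db_rows, max_distance=10)
#   returns `cite` wrapped in an exact-match or approx-match <mark> tag, or
#   unchanged when no match is found.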
|
the-stack_0_16461 | import operator
import re
import sys
from typing import Optional
from packaging import version
# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
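# For example (illustrative), an operator string maps straight to a callable
# comparison on parsed versions:
#   ops[">="](version.parse("4.27.1"), version.parse("4.27"))   # -> True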
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
if got_ver is None:
raise ValueError("got_ver is None")
if want_ver is None:
raise ValueError("want_ver is None")
if not ops[op](version.parse(got_ver), version.parse(want_ver)):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
)
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the *site-packages* dir via *importlib_metadata*.
Args:
requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (`str`, *optional*): what suggestion to print in case of requirements not being met
Example:
```python
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
```"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
pkg, want_full = match[0]
# there could be multiple requirements
want_range = want_full.split(",")
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(
f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
# check if any version is installed
try:
got_ver = importlib_metadata.version(pkg)
except importlib_metadata.PackageNotFoundError:
raise importlib_metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
"""require_version wrapper which emits a core-specific hint on failure"""
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(requirement, hint)
|
the-stack_0_16463 | #!/usr/bin/env python
# -*- coding: utf-8 -*
"""
:authors:
Guannan Ma @mythmgn
:create_date:
2016/06/07
:description:
heartbeat service
"""
from cup.services import heartbeat
class HeartbeatService(heartbeat.HeartbeatService):
"""
heartbeat service. not in use yet
"""
def __init__(self, judge_lost_in_sec, keep_lost=False):
heartbeat.HeartbeatService.__init__(self, judge_lost_in_sec, keep_lost)
self._judge_lost_in_sec = judge_lost_in_sec
self._keep_lost = keep_lost
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
the-stack_0_16465 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module provides the Constraint class for handling
filters and pivots in a modular fashion. This enable easy
constraint application.
An implementation of :mod:`trappy.plotter.AbstractDataPlotter`
is expected to use the :mod:`trappy.plotter.Constraint.ConstraintManager`
class to pivot and filter data and handle multiple column,
trace and event inputs.
The underlying object that encapsulates a unique set of
a data column, data event and the requisite filters is
:mod:`trappy.plotter.Constraint.Constraint`
"""
# pylint: disable=R0913
from trappy.plotter.Utils import decolonize, normalize_list
from trappy.utils import listify
from trappy.plotter import AttrConf
class Constraint(object):
"""
What is a Constraint?
It is collection of data based on two rules:
- A Pivot
- A Set of Filters
- A Data Column
For Example a :mod:`pandas.DataFrame`
===== ======== =========
Time CPU Latency
===== ======== =========
1 x <val>
2 y <val>
3 z <val>
4 a <val>
===== ======== =========
The resultant data will be split for each unique pivot value
with the filters applied
::
result["x"] = pd.Series.filtered()
result["y"] = pd.Series.filtered()
result["z"] = pd.Series.filtered()
result["a"] = pd.Series.filtered()
:param trappy_trace: Input Data
:type trappy_trace: :mod:`pandas.DataFrame` or a class derived from
:mod:`trappy.trace.BareTrace`
:param column: The data column
:type column: str
:param template: TRAPpy Event
:type template: :mod:`trappy.base.Base` event
:param trace_index: The index of the trace/data in the overall constraint
data
:type trace_index: int
:param filters: A dictionary of filter values
:type filters: dict
:param window: A time window to apply to the constraint.
E.g. window=(5, 20) will constraint to events that happened
between Time=5 to Time=20.
:type window: tuple of two ints
"""
def __init__(self, trappy_trace, pivot, column, template, trace_index,
filters, window):
self._trappy_trace = trappy_trace
self._filters = filters
self._pivot = pivot
self.column = column
self._template = template
self._dup_resolved = False
self._data = self.populate_data_frame()
if window:
# We want to include the previous value before the window
# and the next after the window in the dataset
min_idx = self._data.loc[:window[0]].index.max()
max_idx = self._data.loc[window[1]:].index.min()
self._data = self._data.loc[min_idx:max_idx]
self.result = self._apply()
self.trace_index = trace_index
def _apply(self):
"""This method applies the filter on the resultant data
on the input column.
"""
data = self._data
result = {}
try:
values = data[self.column]
except KeyError:
return result
if self._pivot == AttrConf.PIVOT:
pivot_vals = [AttrConf.PIVOT_VAL]
else:
pivot_vals = self.pivot_vals(data)
for pivot_val in pivot_vals:
criterion = values.map(lambda x: True)
for key in self._filters.keys():
if key != self._pivot and key in data.columns:
criterion = criterion & data[key].map(
lambda x: x in self._filters[key])
if pivot_val != AttrConf.PIVOT_VAL:
criterion &= data[self._pivot] == pivot_val
val_series = values[criterion]
if len(val_series) != 0:
result[pivot_val] = val_series
return result
def _uses_trappy_trace(self):
if not self._template:
return False
else:
return True
def populate_data_frame(self):
"""Return the populated :mod:`pandas.DataFrame`"""
if not self._uses_trappy_trace():
return self._trappy_trace
data_container = getattr(
self._trappy_trace,
decolonize(self._template.name))
return data_container.data_frame
def pivot_vals(self, data):
"""This method returns the unique pivot values for the
Constraint's pivot and the column
:param data: Input Data
:type data: :mod:`pandas.DataFrame`
"""
if self._pivot == AttrConf.PIVOT:
return AttrConf.PIVOT_VAL
if self._pivot not in data.columns:
return []
pivot_vals = set(data[self._pivot])
if self._pivot in self._filters:
pivot_vals = pivot_vals & set(self._filters[self._pivot])
return list(pivot_vals)
def __str__(self):
name = self.get_data_name()
if not self._uses_trappy_trace():
return name + ":" + str(self.column)
return name + ":" + \
self._template.name + ":" + self.column
def get_data_name(self):
"""Get name for the data member. This method
relies on the "name" attribute for the name.
If the name attribute is absent, it associates
a numeric name to the respective data element
:returns: The name of the data member
"""
if self._uses_trappy_trace():
if self._trappy_trace.name != "":
return self._trappy_trace.name
else:
return "Trace {}".format(self.trace_index)
else:
return "DataFrame {}".format(self.trace_index)
class ConstraintManager(object):
"""A class responsible for converting inputs
to constraints and also ensuring sanity
:param traces: Input Trace data
:type traces: :mod:`trappy.trace.BareTrace`, list(:mod:`trappy.trace.BareTrace`)
(or a class derived from :mod:`trappy.trace.BareTrace`)
:param columns: The column values from the corresponding
:mod:`pandas.DataFrame`
:type columns: str, list(str)
:param pivot: The column around which the data will be
pivoted:
:type pivot: str
:param templates: TRAPpy events
:type templates: :mod:`trappy.base.Base`
:param filters: A dictionary of values to be applied on the
respective columns
:type filters: dict
:param window: A time window to apply to the constraints
:type window: tuple of ints
:param zip_constraints: Permutes the columns and traces instead
of a one-to-one correspondence
:type zip_constraints: bool
"""
def __init__(self, traces, columns, templates, pivot, filters,
window=None, zip_constraints=True):
self._ip_vec = []
self._ip_vec.append(listify(traces))
self._ip_vec.append(listify(columns))
self._ip_vec.append(listify(templates))
        # use a list (not a lazy map) because the lengths are re-read and indexed below
        self._lens = [len(item) for item in self._ip_vec]
self._max_len = max(self._lens)
self._pivot = pivot
self._filters = filters
self.window = window
self._constraints = []
self._trace_expanded = False
self._expand()
if zip_constraints:
self._populate_zip_constraints()
else:
self._populate_constraints()
def _expand(self):
"""This is really important. We need to
meet the following criteria for constraint
expansion:
::
Len[traces] == Len[columns] == Len[templates]
Or:
::
Permute(
Len[traces] = 1
Len[columns] = 1
Len[templates] != 1
)
Permute(
Len[traces] = 1
Len[columns] != 1
Len[templates] != 1
)
"""
min_len = min(self._lens)
        max_pos_comp = [
            i for i, j in enumerate(self._lens) if j != self._max_len
        ]
if self._max_len == 1 and min_len != 1:
raise RuntimeError("Essential Arg Missing")
if self._max_len > 1:
# Are they all equal?
if len(set(self._lens)) == 1:
return
if min_len > 1:
raise RuntimeError("Cannot Expand a list of Constraints")
for val in max_pos_comp:
if val == 0:
self._trace_expanded = True
self._ip_vec[val] = normalize_list(self._max_len,
self._ip_vec[val])
def _populate_constraints(self):
"""Populate the constraints creating one for each column in
each trace
In a multi-trace, multicolumn scenario, constraints are created for
all the columns in each of the traces. _populate_constraints()
creates one constraint for the first trace and first column, the
next for the second trace and second column,... This function
creates a constraint for every combination of traces and columns
possible.
"""
for trace_idx, trace in enumerate(self._ip_vec[0]):
for col in self._ip_vec[1]:
template = self._ip_vec[2][trace_idx]
constraint = Constraint(trace, self._pivot, col, template,
trace_idx, self._filters, self.window)
self._constraints.append(constraint)
def get_column_index(self, constraint):
return self._ip_vec[1].index(constraint.column)
def _populate_zip_constraints(self):
"""Populate the expanded constraints
In a multitrace, multicolumn scenario, create constraints for
the first trace and the first column, second trace and second
column,... that is, as if you run zip(traces, columns)
"""
for idx in range(self._max_len):
if self._trace_expanded:
trace_idx = 0
else:
trace_idx = idx
trace = self._ip_vec[0][idx]
col = self._ip_vec[1][idx]
template = self._ip_vec[2][idx]
self._constraints.append(
Constraint(trace, self._pivot, col, template, trace_idx,
self._filters, self.window))
def generate_pivots(self, permute=False):
"""Return a union of the pivot values
:param permute: Permute the Traces and Columns
:type permute: bool
"""
pivot_vals = []
for constraint in self._constraints:
pivot_vals += constraint.result.keys()
p_list = list(set(pivot_vals))
traces = range(self._lens[0])
try:
sorted_plist = sorted(p_list, key=int)
except (ValueError, TypeError):
try:
sorted_plist = sorted(p_list, key=lambda x: int(x, 16))
except (ValueError, TypeError):
sorted_plist = sorted(p_list)
if permute:
pivot_gen = ((trace_idx, pivot) for trace_idx in traces for pivot in sorted_plist)
return pivot_gen, len(sorted_plist) * self._lens[0]
else:
return sorted_plist, len(sorted_plist)
def constraint_labels(self):
"""
:return: string to represent the
set of Constraints
"""
        return [str(constraint) for constraint in self._constraints]
def __len__(self):
return len(self._constraints)
def __iter__(self):
return iter(self._constraints)
|
the-stack_0_16467 | import discord
from discord.ext import commands
import stackprinter as sp
from bin import zb
class onmemberremoveCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Events on member join
@commands.Cog.listener()
async def on_member_remove(self, member):
try:
# If any bot
if member.bot:
return
# try:
# info = await member.guild.fetch_ban(member)
# print(info)
# except:
# pass
# banlist = await member.guild.bans()
# for banned in banlist:
# if member.id == banned.id:
# return
embed=discord.Embed(description=member.mention + " " +
member.name, color=0xff470f)
embed.add_field(name="Join Date", value=member.joined_at,
inline=False)
            if not zb.is_pattern(member.display_name, r'^[A-Z]\w+[0-9]{3,}'):
embed.set_thumbnail(url=member.avatar_url)
embed.set_author(name="Member Left",
icon_url=member.avatar_url)
await zb.print_log(self,member,embed)
# junk1, junk2 = zb.del_all_special_role(member.guild,member.id)
except Exception as e:
await zb.bot_errors(self,sp.format(e))
def setup(bot):
bot.add_cog(onmemberremoveCog(bot))
|
the-stack_0_16468 | '''
This script resets the escpos printer
'''
import sys
from escpos.printer import Usb
from escpos import exceptions
VENDOR_ID = 0x0456
PRODUCT_ID = 0x0808
P_INTERFACE = 4
P_IN_ENDPOINT = 0x81
P_OUT_ENDPOINT = 0x03
p = Usb(VENDOR_ID, PRODUCT_ID, P_INTERFACE, P_IN_ENDPOINT, P_OUT_ENDPOINT)
reset_cmd = b'\x1b?\n\x00'
try:
p._raw(reset_cmd)
except Exception as e:
print(e)
sys.exit(1) |
the-stack_0_16469 | import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='jc',
version='1.17.1',
author='Kelly Brazil',
author_email='[email protected]',
description='Converts the output of popular command-line tools and file-types to JSON.',
install_requires=[
'ruamel.yaml>=0.15.0',
'xmltodict>=0.12.0',
'Pygments>=2.3.0'
],
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3.6',
url='https://github.com/kellyjonbrazil/jc',
packages=setuptools.find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
entry_points={
'console_scripts': [
'jc=jc.cli:main'
]
},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Utilities'
]
)
|
the-stack_0_16472 | import numpy as np
from KPI import KPI
def calc(inp):
return inp[:, 9]
def gap(open_price, close_price, init_money):
return 1.0 * (close_price / open_price - 1) * init_money
def gap_colume(open_price, close_price, colume):
return 1.0 * (close_price - open_price) * colume
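# Worked examples:
#   gap(10.0, 11.0, 1000.0)      ->  (11.0 / 10.0 - 1) * 1000.0  ==  100.0
#   gap_colume(10.0, 11.0, 200)  ->  (11.0 - 10.0) * 200         ==  200.0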
def RSI(data, paras, standard_data_file):
TIME_PERIOD = 14
HIGH_RSI = 85
LOW_RSI = 30
ORDER_PERCENT = 0.3
money = paras['InitMoney']
cash = money
start_money = money
    # running totals for the strategy and the benchmark
    str_money = money
    std_money = money
    # benchmark data
    standard_data = np.load(standard_data_file)
    # daily strategy returns and benchmark returns
strategy_daily = []
standard_daily_reward = []
strategy_daily_reward = []
standard_daily_ratio = []
strategy_daily_ratio = []
std_cur_open = standard_data[0][1]
NrOfShare = data.shape[0]
hold_colume = np.zeros(NrOfShare, 'float32')
length = np.zeros(NrOfShare, 'float32')
p_pre = np.zeros(NrOfShare, 'float32')
for i in range(data.shape[1]):
if i < 14:
continue
        # benchmark return calculation
        std_cur_close = standard_data[i][3]
        # compute the benchmark's daily profit
        std_gap_money = gap(std_cur_open, std_cur_close, init_money=std_money)
        # add it to the benchmark's running total
        std_money += std_gap_money
        # append the daily profit to the list
        standard_daily_reward.append(std_gap_money)
        # append the daily return ratio to the list
standard_daily_ratio.append(1.0 * (std_cur_close - std_cur_open) / std_cur_open)
RSI_val = data[:, i-13:i+1, 2] - data[:, i-14:i, 2]
RSI_positive = []
for j in range(RSI_val.shape[0]):
RSI_positive.append(np.sum(RSI_val[j, RSI_val[j,:] > 0]))
RSI_positive = np.array(RSI_positive)
RSI_negative = []
for j in range(RSI_val.shape[0]):
RSI_negative.append(np.sum(RSI_val[j, RSI_val[j, :] < 0]))
RSI_negative = np.array(RSI_negative)
sell_share = RSI_positive / (RSI_positive - RSI_negative) * 100 > HIGH_RSI
buy_share = RSI_positive / (RSI_positive - RSI_negative) * 100 < LOW_RSI
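        # i.e. RSI = 100 * sum(gains) / (sum(gains) + sum(losses)) over the last
        # TIME_PERIOD closes: sell when RSI > HIGH_RSI (overbought), buy when
        # RSI < LOW_RSI (oversold)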
hold_index = hold_colume > 0
        str_cur_close = data[hold_index, i, 2]
        str_pre_close = data[hold_index, i - 1, 2]
str_gap_money = gap_colume(str_pre_close, str_cur_close, hold_colume[hold_index])
str_money += np.sum(str_gap_money)
strategy_daily_reward.append(np.sum(str_gap_money))
if np.sum(hold_index) != 0:
strategy_daily_ratio.append(1.0 * np.mean((str_cur_close - str_pre_close) / str_pre_close))
else:
strategy_daily_ratio.append(0)
if np.sum(buy_share) > 0 and cash > 100:
money_each_share = cash // np.sum(buy_share)
hold_colume[buy_share] += money_each_share // (data[buy_share, i, 2] * 100) * 100
cash -= np.sum(money_each_share // (data[buy_share, i, 2] * 100) * 100 * data[buy_share, i, 2])
if np.sum(sell_share) > 0:
sell_index = hold_index & sell_share
cash += np.sum(hold_colume[sell_index] * data[sell_index, i, 2])
hold_colume[sell_share] = np.zeros(np.sum(sell_share))
p_pre = calc(data[:, i, :])
std_cur_open = std_cur_close
N = data.shape[1]
for i in range(500 - N):
npzero = np.array([0.0])
strategy_daily_reward = np.append(npzero, strategy_daily_reward)
strategy_daily_ratio = np.append(npzero, strategy_daily_ratio)
standard_daily_reward = np.append(npzero, standard_daily_reward)
standard_daily_ratio = np.append(npzero, standard_daily_ratio)
N -= TIME_PERIOD
return start_money, str_money, std_money, N, strategy_daily_reward, strategy_daily_ratio, standard_daily_reward, standard_daily_ratio
if __name__ == '__main__':
data = np.load('../saved files/data_zjz.npy')[:, :500, :]
standard_data = '../saved files/standard_data.npy'
init_money, str_money, std_money, N, strategy_daily_reward, strategy_daily_ratio, standard_daily_reward, standard_daily_ratio = RSI(
data, {'InitMoney': 1000000}, standard_data)
for i in range(500 - len(strategy_daily_reward)):
npzero = np.array([0.0])
strategy_daily_reward = np.append(npzero, strategy_daily_reward)
strategy_daily_ratio = np.append(npzero, strategy_daily_ratio)
standard_daily_reward = np.append(npzero, standard_daily_reward)
standard_daily_ratio = np.append(npzero, standard_daily_ratio)
print('init_money shape:{}'.format(init_money))
print('str_money shape:{}'.format(str_money))
print('std_money shape:{}'.format(std_money))
print('N shape:{}'.format(N))
print('strategy_daily_reward shape:{}'.format(np.array(strategy_daily_reward).shape))
print('strategy_daily_ratio shape:{}'.format(np.array(strategy_daily_ratio).shape))
print('standard_daily_reward shape:{}'.format(np.array(standard_daily_reward).shape))
print('standard_daily_ratio shape:{}'.format(np.array(standard_daily_ratio).shape))
kpi = KPI(
init_money=init_money,
str_money=str_money,
std_money=std_money,
N=N,
strategy_daily_reward=strategy_daily_reward,
strategy_daily_ratio=strategy_daily_ratio,
standard_daily_reward=standard_daily_reward,
standard_daily_ratio=standard_daily_ratio
)
all_filed = kpi.get_kpi()
money1 = 1000000.0
money2 = 1000000.0
daily_reward1 = strategy_daily_reward
daily_reward2 = standard_daily_reward
str_daily_reward_list = []
std_daily_reward_list = []
for i in range(len(daily_reward1)):
money1 += daily_reward1[i]
str_daily_reward_list.append(money1)
for i in range(len(daily_reward2)):
money2 += daily_reward2[i]
std_daily_reward_list.append(money2)
print(str_daily_reward_list)
print(std_daily_reward_list)
daily = []
daily.append(np.array(str_daily_reward_list))
daily.append(np.array(std_daily_reward_list))
np.save('../saved files/strategy_0_daily.npy', np.array(daily)) |
the-stack_0_16473 | from __future__ import absolute_import, print_function
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from zerver.models import UserProfile
import argparse
from datetime import datetime
import requests
import ujson
from typing import Any
class Command(BaseCommand):
help = """Add users to a MailChimp mailing list."""
def add_arguments(self, parser):
# type: (argparse.ArgumentParser) -> None
parser.add_argument('--api-key',
dest='api_key',
type=str,
help='MailChimp API key.')
parser.add_argument('--list-id',
dest='list_id',
type=str,
help='List ID of the MailChimp mailing list.')
parser.add_argument('--optin-time',
dest='optin_time',
type=str,
default=datetime.isoformat(timezone.now().replace(microsecond=0)),
help='Opt-in time of the users.')
def handle(self, *args, **options):
# type: (*Any, **str) -> None
if options['api_key'] is None:
try:
if settings.MAILCHIMP_API_KEY is None:
print('MAILCHIMP_API_KEY is None. Check your server settings file.')
exit(1)
options['api_key'] = settings.MAILCHIMP_API_KEY
except AttributeError:
print('Please supply a MailChimp API key to --api-key, or add a '
'MAILCHIMP_API_KEY to your server settings file.')
exit(1)
if options['list_id'] is None:
try:
if settings.ZULIP_FRIENDS_LIST_ID is None:
print('ZULIP_FRIENDS_LIST_ID is None. Check your server settings file.')
exit(1)
options['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
except AttributeError:
print('Please supply a MailChimp List ID to --list-id, or add a '
'ZULIP_FRIENDS_LIST_ID to your server settings file.')
exit(1)
endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
(options['api_key'].split('-')[1], options['list_id'])
for user in UserProfile.objects.filter(is_bot=False, is_active=True) \
.values('email', 'full_name', 'realm_id') \
.filter(full_name='Zoe'):
data = {
'email_address': user['email'],
'list_id': options['list_id'],
'status': 'subscribed',
'merge_fields': {
'NAME': user['full_name'],
'REALM_ID': user['realm_id'],
'OPTIN_TIME': options['optin_time'],
},
}
r = requests.post(endpoint, auth=('apikey', options['api_key']), json=data, timeout=10)
if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
print("%s is already a part of the list." % (data['email_address'],))
elif r.status_code >= 400:
print(r.text)
|
the-stack_0_16474 | # -*- coding: utf-8 -*-
# website: http://30daydo.com
# @Time : 2019/10/24 0:03
# @File : new_stock_fund.py
# Fetch data on funds that participate in new-share (IPO) subscriptions
import requests
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import logging
from scrapy.selector import Selector
logger = logging.getLogger()
PATH = r'C:\OneDrive\Python\selenium\chromedriver.exe'
class TianTianFund():
def __init__(self):
        # not yet listed
self.wss_url='http://fund.eastmoney.com/data/dxgjj_xgccjjyl.html#wss;SUMPLACE;desc;1'
options = webdriver.ChromeOptions()
options.add_argument(
'--user-agent=Mozilla/5.0 (Windows NT 999999.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')
self.driver = webdriver.Chrome(executable_path=PATH,
chrome_options=options)
def get_fund(self):
self.driver.get(self.wss_url)
time.sleep(5)
text=self.driver.page_source
response = Selector(text=text)
nodes = response.xpath('//tbody[@id="datalistwss_body"]/tr')
for node in nodes:
code = node.xpath('.//td[2]/a/text()').extract_first()
name = node.xpath('.//td[3]/a/text()').extract_first()
hit_count = node.xpath('.//td[6]/a[1]/text()').extract_first()
fund_url = node.xpath('.//td[6]/a[1]/@href').extract_first()
full_url = 'http://fund.eastmoney.com/data/'+fund_url
new_stock_amount = node.xpath('.//td[6]/text()').extract_first()
self.driver.get(fund_url)
time.sleep(5)
sub_response = Selector(text=self.driver.page_source)
sub_nodes = sub_response.xpath('//tbody[@id="datalist_body"]/tr')
new_stock_list = []
for sub_node in sub_nodes:
d={}
stock_code = sub_node.xpath('.//td[2]/a/text()').extract_first()
stock_name = sub_node.xpath('.//td[3]/a/text()').extract_first()
assign_mount = sub_node.xpath('.//td[9]/text()').extract_first()
d['新股代码']=stock_code
d['新股名称']=stock_name
d['中的金额-万元']=assign_mount
new_stock_list.append(d)
print(new_stock_list)
def start(self):
self.get_fund()
self.driver.close()
if __name__=='__main__':
fund = TianTianFund()
fund.start() |
the-stack_0_16475 | import numpy as np
from mygrad.operation_base import BroadcastableOp, Operation
__all__ = ["GetItem", "SetItem"]
class GetItem(Operation):
""" Defines the __getitem__ interface for a Tensor, supporting back-propagation
Supports back-propagation through all valid numpy-indexing (basic, advanced, mixed, etc.)"""
def __call__(self, a, index):
""" ``a[index]``
Parameters
----------
a : mygrad.Tensor
The tensor whose entries are being accessed.
index : valid-array-index
An n-dimensional index for specifying entries or subregions of `a`.
All means of numpy-array indexing (basic, advanced, mixed, etc) are
supported.
Returns
-------
numpy.ndarray
The array returned by the get-item operation"""
self.variables = (a,)
self.index = index
return a.data[index]
def backward_var(self, grad, index, **kwargs):
a = self.variables[index]
out = np.zeros_like(a.data)
np.add.at(out, self.index, grad)
return out
def _arr(*shape):
""" Construct an array of a specified consisting of values [0, _arr.size)
filled in row-major order.
Parameters
----------
*shape : int
Returns
-------
numpy.ndarray"""
return np.arange(np.prod(shape)).reshape(shape)
def _is_int_array_index(index):
""" Returns True if `index` contains any array-like integer-valued sequences
Parameters
----------
index : Tuple[Any]
Returns
-------
bool """
return any(
np.issubdtype(np.asarray(ind).dtype, np.int_) and np.asarray(ind).ndim
for ind in index
)
def _is_bool_array_index(index):
""" Returns True if `index` solely contains a boolean-valued array
Parameters
----------
index : Tuple[Any]
Returns
-------
bool """
return len(index) == 1 and np.issubdtype(np.asarray(index[0]).dtype, np.bool_)
class SetItem(BroadcastableOp):
""" Defines the __setitem__ interface for a Tensor, supporting back-propagation through
    both the tensor being set and the tensor whose values are being assigned.
    Supports back-propagation through all valid numpy-indexing (basic, advanced, mixed, etc.),
    as well as broadcasting of the values being assigned."""
def __call__(self, a, b, index):
""" a[index] = b
Parameters
----------
a : mygrad.Tensor
The tensor whose entries are being set. A copy of the underlying
data is made if `a` is a non-constant tensor.
b : mygrad.Tensor
`b` must be broadcast-compatible with `a[index]`
index : valid-array-index
An n-dimensional index for specifying entries or subregions of `a`.
All means of numpy-array indexing (basic, advanced, mixed, etc) are
supported.
Notes
-----
Additional computational overhead is required for back-propagation when
`index` contains any integer-valued arrays, to accommodate for the scenario
in which a single element is set multiple times."""
out = np.copy(a.data) if not a.constant else a.data
self.variables = (a, b)
self.index = index if isinstance(index, tuple) else (index,)
out[index] = b.data
return out
def backward_var(self, grad, index, **kwargs):
a, b = self.variables
if index == 0:
grad = np.copy(grad)
grad[self.index] = 0
return grad
elif index == 1:
grad_sel = np.asarray(grad[self.index])
# Basic indexing and indexing with a single boolean-array is trivial. The
# gradient into b can just be accessed by indexing into `grad`.
# Indexing with integer-valued arrays can be problematic, as the same
            # item can be specified multiple times for "setting"; here only the last set-item
# for that element has an effect. For example:
# x[np.array([0, 0])] = np.array([2, 3]) # `3` gets set to x[0]; 2 has no effect
# Thus only that corresponding element in `grad` (that corresponding to `3`)
# should be propagated back into b. Thus we must check to see if any items are
# being set redundantly, and mask out any elements in `grad` corresponding to
# the elements in `b` that weren't actually set.
if (
not np.shares_memory(grad_sel, grad)
and grad_sel.size > 0
and grad_sel.ndim > 0
and not _is_bool_array_index(self.index)
and _is_int_array_index(self.index)
):
# create an array of unique elements, and see if indexing into it produces
# any redundant elements
unique = _arr(*grad.shape)
sub_sel = unique[self.index].flat
elements, first_inds, = np.unique(
np.flip(sub_sel, axis=0), return_index=True
)
if len(first_inds) < len(sub_sel):
# one or more elements were set redundantly, identify the entries in `b`
# that actually were set to those elements (the last-most set-item calls
# for those elements) and propagate only the corresponding elements from grad
first_inds = (len(sub_sel) - 1) - first_inds
mask = np.zeros_like(sub_sel)
mask[first_inds] = 1
mask = mask.reshape(grad_sel.shape)
grad_sel *= mask
# handle the edge case of "projecting down" on setitem. E.g:
# x = Tensor([0, 1, 2])
# y = Tensor([3])
# x[0] = y # this is legal since x[0] and y have the same size
if grad_sel.ndim < b.ndim:
if grad_sel.size == b.size:
grad_sel = grad_sel.reshape(b.shape)
else:
# Broadcasting occurred during set-item and `b` contains
# excess leading singleton dimensions. Make `grad_sel`
# commensurate with `b` for subsequent `reduce_broadcast`
# to work
grad_sel = grad_sel[(np.newaxis,) * (b.ndim - grad_sel.ndim)]
return grad_sel
else:
raise IndexError() # pragma: no cover
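def _setitem_gradient_example():
    """ A minimal sketch (it assumes ``mygrad`` is installed and importable; it is not
    part of the original module) of the redundant integer-array set-item case handled
    in ``SetItem.backward_var`` above: when an index repeats an element, only the last
    assignment to that element receives gradient."""
    import mygrad as mg

    x = mg.Tensor([0.0, 1.0, 2.0])
    b = mg.Tensor([2.0, 3.0])
    x[np.array([0, 0])] = b  # only b[1] (= 3.0) actually lands in x[0]
    x.sum().backward()
    return b.grad  # expected: array([0., 1.]) - gradient flows only to the last write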
|
the-stack_0_16477 | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Google Cloud Vision API Python Beta Snippets
Example Usage:
python beta_snippets.py -h
python beta_snippets.py object-localization INPUT_IMAGE
python beta_snippets.py object-localization-uri gs://...
python beta_snippets.py handwritten-ocr INPUT_IMAGE
python beta_snippets.py handwritten-ocr-uri gs://...
python beta_snippets.py batch-annotate-files INPUT_PDF
python beta_snippets.py batch-annotate-files-uri gs://...
python beta_snippets.py batch-annotate-images-uri gs://... gs://...
For more information, the documentation at
https://cloud.google.com/vision/docs.
"""
import argparse
import io
# [START vision_localize_objects_beta]
def localize_objects(path):
"""Localize objects in the local image.
Args:
path: The path to the local file.
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
with open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_beta]
# [START vision_localize_objects_gcs_beta]
def localize_objects_uri(uri):
"""Localize objects in the image on Google Cloud Storage
Args:
uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
image = vision.types.Image()
image.source.image_uri = uri
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_gcs_beta]
# [START vision_handwritten_ocr_beta]
def detect_handwritten_ocr(path):
"""Detects handwritten characters in a local image.
Args:
path: The path to the local file.
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
# Language hint codes for handwritten OCR:
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
# Note: Use only one language hint code per request for handwritten OCR.
image_context = vision.types.ImageContext(
language_hints=['en-t-i0-handwrit'])
response = client.document_text_detection(image=image,
image_context=image_context)
print('Full Text: {}'.format(response.full_text_annotation.text))
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_handwritten_ocr_beta]
# [START vision_handwritten_ocr_gcs_beta]
def detect_handwritten_ocr_uri(uri):
"""Detects handwritten characters in the file located in Google Cloud
Storage.
Args:
uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
image = vision.types.Image()
image.source.image_uri = uri
# Language hint codes for handwritten OCR:
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
# Note: Use only one language hint code per request for handwritten OCR.
image_context = vision.types.ImageContext(
language_hints=['en-t-i0-handwrit'])
response = client.document_text_detection(image=image,
image_context=image_context)
print('Full Text: {}'.format(response.full_text_annotation.text))
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_handwritten_ocr_gcs_beta]
# [START vision_batch_annotate_files_beta]
def detect_batch_annotate_files(path):
"""Detects document features in a PDF/TIFF/GIF file.
While your PDF file may have several pages,
this API can process up to 5 pages only.
Args:
path: The path to the local file.
"""
from google.cloud import vision_v1p4beta1 as vision
client = vision.ImageAnnotatorClient()
with open(path, 'rb') as pdf_file:
content = pdf_file.read()
# Other supported mime_types: image/tiff' or 'image/gif'
mime_type = 'application/pdf'
input_config = vision.types.InputConfig(
content=content, mime_type=mime_type)
feature = vision.types.Feature(
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
# Annotate the first two pages and the last one (max 5 pages)
# First page starts at 1, and not 0. Last page is -1.
pages = [1, 2, -1]
request = vision.types.AnnotateFileRequest(
input_config=input_config,
features=[feature],
pages=pages)
response = client.batch_annotate_files(requests=[request])
for image_response in response.responses[0].responses:
for page in image_response.full_text_annotation.pages:
for block in page.blocks:
print(u'\nBlock confidence: {}\n'.format(block.confidence))
for par in block.paragraphs:
print(u'\tParagraph confidence: {}'.format(par.confidence))
for word in par.words:
symbol_texts = [symbol.text for symbol in word.symbols]
word_text = ''.join(symbol_texts)
print(u'\t\tWord text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print(u'\t\t\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_batch_annotate_files_beta]
# [START vision_batch_annotate_files_gcs_beta]
def detect_batch_annotate_files_uri(gcs_uri):
"""Detects document features in a PDF/TIFF/GIF file.
While your PDF file may have several pages,
this API can process up to 5 pages only.
Args:
    gcs_uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision_v1p4beta1 as vision
client = vision.ImageAnnotatorClient()
# Other supported mime_types: image/tiff' or 'image/gif'
mime_type = 'application/pdf'
input_config = vision.types.InputConfig(
gcs_source=vision.types.GcsSource(uri=gcs_uri), mime_type=mime_type)
feature = vision.types.Feature(
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
# Annotate the first two pages and the last one (max 5 pages)
# First page starts at 1, and not 0. Last page is -1.
pages = [1, 2, -1]
request = vision.types.AnnotateFileRequest(
input_config=input_config,
features=[feature],
pages=pages)
response = client.batch_annotate_files(requests=[request])
for image_response in response.responses[0].responses:
for page in image_response.full_text_annotation.pages:
for block in page.blocks:
print(u'\nBlock confidence: {}\n'.format(block.confidence))
for par in block.paragraphs:
print(u'\tParagraph confidence: {}'.format(par.confidence))
for word in par.words:
symbol_texts = [symbol.text for symbol in word.symbols]
word_text = ''.join(symbol_texts)
print(u'\t\tWord text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print(u'\t\t\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_batch_annotate_files_gcs_beta]
# [START vision_async_batch_annotate_images_beta]
def async_batch_annotate_images_uri(input_image_uri, output_uri):
"""Batch annotation of images on Google Cloud Storage asynchronously.
Args:
input_image_uri: The path to the image in Google Cloud Storage (gs://...)
output_uri: The path to the output path in Google Cloud Storage (gs://...)
"""
import re
from google.cloud import storage
from google.protobuf import json_format
from google.cloud import vision_v1p4beta1 as vision
client = vision.ImageAnnotatorClient()
# Construct the request for the image(s) to be annotated:
image_source = vision.types.ImageSource(image_uri=input_image_uri)
image = vision.types.Image(source=image_source)
features = [
vision.types.Feature(type=vision.enums.Feature.Type.LABEL_DETECTION),
vision.types.Feature(type=vision.enums.Feature.Type.TEXT_DETECTION),
vision.types.Feature(type=vision.enums.Feature.Type.IMAGE_PROPERTIES),
]
requests = [
vision.types.AnnotateImageRequest(image=image, features=features),
]
gcs_destination = vision.types.GcsDestination(uri=output_uri)
output_config = vision.types.OutputConfig(
gcs_destination=gcs_destination, batch_size=2)
operation = client.async_batch_annotate_images(
requests=requests, output_config=output_config)
print('Waiting for the operation to finish.')
operation.result(timeout=10000)
# Once the request has completed and the output has been
# written to Google Cloud Storage, we can list all the output files.
storage_client = storage.Client()
match = re.match(r'gs://([^/]+)/(.+)', output_uri)
bucket_name = match.group(1)
prefix = match.group(2)
bucket = storage_client.get_bucket(bucket_name)
# Lists objects with the given prefix.
blob_list = list(bucket.list_blobs(prefix=prefix))
print('Output files:')
for blob in blob_list:
print(blob.name)
# Processes the first output file from Google Cloud Storage.
# Since we specified batch_size=2, the first response contains
# annotations for the first two annotate image requests.
output = blob_list[0]
json_string = output.download_as_string()
response = json_format.Parse(json_string,
vision.types.BatchAnnotateImagesResponse())
# Prints the actual response for the first annotate image request.
print(u'The annotation response for the first request: {}'.format(
response.responses[0]))
# [END vision_async_batch_annotate_images_beta]
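def _split_gcs_uri_example(output_uri='gs://my-bucket/vision/output/'):
    # Illustrative helper only (not part of the original sample): it mirrors the regex
    # used in async_batch_annotate_images_uri to split a gs:// URI into a bucket name
    # and an object prefix. The default URI is a made-up placeholder.
    import re
    match = re.match(r'gs://([^/]+)/(.+)', output_uri)
    return match.group(1), match.group(2)  # ('my-bucket', 'vision/output/')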
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
object_parser = subparsers.add_parser(
'object-localization', help=localize_objects.__doc__)
object_parser.add_argument('path')
object_uri_parser = subparsers.add_parser(
'object-localization-uri', help=localize_objects_uri.__doc__)
object_uri_parser.add_argument('uri')
handwritten_parser = subparsers.add_parser(
'handwritten-ocr', help=detect_handwritten_ocr.__doc__)
handwritten_parser.add_argument('path')
handwritten_uri_parser = subparsers.add_parser(
'handwritten-ocr-uri', help=detect_handwritten_ocr_uri.__doc__)
handwritten_uri_parser.add_argument('uri')
batch_annotate_parser = subparsers.add_parser(
'batch-annotate-files', help=detect_batch_annotate_files.__doc__)
batch_annotate_parser.add_argument('path')
batch_annotate_uri_parser = subparsers.add_parser(
'batch-annotate-files-uri',
help=detect_batch_annotate_files_uri.__doc__)
batch_annotate_uri_parser.add_argument('uri')
batch_annotate__image_uri_parser = subparsers.add_parser(
'batch-annotate-images-uri',
help=async_batch_annotate_images_uri.__doc__)
batch_annotate__image_uri_parser.add_argument('uri')
batch_annotate__image_uri_parser.add_argument('output')
args = parser.parse_args()
if 'uri' in args.command:
if 'object-localization-uri' in args.command:
localize_objects_uri(args.uri)
elif 'handwritten-ocr-uri' in args.command:
detect_handwritten_ocr_uri(args.uri)
elif 'batch-annotate-files-uri' in args.command:
detect_batch_annotate_files_uri(args.uri)
elif 'batch-annotate-images-uri' in args.command:
async_batch_annotate_images_uri(args.uri, args.output)
else:
if 'object-localization' in args.command:
localize_objects(args.path)
elif 'handwritten-ocr' in args.command:
detect_handwritten_ocr(args.path)
elif 'batch-annotate-files' in args.command:
detect_batch_annotate_files(args.path)
|
the-stack_0_16478 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls))
] |
the-stack_0_16479 | import os
from src.antlr_utils import parse
from src.grammar_cnf import GrammarCNF
import pytest
@pytest.mark.parametrize("grammar", [GrammarCNF.from_txt("dbql_grammar.txt")])
@pytest.mark.parametrize("test_input, expected", [
(
'''
connect "azat/home/db" ;
select edges
from query term("s")*|term("b")+.term("c")?;
''',
True
),
(
'''
select edges from name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges from name "sparsegraph_256.txt" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal(set(1, 2, 3), set (4, 5, 6))
of name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal(set(1, 2, 3), set (4, 5, 6)) of name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select filter edges with
( u, l, v ) satisfies isStart(u) and isFinal(v)
from name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select filter edges with
( u, l, v ) satisfies labelIs("ar") or (isStart(u) and isFinal(v))
from name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select filter edges with
( u, l, v ) satisfies labelIs("ar") or (isStart(u) and isFinal(v))
from name "sparsegraph.txt" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from query term("s")*|term("b")+.term("c")? ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from name "sparsegraph" intersect query term("a") alt term("b") ;
''',
True
),
# graph expression with multiple levels:
(
'''
connect "home/db" ;
select count edges
from startAndFinal(set(1, 2, 3), set(4, 5, 6))
of name "fullgraph" intersect query term("a") star concat term("b");
''',
True
),
(
'''
connect "home/db" ;
select count edges
from startAndFinal(range(1, 3), set(4, 5, 6))
of name "fullgraph" intersect query term("a") star concat term("b");
''',
True
),
# edge expressions with multiple levels:
(
'''
connect "azat/home/db" ;
select count filter edges
with ( u, e, v ) satisfies not isStart(u) and isFinal(v)
from name "worstcase" ;
''',
True
),
(
'''
connect "azat/home/db" ;
define
term("a").var("s").term("b").var("s")
as "s" ;
define
term("a").var("s1").term("b")
as "s1" ;
select edges
from name "sparsegraph256.txt" ;
''',
True
),
(
'''
connect "azat/home/db" ;
define
term("a").var("s").term("b").var("s")
as "s" ;
select edges
from name "sparsegraph"
intersect query term("a") | term("b");
''',
True
),
# the rest are False test cases ( when grammar shouldn't accept )
# mismatched brackets in pattern:
(
'''
connect "azat/home/db" ;
select edges
from term("a")*.(term("b")?.var("s")+ ;
''',
False
),
(
'''
connect "azat/home/db" ;
select edges
from query term("a"*.term("b")?.var("s")+ ;
''',
False
),
# wrong data type in range:
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal ( range( "typo", 3 ), set(4, 5, 6) ) of name "sparsegraph" ;
''',
False
),
# wrong data type in set:
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal ( range(1, 3 ), set(typo, 5, 6)) of name "sparsegraph" ;
''',
False
),
# not specified term or var in pattern:
(
'''
connect "azat/home/db" ;
select edges
from query "a" star alt "a" opt concat "c" plus ;
''',
False
),
])
# tests graph DB query language
def test_grammar_antlr(test_input, expected, grammar):
assert expected == parse(test_input)
|
the-stack_0_16482 | from random import randint
from epidemic_simulation.simulation import SimulationManager
import pytest
@pytest.fixture
def test_data():
test_bodies=[{'position': (748, 634), 'state': 'INFECTIOUS'}, {'position': (1137, 351), 'state': 'SUSCEPTIBLE'}, {'position': (1017, 464), 'state': 'INFECTIOUS'}, {'position': (901, 368), 'state': 'INFECTIOUS'}, {'position': (1227, 549), 'state': 'REMOVED'}, {'position': (1193, 194), 'state': 'REMOVED'}, {'position': (654, 165), 'state': 'SUSCEPTIBLE'}, {'position': (1212, 260), 'state': 'INFECTIOUS'}, {'position': (820, 198), 'state': 'SUSCEPTIBLE'}, {'position': (826, 480), 'state': 'INFECTIOUS'}, {'position': (955, 58), 'state': 'REMOVED'}, {'position': (914, 78), 'state': 'INFECTIOUS'}, {'position': (1239, 86), 'state': 'SUSCEPTIBLE'}, {'position': (1132, 532), 'state': 'SUSCEPTIBLE'}, {'position': (1042, 41), 'state': 'REMOVED'}, {'position': (713, 590), 'state': 'SUSCEPTIBLE'}, {'position': (1169, 572), 'state': 'REMOVED'}, {'position': (778, 70), 'state': 'SUSCEPTIBLE'}, {'position': (906, 554), 'state': 'SUSCEPTIBLE'}, {'position': (797, 598), 'state': 'INFECTIOUS'}]
test_calc=SimulationManager(test_bodies,{'infection_r':100,'infection_p':0.99,'sickness_duration':6})
return test_calc
def test_infect_susceptibles(test_data):
SUS_bodies_pre_function = test_data.susceptibles
test_data.calculate_subjects_to_change()
to_change_bodies = test_data.subjects_to_change
test_data.infect_susceptibles()
SUS_bodies_post_function=[body for body in test_data.subjects if body['state']=='SUSCEPTIBLE']
assert len(SUS_bodies_pre_function)-len(to_change_bodies)==len(SUS_bodies_post_function)
|
the-stack_0_16483 | # encoding: UTF-8
__author__ = 'CHENXY'
# Mapping dictionary from C++ types to Python types
type_dict = {
'int': 'int',
'char': 'string',
'double': 'float',
'short': 'int'
}
def process_line(line):
"""处理每行"""
if '///' in line: # 注释
py_line = process_comment(line)
elif 'typedef' in line: # 类型申明
py_line = process_typedef(line)
elif '#define' in line: # 定义常量
py_line = process_define(line)
elif line == '\n': # 空行
py_line = line
else:
py_line = ''
return py_line
def process_comment(line):
"""处理注释"""
# if line[3] == '/':
# py_line = ''
# else:
# py_line = '#' + line[3:]
py_line = '#' + line[3:]
return py_line
def process_typedef(line):
"""处理类型申明"""
content = line.split(' ')
type_ = type_dict[content[1]]
keyword = content[2]
if '[' in keyword:
i = keyword.index('[')
keyword = keyword[:i]
else:
        keyword = keyword.replace(';\n', '') # strip the trailing semicolon and newline
py_line = 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
return py_line
def process_define(line):
"""处理定义常量"""
content = line.split(' ')
constant = content[1]
if len(content)>2:
value = content[-1]
py_line = 'defineDict["%s"] = %s' % (constant, value)
else:
py_line = ''
return py_line
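# Illustrative sketch only: the two header lines below are invented examples (they are
# not copied from the real USTPFtdcUserApiDataType.h) used to show the output format
# produced by the helpers above. The function is defined but never called here.
def _demo_translation():
    samples = [
        'typedef char TUstpFtdcSampleIDType[19];\n',
        '#define USTP_FTDC_SAMPLE_FLAG \'0\'\n',
    ]
    for sample in samples:
        print(repr(process_line(sample)))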
def main():
"""主函数"""
try:
fcpp = open('USTPFtdcUserApiDataType.h','r')
fpy = open('femas_data_type.py', 'w')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('defineDict = {}\n')
fpy.write('typedefDict = {}\n')
fpy.write('\n')
for line in fcpp:
py_line = process_line(line)
if py_line:
fpy.write(py_line.decode('gbk').encode('utf-8'))
fcpp.close()
fpy.close()
        print('data_type.py generation completed')
    except:
        print('error occurred while generating data_type.py')
if __name__ == '__main__':
main()
|
the-stack_0_16485 | import os
import pefile
import hashlib
import pickle
import time
import pandas as pd
from config import settings as cnst
from collections import OrderedDict
from utils import embedder
all_sections = OrderedDict({".header": 0})
def raw_pe_to_pkl(path, is_benign, unprocessed, processed):
list_idx = []
for src_dir, dirs, files in os.walk(path):
for file_ in files:
file_data = {}
try:
src_file = os.path.join(src_dir, file_)
src_file_size = os.stat(src_file).st_size
if src_file_size > cnst.MAX_FILE_SIZE_LIMIT:
print("Skipping as file size exceeds ", cnst.MAX_FILE_SIZE_LIMIT, "[ Unprocessed / Skipped Count: "+str(unprocessed)+"]")
unprocessed += 1
continue
else:
file_data["size_byte"] = src_file_size
pe = pefile.PE(src_file)
pe_name = "pe_" + str(processed) + ".pkl"
with open(src_file, 'rb') as fhandle:
file_byte_data = fhandle.read()
fid = [pe_name
, 0 if is_benign else 1
, file_
, hashlib.md5(file_byte_data).hexdigest()
, hashlib.sha1(file_byte_data).hexdigest()
, hashlib.sha256(file_byte_data).hexdigest()]
file_data["whole_bytes"] = list(file_byte_data)
wb_size = len(file_data["whole_bytes"])
file_data["whole_bytes_size"] = wb_size
file_data["benign"] = is_benign
# file_data["num_of_sections"] = pe.FILE_HEADER.NumberOfSections
file_data["section_info"] = {}
for section in pe.sections:
section_name = section.Name.strip(b'\x00').decode("utf-8").strip()
section_data = {}
section_data["section_data"] = list(section.get_data())
section_data["section_size_byte"] = section.SizeOfRawData
section_data["section_bounds"] = {}
section_data["section_bounds"]["start_offset"] = section.PointerToRawData
section_data["section_bounds"]["end_offset"] = section.PointerToRawData + section.SizeOfRawData - 1
file_data["section_info"][section_name] = section_data
file_data["section_info"][".header"] = {
"section_data": list(pe.header),
"section_size_byte": len(pe.header),
"section_bounds": {
"start_offset": 0,
"end_offset": len(pe.header)
}}
t1_pkl = {"whole_bytes": file_data["whole_bytes"], "benign": file_data["benign"]}
sections_end = 0
keys = file_data["section_info"].keys()
for key in keys:
if file_data["section_info"][key]['section_bounds']["end_offset"] > sections_end:
sections_end = file_data["section_info"][key]['section_bounds']["end_offset"]
if sections_end <= 0:
print("[OVERLAY DATA NOT ADDED] Invalid section end found - ", sections_end)
elif sections_end < wb_size - 1:
data = file_data["whole_bytes"][sections_end + 1:wb_size]
section_data = dict()
section_data["section_data"] = data
section_data["section_size_byte"] = len(data)
# section_bounds
section_data["section_bounds"] = {}
section_data["section_bounds"]["start_offset"] = sections_end + 1
section_data["section_bounds"]["end_offset"] = wb_size - 1
file_data["section_info"][cnst.TAIL] = section_data
del file_data["whole_bytes"]
t2_pkl = file_data
with open(t1_dst_folder + pe_name, "wb") as t1handle:
pickle.dump(t1_pkl, t1handle)
with open(t2_dst_folder + pe_name, "wb") as t2handle:
pickle.dump(t2_pkl, t2handle)
list_idx.append(fid)
processed += 1
for section in file_data["section_info"].keys():
if section in all_sections:
all_sections[section] += 1
else:
all_sections[section] = 1
all_sections['.header'] += 1
print("Total Count:", processed, "Unprocessed/Skipped:", unprocessed)
# Test saved data
# with open(pkl_file, "rb") as pkl:
# print(pickle.load(pkl)["num_of_sections"])
except Exception as e:
unprocessed += 1
print("parse failed . . . [ Unprocessed #:", str(unprocessed), "] [ ERROR: " + str(e) + " ] [ FILE: ", src_file, "] ")
if processed % 1000 == 0:
print("# files processed:", processed)
pd.DataFrame(list_idx).to_csv(cnst.DATASET_BACKUP_FILE, index=False, header=None, mode='a')
return unprocessed, processed
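def load_pickled_sample(pkl_path):
    """Minimal sketch (not called anywhere) for reading back one pickle written by
    raw_pe_to_pkl. The concrete file name, e.g. 'pe_0.pkl' under the t1/t2 folders,
    is an assumption for illustration only."""
    with open(pkl_path, "rb") as handle:
        sample = pickle.load(handle)
    return sample["benign"], list(sample.get("section_info", {}).keys())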
if __name__ == '__main__':
total_processed = 0
total_unprocessed = 0
start_time = time.time()
t1_dst_folder = cnst.PKL_SOURCE_PATH + "t1" + cnst.ESC
t2_dst_folder = cnst.PKL_SOURCE_PATH + "t2" + cnst.ESC
if not os.path.exists(t1_dst_folder):
os.makedirs(t1_dst_folder)
if not os.path.exists(t2_dst_folder):
os.makedirs(t2_dst_folder)
if os.path.exists(cnst.DATASET_BACKUP_FILE):
os.remove(cnst.DATASET_BACKUP_FILE)
for dir in cnst.RAW_SAMPLE_DIRS.keys():
total_unprocessed, total_processed = raw_pe_to_pkl(dir, cnst.RAW_SAMPLE_DIRS[dir], total_unprocessed, total_processed)
end_time = time.time()
print("\nData collection completed for all given paths.")
print("\nTotal:", total_processed+total_unprocessed, "\tprocessed: ", total_processed, "unprocessed:", total_unprocessed)
print("Time elapsed: {0:.3f}".format((end_time - start_time) / 60), "minute(s)")
# collect list of available sections from pkl and store mapping to their embedding
pd.DataFrame.from_dict([all_sections.keys()]).to_csv(cnst.PKL_SOURCE_PATH + cnst.ESC + 'available_sections.csv', index=False, header=None)
embedder.embed_section_names()
|
the-stack_0_16487 | from typing import List
class Solution:
def mostCompetitive(self, nums: List[int], k: int) -> List[int]:
St = []
remove = len(nums) - k
for num in nums:
while St and num < St[-1] and remove > 0:
St.pop()
remove -= 1
St.append(num)
return St[:len(St) - remove]
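if __name__ == '__main__':
    # Usage sketch with a made-up input: the monotonic stack above pops 3 and 5 once
    # the smaller 2 arrives, leaving the most competitive length-2 subsequence.
    print(Solution().mostCompetitive([3, 5, 2, 6], 2))  # [2, 6]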
|
the-stack_0_16488 | from cpc import CPCStateMachine as CPCwithTG
from cpc import CPCStateMachineL4 as CPCwithTGL4
from cic.states import CICStateMachineLvl2 as CICwithCG
from cic.states import CICStateMachineLvl4 as CICwithCGL4
from cic.states import CICStateMachineLvl1 as CICwithCGL1
from mp.state_machines import MPStateMachine as MPwithPG
from residual_learning import residual_state_machines as rsm
from cic import parameters_new_grasp as cic_parameters_new_grasp
from mp import states, base_policies
from cpc import parameters as cpc_params
from combined_code import mix_and_match as mm
state_machines = {
'mp-pg-l1': MPwithPG,
'mp-pg-l2': MPwithPG,
'mp-pg-l3': MPwithPG,
'mp-pg-l4': MPwithPG,
'cic-cg-l1': CICwithCGL1,
'cic-cg-l2': CICwithCG,
'cic-cg-l3': CICwithCG,
'cic-cg-l4': CICwithCGL4,
'cpc-tg-l1': CPCwithTG,
'cpc-tg-l2': CPCwithTG,
'cpc-tg-l3': CPCwithTG,
'cpc-tg-l4': CPCwithTGL4,
'residual-mp-pg-l3': rsm.ResidualMP_with_PG_LVL3,
'residual-mp-pg-l4': rsm.ResidualMP_with_PG_LVL4,
'residual-cic-cg-l3': rsm.ResidualCIC_with_CG_LVL3,
'residual-cic-cg-l4': rsm.ResidualCIC_with_CG_LVL4,
'residual-cpc-tg-l3': rsm.ResidualCPC_with_TG_LVL3,
'residual-cpc-tg-l4': rsm.ResidualCPC_with_TG_LVL4,
'mp-cg-l4': mm.MPwithCG,
'mp-tg-l4': mm.MPwithTG,
'cic-pg-l4': mm.CICwithPG,
'cic-tg-l4': mm.CICwithTG,
'cpc-pg-l4': mm.CPCwithPG,
'cpc-cg-l4': mm.CPCwithCG,
}
def create_state_machine(difficulty, method, env, residual=False, bo=False):
if residual:
if method not in ['mp-pg', 'cic-cg', 'cpc-tg'] and difficulty in [1, 2]:
raise ValueError("Residual policies are only available for methods "
"'mp-pg', 'cic-cg', 'cpc-tg' and difficulties 3 and 4."
f"Method: {method}, difficulty: {difficulty}.")
if bo:
if method not in ['mp-pg', 'cic-cg', 'cpc-tg'] and difficulty in [1, 2]:
raise ValueError("BO optimized parameters are only available for methods "
"'mp-pg', 'cic-cg', 'cpc-tg' and difficulties 3 and 4."
f"Method: {method}, difficulty: {difficulty}.")
if method not in ['mp-pg', 'cic-cg', 'cpc-tg'] and difficulty != 4:
raise ValueError(f'{method} is only implemented for difficulty 4.')
id = method + f'-l{difficulty}'
if residual:
id = 'residual-' + id
if id not in state_machines:
raise ValueError(
f"Unknown method: {method}. Options are: "
"mp-pg, cic-cg, cpc-tg, mp-cg, mp-tg, cic-pg, cic-tg, cpc-pg, cpc-cg."
)
if bo:
return create_bo_state_machine(id, env, difficulty)
else:
return state_machines[id](env)
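def _registry_key_example(method='mp-pg', difficulty=4, residual=False):
    # Sketch only (no environment needed, not used by the code above): it reproduces
    # the lookup-key scheme of create_state_machine, e.g. ('mp-pg', 4) -> 'mp-pg-l4'.
    key = method + f'-l{difficulty}'
    if residual:
        key = 'residual-' + key
    return key, key in state_machines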
def create_bo_state_machine(id, env, difficulty):
if 'mp-pg' in id:
return mp_bo_wrapper(id, env, difficulty)
elif 'cic-cg' in id:
return cic_bo_wrapper(id, env, difficulty)
else:
return cpc_bo_wrapper(id, env, difficulty)
def mp_bo_wrapper(id, env, difficulty):
if difficulty == 3:
if (env.simulation):
states.MoveToGoalState.BO_action_repeat = 10
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 184
else:
states.MoveToGoalState.BO_action_repeat = 26 # 12 # (int) [1, 100], default: 12
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 63 # 50 # (int) [10, 200], default: 50
elif difficulty == 4:
if (env.simulation):
states.MoveToGoalState.BO_action_repeat = 13
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 161
else:
states.MoveToGoalState.BO_action_repeat = 29 # 12 # (int) [1, 100], default: 12
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 182 # 50 # (int) [10, 200], default: 50
return state_machines[id](env)
def cic_bo_wrapper(id, env, difficulty):
if difficulty == 3:
parameters = cic_parameters_new_grasp.CubeLvl2Params(env)
elif difficulty == 4:
parameters = cic_parameters_new_grasp.CubeLvl4Params(env)
if (env.simulation):
parameters.orient_grasp_xy_lift = -0.01932485358
parameters.orient_grasp_h_lift = 0.0167107629776001
parameters.orient_gain_xy_lift_lift = 500.0
parameters.orient_gain_z_lift_lift = 974.5037078857422
parameters.orient_pos_gain_impedance_lift_lift = 0.015002169609069825
parameters.orient_force_factor_lift = 0.6673897802829742
parameters.orient_force_factor_rot_lift = 0.010000000000000002
parameters.orient_int_orient_gain = 0.0003590885430574417
parameters.orient_int_pos_gain = 0.008034629583358766
else:
parameters.orient_grasp_xy_lift = -0.03926035182
parameters.orient_grasp_h_lift = -0.005355795621871948
parameters.orient_gain_xy_lift_lift = 895.7465827465057
parameters.orient_gain_z_lift_lift = 1500.0
parameters.orient_pos_gain_impedance_lift_lift = 0.01427580736577511
parameters.orient_force_factor_lift = 0.49047523438930507
parameters.orient_force_factor_rot_lift = 0.0022044302672147753
parameters.orient_int_orient_gain = 0.027903699278831486
parameters.orient_int_pos_gain = 0.013680822849273681
return state_machines[id](env, parameters=parameters)
def cpc_bo_wrapper(id, env, difficulty):
if difficulty == 3:
parameters = cpc_params.CubeParams(env)
if (env.simulation):
parameters.interval = 9
parameters.gain_increase_factor = 1.1110639113783836
parameters.k_p_goal = 0.5408251136541367
parameters.k_p_into = 0.17404515892267228
parameters.k_i_goal = 0.00801944613456726
else:
parameters.interval = 3000 # 1800 # Range: 500 - 3000 not super important
parameters.gain_increase_factor = 1.7353031241893768 # 1.04 # Range: 1.01 - 2.0
parameters.k_p_goal = 0.5804646849632262 # 0.75 # Range: 0.3 - 1.5, same for l4
parameters.k_p_into = 0.1 # 0.2 # Range: 0.1 - 0.6, same for l4
parameters.k_i_goal = 0.00801206259727478 # 0.005 # Range: 0.0008 - 0.1, same for l4
if difficulty == 4:
parameters = cpc_params.CubeLvl4Params(env)
if (env.simulation):
parameters.interval = 10
parameters.gain_increase_factor = 1.2431243617534635
parameters.k_p_goal = 0.4393719419836998
parameters.k_p_into = 0.21185509711503983
parameters.k_i_goal = 0.008012341380119324
parameters.k_p_ang = 0.02238279849290848
parameters.k_i_ang = 0.0019905194759368898
else:
parameters.interval = 579
parameters.gain_increase_factor = 1.07002716961503
parameters.k_p_goal = 0.6011996507644652
parameters.k_p_into = 0.13088179603219033
parameters.k_i_goal = 0.006161301851272583
parameters.k_p_ang = 0.06160478860139847
parameters.k_i_ang = 0.0007573306798934938
return state_machines[id](env, parameters=parameters)
|
the-stack_0_16490 | """
(c) 2020 Spencer Rose, MIT Licence
Python Landscape Classification Tool (PyLC)
Reference: An evaluation of deep learning semantic segmentation
for land cover classification of oblique ground-based photography,
MSc. Thesis 2020.
<http://hdl.handle.net/1828/12156>
Spencer Rose <[email protected]>, June 2020
University of Victoria
Module: Profiler
File: profile.py
"""
import torch
import torch.nn.functional
from tqdm import tqdm
from utils.metrics import m2, jsd
import numpy as np
def get_profile(dset):
"""
Computes dataset statistical profile
- probability class distribution for database at db_path
- sample metrics and statistics
- image mean / standard deviation
Parameters
------
dset: MLPDataset
Image/mask dataset.
Returns
------
    meta: Metadata
        Updated metadata object for the dataset, with the attributes
        listed below populated.
    Attributes
    ---------
    meta.id: int
        Identifier.
    meta.ch: int
        Number of channels
    meta.schema: str
        Path to schema JSON file.
    meta.output: str
        Output path
    meta.n_samples
        Number of samples.
    meta.tile_size: int
        Tile size.
    meta.scales: list
        Image scaling factors.
    meta.stride: int
        Stride.
    meta.m2: float
        M2 variance metric.
    meta.jsd: float
        JSD coefficient.
    meta.px_mean: np.array
        Pixel mean value.
    meta.px_std: np.array
        Pixel standard deviation value.
    meta.px_dist: np.array
        Tile pixel frequency distribution.
    meta.tile_px_count: int
        Tile pixel count.
    meta.dset_px_dist: np.array
        Dataset pixel frequency distribution.
    meta.dset_px_count: int
        Dataset pixel count.
    meta.probs: np.array
        Dataset probability distribution.
    meta.weights:
        Dataset inverse weights.
"""
# update local metadata with dataset metadata
meta = dset.get_meta()
# get data loader
loader, n_batches = dset.loader(
batch_size=1,
n_workers=0,
drop_last=False
)
meta.n_samples = dset.size
# initialize global stats
px_dist = []
px_mean = torch.zeros(meta.ch)
px_std = torch.zeros(meta.ch)
# load images and masks
for i, (img, mask) in tqdm(enumerate(loader), total=n_batches, desc="Profiling: ", unit=' batches'):
# Compute dataset pixel global mean / standard deviation
if meta.ch == 3:
px_mean += torch.mean(img, (0, 2, 3))
px_std += torch.std(img, (0, 2, 3))
else:
px_mean += torch.mean(img)
px_std += torch.std(img)
# convert mask to one-hot encoding
mask_1hot = torch.nn.functional.one_hot(mask, num_classes=meta.n_classes).permute(0, 3, 1, 2)
px_dist_sample = [np.sum(mask_1hot.numpy(), axis=(2, 3))]
px_dist += px_dist_sample
# Divide by dataset size
px_mean /= meta.n_samples
px_std /= meta.n_samples
# Calculate sample pixel distribution / sample pixel count
px_dist = np.concatenate(px_dist)
# Calculate dataset pixel distribution / dataset total pixel count
dset_px_dist = np.sum(px_dist, axis=0)
dset_px_count = np.sum(dset_px_dist)
probs = dset_px_dist / dset_px_count
assert dset_px_count / meta.tile_px_count == meta.n_samples, \
"Pixel distribution does not match tile count."
# Calculate class weight balancing
weights = 1 / (np.log(1.02 + probs))
weights = weights / np.max(weights)
# initialize balanced distributions [n]
balanced_px_prob = np.empty(meta.n_classes)
balanced_px_prob.fill(1 / meta.n_classes)
# Calculate JSD and M2 metrics
meta.m2 = m2(probs, meta.n_classes)
meta.jsd = jsd(probs, balanced_px_prob)
# store metadata values
meta.px_mean = px_mean.tolist()
meta.px_std = px_std.tolist()
meta.px_dist = px_dist.tolist()
meta.tile_px_count = meta.tile_size * meta.tile_size
meta.probs = probs.tolist()
meta.weights = weights.tolist()
meta.dset_px_count = int(dset_px_count)
meta.dset_px_dist = dset_px_dist.tolist()
return meta
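def _class_weight_example():
    """A toy illustration (numbers are made up, not real dataset statistics) of the
    inverse log-frequency weighting computed in get_profile(): dominant classes are
    down-weighted while rare classes keep weights close to 1."""
    probs = np.array([0.70, 0.25, 0.05])
    weights = 1 / (np.log(1.02 + probs))
    return weights / np.max(weights)  # roughly [0.12, 0.28, 1.0]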
def print_meta(meta):
"""
Prints profile metadata to console
"""
hline = '\n' + '_' * 70
readout = '\n{}'.format('Profile Metadata')
readout += hline
readout += '\n {:30s}{}'.format('ID', meta.id)
readout += '\n {:30s}{} ({})'.format('Channels', meta.ch, 'Grayscale' if meta.ch == 1 else 'Colour')
readout += '\n {:30s}{}'.format('Classes', meta.n_classes)
readout += '\n {:30s}{}'.format('Samples', meta.n_samples)
readout += '\n {:30s}{}px x {}px'.format('Tile size (WxH)', meta.tile_size, meta.tile_size)
# RGB/Grayscale mean
px_mean = 'R{:3s} G{:3s} B{:3s}'.format(
str(round(meta.px_mean[0], 3)), str(round(meta.px_mean[1], 3)), str(round(meta.px_mean[2], 3))) \
if meta.ch == 3 else str(round(meta.px_mean[0], 3)
)
readout += '\n {:30s}{}'.format('Pixel mean', px_mean)
# RGB/Grayscale std-dev
px_std = 'R{:3s} G{:3s} B{:3s}'.format(
str(round(meta.px_std[0], 3)), str(round(meta.px_std[1], 3)), str(round(meta.px_std[2], 3))) \
if meta.ch == 3 else str(round(meta.px_std[0], 3))
readout += '\n {:30s}{}'.format('Pixel std-dev', px_std)
readout += '\n {:30s}{}'.format('M2', str(round(meta.m2, 3)))
readout += '\n {:30s}{}'.format('JSD', str(round(meta.jsd, 3)))
# palette
readout += '\n\n{} ({})'.format('Palette', meta.schema)
readout += hline
readout += '\n {:8s}{:25s}{:20s}{:15s}'.format('Code', 'Name', 'RGB', 'Hex')
readout += hline
for i, rgb_colour in enumerate(meta.palette_rgb):
rgb = 'R{:3s} G{:3s} B{:3s}'.format(
str(rgb_colour[0]), str(rgb_colour[1]), str(rgb_colour[2]))
readout += '\n {:8s}{:25s}{:20s}{:15s}'.format(
meta.class_codes[i], meta.class_labels[i], rgb, meta.palette_hex[i])
readout += hline
# class weights
readout += '\n\n{:30s}'.format('Distribution')
readout += hline
readout += '\n {:30s}{:10s}{:10s}'.format('Class', 'Probs', 'Weights')
readout += hline
for i, w in enumerate(meta.weights):
readout += '\n {:25s}{:10f} {:10f}'.format(
meta.class_labels[i], round(meta.probs[i], 4), round(w, 4))
readout += hline
readout += '\n{:25s}{:,}'.format('Tile pixel count', int(meta.tile_px_count))
readout += '\n{:25s}{:,}'.format('Dataset pixel count', int(meta.dset_px_count))
readout += hline + '\n'
print(readout)
|
the-stack_0_16491 | ## @ingroupMethods-Noise-Fidelity_One-Propeller
# noise_propeller_low_fidelty.py
#
# Created: Mar 2021, M. Clarke
# Modified: Jul 2021, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Data
import numpy as np
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools.decibel_arithmetic import pressure_ratio_to_SPL_arithmetic
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import SPL_arithmetic
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import SPL_spectra_arithmetic
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import compute_point_source_coordinates
from SUAVE.Methods.Noise.Fidelity_One.Propeller.compute_broadband_noise import compute_broadband_noise
from SUAVE.Methods.Noise.Fidelity_One.Propeller.compute_harmonic_noise import compute_harmonic_noise
# -------------------------------------------------------------------------------------
# Medium Fidelity Frequency Domain Methods for Acoustic Noise Prediction
# -------------------------------------------------------------------------------------
## @ingroupMethods-Noise-Fidelity_One-Propeller
def propeller_mid_fidelity(network,auc_opts,segment,settings,source = 'propeller'):
''' This computes the acoustic signature (sound pressure level, weighted sound pressure levels,
and frequency spectrums of a system of rotating blades (i.e. propellers and lift_rotors)
Assumptions:
None
Source:
None
Inputs:
network - vehicle energy network data structure [None]
segment - flight segment data structure [None]
        mic_loc - microphone location [m]
propeller - propeller class data structure [None]
auc_opts - data structure of acoustic data [None]
settings - accoustic settings [None]
Outputs:
Results.
SPL - SPL [dB]
SPL_dBA - dbA-Weighted SPL [dBA]
SPL_bb_spectrum - broadband contribution to total SPL [dB]
SPL_spectrum - 1/3 octave band SPL [dB]
SPL_tonal_spectrum - harmonic contribution to total SPL [dB]
SPL_bpfs_spectrum - 1/3 octave band harmonic contribution to total SPL [dB]
Properties Used:
N/A
'''
# unpack
conditions = segment.state.conditions
microphone_locations = conditions.noise.total_microphone_locations
angle_of_attack = conditions.aerodynamics.angle_of_attack
velocity_vector = conditions.frames.inertial.velocity_vector
freestream = conditions.freestream
harmonics = settings.harmonics
    if not network.identical_propellers:
        raise NotImplementedError('This method currently only works with identical propellers')
# Because the propellers are identical, get the first propellers results
auc_opts = auc_opts[list(auc_opts.keys())[0]]
# create data structures for computation
Noise = Data()
Results = Data()
# compute position vector of microphones
position_vector = compute_point_source_coordinates(conditions,network,microphone_locations,source)
# Harmonic Noise
compute_harmonic_noise(harmonics,freestream,angle_of_attack,position_vector,velocity_vector,network,auc_opts,settings,Noise,source)
# Broadband Noise
compute_broadband_noise(freestream,angle_of_attack,position_vector, velocity_vector,network,auc_opts,settings,Noise,source)
# Combine Rotational(periodic/tonal) and Broadband Noise
Noise.SPL_prop_bpfs_spectrum = Noise.SPL_r
Noise.SPL_prop_spectrum = 10*np.log10( 10**(Noise.SPL_prop_h_spectrum/10) + 10**(Noise.SPL_prop_bb_spectrum/10))
Noise.SPL_prop_spectrum[np.isnan(Noise.SPL_prop_spectrum)] = 0
# pressure ratios used to combine A weighted sound since decibel arithmetic does not work for
#broadband noise since it is a continuous spectrum
total_p_pref_dBA = np.concatenate((Noise.p_pref_r_dBA,Noise.p_pref_bb_dBA), axis=3)
Noise.SPL_dBA_prop = pressure_ratio_to_SPL_arithmetic(total_p_pref_dBA)
Noise.SPL_dBA_prop[np.isinf(Noise.SPL_dBA_prop)] = 0
Noise.SPL_dBA_prop[np.isnan(Noise.SPL_dBA_prop)] = 0
# Summation of spectra from propellers into into one SPL
Results.bpfs = Noise.f[:,0,0,0,:] # blade passing frequency harmonics
Results.SPL = SPL_arithmetic(SPL_arithmetic(Noise.SPL_prop_spectrum))
Results.SPL_dBA = SPL_arithmetic(Noise.SPL_dBA_prop)
Results.SPL_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_spectrum) # 1/3 octave band
Results.SPL_bpfs_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_bpfs_spectrum) # blade passing frequency specturm
Results.SPL_tonal_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_tonal_spectrum)
Results.SPL_bb_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_bb_spectrum)
auc_opts.bpfs = Results.bpfs
auc_opts.SPL = Results.SPL
auc_opts.SPL_dBA = Results.SPL_dBA
auc_opts.SPL_spectrum = Results.SPL_spectrum
auc_opts.SPL_bpfs_spectrum = Results.SPL_bpfs_spectrum
auc_opts.SPL_tonal_spectrum = Results.SPL_tonal_spectrum
auc_opts.SPL_bb_spectrum = Results.SPL_bb_spectrum
return Results
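def _decibel_addition_example(spl_tonal=80.0, spl_broadband=77.0):
    """A standalone sketch (the levels are arbitrary, not from a SUAVE case) of the
    energy summation used above to form SPL_prop_spectrum: sound pressure levels are
    combined by adding acoustic energies, not by adding decibels directly."""
    return 10 * np.log10(10 ** (spl_tonal / 10) + 10 ** (spl_broadband / 10))  # ~81.8 dB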
|
the-stack_0_16492 | from struct import pack, unpack
import hashlib
import sys
import traceback
from electrum import bitcoin
from electrum.bitcoin import TYPE_ADDRESS, int_to_hex, var_int
from electrum.i18n import _
from electrum.plugins import BasePlugin
from electrum.keystore import Hardware_KeyStore
from electrum.transaction import Transaction
from ..hw_wallet import HW_PluginBase
from electrum.util import print_error, is_verbose, bfh, bh2u, versiontuple
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
BTCHIP = True
BTCHIP_DEBUG = is_verbose
except ImportError:
BTCHIP = False
MSG_NEEDS_FW_UPDATE_GENERIC = _('Firmware version too old. Please update at') + \
' https://www.ledgerwallet.com'
MSG_NEEDS_FW_UPDATE_SEGWIT = _('Firmware version (or "Bitcoin" app) too old for Segwit support. Please update at') + \
' https://www.ledgerwallet.com'
MULTI_OUTPUT_SUPPORT = '1.1.4'
SEGWIT_SUPPORT = '1.1.10'
SEGWIT_SUPPORT_SPECIAL = '1.0.4'
class Ledger_Client():
def __init__(self, hidDevice):
self.dongleObject = btchip(hidDevice)
self.preflightDone = False
def is_pairable(self):
return True
def close(self):
self.dongleObject.dongle.close()
def timeout(self, cutoff):
pass
def is_initialized(self):
return True
def label(self):
return ""
def i4b(self, x):
return pack('>I', x)
def has_usable_connection_with_device(self):
try:
self.dongleObject.getFirmwareVersion()
except BaseException:
return False
return True
def test_pin_unlocked(func):
"""Function decorator to test the Ledger for being unlocked, and if not,
raise a human-readable exception.
"""
def catch_exception(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except BTChipException as e:
if e.sw == 0x6982:
raise Exception(_('Your Ledger is locked. Please unlock it.'))
else:
raise
return catch_exception
@test_pin_unlocked
def get_xpub(self, bip32_path, xtype):
self.checkDevice()
# bip32_path is of the form 44'/0'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
if xtype in ['p2wpkh', 'p2wsh'] and not self.supports_native_segwit():
raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
if xtype in ['p2wpkh-p2sh', 'p2wsh-p2sh'] and not self.supports_segwit():
raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
splitPath = bip32_path.split('/')
if splitPath[0] == 'm':
splitPath = splitPath[1:]
bip32_path = bip32_path[2:]
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
childnum = int(lastChild[0]) if len(lastChild) == 1 else 0x80000000 | int(lastChild[0])
xpub = bitcoin.serialize_xpub(xtype, nodeData['chainCode'], publicKey, depth, self.i4b(fingerprint), self.i4b(childnum))
return xpub
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
except BTChipException as e:
if e.sw == 0x6d00:
return False
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
except BTChipException as e:
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def supports_multi_output(self):
return self.multiOutputSupported
def supports_segwit(self):
return self.segwitSupported
def supports_native_segwit(self):
return self.nativeSegwitSupported
def perform_hw1_preflight(self):
try:
firmwareInfo = self.dongleObject.getFirmwareVersion()
firmware = firmwareInfo['version']
self.multiOutputSupported = versiontuple(firmware) >= versiontuple(MULTI_OUTPUT_SUPPORT)
self.nativeSegwitSupported = versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT)
self.segwitSupported = self.nativeSegwitSupported or (firmwareInfo['specialVersion'] == 0x20 and versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT_SPECIAL))
if not checkFirmware(firmwareInfo):
self.dongleObject.dongle.close()
raise Exception(MSG_NEEDS_FW_UPDATE_GENERIC)
try:
self.dongleObject.getOperationMode()
except BTChipException as e:
if (e.sw == 0x6985):
self.dongleObject.dongle.close()
self.handler.get_setup( )
# Acquire the new client on the next run
else:
raise e
if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler is not None):
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
if remaining_attempts != 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
except BTChipException as e:
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
if e.sw == 0x6f00 and e.message == 'Invalid channel':
# based on docs 0x6f00 might be a more general error, hence we also compare message to be sure
raise Exception("Invalid channel.\n"
"Please make sure that 'Browser support' is disabled on your device.")
raise e
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
if (e.sw == 0x6d00 or e.sw == 0x6700):
raise Exception(_("Device not in Bitcoin mode")) from e
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
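# Illustrative sketch (not used by the plugin): the parent fingerprint computed in
# Ledger_Client.get_xpub above follows BIP-32 -- the first four bytes of
# HASH160(compressed parent public key), read big-endian. Standalone version:
def _bip32_fingerprint_sketch(parent_pubkey_compressed):
    import hashlib
    from struct import unpack
    h = hashlib.new('ripemd160')
    h.update(hashlib.sha256(parent_pubkey_compressed).digest())
    return unpack(">I", h.digest()[0:4])[0]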
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode':0,'pair':''})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_derivation(self):
return self.derivation
def get_client(self):
return self.plugin.get_client(self).dongleObject
def get_client_electrum(self):
return self.plugin.get_client(self)
def give_error(self, message, clear_client = False):
print_error(message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise Exception(message)
def set_and_unset_signing(func):
"""Function decorator to set and unset self.signing."""
def wrapper(self, *args, **kwargs):
try:
self.signing = True
return func(self, *args, **kwargs)
finally:
self.signing = False
return wrapper
def address_id_stripped(self, address):
# Strip the leading "m/"
change, index = self.get_address_index(address)
derivation = self.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
return address_path[2:]
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for {}').format(self.device))
@set_and_unset_signing
def sign_message(self, sequence, message, password):
message = message.encode('utf8')
message_hash = hashlib.sha256(message).hexdigest().upper()
# prompt for the PIN before displaying the dialog if necessary
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message("Signing message ...\r\nMessage hash: "+message_hash)
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
pin = self.handler.get_auth( info ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = self.get_client().signMessageSign(pin)
except BTChipException as e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
elif e.sw == 0x6985: # cancelled by user
return b''
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return b''
except Exception as e:
self.give_error(e, True)
finally:
self.handler.finished()
# Parse the ASN.1 signature
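        # DER layout: 0x30 <total_len> 0x02 <rlen> <r> 0x02 <slen> <s>.
        # A 33-byte r or s carries a leading 0x00 pad (added when the high bit is
        # set) that is stripped below; the low bit of byte 0 carries the recovery id.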
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
# And convert it
return bytes([27 + 4 + (signature[0] & 0x01)]) + r + s
@set_and_unset_signing
def sign_transaction(self, tx, password):
if tx.is_complete():
return
client = self.get_client()
inputs = []
inputsPaths = []
pubKeys = []
chipInputs = []
redeemScripts = []
signatures = []
preparedTrustedInputs = []
changePath = ""
changeAmount = None
output = None
outputAmount = None
p2shTransaction = False
segwitTransaction = False
pin = ""
self.get_client() # prompt for the PIN before displaying the dialog if necessary
# Fetch inputs of the transaction to sign
derivations = self.get_tx_derivations(tx)
for txin in tx.inputs():
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] in ['p2sh']:
p2shTransaction = True
if txin['type'] in ['p2wpkh-p2sh', 'p2wsh-p2sh']:
if not self.get_client_electrum().supports_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
if txin['type'] in ['p2wpkh', 'p2wsh']:
if not self.get_client_electrum().supports_native_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
for i, x_pubkey in enumerate(x_pubkeys):
if x_pubkey in derivations:
signingPos = i
s = derivations.get(x_pubkey)
hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
redeemScript = Transaction.get_preimage_script(txin)
if txin.get('prev_tx') is None: # and not Transaction.is_segwit_input(txin):
# note: offline signing does not work atm even with segwit inputs for ledger
raise Exception(_('Offline signing with {} is not supported.').format(self.device))
inputs.append([txin['prev_tx'].raw, txin['prevout_n'], redeemScript, txin['prevout_hash'], signingPos, txin.get('sequence', 0xffffffff - 1) ])
inputsPaths.append(hwAddress)
pubKeys.append(pubkeys)
# Sanity check
if p2shTransaction:
for txin in tx.inputs():
if txin['type'] != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for txout in tx.outputs():
output_type, addr, amount = txout
txOutput += int_to_hex(amount, 8)
script = tx.pay_script(output_type, addr)
txOutput += var_int(len(script)//2)
txOutput += script
txOutput = bfh(txOutput)
# Recognize outputs - only one output and one change is authorized
if not p2shTransaction:
if not self.get_client_electrum().supports_multi_output():
if len(tx.outputs()) > 2:
self.give_error("Transaction with more than 2 outputs not supported")
for _type, address, amount in tx.outputs():
assert _type == TYPE_ADDRESS
info = tx.output_info.get(address)
if (info is not None) and len(tx.outputs()) > 1 \
and info[0][0] == 1: # "is on 'change' branch"
index, xpubs, m = info
changePath = self.get_derivation()[2:] + "/%d/%d"%index
changeAmount = amount
else:
output = address
outputAmount = amount
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
sequence = int_to_hex(utxo[5], 4)
if segwitTransaction:
txtmp = bitcoinTransaction(bfh(utxo[0]))
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
tmp += txtmp.outputs[utxo[1]].amount
chipInputs.append({'value' : tmp, 'witness' : True, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
elif not p2shTransaction:
txtmp = bitcoinTransaction(bfh(utxo[0]))
trustedInput = self.get_client().getTrustedInput(txtmp, utxo[1])
trustedInput['sequence'] = sequence
chipInputs.append(trustedInput)
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
chipInputs.append({'value' : tmp, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize()
self.get_client().enableAlternate2fa(False)
if segwitTransaction:
self.get_client().startUntrustedTransaction(True, inputIndex,
chipInputs, redeemScripts[inputIndex])
if changePath:
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
while inputIndex < len(inputs):
singleInput = [ chipInputs[inputIndex] ]
self.get_client().startUntrustedTransaction(False, 0,
singleInput, redeemScripts[inputIndex])
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
else:
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex])
if changePath:
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
if firstTransaction:
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
if pin != 'paired':
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
return
else:
traceback.print_exc(file=sys.stderr)
self.give_error(e, True)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.give_error(e, True)
finally:
self.handler.finished()
for i, txin in enumerate(tx.inputs()):
signingPos = inputs[i][4]
txin['signatures'][signingPos] = bh2u(signatures[i])
tx.raw = tx.serialize()
@set_and_unset_signing
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message(_("Showing address ..."))
segwit = Transaction.is_segwit_inputtype(txin_type)
segwitNative = txin_type == 'p2wpkh'
try:
client.getWalletPublicKey(address_path, showOnScreen=True, segwit=segwit, segwitNative=segwitNative)
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
pass
else:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
finally:
self.handler.finished()
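# Illustrative sketch (not used by the keystore): the DER-to-compact conversion done
# at the end of sign_message above, as a standalone function. It assumes the Ledger
# convention, mirrored from the code above, that the low bit of the first DER byte
# encodes the recovery id.
def _compact_sig_from_der_sketch(der_sig):
    rlen = der_sig[3]
    r = der_sig[4:4 + rlen]
    slen = der_sig[4 + rlen + 1]
    s = der_sig[4 + rlen + 2:]
    if rlen == 33:
        r = r[1:]  # drop the 0x00 padding byte
    if slen == 33:
        s = s[1:]
    # 27 base value, +4 to flag a compressed public key, plus the recovery id
    return bytes([27 + 4 + (der_sig[0] & 0x01)]) + r + s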
class LedgerPlugin(HW_PluginBase):
libraries_available = BTCHIP
keystore_class = Ledger_KeyStore
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001) # Nano-S
]
def __init__(self, parent, config, name):
self.segwit = config.get("segwit")
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def get_btchip_device(self, device):
ledger = False
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c:
ledger = True
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c:
ledger = True
if device.product_key[0] == 0x2c97:
if device.interface_number == 0 or device.usage_page == 0xffa0:
ledger = True
else:
return None # non-compatible interface of a Nano S or Blue
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
def create_client(self, device, handler):
if handler:
self.handler = handler
client = self.get_btchip_device(device)
if client is not None:
client = Ledger_Client(client)
return client
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.get_xpub("m/44'/8'", 'standard') # TODO replace by direct derivation once Nano S > 1.1
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.checkDevice()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
#if client:
# client.used()
if client is not None:
client.checkDevice()
return client
def show_address(self, wallet, address):
sequence = wallet.get_address_index(address)
txin_type = wallet.get_txin_type(address)
wallet.get_keystore().show_address(sequence, txin_type)
|
the-stack_0_16493 | import os
import random
import string
import time
from collections import defaultdict
from contextlib import contextmanager
import pendulum
import pytest
from dagster import (
Any,
Field,
ModeDefinition,
daily_partitioned_config,
fs_io_manager,
graph,
pipeline,
repository,
solid,
)
from dagster.core.definitions import Partition, PartitionSetDefinition
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.execution.api import execute_pipeline
from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill
from dagster.core.host_representation import (
ExternalRepositoryOrigin,
InProcessRepositoryLocationOrigin,
)
from dagster.core.storage.pipeline_run import PipelineRunStatus, RunsFilter
from dagster.core.storage.tags import BACKFILL_ID_TAG, PARTITION_NAME_TAG, PARTITION_SET_TAG
from dagster.core.test_utils import create_test_daemon_workspace, instance_for_test
from dagster.core.workspace.load_target import PythonFileTarget
from dagster.daemon import get_default_daemon_logger
from dagster.daemon.backfill import execute_backfill_iteration
from dagster.seven import IS_WINDOWS, get_system_temp_directory
from dagster.utils import touch_file
from dagster.utils.error import SerializableErrorInfo
default_mode_def = ModeDefinition(resource_defs={"io_manager": fs_io_manager})
def _failure_flag_file():
return os.path.join(get_system_temp_directory(), "conditionally_fail")
def _step_events(instance, run):
events_by_step = defaultdict(set)
logs = instance.all_logs(run.run_id)
for record in logs:
if not record.is_dagster_event or not record.step_key:
continue
        events_by_step[record.step_key].add(record.dagster_event.event_type_value)
return events_by_step
@solid
def always_succeed(_):
return 1
@graph()
def comp_always_succeed():
always_succeed()
@daily_partitioned_config(start_date="2021-05-05")
def my_config(_start, _end):
return {}
always_succeed_job = comp_always_succeed.to_job(config=my_config)
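# Note: to_job with a daily_partitioned_config implicitly registers a partition set
# named "comp_always_succeed_partition_set"; test_backfill_from_partitioned_job below
# resolves it by that name.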
@solid
def fail_solid(_):
raise Exception("blah")
@solid
def conditionally_fail(_, _input):
if os.path.isfile(_failure_flag_file()):
raise Exception("blah")
return 1
@solid
def after_failure(_, _input):
return 1
@pipeline(mode_defs=[default_mode_def])
def the_pipeline():
always_succeed()
@pipeline(mode_defs=[default_mode_def])
def conditional_failure_pipeline():
after_failure(conditionally_fail(always_succeed()))
@pipeline(mode_defs=[default_mode_def])
def partial_pipeline():
always_succeed.alias("step_one")()
always_succeed.alias("step_two")()
always_succeed.alias("step_three")()
@pipeline(mode_defs=[default_mode_def])
def parallel_failure_pipeline():
fail_solid.alias("fail_one")()
fail_solid.alias("fail_two")()
fail_solid.alias("fail_three")()
always_succeed.alias("success_four")()
@solid(config_schema=Field(Any))
def config_solid(_):
return 1
@pipeline(mode_defs=[default_mode_def])
def config_pipeline():
config_solid()
simple_partition_set = PartitionSetDefinition(
name="simple_partition_set",
pipeline_name="the_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
conditionally_fail_partition_set = PartitionSetDefinition(
name="conditionally_fail_partition_set",
pipeline_name="conditional_failure_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
partial_partition_set = PartitionSetDefinition(
name="partial_partition_set",
pipeline_name="partial_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
parallel_failure_partition_set = PartitionSetDefinition(
name="parallel_failure_partition_set",
pipeline_name="parallel_failure_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
def _large_partition_config(_):
REQUEST_CONFIG_COUNT = 50000
def _random_string(length):
return "".join(random.choice(string.ascii_lowercase) for x in range(length))
return {
"solids": {
"config_solid": {
"config": {
"foo": {
_random_string(10): _random_string(20) for i in range(REQUEST_CONFIG_COUNT)
}
}
}
}
}
large_partition_set = PartitionSetDefinition(
name="large_partition_set",
pipeline_name="config_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
run_config_fn_for_partition=_large_partition_config,
)
def _unloadable_partition_set_origin():
working_directory = os.path.dirname(__file__)
recon_repo = ReconstructableRepository.for_file(__file__, "doesnt_exist", working_directory)
return ExternalRepositoryOrigin(
InProcessRepositoryLocationOrigin(recon_repo), "fake_repository"
).get_partition_set_origin("doesnt_exist")
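# The origin built above points at a repository attribute that does not exist;
# test_unloadable_backfill uses it to drive the daemon down its error path
# (the backfill ends up in BulkActionStatus.FAILED with a SerializableErrorInfo).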
@repository
def the_repo():
return [
the_pipeline,
conditional_failure_pipeline,
partial_pipeline,
config_pipeline,
simple_partition_set,
conditionally_fail_partition_set,
partial_partition_set,
large_partition_set,
always_succeed_job,
parallel_failure_partition_set,
parallel_failure_pipeline,
]
@contextmanager
def default_repo():
load_target = workspace_load_target()
origin = load_target.create_origins()[0]
with origin.create_single_location() as location:
yield location.get_repository("the_repo")
def workspace_load_target():
return PythonFileTarget(
python_file=__file__,
attribute=None,
working_directory=os.path.dirname(__file__),
location_name="test_location",
)
@contextmanager
def instance_for_context(external_repo_context, overrides=None):
with instance_for_test(overrides) as instance:
with create_test_daemon_workspace(
workspace_load_target=workspace_load_target()
) as workspace:
with external_repo_context() as external_repo:
yield (instance, workspace, external_repo)
def step_did_not_run(instance, run, step_name):
step_events = _step_events(instance, run)[step_name]
return len(step_events) == 0
def step_succeeded(instance, run, step_name):
step_events = _step_events(instance, run)[step_name]
return "STEP_SUCCESS" in step_events
def step_failed(instance, run, step_name):
step_events = _step_events(instance, run)[step_name]
return "STEP_FAILURE" in step_events
def wait_for_all_runs_to_start(instance, timeout=10):
start_time = time.time()
while True:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for runs to start")
time.sleep(0.5)
pending_states = [
PipelineRunStatus.NOT_STARTED,
PipelineRunStatus.STARTING,
PipelineRunStatus.STARTED,
]
pending_runs = [run for run in instance.get_runs() if run.status in pending_states]
if len(pending_runs) == 0:
break
def wait_for_all_runs_to_finish(instance, timeout=10):
start_time = time.time()
FINISHED_STATES = [
PipelineRunStatus.SUCCESS,
PipelineRunStatus.FAILURE,
PipelineRunStatus.CANCELED,
]
while True:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for runs to start")
time.sleep(0.5)
not_finished_runs = [
run for run in instance.get_runs() if run.status not in FINISHED_STATES
]
if len(not_finished_runs) == 0:
break
def test_simple_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("simple_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "simple"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert two.tags[BACKFILL_ID_TAG] == "simple"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert three.tags[BACKFILL_ID_TAG] == "simple"
assert three.tags[PARTITION_NAME_TAG] == "three"
def test_canceled_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("simple_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
iterator = execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
next(iterator)
assert instance.get_runs_count() == 1
backfill = instance.get_backfills()[0]
assert backfill.status == BulkActionStatus.REQUESTED
instance.update_backfill(backfill.with_status(BulkActionStatus.CANCELED))
list(iterator)
backfill = instance.get_backfill(backfill.backfill_id)
assert backfill.status == BulkActionStatus.CANCELED
assert instance.get_runs_count() == 1
def test_failure_backfill():
output_file = _failure_flag_file()
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set(
"conditionally_fail_partition_set"
)
instance.add_backfill(
PartitionBackfill(
backfill_id="shouldfail",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
try:
touch_file(output_file)
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
finally:
os.remove(output_file)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "shouldfail"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == PipelineRunStatus.FAILURE
assert step_succeeded(instance, one, "always_succeed")
assert step_failed(instance, one, "conditionally_fail")
assert step_did_not_run(instance, one, "after_failure")
assert two.tags[BACKFILL_ID_TAG] == "shouldfail"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == PipelineRunStatus.FAILURE
assert step_succeeded(instance, two, "always_succeed")
assert step_failed(instance, two, "conditionally_fail")
assert step_did_not_run(instance, two, "after_failure")
assert three.tags[BACKFILL_ID_TAG] == "shouldfail"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == PipelineRunStatus.FAILURE
assert step_succeeded(instance, three, "always_succeed")
assert step_failed(instance, three, "conditionally_fail")
assert step_did_not_run(instance, three, "after_failure")
instance.add_backfill(
PartitionBackfill(
backfill_id="fromfailure",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=True,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert not os.path.isfile(_failure_flag_file())
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 6
from_failure_filter = RunsFilter(tags={BACKFILL_ID_TAG: "fromfailure"})
assert instance.get_runs_count(filters=from_failure_filter) == 3
runs = instance.get_runs(filters=from_failure_filter)
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "fromfailure"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == PipelineRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
assert two.tags[BACKFILL_ID_TAG] == "fromfailure"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == PipelineRunStatus.SUCCESS
        assert step_did_not_run(instance, two, "always_succeed")
        assert step_succeeded(instance, two, "conditionally_fail")
        assert step_succeeded(instance, two, "after_failure")
        assert three.tags[BACKFILL_ID_TAG] == "fromfailure"
        assert three.tags[PARTITION_NAME_TAG] == "three"
        assert three.status == PipelineRunStatus.SUCCESS
        assert step_did_not_run(instance, three, "always_succeed")
        assert step_succeeded(instance, three, "conditionally_fail")
        assert step_succeeded(instance, three, "after_failure")
@pytest.mark.skipif(IS_WINDOWS, reason="flaky in windows")
def test_partial_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("partial_partition_set")
# create full runs, where every step is executed
instance.add_backfill(
PartitionBackfill(
backfill_id="full",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "full"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, one, "step_one")
assert step_succeeded(instance, one, "step_two")
assert step_succeeded(instance, one, "step_three")
assert two.tags[BACKFILL_ID_TAG] == "full"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, two, "step_one")
assert step_succeeded(instance, two, "step_two")
assert step_succeeded(instance, two, "step_three")
assert three.tags[BACKFILL_ID_TAG] == "full"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, three, "step_one")
assert step_succeeded(instance, three, "step_two")
assert step_succeeded(instance, three, "step_three")
# delete one of the runs, the partial reexecution should still succeed because the steps
# can be executed independently, require no input/output config
instance.delete_run(one.run_id)
assert instance.get_runs_count() == 2
# create partial runs
instance.add_backfill(
PartitionBackfill(
backfill_id="partial",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=["step_one"],
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 5
partial_filter = RunsFilter(tags={BACKFILL_ID_TAG: "partial"})
assert instance.get_runs_count(filters=partial_filter) == 3
runs = instance.get_runs(filters=partial_filter)
three, two, one = runs
assert one.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, one, "step_one")
assert step_did_not_run(instance, one, "step_two")
assert step_did_not_run(instance, one, "step_three")
assert two.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, two, "step_one")
assert step_did_not_run(instance, two, "step_two")
assert step_did_not_run(instance, two, "step_three")
assert three.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, three, "step_one")
assert step_did_not_run(instance, three, "step_two")
assert step_did_not_run(instance, three, "step_three")
def test_large_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("large_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
def test_unloadable_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
_external_repo,
):
unloadable_origin = _unloadable_partition_set_origin()
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=unloadable_origin,
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 0
backfill = instance.get_backfill("simple")
assert backfill.status == BulkActionStatus.FAILED
assert isinstance(backfill.error, SerializableErrorInfo)
def test_backfill_from_partitioned_job():
partition_name_list = [
partition.name for partition in my_config.partitions_def.get_partitions()
]
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set(
"comp_always_succeed_partition_set"
)
instance.add_backfill(
PartitionBackfill(
backfill_id="partition_schedule_from_job",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=partition_name_list[:3],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
runs = reversed(instance.get_runs())
for idx, run in enumerate(runs):
assert run.tags[BACKFILL_ID_TAG] == "partition_schedule_from_job"
assert run.tags[PARTITION_NAME_TAG] == partition_name_list[idx]
assert run.tags[PARTITION_SET_TAG] == "comp_always_succeed_partition_set"
def test_backfill_from_failure_for_subselection():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
partition = parallel_failure_partition_set.get_partition("one")
run_config = parallel_failure_partition_set.run_config_for_partition(partition)
tags = parallel_failure_partition_set.tags_for_partition(partition)
external_partition_set = external_repo.get_external_partition_set(
"parallel_failure_partition_set"
)
execute_pipeline(
parallel_failure_pipeline,
run_config=run_config,
tags=tags,
instance=instance,
solid_selection=["fail_three", "success_four"],
raise_on_error=False,
)
assert instance.get_runs_count() == 1
wait_for_all_runs_to_finish(instance)
run = instance.get_runs()[0]
assert run.status == PipelineRunStatus.FAILURE
instance.add_backfill(
PartitionBackfill(
backfill_id="fromfailure",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one"],
from_failure=True,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 2
run = instance.get_runs(limit=1)[0]
assert run.solids_to_execute
assert run.solid_selection
assert len(run.solids_to_execute) == 2
assert len(run.solid_selection) == 2
|
the-stack_0_16494 | # coding=utf-8
#
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from f5.bigip.resource import AsmResource
from f5.bigip.resource import Collection
from icontrol.exceptions import iControlUnexpectedHTTPError
class Signatures_s(Collection):
"""BIG-IP® ASM Signatures collection."""
def __init__(self, asm):
super(Signatures_s, self).__init__(asm)
self._meta_data['object_has_stats'] = False
self._meta_data['allowed_lazy_attributes'] = [Signature]
self._meta_data['attribute_registry'] = {
'tm:asm:signatures:signaturestate': Signature
}
class Signature(AsmResource):
"""BIG-IP® ASM Signature resource.
note:: Only user created signatures can be modified/deleted.
Default signatures are READ-ONLY
"""
def __init__(self, signatures_s):
super(Signature, self).__init__(signatures_s)
self._meta_data['required_json_kind'] = 'tm:asm:signatures:signaturestate'
self._meta_data['required_creation_parameters'].update(
('attackTypeReference', 'rule')
)
def create(self, **kwargs):
"""Custom creation logic to handle edge cases
This shouldn't be needed, but ASM has a tendency to raise various errors that
are painful to handle from a customer point-of-view. These errors are especially
pronounced when doing things concurrently with asm.
The error itself are described in their exception handler
To address these failure, we try a number of exception handling cases to catch
and reliably deal with the error.
:param kwargs:
:return:
"""
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._create(**kwargs)
except iControlUnexpectedHTTPError as ex:
if self._check_exception(ex):
continue
else:
raise
raise ex
def delete(self, **kwargs):
"""Custom deletion logic to handle edge cases
This shouldn't be needed, but ASM has a tendency to raise various errors that
are painful to handle from a customer point-of-view. These errors are especially
pronounced when doing things concurrently with asm.
The error itself are described in their exception handler
To address these failure, we try a number of exception handling cases to catch
and reliably deal with the error.
:param kwargs:
:return:
"""
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._delete(**kwargs)
except iControlUnexpectedHTTPError as ex:
if self._check_exception(ex):
continue
else:
raise
raise ex
def modify(self, **kwargs):
        ex = iControlUnexpectedHTTPError(
            "Failed to modify the signature"
        )
        for _ in range(0, 30):
            try:
                return self._modify(**kwargs)
            except iControlUnexpectedHTTPError as err:
                # keep a reference; 'err' is unbound once the except block exits
                ex = err
                if self._check_exception(err):
                    continue
                else:
                    raise
        raise ex
def update(self, **kwargs):
        ex = iControlUnexpectedHTTPError(
            "Failed to update the signature"
        )
        for _ in range(0, 30):
            try:
                return self._update(**kwargs)
            except iControlUnexpectedHTTPError as err:
                # keep a reference; 'err' is unbound once the except block exits
                ex = err
                if self._check_exception(err):
                    continue
                else:
                    raise
        raise ex
def _check_exception(self, ex):
"""Check for exceptions in action responses
In versions of ASM < v12, the REST API is quite unstable and therefore
needs some additional supporting retries to ensure that actions function
as expected. In particular versions 11.5.4 and 11.6.0 are affected.
This method handles checking for various exceptions and allowing the
given command to retry itself.
:param ex:
:return:
"""
retryable = [
# iControlUnexpectedHTTPError: 500 Unexpected Error: Internal Server Error ...
# {
# "code": 500,
# "message": "Could not add_signature the Attack Signature. "
# "Failed on insert to PLC.NEGSIG_SET_SIGNATURES "
# "(DBD::mysql::db do failed: Lock wait timeout exceeded; "
# "try restarting transaction)
#
'Lock wait timeout exceeded',
# {
# "code": 500,
# "message": "DBD::mysql::db do failed: Deadlock found when "
# "trying to get lock; try restarting transaction"
#
'Deadlock found when',
# {
# "code": 404,
# "message": "Could not add_signature the Attack Signature, "
# "internal data inconsistency was detected.",
'internal data inconsistency',
]
if any(x in str(ex) for x in retryable):
time.sleep(3)
return True
elif 'errorStack' in ex:
stack = ' '.join(ex['errorStack'])
if any(x in stack for x in retryable):
time.sleep(3)
return True
else:
return False
else:
return False
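# Illustrative usage sketch (not part of this module): reaching the ASM signatures
# collection through f5-common-python and creating a user-defined signature. The
# host, credentials, rule text and attack-type link below are placeholders.
def _example_signature_usage():  # pragma: no cover - documentation only
    from f5.bigip import ManagementRoot
    mgmt = ManagementRoot('bigip.example.com', 'admin', 'secret')
    existing = mgmt.tm.asm.signatures_s.get_collection()
    new_sig = mgmt.tm.asm.signatures_s.signature.create(
        name='example_signature',
        rule='content:"example-attack"; depth:32;',
        attackTypeReference={'link': 'https://localhost/mgmt/tm/asm/attack-types/<id>'})
    return existing, new_sig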
|
the-stack_0_16495 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import inspect
from pandapower.auxiliary import _check_bus_index_and_print_warning_if_high, \
_check_gen_index_and_print_warning_if_high, _init_runpp_options, _init_rundcopp_options, \
_init_rundcpp_options, _init_runopp_options, _internal_stored
from pandapower.opf.validate_opf_input import _check_necessary_opf_parameters
from pandapower.optimal_powerflow import _optimal_powerflow
from pandapower.powerflow import _powerflow, _recycled_powerflow
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def set_user_pf_options(net, overwrite=False, **kwargs):
"""
This function sets the 'user_pf_options' dict for net. These options overrule
net.__internal_options once they are added to net. These options are used in configuration of
load flow calculation.
At the same time, user-defined arguments for pandapower.runpp() always have a higher priority.
To remove user_pf_options, set overwrite=True and provide no additional arguments
:param net: pandaPower network
:param overwrite: specifies whether the user_pf_options is removed before setting new options
:param kwargs: load flow options, e. g. tolerance_mva = 1e-3
:return: None
"""
standard_parameters = ['calculate_voltage_angles', 'trafo_model', 'check_connectivity', 'mode',
'copy_constraints_to_ppc', 'switch_rx_ratio', 'enforce_q_lims',
'recycle', 'voltage_depend_loads', 'consider_line_temperature', 'delta',
'trafo3w_losses', 'init_vm_pu', 'init_va_degree', 'init_results',
'tolerance_mva', 'trafo_loading', 'numba', 'ac', 'algorithm',
'max_iteration', 'v_debug', 'run_control']
if overwrite or 'user_pf_options' not in net.keys():
net['user_pf_options'] = dict()
net.user_pf_options.update({key: val for key, val in kwargs.items()
if key in standard_parameters})
additional_kwargs = {key: val for key, val in kwargs.items()
if key not in standard_parameters}
# this part is to inform user and to make typos in parameters visible
if len(additional_kwargs) > 0:
logger.info('parameters %s are not in the list of standard options' % list(
additional_kwargs.keys()))
net.user_pf_options.update(additional_kwargs)
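# Illustrative usage sketch (not part of the pandapower API surface; the example
# network builder is an assumption -- any pandapower net works): store persistent
# options on the net, then run a power flow that picks them up.
def _example_user_pf_options():  # pragma: no cover - documentation only
    import pandapower.networks as nw
    net = nw.example_simple()
    set_user_pf_options(net, tolerance_mva=1e-3, calculate_voltage_angles=True)
    runpp(net)  # explicit runpp() arguments would still overrule the stored options
    return net.res_bus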
def runpp(net, algorithm='nr', calculate_voltage_angles="auto", init="auto",
max_iteration="auto", tolerance_mva=1e-8, trafo_model="t",
trafo_loading="current", enforce_q_lims=False, check_connectivity=True,
voltage_depend_loads=True, consider_line_temperature=False,
run_control=False, **kwargs):
"""
Runs a power flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**algorithm** (str, "nr") - algorithm that is used to solve the power flow problem.
The following algorithms are available:
- "nr" Newton-Raphson (pypower implementation with numba accelerations)
- "iwamoto_nr" Newton-Raphson with Iwamoto multiplier (maybe slower than NR but more robust)
- "bfsw" backward/forward sweep (specially suited for radial and weakly-meshed networks)
- "gs" gauss-seidel (pypower implementation)
- "fdbx" fast-decoupled (pypower implementation)
- "fdxb" fast-decoupled (pypower implementation)
**calculate_voltage_angles** (bool, "auto") - consider voltage angles in loadflow calculation
If True, voltage angles of ext_grids and transformer shifts are considered in the
loadflow calculation. Considering the voltage angles is only necessary in meshed
networks that are usually found in higher voltage levels. calculate_voltage_angles
in "auto" mode defaults to:
- True, if the network voltage level is above 70 kV
- False otherwise
The network voltage level is defined as the maximum rated voltage of any bus in the network that
is connected to a line.
**init** (str, "auto") - initialization method of the loadflow
pandapower supports four methods for initializing the loadflow:
- "auto" - init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
- "flat"- flat start with voltage of 1.0pu and angle of 0° at all PQ-buses and 0° for PV buses as initial solution
- "dc" - initial DC loadflow before the AC loadflow. The results of the DC loadflow are used as initial solution for the AC loadflow.
- "results" - voltage vector of last loadflow from net.res_bus is used as initial solution. This can be useful to accelerate convergence in iterative loadflows like time series calculations.
Considering the voltage angles might lead to non-convergence of the power flow in flat start.
That is why in "auto" mode, init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
**max_iteration** (int, "auto") - maximum number of iterations carried out in the power flow algorithm.
In "auto" mode, the default value depends on the power flow solver:
- 10 for "nr"
- 100 for "bfsw"
- 1000 for "gs"
- 30 for "fdbx"
- 30 for "fdxb"
**tolerance_mva** (float, 1e-8) - loadflow termination condition referring to P / Q mismatch of node power in MVA
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model.
- "pi" - transformer is modeled as equivalent PI-model. This is not recommended, since it is less exact than the T-model. It is only recommended for valdiation with other software that uses the pi-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**enforce_q_lims** (bool, False) - respect generator reactive power limits
If True, the reactive power limits in net.gen.max_q_mvar/min_q_mvar are respected in the
loadflow. This is done by running a second loadflow if reactive power limits are
violated at any generator, so that the runtime for the loadflow will increase if reactive
power has to be curtailed.
Note: enforce_q_lims only works if algorithm="nr"!
**check_connectivity** (bool, True) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is perfomed.
If check finds unsupplied buses, they are set out of service in the ppc
**voltage_depend_loads** (bool, True) - consideration of voltage-dependent loads. If False, net.load.const_z_percent and net.load.const_i_percent are not considered, i.e. net.load.p_mw and net.load.q_mvar are considered as constant-power loads.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided
line temperature. If True, net.line must contain a column "temperature_degree_celsius".
The temperature dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**KWARGS:
**numba** (bool, True) - Activation of numba JIT compiler in the newton solver
If set to True, the numba JIT compiler is used to generate matrices for the powerflow,
which leads to significant speed improvements.
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**delta_q** - Reactive power tolerance for option "enforce_q_lims" in kvar - helps convergence in some cases.
**trafo3w_losses** - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**v_debug** (bool, False) - if True, voltage values in each newton-raphson iteration are logged in the ppc
**init_vm_pu** (string/float/array/Series, None) - Allows to define initialization specifically for voltage magnitudes. Only works with init == "auto"!
- "auto": all buses are initialized with the mean value of all voltage controlled elements in the grid
- "flat" for flat start from 1.0
- "results": voltage magnitude vector is taken from result table
- a float with which all voltage magnitudes are initialized
- an iterable with a voltage magnitude value for each bus (length and order has to match with the buses in net.bus)
- a pandas Series with a voltage magnitude value for each bus (indexes have to match the indexes in net.bus)
**init_va_degree** (string/float/array/Series, None) - Allows to define initialization specifically for voltage angles. Only works with init == "auto"!
- "auto": voltage angles are initialized from DC power flow if angles are calculated or as 0 otherwise
- "dc": voltage angles are initialized from DC power flow
- "flat" for flat start from 0
- "results": voltage angle vector is taken from result table
- a float with which all voltage angles are initialized
- an iterable with a voltage angle value for each bus (length and order has to match with the buses in net.bus)
- a pandas Series with a voltage angle value for each bus (indexes have to match the indexes in net.bus)
**recycle** (dict, none) - Reuse of internal powerflow variables for time series calculation
Contains a dict with the following parameters:
bus_pq: If True PQ values of buses are updated
trafo: If True trafo relevant variables, e.g., the Ybus matrix, is recalculated
gen: If True Sbus and the gen table in the ppc are recalculated
**neglect_open_switch_branches** (bool, False) - If True no auxiliary buses are created for branches when switches are opened at the branch. Instead branches are set out of service
"""
# if dict 'user_pf_options' is present in net, these options overrule the net.__internal_options
# except for parameters that are passed by user
recycle = kwargs.get("recycle", None)
if isinstance(recycle, dict) and _internal_stored(net):
_recycled_powerflow(net, **kwargs)
return
if run_control and net.controller.in_service.any():
from pandapower.control import run_control
parameters = {**locals(), **kwargs}
# disable run control for inner loop to avoid infinite loop
parameters["run_control"] = False
run_control(**parameters)
else:
passed_parameters = _passed_runpp_parameters(locals())
_init_runpp_options(net, algorithm=algorithm, calculate_voltage_angles=calculate_voltage_angles,
init=init, max_iteration=max_iteration, tolerance_mva=tolerance_mva,
trafo_model=trafo_model, trafo_loading=trafo_loading,
enforce_q_lims=enforce_q_lims, check_connectivity=check_connectivity,
voltage_depend_loads=voltage_depend_loads,
consider_line_temperature=consider_line_temperature,
passed_parameters=passed_parameters, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
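# Illustrative sketch of the init="results" option described above: reuse the last
# solution as the starting vector for repeated power flows (e.g. quasi-static time
# series). The load-scaling loop is only an assumption for demonstration.
def _example_repeated_runpp(net, scalings=(0.8, 0.9, 1.0, 1.1)):  # pragma: no cover
    voltages = []
    for scale in scalings:
        net.load["scaling"] = scale
        # the first run initializes normally, later runs start from the last result
        runpp(net, init="results" if voltages else "auto")
        voltages.append(net.res_bus.vm_pu.copy())
    return voltages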
def rundcpp(net, trafo_model="t", trafo_loading="current", recycle=None, check_connectivity=True,
switch_rx_ratio=2, trafo3w_losses="hv", **kwargs):
"""
Runs PANDAPOWER DC Flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model. This is consistent with PowerFactory and is also more accurate than the PI-model. We recommend using this transformer model.
- "pi" - transformer is modeled as equivalent PI-model. This is consistent with Sincal, but the method is questionable since the transformer is physically T-shaped. We therefore recommend the use of the T-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**check_connectivity** (bool, False) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
If true, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is perfomed.
If check finds unsupplied buses, they are put out of service in the PYPOWER matrix
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
****kwargs** - options to use for PYPOWER.runpf
"""
_init_rundcpp_options(net, trafo_model=trafo_model, trafo_loading=trafo_loading,
recycle=recycle, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
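# Illustrative sketch (documentation only): a DC power flow yields active power flows
# and voltage angles; voltage magnitudes are not computed. Assumes `net` is any valid
# pandapower network.
def _example_rundcpp(net):  # pragma: no cover - documentation only
    rundcpp(net)
    return net.res_bus.va_degree, net.res_line.p_from_mw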
def runopp(net, verbose=False, calculate_voltage_angles=False, check_connectivity=True,
suppress_warnings=True, switch_rx_ratio=2, delta=1e-10, init="flat", numba=True,
trafo3w_losses="hv", consider_line_temperature=False, **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities can be defined in net.sgen / net.gen /net.load / net.storage
    net.sgen.controllable signals whether a static generator is controllable. If False,
the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.gen.min_p_mw / net.gen.max_p_mw
- net.gen.min_q_mvar / net.gen.max_q_mvar
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.sgen.min_q_mvar / net.sgen.max_q_mvar
- net.dcline.max_p_mw
- net.dcline.min_q_to_mvar / net.dcline.max_q_to_mvar / net.dcline.min_q_from_mvar / net.dcline.max_q_from_mvar
- net.ext_grid.min_p_mw / net.ext_grid.max_p_mw
- net.ext_grid.min_q_mvar / net.ext_grid.max_q_mvar
- net.load.min_p_mw / net.load.max_p_mw
- net.load.min_q_mvar / net.load.max_q_mvar
- net.storage.min_p_mw / net.storage.max_p_mw
- net.storage.min_q_mvar / net.storage.max_q_mvar
Controllable loads behave just like controllable static generators. It must be stated if they are controllable.
Otherwise, they are not respected as flexibilities.
    DC lines are controllable by default.
Network constraints can be defined for buses, lines and transformers the elements in the following columns:
- net.bus.min_vm_pu / net.bus.max_vm_pu
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
How these costs are combined into a cost function depends on the cost_function parameter.
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**init** (str, "flat") - init of starting opf vector. Options are "flat" or "pf"
Starting solution vector (x0) for opf calculations is determined by this flag. Options are:
"flat" (default): starting vector is (upper bound - lower bound) / 2
"pf": a power flow is executed prior to the opf and the pf solution is the starting vector. This may improve
convergence, but takes a longer runtime (which are probably neglectible for opf calculations)
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided
line temperature. If True, net.line must contain a column "temperature_degree_celsius".
The temperature dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**kwargs** - Pypower / Matpower keyword arguments: - OPF_VIOLATION (5e-6) constraint violation tolerance
- PDIPM_COSTTOL (1e-6) optimality tolerance
- PDIPM_GRADTOL (1e-6) gradient tolerance
- PDIPM_COMPTOL (1e-6) complementarity condition (inequality) tolerance
- PDIPM_FEASTOL (set to OPF_VIOLATION if not specified) feasibiliy (equality) tolerance
- PDIPM_MAX_IT (150) maximum number of iterations
- SCPDIPM_RED_IT(20) maximum number of step size reductions per iteration
"""
_check_necessary_opf_parameters(net, logger)
_init_runopp_options(net, calculate_voltage_angles=calculate_voltage_angles,
check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta, init=init, numba=numba,
trafo3w_losses=trafo3w_losses,
consider_line_temperature=consider_line_temperature, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
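# Illustrative OPF sketch (documentation only; all element parameters below are
# assumptions chosen for demonstration): a controllable generator with cost and
# operational limits, plus bus voltage and line loading constraints.
def _example_runopp():  # pragma: no cover - documentation only
    import pandapower as pp
    net = pp.create_empty_network()
    b1 = pp.create_bus(net, vn_kv=110.)
    b2 = pp.create_bus(net, vn_kv=110.)
    pp.create_ext_grid(net, b1, min_p_mw=-100, max_p_mw=100,
                       min_q_mvar=-50, max_q_mvar=50)
    pp.create_line(net, b1, b2, length_km=10.,
                   std_type="149-AL1/24-ST1A 110.0")
    pp.create_load(net, b2, p_mw=20.)
    pp.create_gen(net, b2, p_mw=10., vm_pu=1.01, controllable=True,
                  min_p_mw=0, max_p_mw=30, min_q_mvar=-10, max_q_mvar=10)
    pp.create_poly_cost(net, 0, "ext_grid", cp1_eur_per_mw=50)
    pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=10)
    net.bus["min_vm_pu"], net.bus["max_vm_pu"] = 0.95, 1.05
    net.line["max_loading_percent"] = 100
    runopp(net)
    return net.res_cost, net.res_gen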
def rundcopp(net, verbose=False, check_connectivity=True, suppress_warnings=True,
switch_rx_ratio=0.5, delta=1e-10, trafo3w_losses="hv", **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities for generators can be defined in net.sgen / net.gen.
net.sgen.controllable / net.gen.controllable signals if a generator is controllable. If False,
    the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.gen.min_p_mw / net.gen.max_p_mw
- net.load.min_p_mw / net.load.max_p_mw
Network constraints can be defined for buses, lines and transformers the elements in the following columns:
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option; keep in mind, however, that all other pypower
warnings are suppressed, too.
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
"""
if (not net.sgen.empty) & ("controllable" not in net.sgen.columns):
logger.warning('Warning: Please specify sgen["controllable"]\n')
if (not net.load.empty) & ("controllable" not in net.load.columns):
logger.warning('Warning: Please specify load["controllable"]\n')
_init_rundcopp_options(net, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta,
trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
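# Usage sketch for the DC OPF (illustrative; `net` already carries generator
# limits and costs as described above):
#
#   net.load["controllable"] = False   # avoids the warning issued above
#   pp.rundcopp(net)
#   print(net.res_cost)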
def _passed_runpp_parameters(local_parameters):
"""
Internal function to distinguish arguments for pandapower.runpp() that are explicitly passed by
the user.
:param local_parameters: locals() in the runpp() function
:return: dictionary of explicitly passed parameters
"""
net = local_parameters.pop("net")
if not ("user_pf_options" in net.keys() and len(net.user_pf_options) > 0):
return None
try:
default_parameters = {k: v.default for k, v in inspect.signature(runpp).parameters.items()}
except Exception:  # fall back to getfullargspec if inspect.signature is unavailable or fails
args, varargs, keywords, defaults = inspect.getfullargspec(runpp)
default_parameters = dict(zip(args[-len(defaults):], defaults))
default_parameters.update({"init": "auto"})
passed_parameters = {
key: val for key, val in local_parameters.items()
if key in default_parameters.keys() and val != default_parameters.get(key, None)}
return passed_parameters
|
the-stack_0_16496 | import argparse
import gym
import numpy as np
import os
import tensorflow as tf
import tempfile
import time
import json
import random
import rlattack.common.tf_util as U
from rlattack import logger
from rlattack import deepq
from rlattack.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from rlattack.common.misc_util import (
boolean_flag,
pickle_load,
pretty_eta,
relatively_safe_pickle_dump,
set_global_seeds,
RunningAvg,
SimpleMonitor
)
from rlattack.common.schedules import LinearSchedule, PiecewiseSchedule
# when updating this to non-deprecated ones, it is important to
# copy over LazyFrames
from rlattack.common.atari_wrappers_deprecated import wrap_dqn
from rlattack.common.azure_utils import Container
from model import model, dueling_model
from statistics import statistics
def parse_args():
parser = argparse.ArgumentParser("DQN experiments for Atari games")
# Environment
parser.add_argument("--env", type=str, default="Pong",
help="name of the game")
parser.add_argument("--seed", type=int, default=42,
help="which seed to use")
# Core DQN parameters
parser.add_argument("--replay-buffer-size", type=int, default=int(1e6),
help="replay buffer size")
parser.add_argument("--lr", type=float, default=1e-4,
help="learning rate for Adam optimizer")
parser.add_argument("--num-steps", type=int, default=int(2e8),
help="total number of steps to \
run the environment for")
parser.add_argument("--batch-size", type=int, default=32,
help="number of transitions to optimize \
at the same time")
parser.add_argument("--learning-freq", type=int, default=4,
help="number of iterations between \
every optimization step")
parser.add_argument("--target-update-freq", type=int, default=40000,
help="number of iterations between \
every target network update")
# Bells and whistles
boolean_flag(parser, "noisy", default=False,
help="whether or not to NoisyNetwork")
boolean_flag(parser, "double-q", default=True,
help="whether or not to use double q learning")
boolean_flag(parser, "dueling", default=False,
help="whether or not to use dueling model")
boolean_flag(parser, "prioritized", default=False,
help="whether or not to use prioritized replay buffer")
parser.add_argument("--prioritized-alpha", type=float, default=0.6,
help="alpha parameter for prioritized replay buffer")
parser.add_argument("--prioritized-beta0", type=float, default=0.4,
help="initial value of beta \
parameters for prioritized replay")
parser.add_argument("--prioritized-eps", type=float, default=1e-6,
help="eps parameter for prioritized replay buffer")
# Checkpointing
parser.add_argument("--save-dir", type=str, default=None, required=True,
help="directory in which \
training state and model should be saved.")
parser.add_argument("--save-azure-container", type=str, default=None,
help="It present data will saved/loaded from Azure. \
Should be in format ACCOUNT_NAME:ACCOUNT_KEY:\
CONTAINER")
parser.add_argument("--save-freq", type=int, default=1e6,
help="save model once every time this many \
iterations are completed")
boolean_flag(parser, "load-on-start", default=True,
help="if true and model was previously saved then training \
will be resumed")
# V: Attack Arguments #
parser.add_argument("--attack", type=str, default=None,
help="Method to attack the model.")
parser.add_argument("--attack-init", type=int, default=0,
help="Iteration no. to begin attacks")
parser.add_argument("--attack-prob", type=float, default=0.0,
help="Probability of attack at each step, \
float in range 0 - 1.0")
return parser.parse_args()
def make_env(game_name):
env = gym.make(game_name + "NoFrameskip-v4")
monitored_env = SimpleMonitor(env)
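# wrap_dqn applies the usual DQN Atari preprocessing from the (deprecated)
# baselines wrappers (frame skipping, 84x84 grayscale frames, frame stacking)
# on top of the episode monitor.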
env = wrap_dqn(monitored_env)
return env, monitored_env
def maybe_save_model(savedir, container, state):
if savedir is None:
return
start_time = time.time()
model_dir = "model-{}".format(state["num_iters"])
U.save_state(os.path.join(savedir, model_dir, "saved"))
if container is not None:
container.put(os.path.join(savedir, model_dir), model_dir)
relatively_safe_pickle_dump(state,
os.path.join(savedir,
'training_state.pkl.zip'),
compression=True)
if container is not None:
container.put(os.path.join(savedir, 'training_state.pkl.zip'),
'training_state.pkl.zip')
relatively_safe_pickle_dump(state["monitor_state"],
os.path.join(savedir, 'monitor_state.pkl'))
if container is not None:
container.put(os.path.join(savedir, 'monitor_state.pkl'),
'monitor_state.pkl')
logger.log("Saved model in {} seconds\n".format(time.time() - start_time))
def maybe_load_model(savedir, container):
"""Load model if present at the specified path."""
if savedir is None:
return
state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
if container is not None:
logger.log("Attempting to download model from Azure")
found_model = container.get(savedir, 'training_state.pkl.zip')
else:
found_model = os.path.exists(state_path)
if found_model:
state = pickle_load(state_path, compression=True)
model_dir = "model-{}".format(state["num_iters"])
if container is not None:
container.get(savedir, model_dir)
U.load_state(os.path.join(savedir, model_dir, "saved"))
logger.log("Loaded models checkpoint at {} iterations".format(
state["num_iters"]))
return state
if __name__ == '__main__':
args = parse_args()
# Parse savedir and azure container.
savedir = args.save_dir
if args.save_azure_container is not None:
account_name, account_key, container_name = \
args.save_azure_container.split(":")
container = Container(
account_name=account_name,
account_key=account_key,
container_name=container_name,
maybe_create=True
)
if savedir is None:
# Careful! This will not get cleaned up.
savedir = tempfile.TemporaryDirectory().name
else:
container = None
# Create and seed the env.
env, monitored_env = make_env(args.env)
if args.seed > 0:
set_global_seeds(args.seed)
env.unwrapped.seed(args.seed)
# V: Save arguments, configure log dump path to savedir #
if savedir:
with open(os.path.join(savedir, 'args.json'), 'w') as f:
json.dump(vars(args), f)
logger.configure(dir=savedir) # log to savedir
with U.make_session(4) as sess:
# Create training graph and replay buffer
act, train, update_target, debug, craft_adv = deepq.build_train(
make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape,
name=name),
q_func=dueling_model if args.dueling else model,
num_actions=env.action_space.n,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=args.lr,
epsilon=1e-4),
gamma=0.99,
grad_norm_clipping=10,
double_q=args.double_q,
noisy=args.noisy,
attack=args.attack
)
approximate_num_iters = args.num_steps / 4
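# Epsilon-greedy exploration schedule: anneal from 1.0 to 0.1 over the first
# 2% of iterations, then to 0.01 by 20% of them; stay at 0.01 afterwards.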
exploration = PiecewiseSchedule([
(0, 1.0),
(approximate_num_iters / 50, 0.1),
(approximate_num_iters / 5, 0.01)
], outside_value=0.01)
if args.prioritized:
replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size,
args.prioritized_alpha)
beta_schedule = LinearSchedule(approximate_num_iters,
initial_p=args.prioritized_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(args.replay_buffer_size)
U.initialize()
update_target()
num_iters = 0
# Load the model
state = maybe_load_model(savedir, container)
if state is not None:
num_iters, replay_buffer = state["num_iters"], state["replay_buffer"]
monitored_env.set_state(state["monitor_state"])
start_time, start_steps = None, None
steps_per_iter = RunningAvg(0.999)
iteration_time_est = RunningAvg(0.999)
obs = env.reset()
# Record the mean of the \sigma
sigma_name_list = []
sigma_list = []
for param in tf.compat.v1.trainable_variables():
# only record the \sigma in the action network
if 'sigma' in param.name \
and 'deepq/q_func/action_value' in param.name:
summary_name = \
param.name.replace(
'deepq/q_func/action_value/', '').replace(
'/', '.').split(':')[0]
sigma_name_list.append(summary_name)
sigma_list.append(tf.reduce_mean(input_tensor=tf.abs(param)))
f_mean_sigma = U.function(inputs=[], outputs=sigma_list)
# Statistics
writer = tf.compat.v1.summary.FileWriter(savedir, sess.graph)
im_stats = statistics(scalar_keys=['action', 'im_reward', 'td_errors',
'huber_loss'] + sigma_name_list)
ep_stats = statistics(scalar_keys=['ep_reward', 'ep_length'])
# Main training loop
ep_length = 0
while True:
num_iters += 1
ep_length += 1
# V: Perturb observation if we are past the init stage
# and at a designated attack step
# if craft_adv != None and (num_iters >= args.attack_init)
# and ((num_iters - args.attack_init) % args.attack_freq == 0) :
if craft_adv is not None and (num_iters >= args.attack_init) and (
random.random() <= args.attack_prob):
obs = craft_adv(np.array(obs)[None])[0]
# Take action and store transition in the replay buffer.
if args.noisy:
# greedily choose
action = act(np.array(obs)[None], stochastic=False)[0]
else:
# epsilon greedy
action = act(np.array(obs)[None],
update_eps=exploration.value(num_iters))[0]
new_obs, rew, done, info = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
if done:
obs = env.reset()
if (num_iters > max(5 * args.batch_size,
args.replay_buffer_size // 20) and
num_iters % args.learning_freq == 0):
# Sample a bunch of transitions from replay buffer
if args.prioritized:
experience = replay_buffer.sample(args.batch_size,
beta=beta_schedule.value(
num_iters))
(obses_t, actions, rewards, obses_tp1, dones, weights,
batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = \
replay_buffer.sample(args.batch_size)
weights = np.ones_like(rewards)
# Minimize the Bellman error and compute the TD-error
td_errors, huber_loss = train(obses_t, actions, rewards,
obses_tp1, dones, weights)
# Update the priorities in the replay buffer
if args.prioritized:
new_priorities = np.abs(td_errors) + args.prioritized_eps
replay_buffer.update_priorities(
batch_idxes, new_priorities
)
# Write summary
mean_sigma = f_mean_sigma()
im_stats.add_all_summary(writer,
[action, rew, np.mean(td_errors),
np.mean(huber_loss)] + mean_sigma,
num_iters)
# Update target network.
if num_iters % args.target_update_freq == 0:
update_target()
if start_time is not None:
steps_per_iter.update(info['steps'] - start_steps)
iteration_time_est.update(time.time() - start_time)
start_time, start_steps = time.time(), info["steps"]
# Save the model and training state.
if num_iters > 0 and (num_iters % args.save_freq == 0 or info[
"steps"] > args.num_steps):
maybe_save_model(savedir, container, {
'replay_buffer': replay_buffer,
'num_iters': num_iters,
'monitor_state': monitored_env.get_state()
})
if info["steps"] > args.num_steps:
break
if done:
steps_left = args.num_steps - info["steps"]
completion = np.round(info["steps"] / args.num_steps, 1)
mean_ep_reward = np.mean(info["rewards"][-100:])
logger.record_tabular("% completion", completion)
logger.record_tabular("steps", info["steps"])
logger.record_tabular("iters", num_iters)
logger.record_tabular("episodes", len(info["rewards"]))
logger.record_tabular("reward (100 epi mean)",
np.mean(info["rewards"][-100:]))
if not args.noisy:
logger.record_tabular("exploration",
exploration.value(num_iters))
if args.prioritized:
logger.record_tabular("max priority",
replay_buffer._max_priority)
fps_estimate = (
float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
if steps_per_iter._value is not None else "calculating:")
logger.dump_tabular()
logger.log()
logger.log("ETA: " +
pretty_eta(int(steps_left / fps_estimate)))
logger.log()
# add summary for one episode
ep_stats.add_all_summary(writer, [mean_ep_reward, ep_length],
num_iters)
ep_length = 0
|
the-stack_0_16497 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MonitorClientConfiguration(Configuration):
"""Configuration for MonitorClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription Id.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(MonitorClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2017-04-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-eventhub/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
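# Usage sketch (illustrative; the configuration is normally built by the
# generated client, the zero GUID is a placeholder subscription id, and the
# azure-identity package is assumed to be installed):
#
#   from azure.identity import DefaultAzureCredential
#   config = MonitorClientConfiguration(
#       credential=DefaultAzureCredential(),
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )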
|
the-stack_0_16500 | # Unwinder commands.
# Copyright 2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import re
def validate_regexp(exp, idstring):
try:
return re.compile(exp)
except re.error:
raise SyntaxError("Invalid %s regexp: %s." % (idstring, exp))
def parse_unwinder_command_args(arg):
"""Internal utility to parse unwinder command argv.
Arguments:
arg: The arguments to the command. The format is:
[locus-regexp [name-regexp]]
Returns:
A 2-tuple of compiled regular expressions.
Raises:
SyntaxError: an error processing ARG
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc > 2:
raise SyntaxError("Too many arguments.")
locus_regexp = ""
name_regexp = ""
if argc >= 1:
locus_regexp = argv[0]
if argc >= 2:
name_regexp = argv[1]
return (validate_regexp(locus_regexp, "locus"),
validate_regexp(name_regexp, "unwinder"))
class InfoUnwinder(gdb.Command):
"""GDB command to list unwinders.
Usage: info unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression matching the location of the
unwinder. If it is omitted, all registered unwinders from all
loci are listed. A locus can be 'global', 'progspace' to list
the unwinders from the current progspace, or a regular expression
matching filenames of objfiles.
NAME-REGEXP is a regular expression to filter unwinder names. If
this is omitted for a specified locus, then all registered unwinders
in the locus are listed.
"""
def __init__(self):
super(InfoUnwinder, self).__init__("info unwinder",
gdb.COMMAND_STACK)
def list_unwinders(self, title, unwinders, name_re):
"""Lists the unwinders whose name matches regexp.
Arguments:
title: The line to print before the list.
unwinders: The list of the unwinders.
name_re: unwinder name filter.
"""
if not unwinders:
return
print(title)
for unwinder in unwinders:
if name_re.match(unwinder.name):
print(" %s%s" % (unwinder.name,
"" if unwinder.enabled else " [disabled]"))
def invoke(self, arg, from_tty):
locus_re, name_re = parse_unwinder_command_args(arg)
if locus_re.match("global"):
self.list_unwinders("Global:", gdb.frame_unwinders,
name_re)
if locus_re.match("progspace"):
cp = gdb.current_progspace()
self.list_unwinders("Progspace %s:" % cp.filename,
cp.frame_unwinders, name_re)
for objfile in gdb.objfiles():
if locus_re.match(objfile.filename):
self.list_unwinders("Objfile %s:" % objfile.filename,
objfile.frame_unwinders, name_re)
def do_enable_unwinder1(unwinders, name_re, flag):
"""Enable/disable unwinders whose names match given regex.
Arguments:
unwinders: The list of unwinders.
name_re: Unwinder name filter.
flag: Enable/disable.
Returns:
The number of unwinders affected.
"""
total = 0
for unwinder in unwinders:
if name_re.match(unwinder.name):
unwinder.enabled = flag
total += 1
return total
def do_enable_unwinder(arg, flag):
"""Enable/disable unwinder(s)."""
(locus_re, name_re) = parse_unwinder_command_args(arg)
total = 0
if locus_re.match("global"):
total += do_enable_unwinder1(gdb.frame_unwinders, name_re, flag)
if locus_re.match("progspace"):
total += do_enable_unwinder1(gdb.current_progspace().frame_unwinders,
name_re, flag)
for objfile in gdb.objfiles():
if locus_re.match(objfile.filename):
total += do_enable_unwinder1(objfile.frame_unwinders, name_re,
flag)
print("%d unwinder%s %s" % (total, "" if total == 1 else "s",
"enabled" if flag else "disabled"))
class EnableUnwinder(gdb.Command):
"""GDB command to enable unwinders.
Usage: enable unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression specifying the unwinders to
enable. It can be 'global', 'progspace', or the name of an objfile
within that progspace.
NAME-REGEXP is a regular expression to filter unwinder names. If
this is omitted for a specified locus, then all registered unwinders
in the locus are affected.
"""
def __init__(self):
super(EnableUnwinder, self).__init__("enable unwinder",
gdb.COMMAND_STACK)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_unwinder(arg, True)
class DisableUnwinder(gdb.Command):
"""GDB command to disable the specified unwinder.
Usage: disable unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression specifying the unwinders to
disable. It can be 'global', 'progspace', or the name of an objfile
within that progspace.
NAME-REGEXP is a regular expression to filter unwinder names. If
this is omitted for a specified locus, then all registered unwinders
in the locus are affected.
"""
def __init__(self):
super(DisableUnwinder, self).__init__("disable unwinder",
gdb.COMMAND_STACK)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_unwinder(arg, False)
def register_unwinder_commands():
"""Installs the unwinder commands."""
InfoUnwinder()
EnableUnwinder()
DisableUnwinder()
register_unwinder_commands()
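# Example interactive usage inside GDB (illustrative; which unwinders are
# listed depends on what is registered in your session):
#
#   (gdb) info unwinder
#   (gdb) disable unwinder progspace
#   (gdb) enable unwinder global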
|
the-stack_0_16501 | from collections import defaultdict
import errno
import math
import mmap
import os
import sys
import time
import multiprocessing as mp
from six.moves import range
import numpy as np
from .lib import Bbox, Vec, mkdir
SHM_DIRECTORY = '/dev/shm/'
EMULATED_SHM_DIRECTORY = '/tmp/cloudvolume-shm'
EMULATE_SHM = not os.path.isdir(SHM_DIRECTORY)
PLATFORM_SHM_DIRECTORY = SHM_DIRECTORY if not EMULATE_SHM else EMULATED_SHM_DIRECTORY
class SharedMemoryReadError(Exception):
pass
class SharedMemoryAllocationError(Exception):
pass
def ndarray(shape, dtype, location, order='F', readonly=False, lock=None, **kwargs):
"""
Create a shared memory numpy array.
Lock is only necessary while doing multiprocessing on
platforms without /dev/shm type shared memory as
filesystem emulation will be used instead.
Allocating the shared array requires cleanup on your part.
A shared memory file will be located at sharedmemory.PLATFORM_SHM_DIRECTORY + location
and must be unlinked when you're done. It will outlive the program.
You should also call .close() on the mmap file handle when done. However,
this is less of a problem because the operating system will close the
file handle on process termination.
Parameters:
shape: same as numpy.ndarray
dtype: same as numpy.ndarray
location: the shared memory filename
lock: (optional) multiprocessing.Lock
Returns: (mmap filehandle, shared ndarray)
"""
if EMULATE_SHM:
return ndarray_fs(
shape, dtype, location, lock,
readonly, order, emulate_shm=True, **kwargs
)
return ndarray_shm(shape, dtype, location, readonly, order, **kwargs)
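# Usage sketch (illustrative; 'example-shm' is an arbitrary segment name):
#
#   handle, arr = ndarray(shape=(4, 4), dtype=np.float32, location='example-shm')
#   arr[:] = 1.0            # write through the shared buffer
#   del arr                 # drop the view before closing the mapping
#   handle.close()
#   unlink('example-shm')   # the segment outlives the process unless unlinked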
def ndarray_fs(
shape, dtype, location, lock,
readonly=False, order='F', emulate_shm=False,
**kwargs
):
"""Emulate shared memory using the filesystem."""
dbytes = np.dtype(dtype).itemsize
nbytes = Vec(*shape).rectVolume() * dbytes
if emulate_shm:
directory = mkdir(EMULATED_SHM_DIRECTORY)
filename = os.path.join(directory, location)
else:
filename = location
if lock:
lock.acquire()
try:
allocate_shm_file(filename, nbytes, dbytes, readonly)
finally:
if lock:
lock.release()
with open(filename, 'r+b') as f:
array_like = mmap.mmap(f.fileno(), 0) # map entire file
renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
renderbuffer.setflags(write=(not readonly))
return array_like, renderbuffer
def allocate_shm_file(filename, nbytes, dbytes, readonly):
exists = os.path.exists(filename)
size = 0 if not exists else os.path.getsize(filename)
if readonly and not exists:
raise SharedMemoryReadError(filename + " has not been allocated. Requested " + str(nbytes) + " bytes.")
elif readonly and size != nbytes:
raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
filename, size, nbytes
))
if exists:
if size > nbytes:
with open(filename, 'wb') as f:
os.ftruncate(f.fileno(), nbytes)
elif size < nbytes:
# too small? just remake it below
os.unlink(filename)
exists = os.path.exists(filename)
if not exists:
# Previously we were writing out real files full of zeros,
# but a) that takes forever and b) modern OSes support sparse
# files (i.e. gigabytes of zeros that take up only a few real bytes).
#
# The following should take advantage of this functionality and be faster.
# It should work on Python 2.7 Unix, and Python 3.5+ on Unix and Windows.
#
# References:
# https://stackoverflow.com/questions/8816059/create-file-of-particular-size-in-python
# https://docs.python.org/3/library/os.html#os.ftruncate
# https://docs.python.org/2/library/os.html#os.ftruncate
#
with open(filename, 'wb') as f:
os.ftruncate(f.fileno(), nbytes)
def ndarray_shm(shape, dtype, location, readonly=False, order='F', **kwargs):
"""Create a shared memory numpy array. Requires /dev/shm to exist."""
import posix_ipc
from posix_ipc import O_CREAT
import psutil
nbytes = Vec(*shape).rectVolume() * np.dtype(dtype).itemsize
available = psutil.virtual_memory().available
preexisting = 0
# This might only work on Ubuntu
shmloc = os.path.join(SHM_DIRECTORY, location)
if os.path.exists(shmloc):
preexisting = os.path.getsize(shmloc)
elif readonly:
raise SharedMemoryReadError(shmloc + " has not been allocated. Requested " + str(nbytes) + " bytes.")
if readonly and preexisting != nbytes:
raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
shmloc, preexisting, nbytes
))
if (nbytes - preexisting) > available:
overallocated = nbytes - preexisting - available
overpercent = (100 * overallocated / (preexisting + available))
raise SharedMemoryAllocationError("""
Requested more memory than is available.
Shared Memory Location: {}
Shape: {}
Requested Bytes: {}
Available Bytes: {}
Preexisting Bytes*: {}
Overallocated Bytes*: {} (+{:.2f}%)
* Preexisting is only correct on linux systems that support /dev/shm/""" \
.format(location, shape, nbytes, available, preexisting, overallocated, overpercent))
# This might seem like we're being "extra safe" but consider
# a race condition where the state of the shared memory
# was adjusted between the check above and now. Better to make sure
# that we don't accidentally change anything if readonly is set.
flags = 0 if readonly else O_CREAT
size = 0 if readonly else int(nbytes)
try:
shared = posix_ipc.SharedMemory(location, flags=flags, size=size)
array_like = mmap.mmap(shared.fd, shared.size)
os.close(shared.fd)
renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
except OSError as err:
if err.errno == errno.ENOMEM: # Out of Memory
posix_ipc.unlink_shared_memory(location)
raise
renderbuffer.setflags(write=(not readonly))
return array_like, renderbuffer
def unlink(location):
if EMULATE_SHM:
return unlink_fs(location)
return unlink_shm(location)
def unlink_shm(location):
import posix_ipc
try:
posix_ipc.unlink_shared_memory(location)
except posix_ipc.ExistentialError:
return False
return True
def unlink_fs(location):
directory = mkdir(EMULATED_SHM_DIRECTORY)
try:
filename = os.path.join(directory, location)
os.unlink(filename)
return True
except OSError:
return False
|
the-stack_0_16502 | # pcost.py
import report
def portfolio_cost(filename):
'''
Computes the total cost (shares*price) of a portfolio file
'''
portfolio = report.read_portfolio(filename)
return portfolio.total_cost
def main(args):
if len(args) != 2:
raise SystemExit('Usage: %s portfoliofile' % args[0])
filename = args[1]
print('Total cost:', portfolio_cost(filename))
if __name__ == '__main__':
import sys
main(sys.argv)
|
the-stack_0_16503 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training script for the listops task."""
import functools
import itertools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from lra_benchmarks.listops import input_pipeline
from lra_benchmarks.models.transformer import transformer
from lra_benchmarks.utils import train_utils
from ml_collections import config_flags
import numpy as np
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
'task_name',
default='basic',
help='Name of the task used for load training/test data.')
flags.DEFINE_string(
'data_dir', default=None, help='Directory containing datasets.')
flags.DEFINE_bool(
'test_only', default=False, help='Run the evaluation on the test data.')
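# Example invocation (illustrative; the script name, config file and data
# directory are placeholders):
#
#   python train.py --config=path/to/config.py --model_dir=/tmp/listops \
#       --task_name=basic --data_dir=/path/to/listops-data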
def create_model(key, flax_module, input_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
module = flax_module.partial(**model_kwargs)
with nn.stochastic(key):
_, initial_params = module.init_by_shape(key,
[(input_shape, jnp.float32)])
model = nn.Model(module, initial_params)
return model
return _create_model(key)
def create_optimizer(model, learning_rate):
optimizer_def = optim.Adam(
learning_rate,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=FLAGS.config.weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, labels, num_classes=10, weights=weights)
acc, _ = train_utils.compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
def train_step(optimizer, batch, learning_rate_fn, dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in train_keys]
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(inputs, train=True)
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, targets, num_classes=10, weights=None)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, None)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(model, batch):
eval_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in eval_keys]
logits = model(inputs, train=False)
return compute_metrics(logits, targets, None)
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
config = FLAGS.config
logging.info('===========Config Dict============')
logging.info(config)
batch_size = config.batch_size
learning_rate = config.learning_rate
num_train_steps = config.num_train_steps
num_eval_steps = config.num_eval_steps
eval_freq = config.eval_frequency
random_seed = config.random_seed
model_type = config.model_type
model_kwargs = (
config.model_kwargs.to_dict() if 'model_kwargs' in config else {})
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'summary'))
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_datasets(
n_devices=jax.local_device_count(),
task_name=FLAGS.task_name,
data_dir=FLAGS.data_dir,
batch_size=batch_size,
max_length=config.max_length)
vocab_size = encoder.vocab_size
train_ds = train_ds.repeat()
train_iter = iter(train_ds)
max_length = config.max_length
input_shape = (batch_size, max_length)
model_kwargs.update({
'vocab_size': vocab_size,
'emb_dim': config.emb_dim,
'num_heads': config.num_heads,
'num_layers': config.num_layers,
'qkv_dim': config.qkv_dim,
'mlp_dim': config.mlp_dim,
'max_len': config.max_length,
'classifier': True,
'num_classes': 10
})
if hasattr(config, 'attention_fn'):
model_kwargs['attention_fn'] = config.attention_fn
rng = random.PRNGKey(random_seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = random.split(rng)
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
if model_type == 'transformer':
model = create_model(init_rng, transformer.TransformerEncoder, input_shape,
model_kwargs)
else:
raise ValueError('Model type not supported')
optimizer = create_optimizer(model, learning_rate)
del model # Don't keep a copy of the initial model.
start_step = 0
if config.restore_checkpoints or FLAGS.test_only:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
base_learning_rate=learning_rate)
p_train_step = jax.pmap(
functools.partial(train_step, learning_rate_fn=learning_rate_fn),
axis_name='batch')
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# p_pred_step = jax.pmap(predict_step, axis_name='batch')
def run_eval(eval_ds, num_eval_steps=-1):
eval_metrics = []
eval_iter = iter(eval_ds)
if num_eval_steps == -1:
num_iter = itertools.count()
else:
num_iter = range(num_eval_steps)
for _, eval_batch in zip(num_iter, eval_iter):
# pylint: disable=protected-access
eval_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), eval_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
# Calculate (clipped) perplexity after averaging log-perplexities:
eval_summary['perplexity'] = jnp.clip(
jnp.exp(eval_summary['loss']), a_max=1.0e4)
return eval_summary
if FLAGS.test_only:
with tf.io.gfile.GFile(
os.path.join(FLAGS.model_dir, 'results.json'), 'w') as f:
test_summary = run_eval(test_ds)
json.dump(jax.tree_map(lambda x: x.tolist(), test_summary), f)
return
metrics_all = []
tick = time.time()
for step, batch in zip(range(start_step, num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, metrics, dropout_rngs = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
logging.info('train in step: %d', step)
# Save a Checkpoint
if ((step % config.checkpoint_freq == 0 and step > 0) or
step == num_train_steps - 1):
if jax.host_id() == 0 and config.save_checkpoints:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(FLAGS.model_dir,
jax_utils.unreplicate(optimizer), step)
# Periodic metric handling.
if step % eval_freq == 0 and step > 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
logging.info('train in step: %d, loss: %.4f', step, summary['loss'])
if jax.host_id() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar(f'train_{key}', val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Eval Metrics
eval_summary = run_eval(eval_ds, num_eval_steps)
logging.info('eval in step: %d, loss: %.4f, acc: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if jax.host_id() == 0:
for key, val in eval_summary.items():
summary_writer.scalar(f'eval_{key}', val, step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
|
the-stack_0_16505 | import spidev, time
spi = spidev.SpiDev()
spi.open(0,0)
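# analog_read below assumes an MCP3008-style 10-bit ADC on SPI bus 0, device 0.
# The three bytes sent are: the start bit, single-ended mode plus the channel
# number in the upper nibble, and a dummy byte clocked out while the 10-bit
# result arrives in the last two received bytes (top 2 bits in r[1], rest in r[2]).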
def analog_read(channel):
r = spi.xfer2([1, (8 + channel) << 4, 0])
adc_out = ((r[1]&3) << 8) + r[2]
return adc_out
while True:
reading = analog_read(0)
voltage = reading * 3.3 / 1024
print("Reading=%d\tVoltage=%f" % (reading, voltage))
time.sleep(1)
|
the-stack_0_16506 | """
Compare two or more phasings
"""
import logging
import math
from collections import defaultdict
from contextlib import ExitStack
import dataclasses
from itertools import chain, permutations
from typing import Set, List, Optional, DefaultDict, Dict
from whatshap.vcf import VcfReader, VcfVariant, VariantTable, PloidyError
from whatshap.core import Genotype, SwitchFlipCalculator
from whatshap.cli import CommandLineError
logger = logging.getLogger(__name__)
COUNT_WIDTH = 9
# fmt: off
def add_arguments(parser):
add = parser.add_argument
add('--sample', metavar='SAMPLE', default=None, help='Name of the sample '
'to process. If not given, use first sample found in VCF.')
add('--names', metavar='NAMES', default=None, help='Comma-separated list '
'of data set names to be used in the report (in same order as VCFs).')
add('--ignore-sample-name', default=False, action='store_true', help='For single '
'sample VCFs, ignore sample name and assume all samples are the same.')
add('--tsv-pairwise', metavar='TSVPAIRWISE', default=None, help='Filename to write '
'comparison results from pair-wise comparison to (tab-separated).')
add('--tsv-multiway', metavar='TSVMULTIWAY', default=None, help='Filename to write '
'comparison results from multiway comparison to (tab-separated). Only for diploid vcfs.')
add('--only-snvs', default=False, action="store_true", help='Only process SNVs '
'and ignore all other variants.')
add('--switch-error-bed', default=None, help='Write BED file with switch error positions '
'to given filename. Only for diploid vcfs.')
add('--plot-blocksizes', default=None, help='Write PDF file with a block length histogram '
'to given filename (requires matplotlib).')
add('--plot-sum-of-blocksizes', default=None, help='Write PDF file with a block length histogram in which the height of each bar corresponds to the sum of lengths.')
add('--longest-block-tsv', default=None, help='Write position-wise agreement of longest '
'joint blocks in each chromosome to tab-separated file. Only for diploid vcfs.')
add('--ploidy', '-p', metavar='PLOIDY', type=int, default=2, help='The ploidy of the sample(s) (default: %(default)s).')
# TODO: what's the best way to request "two or more" VCFs?
add('vcf', nargs='+', metavar='VCF', help='At least two phased VCF files to be compared.')
# fmt: on
def validate(args, parser):
if len(args.vcf) < 2:
parser.error("At least two VCFs need to be given.")
if args.ploidy < 2:
parser.error("Ploidy must be > 1.")
if args.ploidy > 2 and args.tsv_multiway:
parser.error("Option --tsv-multiway can only be used if ploidy=2.")
if args.ploidy > 2 and args.switch_error_bed:
parser.error("Option --switch-error-bed can only be used if ploidy=2.")
if args.ploidy > 2 and args.longest_block_tsv:
parser.error("Option --longest-block-tsv can only be used if ploidy=2.")
class SwitchFlips:
def __init__(self, switches: int = 0, flips: int = 0):
self.switches: int = switches
self.flips: int = flips
def __iadd__(self, other):
self.switches += other.switches
self.flips += other.flips
return self
def __repr__(self):
return "SwitchFlips(switches={}, flips={})".format(self.switches, self.flips)
def __str__(self):
return "{}/{}".format(self.switches, self.flips)
class PhasingErrors:
def __init__(
self,
switches: int = 0,
hamming: int = 0,
switch_flips: Optional[SwitchFlips] = None,
diff_genotypes: int = 0,
):
self.switches = switches
self.hamming = hamming
self.switch_flips = SwitchFlips() if switch_flips is None else switch_flips
self.diff_genotypes = diff_genotypes
def __iadd__(self, other: object) -> "PhasingErrors":
if not isinstance(other, PhasingErrors):
raise TypeError("Can only add to PhasingErrors")
self.switches += other.switches
self.hamming += other.hamming
self.switch_flips += other.switch_flips
self.diff_genotypes += other.diff_genotypes
return self
def __repr__(self):
return "PhasingErrors(switches={}, hamming={}, switch_flips={}, diff_genotypes={})".format(
self.switches, self.hamming, self.switch_flips, self.diff_genotypes
)
def complement(s):
"""
>>> complement('01100')
'10011'
"""
t = {"0": "1", "1": "0"}
return "".join(t[c] for c in s)
def hamming(s0, s1):
"""
>>> hamming('ABCD', 'AXCY')
2
"""
assert len(s0) == len(s1)
return sum(c0 != c1 for c0, c1 in zip(s0, s1))
def switch_encoding(phasing):
"""
>>> switch_encoding('0001011')
'001110'
"""
assert isinstance(phasing, str)
return "".join(("0" if phasing[i - 1] == phasing[i] else "1") for i in range(1, len(phasing)))
def compute_switch_flips(phasing0, phasing1) -> SwitchFlips:
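"""
Decompose the disagreement between two diploid haplotype strings into switch
and flip errors. Within each run of consecutive switch errors, every pair of
adjacent errors counts as one flip and a leftover single error as one switch.

>>> compute_switch_flips("000011", "001011")
SwitchFlips(switches=0, flips=1)
"""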
assert len(phasing0) == len(phasing1)
s0 = switch_encoding(phasing0)
s1 = switch_encoding(phasing1)
result = SwitchFlips()
switches_in_a_row = 0
for i, (p0, p1) in enumerate(zip(s0, s1)):
if p0 != p1:
switches_in_a_row += 1
if (i + 1 == len(s0)) or (p0 == p1):
result.flips += switches_in_a_row // 2
result.switches += switches_in_a_row % 2
switches_in_a_row = 0
return result
def compute_matching_genotype_pos(phasing0, phasing1):
"""
Computes the positions on which both phasings agree on the genotype.
"""
assert len(phasing0) == len(phasing1)
assert len(phasing0) >= 2
assert len(phasing0[0]) == len(phasing1[0])
assert all(len(phasing0[i]) == len(phasing0[0]) for i in range(1, len(phasing0)))
num_vars = len(phasing0[0])
matching_pos = [
i
for i in range(num_vars)
if Genotype([int(hap[i]) for hap in phasing0])
== Genotype([int(hap[i]) for hap in phasing1])
]
return matching_pos
def compute_switch_errors_poly(phasing0, phasing1, matching_pos=None):
"""
Computes the number of necessary switches to transform phasing 0 into phasing 1 or vice versa.
Positions with non-matching genotypes are omitted.
"""
assert len(phasing0) == len(phasing1)
assert len(phasing0) >= 2
assert len(phasing0[0]) == len(phasing1[0])
assert all(len(phasing0[i]) == len(phasing0[0]) for i in range(1, len(phasing0)))
num_vars = len(phasing0[0])
# If positions with matching genotypes are not precomputed, do it here!
if matching_pos is None:
matching_pos = compute_matching_genotype_pos(phasing0, phasing1)
phasing0_matched = ["".join([hap[i] for i in matching_pos]) for hap in phasing0]
phasing1_matched = ["".join([hap[i] for i in matching_pos]) for hap in phasing1]
vector_error = compute_switch_flips_poly(
phasing0_matched,
phasing1_matched,
switch_cost=1,
flip_cost=2 * num_vars * len(phasing0) + 1,
)
assert vector_error.flips == 0
return vector_error.switches
def compute_switch_flips_poly(phasing0, phasing1, switch_cost=1, flip_cost=1):
"""
Computes the combined number of switches and flips, which are needed to transform phasing 0 into
phasing 1 or vice versa.
"""
(result, switches_in_column, flips_in_column, poswise_config) = compute_switch_flips_poly_bt(
phasing0, phasing1, switch_cost=switch_cost, flip_cost=flip_cost
)
return result
def compute_switch_flips_poly_bt(
phasing0, phasing1, report_error_positions=False, switch_cost=1, flip_cost=1
):
# Check input
if len(phasing0) != len(phasing1):
logger.error(
"Incompatible phasings. Number of haplotypes is not equal "
f"({len(phasing0)} != {len(phasing1)})."
)
assert len(phasing0) == len(phasing1)
num_pos = len(phasing0[0])
if num_pos == 0:
return SwitchFlips(), None, None, None
ploidy = len(phasing0)
if ploidy == 0:
return SwitchFlips(), None, None, None
for i in range(0, len(phasing1)):
if len(phasing1[i]) != num_pos:
logger.error(
"Inconsistent input for phasing. Haplotypes have different lengths "
f"( len(phasing1[0]={num_pos} != len(phasing1[{i}]={len(phasing1[i])}."
)
assert len(phasing1[i]) == num_pos
if len(phasing0[i]) != num_pos:
logger.error(
"Inconsistent input for phasing. Haplotypes have different lengths "
f"( len(phasing1[0]={num_pos} != len(phasing0[{i}]={len(phasing0[i])}."
)
assert len(phasing1[i]) == num_pos
if ploidy > 6:
logger.warning(
"Computing vector error with more than 6 haplotypes. This may take very long ..."
)
# Compute comparison
calc = SwitchFlipCalculator(ploidy, switch_cost, flip_cost)
result = SwitchFlips()
(
switches,
flips,
switches_in_column,
flips_in_column,
positionwise_config,
) = calc.compute_switch_flips_poly(phasing0, phasing1)
# Aggregate results
result.switches = switches / ploidy
result.flips = flips / ploidy
return result, switches_in_column, flips_in_column, positionwise_config
def poly_num_switches(perm0, perm1):
cost = 0
for i in range(len(perm0)):
if perm0[i] != perm1[i]:
cost += 1
return cost
def compare_block(phasing0, phasing1):
""" Input are two lists of haplotype sequences over {0,1}. """
assert len(phasing0) == len(phasing1)
ploidy = len(phasing0)
minimum_hamming_distance = float("inf")
# compute minimum hamming distance
for permutation in permutations(phasing0):
# compute sum of hamming distances
total_hamming = 0
for i in range(ploidy):
total_hamming += hamming(phasing1[i], permutation[i])
total_hamming /= float(ploidy)
minimum_hamming_distance = min(minimum_hamming_distance, total_hamming)
matching_pos = compute_matching_genotype_pos(phasing0, phasing1)
if ploidy == 2:
# conversion to int is allowed, as there should be no fractional error counts for diploid comparisons
switches = int(hamming(switch_encoding(phasing0[0]), switch_encoding(phasing1[0])))
switch_flips = compute_switch_flips(phasing0[0], phasing1[0])
minimum_hamming_distance = int(minimum_hamming_distance)
else:
switches = compute_switch_errors_poly(phasing0, phasing1, matching_pos)
switch_flips = compute_switch_flips_poly(phasing0, phasing1)
return PhasingErrors(
switches=switches,
hamming=minimum_hamming_distance,
switch_flips=switch_flips,
diff_genotypes=len(phasing0[0]) - len(matching_pos),
)
def fraction2percentstr(numerator, denominator):
if denominator == 0:
return "--"
else:
return "{:.2f}%".format(numerator * 100.0 / denominator)
def safefraction(numerator, denominator):
if denominator == 0:
return float("nan")
else:
return numerator / denominator
def create_bed_records(chromosome, phasing0, phasing1, positions, annotation_string):
"""Determines positions of switch errors between two phasings
and yields one BED record per switch error (encoded as a tuple).
The annotation_string is added to each record."""
assert len(phasing0) == len(phasing1) == len(positions)
switch_encoding0 = switch_encoding(phasing0)
switch_encoding1 = switch_encoding(phasing1)
for i, (sw0, sw1) in enumerate(zip(switch_encoding0, switch_encoding1)):
if sw0 != sw1:
yield (chromosome, positions[i] + 1, positions[i + 1] + 1, annotation_string)
def print_stat(text: str, value=None, value2=None, text_width=37):
"""
Print a line like this:
text: value
"""
text = text.rjust(text_width)
if value is None:
assert value2 is None
print(text)
else:
if value == "-":
value = "-" * COUNT_WIDTH
else:
value = str(value).rjust(COUNT_WIDTH)
if value2 is None:
print(text + ":", value)
else:
print(text + ":", value, str(value2).rjust(COUNT_WIDTH))
def print_errors(errors, phased_pairs):
print_stat("phased pairs of variants assessed", phased_pairs)
print_stat("switch errors", errors.switches)
print_stat("switch error rate", fraction2percentstr(errors.switches, phased_pairs))
print_stat("switch/flip decomposition", errors.switch_flips)
print_stat(
"switch/flip rate",
fraction2percentstr(errors.switch_flips.switches + errors.switch_flips.flips, phased_pairs),
)
@dataclasses.dataclass
class PairwiseComparisonResults:
intersection_blocks: int
covered_variants: int
all_assessed_pairs: int
all_switches: int
all_switch_rate: float
all_switchflips: SwitchFlips
all_switchflip_rate: float
blockwise_hamming: int
blockwise_hamming_rate: int
blockwise_diff_genotypes: int
blockwise_diff_genotypes_rate: int
largestblock_assessed_pairs: int
largestblock_switches: int
largestblock_switch_rate: float
largestblock_switchflips: SwitchFlips
largestblock_switchflip_rate: float
largestblock_hamming: int
largestblock_hamming_rate: float
largestblock_diff_genotypes: int
largestblock_diff_genotypes_rate: float
@dataclasses.dataclass
class BlockStats:
variant_count: int
span: int
def collect_common_variants(
variant_tables: List[VariantTable], sample_names: List[str]
) -> Set[VcfVariant]:
common_variants = None
for variant_table, sample in zip(variant_tables, sample_names):
het_variants = [
v
for v, gt in zip(variant_table.variants, variant_table.genotypes_of(sample))
if not gt.is_homozygous()
]
if common_variants is None:
common_variants = set(het_variants)
else:
common_variants.intersection_update(het_variants)
assert common_variants is not None
return common_variants
def compare(
variant_tables: List[VariantTable],
sample_names: List[str],
dataset_names: List[str],
ploidy: int,
):
"""
Return a PairwiseComparisonResults object if the variant_tables has a length of 2.
"""
assert len(variant_tables) > 1
common_variants = collect_common_variants(variant_tables, sample_names)
assert common_variants is not None
print_stat("common heterozygous variants", len(common_variants))
print_stat("(restricting to these below)")
phases = []
sorted_variants = sorted(common_variants, key=lambda v: v.position)
for variant_table, sample in zip(variant_tables, sample_names):
p = [
phase
for variant, phase in zip(variant_table.variants, variant_table.phases_of(sample))
if variant in common_variants
]
assert [v for v in variant_table.variants if v in common_variants] == sorted_variants
assert len(p) == len(common_variants)
phases.append(p)
# blocks[variant_table_index][block_id] is a list of indices into common_variants
blocks: List[DefaultDict[int, List[int]]] = [defaultdict(list) for _ in variant_tables]
block_intersection = defaultdict(list)
for variant_index in range(len(common_variants)):
any_none = False
for i in range(len(phases)):
phase = phases[i][variant_index]
if phase is None:
any_none = True
else:
blocks[i][phase.block_id].append(variant_index)
if not any_none:
joint_block_id = tuple(
phase[variant_index].block_id for phase in phases # type: ignore
)
block_intersection[joint_block_id].append(variant_index)
# create statistics on each block in each data set
block_stats = compute_block_stats(blocks, sorted_variants)
for dataset_name, blck in zip(dataset_names, blocks):
print_stat(
"non-singleton blocks in {}".format(dataset_name),
len([b for b in blck.values() if len(b) > 1]),
)
print_stat("--> covered variants", sum(len(b) for b in blck.values() if len(b) > 1))
intersection_block_count = sum(1 for b in block_intersection.values() if len(b) > 1)
intersection_block_variants = sum(len(b) for b in block_intersection.values() if len(b) > 1)
print_stat("non-singleton intersection blocks", intersection_block_count)
print_stat("--> covered variants", intersection_block_variants)
if len(variant_tables) == 2:
(
bed_records,
longest_block_agreement,
longest_block_positions,
pairwise_comparison,
) = compare_pair(
block_intersection,
dataset_names,
intersection_block_count,
intersection_block_variants,
phases,
ploidy,
sorted_variants,
variant_tables,
)
return (
pairwise_comparison,
bed_records,
block_stats,
longest_block_positions,
longest_block_agreement,
None,
)
else:
assert ploidy == 2
multiway_results = compare_multiway(block_intersection, dataset_names, phases)
return None, None, block_stats, None, None, multiway_results
def compare_pair(
block_intersection,
dataset_names,
intersection_block_count,
intersection_block_variants,
phases,
ploidy,
sorted_variants,
variant_tables,
):
longest_block = 0
longest_block_errors = PhasingErrors()
longest_block_positions = []
longest_block_agreement = []
phased_pairs = 0
bed_records = []
total_errors = PhasingErrors()
total_compared_variants = 0
for block in block_intersection.values():
if len(block) < 2:
continue
phasing0 = []
phasing1 = []
for j in range(ploidy):
p0 = "".join(str(phases[0][i].phase[j]) for i in block)
p1 = "".join(str(phases[1][i].phase[j]) for i in block)
phasing0.append(p0)
phasing1.append(p1)
block_positions = [sorted_variants[i].position for i in block]
errors = compare_block(phasing0, phasing1)
# TODO: extend to polyploid
if ploidy == 2:
bed_records.extend(
create_bed_records(
variant_tables[0].chromosome,
phasing0[0],
phasing1[0],
block_positions,
"{}<-->{}".format(*dataset_names),
)
)
total_errors += errors
phased_pairs += len(block) - 1
total_compared_variants += len(block)
if len(block) > longest_block:
longest_block = len(block)
longest_block_errors = errors
longest_block_positions = block_positions
# TODO: extend to polyploid
if ploidy == 2:
if hamming(phasing0[0], phasing1[0]) < hamming(phasing0[0], complement(phasing1[0])):
longest_block_agreement = [
1 * (p0 == p1) for p0, p1 in zip(phasing0[0], phasing1[0])
]
else:
longest_block_agreement = [
1 * (p0 != p1) for p0, p1 in zip(phasing0[0], phasing1[0])
]
longest_block_assessed_pairs = max(longest_block - 1, 0)
print_stat("ALL INTERSECTION BLOCKS", "-")
print_errors(total_errors, phased_pairs)
print_stat("Block-wise Hamming distance", total_errors.hamming)
print_stat(
"Block-wise Hamming distance [%]",
fraction2percentstr(total_errors.hamming, total_compared_variants),
)
print_stat("Different genotypes", total_errors.diff_genotypes)
print_stat(
"Different genotypes [%]",
fraction2percentstr(total_errors.diff_genotypes, total_compared_variants),
)
print_stat("LARGEST INTERSECTION BLOCK", "-")
print_errors(longest_block_errors, longest_block_assessed_pairs)
print_stat("Hamming distance", longest_block_errors.hamming)
print_stat(
"Hamming distance [%]", fraction2percentstr(longest_block_errors.hamming, longest_block)
)
print_stat("Different genotypes", longest_block_errors.diff_genotypes)
print_stat(
"Different genotypes [%]",
fraction2percentstr(longest_block_errors.diff_genotypes, longest_block),
)
pcr = PairwiseComparisonResults(
intersection_blocks=intersection_block_count,
covered_variants=intersection_block_variants,
all_assessed_pairs=phased_pairs,
all_switches=total_errors.switches,
all_switch_rate=safefraction(total_errors.switches, phased_pairs),
all_switchflips=total_errors.switch_flips,
all_switchflip_rate=safefraction(
total_errors.switch_flips.switches + total_errors.switch_flips.flips, phased_pairs
),
blockwise_hamming=total_errors.hamming,
blockwise_hamming_rate=safefraction(total_errors.hamming, total_compared_variants),
blockwise_diff_genotypes=total_errors.diff_genotypes,
blockwise_diff_genotypes_rate=safefraction(
total_errors.diff_genotypes, total_compared_variants
),
largestblock_assessed_pairs=longest_block_assessed_pairs,
largestblock_switches=longest_block_errors.switches,
largestblock_switch_rate=safefraction(
longest_block_errors.switches, longest_block_assessed_pairs
),
largestblock_switchflips=longest_block_errors.switch_flips,
largestblock_switchflip_rate=safefraction(
longest_block_errors.switch_flips.switches + longest_block_errors.switch_flips.flips,
longest_block_assessed_pairs,
),
largestblock_hamming=longest_block_errors.hamming,
largestblock_hamming_rate=safefraction(longest_block_errors.hamming, longest_block),
largestblock_diff_genotypes=longest_block_errors.diff_genotypes,
largestblock_diff_genotypes_rate=safefraction(
longest_block_errors.diff_genotypes, longest_block
),
)
return bed_records, longest_block_agreement, longest_block_positions, pcr
def compare_multiway(block_intersection, dataset_names, phases):
histogram = defaultdict(int)
total_compared = 0
for block in block_intersection.values():
if len(block) < 2:
continue
total_compared += len(block) - 1
phasings = ["".join(str(phases[j][i].phase[0]) for i in block) for j in range(len(phases))]
switch_encodings = [switch_encoding(p) for p in phasings]
for i in range(len(block) - 1):
s = "".join(switch_encodings[j][i] for j in range(len(switch_encodings)))
s = min(s, complement(s))
histogram[s] += 1
print_stat("Compared pairs of variants", total_compared)
bipartitions = list(histogram.keys())
bipartitions.sort()
multiway_results = {} # (dataset_list0, dataset_list1) --> count
for i, s in enumerate(bipartitions):
count = histogram[s]
if i == 0:
assert set(c for c in s) == set("0")
print("ALL AGREE")
elif i == 1:
print("DISAGREEMENT")
left, right = [], []
for name, leftright in zip(dataset_names, s):
if leftright == "0":
left.append(name)
else:
right.append(name)
print_stat(
("{%s} vs. {%s}" % (",".join(left), ",".join(right))),
count,
fraction2percentstr(count, total_compared),
)
multiway_results[(",".join(left), ",".join(right))] = count
return multiway_results
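# Illustrative note (not part of the original module; it assumes complement()
# simply flips every '0'/'1' character): for three datasets A, B, C and one pair
# of consecutive variants, per-dataset switch encodings of "0", "1", "1" give
# s = "011"; min("011", complement("011")) = min("011", "100") keeps "011", so
# the pair is counted under the bipartition {A} vs. {B, C}. A string of all
# zeros means all datasets agree on that pair.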
def compute_block_stats(
blocks: List[DefaultDict[int, List[int]]], sorted_variants: List[VcfVariant]
):
block_stats = []
for block in blocks:
l = []
for block_id, variant_indices in block.items():
if len(variant_indices) < 2:
continue
span = (
sorted_variants[variant_indices[-1]].position
- sorted_variants[variant_indices[0]].position
)
l.append(BlockStats(len(variant_indices), span))
block_stats.append(l)
return block_stats
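# Worked example (illustrative numbers only): a block whose variant indices map
# to positions 100, 250 and 400 yields BlockStats(variant_count=3, span=300);
# blocks with fewer than two variants are skipped entirely.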
def create_blocksize_histogram(filename, block_stats, names, use_weights=False):
try:
import matplotlib
import numpy
matplotlib.use("pdf")
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
except ImportError:
raise CommandLineError(
"To use option --plot-blocksizes, you need to have numpy and matplotlib installed."
)
assert len(block_stats) == len(names)
color_list = ["#ffa347", "#0064c8", "#b42222", "#22a5b4", "#b47c22", "#6db6ff"]
if len(color_list) < len(block_stats):
color_count = len(block_stats)
color_list = pyplot.cm.Set1([n / color_count for n in range(color_count)])
colors = color_list[: len(block_stats)]
with PdfPages(filename) as pdf:
for what, xlabel in [
(lambda stats: stats.variant_count, "variant count"),
(lambda stats: stats.span, "span [bp]"),
]:
pyplot.figure(figsize=(10, 8))
max_value = max(what(stats) for stats in chain(*block_stats))
common_bins = numpy.logspace(0, math.ceil(math.log10(max_value)), 50)
for l, name, color in zip(block_stats, names, colors):
x = [what(stats) for stats in l]
n, bins, patches = pyplot.hist(
x,
bins=common_bins,
alpha=0.6,
color=color,
label=name,
weights=x if use_weights else None,
)
pyplot.xlabel(xlabel)
pyplot.ylabel("Number of blocks")
pyplot.gca().set_xscale("log")
pyplot.gca().set_yscale("log")
pyplot.grid(True)
pyplot.legend()
pdf.savefig()
pyplot.close()
pyplot.figure(figsize=(10, 8))
common_bins = numpy.logspace(0, math.ceil(math.log10(max_value)), 25)
x = [[what(stats) for stats in l] for l in block_stats]
n, bins, patches = pyplot.hist(
x,
bins=common_bins,
alpha=0.6,
color=colors,
label=names,
weights=x if use_weights else None,
)
pyplot.xlabel(xlabel)
pyplot.ylabel("Number of blocks")
pyplot.gca().set_xscale("log")
pyplot.gca().set_yscale("log")
pyplot.grid(True)
pyplot.legend()
pdf.savefig()
pyplot.close()
def run_compare(
vcf,
ploidy,
names=None,
sample=None,
ignore_sample_name=False,
tsv_pairwise=None,
tsv_multiway=None,
only_snvs=False,
switch_error_bed=None,
plot_blocksizes=None,
plot_sum_of_blocksizes=None,
longest_block_tsv=None,
):
vcf_readers = [VcfReader(f, indels=not only_snvs, phases=True, ploidy=ploidy) for f in vcf]
if names:
dataset_names = names.split(",")
if len(dataset_names) != len(vcf):
raise CommandLineError(
"Number of names given with --names does not equal number of VCFs."
)
else:
dataset_names = ["file{}".format(i) for i in range(len(vcf))]
sample_names = get_sample_names(
vcf_readers, requested_sample=sample, ignore_name=ignore_sample_name
)
with ExitStack() as stack:
tsv_pairwise_file = tsv_multiway_file = longest_block_tsv_file = switch_error_bedfile = None
if tsv_pairwise:
tsv_pairwise_file = stack.enter_context(open(tsv_pairwise, "w"))
if tsv_multiway:
tsv_multiway_file = stack.enter_context(open(tsv_multiway, "w"))
print(
"#sample",
"chromosome",
"dataset_list0",
"dataset_list1",
"count",
sep="\t",
file=tsv_multiway_file,
)
if longest_block_tsv:
longest_block_tsv_file = stack.enter_context(open(longest_block_tsv, "w"))
print(
"#dataset_name0",
"dataset_name1",
"#sample",
"chromosome",
"position",
"phase_agreeing",
sep="\t",
file=longest_block_tsv_file,
)
if tsv_pairwise_file:
fields = [
"#sample",
"chromosome",
"dataset_name0",
"dataset_name1",
"file_name0",
"file_name1",
]
field_names = [f.name for f in dataclasses.fields(PairwiseComparisonResults)]
fields.extend(field_names)
fields.extend(["het_variants0", "only_snvs"])
print(*fields, sep="\t", file=tsv_pairwise_file)
if switch_error_bed:
switch_error_bedfile = stack.enter_context(open(switch_error_bed, "w"))
if len(set(sample_names)) > 1 and ignore_sample_name:
print(
"Comparing phasings for samples:",
", ".join(sample_names),
" (--ignore-sample-names selected)",
)
else:
print("Comparing phasings for sample", sample_names[0])
vcfs = get_variant_tables(vcf_readers, vcf)
chromosomes = get_common_chromosomes(vcfs)
if len(chromosomes) == 0:
raise CommandLineError("No chromosome is contained in all VCFs. Aborting.")
logger.info("Chromosomes present in all VCFs: %s", ", ".join(chromosomes))
print("FILENAMES")
longest_name = max(len(n) for n in dataset_names)
for name, filename in zip(dataset_names, vcf):
print(name.rjust(longest_name + 2), "=", filename)
width = max(longest_name, 15) + 5
all_block_stats = [[] for _ in vcfs]
def add_block_stats(block_stats):
assert len(block_stats) == len(all_block_stats)
for big_list, new_list in zip(all_block_stats, block_stats):
big_list.extend(new_list)
for chromosome in sorted(chromosomes):
print("---------------- Chromosome {} ----------------".format(chromosome))
all_bed_records = []
variant_tables = [vcf[chromosome] for vcf in vcfs]
all_variants_union = set()
all_variants_intersection = None
het_variants_union = set()
het_variants_intersection = None
het_variant_sets = []
het_variants0 = None
print("VARIANT COUNTS (heterozygous / all): ")
for variant_table, name, sample in zip(variant_tables, dataset_names, sample_names):
all_variants_union.update(variant_table.variants)
het_variants = [
v
for v, gt in zip(variant_table.variants, variant_table.genotypes_of(sample))
if not gt.is_homozygous()
]
if het_variants0 is None:
het_variants0 = len(het_variants)
het_variants_union.update(het_variants)
if all_variants_intersection is None:
all_variants_intersection = set(variant_table.variants)
het_variants_intersection = set(het_variants)
else:
all_variants_intersection.intersection_update(variant_table.variants)
het_variants_intersection.intersection_update(het_variants)
het_variant_sets.append(set(het_variants))
print(
"{}:".format(name).rjust(width),
str(len(het_variants)).rjust(COUNT_WIDTH),
"/",
str(len(variant_table.variants)).rjust(COUNT_WIDTH),
)
print(
"UNION:".rjust(width),
str(len(het_variants_union)).rjust(COUNT_WIDTH),
"/",
str(len(all_variants_union)).rjust(COUNT_WIDTH),
)
print(
"INTERSECTION:".rjust(width),
str(len(het_variants_intersection)).rjust(COUNT_WIDTH),
"/",
str(len(all_variants_intersection)).rjust(COUNT_WIDTH),
)
for i in range(len(vcfs)):
for j in range(i + 1, len(vcfs)):
print(
"PAIRWISE COMPARISON: {} <--> {}:".format(
dataset_names[i], dataset_names[j]
)
)
(
results,
bed_records,
block_stats,
longest_block_positions,
longest_block_agreement,
multiway_results,
) = compare(
[variant_tables[i], variant_tables[j]],
[sample_names[i], sample_names[j]],
[dataset_names[i], dataset_names[j]],
ploidy,
)
if len(vcfs) == 2:
add_block_stats(block_stats)
all_bed_records.extend(bed_records)
sample_name = (
f"{sample_names[i]}_{sample_names[j]}"
if ignore_sample_name
else sample_names[i]
)
if tsv_pairwise_file:
fields = [
sample_name,
chromosome,
dataset_names[i],
dataset_names[j],
vcf[i],
vcf[j],
]
fields.extend(dataclasses.astuple(results))
fields.extend([het_variants0, int(only_snvs)])
print(*fields, sep="\t", file=tsv_pairwise_file)
if longest_block_tsv_file:
assert ploidy == 2
assert len(longest_block_positions) == len(longest_block_agreement)
for position, phase_agreeing in zip(
longest_block_positions, longest_block_agreement
):
print(
dataset_names[i],
dataset_names[j],
sample_name,
chromosome,
position,
phase_agreeing,
sep="\t",
file=longest_block_tsv_file,
)
# if requested, write all switch errors found in the current chromosome to the bed file
if switch_error_bedfile:
assert ploidy == 2
all_bed_records.sort()
for record in all_bed_records:
print(*record, sep="\t", file=switch_error_bedfile)
if len(vcfs) > 2:
assert ploidy == 2
print("MULTIWAY COMPARISON OF ALL PHASINGS:")
(
results,
bed_records,
block_stats,
longest_block_positions,
longest_block_agreement,
multiway_results,
) = compare(variant_tables, sample_names, dataset_names, ploidy)
add_block_stats(block_stats)
if tsv_multiway_file:
sample_name = (
"_".join(set(sample_names)) if ignore_sample_name else sample_names[0]
)
for ((dataset_list0, dataset_list1), count) in multiway_results.items():
print(
sample_name,
chromosome,
"{" + dataset_list0 + "}",
"{" + dataset_list1 + "}",
count,
sep="\t",
file=tsv_multiway_file,
)
if plot_blocksizes:
create_blocksize_histogram(plot_blocksizes, all_block_stats, dataset_names)
if plot_sum_of_blocksizes:
create_blocksize_histogram(
plot_sum_of_blocksizes, all_block_stats, dataset_names, use_weights=True
)
def get_common_chromosomes(vcfs: List[Dict[str, VariantTable]]) -> List[str]:
common = None
for chrom_variant_table_map in vcfs:
chromosomes = chrom_variant_table_map.keys()
if common is None:
common = set(chromosomes)
else:
common.intersection_update(chromosomes)
if common is None:
return []
return sorted(common)
def get_variant_tables(
vcf_readers: List[VcfReader], vcf_filenames: List[str]
) -> List[Dict[str, VariantTable]]:
vcfs = []
for reader, filename in zip(vcf_readers, vcf_filenames):
# create dict mapping chromosome names to VariantTables
m = dict()
logger.info("Reading phasing from %r", filename)
try:
for variant_table in reader:
m[variant_table.chromosome] = variant_table
except PloidyError as e:
raise CommandLineError("Provided ploidy is invalid: {}. Aborting.".format(e))
vcfs.append(m)
return vcfs
def get_sample_names(
vcf_readers: List[VcfReader], requested_sample: Optional[str], ignore_name: bool = False
) -> List[str]:
first_samples = []
sample_intersection = None
for vcf_reader in vcf_readers:
if sample_intersection is None:
sample_intersection = set(vcf_reader.samples)
else:
sample_intersection.intersection_update(vcf_reader.samples)
if ignore_name and len(vcf_reader.samples) > 1:
raise CommandLineError(
"File '{file}' contains multiple samples, option --ignore-sample-name not available.".format(
file=vcf_reader.path
)
)
first_samples.append(vcf_reader.samples[0])
assert sample_intersection is not None
if requested_sample:
sample_intersection.intersection_update([requested_sample])
if len(sample_intersection) == 0:
raise CommandLineError(
"Sample {!r} requested on command-line not found in all VCFs".format(
requested_sample
)
)
sample_names = [requested_sample] * len(vcf_readers)
elif ignore_name:
sample_names = first_samples
else:
if len(sample_intersection) == 0:
raise CommandLineError("None of the samples is present in all VCFs")
elif len(sample_intersection) == 1:
sample_names = [list(sample_intersection)[0]] * len(vcf_readers)
else:
raise CommandLineError(
"More than one sample is present in all VCFs, please use"
" --sample to specify which sample to work on."
)
return sample_names
def main(args):
run_compare(**vars(args))
|
the-stack_0_16507 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
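# Example (made-up values): a minimal bitcoin.conf such as
#
#   rpcuser=alice
#   rpcpassword=secret
#   testnet=1        # trailing comments are stripped by FakeSecHead
#
# is parsed as {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}.
# FakeSecHead injects a fake "[all]" section header because bitcoin.conf has
# none, which is what SafeConfigParser requires.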
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 16556 if testnet else 15556
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
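# A minimal usage sketch of the greedy selection above (not part of the original
# script; the txids and amounts are made up). It is never called automatically.
def _select_coins_example():
    toy_inputs = [
        {"txid": "aa" * 32, "vout": 0, "amount": Decimal("0.5")},
        {"txid": "bb" * 32, "vout": 1, "amount": Decimal("0.7")},
    ]
    # 0.5 < 0.6, so both inputs are taken; change is 1.2 - 0.6 = 0.6
    outputs, change = select_coins(Decimal("0.6"), toy_inputs)
    print(outputs, change)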
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # Compute the fee once so the size/amount checks below can reuse it
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_0_16509 | import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from decimal import Decimal
from functools import partial, lru_cache
from typing import (NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any,
Sequence, Iterable)
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem,
QPalette, QIcon, QFontMetrics, QShowEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale, QAbstractItemModel)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu)
from electrum_ltc.i18n import _, languages
from electrum_ltc.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum_ltc.invoices import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .installwizard import InstallWizard
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
PR_ROUTING:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
f"All files (*)")
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
if b is None:
continue
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that confirm to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
class BlockingWaitingDialog(WindowModalDialog):
"""Shows a waiting dialog whilst running a task.
Should be called from the GUI thread. The GUI thread will be blocked while
the task is running; the point of the dialog is to provide feedback
to the user regarding what is going on.
"""
def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.show()
QCoreApplication.processEvents()
task()
self.accept()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, header_layout, ok_label, default=None, allow_multi=False):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv: 'MyTreeView'):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
self.tv.is_editor_open = False
if self.tv._pending_update:
self.tv.update()
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
_prior_text, user_role = self.tv.get_text_and_userrole_from_coordinate(row, col)
# check that we didn't forget to set UserRole on an editable field
assert user_role is not None, (row, col)
self.tv.on_edited(idx, user_role, new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
self.tv.is_editor_open = True
return super().createEditor(parent, option, idx)
class MyTreeView(QTreeView):
ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
filter_columns: Iterable[int]
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is not None:
editable_columns = set(editable_columns)
elif stretch_column is not None:
editable_columns = {stretch_column}
else:
editable_columns = {}
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.is_editor_open = False
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
self._pending_update = False
self._forced_update = False
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def current_item_user_role(self, col) -> Any:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.item_from_index(idx)
if item:
return item.data(Qt.UserRole)
def item_from_index(self, idx: QModelIndex) -> Optional[QStandardItem]:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
idx = model.mapToSource(idx)
return model.sourceModel().itemFromIndex(idx)
else:
return model.itemFromIndex(idx)
def original_model(self) -> QAbstractItemModel:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
return model.sourceModel()
else:
return model
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
self.original_model().setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [ Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter ]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, user_role, text):
self.parent.wallet.set_label(user_role, text)
self.parent.history_model.refresh('on_edited in MyTreeView')
self.parent.utxo_list.update()
self.parent.update_completions()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def get_text_and_userrole_from_coordinate(self, row_num, column):
idx = self.model().index(row_num, column)
item = self.item_from_index(idx)
user_role = item.data(Qt.UserRole)
return item.text(), user_role
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
txt, _ = self.get_text_and_userrole_from_coordinate(row_num, column)
txt = txt.lower()
if self.current_filter in txt:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
cc = menu.addMenu(_("Copy"))
for column in self.Columns:
column_title = self.original_model().horizontalHeaderItem(column).text()
item_col = self.item_from_index(idx.sibling(idx.row(), column))
clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
if clipboard_data is None:
clipboard_data = item_col.text().strip()
cc.addAction(column_title,
lambda text=clipboard_data, title=column_title:
self.place_text_on_clipboard(text, title=title))
return cc
def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
self.parent.do_copy(text, title=title)
def showEvent(self, e: 'QShowEvent'):
super().showEvent(e)
if e.isAccepted() and self._pending_update:
self._forced_update = True
self.update()
self._forced_update = False
def maybe_defer_update(self) -> bool:
"""Returns whether we should defer an update/refresh."""
defer = (not self._forced_update
and (not self.isVisible() or self.is_editor_open))
# side-effect: if we decide to defer update, the state will become stale:
self._pending_update = defer
return defer
class MySortModel(QSortFilterProxyModel):
def __init__(self, parent, *, sort_role):
super().__init__(parent)
self._sort_role = sort_role
def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
item1 = self.sourceModel().itemFromIndex(source_left)
item2 = self.sourceModel().itemFromIndex(source_right)
data1 = item1.data(self._sort_role)
data2 = item2.data(self._sort_role)
if data1 is not None and data2 is not None:
return data1 < data2
v1 = item1.text()
v2 = item2.text()
try:
return Decimal(v1) < Decimal(v2)
except:
return v1 < v2
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = [] # type: List[QToolButton]
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth - 10
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class PasswordLineEdit(QLineEdit):
def __init__(self, *args, **kwargs):
QLineEdit.__init__(self, *args, **kwargs)
self.setEchoMode(QLineEdit.Password)
def clear(self):
# Try to actually overwrite the memory.
# This is really just a best-effort thing...
self.setText(len(self.text()) * " ")
super().clear()
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
GRAY = ColorSchemeItem("gray", "gray")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
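# Illustrative note (not part of the original module): with the default light
# scheme, ColorScheme.RED.as_stylesheet() evaluates to
# "QWidget { color:#7c1111; }" and ColorScheme.RED.as_stylesheet(background=True)
# to "QWidget { background-color:#f18c8c; }"; when dark_scheme is True the
# foreground/background colours of each item are swapped.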
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window, title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getOpenFileName(_("Open {} file").format(title), filter_)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window, title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getSaveFileName(_("Select file to save your {}").format(title),
'electrum-ltc_{}.json'.format(title), filter_)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def get_parent_main_window(
widget, *, allow_wizard: bool = False,
) -> Union[None, 'ElectrumWindow', 'InstallWizard']:
"""Returns a reference to the ElectrumWindow this widget belongs to."""
from .main_window import ElectrumWindow
from .transaction_dialog import TxDialog
from .installwizard import InstallWizard
for _ in range(100):
if widget is None:
return None
if isinstance(widget, ElectrumWindow):
return widget
if isinstance(widget, TxDialog):
return widget.main_window
if isinstance(widget, InstallWizard) and allow_wizard:
return widget
widget = widget.parentWidget()
return None
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
os._exit(0)
else:
webbrowser.open(url)
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
|
the-stack_0_16512 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20150820_2156'),
]
operations = [
migrations.AlterModelOptions(
name='product',
options={'ordering': ['-title']},
),
]
|
the-stack_0_16513 | import argparse
import os
import shutil
import sys
import cv2
def label(img_dir, scale_factor):
img_extensions = {'jpg', 'jpeg', 'png', 'bmp'}
images = sorted([os.path.join(img_dir, f)
for f in os.listdir(img_dir)
if os.path.isfile(os.path.join(img_dir, f)) and
f.lower().split('.')[-1] in img_extensions])
labels = [f for i, f in enumerate(sorted(os.listdir(img_dir)))
if os.path.isdir(os.path.join(img_dir, f))]
if not labels:
raise RuntimeError('No subdirectories found. Please create subdirectories for ' +
'the labels you want to store (e.g. "negative", "positive")')
for imgfile in images:
img = cv2.imread(imgfile)
img_name = os.path.basename(imgfile)
if scale_factor != 1:
            size = (int(img.shape[1]*scale_factor), int(img.shape[0]*scale_factor))  # cv2.resize expects (width, height)
img = cv2.resize(img, size)
print('[{}] Keys:'.format(os.path.basename(imgfile)))
for i, l in enumerate(labels):
print('\t({}): Tag image as "{}"'.format(i+1, l))
print('\t(s): Skip this image')
print('\t(d): Delete this image')
print('\t(ESC/q): Quit the script')
print('')
cv2.namedWindow(img_name)
cv2.imshow(img_name, img)
k = cv2.waitKey()
print('')
        # 's' is the documented skip key; keep 'c' as a legacy alias
        if k in (ord('s'), ord('c')):
            continue
        # Labels are presented 1-based in the menu, so only digits 1-9 select one
        if ord('1') <= k <= ord('9'):
            label_index = int(chr(k)) - 1
            if label_index >= len(labels):
                print('Invalid label index "{}", skipping image'.format(chr(k)))
                continue
            shutil.move(imgfile, os.path.join(img_dir, labels[label_index]))
if k == ord('d'):
os.unlink(imgfile)
# Quit upon 'q' or ESC
if k == ord('q') or k == 27:
break
print('')
cv2.destroyAllWindows()
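# Expected layout (illustrative; the label names are just examples): the image
# directory must already contain one subdirectory per label, e.g.
#
#   images/
#       negative/
#       positive/
#       img_0001.jpg
#       img_0002.png
#
# Pressing "1" then moves the current image into the first label directory in
# alphabetical order ("negative" here), "2" into the second, and so on.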
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--image-dir', '-d', dest='dir', required=True,
help='Directory that contains the images to be processed ' +
'(supported formats: jpg, png, tiff, bmp)')
parser.add_argument('--scale-factor', '-s', dest='scale', required=False, default=1,
type=float, help='Scale factor to be applied to the images for visualization (default: 1)')
opts, args = parser.parse_known_args(sys.argv[1:])
label(img_dir=opts.dir, scale_factor=opts.scale)
if __name__ == '__main__':
main()
# vim:sw=4:ts=4:et:
|
the-stack_0_16514 | import tensorflow as tf
import abc
import logging
LOSS_REGISTRY = {}
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Default margin used by pairwise and absolute margin loss
DEFAULT_MARGIN = 1
# default sampling temperature used by adversarial loss
DEFAULT_ALPHA_ADVERSARIAL = 0.5
# Default margin used by margin based adversarial loss
DEFAULT_MARGIN_ADVERSARIAL = 3
DEFAULT_CLASS_PARAMS = {'require_same_size_pos_neg': True, }
def register_loss(name, external_params=[], class_params=DEFAULT_CLASS_PARAMS):
def populate_class_params():
LOSS_REGISTRY[name].class_params = {}
LOSS_REGISTRY[name].class_params['require_same_size_pos_neg'] = class_params.get('require_same_size_pos_neg',
DEFAULT_CLASS_PARAMS['require_same_size_pos_neg'])
def insert_in_registry(class_handle):
LOSS_REGISTRY[name] = class_handle
class_handle.name = name
LOSS_REGISTRY[name].external_params = external_params
populate_class_params()
return class_handle
return insert_in_registry
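# Usage sketch (not from the original module; "my_loss" is a made-up name):
#
#   @register_loss("my_loss", external_params=['margin'])
#   class MyLoss(Loss):
#       ...
#
# After the class body is executed, LOSS_REGISTRY["my_loss"] is MyLoss,
# MyLoss.external_params == ['margin'] and MyLoss.class_params defaults to
# {'require_same_size_pos_neg': True}, which is what Loss._inputs_check and
# Loss.get_state later look up.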
class Loss(abc.ABC):
"""Abstract class for loss function.
"""
name = ""
external_params = []
class_params = {}
def __init__(self, eta, hyperparam_dict, verbose=False):
"""Initialize Loss.
Parameters
----------
eta: int
number of negatives
hyperparam_dict : dict
dictionary of hyperparams.
(Keys are described in the hyperparameters section)
"""
self._loss_parameters = {}
self._dependencies = []
# perform check to see if all the required external hyperparams are passed
try:
self._loss_parameters['eta'] = eta
self._init_hyperparams(hyperparam_dict)
if verbose:
logger.info('\n--------- Loss ---------')
logger.info('Name : {}'.format(self.name))
for key, value in self._loss_parameters.items():
logger.info('{} : {}'.format(key, value))
except KeyError as e:
msg = 'Some of the hyperparams for loss were not passed to the loss function.\n{}'.format(e)
logger.error(msg)
raise Exception(msg)
def get_state(self, param_name):
"""Get the state value.
Parameters
----------
param_name : string
name of the state for which one wants to query the value
Returns
-------
param_value:
the value of the corresponding state
"""
try:
param_value = LOSS_REGISTRY[self.name].class_params.get(param_name)
return param_value
except KeyError as e:
            msg = 'Invalid key.\n{}'.format(e)
logger.error(msg)
raise Exception(msg)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
"""
msg = 'This function is a placeholder in an abstract class'
logger.error(msg)
        raise NotImplementedError(msg)
def _inputs_check(self, scores_pos, scores_neg):
""" Creates any dependencies that need to be checked before performing loss computations
Parameters
----------
scores_pos : tf.Tensor
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor
A tensor of scores assigned to negative statements.
"""
logger.debug('Creating dependencies before loss computations.')
self._dependencies = []
if LOSS_REGISTRY[self.name].class_params['require_same_size_pos_neg'] and self._loss_parameters['eta'] != 1:
logger.debug('Dependencies found: \n\tRequired same size positive and negative. \n\tEta is not 1.')
self._dependencies.append(tf.Assert(tf.equal(tf.shape(scores_pos)[0], tf.shape(scores_neg)[0]),
[tf.shape(scores_pos)[0], tf.shape(scores_neg)[0]]))
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function. Every inherited class must implement this function.
(All the TF code must go in this function.)
Parameters
----------
scores_pos : tf.Tensor
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
msg = 'This function is a placeholder in an abstract class.'
logger.error(msg)
        raise NotImplementedError(msg)
def apply(self, scores_pos, scores_neg):
""" Interface to external world.
This function does the input checks, preprocesses input and finally applies loss function.
Parameters
----------
scores_pos : tf.Tensor
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
self._inputs_check(scores_pos, scores_neg)
with tf.control_dependencies(self._dependencies):
loss = self._apply(scores_pos, scores_neg)
return loss
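# Illustrative sketch (not part of the original module): the abstract Loss class above
# is meant to be subclassed and registered via the @register_loss decorator, keeping all
# TF code inside _apply(). The loss name 'toy_l2' and its formula are invented here for
# illustration; nothing is added to LOSS_REGISTRY unless this helper is actually called.
def _register_toy_loss_example():
    @register_loss("toy_l2", [])
    class _ToyL2Loss(Loss):
        def _init_hyperparams(self, hyperparam_dict):
            # this toy loss has no loss-specific hyperparameters
            pass

        def _apply(self, scores_pos, scores_neg):
            # pull positive scores towards 1 and negative scores towards 0
            return tf.reduce_sum(tf.square(1 - scores_pos)) + tf.reduce_sum(tf.square(scores_neg))

    return _ToyL2Loss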
@register_loss("pairwise", ['margin'])
class PairwiseLoss(Loss):
"""Pairwise, max-margin loss.
Introduced in :cite:`bordes2013translating`.
.. math::
\mathcal{L}(\Theta) = \sum_{t^+ \in \mathcal{G}}\sum_{t^- \in \mathcal{C}}max(0, [\gamma + f_{model}(t^-;\Theta) - f_{model}(t^+;\Theta)])
where :math:`\gamma` is the margin, :math:`\mathcal{G}` is the set of positives,
:math:`\mathcal{C}` is the set of corruptions, :math:`f_{model}(t;\Theta)` is the model-specific scoring function.
"""
def __init__(self, eta, loss_params={'margin': DEFAULT_MARGIN}, verbose=False):
"""Initialize Loss.
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
- **'margin'**: (float). Margin to be used in pairwise loss computation (default: 1)
Example: ``loss_params={'margin': 1}``
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Verifies and stores the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
- **margin** - Margin to be used in pairwise loss computation(default:1)
"""
self._loss_parameters['margin'] = hyperparam_dict.get('margin', DEFAULT_MARGIN)
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
loss = tf.reduce_sum(tf.maximum(margin - scores_pos + scores_neg, 0))
return loss
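# Minimal usage sketch (assumes the TF 1.x graph/session style used throughout this
# module); the score values below are made up. eta=1 keeps _inputs_check from adding
# the same-size assertion, which is irrelevant for this tiny example anyway.
def _pairwise_loss_usage_example():
    loss_fn = PairwiseLoss(eta=1, loss_params={'margin': 1.0})
    scores_pos = tf.constant([[1.2], [0.3]])  # scores of two positive triples
    scores_neg = tf.constant([[0.4], [0.9]])  # scores of their corruptions
    # returns a tf op computing sum(max(margin - scores_pos + scores_neg, 0))
    return loss_fn.apply(scores_pos, scores_neg)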
@register_loss("nll")
class NLLLoss(Loss):
"""Negative log-likelihood loss.
As described in :cite:`trouillon2016complex`.
.. math::
\mathcal{L}(\Theta) = \sum_{t \in \mathcal{G} \cup \mathcal{C}}log(1 + exp(-y \, f_{model}(t;\Theta)))
where :math:`y \in {-1, 1}` is the label of the statement, :math:`\mathcal{G}` is the set of positives,
:math:`\mathcal{C}` is the set of corruptions, :math:`f_{model}(t;\Theta)` is the model-specific scoring function.
"""
def __init__(self, eta, loss_params={}, verbose=False):
"""Initialize Loss.
Parameters
----------
eta: int
number of negatives
loss_params : dict
dictionary of hyperparams. No hyperparameters are required for this loss.
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
"""
return
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
scores = tf.concat([-scores_pos, scores_neg], 0)
return tf.reduce_sum(tf.log(1 + tf.exp(scores)))
@register_loss("absolute_margin", ['margin'])
class AbsoluteMarginLoss(Loss):
"""Absolute margin , max-margin loss.
Introduced in :cite:`Hamaguchi2017`.
.. math::
\mathcal{L}(\Theta) = \sum_{t^+ \in \mathcal{G}}\sum_{t^- \in \mathcal{C}} f_{model}(t^-;\Theta) - max(0, [\gamma - f_{model}(t^+;\Theta)])
where :math:`\gamma` is the margin, :math:`\mathcal{G}` is the set of positives,
:math:`\mathcal{C}` is the set of corruptions, :math:`f_{model}(t;\Theta)` is the model-specific scoring function.
"""
def __init__(self, eta, loss_params={'margin': DEFAULT_MARGIN}, verbose=False):
"""Initialize Loss
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
- **'margin'**: float. Margin to be used in pairwise loss computation (default:1)
Example: ``loss_params={'margin': 1}``
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dict
Consists of key value pairs. The Loss will check the keys to get the corresponding params.
**margin** - Margin to be used in loss computation(default:1)
Returns
-------
"""
self._loss_parameters['margin'] = hyperparam_dict.get('margin', DEFAULT_MARGIN)
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
loss = tf.reduce_sum(tf.maximum(margin + scores_neg, 0) - scores_pos)
return loss
@register_loss("self_adversarial", ['margin', 'alpha'], {'require_same_size_pos_neg': False})
class SelfAdversarialLoss(Loss):
""" Self adversarial sampling loss.
Introduced in :cite:`sun2018rotate`.
.. math::
\mathcal{L} = -log\, \sigma(\gamma + f_{model} (\mathbf{s},\mathbf{o})) - \sum_{i=1}^{n} p(h_{i}^{'}, r, t_{i}^{'} ) \ log \ \sigma(-f_{model}(\mathbf{s}_{i}^{'},\mathbf{o}_{i}^{'}) - \gamma)
where :math:`\mathbf{s}, \mathbf{o} \in \mathcal{R}^k` are the embeddings of the subject
and object of a triple :math:`t=(s,r,o)`, :math:`\gamma` is the margin, :math:`\sigma` the sigmoid function,
and :math:`p(s_{i}^{'}, r, o_{i}^{'} )` is the negatives sampling distribution which is defined as:
.. math::
p(s'_j, r, o'_j | \{(s_i, r_i, o_i)\}) = \\frac{\exp \\alpha \, f_{model}(\mathbf{s'_j}, \mathbf{o'_j})}{\sum_i \exp \\alpha \, f_{model}(\mathbf{s'_i}, \mathbf{o'_i})}
where :math:`\\alpha` is the temperature of sampling, :math:`f_{model}` is the scoring function of
the desired embeddings model.
"""
def __init__(self, eta, loss_params={'margin': DEFAULT_MARGIN_ADVERSARIAL,
'alpha': DEFAULT_ALPHA_ADVERSARIAL}, verbose=False):
"""Initialize Loss
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
- **'margin'**: (float). Margin to be used for loss computation (default: 1)
- **'alpha'** : (float). Temperature of sampling (default:0.5)
Example: ``loss_params={'margin': 1, 'alpha': 0.5}``
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
- **margin** - Margin to be used in adversarial loss computation (default:3)
- **alpha** - Temperature of sampling (default:0.5)
"""
self._loss_parameters['margin'] = hyperparam_dict.get('margin', DEFAULT_MARGIN_ADVERSARIAL)
self._loss_parameters['alpha'] = hyperparam_dict.get('alpha', DEFAULT_ALPHA_ADVERSARIAL)
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n*negative_count, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
alpha = tf.constant(self._loss_parameters['alpha'], dtype=tf.float32, name='alpha')
# Compute p(neg_samples) based on eq 4
scores_neg_reshaped = tf.reshape(scores_neg, [self._loss_parameters['eta'], tf.shape(scores_pos)[0]])
p_neg = tf.nn.softmax(alpha * scores_neg_reshaped, axis=0)
# Compute Loss based on eg 5
loss = tf.reduce_sum(-tf.log(tf.nn.sigmoid(margin - tf.negative(scores_pos)))) - \
tf.reduce_sum(tf.multiply(p_neg,
tf.log(tf.nn.sigmoid(tf.negative(scores_neg_reshaped) - margin))))
return loss
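    # Layout note (inferred from the reshape above, not stated explicitly in the code):
    # scores_neg is expected to be a flat tensor of length eta * n ordered as eta blocks
    # of n scores, so that after tf.reshape(..., [eta, n]) column j holds the eta
    # corruption scores of the j-th positive, and the softmax over axis 0 weights the
    # corruptions of each positive. Hypothetical example with n=2 positives, eta=3:
    #   scores_neg = [c1(p1), c1(p2), c2(p1), c2(p2), c3(p1), c3(p2)]
    #   reshaped   = [[c1(p1), c1(p2)],
    #                 [c2(p1), c2(p2)],
    #                 [c3(p1), c3(p2)]]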
@register_loss("multiclass_nll", [], {'require_same_size_pos_neg': False})
class NLLMulticlass(Loss):
""" Multiclass NLL Loss.
Introduced in :cite:`chen2015` where both the subject and objects are corrupted (to use it in this way pass
corrupt_sides = ['s', 'o'] to embedding_model_params) .
This loss was re-engineered in :cite:`kadlecBK17` where only the object was corrupted to get improved
performance (to use it in this way pass corrupt_sides = 'o' to embedding_model_params).
.. math::
\mathcal{L(X)} = -\sum_{x_{e_1,e_2,r_k} \in X} log\,p(e_2|e_1,r_k) -\sum_{x_{e_1,e_2,r_k} \in X} log\,p(e_1|r_k, e_2)
Examples
--------
>>> from ampligraph.latent_features import TransE
>>> model = TransE(batches_count=1, seed=555, epochs=20, k=10,
>>> embedding_model_params={'corrupt_sides':['s', 'o']},
>>> loss='multiclass_nll', loss_params={})
"""
def __init__(self, eta, loss_params={}, verbose=False):
"""Initialize Loss
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Verifies and stores the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
"""
pass
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n*negative_count, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : float
The loss value that must be minimized.
"""
scores_neg_reshaped = tf.reshape(scores_neg, [self._loss_parameters['eta'], tf.shape(scores_pos)[0]])
neg_exp = tf.exp(scores_neg_reshaped)
pos_exp = tf.exp(scores_pos)
        softmax_score = pos_exp / (tf.reduce_sum(neg_exp, axis=0) + pos_exp)
loss = -tf.reduce_sum(tf.log(softmax_score))
return loss
|
the-stack_0_16515 | import avroconvert as avc
from multiprocessing import cpu_count
import concurrent
class Execute:
def __init__(self, source: str, bucket: str, dst_format: str, outfolder: str, prefix: str = '', **kwargs):
'''
A wrapper class to run the avro convert operation. This class
calls the reader methods (gcs, s3 or local) and avro converter
methods internally.
:param source: Name of the source file system. Should be one
            of these: gs, s3 or fs.
gs is for google cloud bucket
s3 is for amazon s3 bucket
fs is for local file system
:type source: str
:param bucket: Name of the bucket to read the files. For local
file system, bucket would be the folder name from where
the data will be read and converted to specified
output format
:type bucket: str
:param dst_format: Target output format. The files read from
different sources will be converted to the
            format specified by this parameter. Its
value should be one of these:
            csv, parquet or json; defaults to parquet
:type dst_format: str
:param outfolder: Output folder. This is where the files
converted from avro to csv, parquet or json
will be stored
:type outfolder: str
:param prefix: File prefix. If given, files whose names start with
the given prefix will be read and all the other
files will be omitted
:type prefix: str
:key auth_file: Pass this parameter only when the source is `gs`.
It specifies the location of service account json
file to access google cloud storage. If google
cloud is authenticated or the environment
variable GOOGLE_APPLICATION_CREDENTIALS is set
in the already, then this parameter is not
required
:key access_key: Pass this parameter only when the source is `s3`.
It specifies AWS access key id. If aws is already
authenticated or there exists a file ~/.aws/credentials
or the environment variable AWS_ACCESS_KEY_ID is set,
then this parameter is not required
:key secret_key: Pass this parameter only when the source is `s3`.
It specifies AWS secret key. If aws is already
authenticated or there exists a file ~/.aws/credentials
or the environment variable AWS_SECRET_ACCESS_KEY is set,
then this parameter is not required
:key session_token: Pass this parameter only when the source is `s3`.
It specifies AWS session token.
'''
_src = ['s3', 'gs', 'fs']
_dst_format = ['parquet', 'csv', 'json']
source = source.lower()
if not dst_format:
raise AttributeError(f'Output format not specified, should be one of {_dst_format}')
if not outfolder:
raise AttributeError(f'Please specify an output folder')
dst_format = dst_format.lower()
if source not in _src:
raise Exception(
f'Invalid source {source} passed. Source should be one of {_src}')
if dst_format not in _dst_format:
raise Exception(
f'Invalid format {dst_format}. It should be one of {_dst_format}')
if not bucket:
raise Exception(
f'Please specify a bucket')
self.source = source
self.bucket = bucket
self.prefix = prefix
self.dst_format = dst_format
self.outfolder = outfolder
self.params = kwargs
def _resolve(self):
'''
This method returns a reader instance depending upon
the source specified. If the source is `gs`, this
method will return `gs_reader`; if the source is `s3`,
this method will return `s3_reader`; if the source is
`fs`, this method will return `fs_reader` object
'''
reader_function = getattr(avc, f'{self.source}_reader')
reader = reader_function(
bucket=self.bucket, prefix=self.prefix, **self.params)
return reader
def run(self) -> bool:
'''
Executor method for the AvroConverter class. This method
parallelizes the execution for all the file read->convert->write operations.
'''
raw_content = self._resolve().get_data()
if not raw_content:
return
num_process = cpu_count()*2
avro_object = avc.AvroConvert(
dst_format=self.dst_format, outfolder=self.outfolder)
with concurrent.futures.ProcessPoolExecutor(max_workers=int(num_process)) as executor:
results = [executor.submit(
avro_object.convert_avro, **{'filename': filename, 'data': avrodata}) for filename, avrodata in raw_content.items()]
return True
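# Hedged usage sketch: the folder names below are hypothetical. This reads every avro
# file found under ./avro_data on the local file system ('fs' source) and writes the
# converted parquet files into ./converted using the parallel run() defined above.
if __name__ == '__main__':
    runner = Execute(source='fs', bucket='avro_data', dst_format='parquet',
                     outfolder='converted')
    runner.run()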
|
the-stack_0_16516 | import logging
import os
import subprocess
logging.basicConfig()
logger = logging.getLogger("kalliope")
MPLAYER_EXEC_PATH = "/usr/bin/mplayer"
class Mplayer(object):
"""
This Class is representing the MPlayer Object used to play the all sound of the system.
"""
def __init__(self):
pass
@classmethod
def play(cls, filepath):
"""
Play the sound located in the provided filepath
:param filepath: The file path of the sound to play
:type filepath: str
:Example:
Mplayer.play(self.file_path)
.. seealso:: TTS
.. raises::
.. warnings:: Class Method and Public
"""
mplayer_exec_path = [MPLAYER_EXEC_PATH]
mplayer_options = ['-slave', '-quiet']
mplayer_command = list()
mplayer_command.extend(mplayer_exec_path)
mplayer_command.extend(mplayer_options)
mplayer_command.append(filepath)
logger.debug("Mplayer cmd: %s" % str(mplayer_command))
fnull = open(os.devnull, 'w')
subprocess.call(mplayer_command, stdout=fnull, stderr=fnull)
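# Hedged usage sketch: for a hypothetical sound file the call above reduces to running
#   /usr/bin/mplayer -slave -quiet /tmp/ding.wav
# with stdout/stderr discarded, e.g.:
#   Mplayer.play("/tmp/ding.wav")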
|
the-stack_0_16518 | import os
import requests
import prometheus_client
import threading
import logging
import time
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
PORT=9387
APIBASEURL = os.environ['SABNZBD_BASEURL']
APIKEY = os.environ['SABNZBD_APIKEY']
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S')
logging.info("Starting sabnzbd_exporter on port: %d",PORT)
logging.info("Connecting to %s",APIBASEURL)
def getAPIUrl(mode):
return '{}/api?output=json&apikey={}&mode={}'.format(APIBASEURL, APIKEY, mode)
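# For example, with SABNZBD_BASEURL=http://sabnzbd:8080 (hypothetical), getAPIUrl('queue')
# returns 'http://sabnzbd:8080/api?output=json&apikey=<APIKEY>&mode=queue'.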
def get_sec(time_str):
h, m, s = time_str.split(':')
return int(h) * 3600 + int(m) * 60 + int(s)
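# get_sec converts SABnzbd's "H:MM:SS" time-left strings into seconds,
# e.g. get_sec('1:02:05') == 3725.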
class CustomCollector(object):
def collect(self):
try:
server_stats_url = getAPIUrl('server_stats')
start = time.time()
server_stats = requests.get(server_stats_url).json()
elapsed = time.time() - start
logging.info("Request to %s returned in %s",server_stats_url, elapsed)
dwn = GaugeMetricFamily('sabnzbd_download_bytes', 'SABnzbd Overall download metrics', labels=['period'])
dwn.add_metric(['total'], server_stats['total'])
dwn.add_metric(['day'], server_stats['day'])
dwn.add_metric(['week'], server_stats['week'])
dwn.add_metric(['month'], server_stats['month'])
yield dwn
server_dwn = GaugeMetricFamily('sabnzbd_server_download_bytes','SABnzbd per server download metrics',labels=['server','period'])
for server, metrics in server_stats['servers'].items():
for metric,val in metrics.items():
if metric != 'daily':
server_dwn.add_metric([server,metric],val)
yield server_dwn
start = time.time()
queue_stats_url = getAPIUrl('queue')
queue_stats = requests.get(queue_stats_url).json()["queue"]
elapsed = time.time() - start
logging.info("Request to %s returned in %s",queue_stats_url, elapsed)
yield GaugeMetricFamily('sabnzbd_queue_size','SABnzbd Current Queue Length',value=queue_stats['noofslots_total'])
yield GaugeMetricFamily('sabnzbd_queue_download_rate_bytes_per_second','SABnzbd download rate',value=float(queue_stats['kbpersec'])*1024)
yield GaugeMetricFamily('sabnzbd_queue_remaining_bytes','SABnzbd queue remaining size',value=float(queue_stats['mbleft'])*1024*1024)
yield GaugeMetricFamily('sabnzbd_queue_total_size_bytes','SABnzbd queue total size',value=float(queue_stats['mb'])*1024*1024)
yield GaugeMetricFamily('sabnzbd_queue_remaining_seconds','SABnzbd estimated time remaining',value=get_sec(queue_stats['timeleft']))
except Exception as inst:
logging.error('Error getting stats: %s', inst)
REGISTRY.register(CustomCollector())
start_http_server(PORT)
DE = threading.Event()
DE.wait()
|
the-stack_0_16519 | from subprocess import call
isbn_regex = '^(97(8|9)-?)?\d{9}(\d|X)$'
def fix_author(author):
parts = author.split(u', ')
if len(parts) == 2:
return parts[1] + u' ' + parts[0]
return author
def call_mktorrent(target, torrent_filename, announce, torrent_name=None):
args = [
'mktorrent',
'-a', announce,
'-p',
'-o', torrent_filename,
]
if torrent_name:
args.extend(('-n', torrent_name))
args.append(target)
if call(args) != 0:
raise Exception('mktorrent returned non-zero')
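# Hedged usage sketch (tracker URL and paths are hypothetical): builds a private torrent
# for the ./book directory, writing it to book.torrent, i.e. it runs
#   mktorrent -a https://tracker.example.org/announce -p -o book.torrent -n "My Book" book
#
#   call_mktorrent('book', 'book.torrent', 'https://tracker.example.org/announce',
#                  torrent_name='My Book')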
|
the-stack_0_16521 | # pylint: disable=R0913
# pylint: disable=W0621
import os
from urllib.parse import quote
import pytest
from aiohttp import web
from simcore_service_storage.db import setup_db
from simcore_service_storage.dsm import setup_dsm
from simcore_service_storage.rest import setup_rest
from simcore_service_storage.s3 import setup_s3
from simcore_service_storage.settings import APP_CONFIG_KEY, SIMCORE_S3_ID
def parse_db(dsm_mockup_db):
id_name_map = {}
id_file_count = {}
for d in dsm_mockup_db.keys():
md = dsm_mockup_db[d]
if not md.user_id in id_name_map:
id_name_map[md.user_id] = md.user_name
id_file_count[md.user_id] = 1
else:
id_file_count[md.user_id] = id_file_count[md.user_id] + 1
return id_file_count, id_name_map
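# For instance (hypothetical mockup data): if the mockup db holds two files owned by
# user 21 ("john") and one file owned by user 42 ("anna"), parse_db returns
#   id_file_count == {21: 2, 42: 1} and id_name_map == {21: "john", 42: "anna"}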
@pytest.fixture
def client(loop, aiohttp_unused_port, aiohttp_client, python27_path, postgres_service, minio_service, osparc_api_specs_dir):
app = web.Application()
main_cfg = {
'port': aiohttp_unused_port(),
'host': 'localhost',
'python2': python27_path,
"max_workers" : 4
}
rest_cfg = {
'oas_repo': str(osparc_api_specs_dir), #'${OSPARC_SIMCORE_REPO_ROOTDIR}/api/specs',
#oas_repo: http://localhost:8043/api/specs
}
postgres_cfg = postgres_service
s3_cfg = minio_service
# fake config
app[APP_CONFIG_KEY] = {
'main': main_cfg,
'postgres' : postgres_cfg,
's3' : s3_cfg,
'rest': rest_cfg
}
#app.middlewares.append(dsm_middleware)
setup_db(app)
setup_rest(app)
setup_dsm(app)
setup_s3(app)
cli = loop.run_until_complete( aiohttp_client(app, server_kwargs=main_cfg) )
return cli
async def test_health_check(client):
resp = await client.get("/v0/")
text = await resp.text()
assert resp.status == 200, text
payload = await resp.json()
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert data
assert not error
assert data['name'] == 'simcore_service_storage'
assert data['status'] == 'SERVICE_RUNNING'
@pytest.mark.travis
async def test_locations(client):
user_id = "0"
resp = await client.get("/v0/locations?user_id={}".format(user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert len(data) == 2
assert not error
async def test_s3_files_metadata(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
# list files for every user
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert len(data) == id_file_count[_id]
    # list files filtered by uuid
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
uuid_filter = os.path.join(fmd.project_id, fmd.node_id)
resp = await client.get("/v0/locations/0/files/metadata?user_id={}&uuid_filter={}".format(fmd.user_id, quote(uuid_filter, safe='')))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
for d in data:
assert os.path.join(d['project_id'], d['node_id']) == uuid_filter
async def test_s3_file_metadata(client, dsm_mockup_db):
# go through all files and get them
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get("/v0/locations/0/files/{}/metadata?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
async def test_download_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get("/v0/locations/0/files/{}?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
async def test_upload_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.put("/v0/locations/0/files/{}?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
@pytest.mark.travis
async def test_copy(client, dsm_mockup_db, datcore_testbucket):
# copy N files
N = 2
counter = 0
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
source_uuid = fmd.file_uuid
datcore_uuid = os.path.join(datcore_testbucket, fmd.file_name)
resp = await client.put("/v0/locations/1/files/{}?user_id={}&extra_location={}&extra_source={}".format(quote(datcore_uuid, safe=''),
fmd.user_id, SIMCORE_S3_ID, quote(source_uuid, safe='')))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
counter = counter + 1
if counter == N:
break
# list files for every user
user_id = "0"
resp = await client.get("/v0/locations/1/files/metadata?user_id={}".format(user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert len(data) == 2 + N
async def test_delete_file(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.delete("/v0/locations/0/files/{}?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert not data
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert len(data) == 0
async def test_action_check(client):
QUERY = 'mguidon'
ACTION = 'echo'
FAKE = {
'path_value': 'one',
'query_value': 'two',
'body_value': {
'a': 33,
'b': 45
}
}
resp = await client.post("/v0/check/{}?data={}".format(ACTION, QUERY), json=FAKE)
payload = await resp.json()
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert resp.status == 200, str(payload)
assert data
assert not error
# TODO: validate response against specs
assert data['path_value'] == ACTION
assert data['query_value'] == QUERY
|
the-stack_0_16522 | from telegram import Update
from telegram.ext import CallbackContext
from app.extensions import db
from app.lib.handlers.base import BaseHandler, app_context
from app.models import Channel
class MigrateFilter(BaseHandler):
@app_context
def handler(self, update: Update, context: CallbackContext):
message = update.message
# Incase we get migrate_to_chat_id update, ignore. We'll be getting another update
# from migrate_from_chat_id after it.
if not message.migrate_from_chat_id:
return
original_chat_id = str(message.migrate_from_chat_id)
new_chat_id = str(message.chat_id)
self.logger.debug(f"migrating chat_id from {original_chat_id} to {new_chat_id}")
channel = Channel.query.filter(
Channel.chat_id == original_chat_id
).one_or_none()
if not channel:
self.logger.error(
f"Unable to find a channel that should exists: original_chat_id: {original_chat_id}, new_chat_id: {new_chat_id}"
)
return
channel.chat_id = new_chat_id
db.session.commit()
|
the-stack_0_16526 |
from django.conf import settings
from zerver.lib.actions import set_default_streams, bulk_add_subscriptions, \
internal_prep_stream_message, internal_send_private_message, \
create_stream_if_needed, create_streams_if_needed, do_send_messages, \
do_add_reaction_legacy
from zerver.models import Realm, UserProfile, Message, Reaction, get_system_bot
from typing import Any, Dict, List, Mapping, Text
def send_initial_pms(user):
# type: (UserProfile) -> None
organization_setup_text = ""
if user.is_realm_admin:
help_url = user.realm.uri + "/help/getting-your-organization-started-with-zulip"
organization_setup_text = ("* [Read the guide](%s) for getting your organization "
"started with Zulip\n" % (help_url,))
content = (
"Hello, and welcome to Zulip!\n\nThis is a private message from me, Welcome Bot. "
"Here are some tips to get you started:\n"
"* Download our [Desktop and mobile apps](/apps)\n"
"* Customize your account and notifications on your [Settings page](#settings)\n"
"* Type `?` to check out Zulip's keyboard shortcuts\n"
"%s"
"\n"
"The most important shortcut is `r` to reply.\n\n"
"Practice sending a few messages by replying to this conversation. If you're not into "
"keyboards, that's okay too; clicking anywhere on this message will also do the trick!") \
% (organization_setup_text,)
internal_send_private_message(user.realm, get_system_bot(settings.WELCOME_BOT),
user, content)
def setup_initial_streams(realm):
# type: (Realm) -> None
stream_dicts = [
{'name': "general"},
{'name': "new members",
'description': "For welcoming and onboarding new members. If you haven't yet, "
"introduce yourself in a new thread using your name as the topic!"},
{'name': "zulip",
'description': "For discussing Zulip, Zulip tips and tricks, and asking "
"questions about how Zulip works"}] # type: List[Mapping[str, Any]]
create_streams_if_needed(realm, stream_dicts)
set_default_streams(realm, {stream['name']: {} for stream in stream_dicts})
# For the first user in a realm
def setup_initial_private_stream(user):
# type: (UserProfile) -> None
stream, _ = create_stream_if_needed(user.realm, "core team", invite_only=True,
stream_description="A private stream for core team members.")
bulk_add_subscriptions([stream], [user])
def send_initial_realm_messages(realm):
# type: (Realm) -> None
welcome_bot = get_system_bot(settings.WELCOME_BOT)
# Make sure each stream created in the realm creation process has at least one message below
# Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home
# view slightly less overwhelming
welcome_messages = [
{'stream': Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
'topic': "welcome",
'content': "This is a message on stream `%s` with the topic `welcome`. We'll use this stream "
"for system-generated notifications." % (Realm.DEFAULT_NOTIFICATION_STREAM_NAME,)},
{'stream': "core team",
'topic': "private streams",
'content': "This is a private stream. Only admins and people you invite "
"to the stream will be able to see that this stream exists."},
{'stream': "general",
'topic': "welcome",
'content': "Welcome to #**general**."},
{'stream': "new members",
'topic': "onboarding",
'content': "A #**new members** stream is great for onboarding new members.\n\nIf you're "
"reading this and aren't the first person here, introduce yourself in a new thread "
"using your name as the topic! Type `c` or click on `New Topic` at the bottom of the "
"screen to start a new topic."},
{'stream': "zulip",
'topic': "topic demonstration",
'content': "Here is a message in one topic. Replies to this message will go to this topic."},
{'stream': "zulip",
'topic': "topic demonstration",
'content': "A second message in this topic. With [turtles](/static/images/cute/turtle.png)!"},
{'stream': "zulip",
'topic': "second topic",
'content': "This is a message in a second topic.\n\nTopics are similar to email subjects, "
"in that each conversation should get its own topic. Keep them short, though; one "
"or two words will do it!"},
] # type: List[Dict[str, Text]]
messages = [internal_prep_stream_message(
realm, welcome_bot,
message['stream'], message['topic'], message['content']) for message in welcome_messages]
message_ids = do_send_messages(messages)
# We find the one of our just-sent messages with turtle.png in it,
# and react to it. This is a bit hacky, but works and is kinda a
# 1-off thing.
turtle_message = Message.objects.get(
id__in=message_ids,
subject='topic demonstration',
content__icontains='cute/turtle.png')
do_add_reaction_legacy(welcome_bot, turtle_message, 'turtle')
|
the-stack_0_16527 | # -*- coding: utf-8 -*-
import scrapy
"""
Requirements: top-level category: name, URL; sub-category: name, URL; book title, image, publisher and price
Steps:
    1. Create the scrapy project
    2. Create the spider
    3. Flesh out the spider
    3.1 Change the start URL
    3.2 Extract the top-level and sub-category names and URLs, and build list-page requests from the sub-category URLs
    3.3 Parse the list page, extract the book title and cover image URL, and build the detail-page request
    3.4 Parse the detail page, extract the publisher and the price (build the price request)
    3.5 Parse the price
    3.6 Paginate the list pages
"""
from copy import deepcopy
import re
class BookSpider(scrapy.Spider):
name = 'book'
allowed_domains = ['suning.com']
    # 3.1 Change the start URL
start_urls = ['https://book.suning.com/']
def parse(self, response):
        # Extract top-level and sub-category information
        # Get the list of divs containing both top-level and sub-categories
divs = response.xpath('//div[@class="menu-item"]')
        # Get the list of sub-menu divs
sub_divs = response.xpath('//div[contains(@class, "menu-sub")]')
        # Iterate over the divs to collect top-level and sub-category info
for div in divs:
item = {}
item['b_category_name'] = div.xpath('./dl/dt/h3/a/text()').extract_first()
item['b_category_url'] = div.xpath('./dl/dt/h3/a/@href').extract_first()
            # Get the list of <a> tags holding the sub-category info
a_s = div.xpath('./dl/dd/a')
            # If a_s is an empty list, extract the sub-category info from the sub-menu instead
if len(a_s) == 0:
sub_div = sub_divs[divs.index(div)]
a_s = sub_div.xpath('./div[1]/ul/li/a')
            # Iterate over a_s to extract the sub-category info
for a in a_s:
item['s_category_name'] = a.xpath('./text()').extract_first()
item['s_category_url'] = a.xpath('./@href').extract_first()
# print(item)
                # Build the list-page request from the sub-category URL
                # When an item (or dict) created outside the loop is passed on to the next
                # parse callback, it must be deep-copied, otherwise the data gets mixed up
yield scrapy.Request(item['s_category_url'], callback=self.parse_book_list, meta={'item': deepcopy(item)})
def parse_book_list(self, response):
        # 3.3 Parse the list page, extract book titles and cover image URLs, build detail-page requests
item = response.meta['item']
        # Get the list of <li> tags containing the book info
lis = response.xpath('//*[@id="filter-results"]/ul/li')
        # Iterate over lis to get the book name and image info
for li in lis:
item['book_name'] = li.xpath('.//p[@class="sell-point"]/a/text()').extract_first()
item['book_img'] = 'https:' + li.xpath('.//img/@src2').extract_first()
# print(item)
            # Build the detail-page request
            # Detail-page URL
detail_url = 'https:' + li.xpath('.//p[@class="sell-point"]/a/@href').extract_first()
            # Build the detail-page request and hand it over to the engine
yield scrapy.Request(detail_url, callback=self.parse_book_detail, meta={'item': deepcopy(item)})
        # Pagination
        # 1. Get the next page URL
        # Observed pattern:
        # page 1: https://list.suning.com/1-264003-0.html
        # page 2: https://list.suning.com/1-264003-1.html
        # page 3: https://list.suning.com/1-264003-2.html
# print(response.url)
        # Rewrite https://list.suning.com/1-262518-0-0-0-0.html as https://list.suning.com/1-262518-0.html
current_url = re.sub('(-0)+', '-0', response.url)
# print(current_url)
# param.currentPage = "0";
# param.pageNumbers = "61";
current_page = int(re.findall('param.currentPage\s*=\s*"(\d+)"', response.text)[0])
page_numbers = int(re.findall('param.pageNumbers\s*=\s*"(\d+)"', response.text)[0])
# print(current_page)
# print(page_numbers)
        # Compute the next page number
next_page = current_page + 1
        # If there is a next page, build its URL
if next_page < page_numbers:
            # Build the next-page URL
            # Generate the replacement suffix
subfix = '-{}.html'.format(next_page)
            # e.g. 1 -> -1.html
next_url = re.sub('-\d+.html', subfix, current_url)
print(next_url)
            # Build the next-page request
yield scrapy.Request(next_url, callback=self.parse_book_list, meta={'item': deepcopy(item)})
def parse_book_detail(self, response):
        # Parse the detail page
        # 3.4 Parse the detail page, extract the publisher and the price (build the price request)
item = response.meta['item']
item['book_publisher'] = response.xpath('//*[@id="productName"]/a/text()').extract_first()
        # - 1. Prepare the price URL template
price_url = 'https://pas.suning.com/nspcsale_0_000000000{}_000000000{}_{}_20_021_0210101.html'
        # - 2. Extract data from the detail-page URL
datas = re.findall('https://product.suning.com/(\d+)/(\d+).html', response.url)[0]
        # - 3. Build the complete price URL
price_url = price_url.format(datas[1], datas[1], datas[0])
# print(item)
# print(price_url)
        # Build the price request
yield scrapy.Request(price_url, callback=self.parse_price, meta={'item': item})
def parse_price(self, response):
        # Parse the price
item = response.meta['item']
        # Approach: use the promotional price if there is one, otherwise fall back to the regular (net) price
price = re.findall('"promotionPrice":\s*"(\d+.\d+)"', response.text)
if len(price) == 0:
price = re.findall('"netPrice":\s*"(\d+.\d+)"', response.text)
        # Extract the price
item['price'] = price[0]
# print(item)
        # Hand the data over to the engine
yield item
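# Worked example for the price URL built in parse_book_detail (product/shop ids are
# hypothetical): for a detail URL like https://product.suning.com/0070067633/123456789.html
# the regex yields datas == ('0070067633', '123456789'), so the price request becomes
# https://pas.suning.com/nspcsale_0_000000000123456789_000000000123456789_0070067633_20_021_0210101.html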
|
the-stack_0_16528 | # This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Sequana GUI. Can also be used for any snakemake pipeline"""
import sys
import os
import shutil
import re
import time
import psutil
import subprocess as sp
import argparse
import signal
import pkg_resources
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as QW
from PyQt5.QtCore import Qt, QTemporaryDir
from sequana_pipetools import snaketools
from sequanix.utils import YamlDocParser, on_cluster, rest2html
from .ui import Ui_MainWindow
from .widgets import (
Browser,
QIPythonWidget,
About,
FileBrowser,
SVGDialog,
WarningMessage,
CriticalMessage,
PreferencesDialog,
HelpDialog,
SnakemakeDialog,
Tools,
QPlainTextEditLogger,
Ruleform,
)
import easydev
import colorlog
logger = colorlog.getLogger(__name__)
def sigint_handler(*args): # pragma: no cover
"""Handler for the SIGINT signal."""
sys.stderr.write("\r")
if (
QW.QMessageBox.question(
None, "", "Are you sure you want to quit?", QW.QMessageBox.Yes | QW.QMessageBox.No, QW.QMessageBox.No
)
== QW.QMessageBox.Yes
):
QW.QApplication.quit()
class BaseFactory(Tools):
"""Tab on top are all based on this abstract class
It should provide access to a snakefile and its config file as well
as working directory.
Currently, the :class:`SequanaFactory` and :class:`GenericFactory` are
implemented.
"""
def __init__(self, mode, run_button):
super(BaseFactory, self).__init__()
self.mode = mode
self._run_button = run_button
# And finally the working directory
self._directory_browser = FileBrowser(directory=True)
self._directory_browser.clicked_connect(self._switch_off_run)
def _switch_off_run(self): # pragma: no cover
self.debug("Switching off run button")
self._run_button.setEnabled(False)
def copy(self, source, target, force): # pragma: no cover
if os.path.exists(target) and force is False:
save_msg = WarningMessage("The file <i>{0}</i> already exists in the working directory".format(source))
save_msg.setInformativeText("Do you want to overwrite it?")
save_msg.setStandardButtons(QW.QMessageBox.Yes | QW.QMessageBox.Discard | QW.QMessageBox.Cancel)
save_msg.setDefaultButton(QW.QMessageBox.Yes)
# Yes == 16384
# Save == 2048
retval = save_msg.exec_()
if retval in [16384, 2048]:
self.warning("Overwritting %s" % target)
super(BaseFactory, self).copy(source, target)
else:
super(BaseFactory, self).copy(source, target)
def _copy_snakefile(self, force=False): # pragma: no cover
if self.snakefile is None:
self.info("No pipeline selected yet")
return # nothing to be done
if self.directory is None:
self.info("No working directory selected yet (copy snakefile)")
return
# source and target filenames
target = self.directory + os.sep + os.path.basename(self.snakefile)
if os.path.exists(target) and easydev.md5(target) == easydev.md5(self.snakefile):
self.info("Target and source (pipeline) are identical. Skipping copy.")
# if target and source are identical, nothing to do
return
# if filename are identical but different, do we want to overwrite it ?
if os.path.basename(self.snakefile) == target:
self.warning("%s exists already in %s" % (self.snakefile, self.directory))
return
self.info("Copying snakefile in %s " % self.directory)
self.copy(self.snakefile, target, force=force)
def _copy_configfile(self): # pragma: no cover
if self.configfile is None:
self.info("No config selected yet")
return # nothing to be done
if self._directory_browser.path_is_setup() is False:
self.info("No working directory selected yet (copy config)")
return
# FIXME
# THis does not check the formatting so when saved, it is different
# from original even though parameters are the same...
target = self.directory + os.sep + os.path.basename(self.configfile)
if os.path.exists(target) and easydev.md5(target) == easydev.md5(self.configfile):
self.info("Target and source (pipeline) are identical. Skipping copy.")
return
self.info("Copying config in %s " % self.directory)
self.copy(self.configfile, self.directory)
def _get_directory(self):
filename = self._directory_browser.get_filenames()
if len(filename):
return filename
else:
return None
directory = property(_get_directory)
def __repr__(self):
return "%s Factory" % self.mode
class SequanaFactory(BaseFactory):
def __init__(self, run_button, combobox):
super(SequanaFactory, self).__init__("sequana", run_button)
self._imported_config = None
self._choice_button = combobox
# Some widgets to be used: a file browser for paired files
fastq_filter = "Fastq file (*.fastq *.fastq.gz *.fq *.fq.gz)"
self._sequana_paired_tab = FileBrowser(paired=True, file_filter=fastq_filter)
self._sequana_readtag_label2 = QW.QLabel("Read tag (e.g. _[12].fastq)")
self._sequana_readtag_lineedit2 = QW.QLineEdit("_R[12]_")
# Set the file browser input_directory tab
self._sequana_directory_tab = FileBrowser(directory=True)
self._sequana_readtag_label = QW.QLabel("Read tag (e.g. _[12].fastq)")
self._sequana_readtag_lineedit = QW.QLineEdit("_R[12]_")
self._sequana_pattern_label = QW.QLabel("<div><i>Optional</i> pattern (e.g., Samples_1?/*fastq.gz)</div>")
self._sequana_pattern_lineedit = QW.QLineEdit()
# triggers/connectors
self._sequana_directory_tab.clicked_connect(self._switch_off_run)
self._choice_button.activated.connect(self._switch_off_run)
self._sequana_paired_tab.clicked_connect(self._switch_off_run)
def _get_pipeline(self):
index = self._choice_button.currentIndex()
if index == 0:
return None
else:
return self._choice_button.currentText()
pipeline = property(_get_pipeline)
def _get_snakefile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.snakefile
snakefile = property(_get_snakefile)
def _get_configfile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.config
configfile = property(_get_configfile)
def _get_clusterconfigfile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.cluster_config
clusterconfigfile = property(_get_clusterconfigfile)
def _get_multiqcconfigfile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.multiqc_config
multiqcconfigfile = property(_get_multiqcconfigfile)
def _get_schemafile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.schema_config
schemafile = property(_get_schemafile)
def _get_config(self):
if self._imported_config:
cfg = snaketools.SequanaConfig(self._imported_config)
return cfg
if self.configfile:
try:
cfg = snaketools.SequanaConfig(self.configfile)
return cfg
except AssertionError:
self.warning("Warning: could not parse the config file")
return
config = property(_get_config)
def __repr__(self): # pragma: no cover
in1 = self._sequana_directory_tab.get_filenames()
in2 = self._sequana_paired_tab.get_filenames()
txt = super(SequanaFactory, self).__repr__()
txt += "\npipeline:%s\ninput:\n - %s\n - %s\n - directory:%s\n"
if self.clusterconfigfile:
txt += " - cluster config: %s\n" % self.clusterconfigfile
if self.schemafile:
txt += " - schema config: %s" % self.schemafile
if self.multiqcconfigfile:
txt += " - schema config: %s" % self.multiqcconfigfile
return txt % (self.pipeline, in1, in2, self.directory)
class GenericFactory(BaseFactory):
def __init__(self, run_button):
super(GenericFactory, self).__init__("generic", run_button)
# Define the Snakefile browser and config widgets
self._snakefile_browser = FileBrowser(directory=False)
self._config_browser = FileBrowser(directory=False, file_filter="YAML file (*.json *.yaml *.yml)")
# when a snakefile or config is chosen, switch off run button
self._config_browser.clicked_connect(self._switch_off_run)
self._snakefile_browser.clicked_connect(self._switch_off_run)
self._schema = None
self._multiqcconfigfile = None
def _return_none(self, this):
if this is None or len(this) == 0:
return None
else:
return this
def _get_snakefile(self):
return self._return_none(self._snakefile_browser.get_filenames())
snakefile = property(_get_snakefile)
def _get_configfile(self):
return self._return_none(self._config_browser.get_filenames())
configfile = property(_get_configfile)
def _get_schemafile(self):
return self._return_none(self._schema)
schemafile = property(_get_schemafile)
def _get_multiqcconfigfile(self):
return self._return_none(self._multiqcconfigfile)
multiqcconfigfile = property(_get_multiqcconfigfile)
def _get_config(self): # pragma: no cover
filename = self._return_none(self._config_browser.get_filenames())
if filename:
try:
configfile = snaketools.SequanaConfig(filename)
except AssertionError:
self.critical("Could not parse the config file %s" % filename)
return
except Exception:
self.critical("Could not parse the config file %s. 2" % filename)
return
return configfile
config = property(_get_config)
def is_runnable(self):
flag1 = self._directory_browser.path_is_setup()
flag2 = self._snakefile_browser.path_is_setup()
flag3 = self._config_browser.path_is_setup()
# flag1 and flag2 are compulsary
# flag3 (configfile) is most tricky to handle since it may be required
# or not. So we just deal with the flag1 and 2
return flag1 and flag2
def __repr__(self):
txt = super(GenericFactory, self).__repr__()
txt += "\nsnakefile:%s\nconfigfile:%s\ndirectory:%s\nschema:%s\nmultiqcconfigfile:%s"
return txt % (self.snakefile, self.configfile, self.directory, self.schemafile, self.multiqcconfigfile)
class SequanixGUI(QW.QMainWindow, Tools):
"""
If quiet, progress bar cannot work.
- do not copy again requirements if already there
- extension of the different widgets ?
Developer Guide
------------------
- The GUI is designed with qt designer as much as possible.
- All GUI objects are in the **ui** attributes. Additional dialog such as the
snakemake and preferences dialog have their own modules and stored in attributes
ending in _dialog
"""
_not_a_rule = {"requirements", "gatk_bin", "input_directory", "input_pattern", "ignore"}
_browser_keywords = {"reference"}
_to_exclude = ["atac-seq", "compressor"]
def __init__(self, parent=None, ipython=True, user_options={}):
super(SequanixGUI, self).__init__(parent=parent)
colorlog.getLogger().setLevel("INFO")
colorlog.info("Welcome to Sequana GUI (aka Sequanix)")
self._tempdir = QTemporaryDir()
self.shell = ""
self.shell_error = ""
self._colors = {
"green": QtGui.QColor(0, 170, 0),
"red": QtGui.QColor(170, 0, 0),
"orange": QtGui.QColor(170, 150, 0),
"blue": QtGui.QColor(0, 90, 154),
}
# some global attributes
self._undefined_section = "Parameters in no sections/rules"
# self._config = None
        # Set the regex to catch steps in the progress bar
self._step_regex = re.compile("([0-9]+) of ([0-9]+) steps")
self._ipython_tab = ipython
self.initUI()
self.read_settings()
# this should be after initUI and read_settings
self.set_style_sheet()
# User option.
def isset(options, key):
if key in options and getattr(options, key):
return True
else:
return False
if isset(user_options, "wkdir"):
self.info("Setting working directory using user's argument %s" % user_options.wkdir)
if os.path.exists(user_options.wkdir) is False:
easydev.mkdirs(user_options.wkdir)
# We must use the absolute path
abspath = os.path.abspath(user_options.wkdir)
self.sequana_factory._directory_browser.set_filenames(abspath)
self.generic_factory._directory_browser.set_filenames(abspath)
if isset(user_options, "snakefile"):
filename = user_options.snakefile
if os.path.exists(filename) is True:
self.info("Setting snakefile using user's argument %s" % user_options.snakefile)
self.generic_factory._snakefile_browser.set_filenames(filename)
else:
self.error("%s does not exist" % filename)
self.ui.tabs_pipeline.setCurrentIndex(1)
if isset(user_options, "configfile"):
filename = user_options.configfile
if os.path.exists(filename) is True:
self.info("Setting config file using user's argument %s" % user_options.configfile)
self.generic_factory._config_browser.set_filenames(filename)
self.ui.tabs_pipeline.setCurrentIndex(1)
if isset(user_options, "pipeline"): # pragma: no cover
self.info("Setting Sequana pipeline %s " % user_options.pipeline)
pipelines = self.sequana_factory.valid_pipelines
if user_options.pipeline in pipelines:
index = self.ui.choice_button.findText(user_options.pipeline)
self.ui.choice_button.setCurrentIndex(index)
# set focus on pipeline tab
self.ui.tabs_pipeline.setCurrentIndex(0)
else:
self.error("unknown pipeline. Use one of %s " % pipelines)
if isset(user_options, "input_directory"): # pragma: no cover
directory = user_options.input_directory
self.info("Setting Sequana input directory")
if directory and os.path.exists(directory) is False:
self.warning("%s does not exist" % directory)
elif directory:
abspath = os.path.abspath(user_options.input_directory)
self.sequana_factory._sequana_directory_tab.set_filenames(abspath)
self.ui.tabs_pipeline.setCurrentIndex(0)
self.ui.tabWidget.setCurrentIndex(0)
if isset(user_options, "input_files"):
directory = user_options.input_files
self.info("Setting Sequana input files")
dirtab = self.sequana_factory._sequana_paired_tab
dirtab._set_paired_filenames([os.path.abspath(f) for f in user_options.input_files])
self.ui.tabs_pipeline.setCurrentIndex(0)
self.ui.tabWidget.setCurrentIndex(1)
if isset(user_options, "sequana_configfile"):
cfg = user_options.sequana_configfile
self.info("Replace Sequana config file")
self.menuImportConfig(cfg)
if isset(user_options, "schemafile"):
schemafile = user_options.schemafile
self.info("Set the schema file")
self.menuImportSchema(schemafile)
# We may have set some pipeline, snakefile, working directory
self.create_base_form()
self.fill_until_starting()
def initUI(self):
# The logger is not yet set, so we use the module directly
colorlog.info("Initialising GUI")
# Set up the user interface from Designer. This is the general layout
# without dedicated widgets and connections
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# 2 more dialogs from designer
self.preferences_dialog = PreferencesDialog(self)
self.snakemake_dialog = SnakemakeDialog(self)
self.preferences_dialog.ui.buttonBox.accepted.connect(self.set_level)
# The IPython dialog, which is very useful for debugging
if self._ipython_tab is True:
self.ipyConsole = QIPythonWidget(
customBanner="Welcome to Sequanix embedded ipython console\n"
+ "The entire GUI interface is stored in the variable gui\n"
+ "Note also that you can use this interface as a shell \n"
+ "command line interface preceding your command with ! character\n"
)
# self.ipyConsole.printText("The variable 'foo' andion.")
self.ipyConsole.execute("from sequana import *")
self.ipyConsole.execute("import sequana")
self.ipyConsole.execute("")
self.ipyConsole.pushVariables({"gui": self})
self.ui.layout_ipython.addWidget(self.ipyConsole)
# layout for config file parameters
widget_form = QW.QWidget()
self.form = QW.QVBoxLayout(widget_form)
self.form.setSpacing(10)
self.ui.scrollArea.setWidget(widget_form)
self.ui.scrollArea.setWidgetResizable(True)
self.ui.scrollArea.setMinimumHeight(200)
# layout for the snakemake output
self.output = QW.QTextEdit()
self.output.setReadOnly(True)
self.ui.layout_snakemake.addWidget(self.output)
# Add the new logging box widget to the layout
self.logTextBox = QPlainTextEditLogger(self)
self.logTextBox.setFormatter(
colorlog.ColoredFormatter(
"%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
},
)
)
colorlog.getLogger().addHandler(self.logTextBox)
self.set_level()
self.ui.layout_logger.addWidget(self.logTextBox.widget)
# Connectors to actions related to the menu bar
self.ui.actionQuit.triggered.connect(self.menuQuit)
self.ui.action_import_configfile.triggered.connect(self.menuImportConfig)
self.ui.action_import_schemafile.triggered.connect(self.menuImportSchema)
self.ui.actionHelp.triggered.connect(self.menuHelp)
self.ui.actionAbout.triggered.connect(self.menuAbout)
self.ui.actionSnakemake.triggered.connect(self.snakemake_dialog.exec_)
self.ui.actionPreferences.triggered.connect(self.preferences_dialog.exec_)
self.preferences_dialog.ui.preferences_options_general_tooltip_value.clicked.connect(self.set_style_sheet)
# connectors related to the pipeline tabs (pipeline/generic)
self.set_sequana_pipeline()
self.set_generic_pipeline()
# The run/save/dag footer buttons
self.connect_footer_buttons()
self.process = QtCore.QProcess(self)
self.process.started.connect(lambda: self.ui.run_btn.setEnabled(False))
self.process.started.connect(lambda: self.ui.stop_btn.setEnabled(True))
self.process.started.connect(lambda: self.ui.unlock_btn.setEnabled(False))
self.process.started.connect(lambda: self.start_progress)
self.process.started.connect(lambda: self.ui.save_btn.setEnabled(False))
self.process.started.connect(lambda: self.ui.tabs_pipeline.setEnabled(False))
self.process.finished.connect(lambda: self.ui.run_btn.setEnabled(True))
self.process.finished.connect(lambda: self.ui.stop_btn.setEnabled(False))
self.process.finished.connect(lambda: self.ui.unlock_btn.setEnabled(True))
self.process.finished.connect(lambda: self.ui.save_btn.setEnabled(True))
self.process.finished.connect(lambda: self.ui.tabs_pipeline.setEnabled(True))
self.process.finished.connect(self.end_run)
self.process.readyReadStandardOutput.connect(self.snakemake_data_stdout)
self.process.readyReadStandardError.connect(self.snakemake_data_error)
# This is for the show dag btn. Created here once for all
self.process1 = QtCore.QProcess(self)
self.process2 = QtCore.QProcess(self)
self.ui.tabWidget.currentChanged.connect(lambda: self.ui.run_btn.setEnabled(False))
# if we are on one of those clusters, switch to the cluster choice in
# the pipeline control combo box
if on_cluster() is True:
self.ui.comboBox_local.setCurrentText("cluster")
# connect show advanced button with the until/starting frame
self.ui.show_advanced_control.clicked.connect(self.click_advanced)
self.ui.frame_control.hide()
def _get_opacity(self):
dialog = self.preferences_dialog
box = dialog.ui.preferences_options_general_tooltip_value
if box.isChecked():
return 255
else:
return 0
tooltip_opacity = property(_get_opacity)
def set_style_sheet(self):
self.setStyleSheet(
"""QToolTip {
background-color: #aabbcc;
color: black;
border-style: double;
border-width: 3px;
border-color: green;
border-radius: 5px;
margin:5px;
opacity: %s;
} ;
"""
% self.tooltip_opacity
)
# |-----------------------------------------------------|
# | MENU related |
# |-----------------------------------------------------|
def menuImportConfig(self, configfile=None): # pragma: no cover
# The connector send a False signal but default is None
# so we need to handle the two cases
if self.snakefile is None:
self.error("You must set a pipeline first")
msg = WarningMessage(("You must set a pipeline first"))
msg.exec_()
return
if configfile and os.path.exists(configfile) is False:
self.error("Config file (%s) does not exists" % configfile)
return
if configfile is None or configfile is False:
self.info("Importing config file.")
file_filter = "YAML file (*.json *.yaml *.yml)"
browser = FileBrowser(file_filter=file_filter)
browser.browse_file()
configfile = browser.paths
if configfile:
self.sequana_factory._imported_config = configfile
else:
self.sequana_factory._imported_config = None
self.create_base_form()
def menuImportSchema(self, schemafile=None): # pragma: no cover
if schemafile:
self.generic_factory._schema = schemafile
return
self.info("Importing YAML schema file.")
file_filter = "YAML file (*.yaml *.yml)"
browser = FileBrowser(file_filter=file_filter)
browser.browse_file()
schemafile = browser.paths
if schemafile:
self.generic_factory._schema = schemafile
else:
self.generic_factory._schema = None
def menuAbout(self):
from sequana import version
url = "sequana.readthedocs.io"
widget = About()
widget.setText("Sequana version %s " % version)
widget.setInformativeText(
"""
Online documentation on <a href="http://%(url)s">%(url)s</a>
<br>
<br>
Authors: Thomas Cokelaer and Dimitri Desvillechabrol, 2017-2018
"""
% {"url": url}
)
widget.setWindowTitle("Sequana")
# widget.setStandardButtons(QW.QMessageBox.Ok)
retval = widget.exec_()
if retval == QW.QMessageBox.Ok:
widget.close()
def menuHelp(self):
url = "sequana.readthedocs.io"
pipelines_text = "<ul>\n"
url = "http://sequana.readthedocs.io/en/master"
for pipeline in snaketools.pipeline_names:
pipelines_text += ' <li><a href="%(url)s/pipeline_%(name)s.html">%(name)s</a></li>\n' % {
"url": url,
"name": pipeline,
}
pipelines_text += "</ul>"
msg = HelpDialog(pipelines=pipelines_text)
retval = msg.exec_()
if retval == QW.QMessageBox.Ok:
msg.close()
def menuQuit(self):
self._quit_msg = WarningMessage("Do you really want to quit ?")
self._quit_msg.setStandardButtons(QW.QMessageBox.Yes | QW.QMessageBox.No)
self._quit_msg.setDefaultButton(QW.QMessageBox.No)
quit_answer = self._quit_msg.exec_()
if quit_answer == QW.QMessageBox.Yes:
self.close()
def set_level(self):
# Set the level of the logging system
pref = self.preferences_dialog.ui
level = pref.preferences_options_general_logging_value.currentText()
#level = getattr(colorlog.logging.logging, level)
level = colorlog.getLogger().level
colorlog.getLogger().setLevel(level)
# ---------------------------------------------------------------
# More GUI / reading the snakefile (sequana or generic)
# ---------------------------------------------------------------
def set_sequana_pipeline(self):
# The pipeline connectors
pipelines = sorted(snaketools.pipeline_names)
pipelines = [this for this in pipelines if this not in self._to_exclude]
self.ui.choice_button.addItems(pipelines)
self.ui.choice_button.activated[str].connect(self._update_sequana)
# FIXME do we want to use this ?
self.ui.choice_button.installEventFilter(self)
# populate the factory with the choice button
self.sequana_factory = SequanaFactory(combobox=self.ui.choice_button, run_button=self.ui.run_btn)
self.sequana_factory.valid_pipelines = pipelines
# a local alias
saf = self.sequana_factory
# add widgets for the working dir
self.ui.layout_sequana_wkdir.addWidget(saf._directory_browser)
# add widget for the input sample
# self.ui.layout_sequana_input_files.addWidget(saf._sequana_paired_tab)
# hlayout = QW.QHBoxLayout()
# hlayout.addWidget(saf._sequana_readtag_label2)
# hlayout.addWidget(saf._sequana_readtag_lineedit2)
# self.ui.layout_sequana_input_files.addLayout(hlayout)
# add widget for the input directory
self.ui.layout_sequana_input_dir.addWidget(saf._sequana_directory_tab)
hlayout = QW.QHBoxLayout()
hlayout.addWidget(saf._sequana_readtag_label)
hlayout.addWidget(saf._sequana_readtag_lineedit)
self.ui.layout_sequana_input_dir.addLayout(hlayout)
hlayout = QW.QHBoxLayout()
hlayout.addWidget(saf._sequana_pattern_label)
hlayout.addWidget(saf._sequana_pattern_lineedit)
self.ui.layout_sequana_input_dir.addLayout(hlayout)
@QtCore.pyqtSlot(str)
def _update_sequana(self, index):
"""Change options form when user changes the pipeline."""
if self.ui.choice_button.findText(index) == 0:
self.clear_form()
self.rule_list = []
self.fill_until_starting()
return
self.info("Reading sequana %s pipeline" % index)
self.create_base_form()
# Is there a cluster config file ?
dialog = self.snakemake_dialog.ui
if self.sequana_factory.clusterconfigfile:
dialog.snakemake_options_cluster_cluster__config_value.set_filenames(self.sequana_factory.clusterconfigfile)
else:
dialog.snakemake_options_cluster_cluster__config_value.set_filenames("")
self.fill_until_starting()
self.switch_off()
# Reset imported config file in SequanaFactory
self.sequana_factory._imported_config = None
def set_generic_pipeline(self):
self.generic_factory = GenericFactory(self.ui.run_btn)
gaf = self.generic_factory
# The config file connectors
gaf._config_browser.clicked_connect(self.create_base_form)
        # Update the main UI with the generic factory widgets
self.ui.layout_generic_snakefile.addWidget(gaf._snakefile_browser)
self.ui.layout_generic_config.addWidget(gaf._config_browser)
self.ui.layout_generic_wkdir.addWidget(gaf._directory_browser)
# When user press the cancel button, the config file browser is reset
self.ui.cancel_push_button.clicked.connect(self.generic_factory._config_browser.set_empty_path)
# ---------------------------------------------------------------------
# Footer connectors
# ---------------------------------------------------------------------
def connect_footer_buttons(self):
self.ui.run_btn.setEnabled(False)
self.ui.run_btn.clicked.connect(self.click_run)
self.ui.stop_btn.clicked.connect(self.click_stop)
self.ui.stop_btn.setEnabled(False)
self.ui.unlock_btn.clicked.connect(self.ui.run_btn.setEnabled)
self.ui.unlock_btn.clicked.connect(self.unlock_snakemake)
self.ui.unlock_btn.setEnabled(True)
self.ui.report_btn.setEnabled(True)
self.ui.report_btn.clicked.connect(self.open_report)
self.ui.save_btn.clicked.connect(self.save_project)
self.ui.dag_btn.setEnabled(False)
self.ui.dag_btn.clicked.connect(self.show_dag)
# -----------------------------------------------------------------
# function to link to the factory (sequana or generic)
# -----------------------------------------------------------------
def _get_mode(self):
# figure out if we are dealing with a sequana pipeline or not
index = self.ui.tabs_pipeline.currentIndex()
if index == 0:
return "sequana"
elif index == 1:
return "generic"
mode = property(_get_mode)
def _get_factory(self):
return getattr(self, "%s_factory" % self.mode)
factory = property(_get_factory)
def _get_config(self):
return getattr(self, "%s_factory" % self.mode).config
config = property(_get_config)
def _get_configfile(self):
return getattr(self, "%s_factory" % self.mode).configfile
configfile = property(_get_configfile)
def _get_snakefile(self):
return getattr(self, "%s_factory" % self.mode).snakefile
snakefile = property(_get_snakefile)
def _get_working_dir(self):
return getattr(self, "%s_factory" % self.mode).directory
working_dir = property(_get_working_dir)
# ----------------------------------------------------------------------
# Snakemake related (config, running)
# ----------------------------------------------------------------------
def fill_until_starting(self):
active_list = [w.get_name() for w in self.rule_list if w.get_do_rule()]
self.ui.until_box.clear()
self.ui.until_box.addItems([None] + active_list)
self.ui.starting_box.clear()
self.ui.starting_box.addItems([None] + active_list)
# ----------------------------------------------------------
# Config file related
# ---------------------------------------------------------
def _set_focus_on_config_tab(self):
# Set focus on config file
if self._ipython_tab:
self.ui.tabs.setCurrentIndex(3)
else:
self.ui.tabs.setCurrentIndex(2)
# --------------------------------------------------------------------
# Advanced control
# --------------------------------------------------------------------
def click_advanced(self):
if self.ui.frame_control.isHidden():
self.ui.frame_control.show()
else:
self.ui.frame_control.hide()
# --------------------------------------------------------------------
# Others
# --------------------------------------------------------------------
def clear_layout(self, layout):
"""Clean all widgets contained in a layout."""
while layout.count():
child = layout.takeAt(0)
if child.widget() is not None:
child.widget().deleteLater()
elif child.layout() is not None:
self.clear_layout(child.layout())
# --------------------------------------------------------------------
# Running snakemake
# --------------------------------------------------------------------
def _clean_line(self, line):
# TODO: surely there is a better way to do that and not overlap
# with tools.py ...
line = line.replace("b'\\r'", "")
line = line.replace("b'\r'", "")
line = line.replace("b'\\r '", "")
line = line.replace("b'\r '", "")
line = line.replace("b' '", "")
line = line.replace("\\t", " " * 4)
line = line.replace("'b'", "")
for this in ["b'", 'b"', "\r"]:
if line.startswith(this):
line = line.replace(this, "")
if line.startswith('b"'):
line = line.replace('b"', "")
line = line.rstrip("\\x1b[0m")
line = line.replace("\\x1b[33m", "")
return line
def snakemake_data_stdout(self): # pragma: no cover
"""Read standard output of snakemake process"""
data = str(self.process.readAllStandardOutput())
self.shell += data
self.update_progress_bar(data)
for this in data.split("\\n"):
line = this.strip()
if line and len(line) > 3 and "complete in" not in line: # prevent all b'' strings
line = self._clean_line(line)
if len(line.strip()) == 0:
continue
self.output.append('<font style="color:blue">' + line + "</font>")
def snakemake_data_error(self):
"""Read error output of snakemake process"""
error = str(self.process.readAllStandardError())
self.shell_error += error
self.update_progress_bar(error)
for this in error.split("\\n"):
line = this.strip()
if line and len(line) > 3 and "complete in" not in line: # prevent all b'' strings
line = self._clean_line(line)
if line.startswith("b'"):
line = line[2:]
line.rstrip("'")
grouprex = self._step_regex.findall(line)
if grouprex:
self.output.append('<font style="color:orange">' + line + "</font>")
elif "Error" in line:
self.output.append('<font style="color:red">' + line + "</font>")
else:
self.output.append('<font style="color:green">' + line + "</font>")
def get_until_starting_option(self):
"""Return list with starting rule and end rule."""
until_rule = self.ui.until_box.currentText()
starting_rule = self.ui.starting_box.currentText()
option = []
if until_rule:
option += ["--no-hooks", "-U", until_rule]
if starting_rule:
option += ["-R", starting_rule]
return option
def _get_snakemake_command(self, snakefile): # pragma: no cover
"""If the cluster option is selected, then the cluster field in
        the snakemake menu must be set to a non-empty string.
        If we are on TARS, we must also set the option to cluster (not local).
        If one of these requirements is not met, this function returns None.
"""
dialog = self.snakemake_dialog # an alias
snakemake_line = ["-s", snakefile, "--stat", "stats.txt", "-p"]
if self.ui.comboBox_local.currentText() == "local":
if on_cluster():
msg = WarningMessage(
(
"You are on TARS cluster. Please set the"
"batch options and select the cluster option (not local)"
)
)
msg.exec_()
return None
snakemake_line += dialog.get_snakemake_local_options()
elif self.ui.comboBox_local.currentText() == "cluster":
cluster = dialog.ui.snakemake_options_cluster_cluster_value.text()
if len(cluster.strip()) == 0:
msg = WarningMessage(
(
"You selected a 'cluster run' but the "
"cluster preferences are not set. Either switch to a local "
"run or set a correct string in the Snakemake options menu "
"(in cluster tab/ cluster field.)"
)
)
msg.exec_()
return None
snakemake_line += dialog.get_snakemake_cluster_options()
# cluster_config = dialog.ui.snakemake_options_cluster_config_value.text()
# cluster_config = cluster_config.strip()
# if len(cluster_config):
# snakemake_line += ["--cluster-config", cluster_config]
snakemake_line += dialog.get_snakemake_general_options()
snakemake_line += self.get_until_starting_option()
others = self.snakemake_dialog.ui.snakemake_options_general_custom.text()
if others.strip():
snakemake_line += others.split()
if self.configfile:
configfile = os.path.basename(self.configfile)
snakemake_line += ["--configfile", configfile]
return snakemake_line
def _set_pb_color(self, color):
self.ui.progressBar.setStyleSheet(
"""
QProgressBar {{
color: black;
border: 2px solid grey;
margin: 2px;
border-radius: 5px;
text-align: center;
}}
QProgressBar::chunk {{
background: {};
}}""".format(
color
)
)
# pal = self.ui.progressBar.palette()
# pal.setColor(QtGui.QPalette.Highlight, self._colors['blue'])
# self.ui.progressBar.setPalette(pal)
def click_run(self):
# set focus on the snakemake output
if self.snakefile is None or self.working_dir is None:
self.warning("Working directory or snakefile not set.")
return
self.ui.tabs.setCurrentIndex(0)
self.shell_error = ""
self.shell = ""
# Prepare the command and working directory.
if self.working_dir is None:
self.warning("Set the working directory first")
return
# We copy the sequana and genereic snakefile into a filename called
# Snakefile
snakefile = self.working_dir + os.sep + os.path.basename(self.snakefile)
if os.path.exists(snakefile) is False:
self.critical("%s does not exist" % snakefile)
return
snakemake_args = self._get_snakemake_command(snakefile)
if snakemake_args is None:
return
# the progress bar
self._set_pb_color(self._colors["blue"].name())
self.ui.progressBar.setValue(1)
# Start process
# If an argument contains spaces, we should use quotes. However,
# with PyQt quotes must be escaped
args = []
for this in snakemake_args:
if re.search(r"\s", this) is True:
args.append('"%s"' % this)
else:
args.append(this)
snakemake_args = args
self.info("Starting process with snakemake %s " % " ".join(snakemake_args))
self.output.clear()
self.process.setWorkingDirectory(self.working_dir)
self.process.start("snakemake", snakemake_args)
# -------------------------------------------------------------------
# Create the base form
# -------------------------------------------------------------------
def create_base_form(self):
"""Create form with all options necessary for a pipeline.
::
########################################################
        # valid python docstring to be interpreted by sphinx
#
# section:
# item1: 10
# item2: 20
"""
self.rule_list = []
if self.config is None:
self.clear_form()
return
self.info("Creating form based on config file")
self.clear_form()
rules_list = list(self.config._yaml_code.keys())
# We do not sort the list of rules anymore so that it is like in the
# config file
# rules_list.sort()
self.necessary_dict = {}
        # For each section, we create a widget (RuleForm). For instance, the
        # first one is accessible as follows: gui.form.itemAt(0).widget()
docparser = YamlDocParser(self.configfile)
import ruamel.yaml.comments
for count, rule in enumerate(rules_list):
self.debug("Scanning rule %s" % rule)
            # Check if this is a dictionary
contains = self.config._yaml_code[rule]
# If this is a section/dictionary, create a section
if isinstance(contains, (ruamel.yaml.comments.CommentedMap, dict)) and (
rule not in SequanixGUI._not_a_rule
):
# Get the docstring from the Yaml section/rule
docstring = docparser._block2docstring(rule)
# Get any special keywords
specials = docparser._get_specials(rule)
# self.ui.preferences_options_general_addbrowser_value
dialog = self.preferences_dialog.ui
option = dialog.preferences_options_general_addbrowser_value.text()
option = option.strip()
option = option.replace(";", " ").replace(",", " ")
if len(option):
keywords = option.split()
else:
keywords = []
keywords += self._browser_keywords
keywords = list(set(keywords))
rule_box = Ruleform(rule, contains, count, keywords, specials=specials)
rule_box.connect_all_option(lambda: self.ui.run_btn.setEnabled(False))
# Try to interpret it with sphinx
try:
self.debug("parsing docstring of %s" % rule)
comments = rest2html(docstring).decode()
rule_box.setToolTip(comments)
except Exception as err:
print(err)
self.warning("Could not interpret docstring of %s" % rule)
rule_box.setToolTip("")
self.form.addWidget(rule_box)
self.rule_list.append(rule_box)
rule_box.connect_do(self.fill_until_starting)
else:
# this is a parameter in a section, which may be
# a list, a None or something else
if isinstance(contains, list):
self.necessary_dict = dict(self.necessary_dict, **{rule: contains})
elif contains is None or contains in ["''", '""']:
self.necessary_dict = dict(self.necessary_dict, **{rule: None})
else:
self.necessary_dict = dict(self.necessary_dict, **{rule: "{0}".format(contains)})
# if this is a generic pipeline, you may have parameters outside of a
# section
if self.mode == "generic" and len(self.necessary_dict):
rule_box = Ruleform(self._undefined_section, self.necessary_dict, -1, generic=True)
self.form.addWidget(rule_box)
self._set_focus_on_config_tab()
# ----------------------------------------------------------
# STOP footer button
# ----------------------------------------------------------
def click_stop(self):
"""The stop button"""
self._set_pb_color(self._colors["orange"].name())
# For windows:
# http://stackoverflow.com/questions/8232544/how-to-terminate-a-process-without-os-kill-osgeo4w-python-2-5
if self.process.state() != 0:
pid = self.process.pid()
self.warning("Process {} running , stopping it... ".format(pid))
# We must use a ctrl+C interruption so that snakemake
# handles the interruption smoothly. However, child processes
# are lost so we also need to get their IDs and kill them.
self.info("killing the main snakemake process. This may take a few seconds ")
try:
self.info("process pid={} being killed".format(self.process.pid()))
pid_children = [this.pid for this in psutil.Process(pid).children(recursive=True)]
# Kills the main process
os.kill(pid, signal.SIGINT)
# And the children
for this in pid_children: # pragma: no cover
self.info("Remove pid {} ".format(this))
try:
os.kill(this, signal.SIGINT)
except Exception as err:
print(err)
time.sleep(4)
except Exception as err:
print(err)
pass # already stopped ?
self.info("Process killed successfully.")
self.ui.save_btn.setEnabled(True)
self.ui.run_btn.setEnabled(True)
self.ui.stop_btn.setEnabled(False)
self.ui.tabs_pipeline.setEnabled(True)
# --------------------------------------------------------------------
# Progress bar
# --------------------------------------------------------------------
def update_progress_bar(self, line):
"""Parse with a regex to retrieve current step and total step."""
grouprex = self._step_regex.findall(line)
# Use last "x of y" (not the first item at position 0)
if grouprex:
step = int(grouprex[-1][0]) / float(grouprex[-1][1]) * 100
            self.ui.progressBar.setValue(int(step))  # setValue expects an int
if "Nothing to be done" in line:
self.ui.progressBar.setValue(100)
def start_progress(self):
self.ui.progressBar.setRange(0, 1)
def end_run(self): # pragma: no cover
if self.ui.progressBar.value() >= 100:
self._set_pb_color(self._colors["green"].name())
self.info("Run done. Status: successful")
else:
self._set_pb_color(self._colors["red"].name())
text = "Run manually to check the exact error or check the log."
if "--unlock" in self.shell_error:
text += "<br>You may need to unlock the directory. "
text += "click on Unlock button"
self.critical(text)
return
def _get_force(self):
dialog = self.preferences_dialog
box = dialog.ui.preferences_options_general_overwrite_value
return box.isChecked()
def _set_force(self, boolean): # pragma: no cover
assert boolean in [True, False]
dialog = self.preferences_dialog
box = dialog.ui.preferences_options_general_overwrite_value
box.setChecked(boolean)
force = property(_get_force, _set_force)
def save_project(self): # pragma: no cover
self.info("Saving project")
if self.configfile is None:
if self.mode == "generic":
if self.generic_factory.is_runnable():
self.critical("save_project: Generic case without config file")
self._save_teardown()
else:
msg = WarningMessage("You must select a Snakefile and a working directory.")
msg.exec_()
elif self.mode == "sequana":
msg = WarningMessage("You must choose a pipeline first.")
msg.exec_()
return
if self.working_dir is None:
self.critical("save_project: no working dir: return")
msg = WarningMessage("You must select a working directory first.")
msg.exec_()
return
try:
form_dict = dict(self.create_form_dict(self.form), **self.necessary_dict)
except AttributeError as err:
self.error(err)
msg = WarningMessage("You must choose a pipeline before saving.")
msg.exec_()
return
# get samples names or input_directory
if self.mode == "sequana":
self.info("Sequana case")
flag1 = self.sequana_factory._sequana_directory_tab.get_filenames()
flag2 = self.sequana_factory._sequana_paired_tab.get_filenames()
if (
self.ui.tabWidget.currentIndex() == 0
and len(flag1) == 0
or self.ui.tabWidget.currentIndex() == 1
and len(flag2) == 0
):
msg = WarningMessage("You must choose an input first.")
msg.exec_()
return
filename = self.sequana_factory._sequana_directory_tab.get_filenames()
form_dict["input_directory"] = filename
# If pattern provided, the input_directory is reset but used in
# the pattern as the basename
pattern = self.sequana_factory._sequana_pattern_lineedit.text()
if len(pattern.strip()):
form_dict["input_pattern"] = filename
form_dict["input_pattern"] += os.sep + pattern.strip()
form_dict["input_directory"] = ""
readtag = self.sequana_factory._sequana_readtag_lineedit.text()
if len(readtag.strip()):
form_dict["input_readtag"] = readtag
else:
form_dict["input_readtag"] = "_R[12]_"
elif self.mode == "generic":
# Here we save the undefined section in the form.
if self._undefined_section in form_dict.keys():
for key, value in form_dict[self._undefined_section].items():
form_dict[key] = value
del form_dict[self._undefined_section]
self.info("Generic case")
# Let us update the attribute with the content of the form
# This uses the user's information
cfg = self.config
cfg.config.update(form_dict)
cfg._update_yaml()
self.cfg = cfg
pref = self.preferences_dialog.ui
box = pref.preferences_options_general_schema_value
checked_schema = box.isChecked()
if self.working_dir:
# Save the configuration file
if self.mode == "sequana":
yaml_path = self.working_dir + os.sep + "config.yaml"
self.warning("copy requirements (if any)")
cfg.copy_requirements(target=self.working_dir)
elif self.mode == "generic":
yaml_path = os.sep.join((self.working_dir, os.path.basename(self.generic_factory.configfile)))
if os.path.isfile(yaml_path) and self.force is False:
save_msg = WarningMessage("The file <i>{0}</i> already exist".format(yaml_path))
save_msg.setInformativeText("Do you want to overwrite the file?")
save_msg.setStandardButtons(QW.QMessageBox.Yes | QW.QMessageBox.Discard | QW.QMessageBox.Cancel)
save_msg.setDefaultButton(QW.QMessageBox.Yes)
# Yes == 16384
# Save == 2048
retval = save_msg.exec_()
if retval in [16384, 2048]:
self.info("Saving config file (exist already)")
if checked_schema is False:
cfg.save(yaml_path)
else:
ret = self._check_and_save_config(cfg, yaml_path)
if ret is False:
# we do not want to save the config file and call
# _save_teardown
return
else:
self.warning("Saving config file (does not exist)")
if checked_schema is False:
cfg.save(yaml_path)
else:
ret = self._check_and_save_config(cfg, yaml_path)
if ret is False:
# we do not want to save the config file and call
# _save_teardown
return
# Save the configuration file for the cluster
if self.mode == "sequana" and self.sequana_factory.clusterconfigfile:
target = os.sep.join((self.working_dir, "cluster_config.json"))
shutil.copy(self.sequana_factory.clusterconfigfile, target)
# replace the name of the original file with the target one so
# that the target can be edited. The target will also be used in
                # place of the original version when launching snakemake!
self.snakemake_dialog.ui.snakemake_options_cluster_cluster__config_value.set_filenames(target)
# Save the multiqc_config file if provided in sequana pipeline
if self.mode == "sequana" and self.sequana_factory.multiqcconfigfile:
target = self.working_dir + os.sep + "multiqc_config.yaml"
shutil.copy(self.sequana_factory.multiqcconfigfile, target)
else:
self.critical("Config file not saved (no wkdir)")
msg = WarningMessage("You must set a working directory", self)
msg.exec_()
self.switch_off()
return
self._save_teardown()
def _save_teardown(self):
# Finally, save project and update footer run button
self.factory._copy_snakefile(self.force)
self.debug("Switching RUN and DAG button on")
self.ui.run_btn.setEnabled(True)
self.ui.dag_btn.setEnabled(True)
def _check_and_save_config(self, cfg, yaml_path):
# Here we save the config.yaml file when changed
# However, before that if there is a schema, we can
# use it. This is the case for some sequana pipelines
# return False if the config is invalid and do not save it
if self.mode == "sequana" and self.sequana_factory.schemafile is None:
self.warning("No Schema found to validate the config file")
if self.mode == "sequana" and self.sequana_factory.schemafile:
schemafile = self.sequana_factory.schemafile
elif self.mode == "generic" and self.generic_factory.schemafile:
schemafile = self.generic_factory.schemafile
else:
schemafile = None
if schemafile:
# check that the config file is correct before saving it
# only if we have a schema_config file.
self.info("Checking config file with provided schema file.")
# We save the config as a dummy temporary file to check it
            # if correct, we then save the file. If not, we provide a help
            # message
from easydev import TempFile
with TempFile(suffix=".yaml") as fout:
# save a temporary version
cfg.save(fout.name)
import ruamel
import warnings
from pykwalify.core import Core
# causes issue with ruamel.yaml 0.12.13. Works for 0.15
try:
warnings.simplefilter("ignore", ruamel.yaml.error.UnsafeLoaderWarning)
except:
pass
try:
# open the config and the schema file
c = Core(source_file=fout.name, schema_files=[schemafile])
except Exception as err:
print(err)
return False
try:
c.validate()
except Exception as err:
print(err)
error_msg = "<b>CRITICAL: INVALID CONFIGURATION FILE</b>\n"
error_msg += "<pre>" + str(err) + "</pre>"
self.critical(error_msg)
self.switch_off()
msg = WarningMessage(error_msg, self)
msg.exec_()
return False
cfg.save(yaml_path)
def switch_off(self):
self.debug("Switching RUN and DAG button off")
self.ui.run_btn.setEnabled(False)
self.ui.dag_btn.setEnabled(False)
def _reset_schema(self):
self.schemafile = None
# -----------------------------------------------------------------------
# SAVE LOG in a files
# -----------------------------------------------------------------------
def report_issues(self, filename="issue_debug.txt"):
# save shell + shell_error in working directory as well as snakemake and
# config file.
with open(filename, "w") as fh:
fh.write("\nsequanix logger ----------------------------------\n")
try:
file_logger = self.save_logger()
with open(file_logger, "r") as fin:
fh.write(fin.read())
except:
pass
fh.write("\nsequanix shell ----------------------------------\n")
try:
fh.writelines(self.shell)
except:
fh.write("No shell info")
fh.write("\nsequanix shell error ------------------------------\n")
try:
fh.writelines(self.shell_error)
except:
fh.write("No shell error info")
url = "https://github.com/sequana/sequana/issues "
print("Created a file called {} to be posted on {}.".format(filename, url))
self.init_logger()
# -----------------------------------------------------------------------
# UNLOCK footer button
# -----------------------------------------------------------------------
def unlock_snakemake(self):
if self.working_dir is None or self.snakefile is None:
self.warning("working directory or snakefile not set")
return
# FIXME this does not work as expected
self.ui.run_btn.setEnabled(False)
if os.path.exists(self.snakefile) is False:
self.warning("snakefile not found. should not happen")
return
self.cmd = ["snakemake", "-s", self.snakefile, "--unlock"]
self.info("Running " + " ".join(self.cmd))
self.info("Please wait a second. Unlocking working directory")
# focus on tab with snakemake output
self.ui.tabs.setCurrentIndex(0)
self.ui.tabs_pipeline.setEnabled(False)
try:
snakemake_proc = sp.Popen(self.cmd, cwd=self.working_dir)
snakemake_proc.wait()
except:
self.critical("Issue while unlocking the directory")
finally:
self.ui.tabs_pipeline.setEnabled(True)
self.info("unlocking done")
self.output.append('<font style="color:brown">Unlocking working directory</font>')
self.ui.run_btn.setEnabled(True)
self.ui.stop_btn.setEnabled(False)
# -----------------------------------------------------------------------
# DAG footer button
# -----------------------------------------------------------------------
def show_dag(self): # pragma: no cover
try:
# This command should work on various platform, just in case
# we add a try/except
if easydev.cmd_exists("dot") is False:
msg = "**dot** command not found. Use 'conda install graphviz' to install it."
self.warning(msg)
msg = WarningMessage((msg))
msg.exec_()
return
except:
pass
finally:
self.info("Creating DAG image.")
if self.snakefile is None:
self.warning("No snakefile")
return
# We just need the basename because we will run it in the wkdir
snakefile = os.path.basename(self.snakefile)
snakemake_line = ["snakemake", "-s", snakefile]
snakemake_line += ["--rulegraph"]
if self.mode == "generic" and self.configfile:
# make sure to copy the config file
snakemake_line += ["--configfile"]
snakemake_line += [os.path.basename(self.generic_factory.configfile)]
snakemake_line += self.get_until_starting_option()
# Where to save the SVG (temp directory)
svg_filename = self._tempdir.path() + os.sep + "test.svg"
self.info(snakemake_line)
self.process1.setWorkingDirectory(self.working_dir)
self.process1.setStandardOutputProcess(self.process2)
self.process1.start("snakemake", snakemake_line[1:])
self.process2.start("dot", ["-Tsvg", "-o", svg_filename])
self.process1.waitForFinished(50000)
self.process2.waitForFinished(50000)
if os.path.exists(svg_filename):
self.diag = SVGDialog(svg_filename)
self.diag.show()
else:
msg = "Could not create the DAG file."
error = str(self.process1.readAllStandardError())
msg = CriticalMessage(msg, error)
msg.exec_()
return
def open_report(self):
pref = self.preferences_dialog.ui
filename = pref.preferences_options_general_htmlpage_value.text()
if filename == "":
filename = QW.QFileDialog.getOpenFileNames(
self, "Select your HTML report", self.working_dir, "HTML files (*.html)"
)[0]
if len(filename) and os.path.exists(filename[0]):
filename = filename[0]
else:
self.warning("No valid HTML selected and none specified in the preferences.")
return
else: # we have a filename hardcoded in the preferences
if self.working_dir is None:
self.error("Working directory not set yet")
return
filename = self.working_dir + os.sep + filename
if os.path.exists(filename) is False:
self.error("%s page does not exist. Check the preferences dialog." % filename)
return
else:
self.info("Reading and openning %s" % filename)
url = "file://" + filename
# The browser executable itself
self.browser = Browser(url)
self.browser.show()
def create_form_dict(self, layout):
def _cleaner(value):
# This is to save the YAML file correctly since the widgets tend to
# convert None and empty strings as '""' or "''"
if value in ["None", None, "", '""', "''"]:
return None
else:
# this tries to convert to a list #issue #515
try:
return eval(value)
except:
return value
widgets = (layout.itemAt(i).widget() for i in range(layout.count()))
form_dict = {
w.get_name(): _cleaner(w.get_value()) if w.is_option() else self.create_form_dict(w.get_layout())
for w in widgets
}
return form_dict
def clear_form(self):
self.clear_layout(self.form)
def eventFilter(self, source, event):
"""Inactivate wheel event of combobox"""
if event.type() == QtCore.QEvent.Wheel and source is self.ui.choice_button:
return True
return False
# ---------------------------------------------------
# settings and close
# ---------------------------------------------------
def read_settings(self):
self.info("Reading settings")
settings = QtCore.QSettings("sequana_gui", "mainapp")
if settings.value("tab_position") is not None:
index = settings.value("tab_position")
self.ui.tabs_pipeline.setCurrentIndex(int(index))
if settings.value("tab_generic_position") is not None:
index = settings.value("tab_generic_position")
self.ui.tabs_generic.setCurrentIndex(int(index))
if settings.value("tab_sequana_position") is not None:
index = settings.value("tab_sequana_position")
self.ui.tabs_sequana.setCurrentIndex(int(index))
if settings.value("tab_sequana_input_position") is not None:
index = settings.value("tab_sequana_input_position")
self.ui.tabWidget.setCurrentIndex(int(index))
def write_settings(self):
settings = QtCore.QSettings("sequana_gui", "mainapp")
# tab snakemake output/logger/ipython
index = self.ui.tabs_pipeline.currentIndex()
settings.setValue("tab_position", index)
index = self.ui.tabs_generic.currentIndex()
settings.setValue("tab_generic_position", index)
index = self.ui.tabs_sequana.currentIndex()
settings.setValue("tab_sequana_position", index)
index = self.ui.tabWidget.currentIndex()
settings.setValue("tab_sequana_input_position", index)
def _close(self):
self.write_settings()
# end any process running that may be running
self.click_stop()
self._tempdir.remove()
try:
self.browser.close()
except:
pass
def closeEvent(self, event):
# Close button (red X)
self._close()
def close(self):
# Menu or ctrl+q
self._close()
super().close()
class Options(argparse.ArgumentParser):
def __init__(self, prog="sequana_gui"):
usage = """Sequanix (part of Sequana project) is a GUI for running Snakefiles
        For the Sequana project, you can pre-fill sections as follows:
sequanix -p quality_control -w analysis -i .
        to prefill the quality_control pipeline to use the local directory to
search for input files (fastq.gz) and run the analysis in the working
directory "analysis"
For Generic snakefiles:
sequanix -s SNAKEFILE -c CONFIGFILE -w analysis
will run the snakefile (with its config file) into a working directory.
"""
description = """"""
super(Options, self).__init__(
usage=usage, prog=prog, description=description, formatter_class=easydev.SmartFormatter
)
group = self.add_argument_group("GENERAL")
group.add_argument("-w", "--working-directory", dest="wkdir", help="Set working directory", default=None)
group.add_argument("-n", "--no-splash", dest="nosplash", action="store_true", help="No splash screen")
group = self.add_argument_group("SEQUANA")
group.add_argument("-p", "--pipeline", dest="pipeline", default=None, help="A valid sequana pipeline name")
group_mut = group.add_mutually_exclusive_group()
group_mut.add_argument(
"-i",
"--input-directory",
dest="input_directory",
default=None,
help="input directory where to find the input data",
)
group_mut.add_argument("-f", "--input-files", dest="input_files", default=None, nargs="*", help="input files")
group.add_argument(
"-C",
"--replace-configfile",
dest="sequana_configfile",
default=None,
help="Replace default sequana config file with local configfile",
)
group = self.add_argument_group("GENERIC PIPELINES")
group.add_argument("-s", "--snakefile", dest="snakefile", default=None, help="A valid Snakefile")
group.add_argument(
"-c",
"--configfile",
dest="configfile",
default=None,
help="optional config file to be used by the Snakefile",
)
group.add_argument(
"-y", "--schema", dest="schemafile", default=None, help="optional schema file to check the config file"
)
def main(args=None): # pragma: no cover
if args is None:
args = sys.argv[:]
user_options = Options()
options = user_options.parse_args(args[1:])
signal.signal(signal.SIGINT, sigint_handler)
# QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
app = QW.QApplication(sys.argv)
filename = pkg_resources.resource_filename("sequanix", "media/drawing.png")
if options.nosplash:
app.processEvents()
sequanix_gui = SequanixGUI(user_options=options)
sequanix_gui.show()
else:
# Show the splash screen for a few seconds
splash_pix = QtGui.QPixmap(filename)
splash = QW.QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
splash.setMask(splash_pix.mask())
splash.show()
for i in range(0, 100):
t = time.time()
while time.time() < t + 0.5 / 100.0:
app.processEvents()
app.processEvents()
sequanix_gui = SequanixGUI(user_options=options)
sequanix_gui.show()
splash.finish(sequanix_gui)
# Make sure the main window is the active one
sequanix_gui.raise_()
sequanix_gui.activateWindow()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
the-stack_0_16529 | from __future__ import print_function
import numpy as np
import nlcpy as vp
import numba
from math import *
import time
# target libraries
nb = 'numba'
vp_naive = 'nlcpy_naive'
vp_sca = 'nlcpy_sca'
@numba.stencil
def numba_kernel_1(din):
return (din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, -1, 0] +
din[0, 1, 0]
)
@numba.stencil
def numba_kernel_2(din):
return (din[0, 0, -2] +
din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, 0, 2] +
din[0, -2, 0] +
din[0, -1, 0] +
din[0, 2, 0] +
din[0, 1, 0]
)
@numba.stencil
def numba_kernel_3(din):
return (din[0, 0, -3] +
din[0, 0, -2] +
din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, 0, 2] +
din[0, 0, 3] +
din[0, -3, 0] +
din[0, -2, 0] +
din[0, -1, 0] +
din[0, 3, 0] +
din[0, 2, 0] +
din[0, 1, 0]
)
@numba.stencil
def numba_kernel_4(din):
return (din[0, 0, -4] +
din[0, 0, -3] +
din[0, 0, -2] +
din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, 0, 2] +
din[0, 0, 3] +
din[0, 0, 4] +
din[0, -4, 0] +
din[0, -3, 0] +
din[0, -2, 0] +
din[0, -1, 0] +
din[0, 4, 0] +
din[0, 3, 0] +
din[0, 2, 0] +
din[0, 1, 0]
)
@numba.njit
def numba_launcher(din, dout, N, I=1):
for _ in range(I):
if N == 1:
numba_kernel_1(din, out=dout)
elif N == 2:
numba_kernel_2(din, out=dout)
elif N == 3:
numba_kernel_3(din, out=dout)
elif N == 4:
numba_kernel_4(din, out=dout)
def numba_impl(din, dout, N, I=1):
# warmup
numba_launcher(din, dout, N, I=1)
s = time.time()
numba_launcher(din, dout, N, I=I)
e = time.time()
return e - s
def nlcpy_naive_impl(din, dout, N, I=1):
loc_x = [i for i in range(-N, N+1)]
loc_y = [i for i in range(-N, N+1)]
vp.request.flush()
s = time.time()
for _ in range(I):
dout_v = dout[:, N:-N, N:-N]
dout_v[...] = 0
for lx in loc_x:
for ly in loc_y:
if lx != 0 and ly != 0:
continue
dout_v += din[:, N+ly:din.shape[-2]-N+ly, N+lx:din.shape[-1]-N+lx]
vp.request.flush()
e = time.time()
return e - s
def nlcpy_sca_impl(din, dout, N, I=1):
loc_x = [i for i in range(-N, N+1)]
loc_y = [i for i in range(-N, N+1)]
sin, sout = vp.sca.create_descriptor((din, dout))
d = vp.sca.empty_description()
for lx in loc_x:
for ly in loc_y:
if lx != 0 and ly != 0:
continue
d += sin[0, ly, lx]
kern = vp.sca.create_kernel(d, sout[0, 0, 0])
vp.request.flush()
s = time.time()
for _ in range(I):
kern.execute()
vp.request.flush()
e = time.time()
return e - s
def stencil_xya(din, dout, N, I=1, xp=np, lib=nb):
if lib is nb:
rt = numba_impl(din, dout, N, I)
if lib is vp_naive:
rt = nlcpy_naive_impl(din, dout, N, I)
if lib is vp_sca:
rt = nlcpy_sca_impl(din, dout, N, I)
return rt
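

# Hedged usage sketch (added for illustration; the array shape, stencil radius
# and iteration count below are arbitrary assumptions, not part of the original
# benchmark driver). It runs the same cross-shaped stencil once with the numba
# backend on NumPy arrays and once with the NLCPy SCA backend on VE arrays,
# then prints the runtimes returned by stencil_xya().
if __name__ == '__main__':
    shape = (2, 256, 256)
    N, I = 2, 10
    # numba / NumPy
    din_np = np.random.rand(*shape)
    dout_np = np.zeros(shape)
    t_nb = stencil_xya(din_np, dout_np, N, I=I, xp=np, lib=nb)
    # NLCPy SCA
    din_vp = vp.asarray(din_np)
    dout_vp = vp.zeros(shape)
    t_sca = stencil_xya(din_vp, dout_vp, N, I=I, xp=vp, lib=vp_sca)
    print('numba: {:.4f} s, nlcpy_sca: {:.4f} s'.format(t_nb, t_sca))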
|
the-stack_0_16531 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = get_datadir_path(self.options.tmpdir, 0)
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "biblepay.conf")
# datadir needs to be set before [regtest] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
|
the-stack_0_16532 | from socket import *
serverName = '127.0.0.1'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
message = raw_input("Input lower case senrence:")
clientSocket.sendto(message.encode(), (serverName, serverPort))
modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
print(modifiedMessage.decode())
clientSocket.close()
|
the-stack_0_16534 | # This config does not work with the version of DD4hep that uses Geant4 units. This config performs a comparison
# with a reference geometry which might use the ROOT units convention. This mismatch somehow triggers a ROOT exception.
# We don't currently have a fix for this problem.
import FWCore.ParameterSet.Config as cms
process = cms.Process('VALID')
process.source = cms.Source('EmptySource')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.load('Configuration.StandardSequences.DD4hep_GeometrySim_cff')
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.MuonNumbering.muonGeometryConstants_cff")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('myLog'),
myLog = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
)
)
process.DTGeometryESProducer = cms.ESProducer("DTGeometryESProducer",
DDDetector = cms.ESInputTag('',''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(False),
alignmentsLabel = cms.string(''),
attribute = cms.string('MuStructure'),
value = cms.string('MuonBarrelDT'),
fromDDD = cms.bool(True)
)
process.DDCompactViewESProducer = cms.ESProducer("DDCompactViewESProducer",
appendToDataLabel = cms.string('')
)
process.DDSpecParRegistryESProducer = cms.ESProducer("DDSpecParRegistryESProducer",
appendToDataLabel = cms.string('')
)
process.muonGeometryConstants.fromDD4Hep = True
process.valid = cms.EDAnalyzer("DTGeometryValidate",
infileName = cms.untracked.string('Geometry/DTGeometryBuilder/data/cmsRecoGeom-2021.root'),
outfileName = cms.untracked.string('validateDTGeometry.root'),
tolerance = cms.untracked.int32(7)
)
process.p = cms.Path(process.valid)
|
the-stack_0_16535 | """empty message
Revision ID: 09b6565cf4e7
Revises: 1aae34526a4a
Create Date: 2018-02-12 12:21:05.984927
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '09b6565cf4e7'
down_revision = '1aae34526a4a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('signup_date', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'signup_date')
# ### end Alembic commands ###
|
the-stack_0_16537 | "This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.functional import cached_property
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
language = translation.get_language_from_request(
request, check_path=self.is_language_prefix_patterns_used)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
if (not path_valid and settings.APPEND_SLASH
and not language_path.endswith('/')):
path_valid = is_valid_path("%s/" % language_path, urlconf)
if path_valid:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path().replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (self.is_language_prefix_patterns_used
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
@cached_property
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
|
the-stack_0_16538 | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2018
from __future__ import print_function
from future.builtins import *
import sys
import sysconfig
import os
import argparse
import streamsx.rest
def _stop(sas, cmd_args):
"""Stop the service if no jobs are running unless force is set"""
if not cmd_args.force:
status = sas.get_instance_status()
jobs = int(status['job_count'])
if jobs:
return status
return sas.stop_instance()
def run_cmd(args=None):
cmd_args = _parse_args(args)
sc = streamsx.rest.StreamingAnalyticsConnection(service_name=cmd_args.service_name)
sas = sc.get_streaming_analytics()
if cmd_args.subcmd == 'start':
result = sc.get_streaming_analytics().start_instance()
elif cmd_args.subcmd == 'stop':
result = _stop(sas, cmd_args)
elif cmd_args.subcmd == 'status':
result = sc.get_streaming_analytics().get_instance_status()
if not cmd_args.full_response:
return {k: result[k] for k in ('state', 'status', 'job_count')}
return result
def main(args=None):
""" Performs an action against a Streaming Analytics service.
"""
try:
sr = run_cmd(args)
sr['return_code'] = 0
except:
sr = {'return_code':1, 'error': sys.exc_info()}
return sr
def _parse_args(args):
""" Argument parsing
"""
cmd_parser = argparse.ArgumentParser(description='Control commands for a Streaming Analytics service.')
cmd_parser.add_argument('--service-name', help='Streaming Analytics service name')
cmd_parser.add_argument('--full-response', action='store_true', help='Print the full JSON response.')
subparsers = cmd_parser.add_subparsers(help='Supported commands', dest='subcmd')
parser_start = subparsers.add_parser('start', help='Start the service instance')
parser_status = subparsers.add_parser('status', help='Get the service status.')
parser_stop = subparsers.add_parser('stop', help='Stop the instance for the service.')
parser_stop.add_argument('--force', action='store_true', help='Stop the service even if jobs are running.')
return cmd_parser.parse_args(args)
if __name__ == '__main__':
sr = main()
rc = sr['return_code']
del sr['return_code']
if rc == 0:
print(sr)
else:
print(sr['error'][1], file=sys.stderr)
sys.exit(rc)
|
the-stack_0_16539 | # -*- coding: utf-8 -*-
# Third party imports
import pytest
# Local application imports
from mosqito.sq_metrics import loudness_zwtv
from mosqito.utils import load
from validations.sq_metrics.loudness_zwtv.validation_loudness_zwtv import (
_check_compliance,
)
@pytest.mark.loudness_zwtv # to skip or run only loudness zwicker time-varying tests
def test_loudness_zwtv():
"""Test function for the script loudness_zwicker_time
Test function for the script loudness_zwtv with
.wav file as input. The input file is provided by ISO 532-1 annex
B4 and B5, the compliance is assessed according to section 6.1 of the
standard. One .png compliance plot is generated.
Parameters
----------
None
Outputs
-------
None
"""
# Test signal as input for time-varying loudness
# (from ISO 532-1 annex B4)
signal = {
"data_file": "tests/input/Test signal 10 (tone pulse 1 kHz 10 ms 70 dB).wav",
"xls": "tests/input/Results and tests for synthetic signals (time varying loudness).xlsx",
"tab": "Test signal 10",
"N_specif_bark": 8.5,
"field": "free",
}
# Load signal and compute third octave band spectrum
sig, fs = load(signal["data_file"], wav_calib=2 * 2 ** 0.5)
# Compute Loudness
N, N_spec, bark_axis, time_axis = loudness_zwtv(sig, fs, signal["field"])
loudness = {
"name": "Loudness",
"values": N,
"specific values": N_spec,
"freqs": bark_axis,
}
# Check axis dimensions
assert len(N) == len(time_axis)
assert N_spec.shape[1] == len(time_axis)
assert N_spec.shape[0] == len(bark_axis)
# Check ISO 532-1 compliance
assert _check_compliance(loudness, signal, "./tests/output/")
# run the test function
if __name__ == "__main__":
test_loudness_zwtv()
|
the-stack_0_16541 | """
sentry.buffer.redis
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from time import time
from binascii import crc32
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import force_bytes
from sentry.buffer import Buffer
from sentry.exceptions import InvalidConfiguration
from sentry.tasks.process_buffer import process_incr, process_pending
from sentry.utils import json, metrics
from sentry.utils.compat import pickle
from sentry.utils.hashlib import md5_text
from sentry.utils.imports import import_string
from sentry.utils.redis import get_cluster_from_options
class PendingBuffer(object):
def __init__(self, size):
assert size > 0
self.buffer = [None] * size
self.size = size
self.pointer = 0
def full(self):
return self.pointer == self.size
def empty(self):
return self.pointer == 0
def append(self, item):
assert not self.full()
self.buffer[self.pointer] = item
self.pointer += 1
def clear(self):
self.pointer = 0
def flush(self):
rv = self.buffer[:self.pointer]
self.clear()
return rv
class RedisBuffer(Buffer):
key_expire = 60 * 60 # 1 hour
pending_key = 'b:p'
def __init__(self, pending_partitions=1, incr_batch_size=2, **options):
self.cluster, options = get_cluster_from_options('SENTRY_BUFFER_OPTIONS', options)
self.pending_partitions = pending_partitions
self.incr_batch_size = incr_batch_size
assert self.pending_partitions > 0
assert self.incr_batch_size > 0
def validate(self):
try:
with self.cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(six.text_type(e))
def _coerce_val(self, value):
if isinstance(value, models.Model):
value = value.pk
return force_bytes(value, errors='replace')
def _make_key(self, model, filters):
"""
Returns a Redis-compatible key for the model given filters.
"""
        return 'b:k:%s:%s' % (
            model._meta,
            md5_text(
                '&'.join(
                    '%s=%s' % (k, self._coerce_val(v))
                    for k, v in sorted(six.iteritems(filters))
                )
            ).hexdigest(),
        )
def _make_pending_key(self, partition=None):
"""
Returns the key to be used for the pending buffer.
When partitioning is enabled, there is a key for each
partition, without it, there's only the default pending_key
"""
if partition is None:
return self.pending_key
assert partition >= 0
return '%s:%d' % (self.pending_key, partition)
def _make_pending_key_from_key(self, key):
"""
Return the pending_key for a given key. This is used
to route a key into the correct pending buffer. If partitioning
is disabled, route into the no partition buffer.
"""
if self.pending_partitions == 1:
return self.pending_key
return self._make_pending_key(crc32(key) % self.pending_partitions)
def _make_lock_key(self, key):
return 'l:%s' % (key, )
def _dump_values(self, values):
result = {}
for k, v in six.iteritems(values):
result[k] = self._dump_value(v)
return result
def _dump_value(self, value):
if isinstance(value, six.string_types):
type_ = 's'
elif isinstance(value, datetime):
type_ = 'd'
value = value.strftime('%s.%f')
elif isinstance(value, int):
type_ = 'i'
elif isinstance(value, float):
type_ = 'f'
else:
raise TypeError(type(value))
return (type_, six.text_type(value))
def _load_values(self, payload):
result = {}
for k, (t, v) in six.iteritems(payload):
result[k] = self._load_value((t, v))
return result
def _load_value(self, payload):
(type_, value) = payload
if type_ == 's':
return value
elif type_ == 'd':
return datetime.fromtimestamp(float(value)).replace(
tzinfo=timezone.utc
)
elif type_ == 'i':
return int(value)
elif type_ == 'f':
return float(value)
else:
raise TypeError('invalid type: {}'.format(type_))
def incr(self, model, columns, filters, extra=None):
"""
Increment the key by doing the following:
- Insert/update a hashmap based on (model, columns)
- Perform an incrby on counters
- Perform a set (last write wins) on extra
- Add hashmap key to pending flushes
"""
# TODO(dcramer): longer term we'd rather not have to serialize values
# here (unless it's to JSON)
key = self._make_key(model, filters)
pending_key = self._make_pending_key_from_key(key)
# We can't use conn.map() due to wanting to support multiple pending
# keys (one per Redis partition)
conn = self.cluster.get_local_client_for_key(key)
pipe = conn.pipeline()
pipe.hsetnx(key, 'm', '%s.%s' % (model.__module__, model.__name__))
# TODO(dcramer): once this goes live in production, we can kill the pickle path
# (this is to ensure a zero downtime deploy where we can transition event processing)
pipe.hsetnx(key, 'f', pickle.dumps(filters))
# pipe.hsetnx(key, 'f', json.dumps(self._dump_values(filters)))
for column, amount in six.iteritems(columns):
pipe.hincrby(key, 'i+' + column, amount)
if extra:
# Group tries to serialize 'score', so we'd need some kind of processing
# hook here
# e.g. "update score if last_seen or times_seen is changed"
for column, value in six.iteritems(extra):
# TODO(dcramer): once this goes live in production, we can kill the pickle path
# (this is to ensure a zero downtime deploy where we can transition event processing)
pipe.hset(key, 'e+' + column, pickle.dumps(value))
# pipe.hset(key, 'e+' + column, json.dumps(self._dump_value(value)))
pipe.expire(key, self.key_expire)
pipe.zadd(pending_key, time(), key)
pipe.execute()
metrics.incr('buffer.incr', skip_internal=True, tags={
'module': model.__module__,
'model': model.__name__,
})
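
    # Hedged usage sketch (model and column names are assumptions, for
    # illustration only): a typical caller would do something like
    #   buffer.incr(Group, columns={'times_seen': 1},
    #               filters={'id': group_id},
    #               extra={'last_seen': timezone.now()})
    # which updates the per-key hash, bumps the counters and registers the key
    # in the pending set consumed by process_pending().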
def process_pending(self, partition=None):
if partition is None and self.pending_partitions > 1:
# If we're using partitions, this one task fans out into
# N subtasks instead.
for i in range(self.pending_partitions):
process_pending.apply_async(kwargs={'partition': i})
# Explicitly also run over the unpartitioned buffer as well
# to ease in transition. In practice, this should just be
# super fast and is fine to do redundantly.
pending_key = self._make_pending_key(partition)
client = self.cluster.get_routing_client()
lock_key = self._make_lock_key(pending_key)
# prevent a stampede due to celerybeat + periodic task
if not client.set(lock_key, '1', nx=True, ex=60):
return
pending_buffer = PendingBuffer(self.incr_batch_size)
try:
keycount = 0
with self.cluster.all() as conn:
results = conn.zrange(pending_key, 0, -1)
with self.cluster.all() as conn:
for host_id, keys in six.iteritems(results.value):
if not keys:
continue
keycount += len(keys)
for key in keys:
pending_buffer.append(key)
if pending_buffer.full():
process_incr.apply_async(
kwargs={
'batch_keys': pending_buffer.flush(),
}
)
conn.target([host_id]).zrem(pending_key, *keys)
# queue up remainder of pending keys
if not pending_buffer.empty():
process_incr.apply_async(kwargs={
'batch_keys': pending_buffer.flush(),
})
metrics.timing('buffer.pending-size', keycount)
finally:
client.delete(lock_key)
def process(self, key=None, batch_keys=None):
assert not (key is None and batch_keys is None)
assert not (key is not None and batch_keys is not None)
if key is not None:
batch_keys = [key]
for key in batch_keys:
self._process_single_incr(key)
def _process_single_incr(self, key):
client = self.cluster.get_routing_client()
lock_key = self._make_lock_key(key)
# prevent a stampede due to the way we use celery etas + duplicate
# tasks
if not client.set(lock_key, '1', nx=True, ex=10):
metrics.incr('buffer.revoked', tags={'reason': 'locked'}, skip_internal=False)
self.logger.debug('buffer.revoked.locked', extra={'redis_key': key})
return
pending_key = self._make_pending_key_from_key(key)
try:
conn = self.cluster.get_local_client_for_key(key)
pipe = conn.pipeline()
pipe.hgetall(key)
pipe.zrem(pending_key, key)
pipe.delete(key)
values = pipe.execute()[0]
if not values:
metrics.incr('buffer.revoked', tags={'reason': 'empty'}, skip_internal=False)
self.logger.debug('buffer.revoked.empty', extra={'redis_key': key})
return
model = import_string(values.pop('m'))
if values['f'].startswith('{'):
filters = self._load_values(json.loads(values.pop('f')))
else:
# TODO(dcramer): legacy pickle support - remove in Sentry 9.1
filters = pickle.loads(values.pop('f'))
incr_values = {}
extra_values = {}
for k, v in six.iteritems(values):
if k.startswith('i+'):
incr_values[k[2:]] = int(v)
elif k.startswith('e+'):
if v.startswith('['):
extra_values[k[2:]] = self._load_value(json.loads(v))
else:
# TODO(dcramer): legacy pickle support - remove in Sentry 9.1
extra_values[k[2:]] = pickle.loads(v)
super(RedisBuffer, self).process(model, incr_values, filters, extra_values)
finally:
client.delete(lock_key)
|
the-stack_0_16543 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
_DETECTRON_OPS_LIB = 'libcaffe2_detectron_ops_gpu.so'
_CMAKE_INSTALL_PREFIX = '/usr/local'
HIGHEST_BACKBONE_LVL = 5
LOWEST_BACKBONE_LVL = 2
import argparse
import cv2
# import glob
import copy
import logging
import os
import sys
import six
import time
import importlib
import pprint
import contextlib
import re
import scipy.sparse
import collections
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
# sys.path.append('/root/pmt/thirdparty/densepose/np') # path to where detectron
# bash this with model missing in 'filename' and add the path to sys
# find / -type f -iname "filename*"
# sys.path.append('path/to/where/cafffe2/is')
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import dyndep
from caffe2.python import scope
from caffe2.python import cnn
from caffe2.python import muji
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from .assets.config import assert_and_infer_cfg
from .assets.config import cfg
from .assets.config import merge_cfg_from_file
from .assets.config import load_cfg
import cython_bbox as cython_bbox
import cython_nms as cython_nms
from collections import defaultdict
from collections import OrderedDict
from six import string_types
from six.moves import cPickle as pickle
from six.moves import urllib
from glob import glob
from scipy.io import loadmat
from matplotlib.patches import Polygon
box_utils_bbox_overlaps = cython_bbox.bbox_overlaps
bbox_overlaps = cython_bbox.bbox_overlaps
logger = logging.getLogger(__name__)
FpnLevelInfo = collections.namedtuple(
'FpnLevelInfo',
['blobs', 'dims', 'spatial_scales']
)
def _progress_bar(count, total):
"""Report download progress.
Credit:
https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
' [{}] {}% of {:.1f}MB file \r'.
format(bar, percents, total / 1024 / 1024)
)
sys.stdout.flush()
if count >= total:
sys.stdout.write('\n')
def download_url(
url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar
):
"""Download url and write it to dst_file_path.
Credit:
https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
"""
response = urllib.request.urlopen(url)
if six.PY2:
total_size = response.info().getheader('Content-Length').strip()
else:
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
with open(dst_file_path, 'wb') as f:
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
if progress_hook:
progress_hook(bytes_so_far, total_size)
f.write(chunk)
return bytes_so_far
def get_class_string(class_index, score, dataset):
class_text = dataset.classes[class_index] if dataset is not None else \
'id{:d}'.format(class_index)
return class_text + ' {:0.2f}'.format(score).lstrip('0')
def kp_connections(keypoints):
kp_lines = [
[keypoints.index('left_eye'), keypoints.index('right_eye')],
[keypoints.index('left_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('right_ear')],
[keypoints.index('left_eye'), keypoints.index('left_ear')],
[keypoints.index('right_shoulder'), keypoints.index('right_elbow')],
[keypoints.index('right_elbow'), keypoints.index('right_wrist')],
[keypoints.index('left_shoulder'), keypoints.index('left_elbow')],
[keypoints.index('left_elbow'), keypoints.index('left_wrist')],
[keypoints.index('right_hip'), keypoints.index('right_knee')],
[keypoints.index('right_knee'), keypoints.index('right_ankle')],
[keypoints.index('left_hip'), keypoints.index('left_knee')],
[keypoints.index('left_knee'), keypoints.index('left_ankle')],
[keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],
[keypoints.index('right_hip'), keypoints.index('left_hip')],
]
return kp_lines
def colormap(rgb=False):
color_list = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
if not rgb:
color_list = color_list[:, ::-1]
return color_list
def keypoint_utils_get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):
"""Convert from the class boxes/segms/keyps format generated by the testing
code.
"""
box_list = [b for b in cls_boxes if len(b) > 0]
if len(box_list) > 0:
boxes = np.concatenate(box_list)
else:
boxes = None
if cls_segms is not None:
segms = [s for slist in cls_segms for s in slist]
else:
segms = None
if cls_keyps is not None:
keyps = [k for klist in cls_keyps for k in klist]
else:
keyps = None
classes = []
for j in range(len(cls_boxes)):
classes += [j] * len(cls_boxes[j])
return boxes, segms, keyps, classes
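# Illustrative sketch (not part of the original Detectron code): shows the
# flattening performed by convert_from_cls_format on a made-up 3-class result,
# where class 0 is the background and contributes no boxes.
def _example_convert_from_cls_format():
    cls_boxes = [
        np.zeros((0, 5), dtype=np.float32),                 # background: no boxes
        np.array([[0, 0, 10, 10, 0.9]], dtype=np.float32),  # class 1: one box
        np.array([[5, 5, 20, 20, 0.8],
                  [2, 2, 8, 8, 0.7]], dtype=np.float32),    # class 2: two boxes
    ]
    boxes, segms, keyps, classes = convert_from_cls_format(cls_boxes, None, None)
    # boxes has shape (3, 5); classes == [1, 2, 2]; segms and keyps are None.
    return boxes, classes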
def vis_utils_vis_one_image(
im, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,
kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
ext='pdf'):
"""Visual debugging of detections."""
if isinstance(boxes, list):
boxes, segms, keypoints, classes = convert_from_cls_format(
boxes, segms, keypoints)
if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
return
dataset_keypoints, _ = keypoint_utils_get_keypoints()
if segms is not None and len(segms) > 0:
masks = mask_util.decode(segms)
color_list = colormap(rgb=True) / 255
kp_lines = kp_connections(dataset_keypoints)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
# Display in largest to smallest order to reduce occlusion
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
sorted_inds = np.argsort(-areas)
IUV_fields = body_uv[1]
#
All_Coords = np.zeros(im.shape)
All_inds = np.zeros([im.shape[0],im.shape[1]])
K = 26
##
inds = np.argsort(boxes[:,4])
##
for i, ind in enumerate(inds):
        entry = boxes[ind, :]
        if entry[4] > 0.65:
            entry = entry[0:4].astype(int)
            ####
            output = IUV_fields[ind]
            ####
            All_Coords_Old = All_Coords[entry[1]:entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2], :]
            All_Coords_Old[All_Coords_Old == 0] = output.transpose([1, 2, 0])[All_Coords_Old == 0]
            All_Coords[entry[1]:entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2], :] = All_Coords_Old
            ###
            CurrentMask = (output[0, :, :] > 0).astype(np.float32)
            All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2]]
            All_inds_old[All_inds_old == 0] = CurrentMask[All_inds_old == 0] * i
            All_inds[entry[1]:entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2]] = All_inds_old
#
All_Coords[:,:,1:3] = 255. * All_Coords[:,:,1:3]
All_Coords[All_Coords>255] = 255.
All_Coords = All_Coords.astype(np.uint8)
return All_Coords
def envu_get_detectron_ops_lib():
"""Retrieve Detectron ops library."""
# Candidate prefixes for detectron ops lib path
prefixes = [_CMAKE_INSTALL_PREFIX, sys.prefix, sys.exec_prefix] + sys.path
# Candidate subdirs for detectron ops lib
subdirs = ['lib', 'torch/lib']
# Try to find detectron ops lib
for prefix in prefixes:
for subdir in subdirs:
ops_path = os.path.join(prefix, subdir, _DETECTRON_OPS_LIB)
if os.path.exists(ops_path):
#print('Found Detectron ops lib: {}'.format(ops_path))
return ops_path
raise Exception('Detectron ops lib not found')
def c2_utils_import_detectron_ops():
"""Import Detectron ops."""
detectron_ops_lib = envu_get_detectron_ops_lib()
dyndep.InitOpsLibrary(detectron_ops_lib)
def dummy_datasets_get_coco_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def im_detect_body_uv(model, im_scale, boxes):
"""Compute body uv predictions."""
M = cfg.BODY_UV_RCNN.HEATMAP_SIZE
P = cfg.BODY_UV_RCNN.NUM_PATCHES
if boxes.shape[0] == 0:
pred_body_uvs = np.zeros((0, P, M, M), np.float32)
return pred_body_uvs
inputs = {'body_uv_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'body_uv_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.body_uv_net.Proto().name)
AnnIndex = workspace.FetchBlob(core.ScopedName('AnnIndex')).squeeze()
Index_UV = workspace.FetchBlob(core.ScopedName('Index_UV')).squeeze()
U_uv = workspace.FetchBlob(core.ScopedName('U_estimated')).squeeze()
V_uv = workspace.FetchBlob(core.ScopedName('V_estimated')).squeeze()
    # In case there is only 1 box the squeezed outputs are 3-D; restore the leading box axis
if AnnIndex.ndim == 3:
AnnIndex = np.expand_dims(AnnIndex, axis=0)
if Index_UV.ndim == 3:
Index_UV = np.expand_dims(Index_UV, axis=0)
if U_uv.ndim == 3:
U_uv = np.expand_dims(U_uv, axis=0)
if V_uv.ndim == 3:
V_uv = np.expand_dims(V_uv, axis=0)
K = cfg.BODY_UV_RCNN.NUM_PATCHES + 1
outputs = []
for ind, entry in enumerate(boxes):
# Compute ref box width and height
bx = int(max(entry[2] - entry[0], 1))
by = int(max(entry[3] - entry[1], 1))
        # preds[ind] axes are CHW; bring them to WHC for resizing
CurAnnIndex = np.swapaxes(AnnIndex[ind], 0, 2)
CurIndex_UV = np.swapaxes(Index_UV[ind], 0, 2)
CurU_uv = np.swapaxes(U_uv[ind], 0, 2)
CurV_uv = np.swapaxes(V_uv[ind], 0, 2)
# Resize p from (HEATMAP_SIZE, HEATMAP_SIZE, c) to (int(bx), int(by), c)
CurAnnIndex = cv2.resize(CurAnnIndex, (by, bx))
CurIndex_UV = cv2.resize(CurIndex_UV, (by, bx))
CurU_uv = cv2.resize(CurU_uv, (by, bx))
CurV_uv = cv2.resize(CurV_uv, (by, bx))
# Bring Cur_Preds axes back to CHW
CurAnnIndex = np.swapaxes(CurAnnIndex, 0, 2)
CurIndex_UV = np.swapaxes(CurIndex_UV, 0, 2)
CurU_uv = np.swapaxes(CurU_uv, 0, 2)
CurV_uv = np.swapaxes(CurV_uv, 0, 2)
# Removed squeeze calls due to singleton dimension issues
CurAnnIndex = np.argmax(CurAnnIndex, axis=0)
CurIndex_UV = np.argmax(CurIndex_UV, axis=0)
CurIndex_UV = CurIndex_UV * (CurAnnIndex>0).astype(np.float32)
output = np.zeros([3, int(by), int(bx)], dtype=np.float32)
output[0] = CurIndex_UV
for part_id in range(1, K):
CurrentU = CurU_uv[part_id]
CurrentV = CurV_uv[part_id]
output[1, CurIndex_UV==part_id] = CurrentU[CurIndex_UV==part_id]
output[2, CurIndex_UV==part_id] = CurrentV[CurIndex_UV==part_id]
outputs.append(output)
num_classes = cfg.MODEL.NUM_CLASSES
cls_bodys = [[] for _ in range(num_classes)]
person_idx = keypoint_utils_get_person_class_index()
cls_bodys[person_idx] = outputs
return cls_bodys
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
"""Compute OKS for predicted keypoints wrt gt_keypoints.
src_keypoints: 4xK
src_roi: 4x1
dst_keypoints: Nx4xK
dst_roi: Nx4
"""
sigmas = np.array([
.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
.87, .89, .89]) / 10.0
vars = (sigmas * 2)**2
# area
src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)
# measure the per-keypoint distance if keypoints visible
dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]
e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
e = np.sum(np.exp(-e), axis=1) / e.shape[1]
return e
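# Illustrative sketch (not part of the original code): a minimal sanity check
# for compute_oks. With a single candidate whose keypoints match the source
# exactly (dx = dy = 0), every per-keypoint term is exp(0) = 1, so the returned
# OKS is 1.0. The 4xK layout is (x, y, logit, prob) with K = 17 COCO keypoints,
# matching the sigmas defined above; all coordinates below are arbitrary.
def _example_compute_oks_identity():
    num_kps = 17
    src_keypoints = np.zeros((4, num_kps), dtype=np.float32)
    src_keypoints[0, :] = np.arange(num_kps)         # x coordinates
    src_keypoints[1, :] = np.arange(num_kps) * 2.0   # y coordinates
    src_roi = np.array([0.0, 0.0, 50.0, 50.0])
    dst_keypoints = src_keypoints[np.newaxis, :, :]  # one candidate, identical
    dst_roi = src_roi[np.newaxis, :]
    oks = compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi)
    # oks is an array of shape (1,) with value 1.0
    return oks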
def keypoint_utils_nms_oks(kp_predictions, rois, thresh):
"""Nms based on kp predictions."""
scores = np.mean(kp_predictions[:, 2, :], axis=1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = compute_oks(
kp_predictions[i], rois[i], kp_predictions[order[1:]],
rois[order[1:]])
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
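# Illustrative sketch (not part of the original code): scores_to_probs applies
# a spatial softmax independently per channel, so each channel of the CxHxW
# output sums to 1. The input values here are arbitrary.
def _example_scores_to_probs():
    scores = np.arange(2 * 3 * 3, dtype=np.float32).reshape(2, 3, 3)
    probs = scores_to_probs(scores.copy())
    # Each channel is now a probability map; the argmax location is unchanged.
    assert np.allclose(probs.reshape(2, -1).sum(axis=1), 1.0)
    return probs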
def keypoint_utils_heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = cfg.KRCNN.INFERENCE_MIN_SIZE
xy_preds = np.zeros(
(len(rois), 4, cfg.KRCNN.NUM_KEYPOINTS), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height),
interpolation=cv2.INTER_CUBIC)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(cfg.KRCNN.NUM_KEYPOINTS):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
return xy_preds
def keypoint_utils_get_person_class_index():
"""Index of the person class in COCO."""
return 1
def keypoint_results(cls_boxes, pred_heatmaps, ref_boxes):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils_get_person_class_index()
xy_preds = keypoint_utils_heatmaps_to_keypoints(pred_heatmaps, ref_boxes)
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils_nms_oks(xy_preds, ref_boxes, 0.3)
xy_preds = xy_preds[keep, :, :]
ref_boxes = ref_boxes[keep, :]
pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def im_detect_keypoints(model, im_scale, boxes):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'keypoint_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.keypoint_net.Proto().name)
pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # In case there is only 1 box the squeezed heatmaps are 3-D; restore the leading box axis
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
"""Combines heatmaps while taking object sizes into account."""
assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
'All sets of hms must be tagged with downscaling and upscaling flags'
# Classify objects into small+medium and large based on their box areas
areas = box_utils_boxes_area(boxes)
sm_objs = areas < cfg.TEST.KPS_AUG.AREA_TH
l_objs = areas >= cfg.TEST.KPS_AUG.AREA_TH
# Combine heatmaps computed under different transformations for each object
hms_c = np.zeros_like(hms_ts[0])
for i in range(hms_c.shape[0]):
hms_to_combine = []
for hms_t, ds_t, us_t in zip(hms_ts, ds_ts, us_ts):
# Discard downscaling predictions for small and medium objects
if sm_objs[i] and ds_t:
continue
# Discard upscaling predictions for large objects
if l_objs[i] and us_t:
continue
hms_to_combine.append(hms_t[i])
hms_c[i] = heur_f(hms_to_combine)
return hms_c
def im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=False
):
"""Detects keypoints at the given width-relative aspect ratio."""
    # Perform keypoint detection on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils_aspect_ratio(boxes, aspect_ratio)
if hflip:
heatmaps_ar = im_detect_keypoints_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
heatmaps_ar = im_detect_keypoints(model, im_scale, boxes_ar)
return heatmaps_ar
def im_detect_keypoints_scale(
model, im, target_scale, target_max_size, boxes, hflip=False
):
"""Computes keypoint predictions at the given scale."""
if hflip:
heatmaps_scl = im_detect_keypoints_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
heatmaps_scl = im_detect_keypoints(model, im_scale, boxes)
return heatmaps_scl
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def keypoint_utils_flip_heatmaps(heatmaps):
"""Flip heatmaps horizontally."""
keypoints, flip_map = get_keypoints()
heatmaps_flipped = heatmaps.copy()
for lkp, rkp in flip_map.items():
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
heatmaps_flipped[:, rid, :, :] = heatmaps[:, lid, :, :]
heatmaps_flipped[:, lid, :, :] = heatmaps[:, rid, :, :]
heatmaps_flipped = heatmaps_flipped[:, :, :, ::-1]
return heatmaps_flipped
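# Illustrative sketch (not part of the original code): keypoint_utils_flip_heatmaps
# swaps each left/right keypoint channel pair and mirrors the maps along the
# horizontal axis, so flipping twice recovers the original tensor. Shapes and
# values below are arbitrary.
def _example_flip_heatmaps_roundtrip():
    num_rois, num_kps, hm_size = 2, 17, 8
    heatmaps = np.random.RandomState(0).rand(num_rois, num_kps, hm_size, hm_size)
    flipped = keypoint_utils_flip_heatmaps(heatmaps)
    restored = keypoint_utils_flip_heatmaps(flipped)
    assert np.allclose(restored, heatmaps)
    return flipped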
def im_detect_keypoints_hflip(model, im, target_scale, target_max_size, boxes):
"""Computes keypoint predictions on the horizontally flipped image.
Function signature is the same as for im_detect_keypoints_aug.
"""
# Compute keypoints for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils_flip_boxes(boxes, im.shape[1])
im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
heatmaps_hf = im_detect_keypoints(model, im_scale, boxes_hf)
# Invert the predicted keypoints
heatmaps_inv = keypoint_utils_flip_heatmaps(heatmaps_hf)
return heatmaps_inv
def im_detect_keypoints_aug(model, im, boxes):
"""Computes keypoint predictions with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
Returns:
heatmaps (ndarray): R x J x M x M array of keypoint location logits
"""
# Collect heatmaps predicted under different transformations
heatmaps_ts = []
# Tag predictions computed under downscaling and upscaling transformations
ds_ts = []
us_ts = []
def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):
heatmaps_ts.append(heatmaps_t)
ds_ts.append(ds_t)
us_ts.append(us_t)
# Compute the heatmaps for the original image (identity transform)
im_scale = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
heatmaps_i = im_detect_keypoints(model, im_scale, boxes)
add_heatmaps_t(heatmaps_i)
# Perform keypoints detection on the horizontally flipped image
if cfg.TEST.KPS_AUG.H_FLIP:
heatmaps_hf = im_detect_keypoints_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_hf)
# Compute detections at different scales
for scale in cfg.TEST.KPS_AUG.SCALES:
ds_scl = scale < cfg.TEST.SCALE
us_scl = scale > cfg.TEST.SCALE
heatmaps_scl = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)
if cfg.TEST.KPS_AUG.SCALE_H_FLIP:
heatmaps_scl_hf = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True
)
add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)
# Compute keypoints at different aspect ratios
for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:
heatmaps_ar = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes
)
add_heatmaps_t(heatmaps_ar)
if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:
heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
add_heatmaps_t(heatmaps_ar_hf)
# Select the heuristic function for combining the heatmaps
if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':
np_f = np.mean
elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':
np_f = np.amax
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)
)
def heur_f(hms_ts):
return np_f(hms_ts, axis=0)
# Combine the heatmaps
if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:
heatmaps_c = combine_heatmaps_size_dep(
heatmaps_ts, ds_ts, us_ts, boxes, heur_f
)
else:
heatmaps_c = heur_f(heatmaps_ts)
return heatmaps_c
def box_utils_expand_boxes(boxes, scale):
"""Expand an array of boxes by a given scale."""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
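# Illustrative sketch (not part of the original code): box_utils_expand_boxes
# scales each box about its own center. A 10x20 box expanded by 1.5 keeps its
# center at (5, 10) and becomes 15x30.
def _example_expand_boxes():
    boxes = np.array([[0.0, 0.0, 10.0, 20.0]])
    expanded = box_utils_expand_boxes(boxes, 1.5)
    # expanded == [[-2.5, -5.0, 12.5, 25.0]]
    return expanded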
def segm_results(cls_boxes, masks, ref_boxes, im_h, im_w):
num_classes = cfg.MODEL.NUM_CLASSES
cls_segms = [[] for _ in range(num_classes)]
mask_ind = 0
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
M = cfg.MRCNN.RESOLUTION
scale = (M + 2.0) / M
ref_boxes = box_utils_expand_boxes(ref_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
# skip j = 0, because it's the background class
for j in range(1, num_classes):
segms = []
for _ in range(cls_boxes[j].shape[0]):
if cfg.MRCNN.CLS_SPECIFIC_MASK:
padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
else:
padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
ref_box = ref_boxes[mask_ind, :]
w = ref_box[2] - ref_box[0] + 1
h = ref_box[3] - ref_box[1] + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, im_w)
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - ref_box[1]):(y_1 - ref_box[1]),
(x_0 - ref_box[0]):(x_1 - ref_box[0])
]
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F')
)[0]
segms.append(rle)
mask_ind += 1
cls_segms[j] = segms
assert mask_ind == masks.shape[0]
return cls_segms
def im_detect_mask(model, im_scale, boxes):
"""Infer instance segmentation masks. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_masks (ndarray): R x K x M x M array of class specific soft masks
output by the network (must be processed by segm_results to convert
into hard masks in the original image coordinate space)
"""
M = cfg.MRCNN.RESOLUTION
if boxes.shape[0] == 0:
pred_masks = np.zeros((0, M, M), np.float32)
return pred_masks
inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'mask_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.mask_net.Proto().name)
# Fetch masks
pred_masks = workspace.FetchBlob(
core.ScopedName('mask_fcn_probs')
).squeeze()
if cfg.MRCNN.CLS_SPECIFIC_MASK:
pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])
else:
pred_masks = pred_masks.reshape([-1, 1, M, M])
return pred_masks
def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
"""Computes mask detections at the given width-relative aspect ratio."""
# Perform mask detection on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils_aspect_ratio(boxes, aspect_ratio)
if hflip:
masks_ar = im_detect_mask_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
masks_ar = im_detect_mask(model, im_scale, boxes_ar)
return masks_ar
def im_detect_mask_scale(
model, im, target_scale, target_max_size, boxes, hflip=False
):
"""Computes masks at the given scale."""
if hflip:
masks_scl = im_detect_mask_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
masks_scl = im_detect_mask(model, im_scale, boxes)
return masks_scl
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes):
"""Performs mask detection on the horizontally flipped image.
Function signature is the same as for im_detect_mask_aug.
"""
# Compute the masks for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils_flip_boxes(boxes, im.shape[1])
im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
masks_hf = im_detect_mask(model, im_scale, boxes_hf)
# Invert the predicted soft masks
masks_inv = masks_hf[:, :, :, ::-1]
return masks_inv
def im_conv_body_only(model, im, target_scale, target_max_size):
"""Runs `model.conv_body_net` on the given image `im`."""
im_blob, im_scale, _im_info = blob_utils_get_image_blob(
im, target_scale, target_max_size
)
workspace.FeedBlob(core.ScopedName('data'), im_blob)
workspace.RunNet(model.conv_body_net.Proto().name)
return im_scale
def im_detect_mask_aug(model, im, boxes):
"""Performs mask detection with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
Returns:
masks (ndarray): R x K x M x M array of class specific soft masks
"""
assert not cfg.TEST.MASK_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
# Collect masks computed under different transformations
masks_ts = []
# Compute masks for the original image (identity transform)
im_scale_i = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
masks_i = im_detect_mask(model, im_scale_i, boxes)
masks_ts.append(masks_i)
# Perform mask detection on the horizontally flipped image
if cfg.TEST.MASK_AUG.H_FLIP:
masks_hf = im_detect_mask_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
masks_ts.append(masks_hf)
# Compute detections at different scales
for scale in cfg.TEST.MASK_AUG.SCALES:
max_size = cfg.TEST.MASK_AUG.MAX_SIZE
masks_scl = im_detect_mask_scale(model, im, scale, max_size, boxes)
masks_ts.append(masks_scl)
if cfg.TEST.MASK_AUG.SCALE_H_FLIP:
masks_scl_hf = im_detect_mask_scale(
model, im, scale, max_size, boxes, hflip=True
)
masks_ts.append(masks_scl_hf)
# Compute masks at different aspect ratios
for aspect_ratio in cfg.TEST.MASK_AUG.ASPECT_RATIOS:
masks_ar = im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes)
masks_ts.append(masks_ar)
if cfg.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP:
masks_ar_hf = im_detect_mask_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
masks_ts.append(masks_ar_hf)
# Combine the predicted soft masks
if cfg.TEST.MASK_AUG.HEUR == 'SOFT_AVG':
masks_c = np.mean(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'SOFT_MAX':
masks_c = np.amax(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'LOGIT_AVG':
def logit(y):
return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))
logit_masks = [logit(y) for y in masks_ts]
logit_masks = np.mean(logit_masks, axis=0)
masks_c = 1.0 / (1.0 + np.exp(-logit_masks))
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.MASK_AUG.HEUR)
)
return masks_c
def box_utils_box_voting(top_dets, all_dets, thresh, scoring_method='ID', beta=1.0):
"""Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.
See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
referenced paper) can be applied by setting `scoring_method` appropriately.
"""
    # top_dets is [N, 5] each row is [x1 y1 x2 y2, score]
    # all_dets is [N, 5] each row is [x1 y1 x2 y2, score]
top_dets_out = top_dets.copy()
top_boxes = top_dets[:, :4]
all_boxes = all_dets[:, :4]
all_scores = all_dets[:, 4]
top_to_all_overlaps = bbox_overlaps(top_boxes, all_boxes)
for k in range(top_dets_out.shape[0]):
inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
boxes_to_vote = all_boxes[inds_to_vote, :]
ws = all_scores[inds_to_vote]
top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
if scoring_method == 'ID':
# Identity, nothing to do
pass
elif scoring_method == 'TEMP_AVG':
# Average probabilities (considered as P(detected class) vs.
# P(not the detected class)) after smoothing with a temperature
# hyperparameter.
P = np.vstack((ws, 1.0 - ws))
P_max = np.max(P, axis=0)
X = np.log(P / P_max)
X_exp = np.exp(X / beta)
P_temp = X_exp / np.sum(X_exp, axis=0)
P_avg = P_temp[0].mean()
top_dets_out[k, 4] = P_avg
elif scoring_method == 'AVG':
# Combine new probs from overlapping boxes
top_dets_out[k, 4] = ws.mean()
elif scoring_method == 'IOU_AVG':
P = ws
ws = top_to_all_overlaps[k, inds_to_vote]
P_avg = np.average(P, weights=ws)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'GENERALIZED_AVG':
P_avg = np.mean(ws**beta)**(1.0 / beta)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'QUASI_SUM':
top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
else:
raise NotImplementedError(
'Unknown scoring method {}'.format(scoring_method)
)
return top_dets_out
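# Illustrative sketch (not part of the original code): the core of bounding-box
# voting is a score-weighted average of the coordinates of all detections that
# overlap a surviving (post-NMS) detection. The arrays below are made-up; the
# real box_utils_box_voting above additionally selects the voters with an IoU
# threshold computed via cython_bbox.
def _example_box_voting_average():
    # Two overlapping detections of the same object, with scores 0.9 and 0.6.
    boxes_to_vote = np.array([[10.0, 10.0, 50.0, 50.0],
                              [12.0, 12.0, 54.0, 54.0]])
    ws = np.array([0.9, 0.6])
    voted_box = np.average(boxes_to_vote, axis=0, weights=ws)
    # With 'GENERALIZED_AVG' scoring and beta=1.0 this reduces to the mean score.
    beta = 1.0
    voted_score = np.mean(ws ** beta) ** (1.0 / beta)
    return voted_box, voted_score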
def box_utils_soft_nms(
dets, sigma=0.5, overlap_thresh=0.3, score_thresh=0.001, method='linear'
):
"""Apply the soft NMS algorithm from https://arxiv.org/abs/1704.04503."""
if dets.shape[0] == 0:
return dets, []
methods = {'hard': 0, 'linear': 1, 'gaussian': 2}
assert method in methods, 'Unknown soft_nms method: {}'.format(method)
dets, keep = cython_nms.soft_nms(
np.ascontiguousarray(dets, dtype=np.float32),
np.float32(sigma),
np.float32(overlap_thresh),
np.float32(score_thresh),
np.uint8(methods[method])
)
return dets, keep
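# Illustrative sketch (not part of the original code): a pure-Python/NumPy
# rendering of the 'linear' soft-NMS variant that box_utils_soft_nms delegates
# to cython_nms. Scores of boxes that overlap the current top detection by more
# than overlap_thresh are decayed by (1 - IoU) instead of being removed outright.
def _example_linear_soft_nms(dets, overlap_thresh=0.3, score_thresh=0.001):
    dets = dets.copy()
    keep = []
    order = list(range(dets.shape[0]))
    while order:
        # Pick the remaining box with the highest (possibly decayed) score.
        idx = max(order, key=lambda i: dets[i, 4])
        if dets[idx, 4] < score_thresh:
            break
        keep.append(idx)
        order.remove(idx)
        x1, y1, x2, y2, _ = dets[idx]
        area = (x2 - x1 + 1) * (y2 - y1 + 1)
        for j in order:
            xx1 = max(x1, dets[j, 0])
            yy1 = max(y1, dets[j, 1])
            xx2 = min(x2, dets[j, 2])
            yy2 = min(y2, dets[j, 3])
            w = max(0.0, xx2 - xx1 + 1)
            h = max(0.0, yy2 - yy1 + 1)
            inter = w * h
            area_j = (dets[j, 2] - dets[j, 0] + 1) * (dets[j, 3] - dets[j, 1] + 1)
            iou = inter / (area + area_j - inter)
            if iou > overlap_thresh:
                # Linear decay: heavily overlapping boxes lose most of their score.
                dets[j, 4] *= (1.0 - iou)
    return dets[keep], keep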
def box_results_with_nms_and_limit(scores, boxes):
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
    `scores` has shape (#detections, #classes), where each row represents a list
    of object detection confidence scores for each of the object classes in the
    dataset (including the background class). `scores[i, j]` corresponds to the
    box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
for j in range(1, num_classes):
inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4:(j + 1) * 4]
dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
np.float32, copy=False
)
if cfg.TEST.SOFT_NMS.ENABLED:
nms_dets, _ = box_utils_soft_nms(
dets_j,
sigma=cfg.TEST.SOFT_NMS.SIGMA,
overlap_thresh=cfg.TEST.NMS,
score_thresh=0.0001,
method=cfg.TEST.SOFT_NMS.METHOD
)
else:
keep = box_utils_nms(dets_j, cfg.TEST.NMS)
nms_dets = dets_j[keep, :]
# Refine the post-NMS boxes using bounding-box voting
if cfg.TEST.BBOX_VOTE.ENABLED:
nms_dets = box_utils_box_voting(
nms_dets,
dets_j,
cfg.TEST.BBOX_VOTE.VOTE_TH,
scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
)
cls_boxes[j] = nms_dets
# Limit to max_per_image detections **over all classes**
if cfg.TEST.DETECTIONS_PER_IM > 0:
image_scores = np.hstack(
[cls_boxes[j][:, -1] for j in range(1, num_classes)]
)
if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
for j in range(1, num_classes):
keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
cls_boxes[j] = cls_boxes[j][keep, :]
im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
boxes = im_results[:, :-1]
scores = im_results[:, -1]
return scores, boxes, cls_boxes
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
rois = im_rois.astype(np.float, copy=False) * scales
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
return rois, levels
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois, levels = _project_im_rois(im_rois, im_scale)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
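# Illustrative sketch (not part of the original code): the network expects RoIs
# as an R x 5 blob whose first column is the image-pyramid level (always 0 here,
# since a single scale is used) followed by the scaled [x1, y1, x2, y2]
# coordinates. The numbers below are arbitrary; like the helpers above, this
# relies on the legacy np.float alias used throughout this module.
def _example_rois_blob():
    im_rois = np.array([[10.0, 20.0, 110.0, 220.0]])
    rois_blob = _get_rois_blob(im_rois, im_scale=2.0)
    # rois_blob == [[0., 20., 40., 220., 440.]] with dtype float32
    return rois_blob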
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils_get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):
"""Bounding box object detection for an image with given box proposals.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals in 0-indexed
[x1, y1, x2, y2] format, or None if using RPN
Returns:
scores (ndarray): R x K array of object class scores for K classes
(K includes background as object category 0)
boxes (ndarray): R x 4*K array of predicted bounding boxes
im_scales (list): list of image scales used in the input blob (as
returned by _get_blobs and for use with im_detect_mask, etc.)
"""
inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(
hashes, return_index=True, return_inverse=True
)
inputs['rois'] = inputs['rois'][index, :]
boxes = boxes[index, :]
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
_add_multilevel_rois_for_test(inputs, 'rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.net.Proto().name)
# Read out blobs
if cfg.MODEL.FASTER_RCNN:
rois = workspace.FetchBlob(core.ScopedName('rois'))
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scale
# Softmax class probabilities
scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze()
# In case there is 1 proposal
scores = scores.reshape([-1, scores.shape[-1]])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze()
# In case there is 1 proposal
box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
# Remove predictions for bg class (compat with MSRA code)
box_deltas = box_deltas[:, -4:]
pred_boxes = box_utils_bbox_transform(
boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS
)
pred_boxes = box_utils_clip_tiled_boxes(pred_boxes, im.shape)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes, im_scale
def box_utils_aspect_ratio(boxes, aspect_ratio):
"""Perform width-relative aspect ratio transformation."""
boxes_ar = boxes.copy()
boxes_ar[:, 0::4] = aspect_ratio * boxes[:, 0::4]
boxes_ar[:, 2::4] = aspect_ratio * boxes[:, 2::4]
return boxes_ar
def image_utils_aspect_ratio_rel(im, aspect_ratio):
"""Performs width-relative aspect ratio transformation."""
im_h, im_w = im.shape[:2]
im_ar_w = int(round(aspect_ratio * im_w))
im_ar = cv2.resize(im, dsize=(im_ar_w, im_h))
return im_ar
def im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals=None, hflip=False
):
"""Computes bbox detections at the given width-relative aspect ratio.
Returns predictions in the original image space.
"""
# Compute predictions on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
if not cfg.MODEL.FASTER_RCNN:
box_proposals_ar = box_utils_aspect_ratio(box_proposals, aspect_ratio)
else:
box_proposals_ar = None
if hflip:
scores_ar, boxes_ar, _ = im_detect_bbox_hflip(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals_ar
)
else:
scores_ar, boxes_ar, _ = im_detect_bbox(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=box_proposals_ar
)
# Invert the detected boxes
boxes_inv = box_utils_aspect_ratio(boxes_ar, 1.0 / aspect_ratio)
return scores_ar, boxes_inv
def im_detect_bbox_scale(
model, im, target_scale, target_max_size, box_proposals=None, hflip=False
):
"""Computes bbox detections at the given scale.
Returns predictions in the original image space.
"""
if hflip:
scores_scl, boxes_scl, _ = im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=box_proposals
)
else:
scores_scl, boxes_scl, _ = im_detect_bbox(
model, im, target_scale, target_max_size, boxes=box_proposals
)
return scores_scl, boxes_scl
def box_utils_flip_boxes(boxes, im_width):
"""Flip boxes horizontally."""
boxes_flipped = boxes.copy()
boxes_flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1
boxes_flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1
return boxes_flipped
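# Illustrative sketch (not part of the original code): box_utils_flip_boxes
# mirrors [x1, y1, x2, y2] boxes about the vertical image axis using the
# 0-indexed convention (x -> im_width - x - 1), so flipping twice is a no-op.
def _example_flip_boxes_roundtrip():
    im_width = 100
    boxes = np.array([[10.0, 5.0, 30.0, 25.0]])
    flipped = box_utils_flip_boxes(boxes, im_width)
    # flipped == [[69., 5., 89., 25.]]
    assert np.allclose(box_utils_flip_boxes(flipped, im_width), boxes)
    return flipped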
def im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=None
):
"""Performs bbox detection on the horizontally flipped image.
Function signature is the same as for im_detect_bbox.
"""
# Compute predictions on the flipped image
im_hf = im[:, ::-1, :]
im_width = im.shape[1]
if not cfg.MODEL.FASTER_RCNN:
box_proposals_hf = box_utils_flip_boxes(box_proposals, im_width)
else:
box_proposals_hf = None
scores_hf, boxes_hf, im_scale = im_detect_bbox(
model, im_hf, target_scale, target_max_size, boxes=box_proposals_hf
)
# Invert the detections computed on the flipped image
boxes_inv = box_utils_flip_boxes(boxes_hf, im_width)
return scores_hf, boxes_inv, im_scale
def im_detect_bbox_aug(model, im, box_proposals=None):
"""Performs bbox detection with test-time augmentations.
Function signature is the same as for im_detect_bbox.
"""
assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \
'Coord heuristic must be union whenever score heuristic is union'
assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Score heuristic must be union whenever coord heuristic is union'
assert not cfg.MODEL.FASTER_RCNN or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Union heuristic must be used to combine Faster RCNN predictions'
# Collect detections computed under different transformations
scores_ts = []
boxes_ts = []
def add_preds_t(scores_t, boxes_t):
scores_ts.append(scores_t)
boxes_ts.append(boxes_t)
# Perform detection on the horizontally flipped image
if cfg.TEST.BBOX_AUG.H_FLIP:
scores_hf, boxes_hf, _ = im_detect_bbox_hflip(
model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals
)
add_preds_t(scores_hf, boxes_hf)
# Compute detections at different scales
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
scores_scl, boxes_scl = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals
)
add_preds_t(scores_scl, boxes_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals, hflip=True
)
add_preds_t(scores_scl_hf, boxes_scl_hf)
# Perform detection at different aspect ratios
for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:
scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals
)
add_preds_t(scores_ar, boxes_ar)
if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:
scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals, hflip=True
)
add_preds_t(scores_ar_hf, boxes_ar_hf)
# Compute detections for the original image (identity transform) last to
# ensure that the Caffe2 workspace is populated with blobs corresponding
# to the original image on return (postcondition of im_detect_bbox)
scores_i, boxes_i, im_scale_i = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
add_preds_t(scores_i, boxes_i)
# Combine the predicted scores
if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':
scores_c = scores_i
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':
scores_c = np.mean(scores_ts, axis=0)
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':
scores_c = np.vstack(scores_ts)
else:
raise NotImplementedError(
'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)
)
# Combine the predicted boxes
if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':
boxes_c = boxes_i
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':
boxes_c = np.mean(boxes_ts, axis=0)
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':
boxes_c = np.vstack(boxes_ts)
else:
raise NotImplementedError(
'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)
)
return scores_c, boxes_c, im_scale_i
def im_list_to_blob(ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent: i.e.
- BGR channel order
- pixel means subtracted
- resized to the desired input size
- float32 numpy ndarray format
    Output is a 4D NCHW tensor of the images concatenated along axis 0, with
    shape (num_images, 3, max_height, max_width).
"""
if not isinstance(ims, list):
ims = [ims]
max_shape = np.array([im.shape for im in ims]).max(axis=0)
# Pad the image so they can be divisible by a stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
num_images = len(ims)
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], 3), dtype=np.float32
)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Prepare an image for use as a network input blob. Specially:
- Subtract per-channel pixel mean
- Convert to float32
- Rescale to each of the specified target size (capped at max_size)
Returns a list of transformed images, one for each target size. Also returns
the scale factors that were used to compute each returned image.
"""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(
im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR
)
return im, im_scale
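# Illustrative sketch (not part of the original code): the scale selection from
# prep_im_for_blob, reproduced as plain arithmetic. The shorter image side is
# scaled to target_size unless that would push the longer side past max_size,
# in which case the longer side is capped at max_size instead.
def _example_compute_im_scale(im_h, im_w, target_size, max_size):
    im_size_min = float(min(im_h, im_w))
    im_size_max = float(max(im_h, im_w))
    im_scale = target_size / im_size_min
    if round(im_scale * im_size_max) > max_size:
        im_scale = max_size / im_size_max
    return im_scale
# For example, a 500 x 1500 image with target_size=800 and max_size=1333 gets
# im_scale = 1333 / 1500 ~= 0.889 rather than 800 / 500 = 1.6.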
def blob_utils_get_image_blob(im, target_scale, target_max_size):
"""Convert an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
processed_im, im_scale = prep_im_for_blob(
im, cfg.PIXEL_MEANS, target_scale, target_max_size
)
blob = im_list_to_blob(processed_im)
# NOTE: this height and width may be larger than actual scaled input image
# due to the FPN.COARSEST_STRIDE related padding in im_list_to_blob. We are
# maintaining this behavior for now to make existing results exactly
# reproducible (in practice using the true input image height and width
# yields nearly the same results, but they are sometimes slightly different
# because predictions near the edge of the image will be pruned more
# aggressively).
height, width = blob.shape[2], blob.shape[3]
im_info = np.hstack((height, width, im_scale))[np.newaxis, :]
return blob, im_scale, im_info.astype(np.float32)
def _scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)
)
)
return anchors
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _generate_anchors(base_size, scales, aspect_ratios):
"""Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
"""
anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1
anchors = _ratio_enum(anchor, aspect_ratios)
anchors = np.vstack(
[_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
)
return anchors
def generate_anchors(
stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
are centered on stride / 2, have (approximate) sqrt areas of the specified
sizes, and aspect ratios as given.
"""
return _generate_anchors(
stride,
np.array(sizes, dtype=np.float) / stride,
np.array(aspect_ratios, dtype=np.float)
)
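# Illustrative sketch (not part of the original code): with a single size and
# the default aspect ratios, generate_anchors produces three windows centered
# at (stride - 1) / 2 = 7.5 whose areas are approximately 32**2 and whose
# width/height ratios are approximately 2:1, 1:1 and 1:2.
def _example_generate_anchors():
    anchors = generate_anchors(stride=16, sizes=(32,), aspect_ratios=(0.5, 1, 2))
    # anchors has shape (3, 4) in (x1, y1, x2, y2) format.
    widths = anchors[:, 2] - anchors[:, 0] + 1
    heights = anchors[:, 3] - anchors[:, 1] + 1
    centers_x = anchors[:, 0] + 0.5 * (widths - 1)
    # All centers sit at 7.5, and widths * heights is roughly 32 * 32 = 1024.
    return anchors, widths, heights, centers_x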
def _create_cell_anchors():
"""
Generate all types of anchors for all fpn levels/scales/aspect ratios.
This function is called only once at the beginning of inference.
"""
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = cfg.RETINANET.ASPECT_RATIOS
anchor_scale = cfg.RETINANET.ANCHOR_SCALE
A = scales_per_octave * len(aspect_ratios)
anchors = {}
for lvl in range(k_min, k_max + 1):
# create cell anchors array
stride = 2. ** lvl
cell_anchors = np.zeros((A, 4))
a = 0
for octave in range(scales_per_octave):
octave_scale = 2 ** (octave / float(scales_per_octave))
for aspect in aspect_ratios:
anchor_sizes = (stride * octave_scale * anchor_scale, )
anchor_aspect_ratios = (aspect, )
cell_anchors[a, :] = generate_anchors(
stride=stride, sizes=anchor_sizes,
aspect_ratios=anchor_aspect_ratios)
a += 1
anchors[lvl] = cell_anchors
return anchors
def test_retinanet_im_detect_bbox(model, im, timers=None):
"""Generate RetinaNet detections on a single image."""
if timers is None:
timers = defaultdict(Timer)
# Although anchors are input independent and could be precomputed,
# recomputing them per image only brings a small overhead
anchors = _create_cell_anchors()
timers['im_detect_bbox'].tic()
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
A = cfg.RETINANET.SCALES_PER_OCTAVE * len(cfg.RETINANET.ASPECT_RATIOS)
inputs = {}
inputs['data'], im_scale, inputs['im_info'] = \
blob_utils_get_image_blob(im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
cls_probs, box_preds = [], []
for lvl in range(k_min, k_max + 1):
suffix = 'fpn{}'.format(lvl)
cls_probs.append(core.ScopedName('retnet_cls_prob_{}'.format(suffix)))
box_preds.append(core.ScopedName('retnet_bbox_pred_{}'.format(suffix)))
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v.astype(np.float32, copy=False))
workspace.RunNet(model.net.Proto().name)
cls_probs = workspace.FetchBlobs(cls_probs)
box_preds = workspace.FetchBlobs(box_preds)
# here the boxes_all are [x0, y0, x1, y1, score]
boxes_all = defaultdict(list)
cnt = 0
for lvl in range(k_min, k_max + 1):
# create cell anchors array
stride = 2. ** lvl
cell_anchors = anchors[lvl]
# fetch per level probability
cls_prob = cls_probs[cnt]
box_pred = box_preds[cnt]
cls_prob = cls_prob.reshape((
cls_prob.shape[0], A, int(cls_prob.shape[1] / A),
cls_prob.shape[2], cls_prob.shape[3]))
box_pred = box_pred.reshape((
box_pred.shape[0], A, 4, box_pred.shape[2], box_pred.shape[3]))
cnt += 1
if cfg.RETINANET.SOFTMAX:
cls_prob = cls_prob[:, :, 1::, :, :]
cls_prob_ravel = cls_prob.ravel()
        # In some cases (especially for very small image sizes) candidate_inds
        # can come back empty if the inference threshold is imposed at every
        # level, leaving no detections for this image. Hence, for the coarsest
        # level (k_max), which has a small spatial resolution, we drop the
        # threshold to 0.0.
th = cfg.RETINANET.INFERENCE_TH if lvl < k_max else 0.0
candidate_inds = np.where(cls_prob_ravel > th)[0]
if (len(candidate_inds) == 0):
continue
pre_nms_topn = min(cfg.RETINANET.PRE_NMS_TOP_N, len(candidate_inds))
inds = np.argpartition(
cls_prob_ravel[candidate_inds], -pre_nms_topn)[-pre_nms_topn:]
inds = candidate_inds[inds]
inds_5d = np.array(np.unravel_index(inds, cls_prob.shape)).transpose()
classes = inds_5d[:, 2]
anchor_ids, y, x = inds_5d[:, 1], inds_5d[:, 3], inds_5d[:, 4]
scores = cls_prob[:, anchor_ids, classes, y, x]
boxes = np.column_stack((x, y, x, y)).astype(dtype=np.float32)
boxes *= stride
boxes += cell_anchors[anchor_ids, :]
if not cfg.RETINANET.CLASS_SPECIFIC_BBOX:
box_deltas = box_pred[0, anchor_ids, :, y, x]
else:
box_cls_inds = classes * 4
box_deltas = np.vstack(
[box_pred[0, ind:ind + 4, yi, xi]
for ind, yi, xi in zip(box_cls_inds, y, x)]
)
pred_boxes = (
box_utils_bbox_transform(boxes, box_deltas)
if cfg.TEST.BBOX_REG else boxes)
pred_boxes /= im_scale
pred_boxes = box_utils_clip_tiled_boxes(pred_boxes, im.shape)
box_scores = np.zeros((pred_boxes.shape[0], 5))
box_scores[:, 0:4] = pred_boxes
box_scores[:, 4] = scores
for cls in range(1, cfg.MODEL.NUM_CLASSES):
inds = np.where(classes == cls - 1)[0]
if len(inds) > 0:
boxes_all[cls].extend(box_scores[inds, :])
timers['im_detect_bbox'].toc()
# Combine predictions across all levels and retain the top scoring by class
timers['misc_bbox'].tic()
detections = []
for cls, boxes in boxes_all.items():
cls_dets = np.vstack(boxes).astype(dtype=np.float32)
# do class specific nms here
keep = box_utils_nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
out = np.zeros((len(keep), 6))
out[:, 0:5] = cls_dets
out[:, 5].fill(cls)
detections.append(out)
# detections (N, 6) format:
# detections[:, :4] - boxes
# detections[:, 4] - scores
# detections[:, 5] - classes
detections = np.vstack(detections)
# sort all again
inds = np.argsort(-detections[:, 4])
detections = detections[inds[0:cfg.TEST.DETECTIONS_PER_IM], :]
# Convert the detections to image cls_ format (see core/test_engine.py)
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for c in range(1, num_classes):
inds = np.where(detections[:, 5] == c)[0]
cls_boxes[c] = detections[inds, :5]
timers['misc_bbox'].toc()
return cls_boxes
def infer_engine_im_detect_all(model, im, box_proposals, timers=None):
if timers is None:
timers = defaultdict(Timer)
# Handle RetinaNet testing separately for now
if cfg.RETINANET.RETINANET_ON:
cls_boxes = test_retinanet_im_detect_bbox(model, im, timers)
return cls_boxes, None, None
timers['im_detect_bbox'].tic()
if cfg.TEST.BBOX_AUG.ENABLED:
scores, boxes, im_scale = im_detect_bbox_aug(model, im, box_proposals)
else:
scores, boxes, im_scale = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
timers['im_detect_bbox'].toc()
# scores and boxes are from the whole image after score thresholding and NMS
# (they are not separated by class);
# cls_boxes contains boxes and scores separated by class, in the format used
# for evaluating results
timers['misc_bbox'].tic()
scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
timers['misc_bbox'].toc()
if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
timers['im_detect_mask'].tic()
if cfg.TEST.MASK_AUG.ENABLED:
masks = im_detect_mask_aug(model, im, boxes)
else:
masks = im_detect_mask(model, im_scale, boxes)
timers['im_detect_mask'].toc()
timers['misc_mask'].tic()
cls_segms = segm_results(
cls_boxes, masks, boxes, im.shape[0], im.shape[1]
)
timers['misc_mask'].toc()
else:
cls_segms = None
if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
timers['im_detect_keypoints'].tic()
if cfg.TEST.KPS_AUG.ENABLED:
heatmaps = im_detect_keypoints_aug(model, im, boxes)
else:
heatmaps = im_detect_keypoints(model, im_scale, boxes)
timers['im_detect_keypoints'].toc()
timers['misc_keypoints'].tic()
cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
timers['misc_keypoints'].toc()
else:
cls_keyps = None
if cfg.MODEL.BODY_UV_ON and boxes.shape[0] > 0:
timers['im_detect_body_uv'].tic()
cls_bodys = im_detect_body_uv(model, im_scale, boxes)
timers['im_detect_body_uv'].toc()
else:
cls_bodys = None
return cls_boxes, cls_segms, cls_keyps, cls_bodys
def model_builder_add_inference_inputs(model):
"""Create network input blobs used for inference."""
def create_input_blobs_for_net(net_def):
for op in net_def.op:
for blob_in in op.input:
if not workspace.HasBlob(blob_in):
workspace.CreateBlob(blob_in)
create_input_blobs_for_net(model.net.Proto())
if cfg.MODEL.MASK_ON:
create_input_blobs_for_net(model.mask_net.Proto())
if cfg.MODEL.KEYPOINTS_ON:
create_input_blobs_for_net(model.keypoint_net.Proto())
if cfg.MODEL.BODY_UV_ON:
create_input_blobs_for_net(model.body_uv_net.Proto())
def add_single_gpu_param_update_ops(model, gpu_id):
# Learning rate of 0 is a dummy value to be set properly at the
# start of training
lr = model.param_init_net.ConstantFill(
[], 'lr', shape=[1], value=0.0
)
one = model.param_init_net.ConstantFill(
[], 'one', shape=[1], value=1.0
)
wd = model.param_init_net.ConstantFill(
[], 'wd', shape=[1], value=cfg.SOLVER.WEIGHT_DECAY
)
# weight decay of GroupNorm's parameters
wd_gn = model.param_init_net.ConstantFill(
[], 'wd_gn', shape=[1], value=cfg.SOLVER.WEIGHT_DECAY_GN
)
for param in model.TrainableParams(gpu_id=gpu_id):
logger.debug('param ' + str(param) + ' will be updated')
param_grad = model.param_to_grad[param]
# Initialize momentum vector
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
if param in model.biases:
# Special treatment for biases (mainly to match historical impl.
# details):
# (1) Do not apply weight decay
# (2) Use a 2x higher learning rate
model.Scale(param_grad, param_grad, scale=2.0)
elif param in model.gn_params:
# Special treatment for GroupNorm's parameters
model.WeightedSum([param_grad, one, param, wd_gn], param_grad)
elif cfg.SOLVER.WEIGHT_DECAY > 0:
# Apply weight decay to non-bias weights
model.WeightedSum([param_grad, one, param, wd], param_grad)
# Update param_grad and param_momentum in place
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, lr, param],
[param_grad, param_momentum, param],
momentum=cfg.SOLVER.MOMENTUM
)
def _add_allreduce_graph(model):
"""Construct the graph that performs Allreduce on the gradients."""
# Need to all-reduce the per-GPU gradients if training with more than 1 GPU
all_params = model.TrainableParams()
assert len(all_params) % cfg.NUM_GPUS == 0
# The model parameters are replicated on each GPU, get the number
# distinct parameter blobs (i.e., the number of parameter blobs on
# each GPU)
params_per_gpu = int(len(all_params) / cfg.NUM_GPUS)
with c2_utils_CudaScope(0):
# Iterate over distinct parameter blobs
for i in range(params_per_gpu):
# Gradients from all GPUs for this parameter blob
gradients = [
model.param_to_grad[p] for p in all_params[i::params_per_gpu]
]
if len(gradients) > 0:
if cfg.USE_NCCL:
model.net.NCCLAllreduce(gradients, gradients)
else:
muji.Allreduce(model.net, gradients, reduced_affix='')
def _build_forward_graph(model, single_gpu_build_func):
"""Construct the forward graph on each GPU."""
all_loss_gradients = {} # Will include loss gradients from all GPUs
# Build the model on each GPU with correct name and device scoping
for gpu_id in range(cfg.NUM_GPUS):
with c2_utils_NamedCudaScope(gpu_id):
all_loss_gradients.update(single_gpu_build_func(model))
return all_loss_gradients
def optim_build_data_parallel_model(model, single_gpu_build_func):
"""Build a data parallel model given a function that builds the model on a
single GPU.
"""
if model.only_build_forward_pass:
single_gpu_build_func(model)
elif model.train:
all_loss_gradients = _build_forward_graph(model, single_gpu_build_func)
# Add backward pass on all GPUs
model.AddGradientOperators(all_loss_gradients)
if cfg.NUM_GPUS > 1:
_add_allreduce_graph(model)
for gpu_id in range(cfg.NUM_GPUS):
# After allreduce, all GPUs perform SGD updates on their identical
# params and gradients in parallel
with c2_utils_NamedCudaScope(gpu_id):
add_single_gpu_param_update_ops(model, gpu_id)
else:
# Test-time network operates on single GPU
# Test-time parallelism is implemented through multiprocessing
with c2_utils_NamedCudaScope(model.target_gpu_id):
single_gpu_build_func(model)
def body_uv_rcnn_heads_add_body_uv_losses(model, pref=''):
## Reshape for GT blobs.
model.net.Reshape( ['body_uv_X_points'], ['X_points_reshaped'+pref, 'X_points_shape'+pref], shape=( -1 ,1 ) )
model.net.Reshape( ['body_uv_Y_points'], ['Y_points_reshaped'+pref, 'Y_points_shape'+pref], shape=( -1 ,1 ) )
model.net.Reshape( ['body_uv_I_points'], ['I_points_reshaped'+pref, 'I_points_shape'+pref], shape=( -1 ,1 ) )
model.net.Reshape( ['body_uv_Ind_points'], ['Ind_points_reshaped'+pref, 'Ind_points_shape'+pref], shape=( -1 ,1 ) )
## Concat Ind,x,y to get Coordinates blob.
model.net.Concat( ['Ind_points_reshaped'+pref,'X_points_reshaped'+pref, \
'Y_points_reshaped'+pref],['Coordinates'+pref,'Coordinate_Shapes'+pref ], axis = 1 )
##
### Now reshape UV blobs, such that they are 1x1x(196*NumSamples)xNUM_PATCHES
## U blob to
##
model.net.Reshape(['body_uv_U_points'], \
['U_points_reshaped'+pref, 'U_points_old_shape'+pref],\
shape=(-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1,196))
model.net.Transpose(['U_points_reshaped'+pref] ,['U_points_reshaped_transpose'+pref],axes=(0,2,1) )
model.net.Reshape(['U_points_reshaped_transpose'+pref], \
['U_points'+pref, 'U_points_old_shape2'+pref], \
shape=(1,1,-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1))
## V blob
##
model.net.Reshape(['body_uv_V_points'], \
['V_points_reshaped'+pref, 'V_points_old_shape'+pref],\
shape=(-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1,196))
model.net.Transpose(['V_points_reshaped'+pref] ,['V_points_reshaped_transpose'+pref],axes=(0,2,1) )
model.net.Reshape(['V_points_reshaped_transpose'+pref], \
['V_points'+pref, 'V_points_old_shape2'+pref], \
shape=(1,1,-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1))
###
## UV weights blob
##
model.net.Reshape(['body_uv_point_weights'], \
['Uv_point_weights_reshaped'+pref, 'Uv_point_weights_old_shape'+pref],\
shape=(-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1,196))
model.net.Transpose(['Uv_point_weights_reshaped'+pref] ,['Uv_point_weights_reshaped_transpose'+pref],axes=(0,2,1) )
model.net.Reshape(['Uv_point_weights_reshaped_transpose'+pref], \
['Uv_point_weights'+pref, 'Uv_point_weights_old_shape2'+pref], \
shape=(1,1,-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1))
#####################
### Pool IUV for points via bilinear interpolation.
model.PoolPointsInterp(['U_estimated','Coordinates'+pref], ['interp_U'+pref])
model.PoolPointsInterp(['V_estimated','Coordinates'+pref], ['interp_V'+pref])
model.PoolPointsInterp(['Index_UV'+pref,'Coordinates'+pref], ['interp_Index_UV'+pref])
## Reshape interpolated UV coordinates to apply the loss.
model.net.Reshape(['interp_U'+pref], \
['interp_U_reshaped'+pref, 'interp_U_shape'+pref],\
shape=(1, 1, -1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1))
model.net.Reshape(['interp_V'+pref], \
['interp_V_reshaped'+pref, 'interp_V_shape'+pref],\
shape=(1, 1, -1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1))
###
### Do the actual labels here !!!!
model.net.Reshape( ['body_uv_ann_labels'], \
['body_uv_ann_labels_reshaped' +pref, 'body_uv_ann_labels_old_shape'+pref], \
shape=(-1, cfg.BODY_UV_RCNN.HEATMAP_SIZE , cfg.BODY_UV_RCNN.HEATMAP_SIZE))
model.net.Reshape( ['body_uv_ann_weights'], \
['body_uv_ann_weights_reshaped' +pref, 'body_uv_ann_weights_old_shape'+pref], \
shape=( -1 , cfg.BODY_UV_RCNN.HEATMAP_SIZE , cfg.BODY_UV_RCNN.HEATMAP_SIZE))
###
model.net.Cast( ['I_points_reshaped'+pref], ['I_points_reshaped_int'+pref], to=core.DataType.INT32)
### Now add the actual losses
## The mask segmentation loss (dense)
probs_seg_AnnIndex, loss_seg_AnnIndex = model.net.SpatialSoftmaxWithLoss( \
['AnnIndex'+pref, 'body_uv_ann_labels_reshaped'+pref,'body_uv_ann_weights_reshaped'+pref],\
['probs_seg_AnnIndex'+pref,'loss_seg_AnnIndex'+pref], \
scale=cfg.BODY_UV_RCNN.INDEX_WEIGHTS / cfg.NUM_GPUS)
## Point Patch Index Loss.
probs_IndexUVPoints, loss_IndexUVPoints = model.net.SoftmaxWithLoss(\
['interp_Index_UV'+pref,'I_points_reshaped_int'+pref],\
['probs_IndexUVPoints'+pref,'loss_IndexUVPoints'+pref], \
scale=cfg.BODY_UV_RCNN.PART_WEIGHTS / cfg.NUM_GPUS, spatial=0)
## U and V point losses.
loss_Upoints = model.net.SmoothL1Loss( \
['interp_U_reshaped'+pref, 'U_points'+pref, \
'Uv_point_weights'+pref, 'Uv_point_weights'+pref], \
'loss_Upoints'+pref, \
scale=cfg.BODY_UV_RCNN.POINT_REGRESSION_WEIGHTS / cfg.NUM_GPUS)
loss_Vpoints = model.net.SmoothL1Loss( \
['interp_V_reshaped'+pref, 'V_points'+pref, \
'Uv_point_weights'+pref, 'Uv_point_weights'+pref], \
'loss_Vpoints'+pref, scale=cfg.BODY_UV_RCNN.POINT_REGRESSION_WEIGHTS / cfg.NUM_GPUS)
## Add the losses.
loss_gradients = blob_utils_get_loss_gradients(model, \
[ loss_Upoints, loss_Vpoints, loss_seg_AnnIndex, loss_IndexUVPoints])
model.losses = list(set(model.losses + \
['loss_Upoints'+pref , 'loss_Vpoints'+pref , \
'loss_seg_AnnIndex'+pref ,'loss_IndexUVPoints'+pref]))
return loss_gradients
def body_uv_rcnn_heads_add_body_uv_outputs(model, blob_in, dim, pref=''):
####
model.ConvTranspose(blob_in, 'AnnIndex_lowres'+pref, dim, 15,cfg.BODY_UV_RCNN.DECONV_KERNEL, pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1), stride=2, weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}), bias_init=('ConstantFill', {'value': 0.}))
####
model.ConvTranspose(blob_in, 'Index_UV_lowres'+pref, dim, cfg.BODY_UV_RCNN.NUM_PATCHES+1,cfg.BODY_UV_RCNN.DECONV_KERNEL, pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1), stride=2, weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}), bias_init=('ConstantFill', {'value': 0.}))
####
model.ConvTranspose(
blob_in, 'U_lowres'+pref, dim, (cfg.BODY_UV_RCNN.NUM_PATCHES+1),
cfg.BODY_UV_RCNN.DECONV_KERNEL,
pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}),
bias_init=('ConstantFill', {'value': 0.}))
#####
model.ConvTranspose(
blob_in, 'V_lowres'+pref, dim, cfg.BODY_UV_RCNN.NUM_PATCHES+1,
cfg.BODY_UV_RCNN.DECONV_KERNEL,
pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}),
bias_init=('ConstantFill', {'value': 0.}))
####
blob_Ann_Index = model.BilinearInterpolation('AnnIndex_lowres'+pref, 'AnnIndex'+pref, cfg.BODY_UV_RCNN.NUM_PATCHES+1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1, cfg.BODY_UV_RCNN.UP_SCALE)
blob_Index = model.BilinearInterpolation('Index_UV_lowres'+pref, 'Index_UV'+pref, cfg.BODY_UV_RCNN.NUM_PATCHES+1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1, cfg.BODY_UV_RCNN.UP_SCALE)
blob_U = model.BilinearInterpolation('U_lowres'+pref, 'U_estimated'+pref, cfg.BODY_UV_RCNN.NUM_PATCHES+1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1, cfg.BODY_UV_RCNN.UP_SCALE)
blob_V = model.BilinearInterpolation('V_lowres'+pref, 'V_estimated'+pref, cfg.BODY_UV_RCNN.NUM_PATCHES+1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1, cfg.BODY_UV_RCNN.UP_SCALE)
###
return blob_U,blob_V,blob_Index,blob_Ann_Index
def _add_roi_body_uv_head(
model, add_roi_body_uv_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a body UV prediction head to the model."""
# Capture model graph before adding the mask head
bbox_net = copy.deepcopy(model.net.Proto())
# Add the body UV head
blob_body_uv_head, dim_body_uv_head = add_roi_body_uv_head_func(
model, blob_in, dim_in, spatial_scale_in
)
# Add the body UV output
blobs_body_uv = body_uv_rcnn_heads_add_body_uv_outputs(
model, blob_body_uv_head, dim_body_uv_head
)
if not model.train: # == inference
# Inference uses a cascade of box predictions, then body uv predictions
# This requires separate nets for box and body uv prediction.
# So we extract the keypoint prediction net, store it as its own
# network, then restore model.net to be the bbox-only network
model.body_uv_net, body_uv_blob_out = c2_utils_SuffixNet(
'body_uv_net', model.net, len(bbox_net.op), blobs_body_uv
)
model.net._net = bbox_net
loss_gradients = None
else:
loss_gradients = body_uv_rcnn_heads_add_body_uv_losses(model)
return loss_gradients
def keypoint_rcnn_heads_add_keypoint_losses(model):
"""Add Mask R-CNN keypoint specific losses."""
# Reshape input from (N, K, H, W) to (NK, HW)
model.net.Reshape(
['kps_score'], ['kps_score_reshaped', '_kps_score_old_shape'],
shape=(-1, cfg.KRCNN.HEATMAP_SIZE * cfg.KRCNN.HEATMAP_SIZE)
)
# Softmax across **space** (woahh....space!)
# Note: this is not what is commonly called "spatial softmax"
# (i.e., softmax applied along the channel dimension at each spatial
# location); This is softmax applied over a set of spatial locations (i.e.,
# each spatial location is a "class").
kps_prob, loss_kps = model.net.SoftmaxWithLoss(
['kps_score_reshaped', 'keypoint_locations_int32', 'keypoint_weights'],
['kps_prob', 'loss_kps'],
scale=cfg.KRCNN.LOSS_WEIGHT / cfg.NUM_GPUS,
spatial=0
)
if not cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
# Discussion: the softmax loss above will average the loss by the sum of
# keypoint_weights, i.e. the total number of visible keypoints. Since
# the number of visible keypoints can vary significantly between
# minibatches, this has the effect of up-weighting the importance of
# minibatches with few visible keypoints. (Imagine the extreme case of
# only one visible keypoint versus N: in the case of N, each one
# contributes 1/N to the gradient compared to the single keypoint
# determining the gradient direction). Instead, we can normalize the
# loss by the total number of keypoints, if it were the case that all
# keypoints were visible in a full minibatch. (Returning to the example,
# this means that the one visible keypoint contributes as much as each
# of the N keypoints.)
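# Illustration (hypothetical numbers): if minibatch A has 4 visible keypoints
# and minibatch B has 40, the default per-visible-keypoint averaging makes
# each keypoint in A contribute 10x more to the gradient than each keypoint
# in B. Multiplying by the precomputed 'keypoint_loss_normalizer' blob below
# rescales the loss so both minibatches are effectively normalized by the
# same (full) keypoint count.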
model.StopGradient(
'keypoint_loss_normalizer', 'keypoint_loss_normalizer'
)
loss_kps = model.net.Mul(
['loss_kps', 'keypoint_loss_normalizer'], 'loss_kps_normalized'
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_kps])
model.AddLosses(loss_kps)
return loss_gradients
def keypoint_rcnn_heads_add_keypoint_outputs(model, blob_in, dim):
"""Add Mask R-CNN keypoint specific outputs: keypoint heatmaps."""
# NxKxHxW
upsample_heatmap = (cfg.KRCNN.UP_SCALE > 1)
if cfg.KRCNN.USE_DECONV:
# Apply ConvTranspose to the feature representation; results in 2x
# upsampling
blob_in = model.ConvTranspose(
blob_in,
'kps_deconv',
dim,
cfg.KRCNN.DECONV_DIM,
kernel=cfg.KRCNN.DECONV_KERNEL,
pad=int(cfg.KRCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu('kps_deconv', 'kps_deconv')
dim = cfg.KRCNN.DECONV_DIM
if upsample_heatmap:
blob_name = 'kps_score_lowres'
else:
blob_name = 'kps_score'
if cfg.KRCNN.USE_DECONV_OUTPUT:
# Use ConvTranspose to predict heatmaps; results in 2x upsampling
blob_out = model.ConvTranspose(
blob_in,
blob_name,
dim,
cfg.KRCNN.NUM_KEYPOINTS,
kernel=cfg.KRCNN.DECONV_KERNEL,
pad=int(cfg.KRCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=(cfg.KRCNN.CONV_INIT, {'std': 0.001}),
bias_init=const_fill(0.0)
)
else:
# Use Conv to predict heatmaps; does no upsampling
blob_out = model.Conv(
blob_in,
blob_name,
dim,
cfg.KRCNN.NUM_KEYPOINTS,
kernel=1,
pad=0,
stride=1,
weight_init=(cfg.KRCNN.CONV_INIT, {'std': 0.001}),
bias_init=const_fill(0.0)
)
if upsample_heatmap:
# Increase heatmap output size via bilinear upsampling
blob_out = model.BilinearInterpolation(
blob_out, 'kps_score', cfg.KRCNN.NUM_KEYPOINTS,
cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.UP_SCALE
)
return blob_out
def _add_roi_keypoint_head(
model, add_roi_keypoint_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a keypoint prediction head to the model."""
# Capture model graph before adding the mask head
bbox_net = copy.deepcopy(model.net.Proto())
# Add the keypoint head
blob_keypoint_head, dim_keypoint_head = add_roi_keypoint_head_func(
model, blob_in, dim_in, spatial_scale_in
)
# Add the keypoint output
blob_keypoint = keypoint_rcnn_heads_add_keypoint_outputs(
model, blob_keypoint_head, dim_keypoint_head
)
if not model.train: # == inference
# Inference uses a cascade of box predictions, then keypoint predictions
# This requires separate nets for box and keypoint prediction.
# So we extract the keypoint prediction net, store it as its own
# network, then restore model.net to be the bbox-only network
model.keypoint_net, keypoint_blob_out = c2_utils_SuffixNet(
'keypoint_net', model.net, len(bbox_net.op), blob_keypoint
)
model.net._net = bbox_net
loss_gradients = None
else:
loss_gradients = keypoint_rcnn_heads_add_keypoint_losses(model)
return loss_gradients
def mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses."""
loss_mask = model.net.SigmoidCrossEntropyLoss(
[blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients
def BlobReferenceList(blob_ref_or_list):
"""Ensure that the argument is returned as a list of BlobReferences."""
if isinstance(blob_ref_or_list, core.BlobReference):
return [blob_ref_or_list]
elif type(blob_ref_or_list) in (list, tuple):
for b in blob_ref_or_list:
assert isinstance(b, core.BlobReference)
return blob_ref_or_list
else:
raise TypeError(
'blob_ref_or_list must be a BlobReference or a list/tuple of '
'BlobReferences'
)
def c2_utils_SuffixNet(name, net, prefix_len, outputs):
"""Returns a new Net from the given Net (`net`) that includes only the ops
after removing the first `prefix_len` number of ops. The new Net is thus a
suffix of `net`. Blobs listed in `outputs` are registered as external output
blobs.
"""
outputs = BlobReferenceList(outputs)
for output in outputs:
assert net.BlobIsDefined(output)
new_net = net.Clone(name)
del new_net.Proto().op[:]
del new_net.Proto().external_input[:]
del new_net.Proto().external_output[:]
# Add suffix ops
new_net.Proto().op.extend(net.Proto().op[prefix_len:])
# Add external input blobs
# Treat any undefined blobs as external inputs
input_names = [
i for op in new_net.Proto().op for i in op.input
if not new_net.BlobIsDefined(i)]
new_net.Proto().external_input.extend(input_names)
# Add external output blobs
output_names = [str(o) for o in outputs]
new_net.Proto().external_output.extend(output_names)
return new_net, [new_net.GetBlobRef(o) for o in output_names]
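# Sketch of how this is used by the inference-time cascades below (see
# _add_roi_mask_head / _add_roi_keypoint_head / _add_roi_body_uv_head): if the
# bbox-only net contains K ops and a head appended M more, then
# c2_utils_SuffixNet('mask_net', model.net, K, blob_mask) returns a new net
# holding just those M head ops; any blob they read that was produced by the
# first K ops becomes an external input of the new net.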
def mask_rcnn_heads_add_mask_rcnn_outputs(model, blob_in, dim):
"""Add Mask R-CNN specific outputs: either mask logits or probs."""
num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1
if cfg.MRCNN.USE_FC_OUTPUT:
# Predict masks with a fully connected layer (ignore 'fcn' in the blob
# name)
blob_out = model.FC(
blob_in,
'mask_fcn_logits',
dim,
num_cls * cfg.MRCNN.RESOLUTION**2,
weight_init=gauss_fill(0.001),
bias_init=const_fill(0.0)
)
else:
# Predict mask using Conv
# Use GaussianFill for class-agnostic mask prediction; fills based on
# fan-in can be too large in this case and cause divergence
fill = (
cfg.MRCNN.CONV_INIT
if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
)
blob_out = model.Conv(
blob_in,
'mask_fcn_logits',
dim,
num_cls,
kernel=1,
pad=0,
stride=1,
weight_init=(fill, {'std': 0.001}),
bias_init=const_fill(0.0)
)
if cfg.MRCNN.UPSAMPLE_RATIO > 1:
blob_out = model.BilinearInterpolation(
'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
cfg.MRCNN.UPSAMPLE_RATIO
)
if not model.train: # == if test
blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')
return blob_out
def _add_roi_mask_head(
model, add_roi_mask_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a mask prediction head to the model."""
# Capture model graph before adding the mask head
bbox_net = copy.deepcopy(model.net.Proto())
# Add the mask head
blob_mask_head, dim_mask_head = add_roi_mask_head_func(
model, blob_in, dim_in, spatial_scale_in
)
# Add the mask output
blob_mask = mask_rcnn_heads_add_mask_rcnn_outputs(
model, blob_mask_head, dim_mask_head
)
if not model.train: # == inference
# Inference uses a cascade of box predictions, then mask predictions.
# This requires separate nets for box and mask prediction.
# So we extract the mask prediction net, store it as its own network,
# then restore model.net to be the bbox-only network
model.mask_net, blob_mask = c2_utils_SuffixNet(
'mask_net', model.net, len(bbox_net.op), blob_mask
)
model.net._net = bbox_net
loss_gradients = None
else:
loss_gradients = mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask)
return loss_gradients
def fast_rcnn_heads_add_fast_rcnn_losses(model):
"""Add losses for RoI classification and bounding box regression."""
cls_prob, loss_cls = model.net.SoftmaxWithLoss(
['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
scale=model.GetLossScale()
)
loss_bbox = model.net.SmoothL1Loss(
[
'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
'bbox_outside_weights'
],
'loss_bbox',
scale=model.GetLossScale()
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_cls, loss_bbox])
model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
model.AddLosses(['loss_cls', 'loss_bbox'])
model.AddMetrics('accuracy_cls')
return loss_gradients
def fast_rcnn_heads_add_fast_rcnn_outputs(model, blob_in, dim):
"""Add RoI classification and bounding box regression output ops."""
# Box classification layer
model.FC(
blob_in,
'cls_score',
dim,
model.num_classes,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train: # == if test
# Only add softmax when testing; during training the softmax is combined
# with the label cross entropy loss for numerical stability
model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
# Box regression layer
num_bbox_reg_classes = (
2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
)
model.FC(
blob_in,
'bbox_pred',
dim,
num_bbox_reg_classes * 4,
weight_init=gauss_fill(0.001),
bias_init=const_fill(0.0)
)
def _add_fast_rcnn_head(
model, add_roi_box_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a Fast R-CNN head to the model."""
blob_frcn, dim_frcn = add_roi_box_head_func(
model, blob_in, dim_in, spatial_scale_in
)
fast_rcnn_heads_add_fast_rcnn_outputs(model, blob_frcn, dim_frcn)
if model.train:
loss_gradients = fast_rcnn_heads_add_fast_rcnn_losses(model)
else:
loss_gradients = None
return loss_gradients
def _narrow_to_fpn_roi_levels(blobs, spatial_scales):
"""Return only the blobs and spatial scales that will be used for RoI heads.
Inputs `blobs` and `spatial_scales` may include extra blobs and scales that
are used for RPN proposals, but not for RoI heads.
"""
# Code only supports case when RPN and ROI min levels are the same
assert cfg.FPN.RPN_MIN_LEVEL == cfg.FPN.ROI_MIN_LEVEL
# RPN max level can be >= to ROI max level
assert cfg.FPN.RPN_MAX_LEVEL >= cfg.FPN.ROI_MAX_LEVEL
# FPN RPN max level might be > FPN ROI max level in which case we
# need to discard some leading conv blobs (blobs are ordered from
# max/coarsest level to min/finest level)
num_roi_levels = cfg.FPN.ROI_MAX_LEVEL - cfg.FPN.ROI_MIN_LEVEL + 1
return blobs[-num_roi_levels:], spatial_scales[-num_roi_levels:]
def add_single_scale_rpn_losses(model):
"""Add losses for a single scale RPN model (i.e., no FPN)."""
# Spatially narrow the full-sized RPN label arrays to match the feature map
# shape
model.net.SpatialNarrowAs(
['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32'
)
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(
['rpn_bbox_' + key + '_wide', 'rpn_bbox_pred'], 'rpn_bbox_' + key
)
loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(
['rpn_cls_logits', 'rpn_labels_int32'],
'loss_rpn_cls',
scale=model.GetLossScale()
)
loss_rpn_bbox = model.net.SmoothL1Loss(
[
'rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
'rpn_bbox_outside_weights'
],
'loss_rpn_bbox',
beta=1. / 9.,
scale=model.GetLossScale()
)
loss_gradients = blob_utils_get_loss_gradients(
model, [loss_rpn_cls, loss_rpn_bbox]
)
model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
return loss_gradients
def add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale):
"""Add RPN outputs to a single scale model (i.e., no FPN)."""
anchors = generate_anchors(
stride=1. / spatial_scale,
sizes=cfg.RPN.SIZES,
aspect_ratios=cfg.RPN.ASPECT_RATIOS
)
num_anchors = anchors.shape[0]
dim_out = dim_in
# RPN hidden representation
model.Conv(
blob_in,
'conv_rpn',
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu('conv_rpn', 'conv_rpn')
# Proposal classification scores
model.Conv(
'conv_rpn',
'rpn_cls_logits',
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Proposal bbox regression deltas
model.Conv(
'conv_rpn',
'rpn_bbox_pred',
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
model.net.Sigmoid('rpn_cls_logits', 'rpn_cls_probs')
model.GenerateProposals(
['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'],
['rpn_rois', 'rpn_roi_probs'],
anchors=anchors,
spatial_scale=spatial_scale
)
if cfg.MODEL.FASTER_RCNN:
if model.train:
# Add op that generates training labels for in-network RPN proposals
model.GenerateProposalLabels(['rpn_rois', 'roidb', 'im_info'])
else:
# Alias rois to rpn_rois for inference
model.net.Alias('rpn_rois', 'rois')
def blob_utils_get_loss_gradients(model, loss_blobs):
"""Generate a gradient of 1 for each loss specified in 'loss_blobs'"""
loss_gradients = {}
for b in loss_blobs:
loss_grad = model.net.ConstantFill(b, [b + '_grad'], value=1.0)
loss_gradients[str(b)] = str(loss_grad)
return loss_gradients
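# Example (ignoring GPU name scoping): for loss_blobs ['loss_cls', 'loss_bbox']
# this adds ConstantFill ops producing 'loss_cls_grad' and 'loss_bbox_grad'
# (each filled with 1.0) and returns
# {'loss_cls': 'loss_cls_grad', 'loss_bbox': 'loss_bbox_grad'}.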
def FPN_add_fpn_rpn_losses(model):
"""Add RPN on FPN specific losses."""
loss_gradients = {}
for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
slvl = str(lvl)
# Spatially narrow the full-sized RPN label arrays to match the feature map
# shape
model.net.SpatialNarrowAs(
['rpn_labels_int32_wide_fpn' + slvl, 'rpn_cls_logits_fpn' + slvl],
'rpn_labels_int32_fpn' + slvl
)
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(
[
'rpn_bbox_' + key + '_wide_fpn' + slvl,
'rpn_bbox_pred_fpn' + slvl
],
'rpn_bbox_' + key + '_fpn' + slvl
)
loss_rpn_cls_fpn = model.net.SigmoidCrossEntropyLoss(
['rpn_cls_logits_fpn' + slvl, 'rpn_labels_int32_fpn' + slvl],
'loss_rpn_cls_fpn' + slvl,
normalize=0,
scale=(
model.GetLossScale() / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
cfg.TRAIN.IMS_PER_BATCH
)
)
# Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
# handled by (1) setting the bbox outside weights and (2) SmoothL1Loss
# normalizing by IMS_PER_BATCH
loss_rpn_bbox_fpn = model.net.SmoothL1Loss(
[
'rpn_bbox_pred_fpn' + slvl, 'rpn_bbox_targets_fpn' + slvl,
'rpn_bbox_inside_weights_fpn' + slvl,
'rpn_bbox_outside_weights_fpn' + slvl
],
'loss_rpn_bbox_fpn' + slvl,
beta=1. / 9.,
scale=model.GetLossScale(),
)
loss_gradients.update(
blob_utils_get_loss_gradients(model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn])
)
model.AddLosses(['loss_rpn_cls_fpn' + slvl, 'loss_rpn_bbox_fpn' + slvl])
return loss_gradients
def const_fill(value):
"""Constant fill helper to reduce verbosity."""
return ('ConstantFill', {'value': value})
def gauss_fill(std):
"""Gaussian fill helper to reduce verbosity."""
return ('GaussianFill', {'std': std})
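# These helpers produce the (op_name, kwargs) initializer tuples expected by
# the Conv/FC/ConvTranspose calls throughout this file, e.g.:
#   const_fill(0.0)  -> ('ConstantFill', {'value': 0.0})
#   gauss_fill(0.01) -> ('GaussianFill', {'std': 0.01})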
def FPN_add_fpn_rpn_outputs(model, blobs_in, dim_in, spatial_scales):
"""Add RPN on FPN specific outputs."""
num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)
dim_out = dim_in
k_max = cfg.FPN.RPN_MAX_LEVEL # coarsest level of pyramid
k_min = cfg.FPN.RPN_MIN_LEVEL # finest level of pyramid
assert len(blobs_in) == k_max - k_min + 1
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order
sc = spatial_scales[k_max - lvl] # in reversed order
slvl = str(lvl)
if lvl == k_min:
# Create conv ops with randomly initialized weights and
# zeroed biases for the first FPN level; these will be shared by
# all other FPN levels
# RPN hidden representation
conv_rpn_fpn = model.Conv(
bl_in,
'conv_rpn_fpn' + slvl,
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu(conv_rpn_fpn, conv_rpn_fpn)
# Proposal classification scores
rpn_cls_logits_fpn = model.Conv(
conv_rpn_fpn,
'rpn_cls_logits_fpn' + slvl,
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Proposal bbox regression deltas
rpn_bbox_pred_fpn = model.Conv(
conv_rpn_fpn,
'rpn_bbox_pred_fpn' + slvl,
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
else:
# Share weights and biases
sk_min = str(k_min)
# RPN hidden representation
conv_rpn_fpn = model.ConvShared(
bl_in,
'conv_rpn_fpn' + slvl,
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight='conv_rpn_fpn' + sk_min + '_w',
bias='conv_rpn_fpn' + sk_min + '_b'
)
model.Relu(conv_rpn_fpn, conv_rpn_fpn)
# Proposal classification scores
rpn_cls_logits_fpn = model.ConvShared(
conv_rpn_fpn,
'rpn_cls_logits_fpn' + slvl,
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight='rpn_cls_logits_fpn' + sk_min + '_w',
bias='rpn_cls_logits_fpn' + sk_min + '_b'
)
# Proposal bbox regression deltas
rpn_bbox_pred_fpn = model.ConvShared(
conv_rpn_fpn,
'rpn_bbox_pred_fpn' + slvl,
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight='rpn_bbox_pred_fpn' + sk_min + '_w',
bias='rpn_bbox_pred_fpn' + sk_min + '_b'
)
if not model.train or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
lvl_anchors = generate_anchors(
stride=2.**lvl,
sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), ),
aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS
)
rpn_cls_probs_fpn = model.net.Sigmoid(
rpn_cls_logits_fpn, 'rpn_cls_probs_fpn' + slvl
)
model.GenerateProposals(
[rpn_cls_probs_fpn, rpn_bbox_pred_fpn, 'im_info'],
['rpn_rois_fpn' + slvl, 'rpn_roi_probs_fpn' + slvl],
anchors=lvl_anchors,
spatial_scale=sc
)
def rpn_heads_add_generic_rpn_outputs(model, blob_in, dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
loss_gradients = None
if cfg.FPN.FPN_ON:
# Delegate to the FPN module
FPN_add_fpn_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
# CollectAndDistributeFpnRpnProposals also labels proposals when in
# training mode
model.CollectAndDistributeFpnRpnProposals()
if model.train:
loss_gradients = FPN_add_fpn_rpn_losses(model)
else:
# Not using FPN, add RPN to a single scale
add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if model.train:
loss_gradients = add_single_scale_rpn_losses(model)
return loss_gradients
def c2_utils_BlobReferenceList(blob_ref_or_list):
"""Ensure that the argument is returned as a list of BlobReferences."""
if isinstance(blob_ref_or_list, core.BlobReference):
return [blob_ref_or_list]
elif type(blob_ref_or_list) in (list, tuple):
for b in blob_ref_or_list:
assert isinstance(b, core.BlobReference)
return blob_ref_or_list
else:
raise TypeError(
'blob_ref_or_list must be a BlobReference or a list/tuple of '
'BlobReferences'
)
def build_generic_detection_model(
model,
add_conv_body_func,
add_roi_box_head_func=None,
add_roi_mask_head_func=None,
add_roi_keypoint_head_func=None,
add_roi_body_uv_head_func=None,
freeze_conv_body=False
):
def _single_gpu_build_func(model):
"""Build the model on a single GPU. Can be called in a loop over GPUs
with name and device scoping to create a data parallel model.
"""
# Add the conv body (called "backbone architecture" in papers)
# E.g., ResNet-50, ResNet-50-FPN, ResNeXt-101-FPN, etc.
blob_conv, dim_conv, spatial_scale_conv = add_conv_body_func(model)
if freeze_conv_body:
for b in c2_utils_BlobReferenceList(blob_conv):
model.StopGradient(b, b)
if not model.train: # == inference
# Create a net that can be used to execute the conv body on an image
# (without also executing RPN or any other network heads)
model.conv_body_net = model.net.Clone('conv_body_net')
head_loss_gradients = {
'rpn': None,
'box': None,
'mask': None,
'keypoints': None,
'body_uv' : None,
}
if cfg.RPN.RPN_ON:
# Add the RPN head
head_loss_gradients['rpn'] = rpn_heads_add_generic_rpn_outputs(
model, blob_conv, dim_conv, spatial_scale_conv
)
if cfg.FPN.FPN_ON:
# After adding the RPN head, restrict FPN blobs and scales to
# those used in the RoI heads
blob_conv, spatial_scale_conv = _narrow_to_fpn_roi_levels(
blob_conv, spatial_scale_conv
)
if not cfg.MODEL.RPN_ONLY:
# Add the Fast R-CNN head
head_loss_gradients['box'] = _add_fast_rcnn_head(
model, add_roi_box_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if cfg.MODEL.MASK_ON:
# Add the mask head
head_loss_gradients['mask'] = _add_roi_mask_head(
model, add_roi_mask_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if cfg.MODEL.KEYPOINTS_ON:
# Add the keypoint head
head_loss_gradients['keypoint'] = _add_roi_keypoint_head(
model, add_roi_keypoint_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if cfg.MODEL.BODY_UV_ON:
# Add the body UV head
head_loss_gradients['body_uv'] = _add_roi_body_uv_head(
model, add_roi_body_uv_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if model.train:
loss_gradients = {}
for lg in head_loss_gradients.values():
if lg is not None:
loss_gradients.update(lg)
return loss_gradients
else:
return None
optim_build_data_parallel_model(model, _single_gpu_build_func)
return model
def get_group_gn(dim):
"""
Get the number of groups used by GroupNorm, based on the number of channels.
"""
dim_per_gp = cfg.GROUP_NORM.DIM_PER_GP
num_groups = cfg.GROUP_NORM.NUM_GROUPS
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0
group_gn = num_groups
return group_gn
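# Minimal standalone sketch of the same rule (illustrative only, hypothetical
# values, no dependency on cfg): exactly one of dim_per_gp / num_groups is
# active and the other is disabled with -1.
def _get_group_gn_example(dim=256, dim_per_gp=-1, num_groups=32):
    """Returns 32 for the defaults above, or 16 for dim=256, dim_per_gp=16,
    num_groups=-1."""
    assert dim_per_gp == -1 or num_groups == -1
    if dim_per_gp > 0:
        assert dim % dim_per_gp == 0
        return dim // dim_per_gp
    assert dim % num_groups == 0
    return num_groups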
def add_topdown_lateral_module(
model, fpn_top, fpn_lateral, fpn_bottom, dim_top, dim_lateral
):
"""Add a top-down lateral module."""
# Lateral 1x1 conv
if cfg.FPN.USE_GN:
# use GroupNorm
lat = model.ConvGN(
fpn_lateral,
fpn_bottom + '_lateral',
dim_in=dim_lateral,
dim_out=dim_top,
group_gn=get_group_gn(dim_top),
kernel=1,
pad=0,
stride=1,
weight_init=(
const_fill(0.0) if cfg.FPN.ZERO_INIT_LATERAL
else ('XavierFill', {})),
bias_init=const_fill(0.0)
)
else:
lat = model.Conv(
fpn_lateral,
fpn_bottom + '_lateral',
dim_in=dim_lateral,
dim_out=dim_top,
kernel=1,
pad=0,
stride=1,
weight_init=(
const_fill(0.0)
if cfg.FPN.ZERO_INIT_LATERAL else ('XavierFill', {})
),
bias_init=const_fill(0.0)
)
# Top-down 2x upsampling
td = model.net.UpsampleNearest(fpn_top, fpn_bottom + '_topdown', scale=2)
# Sum lateral and top-down
model.net.Sum([lat, td], fpn_bottom)
def get_min_max_levels():
"""The min and max FPN levels required for supporting RPN and/or RoI
transform operations on multiple FPN levels.
"""
min_level = LOWEST_BACKBONE_LVL
max_level = HIGHEST_BACKBONE_LVL
if cfg.FPN.MULTILEVEL_RPN and not cfg.FPN.MULTILEVEL_ROIS:
max_level = cfg.FPN.RPN_MAX_LEVEL
min_level = cfg.FPN.RPN_MIN_LEVEL
if not cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS:
max_level = cfg.FPN.ROI_MAX_LEVEL
min_level = cfg.FPN.ROI_MIN_LEVEL
if cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS:
max_level = max(cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.ROI_MAX_LEVEL)
min_level = min(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.ROI_MIN_LEVEL)
return min_level, max_level
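# Example (typical FPN settings, values hypothetical): with MULTILEVEL_RPN and
# MULTILEVEL_ROIS both enabled, RPN levels 2-6 and RoI levels 2-5, this returns
# (min_level, max_level) = (2, 6), i.e. the union of the two level ranges.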
def add_fpn(model, fpn_level_info):
"""Add FPN connections based on the model described in the FPN paper."""
# FPN levels are built starting from the highest/coarsest level of the
# backbone (usually "conv5"). First we build down, recursively constructing
# lower/finer resolution FPN levels. Then we build up, constructing levels
# that are even higher/coarser than the starting level.
fpn_dim = cfg.FPN.DIM
min_level, max_level = get_min_max_levels()
# Count the number of backbone stages that we will generate FPN levels for
# starting from the coarsest backbone stage (usually the "conv5"-like level).
# E.g., if the backbone level info defines 4 stages: "conv5",
# "conv4", ... "conv2" and min_level=2, then we end up with 4 - (2 - 2) = 4
# backbone stages to add FPN to.
num_backbone_stages = (
len(fpn_level_info.blobs) - (min_level - LOWEST_BACKBONE_LVL)
)
lateral_input_blobs = fpn_level_info.blobs[:num_backbone_stages]
output_blobs = [
'fpn_inner_{}'.format(s)
for s in fpn_level_info.blobs[:num_backbone_stages]
]
fpn_dim_lateral = fpn_level_info.dims
xavier_fill = ('XavierFill', {})
# For the coarsest backbone level: 1x1 conv only seeds recursion
if cfg.FPN.USE_GN:
# use GroupNorm
c = model.ConvGN(
lateral_input_blobs[0],
output_blobs[0], # note: this is a prefix
dim_in=fpn_dim_lateral[0],
dim_out=fpn_dim,
group_gn=get_group_gn(fpn_dim),
kernel=1,
pad=0,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
output_blobs[0] = c # rename it
else:
model.Conv(
lateral_input_blobs[0],
output_blobs[0],
dim_in=fpn_dim_lateral[0],
dim_out=fpn_dim,
kernel=1,
pad=0,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
#
# Step 1: recursively build down starting from the coarsest backbone level
#
# For other levels add top-down and lateral connections
for i in range(num_backbone_stages - 1):
add_topdown_lateral_module(
model,
output_blobs[i], # top-down blob
lateral_input_blobs[i + 1], # lateral blob
output_blobs[i + 1], # next output blob
fpn_dim, # output dimension
fpn_dim_lateral[i + 1] # lateral input dimension
)
# Post-hoc scale-specific 3x3 convs
blobs_fpn = []
spatial_scales = []
for i in range(num_backbone_stages):
if cfg.FPN.USE_GN:
# use GroupNorm
fpn_blob = model.ConvGN(
output_blobs[i],
'fpn_{}'.format(fpn_level_info.blobs[i]),
dim_in=fpn_dim,
dim_out=fpn_dim,
group_gn=get_group_gn(fpn_dim),
kernel=3,
pad=1,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
else:
fpn_blob = model.Conv(
output_blobs[i],
'fpn_{}'.format(fpn_level_info.blobs[i]),
dim_in=fpn_dim,
dim_out=fpn_dim,
kernel=3,
pad=1,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
blobs_fpn += [fpn_blob]
spatial_scales += [fpn_level_info.spatial_scales[i]]
#
# Step 2: build up starting from the coarsest backbone level
#
# Check if we need the P6 feature map
if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_BACKBONE_LVL + 1:
# Original FPN P6 level implementation from our CVPR'17 FPN paper
P6_blob_in = blobs_fpn[0]
P6_name = P6_blob_in + '_subsampled_2x'
# Use max pooling to simulate stride 2 subsampling
P6_blob = model.MaxPool(P6_blob_in, P6_name, kernel=1, pad=0, stride=2)
blobs_fpn.insert(0, P6_blob)
spatial_scales.insert(0, spatial_scales[0] * 0.5)
# Coarser FPN levels introduced for RetinaNet
if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_BACKBONE_LVL:
fpn_blob = fpn_level_info.blobs[0]
dim_in = fpn_level_info.dims[0]
for i in range(HIGHEST_BACKBONE_LVL + 1, max_level + 1):
fpn_blob_in = fpn_blob
if i > HIGHEST_BACKBONE_LVL + 1:
fpn_blob_in = model.Relu(fpn_blob, fpn_blob + '_relu')
fpn_blob = model.Conv(
fpn_blob_in,
'fpn_' + str(i),
dim_in=dim_in,
dim_out=fpn_dim,
kernel=3,
pad=1,
stride=2,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
dim_in = fpn_dim
blobs_fpn.insert(0, fpn_blob)
spatial_scales.insert(0, spatial_scales[0] * 0.5)
return blobs_fpn, fpn_dim, spatial_scales
def add_fpn_onto_conv_body(
model, conv_body_func, fpn_level_info_func, P2only=False
):
"""Add the specified conv body to the model and then add FPN levels to it.
"""
# Note: blobs_conv is in reversed order: [fpn5, fpn4, fpn3, fpn2]
# similarly for dims_conv: [2048, 1024, 512, 256]
# similarly for spatial_scales_fpn: [1/32, 1/16, 1/8, 1/4]
conv_body_func(model)
blobs_fpn, dim_fpn, spatial_scales_fpn = add_fpn(
model, fpn_level_info_func()
)
if P2only:
# use only the finest level
return blobs_fpn[-1], dim_fpn, spatial_scales_fpn[-1]
else:
# use all levels
return blobs_fpn, dim_fpn, spatial_scales_fpn
def fpn_level_info_ResNet101_conv5():
return FpnLevelInfo(
blobs=('res5_2_sum', 'res4_22_sum', 'res3_3_sum', 'res2_2_sum'),
dims=(2048, 1024, 512, 256),
spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)
)
def add_residual_block(
model,
prefix,
blob_in,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init=2,
inplace_sum=False
):
"""Add a residual block to the model."""
# prefix = res<stage>_<sub_stage>, e.g., res2_3
# Max pooling is performed prior to the first stage (which is uniquely
# distinguished by dim_in = 64), thus we keep stride = 1 for the first stage
stride = stride_init if (
dim_in != dim_out and dim_in != 64 and dilation == 1
) else 1
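# e.g. res3_0 (dim_in=256 -> dim_out=512) downsamples with stride=stride_init,
# while res3_1..res3_N (dim_in == dim_out) and the res2 blocks (dim_in == 64
# after the stem for res2_0) keep stride = 1.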
# transformation blob
tr = globals()[cfg.RESNETS.TRANS_FUNC](
model,
blob_in,
dim_in,
dim_out,
stride,
prefix,
dim_inner,
group=cfg.RESNETS.NUM_GROUPS,
dilation=dilation
)
# sum -> ReLU
# shortcut function: by default using bn; support gn
add_shortcut = globals()[cfg.RESNETS.SHORTCUT_FUNC]
sc = add_shortcut(model, prefix, blob_in, dim_in, dim_out, stride)
if inplace_sum:
s = model.net.Sum([tr, sc], tr)
else:
s = model.net.Sum([tr, sc], prefix + '_sum')
return model.Relu(s, s)
def add_stage(
model,
prefix,
blob_in,
n,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init=2
):
"""Add a ResNet stage to the model by stacking n residual blocks."""
# e.g., prefix = res2
for i in range(n):
blob_in = add_residual_block(
model,
'{}_{}'.format(prefix, i),
blob_in,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init,
# Not using inplace for the last block;
# it may be fetched externally or used by FPN
inplace_sum=i < n - 1
)
dim_in = dim_out
return blob_in, dim_in
def add_ResNet_convX_body(model, block_counts, freeze_at=2):
"""Add a ResNet body from input data up through the res5 (aka conv5) stage.
The final res5/conv5 stage may be optionally excluded (hence convX, where
X = 4 or 5)."""
assert freeze_at in [0, 2, 3, 4, 5]
# add the stem (by default, conv1 and pool1 with bn; can support gn)
p, dim_in = globals()[cfg.RESNETS.STEM_FUNC](model, 'data')
dim_bottleneck = cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP
(n1, n2, n3) = block_counts[:3]
s, dim_in = add_stage(model, 'res2', p, n1, dim_in, 256, dim_bottleneck, 1)
if freeze_at == 2:
model.StopGradient(s, s)
s, dim_in = add_stage(
model, 'res3', s, n2, dim_in, 512, dim_bottleneck * 2, 1
)
if freeze_at == 3:
model.StopGradient(s, s)
s, dim_in = add_stage(
model, 'res4', s, n3, dim_in, 1024, dim_bottleneck * 4, 1
)
if freeze_at == 4:
model.StopGradient(s, s)
if len(block_counts) == 4:
n4 = block_counts[3]
s, dim_in = add_stage(
model, 'res5', s, n4, dim_in, 2048, dim_bottleneck * 8,
cfg.RESNETS.RES5_DILATION
)
if freeze_at == 5:
model.StopGradient(s, s)
return s, dim_in, 1. / 32. * cfg.RESNETS.RES5_DILATION
else:
return s, dim_in, 1. / 16.
def ResNet_add_ResNet101_conv5_body(model):
return add_ResNet_convX_body(model, (3, 4, 23, 3))
def FPN_add_fpn_ResNet101_conv5_body(model):
return add_fpn_onto_conv_body(
model, ResNet_add_ResNet101_conv5_body, fpn_level_info_ResNet101_conv5
)
def bottleneck_transformation(
model,
blob_in,
dim_in,
dim_out,
stride,
prefix,
dim_inner,
dilation=1,
group=1
):
"""Add a bottleneck transformation to the model."""
# In original resnet, stride=2 is on 1x1.
# In fb.torch resnet, stride=2 is on 3x3.
(str1x1, str3x3) = (stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride)
# conv 1x1 -> BN -> ReLU
cur = model.ConvAffine(
blob_in,
prefix + '_branch2a',
dim_in,
dim_inner,
kernel=1,
stride=str1x1,
pad=0,
inplace=True
)
cur = model.Relu(cur, cur)
# conv 3x3 -> BN -> ReLU
cur = model.ConvAffine(
cur,
prefix + '_branch2b',
dim_inner,
dim_inner,
kernel=3,
stride=str3x3,
pad=1 * dilation,
dilation=dilation,
group=group,
inplace=True
)
cur = model.Relu(cur, cur)
# conv 1x1 -> BN (no ReLU)
# NB: for now this AffineChannel op cannot be in-place due to a bug in C2
# gradient computation for graphs like this
cur = model.ConvAffine(
cur,
prefix + '_branch2c',
dim_inner,
dim_out,
kernel=1,
stride=1,
pad=0,
inplace=False
)
return cur
def basic_bn_shortcut(model, prefix, blob_in, dim_in, dim_out, stride):
""" For a pre-trained network that used BN. An AffineChannel op replaces BN
during fine-tuning.
"""
if dim_in == dim_out:
return blob_in
c = model.Conv(
blob_in,
prefix + '_branch1',
dim_in,
dim_out,
kernel=1,
stride=stride,
no_bias=1
)
return model.AffineChannel(c, prefix + '_branch1_bn', dim=dim_out)
def basic_bn_stem(model, data, **kwargs):
"""Add a basic ResNet stem. For a pre-trained network that used BN.
An AffineChannel op replaces BN during fine-tuning.
"""
dim = 64
p = model.Conv(data, 'conv1', 3, dim, 7, pad=3, stride=2, no_bias=1)
p = model.AffineChannel(p, 'res_conv1_bn', dim=dim, inplace=True)
p = model.Relu(p, p)
p = model.MaxPool(p, 'pool1', kernel=3, pad=1, stride=2)
return p, dim
def get_func(func_name):
"""Helper to return a function object by name. func_name must identify a
function in this module or the path to a function relative to the base
'modeling' module.
"""
if func_name == '':
return None
try:
parts = func_name.split('.')
# Refers to a function in this module
if len(parts) == 1:
return globals()[parts[0]]
# Otherwise, assume we're referencing a module under modeling
module_name = 'detectron.modeling.' + '.'.join(parts[:-1])
module = importlib.import_module(module_name)
return getattr(module, parts[-1])
except Exception:
logger.error('Failed to find function: {}'.format(func_name))
raise
def body_uv_rcnn_heads_add_roi_body_uv_head_v1convX(model, blob_in, dim_in, spatial_scale):
"""v1convX design: X * (conv)."""
hidden_dim = cfg.BODY_UV_RCNN.CONV_HEAD_DIM
kernel_size = cfg.BODY_UV_RCNN.CONV_HEAD_KERNEL
pad_size = kernel_size // 2
current = model.RoIFeatureTransform(
blob_in,
'_[body_uv]_roi_feat',
blob_rois='body_uv_rois',
method=cfg.BODY_UV_RCNN.ROI_XFORM_METHOD,
resolution=cfg.BODY_UV_RCNN.ROI_XFORM_RESOLUTION,
sampling_ratio=cfg.BODY_UV_RCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
for i in range(cfg.BODY_UV_RCNN.NUM_STACKED_CONVS):
current = model.Conv(
current,
'body_conv_fcn' + str(i + 1),
dim_in,
hidden_dim,
kernel_size,
stride=1,
pad=pad_size,
weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.01}),
bias_init=('ConstantFill', {'value': 0.})
)
current = model.Relu(current, current)
dim_in = hidden_dim
return current, hidden_dim
def generalized_rcnn(model):
"""This model type handles:
- Fast R-CNN
- RPN only (not integrated with Fast R-CNN)
- Faster R-CNN (stagewise training from NIPS paper)
- Faster R-CNN (end-to-end joint training)
- Mask R-CNN (stagewise training from NIPS paper)
- Mask R-CNN (end-to-end joint training)
"""
return build_generic_detection_model(
model,
eval(str(cfg.MODEL.CONV_BODY).replace(".","_")),
add_roi_box_head_func=[None if cfg.FAST_RCNN.ROI_BOX_HEAD == "" else eval(str(cfg.FAST_RCNN.ROI_BOX_HEAD).replace(".","_"))][0],
add_roi_mask_head_func=[None if cfg.MRCNN.ROI_MASK_HEAD == "" else eval(str(cfg.MRCNN.ROI_MASK_HEAD).replace(".","_"))][0],
add_roi_keypoint_head_func=[None if cfg.KRCNN.ROI_KEYPOINTS_HEAD == "" else eval(str(cfg.KRCNN.ROI_KEYPOINTS_HEAD).replace(".","_"))][0],
add_roi_body_uv_head_func=[None if cfg.BODY_UV_RCNN.ROI_HEAD == "" else eval(str(cfg.BODY_UV_RCNN.ROI_HEAD).replace(".","_"))][0],
freeze_conv_body=cfg.TRAIN.FREEZE_CONV_BODY
)
def fast_rcnn_heads_add_roi_2mlp_head(model, blob_in, dim_in, spatial_scale):
"""Add a ReLU MLP with two hidden layers."""
hidden_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
roi_feat = model.RoIFeatureTransform(
blob_in,
'roi_feat',
blob_rois='rois',
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=roi_size,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
model.FC(roi_feat, 'fc6', dim_in * roi_size * roi_size, hidden_dim)
model.Relu('fc6', 'fc6')
model.FC('fc6', 'fc7', hidden_dim, hidden_dim)
model.Relu('fc7', 'fc7')
return 'fc7', hidden_dim
def model_builder_create(model_type_func, train=False, gpu_id=0):
"""Generic model creation function that dispatches to specific model
building functions.
By default, this function will generate a data parallel model configured to
run on cfg.NUM_GPUS devices. However, you can restrict it to build a model
targeted to a specific GPU by specifying gpu_id. This is used by
optimizer.build_data_parallel_model() during test time.
"""
model = DetectionModelHelper(
name=model_type_func,
train=train,
num_classes=cfg.MODEL.NUM_CLASSES,
init_params=train
)
model.only_build_forward_pass = False
model.target_gpu_id = gpu_id
return eval(str(model_type_func).replace(".","_"))(model)
def configure_bbox_reg_weights(model, saved_cfg):
"""Compatibility for old models trained with bounding box regression
mean/std normalization (instead of fixed weights).
"""
if 'MODEL' not in saved_cfg or 'BBOX_REG_WEIGHTS' not in saved_cfg.MODEL:
logger.warning('Model from weights file was trained before config key '
'MODEL.BBOX_REG_WEIGHTS was added. Forcing '
'MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.) to ensure '
'correct **inference** behavior.')
# Generally we don't allow modifying the config, but this is a one-off
# hack to support some very old models
is_immutable = cfg.is_immutable()
cfg.immutable(False)
cfg.MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.)
cfg.immutable(is_immutable)
#logger.info('New config:')
#logger.info(pprint.pformat(cfg))
assert not model.train, (
'This model was trained with an older version of the code that '
'used bounding box regression mean/std normalization. It can no '
'longer be used for training. To upgrade it to a trainable model '
'please use fb/compat/convert_bbox_reg_normalized_model.py.'
)
def load_object(file_name):
with open(file_name, 'rb') as f:
# The default encoding used while unpickling is 7-bit (ASCII). However,
# the blobs are arbitrary 8-bit bytes, which don't agree. The absolutely
# correct way to do this is to use `encoding="bytes"` and then interpret
# the blob names either as ASCII or, better, as unicode utf-8. A
# reasonable fix, however, is to treat the encoding as 8-bit latin-1
# (which agrees with the first 256 characters of Unicode anyway).
if six.PY2:
return pickle.load(f)
else:
return pickle.load(f, encoding='latin1')
def net_utils_initialize_gpu_from_weights_file(model, weights_file, gpu_id=0):
"""Initialize a network with ops on a specific GPU.
If you use CUDA_VISIBLE_DEVICES to target specific GPUs, Caffe2 will
automatically map logical GPU ids (starting from 0) to the physical GPUs
specified in CUDA_VISIBLE_DEVICES.
"""
#logger.info('Loading weights from: {}'.format(weights_file))
ws_blobs = workspace.Blobs()
src_blobs = load_object(weights_file)
if 'cfg' in src_blobs:
saved_cfg = load_cfg(src_blobs['cfg'])
configure_bbox_reg_weights(model, saved_cfg)
if 'blobs' in src_blobs:
# Backwards compat--dictionary used to be only blobs, now they are
# stored under the 'blobs' key
src_blobs = src_blobs['blobs']
# Initialize weights on GPU gpu_id only
unscoped_param_names = OrderedDict() # Print these out in model order
for blob in model.params:
unscoped_param_names[c2_utils_UnscopeName(str(blob))] = True
with c2_utils_NamedCudaScope(gpu_id):
for unscoped_param_name in unscoped_param_names.keys():
if (unscoped_param_name.find(']_') >= 0 and
unscoped_param_name not in src_blobs):
# Special case for sharing initialization from a pretrained
# model:
# If a blob named '_[xyz]_foo' is in model.params and not in
# the initialization blob dictionary, then load source blob
# 'foo' into destination blob '_[xyz]_foo'
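# e.g. a model param named '_[mask]_fcn1_w' (hypothetical) that is absent
# from src_blobs is initialized from the source blob 'fcn1_w', i.e. the
# part of the name after ']_'.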
src_name = unscoped_param_name[
unscoped_param_name.find(']_') + 2:]
else:
src_name = unscoped_param_name
if src_name not in src_blobs:
#logger.info('{:s} not found'.format(src_name))
continue
dst_name = core.ScopedName(unscoped_param_name)
has_momentum = src_name + '_momentum' in src_blobs
has_momentum_str = ' [+ momentum]' if has_momentum else ''
logger.debug(
'{:s}{:} loaded from weights file into {:s}: {}'.format(
src_name, has_momentum_str, dst_name, src_blobs[src_name]
.shape
)
)
if dst_name in ws_blobs:
# If the blob is already in the workspace, make sure that it
# matches the shape of the loaded blob
ws_blob = workspace.FetchBlob(dst_name)
assert ws_blob.shape == src_blobs[src_name].shape, \
('Workspace blob {} with shape {} does not match '
'weights file shape {}').format(
src_name,
ws_blob.shape,
src_blobs[src_name].shape)
workspace.FeedBlob(
dst_name,
src_blobs[src_name].astype(np.float32, copy=False))
if has_momentum:
workspace.FeedBlob(
dst_name + '_momentum',
src_blobs[src_name + '_momentum'].astype(
np.float32, copy=False))
# We preserve blobs that are in the weights file but not used by the current
# model. We load these into CPU memory under the '__preserve__/' namescope.
# These blobs will be stored when saving a model to a weights file. This
# feature allows for alternating optimization of Faster R-CNN in which blobs
# unused by one step can still be preserved forward and used to initialize
# another step.
for src_name in src_blobs.keys():
if (src_name not in unscoped_param_names and
not src_name.endswith('_momentum') and
src_blobs[src_name] is not None):
with c2_utils_CpuScope():
workspace.FeedBlob(
'__preserve__/{:s}'.format(src_name), src_blobs[src_name])
logger.debug(
'{:s} preserved in workspace (unused)'.format(src_name))
def infer_engine_initialize_model_from_cfg(weights_file, gpu_id=0):
"""Initialize a model from the global cfg. Loads test-time weights and
creates the networks in the Caffe2 workspace.
"""
model = model_builder_create(cfg.MODEL.TYPE, train=False, gpu_id=gpu_id)
net_utils_initialize_gpu_from_weights_file(
model, weights_file, gpu_id=gpu_id,
)
model_builder_add_inference_inputs(model)
workspace.CreateNet(model.net)
workspace.CreateNet(model.conv_body_net)
if cfg.MODEL.MASK_ON:
workspace.CreateNet(model.mask_net)
if cfg.MODEL.KEYPOINTS_ON:
workspace.CreateNet(model.keypoint_net)
if cfg.MODEL.BODY_UV_ON:
workspace.CreateNet(model.body_uv_net)
return model
def setup_logging(name):
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
# Manually clear root loggers to prevent any module that may have called
# logging.basicConfig() from blocking our logging setup
logging.root.handlers = []
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(name)
return logger
def cache_url(url_or_file, cache_dir):
"""Download the file specified by the URL to the cache_dir and return the
path to the cached file. If the argument is not a URL, simply return it as
is.
"""
is_url = re.match(r'^(?:http)s?://', url_or_file, re.IGNORECASE) is not None
if not is_url:
return url_or_file
#
url = url_or_file
#
Len_filename = len( url.split('/')[-1] )
BASE_URL = url[0:-Len_filename-1]
#
cache_file_path = url.replace(BASE_URL, cache_dir)
if os.path.exists(cache_file_path):
#assert_cache_file_is_ok(url, cache_file_path)
return cache_file_path
cache_file_dir = os.path.dirname(cache_file_path)
if not os.path.exists(cache_file_dir):
os.makedirs(cache_file_dir)
#logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
download_url(url, cache_file_path)
#assert_cache_file_is_ok(url, cache_file_path)
return cache_file_path
def c2_utils_UnscopeName(possibly_scoped_name):
"""Remove any name scoping from a (possibly) scoped name. For example,
convert the name 'gpu_0/foo' to 'foo'."""
assert isinstance(possibly_scoped_name, string_types)
return possibly_scoped_name[
possibly_scoped_name.rfind(scope._NAMESCOPE_SEPARATOR) + 1:]
def _get_lr_change_ratio(cur_lr, new_lr):
eps = 1e-10
ratio = np.max(
(new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps)))
)
return ratio
def _filter_boxes(boxes, min_size, im_info):
"""Only keep boxes with both sides >= min_size and center within the image.
"""
    # Compute the width and height of the proposal boxes as measured in the
    # original image coordinate system; this is required to avoid "Negative
    # Areas Found" assertions in other parts of the code that measure box areas.
im_scale = im_info[2]
ws_orig_scale = (boxes[:, 2] - boxes[:, 0]) / im_scale + 1
hs_orig_scale = (boxes[:, 3] - boxes[:, 1]) / im_scale + 1
# To avoid numerical issues we require the min_size to be at least 1 pixel in the
# original image
min_size = np.maximum(min_size, 1)
# Proposal center is computed relative to the scaled input image
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
x_ctr = boxes[:, 0] + ws / 2.
y_ctr = boxes[:, 1] + hs / 2.
keep = np.where(
(ws_orig_scale >= min_size)
& (hs_orig_scale >= min_size)
& (x_ctr < im_info[1])
& (y_ctr < im_info[0])
)[0]
return keep
def box_utils_clip_tiled_boxes(boxes, im_shape):
"""Clip boxes to image boundaries. im_shape is [height, width] and boxes
has shape (N, 4 * num_tiled_boxes)."""
assert boxes.shape[1] % 4 == 0, \
'boxes.shape[1] is {:d}, but must be divisible by 4.'.format(
boxes.shape[1]
)
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
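# The following helper is an illustrative sketch added for this writeup, not
# part of the original code: with made-up box values and an assumed 480 x 640
# image, it shows how box_utils_clip_tiled_boxes snaps out-of-range
# coordinates onto the image boundary.
def _example_clip_tiled_boxes():
    # Two boxes in (x1, y1, x2, y2) format; the second extends past the
    # image on the left, right and bottom edges.
    boxes = np.array([
        [10.0, 20.0, 100.0, 200.0],
        [-5.0, 30.0, 700.0, 500.0],
    ], dtype=np.float32)
    clipped = box_utils_clip_tiled_boxes(boxes, im_shape=(480, 640))
    # The first box is unchanged; the second becomes [0, 30, 639, 479].
    return clipped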
def box_utils_nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return []
return cython_nms.nms(dets, thresh)
def fast_rcnn_roi_data_get_fast_rcnn_blob_names(is_training=True):
"""Fast R-CNN blob names."""
# rois blob: holds R regions of interest, each is a 5-tuple
# (batch_idx, x1, y1, x2, y2) specifying an image batch index and a
# rectangle (x1, y1, x2, y2)
blob_names = ['rois']
if is_training:
# labels_int32 blob: R categorical labels in [0, ..., K] for K
# foreground classes plus background
blob_names += ['labels_int32']
if is_training:
# bbox_targets blob: R bounding-box regression targets with 4
# targets per class
blob_names += ['bbox_targets']
# bbox_inside_weights blob: At most 4 targets per roi are active
        # this binary vector specifies the subset of active targets
blob_names += ['bbox_inside_weights']
blob_names += ['bbox_outside_weights']
if is_training and cfg.MODEL.MASK_ON:
# 'mask_rois': RoIs sampled for training the mask prediction branch.
# Shape is (#masks, 5) in format (batch_idx, x1, y1, x2, y2).
blob_names += ['mask_rois']
# 'roi_has_mask': binary labels for the RoIs specified in 'rois'
# indicating if each RoI has a mask or not. Note that in some cases
# a *bg* RoI will have an all -1 (ignore) mask associated with it in
# the case that no fg RoIs can be sampled. Shape is (batchsize).
blob_names += ['roi_has_mask_int32']
# 'masks_int32' holds binary masks for the RoIs specified in
# 'mask_rois'. Shape is (#fg, M * M) where M is the ground truth
# mask size.
blob_names += ['masks_int32']
if is_training and cfg.MODEL.KEYPOINTS_ON:
# 'keypoint_rois': RoIs sampled for training the keypoint prediction
# branch. Shape is (#instances, 5) in format (batch_idx, x1, y1, x2,
# y2).
blob_names += ['keypoint_rois']
# 'keypoint_locations_int32': index of keypoint in
# KRCNN.HEATMAP_SIZE**2 sized array. Shape is (#instances). Used in
# SoftmaxWithLoss.
blob_names += ['keypoint_locations_int32']
# 'keypoint_weights': weight assigned to each target in
# 'keypoint_locations_int32'. Shape is (#instances). Used in
# SoftmaxWithLoss.
blob_names += ['keypoint_weights']
# 'keypoint_loss_normalizer': optional normalization factor to use if
# cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False.
blob_names += ['keypoint_loss_normalizer']
########################
if is_training and cfg.MODEL.BODY_UV_ON:
blob_names += ['body_uv_rois']
blob_names += ['roi_has_body_uv_int32']
#########
# ###################################################
blob_names += ['body_uv_ann_labels']
blob_names += ['body_uv_ann_weights']
# #################################################
blob_names += ['body_uv_X_points']
blob_names += ['body_uv_Y_points']
blob_names += ['body_uv_Ind_points']
blob_names += ['body_uv_I_points']
blob_names += ['body_uv_U_points']
blob_names += ['body_uv_V_points']
blob_names += ['body_uv_point_weights']
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
# Support for FPN multi-level rois without bbox reg isn't
# implemented (... and may never be implemented)
k_max = cfg.FPN.ROI_MAX_LEVEL
k_min = cfg.FPN.ROI_MIN_LEVEL
# Same format as rois blob, but one per FPN level
for lvl in range(k_min, k_max + 1):
blob_names += ['rois_fpn' + str(lvl)]
blob_names += ['rois_idx_restore_int32']
if is_training:
if cfg.MODEL.MASK_ON:
for lvl in range(k_min, k_max + 1):
blob_names += ['mask_rois_fpn' + str(lvl)]
blob_names += ['mask_rois_idx_restore_int32']
if cfg.MODEL.KEYPOINTS_ON:
for lvl in range(k_min, k_max + 1):
blob_names += ['keypoint_rois_fpn' + str(lvl)]
blob_names += ['keypoint_rois_idx_restore_int32']
if cfg.MODEL.BODY_UV_ON:
for lvl in range(k_min, k_max + 1):
blob_names += ['body_uv_rois_fpn' + str(lvl)]
blob_names += ['body_uv_rois_idx_restore_int32']
return blob_names
def blob_utils_py_op_copy_blob(blob_in, blob_out):
"""Copy a numpy ndarray given as blob_in into the Caffe2 CPUTensor blob
given as blob_out. Supports float32 and int32 blob data types. This function
is intended for copying numpy data into a Caffe2 blob in PythonOps.
"""
    # Some awkward voodoo required by Caffe2 to support int32 blobs: int32
    # outputs must be explicitly initialized before data can be copied in.
    needs_int32_init = blob_in.dtype == np.int32
if needs_int32_init:
# init can only take a list (failed on tuple)
blob_out.init(list(blob_in.shape), caffe2_pb2.TensorProto.INT32)
else:
blob_out.reshape(blob_in.shape)
blob_out.data[...] = blob_in
def keypoint_rcnn_roi_data_finalize_keypoint_minibatch(blobs, valid):
"""Finalize the minibatch after blobs for all minibatch images have been
collated.
"""
min_count = cfg.KRCNN.MIN_KEYPOINT_COUNT_FOR_VALID_MINIBATCH
num_visible_keypoints = np.sum(blobs['keypoint_weights'])
valid = (
valid and len(blobs['keypoint_weights']) > 0 and
num_visible_keypoints > min_count
)
# Normalizer to use if cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False.
# See modeling.model_builder.add_keypoint_losses
norm = num_visible_keypoints / (
cfg.TRAIN.IMS_PER_BATCH * cfg.TRAIN.BATCH_SIZE_PER_IM *
cfg.TRAIN.FG_FRACTION * cfg.KRCNN.NUM_KEYPOINTS
)
blobs['keypoint_loss_normalizer'] = np.array(norm, dtype=np.float32)
return valid
def fpn_add_multilevel_roi_blobs(
blobs, blob_prefix, rois, target_lvls, lvl_min, lvl_max
):
"""Add RoI blobs for multiple FPN levels to the blobs dict.
blobs: a dict mapping from blob name to numpy ndarray
blob_prefix: name prefix to use for the FPN blobs
rois: the source rois as a 2D numpy array of shape (N, 5) where each row is
an roi and the columns encode (batch_idx, x1, y1, x2, y2)
target_lvls: numpy array of shape (N, ) indicating which FPN level each roi
in rois should be assigned to
lvl_min: the finest (highest resolution) FPN level (e.g., 2)
    lvl_max: the coarsest (lowest resolution) FPN level (e.g., 6)
"""
rois_idx_order = np.empty((0, ))
rois_stacked = np.zeros((0, 5), dtype=np.float32) # for assert
for lvl in range(lvl_min, lvl_max + 1):
idx_lvl = np.where(target_lvls == lvl)[0]
blobs[blob_prefix + '_fpn' + str(lvl)] = rois[idx_lvl, :]
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_stacked = np.vstack(
[rois_stacked, blobs[blob_prefix + '_fpn' + str(lvl)]]
)
rois_idx_restore = np.argsort(rois_idx_order).astype(np.int32, copy=False)
blobs[blob_prefix + '_idx_restore_int32'] = rois_idx_restore
# Sanity check that restore order is correct
assert (rois_stacked[rois_idx_restore] == rois).all()
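# Illustrative sketch (not part of the original code, assumed level values):
# after RoIs are regrouped by FPN level, taking argsort of the concatenated
# original indices recovers the original RoI ordering, which is what the
# '*_idx_restore_int32' blob stores.
def _example_fpn_restore_order():
    rois = np.arange(6 * 5, dtype=np.float32).reshape(6, 5)  # 6 toy RoIs
    target_lvls = np.array([3, 2, 4, 2, 3, 4])               # assumed levels
    blobs = {}
    fpn_add_multilevel_roi_blobs(blobs, 'rois', rois, target_lvls, 2, 4)
    stacked = np.concatenate(
        [blobs['rois_fpn' + str(lvl)] for lvl in range(2, 5)]
    )
    restore = blobs['rois_idx_restore_int32']
    # Indexing the level-ordered stack with the restore indices gives back
    # the RoIs in their original order.
    assert (stacked[restore] == rois).all()
    return blobs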
def _add_multilevel_rois(blobs):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
according the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_name):
"""Distribute rois over the different FPN levels."""
# Get target level for each roi
# Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take
# the box coordinates from columns 1:5
target_lvls = fpn_map_rois_to_fpn_levels(
blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max
)
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
fpn_add_multilevel_roi_blobs(
blobs, rois_blob_name, blobs[rois_blob_name], target_lvls, lvl_min,
lvl_max
)
_distribute_rois_over_fpn_levels('rois')
if cfg.MODEL.MASK_ON:
_distribute_rois_over_fpn_levels('mask_rois')
if cfg.MODEL.KEYPOINTS_ON:
_distribute_rois_over_fpn_levels('keypoint_rois')
if cfg.MODEL.BODY_UV_ON:
_distribute_rois_over_fpn_levels('body_uv_rois')
def segm_utils_GetDensePoseMask(Polys):
MaskGen = np.zeros([256,256])
for i in range(1,15):
if(Polys[i-1]):
current_mask = mask_util.decode(Polys[i-1])
MaskGen[current_mask>0] = i
return MaskGen
def body_uv_rcnn_roi_data_add_body_uv_rcnn_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx):
IsFlipped = roidb['flipped']
M = cfg.BODY_UV_RCNN.HEATMAP_SIZE
#
polys_gt_inds = np.where(roidb['ignore_UV_body'] == 0)[0]
boxes_from_polys = [roidb['boxes'][i,:] for i in polys_gt_inds]
    if boxes_from_polys:
        boxes_from_polys = np.vstack(boxes_from_polys)
        boxes_from_polys = np.array(boxes_from_polys)
fg_inds = np.where(blobs['labels_int32'] > 0)[0]
roi_has_mask = np.zeros( blobs['labels_int32'].shape )
if (bool(boxes_from_polys.any()) & (fg_inds.shape[0] > 0) ):
rois_fg = sampled_boxes[fg_inds]
#
rois_fg.astype(np.float32, copy=False)
boxes_from_polys.astype(np.float32, copy=False)
#
overlaps_bbfg_bbpolys = box_utils_bbox_overlaps(
rois_fg.astype(np.float32, copy=False),
boxes_from_polys.astype(np.float32, copy=False))
fg_polys_value = np.max(overlaps_bbfg_bbpolys, axis=1)
fg_inds = fg_inds[fg_polys_value>0.7]
if (bool(boxes_from_polys.any()) & (fg_inds.shape[0] > 0) ):
for jj in fg_inds:
roi_has_mask[jj] = 1
# Create blobs for densepose supervision.
################################################## The mask
All_labels = blob_utils_zeros((fg_inds.shape[0], M ** 2), int32=True)
All_Weights = blob_utils_zeros((fg_inds.shape[0], M ** 2), int32=True)
################################################# The points
X_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
Y_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
Ind_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=True)
I_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=True)
U_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
V_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
Uv_point_weights = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
#################################################
rois_fg = sampled_boxes[fg_inds]
overlaps_bbfg_bbpolys = box_utils_bbox_overlaps(
rois_fg.astype(np.float32, copy=False),
boxes_from_polys.astype(np.float32, copy=False))
fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1)
for i in range(rois_fg.shape[0]):
#
fg_polys_ind = polys_gt_inds[ fg_polys_inds[i] ]
#
Ilabel = segm_utils_GetDensePoseMask( roidb['dp_masks'][ fg_polys_ind ] )
#
GT_I = np.array(roidb['dp_I'][ fg_polys_ind ])
GT_U = np.array(roidb['dp_U'][ fg_polys_ind ])
GT_V = np.array(roidb['dp_V'][ fg_polys_ind ])
GT_x = np.array(roidb['dp_x'][ fg_polys_ind ])
GT_y = np.array(roidb['dp_y'][ fg_polys_ind ])
GT_weights = np.ones(GT_I.shape).astype(np.float32)
#
## Do the flipping of the densepose annotation !
if(IsFlipped):
GT_I,GT_U,GT_V,GT_x,GT_y,Ilabel = DP.get_symmetric_densepose(GT_I,GT_U,GT_V,GT_x,GT_y,Ilabel)
#
roi_fg = rois_fg[i]
roi_gt = boxes_from_polys[fg_polys_inds[i],:]
#
x1 = roi_fg[0] ; x2 = roi_fg[2]
y1 = roi_fg[1] ; y2 = roi_fg[3]
#
x1_source = roi_gt[0]; x2_source = roi_gt[2]
y1_source = roi_gt[1]; y2_source = roi_gt[3]
#
x_targets = ( np.arange(x1,x2, (x2 - x1)/M ) - x1_source ) * ( 256. / (x2_source-x1_source) )
y_targets = ( np.arange(y1,y2, (y2 - y1)/M ) - y1_source ) * ( 256. / (y2_source-y1_source) )
#
x_targets = x_targets[0:M] ## Strangely sometimes it can be M+1, so make sure size is OK!
y_targets = y_targets[0:M]
#
[X_targets,Y_targets] = np.meshgrid( x_targets, y_targets )
New_Index = cv2.remap(Ilabel,X_targets.astype(np.float32), Y_targets.astype(np.float32), interpolation=cv2.INTER_NEAREST, borderMode= cv2.BORDER_CONSTANT, borderValue=(0))
#
All_L = np.zeros(New_Index.shape)
All_W = np.ones(New_Index.shape)
#
All_L = New_Index
#
gt_length_x = x2_source - x1_source
gt_length_y = y2_source - y1_source
#
GT_y = (( GT_y / 256. * gt_length_y ) + y1_source - y1 ) * ( M / ( y2 - y1 ) )
GT_x = (( GT_x / 256. * gt_length_x ) + x1_source - x1 ) * ( M / ( x2 - x1 ) )
#
GT_I[GT_y<0] = 0
GT_I[GT_y>(M-1)] = 0
GT_I[GT_x<0] = 0
GT_I[GT_x>(M-1)] = 0
#
points_inside = GT_I>0
GT_U = GT_U[points_inside]
GT_V = GT_V[points_inside]
GT_x = GT_x[points_inside]
GT_y = GT_y[points_inside]
GT_weights = GT_weights[points_inside]
GT_I = GT_I[points_inside]
#
X_points[i, 0:len(GT_x)] = GT_x
Y_points[i, 0:len(GT_y)] = GT_y
Ind_points[i, 0:len(GT_I)] = i
I_points[i, 0:len(GT_I)] = GT_I
U_points[i, 0:len(GT_U)] = GT_U
V_points[i, 0:len(GT_V)] = GT_V
Uv_point_weights[i, 0:len(GT_weights)] = GT_weights
#
All_labels[i, :] = np.reshape(All_L.astype(np.int32), M ** 2)
All_Weights[i, :] = np.reshape(All_W.astype(np.int32), M ** 2)
##
else:
bg_inds = np.where(blobs['labels_int32'] == 0)[0]
#
if(len(bg_inds)==0):
rois_fg = sampled_boxes[0].reshape((1, -1))
else:
rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1))
roi_has_mask[0] = 1
#
X_points = blob_utils_zeros((1, 196), int32=False)
Y_points = blob_utils_zeros((1, 196), int32=False)
Ind_points = blob_utils_zeros((1, 196), int32=True)
I_points = blob_utils_zeros((1,196), int32=True)
U_points = blob_utils_zeros((1, 196), int32=False)
V_points = blob_utils_zeros((1, 196), int32=False)
Uv_point_weights = blob_utils_zeros((1, 196), int32=False)
#
        All_labels = blob_utils_zeros((1, M ** 2), int32=True)
        All_Weights = blob_utils_zeros((1, M ** 2), int32=True)
#
rois_fg *= im_scale
repeated_batch_idx = batch_idx * blob_utils_ones((rois_fg.shape[0], 1))
rois_fg = np.hstack((repeated_batch_idx, rois_fg))
#
K = cfg.BODY_UV_RCNN.NUM_PATCHES
#
U_points = np.tile( U_points , [1,K+1] )
V_points = np.tile( V_points , [1,K+1] )
Uv_Weight_Points = np.zeros(U_points.shape)
#
for jjj in range(1,K+1):
Uv_Weight_Points[ : , jjj * I_points.shape[1] : (jjj+1) * I_points.shape[1] ] = ( I_points == jjj ).astype(np.float32)
#
################
    # Update blobs dict with body UV R-CNN blobs
###############
#
blobs['body_uv_rois'] = np.array(rois_fg)
blobs['roi_has_body_uv_int32'] = np.array(roi_has_mask).astype(np.int32)
##
blobs['body_uv_ann_labels'] = np.array(All_labels).astype(np.int32)
blobs['body_uv_ann_weights'] = np.array(All_Weights).astype(np.float32)
#
##########################
blobs['body_uv_X_points'] = X_points.astype(np.float32)
blobs['body_uv_Y_points'] = Y_points.astype(np.float32)
blobs['body_uv_Ind_points'] = Ind_points.astype(np.float32)
blobs['body_uv_I_points'] = I_points.astype(np.float32)
blobs['body_uv_U_points'] = U_points.astype(np.float32) #### VERY IMPORTANT : These are switched here :
blobs['body_uv_V_points'] = V_points.astype(np.float32)
blobs['body_uv_point_weights'] = Uv_Weight_Points.astype(np.float32)
###################
def keypoint_utils_keypoints_to_heatmap_labels(keypoints, rois):
"""Encode keypoint location in the target heatmap for use in
SoftmaxWithLoss.
"""
# Maps keypoints from the half-open interval [x1, x2) on continuous image
# coordinates to the closed interval [0, HEATMAP_SIZE - 1] on discrete image
# coordinates. We use the continuous <-> discrete conversion from Heckbert
# 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5,
# where d is a discrete coordinate and c is a continuous coordinate.
assert keypoints.shape[2] == cfg.KRCNN.NUM_KEYPOINTS
shape = (len(rois), cfg.KRCNN.NUM_KEYPOINTS)
heatmaps = blob_utils_zeros(shape)
weights = blob_utils_zeros(shape)
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 2] - rois[:, 0])
scale_y = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 3] - rois[:, 1])
for kp in range(keypoints.shape[2]):
vis = keypoints[:, 2, kp] > 0
x = keypoints[:, 0, kp].astype(np.float32)
y = keypoints[:, 1, kp].astype(np.float32)
# Since we use floor below, if a keypoint is exactly on the roi's right
# or bottom boundary, we shift it in by eps (conceptually) to keep it in
# the ground truth heatmap.
x_boundary_inds = np.where(x == rois[:, 2])[0]
y_boundary_inds = np.where(y == rois[:, 3])[0]
x = (x - offset_x) * scale_x
x = np.floor(x)
if len(x_boundary_inds) > 0:
x[x_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
y = (y - offset_y) * scale_y
y = np.floor(y)
if len(y_boundary_inds) > 0:
y[y_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
valid_loc = np.logical_and(
np.logical_and(x >= 0, y >= 0),
np.logical_and(
x < cfg.KRCNN.HEATMAP_SIZE, y < cfg.KRCNN.HEATMAP_SIZE))
valid = np.logical_and(valid_loc, vis)
valid = valid.astype(np.int32)
lin_ind = y * cfg.KRCNN.HEATMAP_SIZE + x
heatmaps[:, kp] = lin_ind * valid
weights[:, kp] = valid
return heatmaps, weights
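# Standalone sketch (plain numpy, made-up coordinates; heatmap_size defaults
# to an assumed 56): it shows the continuous-to-discrete mapping used above
# for a single keypoint, i.e. column = floor((x - x1) * S / (x2 - x1)) for a
# heatmap that is S bins wide, with boundary keypoints shifted in by one bin.
def _example_keypoint_to_heatmap_index(heatmap_size=56):
    x1, x2 = 100.0, 300.0  # assumed roi extent along x
    x = 250.0              # assumed keypoint x coordinate inside [x1, x2)
    scale_x = heatmap_size / (x2 - x1)
    col = int(np.floor((x - x1) * scale_x))
    # Mirror the x_boundary_inds handling: keep boundary points in the map.
    col = min(col, heatmap_size - 1)
    return col  # 42 for these assumed values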
def _within_box(points, boxes):
"""Validate which keypoints are contained inside a given box.
points: Nx2xK
boxes: Nx4
output: NxK
"""
x_within = np.logical_and(
points[:, 0, :] >= np.expand_dims(boxes[:, 0], axis=1),
points[:, 0, :] <= np.expand_dims(boxes[:, 2], axis=1)
)
y_within = np.logical_and(
points[:, 1, :] >= np.expand_dims(boxes[:, 1], axis=1),
points[:, 1, :] <= np.expand_dims(boxes[:, 3], axis=1)
)
return np.logical_and(x_within, y_within)
def keypoint_rcnn_roi_data_add_keypoint_rcnn_blobs(
blobs, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx
):
"""Add Mask R-CNN keypoint specific blobs to the given blobs dictionary."""
# Note: gt_inds must match how they're computed in
# datasets.json_dataset._merge_proposal_boxes_into_roidb
gt_inds = np.where(roidb['gt_classes'] > 0)[0]
max_overlaps = roidb['max_overlaps']
gt_keypoints = roidb['gt_keypoints']
ind_kp = gt_inds[roidb['box_to_gt_ind_map']]
within_box = _within_box(gt_keypoints[ind_kp, :, :], roidb['boxes'])
vis_kp = gt_keypoints[ind_kp, 2, :] > 0
is_visible = np.sum(np.logical_and(vis_kp, within_box), axis=1) > 0
kp_fg_inds = np.where(
np.logical_and(max_overlaps >= cfg.TRAIN.FG_THRESH, is_visible)
)[0]
kp_fg_rois_per_this_image = np.minimum(fg_rois_per_image, kp_fg_inds.size)
if kp_fg_inds.size > kp_fg_rois_per_this_image:
kp_fg_inds = np.random.choice(
kp_fg_inds, size=kp_fg_rois_per_this_image, replace=False
)
sampled_fg_rois = roidb['boxes'][kp_fg_inds]
box_to_gt_ind_map = roidb['box_to_gt_ind_map'][kp_fg_inds]
num_keypoints = gt_keypoints.shape[2]
sampled_keypoints = -np.ones(
(len(sampled_fg_rois), gt_keypoints.shape[1], num_keypoints),
dtype=gt_keypoints.dtype
)
for ii in range(len(sampled_fg_rois)):
ind = box_to_gt_ind_map[ii]
if ind >= 0:
sampled_keypoints[ii, :, :] = gt_keypoints[gt_inds[ind], :, :]
assert np.sum(sampled_keypoints[ii, 2, :]) > 0
heats, weights = keypoint_utils_keypoints_to_heatmap_labels(
sampled_keypoints, sampled_fg_rois
)
shape = (sampled_fg_rois.shape[0] * cfg.KRCNN.NUM_KEYPOINTS, 1)
heats = heats.reshape(shape)
weights = weights.reshape(shape)
sampled_fg_rois *= im_scale
repeated_batch_idx = batch_idx * blob_utils_ones(
(sampled_fg_rois.shape[0], 1)
)
sampled_fg_rois = np.hstack((repeated_batch_idx, sampled_fg_rois))
blobs['keypoint_rois'] = sampled_fg_rois
blobs['keypoint_locations_int32'] = heats.astype(np.int32, copy=False)
blobs['keypoint_weights'] = weights
def _expand_to_class_specific_mask_targets(masks, mask_class_labels):
"""Expand masks from shape (#masks, M ** 2) to (#masks, #classes * M ** 2)
to encode class specific mask targets.
"""
assert masks.shape[0] == mask_class_labels.shape[0]
M = cfg.MRCNN.RESOLUTION
# Target values of -1 are "don't care" / ignore labels
mask_targets = -blob_utils_ones(
(masks.shape[0], cfg.MODEL.NUM_CLASSES * M**2), int32=True
)
for i in range(masks.shape[0]):
cls = int(mask_class_labels[i])
start = M**2 * cls
end = start + M**2
# Ignore background instance
# (only happens when there is no fg samples in an image)
if cls > 0:
mask_targets[i, start:end] = masks[i, :]
return mask_targets
def segm_utils_polys_to_mask_wrt_box(polygons, box, M):
"""Convert from the COCO polygon segmentation format to a binary mask
encoded as a 2D array of data type numpy.float32. The polygon segmentation
is understood to be enclosed in the given box and rasterized to an M x M
mask. The resulting mask is therefore of shape (M, M).
"""
w = box[2] - box[0]
h = box[3] - box[1]
w = np.maximum(w, 1)
h = np.maximum(h, 1)
polygons_norm = []
for poly in polygons:
p = np.array(poly, dtype=np.float32)
p[0::2] = (p[0::2] - box[0]) * M / w
p[1::2] = (p[1::2] - box[1]) * M / h
polygons_norm.append(p)
rle = mask_util.frPyObjects(polygons_norm, M, M)
mask = np.array(mask_util.decode(rle), dtype=np.float32)
# Flatten in case polygons was a list
mask = np.sum(mask, axis=2)
mask = np.array(mask > 0, dtype=np.float32)
return mask
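# Small usage sketch (toy polygon, not from any dataset): a 10x10 axis-aligned
# square rasterized relative to its own bounding box should come out as an
# (almost) all-ones 14 x 14 mask. Like the function above, this requires
# pycocotools' mask_util to be importable.
def _example_polys_to_mask():
    square = [[0.0, 0.0, 10.0, 0.0, 10.0, 10.0, 0.0, 10.0]]  # one polygon
    box = np.array([0.0, 0.0, 10.0, 10.0], dtype=np.float32)
    mask = segm_utils_polys_to_mask_wrt_box(square, box, 14)
    return mask  # (14, 14) float32 array of zeros and ones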
def segm_utils_polys_to_boxes(polys):
"""Convert a list of polygons into an array of tight bounding boxes."""
boxes_from_polys = np.zeros((len(polys), 4), dtype=np.float32)
for i in range(len(polys)):
poly = polys[i]
x0 = min(min(p[::2]) for p in poly)
x1 = max(max(p[::2]) for p in poly)
y0 = min(min(p[1::2]) for p in poly)
y1 = max(max(p[1::2]) for p in poly)
boxes_from_polys[i, :] = [x0, y0, x1, y1]
return boxes_from_polys
def mask_rcnn_roi_data_add_mask_rcnn_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx):
"""Add Mask R-CNN specific blobs to the input blob dictionary."""
# Prepare the mask targets by associating one gt mask to each training roi
# that has a fg (non-bg) class label.
M = cfg.MRCNN.RESOLUTION
polys_gt_inds = np.where(
(roidb['gt_classes'] > 0) & (roidb['is_crowd'] == 0)
)[0]
polys_gt = [roidb['segms'][i] for i in polys_gt_inds]
boxes_from_polys = segm_utils_polys_to_boxes(polys_gt)
fg_inds = np.where(blobs['labels_int32'] > 0)[0]
roi_has_mask = blobs['labels_int32'].copy()
roi_has_mask[roi_has_mask > 0] = 1
if fg_inds.shape[0] > 0:
# Class labels for the foreground rois
mask_class_labels = blobs['labels_int32'][fg_inds]
masks = blob_utils_zeros((fg_inds.shape[0], M**2), int32=True)
# Find overlap between all foreground rois and the bounding boxes
# enclosing each segmentation
rois_fg = sampled_boxes[fg_inds]
overlaps_bbfg_bbpolys = box_utils_bbox_overlaps(
rois_fg.astype(np.float32, copy=False),
boxes_from_polys.astype(np.float32, copy=False)
)
# Map from each fg rois to the index of the mask with highest overlap
# (measured by bbox overlap)
fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1)
# add fg targets
for i in range(rois_fg.shape[0]):
fg_polys_ind = fg_polys_inds[i]
poly_gt = polys_gt[fg_polys_ind]
roi_fg = rois_fg[i]
# Rasterize the portion of the polygon mask within the given fg roi
# to an M x M binary image
mask = segm_utils_polys_to_mask_wrt_box(poly_gt, roi_fg, M)
mask = np.array(mask > 0, dtype=np.int32) # Ensure it's binary
masks[i, :] = np.reshape(mask, M**2)
else: # If there are no fg masks (it does happen)
# The network cannot handle empty blobs, so we must provide a mask
        # We simply take the first bg roi, give it an all -1's mask (ignore
        # label), and label it with class zero (bg).
bg_inds = np.where(blobs['labels_int32'] == 0)[0]
# rois_fg is actually one background roi, but that's ok because ...
rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1))
        # We give it an all -1's blob (ignore label)
masks = -blob_utils_ones((1, M**2), int32=True)
# We label it with class = 0 (background)
mask_class_labels = blob_utils_zeros((1, ))
# Mark that the first roi has a mask
roi_has_mask[0] = 1
if cfg.MRCNN.CLS_SPECIFIC_MASK:
masks = _expand_to_class_specific_mask_targets(masks, mask_class_labels)
# Scale rois_fg and format as (batch_idx, x1, y1, x2, y2)
rois_fg *= im_scale
repeated_batch_idx = batch_idx * blob_utils_ones((rois_fg.shape[0], 1))
rois_fg = np.hstack((repeated_batch_idx, rois_fg))
# Update blobs dict with Mask R-CNN blobs
blobs['mask_rois'] = rois_fg
blobs['roi_has_mask_int32'] = roi_has_mask
blobs['masks_int32'] = masks
def blob_utils_ones(shape, int32=False):
"""Return a blob of all ones of the given shape with the correct float or
int data type.
"""
return np.ones(shape, dtype=np.int32 if int32 else np.float32)
def blob_utils_zeros(shape, int32=False):
"""Return a blob of all zeros of the given shape with the correct float or
int data type.
"""
return np.zeros(shape, dtype=np.int32 if int32 else np.float32)
def _expand_bbox_targets(bbox_target_data):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
num_bbox_reg_classes = cfg.MODEL.NUM_CLASSES
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
num_bbox_reg_classes = 2 # bg and fg
clss = bbox_target_data[:, 0]
bbox_targets = blob_utils_zeros((clss.size, 4 * num_bbox_reg_classes))
bbox_inside_weights = blob_utils_zeros(bbox_targets.shape)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = (1.0, 1.0, 1.0, 1.0)
return bbox_targets, bbox_inside_weights
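# Standalone sketch of the "4-of-4*K" layout described above, using an assumed
# K = 3 classes and plain numpy (it does not consult cfg): each row carries 4K
# regression targets, but only the 4 slots of the row's class are written and
# weighted.
def _example_expand_bbox_targets(num_classes=3):
    # Compact form: (class, tx, ty, tw, th) for two rois of classes 1 and 2.
    compact = np.array([[1, 0.1, 0.2, 0.3, 0.4],
                        [2, -0.1, 0.0, 0.2, 0.1]], dtype=np.float32)
    targets = np.zeros((compact.shape[0], 4 * num_classes), dtype=np.float32)
    inside_weights = np.zeros_like(targets)
    for ind in range(compact.shape[0]):
        cls = int(compact[ind, 0])
        start, end = 4 * cls, 4 * cls + 4
        targets[ind, start:end] = compact[ind, 1:]
        inside_weights[ind, start:end] = 1.0
    return targets, inside_weights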
def _sample_rois(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
rois_per_image = int(cfg.TRAIN.BATCH_SIZE_PER_IM)
fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
max_overlaps = roidb['max_overlaps']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False
)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where(
(max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps >= cfg.TRAIN.BG_THRESH_LO)
)[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False
)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Label is the class each RoI has max overlap with
sampled_labels = roidb['max_classes'][keep_inds]
sampled_labels[fg_rois_per_this_image:] = 0 # Label bg RoIs with class 0
sampled_boxes = roidb['boxes'][keep_inds]
bbox_targets, bbox_inside_weights = _expand_bbox_targets(
roidb['bbox_targets'][keep_inds, :]
)
bbox_outside_weights = np.array(
bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype
)
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_rois = sampled_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils_ones((sampled_rois.shape[0], 1))
sampled_rois = np.hstack((repeated_batch_idx, sampled_rois))
# Base Fast R-CNN blobs
blob_dict = dict(
labels_int32=sampled_labels.astype(np.int32, copy=False),
rois=sampled_rois,
bbox_targets=bbox_targets,
bbox_inside_weights=bbox_inside_weights,
bbox_outside_weights=bbox_outside_weights
)
# Optionally add Mask R-CNN blobs
if cfg.MODEL.MASK_ON:
mask_rcnn_roi_data_add_mask_rcnn_blobs(
blob_dict, sampled_boxes, roidb, im_scale, batch_idx
)
# Optionally add Keypoint R-CNN blobs
if cfg.MODEL.KEYPOINTS_ON:
keypoint_rcnn_roi_data_add_keypoint_rcnn_blobs(
blob_dict, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx
)
# Optionally body UV R-CNN blobs
if cfg.MODEL.BODY_UV_ON:
body_uv_rcnn_roi_data_add_body_uv_rcnn_blobs(
blob_dict, sampled_boxes, roidb, im_scale, batch_idx
)
return blob_dict
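# Standalone sketch of the fg/bg sampling step above, decoupled from cfg and
# the roidb structure; the thresholds and batch-size defaults are assumed
# values, not the project's configuration.
def _example_fg_bg_sampling(max_overlaps, rois_per_image=8, fg_fraction=0.25,
                            fg_thresh=0.5, bg_thresh_hi=0.5, bg_thresh_lo=0.0):
    fg_rois_per_image = int(np.round(fg_fraction * rois_per_image))
    fg_inds = np.where(max_overlaps >= fg_thresh)[0]
    fg_this_image = min(fg_rois_per_image, fg_inds.size)
    if fg_inds.size > 0:
        fg_inds = np.random.choice(fg_inds, size=fg_this_image, replace=False)
    bg_inds = np.where((max_overlaps < bg_thresh_hi) &
                       (max_overlaps >= bg_thresh_lo))[0]
    bg_this_image = min(rois_per_image - fg_this_image, bg_inds.size)
    if bg_inds.size > 0:
        bg_inds = np.random.choice(bg_inds, size=bg_this_image, replace=False)
    # e.g. _example_fg_bg_sampling(np.array([0.9, 0.1, 0.6, 0.3, 0.05]))
    return np.append(fg_inds, bg_inds)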
def fast_rcnn_roi_data_add_fast_rcnn_blobs(blobs, im_scales, roidb):
"""Add blobs needed for training Fast R-CNN style models."""
# Sample training RoIs from each image and append them to the blob lists
for im_i, entry in enumerate(roidb):
frcn_blobs = _sample_rois(entry, im_scales[im_i], im_i)
for k, v in frcn_blobs.items():
blobs[k].append(v)
# Concat the training blob lists into tensors
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
# Add FPN multilevel training RoIs, if configured
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois(blobs)
# Perform any final work and validity checks after the collating blobs for
# all minibatch images
valid = True
if cfg.MODEL.KEYPOINTS_ON:
valid = keypoint_rcnn_roi_data_finalize_keypoint_minibatch(blobs, valid)
return valid
def box_utils_bbox_transform_inv(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):
"""Inverse transform that computes target bounding-box regression deltas
given proposal boxes and ground-truth boxes. The weights argument should be
a 4-tuple of multiplicative weights that are applied to the regression
target.
In older versions of this code (and in py-faster-rcnn), the weights were set
such that the regression deltas would have unit standard deviation on the
training dataset. Presently, rather than computing these statistics exactly,
we use a fixed set of weights (10., 10., 5., 5.) by default. These are
approximately the weights one would get from COCO using the previous unit
stdev heuristic.
"""
ex_widths = boxes[:, 2] - boxes[:, 0] + 1.0
ex_heights = boxes[:, 3] - boxes[:, 1] + 1.0
ex_ctr_x = boxes[:, 0] + 0.5 * ex_widths
ex_ctr_y = boxes[:, 1] + 0.5 * ex_heights
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0
gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * np.log(gt_widths / ex_widths)
targets_dh = wh * np.log(gt_heights / ex_heights)
targets = np.vstack((targets_dx, targets_dy, targets_dw,
targets_dh)).transpose()
return targets
def compute_bbox_regression_targets(entry):
"""Compute bounding-box regression targets for an image."""
# Indices of ground-truth ROIs
rois = entry['boxes']
overlaps = entry['max_overlaps']
labels = entry['max_classes']
gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
# Targets has format (class, tx, ty, tw, th)
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
if len(gt_inds) == 0:
# Bail if the image has no ground-truth ROIs
return targets
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = box_utils_bbox_overlaps(
rois[ex_inds, :].astype(dtype=np.float32, copy=False),
rois[gt_inds, :].astype(dtype=np.float32, copy=False))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
# Use class "1" for all boxes if using class_agnostic_bbox_reg
targets[ex_inds, 0] = (
1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
targets[ex_inds, 1:] = box_utils_bbox_transform_inv(
ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
return targets
def roidb_utils_add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
for entry in roidb:
entry['bbox_targets'] = compute_bbox_regression_targets(entry)
def _add_class_assignments(roidb):
"""Compute object category assignment for each box associated with each
roidb entry.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
entry['max_classes'] = max_classes
entry['max_overlaps'] = max_overlaps
# sanity checks
# if max overlap is 0, the class must be background (class 0)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# if max overlap > 0, the class must be a fg class (not class 0)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def box_utils_xyxy_to_xywh(xyxy):
"""Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format."""
if isinstance(xyxy, (list, tuple)):
# Single box given as a list of coordinates
assert len(xyxy) == 4
x1, y1 = xyxy[0], xyxy[1]
w = xyxy[2] - x1 + 1
h = xyxy[3] - y1 + 1
return (x1, y1, w, h)
elif isinstance(xyxy, np.ndarray):
# Multiple boxes given as a 2D ndarray
return np.hstack((xyxy[:, 0:2], xyxy[:, 2:4] - xyxy[:, 0:2] + 1))
else:
raise TypeError('Argument xyxy must be a list, tuple, or numpy array.')
def _filter_crowd_proposals(roidb, crowd_thresh):
"""Finds proposals that are inside crowd regions and marks them as
    overlap = -1 with each ground-truth RoI, which means they will be excluded
from training.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
crowd_inds = np.where(entry['is_crowd'] == 1)[0]
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
continue
crowd_boxes = box_utils_xyxy_to_xywh(entry['boxes'][crowd_inds, :])
non_gt_boxes = box_utils_xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
iscrowd_flags = [int(True)] * len(crowd_inds)
ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)
bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
gt_overlaps[non_gt_inds[bad_inds], :] = -1
entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
def _merge_proposal_boxes_into_roidb(roidb, box_list):
"""Add proposal boxes to each roidb entry."""
assert len(box_list) == len(roidb)
for i, entry in enumerate(roidb):
boxes = box_list[i]
num_boxes = boxes.shape[0]
gt_overlaps = np.zeros(
(num_boxes, entry['gt_overlaps'].shape[1]),
dtype=entry['gt_overlaps'].dtype
)
box_to_gt_ind_map = -np.ones(
(num_boxes), dtype=entry['box_to_gt_ind_map'].dtype
)
# Note: unlike in other places, here we intentionally include all gt
# rois, even ones marked as crowd. Boxes that overlap with crowds will
# be filtered out later (see: _filter_crowd_proposals).
gt_inds = np.where(entry['gt_classes'] > 0)[0]
if len(gt_inds) > 0:
gt_boxes = entry['boxes'][gt_inds, :]
gt_classes = entry['gt_classes'][gt_inds]
proposal_to_gt_overlaps = box_utils_bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False)
)
# Gt box that overlaps each input box the most
# (ties are broken arbitrarily by class order)
argmaxes = proposal_to_gt_overlaps.argmax(axis=1)
# Amount of that overlap
maxes = proposal_to_gt_overlaps.max(axis=1)
# Those boxes with non-zero overlap with gt boxes
I = np.where(maxes > 0)[0]
# Record max overlaps with the class of the appropriate gt box
gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]
entry['boxes'] = np.append(
entry['boxes'],
boxes.astype(entry['boxes'].dtype, copy=False),
axis=0
)
entry['gt_classes'] = np.append(
entry['gt_classes'],
np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)
)
entry['seg_areas'] = np.append(
entry['seg_areas'],
np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)
)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
entry['is_crowd'] = np.append(
entry['is_crowd'],
np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)
)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'],
box_to_gt_ind_map.astype(
entry['box_to_gt_ind_map'].dtype, copy=False
)
)
def json_dataset_add_proposals(roidb, rois, scales, crowd_thresh):
"""Add proposal boxes (rois) to an roidb that has ground-truth annotations
but no proposals. If the proposals are not at the original image scale,
    specify the scale factor that separates them in scales.
"""
box_list = []
for i in range(len(roidb)):
inv_im_scale = 1. / scales[i]
idx = np.where(rois[:, 0] == i)[0]
box_list.append(rois[idx, 1:] * inv_im_scale)
_merge_proposal_boxes_into_roidb(roidb, box_list)
if crowd_thresh > 0:
_filter_crowd_proposals(roidb, crowd_thresh)
_add_class_assignments(roidb)
def blob_utils_deserialize(arr):
"""Unserialize a Python object from an array of float32 values fetched from
a workspace. See serialize().
"""
return pickle.loads(arr.astype(np.uint8).tobytes())
def box_utils_boxes_area(boxes):
"""Compute the area of an array of boxes."""
w = (boxes[:, 2] - boxes[:, 0] + 1)
h = (boxes[:, 3] - boxes[:, 1] + 1)
areas = w * h
    assert np.all(areas >= 0), 'Negative areas found'
return areas
def fpn_map_rois_to_fpn_levels(rois, k_min, k_max):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
# Compute level ids
s = np.sqrt(box_utils_boxes_area(rois))
s0 = cfg.FPN.ROI_CANONICAL_SCALE # default: 224
lvl0 = cfg.FPN.ROI_CANONICAL_LEVEL # default: 4
# Eqn.(1) in FPN paper
target_lvls = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
target_lvls = np.clip(target_lvls, k_min, k_max)
return target_lvls
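# Standalone sketch of Eqn.(1) from the FPN paper using the default values
# mentioned above (s0 = 224, lvl0 = 4); cfg is deliberately not consulted.
# For example, a 112 x 112 box maps to level 3 and a 448 x 448 box to level 5.
def _example_fpn_level_for_box(w, h, k_min=2, k_max=6, s0=224., lvl0=4):
    s = np.sqrt(w * h)  # square root of the box area
    lvl = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
    return int(np.clip(lvl, k_min, k_max))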
def distribute(rois, label_blobs, outputs, train):
"""To understand the output blob order see return value of
detectron.roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
# Create new roi blobs for each FPN level
# (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
# to generalize to support this particular case.)
rois_idx_order = np.empty((0, ))
for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
idx_lvl = np.where(lvls == lvl)[0]
blob_roi_level = rois[idx_lvl, :]
outputs[output_idx + 1].reshape(blob_roi_level.shape)
outputs[output_idx + 1].data[...] = blob_roi_level
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_idx_restore = np.argsort(rois_idx_order)
blob_utils_py_op_copy_blob(rois_idx_restore.astype(np.int32), outputs[-1])
def collect(inputs, is_training):
cfg_key = 'TRAIN' if is_training else 'TEST'
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
num_lvls = k_max - k_min + 1
roi_inputs = inputs[:num_lvls]
score_inputs = inputs[num_lvls:]
if is_training:
score_inputs = score_inputs[:-2]
    # rois are in [[batch_idx, x0, y0, x1, y1], ...] format
# Combine predictions across all levels and retain the top scoring
rois = np.concatenate([blob.data for blob in roi_inputs])
scores = np.concatenate([blob.data for blob in score_inputs]).squeeze()
inds = np.argsort(-scores)[:post_nms_topN]
rois = rois[inds, :]
return rois
def box_utils_bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = np.minimum(dw, cfg.BBOX_XFORM_CLIP)
dh = np.minimum(dh, cfg.BBOX_XFORM_CLIP)
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
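# Consistency sketch (made-up boxes, added for illustration): applying
# box_utils_bbox_transform to the deltas produced by box_utils_bbox_transform_inv
# should recover the ground-truth boxes up to floating point error. It assumes
# cfg has been loaded so that cfg.BBOX_XFORM_CLIP is defined, as elsewhere in
# this file.
def _example_bbox_transform_roundtrip():
    boxes = np.array([[10.0, 10.0, 60.0, 80.0]], dtype=np.float32)
    gt_boxes = np.array([[15.0, 12.0, 70.0, 90.0]], dtype=np.float32)
    deltas = box_utils_bbox_transform_inv(boxes, gt_boxes, (1.0, 1.0, 1.0, 1.0))
    pred = box_utils_bbox_transform(boxes, deltas, (1.0, 1.0, 1.0, 1.0))
    assert np.allclose(pred, gt_boxes, atol=1e-3)
    return pred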
def c2_utils_CudaDevice(gpu_id):
"""Create a Cuda device."""
return core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
@contextlib.contextmanager
def c2_utils_CudaScope(gpu_id):
"""Create a CUDA device scope for GPU device `gpu_id`."""
gpu_dev = c2_utils_CudaDevice(gpu_id)
with core.DeviceScope(gpu_dev):
yield
@contextlib.contextmanager
def c2_utils_GpuNameScope(gpu_id):
"""Create a name scope for GPU device `gpu_id`."""
with core.NameScope('gpu_{:d}'.format(gpu_id)):
yield
@contextlib.contextmanager
def c2_utils_NamedCudaScope(gpu_id):
"""Creates a GPU name scope and CUDA device scope. This function is provided
to reduce `with ...` nesting levels."""
with c2_utils_GpuNameScope(gpu_id):
with c2_utils_CudaScope(gpu_id):
yield
@contextlib.contextmanager
def c2_utils_CpuScope():
"""Create a CPU device scope."""
cpu_dev = core.DeviceOption(caffe2_pb2.CPU)
with core.DeviceScope(cpu_dev):
yield
class DensePoseMethods:
def __init__(self):
#
ALP_UV = loadmat( os.path.join(os.path.dirname(__file__), 'assets/UV_Processed.mat') )
self.FaceIndices = np.array( ALP_UV['All_FaceIndices']).squeeze()
self.FacesDensePose = ALP_UV['All_Faces']-1
self.U_norm = ALP_UV['All_U_norm'].squeeze()
self.V_norm = ALP_UV['All_V_norm'].squeeze()
self.All_vertices = ALP_UV['All_vertices'][0]
## Info to compute symmetries.
self.SemanticMaskSymmetries = [0,1,3,2,5,4,7,6,9,8,11,10,13,12,14]
        self.Index_Symmetry_List = [1,2,4,3,6,5,8,7,10,9,12,11,14,13,16,15,18,17,20,19,22,21,24,23]
UV_symmetry_filename = os.path.join(os.path.dirname(__file__), 'assets/UV_symmetry_transforms.mat')
self.UV_symmetry_transformations = loadmat( UV_symmetry_filename )
def get_symmetric_densepose(self,I,U,V,x,y,Mask):
### This is a function to get the mirror symmetric UV labels.
Labels_sym= np.zeros(I.shape)
U_sym= np.zeros(U.shape)
V_sym= np.zeros(V.shape)
###
for i in ( range(24)):
if i+1 in I:
Labels_sym[I == (i+1)] = self.Index_Symmetry_List[i]
jj = np.where(I == (i+1))
###
U_loc = (U[jj]*255).astype(np.int64)
V_loc = (V[jj]*255).astype(np.int64)
###
V_sym[jj] = self.UV_symmetry_transformations['V_transforms'][0,i][V_loc,U_loc]
U_sym[jj] = self.UV_symmetry_transformations['U_transforms'][0,i][V_loc,U_loc]
##
Mask_flip = np.fliplr(Mask)
Mask_flipped = np.zeros(Mask.shape)
#
for i in ( range(14)):
Mask_flipped[Mask_flip == (i+1)] = self.SemanticMaskSymmetries[i+1]
#
[y_max , x_max ] = Mask_flip.shape
y_sym = y
x_sym = x_max-x
#
return Labels_sym , U_sym , V_sym , x_sym , y_sym , Mask_flipped
def barycentric_coordinates_exists(self,P0, P1, P2, P):
u = P1 - P0
v = P2 - P0
w = P - P0
#
vCrossW = np.cross(v,w)
vCrossU = np.cross(v, u)
if (np.dot(vCrossW, vCrossU) < 0):
            return False
#
uCrossW = np.cross(u, w)
uCrossV = np.cross(u, v)
#
if (np.dot(uCrossW, uCrossV) < 0):
            return False
#
denom = np.sqrt((uCrossV**2).sum())
r = np.sqrt((vCrossW**2).sum())/denom
t = np.sqrt((uCrossW**2).sum())/denom
#
        return (r <= 1) & (t <= 1) & (r + t <= 1)
def barycentric_coordinates(self,P0, P1, P2, P):
u = P1 - P0
v = P2 - P0
w = P - P0
#
vCrossW = np.cross(v,w)
vCrossU = np.cross(v, u)
#
uCrossW = np.cross(u, w)
uCrossV = np.cross(u, v)
#
denom = np.sqrt((uCrossV**2).sum())
r = np.sqrt((vCrossW**2).sum())/denom
t = np.sqrt((uCrossW**2).sum())/denom
#
return(1-(r+t),r,t)
def IUV2FBC( self, I_point , U_point, V_point):
P = [ U_point , V_point , 0 ]
FaceIndicesNow = np.where( self.FaceIndices == I_point )
FacesNow = self.FacesDensePose[FaceIndicesNow]
#
P_0 = np.vstack( (self.U_norm[FacesNow][:,0], self.V_norm[FacesNow][:,0], np.zeros(self.U_norm[FacesNow][:,0].shape))).transpose()
P_1 = np.vstack( (self.U_norm[FacesNow][:,1], self.V_norm[FacesNow][:,1], np.zeros(self.U_norm[FacesNow][:,1].shape))).transpose()
P_2 = np.vstack( (self.U_norm[FacesNow][:,2], self.V_norm[FacesNow][:,2], np.zeros(self.U_norm[FacesNow][:,2].shape))).transpose()
#
for i, [P0,P1,P2] in enumerate( zip(P_0,P_1,P_2)) :
if(self.barycentric_coordinates_exists(P0, P1, P2, P)):
[bc1,bc2,bc3] = self.barycentric_coordinates(P0, P1, P2, P)
return(FaceIndicesNow[0][i],bc1,bc2,bc3)
#
# If the found UV is not inside any faces, select the vertex that is closest!
#
D1 = scipy.spatial.distance.cdist( np.array( [U_point,V_point])[np.newaxis,:] , P_0[:,0:2]).squeeze()
D2 = scipy.spatial.distance.cdist( np.array( [U_point,V_point])[np.newaxis,:] , P_1[:,0:2]).squeeze()
D3 = scipy.spatial.distance.cdist( np.array( [U_point,V_point])[np.newaxis,:] , P_2[:,0:2]).squeeze()
#
minD1 = D1.min()
minD2 = D2.min()
minD3 = D3.min()
#
if((minD1< minD2) & (minD1< minD3)):
return( FaceIndicesNow[0][np.argmin(D1)] , 1.,0.,0. )
elif((minD2< minD1) & (minD2< minD3)):
return( FaceIndicesNow[0][np.argmin(D2)] , 0.,1.,0. )
else:
return( FaceIndicesNow[0][np.argmin(D3)] , 0.,0.,1. )
def FBC2PointOnSurface( self, FaceIndex, bc1,bc2,bc3,Vertices ):
##
Vert_indices = self.All_vertices[self.FacesDensePose[FaceIndex]]-1
##
p = Vertices[Vert_indices[0],:] * bc1 + \
Vertices[Vert_indices[1],:] * bc2 + \
Vertices[Vert_indices[2],:] * bc3
##
return(p)
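# Standalone sketch (plain numpy, made-up triangle) of the barycentric
# coordinate computation used by DensePoseMethods above; it does not load the
# UV .mat assets and does not instantiate the class.
def _example_barycentric_coordinates():
    P0 = np.array([0.0, 0.0, 0.0])
    P1 = np.array([1.0, 0.0, 0.0])
    P2 = np.array([0.0, 1.0, 0.0])
    P = np.array([0.25, 0.25, 0.0])  # a point inside the triangle
    u, v, w = P1 - P0, P2 - P0, P - P0
    denom = np.sqrt((np.cross(u, v) ** 2).sum())
    r = np.sqrt((np.cross(v, w) ** 2).sum()) / denom
    t = np.sqrt((np.cross(u, w) ** 2).sum()) / denom
    bc = (1 - (r + t), r, t)  # (0.5, 0.25, 0.25) for this point
    assert abs(sum(bc) - 1.0) < 1e-6  # barycentric weights sum to one
    return bc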
class CollectAndDistributeFpnRpnProposalsOp(object):
def __init__(self, train):
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.CollectAndDistributeFpnRpnProposals for
inputs/outputs documentation.
"""
# inputs is
# [rpn_rois_fpn2, ..., rpn_rois_fpn6,
# rpn_roi_probs_fpn2, ..., rpn_roi_probs_fpn6]
# If training with Faster R-CNN, then inputs will additionally include
# + [roidb, im_info]
rois = collect(inputs, self._train)
if self._train:
# During training we reuse the data loader code. We populate roidb
# entries on the fly using the rois generated by RPN.
# im_info: [[im_height, im_width, im_scale], ...]
im_info = inputs[-1].data
im_scales = im_info[:, 2]
roidb = blob_utils_deserialize(inputs[-2].data)
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
json_dataset_add_proposals(roidb, rois, im_scales, crowd_thresh=0)
roidb_utils_add_bbox_regression_targets(roidb)
# Compute training labels for the RPN proposals; also handles
# distributing the proposals over FPN levels
output_blob_names = fast_rcnn_roi_data_get_fast_rcnn_blob_names()
blobs = {k: [] for k in output_blob_names}
fast_rcnn_roi_data_add_fast_rcnn_blobs(blobs, im_scales, roidb)
for i, k in enumerate(output_blob_names):
blob_utils_py_op_copy_blob(blobs[k], outputs[i])
else:
# For inference we have a special code path that avoids some data
# loader overhead
distribute(rois, None, outputs, self._train)
class GenerateProposalLabelsOp(object):
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposalLabels for inputs/outputs
documentation.
"""
# During training we reuse the data loader code. We populate roidb
# entries on the fly using the rois generated by RPN.
# im_info: [[im_height, im_width, im_scale], ...]
rois = inputs[0].data
roidb = blob_utils_deserialize(inputs[1].data)
im_info = inputs[2].data
im_scales = im_info[:, 2]
output_blob_names = fast_rcnn_roi_data_get_fast_rcnn_blob_names()
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
json_dataset_add_proposals(roidb, rois, im_scales, crowd_thresh=0)
roidb_utils_add_bbox_regression_targets(roidb)
blobs = {k: [] for k in output_blob_names}
fast_rcnn_roi_data_add_fast_rcnn_blobs(blobs, im_scales, roidb)
for i, k in enumerate(output_blob_names):
blob_utils_py_op_copy_blob(blobs[k], outputs[i])
class GenerateProposalsOp(object):
"""Output object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, anchors, spatial_scale, train):
self._anchors = anchors
self._num_anchors = self._anchors.shape[0]
self._feat_stride = 1. / spatial_scale
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
# 1. for each location i in a (H, W) grid:
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas to each of the A anchors at cell i
# 2. clip predicted boxes to image
# 3. remove predicted boxes with either height or width < threshold
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take the top pre_nms_topN proposals before NMS
# 6. apply NMS with a loose threshold (0.7) to the remaining proposals
# 7. take after_nms_topN proposals after NMS
# 8. return the top proposals
# predicted probability of fg object for each RPN anchor
scores = inputs[0].data
        # predicted anchor transformations
bbox_deltas = inputs[1].data
# input image (height, width, scale), in which scale is the scale factor
# applied to the original dataset image to get the network input image
im_info = inputs[2].data
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
# Enumerate all shifted positions on the (H, W) grid
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
# Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)
# shift pointing to each grid location
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
        # Broadcast anchors over shifts to enumerate all anchors at all positions
# in the (H, W) grid:
# - add A anchors of shape (1, A, 4) to
# - K shifts of shape (K, 1, 4) to get
# - all shifted anchors of shape (K, A, 4)
# - reshape to (K*A, 4) shifted anchors
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
im_i_boxes, im_i_probs = self.proposals_for_one_image(
im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],
scores[im_i, :, :, :]
)
batch_inds = im_i * np.ones(
(im_i_boxes.shape[0], 1), dtype=np.float32
)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
def proposals_for_one_image(
self, im_info, all_anchors, bbox_deltas, scores
):
# Get mode-dependent configuration
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# - bbox deltas will be (4 * A, H, W) format from conv output
# - transpose to (H, W, 4 * A)
# - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
# in slowest to fastest order to match the enumerated anchors
bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape((-1, 4))
# Same story for the scores:
# - scores are (A, H, W) format from conv output
# - transpose to (H, W, A)
# - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
# to match the order of anchors and bbox_deltas
scores = scores.transpose((1, 2, 0)).reshape((-1, 1))
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
order = np.argsort(-scores.squeeze())
else:
# Avoid sorting possibly large arrays; First partition to get top K
# unsorted and then sort just those (~20x faster for 200k scores)
inds = np.argpartition(
-scores.squeeze(), pre_nms_topN
)[:pre_nms_topN]
order = np.argsort(-scores[inds].squeeze())
order = inds[order]
bbox_deltas = bbox_deltas[order, :]
all_anchors = all_anchors[order, :]
scores = scores[order]
# Transform anchors into proposals via bbox transformations
proposals = box_utils_bbox_transform(
all_anchors, bbox_deltas, (1.0, 1.0, 1.0, 1.0))
# 2. clip proposals to image (may result in proposals with zero area
# that will be removed in the next step)
proposals = box_utils_clip_tiled_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < min_size
keep = _filter_boxes(proposals, min_size, im_info)
proposals = proposals[keep, :]
scores = scores[keep]
# 6. apply loose nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if nms_thresh > 0:
keep = box_utils_nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
return proposals, scores
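# Minimal standalone sketch (made-up anchors, assumed feature stride of 16) of
# the anchor-shift broadcasting performed in GenerateProposalsOp.forward above:
# the A cell anchors are replicated at every location of an (H, W) grid to
# produce K * A shifted anchors, K = H * W.
def _example_enumerate_shifted_anchors(height=2, width=3, feat_stride=16.):
    anchors = np.array([[-8., -8., 8., 8.],
                        [-16., -16., 16., 16.]], dtype=np.float32)  # A = 2
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                        shift_x.ravel(), shift_y.ravel())).transpose()
    K, A = shifts.shape[0], anchors.shape[0]
    all_anchors = anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
    return all_anchors.reshape((K * A, 4))  # (12, 4) for these defaults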
class DetectionModelHelper(cnn.CNNModelHelper):
def __init__(self, **kwargs):
# Handle args specific to the DetectionModelHelper, others pass through
# to CNNModelHelper
self.train = kwargs.get('train', False)
self.num_classes = kwargs.get('num_classes', -1)
assert self.num_classes > 0, 'num_classes must be > 0'
for k in ('train', 'num_classes'):
if k in kwargs:
del kwargs[k]
kwargs['order'] = 'NCHW'
# Defensively set cudnn_exhaustive_search to False in case the default
# changes in CNNModelHelper. The detection code uses variable size
# inputs that might not play nicely with cudnn_exhaustive_search.
kwargs['cudnn_exhaustive_search'] = False
super(DetectionModelHelper, self).__init__(**kwargs)
self.roi_data_loader = None
self.losses = []
self.metrics = []
        self.do_not_update_params = []  # Params on this list are not updated
self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
self.net.Proto().num_workers = cfg.NUM_GPUS * 4
self.prev_use_cudnn = self.use_cudnn
        self.gn_params = []  # Params on this list are GroupNorm parameters
def TrainableParams(self, gpu_id=-1):
"""Get the blob names for all trainable parameters, possibly filtered by
GPU id.
"""
return [
p for p in self.params
if (
p in self.param_to_grad and # p has a gradient
p not in self.do_not_update_params and # not on the blacklist
(gpu_id == -1 or # filter for gpu assignment, if gpu_id set
str(p).find('gpu_{}'.format(gpu_id)) == 0)
)]
def AffineChannel(self, blob_in, blob_out, dim, inplace=False):
"""Affine transformation to replace BN in networks where BN cannot be
used (e.g., because the minibatch size is too small).
The operations can be done in place to save memory.
"""
blob_out = blob_out or self.net.NextName()
param_prefix = blob_out
scale = self.create_param(
param_name=param_prefix + '_s',
initializer=initializers.Initializer("ConstantFill", value=1.),
tags=ParameterTags.WEIGHT,
shape=[dim, ],
)
bias = self.create_param(
param_name=param_prefix + '_b',
initializer=initializers.Initializer("ConstantFill", value=0.),
tags=ParameterTags.BIAS,
shape=[dim, ],
)
if inplace:
return self.net.AffineChannel([blob_in, scale, bias], blob_in)
else:
return self.net.AffineChannel([blob_in, scale, bias], blob_out)
def GenerateProposals(self, blobs_in, blobs_out, anchors, spatial_scale):
"""Op for generating RPN porposals.
blobs_in:
- 'rpn_cls_probs': 4D tensor of shape (N, A, H, W), where N is the
number of minibatch images, A is the number of anchors per
            location, and (H, W) is the spatial size of the prediction grid.
Each value represents a "probability of object" rating in [0, 1].
- 'rpn_bbox_pred': 4D tensor of shape (N, 4 * A, H, W) of predicted
            deltas for transforming anchor boxes into RPN proposals.
- 'im_info': 2D tensor of shape (N, 3) where the three columns encode
the input image's [height, width, scale]. Height and width are
for the input to the network, not the original image; scale is the
scale factor used to scale the original image to the network input
size.
blobs_out:
- 'rpn_rois': 2D tensor of shape (R, 5), for R RPN proposals where the
five columns encode [batch ind, x1, y1, x2, y2]. The boxes are
w.r.t. the network input, which is a *scaled* version of the
original image; these proposals must be scaled by 1 / scale (where
            scale comes from im_info; see above) to transform them back to the
original input image coordinate system.
- 'rpn_roi_probs': 1D tensor of objectness probability scores
(extracted from rpn_cls_probs; see above).
"""
name = 'GenerateProposalsOp:' + ','.join([str(b) for b in blobs_in])
# spatial_scale passed to the Python op is only used in convert_pkl_to_pb
self.net.Python(
GenerateProposalsOp(anchors, spatial_scale, self.train).forward
)(blobs_in, blobs_out, name=name, spatial_scale=spatial_scale)
return blobs_out
def GenerateProposalLabels(self, blobs_in):
"""Op for generating training labels for RPN proposals. This is used
when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
Faster R-CNN training).
blobs_in:
- 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
- 'roidb': roidb entries that will be labeled
- 'im_info': See GenerateProposals doc.
blobs_out:
- (variable set of blobs): returns whatever blobs are required for
training the model. It does this by querying the data loader for
the list of blobs that are needed.
"""
name = 'GenerateProposalLabelsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# The list of blobs is not known before run-time because it depends on
# the specific model being trained. Query the data loader to get the
# list of output blob names.
blobs_out = fast_rcnn_roi_data_get_fast_rcnn_blob_names(
is_training=self.train
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
self.net.Python(GenerateProposalLabelsOp().forward)(
blobs_in, blobs_out, name=name
)
return blobs_out
def CollectAndDistributeFpnRpnProposals(self):
"""Merge RPN proposals generated at multiple FPN levels and then
distribute those proposals to their appropriate FPN levels. An anchor
at one FPN level may predict an RoI that will map to another level,
hence the need to redistribute the proposals.
This function assumes standard blob names for input and output blobs.
Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,
rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]
- rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois
documentation from GenerateProposals.
- rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN
level i; see rpn_roi_probs documentation from GenerateProposals.
If used during training, then the input blobs will also include:
[roidb, im_info] (see GenerateProposalLabels).
        Output blobs: [rois_fpn<min>, ..., rois_fpn<max>, rois,
rois_idx_restore]
- rois_fpn<i> are the RPN proposals for FPN level i
- rois_idx_restore is a permutation on the concatenation of all
rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are
restored to their original order in the input blobs.
If used during training, then the output blobs will also include:
[labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].
"""
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
# Prepare input blobs
rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
score_names = [
'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
]
blobs_in = rois_names + score_names
if self.train:
blobs_in += ['roidb', 'im_info']
blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]
name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# Prepare output blobs
blobs_out = fast_rcnn_roi_data_get_fast_rcnn_blob_names(
is_training=self.train
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
outputs = self.net.Python(
CollectAndDistributeFpnRpnProposalsOp(self.train).forward
)(blobs_in, blobs_out, name=name)
return outputs
def DropoutIfTraining(self, blob_in, dropout_rate):
"""Add dropout to blob_in if the model is in training mode and
dropout_rate is > 0."""
blob_out = blob_in
if self.train and dropout_rate > 0:
blob_out = self.Dropout(
blob_in, blob_in, ratio=dropout_rate, is_test=False
)
return blob_out
def RoIFeatureTransform(
self,
blobs_in,
blob_out,
blob_rois='rois',
method='RoIPoolF',
resolution=7,
spatial_scale=1. / 16.,
sampling_ratio=0
):
"""Add the specified RoI pooling method. The sampling_ratio argument
is supported for some, but not all, RoI transform methods.
RoIFeatureTransform abstracts away:
- Use of FPN or not
- Specifics of the transform method
"""
assert method in {'RoIPoolF', 'RoIAlign'}, \
'Unknown pooling method: {}'.format(method)
has_argmax = (method == 'RoIPoolF')
if isinstance(blobs_in, list):
# FPN case: add RoIFeatureTransform to each FPN level
k_max = cfg.FPN.ROI_MAX_LEVEL # coarsest level of pyramid
k_min = cfg.FPN.ROI_MIN_LEVEL # finest level of pyramid
assert len(blobs_in) == k_max - k_min + 1
bl_out_list = []
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order
sc = spatial_scale[k_max - lvl] # in reversed order
bl_rois = blob_rois + '_fpn' + str(lvl)
bl_out = blob_out + '_fpn' + str(lvl)
bl_out_list.append(bl_out)
bl_argmax = ['_argmax_' + bl_out] if has_argmax else []
self.net.__getattr__(method)(
[bl_in, bl_rois], [bl_out] + bl_argmax,
pooled_w=resolution,
pooled_h=resolution,
spatial_scale=sc,
sampling_ratio=sampling_ratio
)
# The pooled features from all levels are concatenated along the
# batch dimension into a single 4D tensor.
xform_shuffled, _ = self.net.Concat(
bl_out_list, [blob_out + '_shuffled', '_concat_' + blob_out],
axis=0
)
# Unshuffle to match rois from dataloader
restore_bl = blob_rois + '_idx_restore_int32'
xform_out = self.net.BatchPermutation(
[xform_shuffled, restore_bl], blob_out
)
else:
# Single feature level
bl_argmax = ['_argmax_' + blob_out] if has_argmax else []
# sampling_ratio is ignored for RoIPoolF
xform_out = self.net.__getattr__(method)(
[blobs_in, blob_rois], [blob_out] + bl_argmax,
pooled_w=resolution,
pooled_h=resolution,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio
)
# Only return the first blob (the transformed features)
return xform_out[0] if isinstance(xform_out, tuple) else xform_out
def ConvShared(
self,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight=None,
bias=None,
**kwargs
):
"""Add conv op that shares weights and/or biases with another conv op.
"""
use_bias = (
False if ('no_bias' in kwargs and kwargs['no_bias']) else True
)
if self.use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = self.cudnn_exhaustive_search
if self.ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = self.ws_nbytes_limit
if use_bias:
blobs_in = [blob_in, weight, bias]
else:
blobs_in = [blob_in, weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
return self.net.Conv(
blobs_in, blob_out, kernel=kernel, order=self.order, **kwargs
)
def BilinearInterpolation(
self, blob_in, blob_out, dim_in, dim_out, up_scale
):
"""Bilinear interpolation in space of scale.
Takes input of NxKxHxW and outputs NxKx(sH)x(sW), where s:= up_scale
Adapted from the CVPR'15 FCN code.
See: https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
"""
assert dim_in == dim_out
assert up_scale % 2 == 0, 'Scale should be even'
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return ((1 - abs(og[0] - center) / factor) *
(1 - abs(og[1] - center) / factor))
kernel_size = up_scale * 2
bil_filt = upsample_filt(kernel_size)
kernel = np.zeros(
(dim_in, dim_out, kernel_size, kernel_size), dtype=np.float32
)
kernel[range(dim_out), range(dim_in), :, :] = bil_filt
blob = self.ConvTranspose(
blob_in,
blob_out,
dim_in,
dim_out,
kernel_size,
stride=int(up_scale),
pad=int(up_scale / 2),
weight_init=('GivenTensorFill', {'values': kernel}),
bias_init=('ConstantFill', {'value': 0.})
)
self.do_not_update_params.append(self.weights[-1])
self.do_not_update_params.append(self.biases[-1])
return blob
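    # Worked example for the construction above (derived from the code): with
    # up_scale=2 we get kernel_size=4, factor=2 and center=1.5, so
    # upsample_filt(4) is the outer product of [0.25, 0.75, 0.75, 0.25] with
    # itself. That 4x4 filter is placed on the channel diagonal of the
    # ConvTranspose weights, and the stride-2 / pad-1 deconv doubles H and W;
    # the weights are frozen via do_not_update_params.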
def ConvAffine( # args in the same order of Conv()
self, blob_in, prefix, dim_in, dim_out, kernel, stride, pad,
group=1, dilation=1,
weight_init=None,
bias_init=None,
suffix='_bn',
inplace=False
):
"""ConvAffine adds a Conv op followed by a AffineChannel op (which
replaces BN during fine tuning).
"""
conv_blob = self.Conv(
blob_in,
prefix,
dim_in,
dim_out,
kernel,
stride=stride,
pad=pad,
group=group,
dilation=dilation,
weight_init=weight_init,
bias_init=bias_init,
no_bias=1
)
blob_out = self.AffineChannel(
conv_blob, prefix + suffix, dim=dim_out, inplace=inplace
)
return blob_out
def ConvGN( # args in the same order of Conv()
self, blob_in, prefix, dim_in, dim_out, kernel, stride, pad,
group_gn, # num of groups in gn
group=1, dilation=1,
weight_init=None,
bias_init=None,
suffix='_gn',
no_conv_bias=1,
):
"""ConvGN adds a Conv op followed by a GroupNorm op,
including learnable scale/bias (gamma/beta)
"""
conv_blob = self.Conv(
blob_in,
prefix,
dim_in,
dim_out,
kernel,
stride=stride,
pad=pad,
group=group,
dilation=dilation,
weight_init=weight_init,
bias_init=bias_init,
no_bias=no_conv_bias)
if group_gn < 1:
logger.warning(
'Layer: {} (dim {}): '
'group_gn < 1; reset to 1.'.format(prefix, dim_in)
)
group_gn = 1
blob_out = self.SpatialGN(
conv_blob, prefix + suffix,
dim_out, num_groups=group_gn,
epsilon=cfg.GROUP_NORM.EPSILON,)
self.gn_params.append(self.params[-1]) # add gn's bias to list
self.gn_params.append(self.params[-2]) # add gn's scale to list
return blob_out
def DisableCudnn(self):
self.prev_use_cudnn = self.use_cudnn
self.use_cudnn = False
def RestorePreviousUseCudnn(self):
prev_use_cudnn = self.use_cudnn
self.use_cudnn = self.prev_use_cudnn
self.prev_use_cudnn = prev_use_cudnn
def UpdateWorkspaceLr(self, cur_iter, new_lr):
"""Updates the model's current learning rate and the workspace (learning
rate and update history/momentum blobs).
"""
# The workspace is the one source of truth for the lr
# The lr is always the same on all GPUs
cur_lr = workspace.FetchBlob('gpu_0/lr')[0]
# There are no type conversions between the lr in Python and the lr in
        # the GPU (both are float32), so exact comparison is ok
if cur_lr != new_lr:
ratio = _get_lr_change_ratio(cur_lr, new_lr)
self._SetNewLr(cur_lr, new_lr)
return new_lr
def _SetNewLr(self, cur_lr, new_lr):
"""Do the actual work of updating the model and workspace blobs.
"""
for i in range(cfg.NUM_GPUS):
with c2_utils_CudaScope(i):
workspace.FeedBlob(
'gpu_{}/lr'.format(i), np.array([new_lr], dtype=np.float32))
ratio = _get_lr_change_ratio(cur_lr, new_lr)
if cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-7 and \
ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD:
self._CorrectMomentum(new_lr / cur_lr)
def _CorrectMomentum(self, correction):
"""The MomentumSGDUpdate op implements the update V as
V := mu * V + lr * grad,
where mu is the momentum factor, lr is the learning rate, and grad is
the stochastic gradient. Since V is not defined independently of the
learning rate (as it should ideally be), when the learning rate is
changed we should scale the update history V in order to make it
compatible in scale with lr * grad.
"""
for i in range(cfg.NUM_GPUS):
with c2_utils_CudaScope(i):
for param in self.TrainableParams(gpu_id=i):
op = core.CreateOperator(
'Scale', [param + '_momentum'], [param + '_momentum'],
scale=correction)
workspace.RunOperatorOnce(op)
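    # Numeric sketch of the correction above: MomentumSGDUpdate keeps
    # V := mu * V + lr * grad, so dropping the lr from 0.01 to 0.001 makes
    # _SetNewLr call _CorrectMomentum(0.001 / 0.01) = _CorrectMomentum(0.1)
    # (when SCALE_MOMENTUM is enabled and the change ratio exceeds the
    # threshold), i.e. each '<param>_momentum' blob is scaled by 0.1 so the
    # history stays on the same scale as the new lr * grad term.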
def GetLossScale(self):
"""Allow a way to configure the loss scale dynamically.
This may be used in a distributed data parallel setting.
"""
return 1.0 / cfg.NUM_GPUS
def AddLosses(self, losses):
if not isinstance(losses, list):
losses = [losses]
# Conversion to str allows losses to include BlobReferences
losses = [c2_utils_UnscopeName(str(l)) for l in losses]
self.losses = list(set(self.losses + losses))
def AddMetrics(self, metrics):
if not isinstance(metrics, list):
metrics = [metrics]
self.metrics = list(set(self.metrics + metrics))
class Timer(object):
"""A simple timer."""
def __init__(self):
self.reset()
def tic(self):
        # using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
class AttrDict(dict):
IMMUTABLE = '__immutable__'
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__[AttrDict.IMMUTABLE] = False
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if not self.__dict__[AttrDict.IMMUTABLE]:
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
else:
raise AttributeError(
'Attempted to set "{}" to "{}", but AttrDict is immutable'.
format(name, value)
)
def immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested AttrDicts.
"""
self.__dict__[AttrDict.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
for v in self.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
def is_immutable(self):
return self.__dict__[AttrDict.IMMUTABLE]
c2_utils_import_detectron_ops()
cv2.ocl.setUseOpenCL(False)
DP = DensePoseMethods()
def main(args_im_or_folder, args_cfg, args_output_dir, args_image_ext, args_weights):
logger = logging.getLogger(__name__)
merge_cfg_from_file(args_cfg)
cfg.NUM_GPUS = 1
args_weights = cache_url(args_weights, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = infer_engine_initialize_model_from_cfg(args_weights)
dummy_coco_dataset = dummy_datasets_get_coco_dataset()
if os.path.isdir(args_im_or_folder):
im_list = glob.iglob(args_im_or_folder + '/*.' + args_image_ext)
else:
im_list = [args_im_or_folder]
for i, im_name in enumerate(im_list):
out_name = os.path.join(
args_output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
)
#logger.info('Processing {} -> {}'.format(im_name, out_name))
im = cv2.imread(im_name)
timers = defaultdict(Timer)
t = time.time()
with c2_utils_NamedCudaScope(0):
cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine_im_detect_all(
model, im, None, timers=timers
)
vis_utils_vis_one_image(
im[:, :, ::-1], # BGR -> RGB for visualization
im_name,
args_output_dir,
cls_boxes,
cls_segms,
cls_keyps,
cls_bodys,
dataset=dummy_coco_dataset,
box_alpha=0.3,
show_class=True,
thresh=0.7,
kp_thresh=2
)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
main('data/', 'assets/config.yaml',
'output/', 'jpg',
'https://dl.fbaipublicfiles.com/densepose/DensePose_ResNet101_FPN_s1x-e2e.pkl')
|
the-stack_0_16545 | import komand
from .schema import UpdateSiteIncludedTargetsInput, UpdateSiteIncludedTargetsOutput, Input
# Custom imports below
from komand_rapid7_insightvm.util import endpoints
from komand_rapid7_insightvm.util.resource_requests import ResourceRequests
import json
class UpdateSiteIncludedTargets(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='update_site_included_targets',
description='Update an existing site scope of included ip address and hostname targets',
input=UpdateSiteIncludedTargetsInput(),
output=UpdateSiteIncludedTargetsOutput())
def run(self, params={}):
scope = params.get(Input.INCLUDED_TARGETS)
resource_helper = ResourceRequests(self.connection.session, self.logger)
endpoint = endpoints.Site.site_included_targets(self.connection.console_url, params.get(Input.ID))
# Pull current site scope in order to append to list instead of overwriting
if not params.get(Input.OVERWRITE):
current_scope = resource_helper.resource_request(endpoint=endpoint,
method='get')
self.logger.info(f"Appending to current list of included targets")
scope.extend(current_scope['addresses'])
self.logger.info(f"Using {endpoint} ...")
payload = {"rawbody": scope}
response = resource_helper.resource_request(endpoint=endpoint,
method='put',
payload=payload)
return {
"id": params.get(Input.ID),
"links": response['links']
}
|
the-stack_0_16549 | #!/usr/bin/env python3
import os
import sys
import urllib.request
import tarfile
import zipfile
import shutil
from typing import List, Optional
PLATFORM_WINDOWS = "windows"
PLATFORM_LINUX = "linux"
PLATFORM_MACOS = "mac"
DOTNET_RUNTIME_VERSION = "6.0.0"
DOTNET_RUNTIME_DOWNLOADS = {
PLATFORM_LINUX: "https://download.visualstudio.microsoft.com/download/pr/0ce1c34f-0d9e-4d9b-964e-da676c8e605a/7a6c353b36477fa84f85b2821f2350c2/dotnet-runtime-6.0.0-linux-x64.tar.gz",
PLATFORM_WINDOWS: "https://download.visualstudio.microsoft.com/download/pr/6b96c97d-9b8c-4141-a32a-5848d3369dbf/9972321cb7af5938fecdee2d8ebd72bb/dotnet-runtime-6.0.0-win-x64.zip",
PLATFORM_MACOS: "https://download.visualstudio.microsoft.com/download/pr/d88f74a5-05d2-46cb-886a-a62fd698009d/67f5f05e9c029d284c309f0f712fc99f/dotnet-runtime-6.0.0-osx-x64.tar.gz"
}
p = os.path.join
def main() -> None:
update_netcore_runtime(sys.argv[1:])
def update_netcore_runtime(platforms: List[str]) -> None:
runtime_cache = p("Dependencies/dotnet")
version_file_path = p(runtime_cache, "VERSION")
# Check if current version is fine.
current_version: Optional[str]
try:
with open(version_file_path, "r") as f:
current_version = f.read().strip()
except FileNotFoundError:
current_version = None
if current_version != DOTNET_RUNTIME_VERSION and os.path.exists(runtime_cache):
print("Cached Release .NET Core Runtime out of date/nonexistant, downloading new one..")
shutil.rmtree(runtime_cache)
os.makedirs(runtime_cache, exist_ok=True)
with open(version_file_path, "w") as f:
f.write(DOTNET_RUNTIME_VERSION)
# Download missing runtimes if necessary.
for platform in platforms:
platform_runtime_cache = p(runtime_cache, platform)
if not os.path.exists(platform_runtime_cache):
os.mkdir(platform_runtime_cache)
download_platform_runtime(platform_runtime_cache, platform)
def download_platform_runtime(dir: str, platform: str) -> None:
print(f"Downloading .NET Core Runtime for platform {platform}.")
download_file = p(dir, "download.tmp")
download_url = DOTNET_RUNTIME_DOWNLOADS[platform]
urllib.request.urlretrieve(download_url, download_file)
if download_url.endswith(".tar.gz"):
# this is a tar gz.
with tarfile.open(download_file, "r:gz") as tar:
tar.extractall(dir)
elif download_url.endswith(".zip"):
with zipfile.ZipFile(download_file) as zipF:
zipF.extractall(dir)
os.remove(download_file)
if __name__ == "__main__":
main()
|
the-stack_0_16550 | import yaml
import json
from os import listdir
from os.path import isfile, join
"""
{ name, kingdom, imageUrl}
"""
path = "./data/raw/image-url.yml"
stream = open(path, "r")
data = yaml.load_all(stream, yaml.Loader)
data_dicts = [
{
"name": datum["name"].lower(),
"kingdom": datum["kingdom"],
"imageUrl": datum["imageUrl"],
}
for datum in data
]
json_data = {"data": data_dicts}
with open("./data/generated/json/image-urls.json", "w") as fout:
json_dumps_str = json.dumps(json_data, indent=4)
print(json_dumps_str, file=fout)
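# Example of the expected round trip, assuming an input document in
# ./data/raw/image-url.yml such as:
#
#   name: Aardvark
#   kingdom: Animalia
#   imageUrl: https://example.com/aardvark.jpg
#
# the script writes a JSON entry like
#   {"name": "aardvark", "kingdom": "Animalia", "imageUrl": "https://example.com/aardvark.jpg"}
# (names are lower-cased by the comprehension above; the URL is a hypothetical
# placeholder).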
|
the-stack_0_16551 | from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtWidgets import *
from tools.modeltool import *
from tools.tool import *
from tools.pathtool import *
from tools.milltask import *
from guifw.gui_elements import *
import sys, os, os.path
from solids import *
from objectviewer import *
class ModelDialog(QtGui.QWidget):
def __init__(self, viewer):
QtGui.QWidget.__init__(self)
mlayout = QtGui.QGridLayout()
self.setLayout(mlayout)
loadbutton = QtGui.QPushButton("Load")
loadbutton.clicked.connect(self.showDialog)
mlayout.addWidget(loadbutton, 0, 0)
self.modelTool = ModelTool(name="Model", object=None, viewUpdater=self.updateView)
self.toolWidget = ToolPropertyWidget(parent=self, tool=self.modelTool)
mlayout.addWidget(self.toolWidget, 1, 0)
self.viewer = viewer
self.object = Solid()
if len(sys.argv) > 1:
self.loadObject(sys.argv[1])
def updateView(self, mode='mesh'):
if mode == 'mesh':
self.viewer.showFacets(self.modelTool.object)
if mode == 'heightmap':
self.viewer.showHeightMap(self.modelTool.object)
if mode == 'slice':
self.viewer.showFacets(self.modelTool.object)
def showDialog(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '', "STL files (*.stl)")[0]
self.loadObject(filename)
def loadObject(self, filename):
if not os.path.isfile(filename):
return
self.object = Solid()
self.object.load(filename)
self.object.__class__ = CAM_Solid
self.modelTool.object = self.object
self.updateView()
|
the-stack_0_16554 | # model settings
temperature = 0.01
with_norm = True
query_dim = 128
model = dict(
type='UVCNeckMoCoTrackerV2',
queue_dim=query_dim,
patch_queue_size=256 * 144 * 5,
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(0, 1, 2, 3),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
norm_cfg=dict(type='SyncBN', requires_grad=True),
num_outs=4,
out_index=1),
cls_head=dict(
type='UVCHead',
loss_feat=None,
loss_aff=dict(
type='ConcentrateLoss',
win_len=8,
stride=8,
temperature=temperature,
with_norm=with_norm,
loss_weight=1.),
loss_bbox=dict(type='L1Loss', loss_weight=10.),
in_channels=256,
channels=128,
temperature=temperature,
with_norm=with_norm,
init_std=0.01,
track_type='coord'),
patch_head=dict(
type='MoCoHead',
loss_feat=dict(type='MultiPairNCE', loss_weight=1.),
in_channels=512,
# num_convs=2,
# kernel_size=3,
# norm_cfg=dict(type='BN'),
# act_cfg=dict(type='ReLU'),
channels=query_dim,
temperature=0.2,
with_norm=with_norm))
# model training and testing settings
train_cfg = dict(
patch_size=96,
img_as_ref=True,
img_as_tar=False,
img_as_embed=True,
patch_geo_aug=True,
patch_color_aug=True,
diff_crop=True,
skip_cycle=True,
center_ratio=0.,
shuffle_bn=True)
test_cfg = dict(
precede_frames=7,
topk=5,
temperature=temperature,
# strides=(1, 2, 1, 1),
out_indices=(0, ),
neighbor_range=40,
with_norm=with_norm,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
# dict(type='Resize', scale=(-1, 256)),
# dict(type='RandomResizedCrop', area_range=(0.2, 1.)),
dict(type='Resize', scale=(256, 256), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False),
# dict(type='RandomGrayScale', p=0.2, same_across_clip=False),
# dict(type='RandomGaussianBlur', p=0.5, same_across_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=48,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=1e-1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['uvc-fpn-moco2'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
|
the-stack_0_16555 | # container-service-extension
# Copyright (c) 2017 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""Basic utility methods to perform data transformation and file operations."""
import hashlib
import os
import pathlib
import platform
import stat
import sys
from typing import List
import urllib
import click
import pkg_resources
from pyvcloud.vcd.vcd_api_version import VCDApiVersion
import requests
import semantic_version
from container_service_extension.logging.logger import NULL_LOGGER
# chunk size in bytes for file reading
BUF_SIZE = 65536
# chunk size for downloading files
SIZE_1MB = 1024 * 1024
_type_to_string = {
str: 'string',
int: 'number',
bool: 'true/false',
dict: 'mapping',
list: 'sequence',
}
class NullPrinter:
"""Callback object which does nothing."""
def general_no_color(self, msg):
pass
def general(self, msg):
pass
def info(self, msg):
pass
def error(self, msg):
pass
class ConsoleMessagePrinter(NullPrinter):
"""Callback object to print color coded message on console."""
def general_no_color(self, msg):
click.secho(msg)
def general(self, msg):
click.secho(msg, fg='green')
def info(self, msg):
click.secho(msg, fg='yellow')
def error(self, msg):
click.secho(msg, fg='red')
def get_cse_version():
return pkg_resources.require('container-service-extension')[0].version
def get_cse_info():
return {
'product': 'CSE',
'description': 'Container Service Extension for VMware vCloud Director', # noqa: E501
'version': get_cse_version(),
'python': platform.python_version()
}
def get_installed_cse_version() -> semantic_version.Version:
"""."""
cse_version_raw = get_cse_info()['version']
# Cleanup version string. Strip dev version string segment.
# e.g. convert '2.6.0.0b2.dev5' to '2.6.0'
tokens = cse_version_raw.split('.')[:3]
return semantic_version.Version('.'.join(tokens))
def prompt_text(text, color='black', hide_input=False, type=str):
click_text = click.style(str(text), fg=color)
return click.prompt(click_text, hide_input=hide_input, type=type)
def is_environment_variable_enabled(env_var_name):
"""Check if the environment variable is set.
:param str env_var_name: Name of the environment variable
:rtype: bool
"""
return str_to_bool(os.getenv(env_var_name))
def get_duplicate_items_in_list(items):
"""Find duplicate entries in a list.
:param list items: list of items with possible duplicates.
:return: the items that occur more than once in input list. Each duplicated
item will be mentioned only once in the returned list.
:rtype: list
"""
seen = set()
duplicates = set()
if items:
for item in items:
if item in seen:
duplicates.add(item)
else:
seen.add(item)
return list(duplicates)
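# For example, get_duplicate_items_in_list(['a', 'b', 'a', 'c', 'c', 'c'])
# returns ['a', 'c']: each duplicated item is reported once, and the ordering
# is not guaranteed because the duplicates are collected in a set.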
def check_keys_and_value_types(dikt, ref_dict, location='dictionary',
excluded_keys=None,
msg_update_callback=NullPrinter()):
"""Compare a dictionary with a reference dictionary.
The method ensures that all keys and value types are the same in the
dictionaries.
:param dict dikt: the dictionary to check for validity
:param dict ref_dict: the dictionary to check against
:param str location: where this check is taking place, so error messages
can be more descriptive.
    :param list excluded_keys: list of str, representing the keys which, if
        missing, won't raise an exception.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:raises KeyError: if @dikt has missing or invalid keys
:raises TypeError: if the value of a property in @dikt does not match with
the value of the same property in @ref_dict
"""
if excluded_keys is None:
excluded_keys = []
ref_keys = set(ref_dict.keys())
keys = set(dikt.keys())
missing_keys = ref_keys - keys - set(excluded_keys)
if missing_keys:
msg_update_callback.error(
f"Missing keys in {location}: {missing_keys}")
bad_value = False
for k in ref_keys:
if k not in keys:
continue
value_type = type(ref_dict[k])
if not isinstance(dikt[k], value_type):
msg_update_callback.error(
f"{location} key '{k}': value type should be "
f"'{_type_to_string[value_type]}'")
bad_value = True
if missing_keys:
raise KeyError(f"Missing and/or invalid key in {location}")
if bad_value:
raise TypeError(f"Incorrect type for property value(s) in {location}")
def check_python_version(msg_update_callback=NullPrinter()):
"""Ensure that user's Python version >= 3.7.3.
If the check fails, will exit the python interpreter with error status.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
"""
try:
msg_update_callback.general_no_color(
"Required Python version: >= 3.7.3\n"
f"Installed Python version: {sys.version}")
if sys.version_info < (3, 7, 3):
raise Exception("Python version should be 3.7.3 or greater")
except Exception as err:
msg_update_callback.error(str(err))
sys.exit(1)
def str_to_bool(s):
"""Convert string boolean values to bool.
The conversion is case insensitive.
:param s: input string
    :return: True if s is 'true', 'yes' or 'y' (case insensitive), otherwise False
"""
return str(s).lower() in ('true', 'yes', 'y')
def get_sha256(filepath):
"""Get sha256 hash of file as a string.
:param str filepath: path to file.
:return: sha256 string for the file.
:rtype: str
"""
sha256 = hashlib.sha256()
with open(filepath, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
def check_file_permissions(filename, msg_update_callback=NullPrinter()):
"""Ensure that the file has correct permissions.
Unix based system:
Owner - r/w permission
Other - No access
Windows:
No check
:param str filename: path to file.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:raises Exception: if file has 'x' permissions for Owner or 'rwx'
permissions for 'Others' or 'Group'.
"""
if os.name == 'nt':
return
err_msgs = []
file_mode = os.stat(filename).st_mode
if file_mode & stat.S_IXUSR:
msg = f"Remove execute permission of the Owner for the file {filename}"
msg_update_callback.error(msg)
err_msgs.append(msg)
if file_mode & stat.S_IROTH or file_mode & stat.S_IWOTH \
or file_mode & stat.S_IXOTH:
msg = f"Remove read, write and execute permissions of Others for " \
f"the file {filename}"
msg_update_callback.error(msg)
err_msgs.append(msg)
if file_mode & stat.S_IRGRP or file_mode & stat.S_IWGRP \
or file_mode & stat.S_IXGRP:
msg = f"Remove read, write and execute permissions of Group for the " \
f"file {filename}"
msg_update_callback.error(msg)
err_msgs.append(msg)
if err_msgs:
raise IOError(err_msgs)
def download_file(url, filepath, sha256=None, force_overwrite=False,
logger=NULL_LOGGER, msg_update_callback=NullPrinter()):
"""Download a file from a url to local filepath.
Will not overwrite files unless @sha256 is given.
Recursively creates specified directories in @filepath.
:param str url: source url.
:param str filepath: destination filepath.
:param str sha256: without this argument, if a file already exists at
@filepath, download will be skipped. If @sha256 matches the file's
sha256, download will be skipped.
:param bool force_overwrite: if True, will download the file even if it
already exists or its SHA hasn't changed.
:param logging.Logger logger: logger to log with.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:raises HTTPError: if the response has an error status code
"""
path = pathlib.Path(filepath)
if not force_overwrite and path.is_file() and \
(sha256 is None or get_sha256(filepath) == sha256):
msg = f"Skipping download to '{filepath}' (file already exists)"
logger.info(msg)
msg_update_callback.general(msg)
return
path.parent.mkdir(parents=True, exist_ok=True)
msg = f"Downloading file from '{url}' to '{filepath}'..."
logger.info(msg)
msg_update_callback.info(msg)
response = requests.get(url, stream=True,
headers={'Cache-Control': 'no-cache'})
response.raise_for_status()
with path.open(mode='wb') as f:
for chunk in response.iter_content(chunk_size=SIZE_1MB):
f.write(chunk)
msg = "Download complete"
logger.info(msg)
msg_update_callback.general(msg)
def read_data_file(filepath, logger=NULL_LOGGER,
msg_update_callback=NullPrinter()):
"""Retrieve file content from local disk as a string.
:param str filepath: absolute filepath of the file, whose content we want
to read.
:param logging.Logger logger: logger to log with.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:return: the contents of the file.
:rtype: str
:raises FileNotFoundError: if requested data file cannot be
found.
"""
path = pathlib.Path(filepath)
try:
contents = path.read_text()
except FileNotFoundError as err:
msg_update_callback.error(f"{err}")
logger.error(f"{err}", exc_info=True)
raise
msg = f"Found data file: {path}"
msg_update_callback.general(msg)
logger.debug(msg)
return contents
def flatten_dictionary(input_dict, parent_key='', separator='.'):
"""Flatten a given dictionary with nested dictionaries if any.
Example: { 'a' : {'b':'c', 'd': {'e' : 'f'}}, 'g' : 'h'} will be flattened
to {'a.b': 'c', 'a.d.e': 'f', 'g': 'h'}
This will flatten only the values of dict type.
:param dict input_dict:
:param str parent_key: parent key that gets prefixed while forming flattened key # noqa: E501
:param str separator: use the separator to form flattened key
:return: flattened dictionary
:rtype: dict
"""
flattened_dict = {}
for k in input_dict.keys():
val = input_dict.get(k)
key_prefix = f"{parent_key}{k}"
if isinstance(val, dict):
flattened_dict.update(flatten_dictionary(val, f"{key_prefix}{separator}")) # noqa: E501
else:
flattened_dict.update({key_prefix: val})
return flattened_dict
def escape_query_filter_expression_value(value):
value_str = str(value)
value_str = value_str.replace('(', "\\(")
value_str = value_str.replace(')', "\\)")
value_str = value_str.replace(';', "\\;")
value_str = value_str.replace(',', "\\,")
return value_str
def construct_filter_string(filters: dict):
"""Construct &-ed filter string from the dict.
:param dict filters: dictionary containing key and values for the filters
"""
filter_string = ""
if filters:
filter_expressions = []
for (key, value) in filters.items():
if key and value:
filter_exp = f"{key}=={urllib.parse.quote(escape_query_filter_expression_value(value))}" # noqa: E501
filter_expressions.append(filter_exp)
filter_string = ";".join(filter_expressions)
return filter_string
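# For example, construct_filter_string({'name': 'test vm', 'org': None})
# yields "name==test%20vm": None values are skipped, special FIQL characters
# are backslash-escaped by escape_query_filter_expression_value, the value is
# URL-quoted, and multiple expressions are joined with ';'.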
def extract_id_from_href(href):
"""Extract id from an href.
'https://vmware.com/api/admin/user/123456' will return 123456
:param str href: an href
:return: id
"""
if not href:
return None
if '/' in href:
return href.split('/')[-1]
return href
# ToDo: Devise a better way to find the max api version
# without converting the strings to float.
# e.g. 5.20 will be smaller than 5.8 if compared as float, which is wrong
def get_max_api_version(api_versions: List[str]) -> str:
return str(max(VCDApiVersion(x) for x in api_versions))
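# The float pitfall mentioned above: float('5.20') == 5.2 < float('5.8'), while
# API version '5.20' is in fact newer than '5.8'. Comparing VCDApiVersion
# objects (or the numeric components, e.g. (5, 20) > (5, 8)) keeps the
# ordering correct.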
|
the-stack_0_16557 |
import pandas as pd
# Lists are enclosed in brackets:
# l = [1, 2, "a"]
# Tuples are enclosed in parentheses:
# Tuples are faster and consume less memory
# t = (1, 2, "a")
# Dictionaries are built with curly brackets:
# d = {"a":1, "b":2}
# Sets are made using the set() builtin function
# Python List vs. Tuples (Key points to remember)
# The literal syntax of tuples is shown by parentheses ()
# whereas the literal syntax of lists is shown by square brackets []
# Lists has variable length, tuple has fixed length.
# List has mutable nature, tuple has immutable nature.
# List has more functionality than the tuple.
# Basics of creating Pandas DataFrames from Lists and Dictionaries
# http://pbpython.com/pandas-list-dict.html
# https://www.datacamp.com/community/tutorials/pandas-read-csv
def csv_2sql( csv_file_name, table_name ):
data = pd.read_csv( csv_file_name )
# Get the first 5 rows
# print( data.head() )
rows, c_count = data.shape
print( "# Number of rows={} and columns={}".format(rows, c_count ) )
p = ' '
print( "sql = '''")
print( "CREATE TABLE {} ( ".format(table_name) )
i = 0
for col in data.columns:
i = i+1
t = data[col].dtype
if t == 'int64':
t = "INTEGER"
else:
t = "LVARCHAR"
if i == c_count:
print( p, col, t, " ); " )
else :
print( p, col, t, "," )
print( "'''")
print( )
print( "sql = '''")
print( "INSERT INTO {} ( ".format(table_name) )
i = 0
for col in data.columns:
i = i+1
if i == c_count:
print( p, col, " ) " )
else :
print( p, col, "," )
# Python 3 specific ( end = ''), to print on same line
print( p, "VALUES ( ", end = '' )
i = 0
while i < c_count:
i = i+1
if i == c_count:
print( " ? ); " )
else :
# Python 3 specific
print( "?, ", end = '' )
print( "'''")
print( "stmt = IfxPy.prepare(conn, sql)" )
i = 0
for col in data.columns:
i = i+1
t = data[col].dtype
if t == 'int64':
t = "INTEGER"
else:
t = "LVARCHAR"
print()
print( "c{} = None".format(i) )
print( "IfxPy.bind_param(stmt, {}, c{}, IfxPy.SQL_PARAM_INPUT, IfxPy.{})".format(i, i, t ) )
####### Run the sample function ######
if __name__ == "__main__":
csv_2sql('sample.csv', 'tab1')
|
the-stack_0_16560 | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive State Space Model Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import AutoregressiveMovingAverageStateSpaceModel
from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel
tfd = tfp.distributions
def arma_explicit_logp(y, ar_coefs, ma_coefs, level_scale):
"""Manual log-prob computation for arma(p, q) process."""
# Source: page 132 of
# http://www.ru.ac.bd/stat/wp-content/uploads/sites/25/2019/03/504_02_Hamilton_Time-Series-Analysis.pdf
p = len(ar_coefs)
q = len(ma_coefs)
t = len(y)
# For the first few steps of y, where previous values
# are not available, we model them as zero-mean with
# stddev `prior_scale`.
e = np.zeros([t])
for i in range(p):
zero_padded_y = np.zeros([p])
zero_padded_y[p - i:p] = y[:i]
pred_y = np.dot(zero_padded_y, ar_coefs[::-1])
e[i] = y[i] - pred_y
for i in range(p, len(y)):
pred_y = (np.dot(y[i - p:i], ar_coefs[::-1]) +
np.dot(e[i - q:i], ma_coefs[::-1]))
e[i] = y[i] - pred_y
lp = (-((t - p) / 2) * np.log(2 * np.pi)
- ((t - p) / 2) * np.log(level_scale ** 2)
- np.sum(e ** 2 / (2 * level_scale ** 2)))
return lp
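# The recursion above follows the ARMA(p, q) form
#     y[t] = sum_i ar_coefs[i] * y[t-1-i] + sum_j ma_coefs[j] * e[t-1-j] + e[t],
# so e[t] is recovered as the one-step prediction error. For instance, with
# ar_coefs=[0.7] and ma_coefs=[0.5], e[0] = y[0], the prediction for y[1] is
# 0.7 * y[0] + 0.5 * e[0], and e[1] = y[1] minus that prediction.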
class _AutoregressiveMovingAverageStateSpaceModelTest(test_util.TestCase):
def testEqualsAutoregressive(self):
# An ARMA(p, 0) process is just an AR(p) processes
num_timesteps = 10
observed_time_series = self._build_placeholder(
np.random.randn(num_timesteps, 1))
level_scale = self._build_placeholder(0.1)
# We'll test an AR1 process, and also (just for kicks) that the trivial
# embedding as an AR2 process gives the same model.
coefficients_order1 = np.array([1.]).astype(self.dtype)
coefficients_order2 = np.array([1., 1.]).astype(self.dtype)
ar1_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order1,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar2_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order2,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
arma1_ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=coefficients_order1,
ma_coefficients=np.array([0.]).astype(self.dtype),
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
arma2_ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=coefficients_order2,
ma_coefficients=np.array([0.]).astype(self.dtype),
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
ar1_lp, arma1_lp, ar2_lp, arma2_lp = (
ar1_ssm.log_prob(observed_time_series),
arma1_ssm.log_prob(observed_time_series),
ar2_ssm.log_prob(observed_time_series),
arma2_ssm.log_prob(observed_time_series)
)
self.assertAllClose(ar1_lp, arma1_lp)
self.assertAllClose(ar2_lp, arma2_lp)
def testLogprobCorrectness(self):
# Compare the state-space model's log-prob to an explicit implementation.
num_timesteps = 10
observed_time_series_ = np.random.randn(num_timesteps)
ar_coefficients_ = np.array([.7, -.1]).astype(self.dtype)
ma_coefficients_ = np.array([0.5, -0.4]).astype(self.dtype)
level_scale_ = 1.0
observed_time_series = self._build_placeholder(observed_time_series_)
level_scale = self._build_placeholder(level_scale_)
expected_logp = arma_explicit_logp(
observed_time_series_, ar_coefficients_, ma_coefficients_, level_scale_)
ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=ar_coefficients_,
ma_coefficients=ma_coefficients_,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 0., 0.]))
lp = ssm.log_prob(observed_time_series[..., tf.newaxis])
self.assertAllClose(lp, expected_logp, rtol=5e-2)
def testBatchShape(self):
# Check that the model builds with batches of parameters.
order = 3
batch_shape = [4, 2]
# No `_build_placeholder`, because coefficients must have static shape.
coefficients = np.random.randn(*(batch_shape + [order])).astype(self.dtype)
order = max(order, order + 1) # shape of initial_state_prior, scale_diag
level_scale = self._build_placeholder(
np.exp(np.random.randn(*batch_shape)))
ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=10,
ar_coefficients=coefficients,
ma_coefficients=coefficients,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=self._build_placeholder(np.ones([order]))))
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(ssm.batch_shape),
batch_shape)
else:
self.assertAllEqual(ssm.batch_shape_tensor(), batch_shape)
y = ssm.sample(seed=test_util.test_seed(sampler_type='stateless'))
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(y.shape)[:-2], batch_shape)
else:
self.assertAllEqual(tf.shape(y)[:-2], batch_shape)
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class AutoregressiveMovingAverageStateSpaceModelTestStaticShape32(
_AutoregressiveMovingAverageStateSpaceModelTest):
dtype = np.float32
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class AutoregressiveMovingAverageStateSpaceModelTestDynamicShape32(
_AutoregressiveMovingAverageStateSpaceModelTest):
dtype = np.float32
use_static_shape = False
@test_util.test_all_tf_execution_regimes
class AutoregressiveMovingAverageStateSpaceModelTestStaticShape64(
_AutoregressiveMovingAverageStateSpaceModelTest):
dtype = np.float64
use_static_shape = True
# Don't run tests for the base class.
del _AutoregressiveMovingAverageStateSpaceModelTest
if __name__ == '__main__':
test_util.main()
|
the-stack_0_16561 | from ..layout import Channel, Layout, load_speakers, load_real_layout, Speaker, RealLayout
from ..geom import cart, PolarPosition, CartesianPosition
from ...common import PolarScreen, CartesianScreen
from attr import evolve
import pytest
import numpy as np
import numpy.testing as npt
@pytest.fixture
def layout():
# odd nominal positions, for testing
return Layout(name="test", channels=[
Channel(name="M+030", polar_position=(30, 0.0, 2.0),
polar_nominal_position=(25, 0.0, 1.5), az_range=(25, 30), el_range=(0, 0), is_lfe=False),
Channel(name="M-030", polar_position=PolarPosition(-30, 0.0, 2.0),
polar_nominal_position=PolarPosition(-25, 0.0, 1.5), az_range=(-30, -25)),
])
def test_positions(layout):
npt.assert_allclose(layout.positions, [cart(30, 0, 2), cart(-30, 0, 2)])
def test_norm_positions(layout):
npt.assert_allclose(layout.norm_positions, [cart(30, 0, 1), cart(-30, 0, 1)])
def test_nominal_positions(layout):
npt.assert_allclose(layout.nominal_positions, [cart(25, 0, 1.5), cart(-25, 0, 1.5)])
def test_without_lfe(layout):
lfe_channel = Channel(name="LFE", polar_position=(30, -20, 2), is_lfe=True)
layout_lfe = evolve(layout, channels=layout.channels + [lfe_channel])
assert len(layout_lfe.channels) == 3
assert len(layout_lfe.without_lfe.channels) == 2
def test_channel_names(layout):
assert layout.channel_names == ["M+030", "M-030"]
def test_channels_by_name(layout):
assert layout.channels_by_name == {
"M+030": layout.channels[0],
"M-030": layout.channels[1],
}
def test_default_nominal_range():
# defaulted nominal position and ranges should be kept when the position is modified
default_channel = Channel(name="name", polar_position=(10, 20, 1))
modified_channel = evolve(default_channel, polar_position=PolarPosition(30, 40, 1))
for channel in [default_channel, modified_channel]:
assert channel.polar_nominal_position == PolarPosition(10, 20, 1)
assert channel.az_range == (10, 10)
assert channel.el_range == (20, 20)
def test_Channel_check_position():
errors = []
Channel(name="name", polar_position=(10, 20, 1)).check_position(callback=errors.append)
Channel(name="name", polar_position=(180, 20, 1), az_range=(175, -175)).check_position(callback=errors.append)
Channel(name="name", polar_position=(180, 20, 1), az_range=(180, 180)).check_position(callback=errors.append)
assert not errors
errors = []
Channel(name="name", polar_position=(10, 20, 1), az_range=(-5, 5)).check_position(callback=errors.append)
assert errors == ["name: azimuth 10.0 out of range (-5, 5)."]
errors = []
Channel(name="name", polar_position=(10, 20, 1), el_range=(0, 15)).check_position(callback=errors.append)
assert errors == ["name: elevation 20.0 out of range (0, 15)."]
def test_Layout_check_position(layout):
errors = []
layout.check_positions(callback=errors.append)
assert errors == []
layout_err = evolve(layout, channels=[
(evolve(channel, polar_position=PolarPosition(30, 10, 1)) if channel.name == "M+030" else channel)
for channel in layout.channels])
errors = []
layout_err.check_positions(callback=errors.append)
assert errors == ["M+030: elevation 10.0 out of range (0, 0)."]
def test_Layout_with_speakers_real_layout(layout):
speakers = [Speaker(channel=1, names=["M+030"], polar_position=PolarPosition(25, 0, 1.5)),
Speaker(channel=2, names=["M-030"]),
Speaker(channel=3, names=["M-110"])]
screen = PolarScreen(aspectRatio=1.5, centrePosition=PolarPosition(10.0, 20.0, 2.0), widthAzimuth=30.0)
new_layout, upmix = layout.with_speakers(speakers)
npt.assert_allclose(new_layout.positions, [cart(25, 0, 1.5), cart(-30, 0, 2)])
npt.assert_allclose(upmix, [[0, 0], [1, 0], [0, 1], [0, 0]])
new_layout, upmix = layout.with_real_layout(RealLayout(speakers=speakers, screen=screen))
npt.assert_allclose(new_layout.positions, [cart(25, 0, 1.5), cart(-30, 0, 2)])
npt.assert_allclose(upmix, [[0, 0], [1, 0], [0, 1], [0, 0]])
assert new_layout.screen == screen
def test_Layout_check_upmix_matrix(layout):
errors = []
upmix = np.array([[0, 0],
[1, 0],
[0, 0.5],
[0, 0]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == []
errors = []
upmix = np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == ["Channel M-030 not mapped to any output."]
errors = []
upmix = np.array([[0, 0],
[1, 0],
[0, 1],
[0, 1]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == ["Channel M-030 mapped to multiple outputs: [2, 3]."]
errors = []
upmix = np.array([[0, 0],
[1, 1],
[0, 0],
[0, 0]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == ["Speaker idx 1 used by multiple channels: ['M+030', 'M-030']"]
def test_load_layout_info():
def run_test(yaml_obj, expected, func=load_real_layout):
from ruamel import yaml
from six import StringIO
yaml_str = yaml.dump(yaml_obj)
result = func(StringIO(yaml_str))
assert expected == result
run_test(dict(speakers=[dict(channel=0, names="M+000")]),
RealLayout(speakers=[Speaker(0, ["M+000"])]))
run_test(dict(speakers=[dict(channel=0, names=["M+000"])]),
RealLayout(speakers=[Speaker(0, ["M+000"])]))
run_test(dict(speakers=[dict(channel=0, names=["M+000"], position=dict(az=10, el=5, r=1))]),
RealLayout(speakers=[Speaker(0, ["M+000"], PolarPosition(10, 5, 1))]))
run_test(dict(speakers=[dict(channel=0, names=["M+000"], gain_linear=0.5)]),
RealLayout(speakers=[Speaker(0, ["M+000"], gain_linear=0.5)]))
with pytest.raises(Exception) as excinfo:
run_test(dict(speakers=[dict(channel=0, names=["M+000"], position=dict(az=10, el=5))]),
RealLayout(speakers=[Speaker(0, ["M+000"], PolarPosition(10, 5, 1))]))
assert "Unknown position format" in str(excinfo.value)
# old style with speakers at the top level
run_test([dict(channel=0, names="M+000")],
RealLayout(speakers=[Speaker(0, ["M+000"])]))
# polar screen
run_test(dict(screen=dict(type="polar", aspectRatio=1.5, centrePosition=dict(az=10, el=20, r=2), widthAzimuth=30)),
RealLayout(screen=PolarScreen(aspectRatio=1.5, centrePosition=PolarPosition(10.0, 20.0, 2.0), widthAzimuth=30.0)))
# Cartesian screen
run_test(dict(screen=dict(type="cart", aspectRatio=1.5, centrePosition=dict(X=0.1, Y=0.9, Z=0.2), widthX=0.3)),
RealLayout(screen=CartesianScreen(aspectRatio=1.5, centrePosition=CartesianPosition(0.1, 0.9, 0.2), widthX=0.3)))
# passes through null screens
run_test(dict(screen=None),
RealLayout(screen=None))
# legacy speakers wrapper
run_test(dict(speakers=[dict(channel=0, names="M+000")]),
[Speaker(0, ["M+000"])],
func=load_speakers)
|
the-stack_0_16562 | import collections
import logging
from typing import Dict, List, Optional, Set, Tuple, Union, Callable
from blspy import AugSchemeMPL, G1Element
from chiabip158 import PyBIP158
from clvm.casts import int_from_bytes
from chia.consensus.block_record import BlockRecord
from chia.consensus.block_rewards import (
calculate_base_farmer_reward,
calculate_pool_reward,
)
from chia.consensus.block_root_validation import validate_block_merkle_roots
from chia.full_node.mempool_check_conditions import mempool_check_conditions_dict
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.coinbase import create_farmer_coin, create_pool_coin
from chia.consensus.constants import ConsensusConstants
from chia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from chia.consensus.find_fork_point import find_fork_point_in_chain
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.condition_with_args import ConditionWithArgs
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.name_puzzle_condition import NPC
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.condition_tools import (
pkm_pairs_for_conditions_dict,
coin_announcements_names_for_npc,
puzzle_announcements_names_for_npc,
)
from chia.util.errors import Err
from chia.util.generator_tools import (
additions_for_npc,
tx_removals_and_additions,
)
from chia.util.hash import std_hash
from chia.util.ints import uint32, uint64, uint128
log = logging.getLogger(__name__)
async def validate_block_body(
constants: ConsensusConstants,
blocks: BlockchainInterface,
block_store: BlockStore,
coin_store: CoinStore,
peak: Optional[BlockRecord],
block: Union[FullBlock, UnfinishedBlock],
height: uint32,
npc_result: Optional[NPCResult],
fork_point_with_peak: Optional[uint32],
get_block_generator: Callable,
) -> Tuple[Optional[Err], Optional[NPCResult]]:
"""
This assumes the header block has been completely validated.
Validates the transactions and body of the block. Returns None for the first value if everything
validates correctly, or an Err if something does not validate. For the second value, returns a CostResult
only if validation succeeded, and there are transactions. In other cases it returns None. The NPC result is
the result of running the generator with the previous generators refs. It is only present for transaction
blocks which have spent coins.
"""
if isinstance(block, FullBlock):
assert height == block.height
prev_transaction_block_height: uint32 = uint32(0)
    # 1. For non transaction-blocks: foliage block, transaction filter, transactions info, and generator must
# be empty. If it is a block but not a transaction block, there is no body to validate. Check that all fields are
# None
if block.foliage.foliage_transaction_block_hash is None:
if (
block.foliage_transaction_block is not None
or block.transactions_info is not None
or block.transactions_generator is not None
):
return Err.NOT_BLOCK_BUT_HAS_DATA, None
prev_tb: BlockRecord = blocks.block_record(block.prev_header_hash)
while not prev_tb.is_transaction_block:
prev_tb = blocks.block_record(prev_tb.prev_hash)
assert prev_tb.timestamp is not None
if (
prev_tb.timestamp > constants.INITIAL_FREEZE_END_TIMESTAMP
and len(block.transactions_generator_ref_list) > 0
):
return Err.NOT_BLOCK_BUT_HAS_DATA, None
return None, None # This means the block is valid
# All checks below this point correspond to transaction blocks
# 2. For blocks, foliage block, transactions info must not be empty
if block.foliage_transaction_block is None or block.transactions_info is None:
return Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA, None
assert block.foliage_transaction_block is not None
# keeps track of the reward coins that need to be incorporated
expected_reward_coins: Set[Coin] = set()
# 3. The transaction info hash in the Foliage block must match the transaction info
if block.foliage_transaction_block.transactions_info_hash != std_hash(block.transactions_info):
return Err.INVALID_TRANSACTIONS_INFO_HASH, None
# 4. The foliage block hash in the foliage block must match the foliage block
if block.foliage.foliage_transaction_block_hash != std_hash(block.foliage_transaction_block):
return Err.INVALID_FOLIAGE_BLOCK_HASH, None
# 5. The reward claims must be valid for the previous blocks, and current block fees
# If height == 0, expected_reward_coins will be left empty
if height > 0:
# Add reward claims for all blocks from the prev prev block, until the prev block (including the latter)
prev_transaction_block = blocks.block_record(block.foliage_transaction_block.prev_transaction_block_hash)
prev_transaction_block_height = prev_transaction_block.height
assert prev_transaction_block.fees is not None
pool_coin = create_pool_coin(
prev_transaction_block_height,
prev_transaction_block.pool_puzzle_hash,
calculate_pool_reward(prev_transaction_block.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
prev_transaction_block_height,
prev_transaction_block.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(prev_transaction_block.height) + prev_transaction_block.fees),
constants.GENESIS_CHALLENGE,
)
# Adds the previous block
expected_reward_coins.add(pool_coin)
expected_reward_coins.add(farmer_coin)
# For the second block in the chain, don't go back further
if prev_transaction_block.height > 0:
curr_b = blocks.block_record(prev_transaction_block.prev_hash)
while not curr_b.is_transaction_block:
expected_reward_coins.add(
create_pool_coin(
curr_b.height,
curr_b.pool_puzzle_hash,
calculate_pool_reward(curr_b.height),
constants.GENESIS_CHALLENGE,
)
)
expected_reward_coins.add(
create_farmer_coin(
curr_b.height,
curr_b.farmer_puzzle_hash,
calculate_base_farmer_reward(curr_b.height),
constants.GENESIS_CHALLENGE,
)
)
curr_b = blocks.block_record(curr_b.prev_hash)
if set(block.transactions_info.reward_claims_incorporated) != expected_reward_coins:
return Err.INVALID_REWARD_COINS, None
if block.foliage_transaction_block.timestamp > constants.INITIAL_FREEZE_END_TIMESTAMP:
if len(block.transactions_info.reward_claims_incorporated) != len(expected_reward_coins):
# No duplicates, after transaction freeze period. Duplicates cause no issues because we filter them out
# anyway.
return Err.INVALID_REWARD_COINS, None
removals: List[bytes32] = []
coinbase_additions: List[Coin] = list(expected_reward_coins)
additions: List[Coin] = []
coin_announcement_names: Set[bytes32] = set()
puzzle_announcement_names: Set[bytes32] = set()
npc_list: List[NPC] = []
removals_puzzle_dic: Dict[bytes32, bytes32] = {}
cost: uint64 = uint64(0)
    # We check in header validation that the timestamp is not more than 10 minutes into the future
if (
block.foliage_transaction_block.timestamp <= constants.INITIAL_FREEZE_END_TIMESTAMP
and block.transactions_generator is not None
):
# 6. No transactions before INITIAL_TRANSACTION_FREEZE timestamp
return Err.INITIAL_TRANSACTION_FREEZE, None
else:
# 7a. The generator root must be the hash of the serialized bytes of
# the generator for this block (or zeroes if no generator)
if block.transactions_generator is not None:
if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
else:
if block.transactions_info.generator_root != bytes([0] * 32):
return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
# 8a. The generator_ref_list must be the hash of the serialized bytes of
# the generator ref list for this block (or 'one' bytes [0x01] if no generator)
# 8b. The generator ref list length must be less than or equal to MAX_GENERATOR_REF_LIST_SIZE entries
# 8c. The generator ref list must not point to a height >= this block's height
if block.transactions_generator_ref_list in (None, []):
if block.transactions_info.generator_refs_root != bytes([1] * 32):
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
else:
# If we have a generator reference list, we must have a generator
if block.transactions_generator is None:
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
# The generator_refs_root must be the hash of the concatenation of the List[uint32]
generator_refs_hash = std_hash(b"".join([bytes(i) for i in block.transactions_generator_ref_list]))
if block.transactions_info.generator_refs_root != generator_refs_hash:
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
if len(block.transactions_generator_ref_list) > constants.MAX_GENERATOR_REF_LIST_SIZE:
return Err.TOO_MANY_GENERATOR_REFS, None
if any([index >= height for index in block.transactions_generator_ref_list]):
return Err.FUTURE_GENERATOR_REFS, None
if block.transactions_generator is not None:
# Get List of names removed, puzzles hashes for removed coins and conditions created
assert npc_result is not None
cost = calculate_cost_of_program(block.transactions_generator, npc_result, constants.COST_PER_BYTE)
npc_list = npc_result.npc_list
# 7. Check that cost <= MAX_BLOCK_COST_CLVM
log.debug(
f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
f"percent full: {round(100 * (cost / constants.MAX_BLOCK_COST_CLVM), 2)}%"
)
if cost > constants.MAX_BLOCK_COST_CLVM:
return Err.BLOCK_COST_EXCEEDS_MAX, None
# 8. The CLVM program must not return any errors
if npc_result.error is not None:
return Err(npc_result.error), None
for npc in npc_list:
removals.append(npc.coin_name)
removals_puzzle_dic[npc.coin_name] = npc.puzzle_hash
additions = additions_for_npc(npc_list)
coin_announcement_names = coin_announcements_names_for_npc(npc_list)
puzzle_announcement_names = puzzle_announcements_names_for_npc(npc_list)
else:
assert npc_result is None
# 9. Check that the correct cost is in the transactions info
if block.transactions_info.cost != cost:
return Err.INVALID_BLOCK_COST, None
additions_dic: Dict[bytes32, Coin] = {}
# 10. Check additions for max coin amount
# Be careful to check for 64 bit overflows in other languages. This is the max 64 bit unsigned integer
# We will not even reach here because Coins do type checking (uint64)
for coin in additions + coinbase_additions:
additions_dic[coin.name()] = coin
if coin.amount > constants.MAX_COIN_AMOUNT:
return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
# 11. Validate addition and removal roots
root_error = validate_block_merkle_roots(
block.foliage_transaction_block.additions_root,
block.foliage_transaction_block.removals_root,
additions + coinbase_additions,
removals,
)
if root_error:
return root_error, None
# 12. The additions and removals must result in the correct filter
byte_array_tx: List[bytes32] = []
for coin in additions + coinbase_additions:
byte_array_tx.append(bytearray(coin.puzzle_hash))
for coin_name in removals:
byte_array_tx.append(bytearray(coin_name))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded_filter = bytes(bip158.GetEncoded())
filter_hash = std_hash(encoded_filter)
if filter_hash != block.foliage_transaction_block.filter_hash:
return Err.INVALID_TRANSACTIONS_FILTER_HASH, None
# 13. Check for duplicate outputs in additions
addition_counter = collections.Counter(_.name() for _ in additions + coinbase_additions)
for k, v in addition_counter.items():
if v > 1:
return Err.DUPLICATE_OUTPUT, None
# 14. Check for duplicate spends inside block
removal_counter = collections.Counter(removals)
for k, v in removal_counter.items():
if v > 1:
return Err.DOUBLE_SPEND, None
# 15. Check if removals exist and were not previously spent. (unspent_db + diff_store + this_block)
# The fork point is the last block in common between the peak chain and the chain of `block`
if peak is None or height == 0:
fork_h: int = -1
elif fork_point_with_peak is not None:
fork_h = fork_point_with_peak
else:
fork_h = find_fork_point_in_chain(blocks, peak, blocks.block_record(block.prev_header_hash))
# Get additions and removals since (after) fork_h but not including this block
# The values include: the coin that was added, the height of the block in which it was confirmed, and the
# timestamp of the block in which it was confirmed
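    # (Concrete example: if the peak is at height 1000 and `block` extends a branch that
    # diverged from it at height 990, then fork_h == 990 and these collections describe
    # the branch blocks at heights 991 .. height - 1.)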
additions_since_fork: Dict[bytes32, Tuple[Coin, uint32, uint64]] = {} # This includes coinbase additions
removals_since_fork: Set[bytes32] = set()
# For height 0, there are no additions and removals before this block, so we can skip
if height > 0:
# First, get all the blocks in the fork > fork_h, < block.height
prev_block: Optional[FullBlock] = await block_store.get_full_block(block.prev_header_hash)
reorg_blocks: Dict[uint32, FullBlock] = {}
curr: Optional[FullBlock] = prev_block
assert curr is not None
while curr.height > fork_h:
if curr.height == 0:
break
curr = await block_store.get_full_block(curr.prev_header_hash)
assert curr is not None
reorg_blocks[curr.height] = curr
if fork_h != -1:
assert len(reorg_blocks) == height - fork_h - 1
curr = prev_block
assert curr is not None
while curr.height > fork_h:
# Coin store doesn't contain coins from fork, we have to run generator for each block in fork
if curr.transactions_generator is not None:
# These blocks are in the past and therefore assumed to be valid, so get_block_generator won't raise
curr_block_generator: Optional[BlockGenerator] = await get_block_generator(curr)
assert curr_block_generator is not None and curr.transactions_info is not None
curr_npc_result = get_name_puzzle_conditions(
curr_block_generator,
min(constants.MAX_BLOCK_COST_CLVM, curr.transactions_info.cost),
False,
)
removals_in_curr, additions_in_curr = tx_removals_and_additions(curr_npc_result.npc_list)
else:
removals_in_curr = []
additions_in_curr = []
for c_name in removals_in_curr:
assert c_name not in removals_since_fork
removals_since_fork.add(c_name)
for c in additions_in_curr:
assert c.name() not in additions_since_fork
assert curr.foliage_transaction_block is not None
additions_since_fork[c.name()] = (
c,
curr.height,
curr.foliage_transaction_block.timestamp,
)
for coinbase_coin in curr.get_included_reward_coins():
assert coinbase_coin.name() not in additions_since_fork
assert curr.foliage_transaction_block is not None
additions_since_fork[coinbase_coin.name()] = (
coinbase_coin,
curr.height,
curr.foliage_transaction_block.timestamp,
)
if curr.height == 0:
break
curr = reorg_blocks[curr.height - 1]
assert curr is not None
removal_coin_records: Dict[bytes32, CoinRecord] = {}
for rem in removals:
if rem in additions_dic:
# Ephemeral coin
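            # (an ephemeral coin is created and spent within this very block,
            # so it is never looked up in the coin store)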
rem_coin: Coin = additions_dic[rem]
new_unspent: CoinRecord = CoinRecord(
rem_coin,
height,
height,
True,
False,
block.foliage_transaction_block.timestamp,
)
removal_coin_records[new_unspent.name] = new_unspent
else:
unspent = await coin_store.get_coin_record(rem)
if unspent is not None and unspent.confirmed_block_index <= fork_h:
# Spending something in the current chain, confirmed before fork
# (We ignore all coins confirmed after fork)
if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
# Check for coins spent in an ancestor block
return Err.DOUBLE_SPEND, None
removal_coin_records[unspent.name] = unspent
else:
# This coin is not in the current heaviest chain, so it must be in the fork
if rem not in additions_since_fork:
# Check for spending a coin that does not exist in this fork
return Err.UNKNOWN_UNSPENT, None
(
new_coin,
confirmed_height,
confirmed_timestamp,
) = additions_since_fork[rem]
new_coin_record: CoinRecord = CoinRecord(
new_coin,
confirmed_height,
uint32(0),
False,
False,
confirmed_timestamp,
)
removal_coin_records[new_coin_record.name] = new_coin_record
# This check applies to both coins created before fork (pulled from coin_store),
# and coins created after fork (additions_since_fork)
if rem in removals_since_fork:
# This coin was spent in the fork
return Err.DOUBLE_SPEND_IN_FORK, None
removed = 0
for unspent in removal_coin_records.values():
removed += unspent.coin.amount
added = 0
for coin in additions:
added += coin.amount
# 16. Check that the total coin amount for added is <= removed
if removed < added:
return Err.MINTING_COIN, None
fees = removed - added
assert fees >= 0
assert_fee_sum: uint128 = uint128(0)
for npc in npc_list:
if ConditionOpcode.RESERVE_FEE in npc.condition_dict:
fee_list: List[ConditionWithArgs] = npc.condition_dict[ConditionOpcode.RESERVE_FEE]
for cvp in fee_list:
fee = int_from_bytes(cvp.vars[0])
if fee < 0:
return Err.RESERVE_FEE_CONDITION_FAILED, None
assert_fee_sum = uint128(assert_fee_sum + fee)
# 17. Check that the assert fee sum <= fees, and that each reserved fee is non-negative
if fees < assert_fee_sum:
return Err.RESERVE_FEE_CONDITION_FAILED, None
# 18. Check that the fee amount + farmer reward < maximum coin amount
if fees + calculate_base_farmer_reward(height) > constants.MAX_COIN_AMOUNT:
return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
# 19. Check that the computed fees are equal to the fees in the block header
if block.transactions_info.fees != fees:
return Err.INVALID_BLOCK_FEE_AMOUNT, None
# 20. Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
for unspent in removal_coin_records.values():
if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
return Err.WRONG_PUZZLE_HASH, None
# 21. Verify conditions
# create hash_key list for aggsig check
pairs_pks: List[G1Element] = []
pairs_msgs: List[bytes] = []
for npc in npc_list:
assert height is not None
unspent = removal_coin_records[npc.coin_name]
error = mempool_check_conditions_dict(
unspent,
coin_announcement_names,
puzzle_announcement_names,
npc.condition_dict,
prev_transaction_block_height,
block.foliage_transaction_block.timestamp,
)
if error:
return error, None
for pk, m in pkm_pairs_for_conditions_dict(
npc.condition_dict, npc.coin_name, constants.AGG_SIG_ME_ADDITIONAL_DATA
):
pairs_pks.append(pk)
pairs_msgs.append(m)
# 22. Verify aggregated signature
# TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
if not block.transactions_info.aggregated_signature:
return Err.BAD_AGGREGATE_SIGNATURE, None
# noinspection PyTypeChecker
if not AugSchemeMPL.aggregate_verify(pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature):
return Err.BAD_AGGREGATE_SIGNATURE, None
return None, npc_result
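# --- Illustrative sketch (added for exposition; not part of the consensus logic) ---
# The filter check in step 12 can be reproduced in isolation: build the BIP158 filter over
# every addition puzzle hash and every removal name, hash its encoding, and compare the
# result against foliage_transaction_block.filter_hash. The two 32-byte values below are
# hypothetical placeholders; PyBIP158, std_hash and bytes32 are the same objects already
# used above.
if __name__ == "__main__":
    _hypothetical_addition_puzzle_hash = bytes32(b"\x11" * 32)
    _hypothetical_removal_name = bytes32(b"\x22" * 32)
    _byte_array_tx = [
        bytearray(_hypothetical_addition_puzzle_hash),
        bytearray(_hypothetical_removal_name),
    ]
    _recomputed_filter_hash = std_hash(bytes(PyBIP158(_byte_array_tx).GetEncoded()))
    log.info(f"Recomputed BIP158 filter hash: {_recomputed_filter_hash.hex()}")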
|
the-stack_0_16563 | #!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with CommonVoice.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beamsearch.
To run this recipe, do the following:
> python train.py hparams/train.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU and BeamSearch (no LM).
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pairwise Encoding (BPE).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training languages (all CommonVoice languages), and many
other possible variations.
Authors
* Titouan Parcollet 2020
"""
logger = logging.getLogger(__name__)
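# Example invocation (illustrative; the YAML path and folder values are placeholders).
# Any key defined in the hyperparameter file -- e.g. data_folder or output_folder, both
# read below -- can be overridden on the command line, because parse_arguments() collects
# the extra --key=value flags as overrides for load_hyperpyyaml:
#
#   python train.py hparams/train.yaml \
#       --data_folder=/path/to/CommonVoice \
#       --output_folder=results/crdnn_commonvoice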
# Define training procedure
class ASR(sb.core.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Forward pass
feats = self.hparams.compute_features(wavs)
feats = self.modules.normalize(feats, wav_lens)
## Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
feats = self.hparams.augmentation(feats)
x = self.modules.enc(feats.detach())
e_in = self.modules.emb(tokens_bos) # y_in bos + tokens
h, _ = self.modules.dec(e_in, x, wav_lens)
# Output layer for seq2seq log-probabilities
logits = self.modules.seq_lin(h)
p_seq = self.hparams.log_softmax(logits)
# Compute outputs
if stage == sb.Stage.TRAIN:
current_epoch = self.hparams.epoch_counter.current
if current_epoch <= self.hparams.number_of_ctc_epochs:
# Output layer for ctc log-probabilities
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
return p_ctc, p_seq, wav_lens
else:
return p_seq, wav_lens
else:
p_tokens, scores = self.hparams.beam_searcher(x, wav_lens)
return p_seq, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
current_epoch = self.hparams.epoch_counter.current
if stage == sb.Stage.TRAIN:
if current_epoch <= self.hparams.number_of_ctc_epochs:
p_ctc, p_seq, wav_lens = predictions
else:
p_seq, wav_lens = predictions
else:
p_seq, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
loss_seq = self.hparams.seq_cost(
p_seq, tokens_eos, length=tokens_eos_lens
)
# Add ctc loss if necessary
if (
stage == sb.Stage.TRAIN
and current_epoch <= self.hparams.number_of_ctc_epochs
):
loss_ctc = self.hparams.ctc_cost(
p_ctc, tokens, wav_lens, tokens_lens
)
loss = self.hparams.ctc_weight * loss_ctc
loss += (1 - self.hparams.ctc_weight) * loss_seq
else:
loss = loss_seq
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = self.tokenizer(
predicted_tokens, task="decode_from_list"
)
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer(target_words, task="decode_from_list")
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.optimizer.step()
self.optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
# Define custom data procedure
def dataio_prepare(hparams):
# 1. Define datasets
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(
sort_key="duration",
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
        # when sorting, do not shuffle in the dataloader! Otherwise sorting is pointless
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration",
reverse=True,
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
        # when sorting, do not shuffle in the dataloader! Otherwise sorting is pointless
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
# We also sort the validation data so it is faster to validate
# valid_data = valid_data.filtered_sorted(sort_key="duration")
valid_data = valid_data.filtered_sorted(
sort_key="duration",
reverse=True,
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
)
    # We also sort the test data so it is faster to evaluate
test_data = test_data.filtered_sorted(
sort_key="duration",
reverse=True,
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
datasets = [train_data, valid_data, test_data]
# defining tokenizer and loading it
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
annotation_train=hparams["train_csv"],
annotation_read="wrd",
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
bos_id=hparams["bos_index"],
eos_id=hparams["eos_index"]
)
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start_seg", "end_seg")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start_seg, end_seg):
# info = torchaudio.info(wav)
start = int(float(start_seg) * hparams["sample_rate"])
stop = int(float(end_seg) * hparams["sample_rate"])
speech_segment = {"file" : wav, "start" : start, "stop" : stop}
sig = sb.dataio.dataio.read_audio(speech_segment)
return sig
# resampled = torchaudio.transforms.Resample(
# info.sample_rate, hparams["sample_rate"],
# )(sig)
# return resampled
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
tokens_list = tokenizer.sp.encode_as_ids(wrd)
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_data, tokenizer
if __name__ == "__main__":
# Load hyperparameters file with command-line overrides
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
# Dataset preparation (parsing CommonVoice)
# from common_voice_prepare import prepare_common_voice # noqa
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Due to DDP, we do the preparation ONLY on the main python process
# run_on_main(
# prepare_common_voice,
# kwargs={
# "data_folder": hparams["data_folder"],
# "save_folder": hparams["save_folder"],
# "train_tsv_file": hparams["train_tsv_file"],
# "dev_tsv_file": hparams["dev_tsv_file"],
# "test_tsv_file": hparams["test_tsv_file"],
# "accented_letters": hparams["accented_letters"],
# "language": hparams["language"],
# },
# )
# Create the datasets objects as well as tokenization and encoding :-D
train_data, valid_data, test_set, tokenizer = dataio_prepare(hparams)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
opt_class=hparams["opt_class"],
checkpointer=hparams["checkpointer"],
)
# Adding objects to trainer.
asr_brain.tokenizer = tokenizer
# Training
# with torch.autograd.detect_anomaly():
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["test_dataloader_options"],
)
# Test
asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
asr_brain.evaluate(
test_set,
min_key="WER",
test_loader_kwargs=hparams["test_dataloader_options"],
)
|
the-stack_0_16564 | # Test functionality of the seller's side
from emarket.client_seller import ClientSeller
from emarket.emarket import Item
import time
from os import environ as env
from dotenv import load_dotenv, find_dotenv
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
else:
raise FileNotFoundError("Could not locate .env file")
#load the env vars
FRONT_SELLER_A_IP = env.get("FRONT_SELLER_A_IP")
FRONT_SELLER_B_IP = env.get("FRONT_SELLER_B_IP")
cs = ClientSeller([FRONT_SELLER_A_IP, FRONT_SELLER_B_IP])
print("####################### CREATE USER")
start_time = time.time()
csid = cs.create_user("Luke","flamma7", "enterprise")
print("--- %s seconds ---" % (time.time() - start_time))
print("KILL NOT A LEADER")
time.sleep(10)
## TEST LOGIN
print("####################### LOG IN")
start_time = time.time()
cs.login("flamma7", "enterprise")
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### LOG OUT")
start_time = time.time()
cs.logout()
print("--- %s seconds ---" % (time.time() - start_time))
cs.login("flamma7", "enterprise")
## TEST ITEM FOR SALE
print("####################### PUT ITEM FOR SALE")
i1 = Item("ether", 0, 0, ["crypto", "smart", "blockchain"], True, 1300, csid)
i2 = Item("bitcoin", 0, 1, ["crypto", "blockchain", "standard"], True, 33000, csid)
i3 = Item("dogecoin", 0, 2, ["crypto", "meme", "blockchain", "elon"], False, 0.03, csid)
i4 = Item("cardano", 0, 3, ["crypto", "blockchain", "smart", "nextgen"], True, 0.3, csid)
status, i1_id = cs.put_item_for_sale(i1, 500)
status, i2_id = cs.put_item_for_sale(i2, 100)
status, i3_id = cs.put_item_for_sale(i3, 300000)
start_time = time.time()
status, i4_id = cs.put_item_for_sale(i4, 300000)
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### CHANGE SALE PRICE")
start_time = time.time()
cs.change_sale_price_item(i3_id, 0.07)
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### REMOVE ITEM FROM SALE")
start_time = time.time()
cs.remove_item_from_sale(i2_id, 100)
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### DISPLAY ACTIVE ITEMS")
start_time = time.time()
cs.display_active_seller_items()
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### GET RATING")
start_time = time.time()
cs.get_rating()
print("--- %s seconds ---" % (time.time() - start_time))
# Create 2nd Seller
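# Illustrative sketch of a second seller (hypothetical credentials and item; it reuses
# the same two front-end IPs and the ClientSeller/Item API exercised above):
cs2 = ClientSeller([FRONT_SELLER_A_IP, FRONT_SELLER_B_IP])
cs2id = cs2.create_user("Nyota", "uhura", "enterprise")
cs2.login("uhura", "enterprise")
i5 = Item("litecoin", 0, 4, ["crypto", "blockchain"], True, 150, cs2id)
status, i5_id = cs2.put_item_for_sale(i5, 200)
cs2.display_active_seller_items()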
# Create 3rd Seller |
the-stack_0_16565 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
long_description = """
branching_process
"""
config = dict(
description='hawkes process fitting',
author='Dan MacKinlay',
url='URL to get it at.',
download_url='Where to download it.',
author_email='My email.',
version='0.1',
install_requires=[
'nose',
'scipy',
'numpy',
'seaborn',
'pandas',
],
packages=['branching_process'],
scripts=[],
name='branching_process',
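    # NOTE: long_description (defined above) is never passed into setup(); add a
    # long_description=long_description entry here if it should appear on PyPI.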
# # see https://python-packaging.readthedocs.io/en/latest/non-code-files.html
# package_data=dict(
# branching_process= ['datasets'],
# ),
# include_package_data=True
)
setup(**config)
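# Illustrative usage (assuming this setup.py sits at the project root):
#   pip install -e .          # editable install for development
#   python setup.py sdist     # build a source distribution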
|